| // Copyright 2014 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include "src/compiler/simplified-lowering.h" |
| |
| #include <limits> |
| |
| #include "include/v8-fast-api-calls.h" |
| #include "src/base/bits.h" |
| #include "src/base/small-vector.h" |
| #include "src/codegen/code-factory.h" |
| #include "src/codegen/machine-type.h" |
| #include "src/codegen/tick-counter.h" |
| #include "src/compiler/access-builder.h" |
| #include "src/compiler/common-operator.h" |
| #include "src/compiler/compiler-source-position-table.h" |
| #include "src/compiler/diamond.h" |
| #include "src/compiler/linkage.h" |
| #include "src/compiler/node-matchers.h" |
| #include "src/compiler/node-origin-table.h" |
| #include "src/compiler/node-properties.h" |
| #include "src/compiler/operation-typer.h" |
| #include "src/compiler/operator-properties.h" |
| #include "src/compiler/representation-change.h" |
| #include "src/compiler/simplified-operator.h" |
| #include "src/compiler/type-cache.h" |
| #include "src/numbers/conversions-inl.h" |
| #include "src/objects/objects.h" |
| #include "src/utils/address-map.h" |
| |
| namespace v8 { |
| namespace internal { |
| namespace compiler { |
| |
| // Macro for outputting trace information from representation inference. |
| #define TRACE(...) \ |
| do { \ |
| if (FLAG_trace_representation) PrintF(__VA_ARGS__); \ |
| } while (false) |
| |
| // Representation selection and lowering of {Simplified} operators to machine |
| // operators are intertwined. We use a fixpoint calculation to compute both the |
| // output representation and the best possible lowering for {Simplified} nodes. |
| // Representation change insertion ensures that all values are in the correct |
| // machine representation after this phase, as dictated by the machine |
| // operators themselves. |
| enum Phase { |
| // 1.) PROPAGATE: Traverse the graph from the end, pushing usage information |
| // backwards from uses to definitions, around cycles in phis, according |
| // to local rules for each operator. |
| // During this phase, the usage information for a node determines the best |
| // possible lowering for each operator so far, and that in turn determines |
| // the output representation. |
| // Therefore, to be correct, this phase must iterate to a fixpoint before |
| // the next phase can begin. |
| PROPAGATE, |
| |
| // 2.) RETYPE: Propagate types from type feedback forwards. |
| RETYPE, |
| |
| // 3.) LOWER: perform lowering for all {Simplified} nodes by replacing some |
| // operators for some nodes, expanding some nodes to multiple nodes, or |
| // removing some (redundant) nodes. |
| // During this phase, use the {RepresentationChanger} to insert |
| // representation changes between uses that demand a particular |
| // representation and nodes that produce a different representation. |
| LOWER |
| }; |
| |
| namespace { |
| |
| MachineRepresentation MachineRepresentationFromArrayType( |
| ExternalArrayType array_type) { |
| switch (array_type) { |
| case kExternalUint8Array: |
| case kExternalUint8ClampedArray: |
| case kExternalInt8Array: |
| return MachineRepresentation::kWord8; |
| case kExternalUint16Array: |
| case kExternalInt16Array: |
| return MachineRepresentation::kWord16; |
| case kExternalUint32Array: |
| case kExternalInt32Array: |
| return MachineRepresentation::kWord32; |
| case kExternalFloat32Array: |
| return MachineRepresentation::kFloat32; |
| case kExternalFloat64Array: |
| return MachineRepresentation::kFloat64; |
| case kExternalBigInt64Array: |
| case kExternalBigUint64Array: |
| UNIMPLEMENTED(); |
| } |
| UNREACHABLE(); |
| } |
| |
| UseInfo CheckedUseInfoAsWord32FromHint( |
| NumberOperationHint hint, const FeedbackSource& feedback = FeedbackSource(), |
| IdentifyZeros identify_zeros = kDistinguishZeros) { |
| switch (hint) { |
| case NumberOperationHint::kSignedSmall: |
| case NumberOperationHint::kSignedSmallInputs: |
| return UseInfo::CheckedSignedSmallAsWord32(identify_zeros, feedback); |
| case NumberOperationHint::kSigned32: |
| return UseInfo::CheckedSigned32AsWord32(identify_zeros, feedback); |
| case NumberOperationHint::kNumber: |
| return UseInfo::CheckedNumberAsWord32(feedback); |
| case NumberOperationHint::kNumberOrBoolean: |
| // Not used currently. |
| UNREACHABLE(); |
| case NumberOperationHint::kNumberOrOddball: |
| return UseInfo::CheckedNumberOrOddballAsWord32(feedback); |
| } |
| UNREACHABLE(); |
| } |
| |
| UseInfo CheckedUseInfoAsFloat64FromHint( |
| NumberOperationHint hint, const FeedbackSource& feedback, |
| IdentifyZeros identify_zeros = kDistinguishZeros) { |
| switch (hint) { |
| case NumberOperationHint::kSignedSmall: |
| case NumberOperationHint::kSignedSmallInputs: |
| case NumberOperationHint::kSigned32: |
| // Not used currently. |
| UNREACHABLE(); |
| case NumberOperationHint::kNumber: |
| return UseInfo::CheckedNumberAsFloat64(identify_zeros, feedback); |
| case NumberOperationHint::kNumberOrBoolean: |
| return UseInfo::CheckedNumberOrBooleanAsFloat64(identify_zeros, feedback); |
| case NumberOperationHint::kNumberOrOddball: |
| return UseInfo::CheckedNumberOrOddballAsFloat64(identify_zeros, feedback); |
| } |
| UNREACHABLE(); |
| } |
| |
| UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) { |
| switch (rep) { |
| case MachineRepresentation::kTaggedSigned: |
| return UseInfo::TaggedSigned(); |
| case MachineRepresentation::kTaggedPointer: |
| case MachineRepresentation::kTagged: |
| return UseInfo::AnyTagged(); |
| case MachineRepresentation::kFloat64: |
| return UseInfo::TruncatingFloat64(); |
| case MachineRepresentation::kFloat32: |
| return UseInfo::Float32(); |
| case MachineRepresentation::kWord8: |
| case MachineRepresentation::kWord16: |
| case MachineRepresentation::kWord32: |
| return UseInfo::TruncatingWord32(); |
| case MachineRepresentation::kWord64: |
| return UseInfo::Word64(); |
| case MachineRepresentation::kBit: |
| return UseInfo::Bool(); |
| case MachineRepresentation::kCompressedPointer: |
| case MachineRepresentation::kCompressed: |
| case MachineRepresentation::kSimd128: |
| case MachineRepresentation::kNone: |
| break; |
| } |
| UNREACHABLE(); |
| } |
| |
| UseInfo UseInfoForBasePointer(const FieldAccess& access) { |
| return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::Word(); |
| } |
| |
| UseInfo UseInfoForBasePointer(const ElementAccess& access) { |
| return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::Word(); |
| } |
| |
| void ReplaceEffectControlUses(Node* node, Node* effect, Node* control) { |
| for (Edge edge : node->use_edges()) { |
| if (NodeProperties::IsControlEdge(edge)) { |
| edge.UpdateTo(control); |
| } else if (NodeProperties::IsEffectEdge(edge)) { |
| edge.UpdateTo(effect); |
| } else { |
| DCHECK(NodeProperties::IsValueEdge(edge) || |
| NodeProperties::IsContextEdge(edge)); |
| } |
| } |
| } |
| |
| bool CanOverflowSigned32(const Operator* op, Type left, Type right, |
| TypeCache const* type_cache, Zone* type_zone) { |
| // We assume the inputs are checked Signed32 (or known statically to be |
| // Signed32). Technically, the inputs could also be minus zero, which we treat |
| // as 0 for the purpose of this function. |
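|   // Example: for kSpeculativeSafeIntegerAdd with left = Range(0, 2^30) and |
|   // right = Range(0, 2^31 - 1), left.Max() + right.Max() exceeds kMaxInt, so |
|   // this returns true and the lowering keeps an overflow-checking operator. |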
| if (left.Maybe(Type::MinusZero())) { |
| left = Type::Union(left, type_cache->kSingletonZero, type_zone); |
| } |
| if (right.Maybe(Type::MinusZero())) { |
| right = Type::Union(right, type_cache->kSingletonZero, type_zone); |
| } |
| left = Type::Intersect(left, Type::Signed32(), type_zone); |
| right = Type::Intersect(right, Type::Signed32(), type_zone); |
| if (left.IsNone() || right.IsNone()) return false; |
| switch (op->opcode()) { |
| case IrOpcode::kSpeculativeSafeIntegerAdd: |
| return (left.Max() + right.Max() > kMaxInt) || |
| (left.Min() + right.Min() < kMinInt); |
| |
| case IrOpcode::kSpeculativeSafeIntegerSubtract: |
| return (left.Max() - right.Min() > kMaxInt) || |
| (left.Min() - right.Max() < kMinInt); |
| |
| default: |
| UNREACHABLE(); |
| } |
| return true; |
| } |
| |
| bool IsSomePositiveOrderedNumber(Type type) { |
| return type.Is(Type::OrderedNumber()) && !type.IsNone() && type.Min() > 0; |
| } |
| |
| } // namespace |
| |
| #ifdef DEBUG |
| // Helpers for monotonicity checking. |
| class InputUseInfos { |
| public: |
| explicit InputUseInfos(Zone* zone) : input_use_infos_(zone) {} |
| |
| void SetAndCheckInput(Node* node, int index, UseInfo use_info) { |
| if (input_use_infos_.empty()) { |
| input_use_infos_.resize(node->InputCount(), UseInfo::None()); |
| } |
|     // Check that the new use information is a super-type of the old |
| // one. |
| DCHECK(IsUseLessGeneral(input_use_infos_[index], use_info)); |
| input_use_infos_[index] = use_info; |
| } |
| |
| private: |
| ZoneVector<UseInfo> input_use_infos_; |
| |
| static bool IsUseLessGeneral(UseInfo use1, UseInfo use2) { |
| return use1.truncation().IsLessGeneralThan(use2.truncation()); |
| } |
| }; |
| |
| #endif // DEBUG |
| |
| class RepresentationSelector { |
| public: |
| // Information for each node tracked during the fixpoint. |
| class NodeInfo final { |
| public: |
| // Adds new use to the node. Returns true if something has changed |
| // and the node has to be requeued. |
| bool AddUse(UseInfo info) { |
| Truncation old_truncation = truncation_; |
| truncation_ = Truncation::Generalize(truncation_, info.truncation()); |
| return truncation_ != old_truncation; |
| } |
| |
| void set_queued() { state_ = kQueued; } |
| void set_visited() { state_ = kVisited; } |
| void set_pushed() { state_ = kPushed; } |
| void reset_state() { state_ = kUnvisited; } |
| bool visited() const { return state_ == kVisited; } |
| bool queued() const { return state_ == kQueued; } |
| bool pushed() const { return state_ == kPushed; } |
| bool unvisited() const { return state_ == kUnvisited; } |
| Truncation truncation() const { return truncation_; } |
| void set_output(MachineRepresentation output) { representation_ = output; } |
| |
| MachineRepresentation representation() const { return representation_; } |
| |
| // Helpers for feedback typing. |
| void set_feedback_type(Type type) { feedback_type_ = type; } |
| Type feedback_type() const { return feedback_type_; } |
| void set_weakened() { weakened_ = true; } |
| bool weakened() const { return weakened_; } |
| void set_restriction_type(Type type) { restriction_type_ = type; } |
| Type restriction_type() const { return restriction_type_; } |
| |
| private: |
| enum State : uint8_t { kUnvisited, kPushed, kVisited, kQueued }; |
| State state_ = kUnvisited; |
| MachineRepresentation representation_ = |
| MachineRepresentation::kNone; // Output representation. |
| Truncation truncation_ = Truncation::None(); // Information about uses. |
| |
| Type restriction_type_ = Type::Any(); |
| Type feedback_type_; |
| bool weakened_ = false; |
| }; |
| |
| RepresentationSelector(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone, |
| RepresentationChanger* changer, |
| SourcePositionTable* source_positions, |
| NodeOriginTable* node_origins, |
| TickCounter* tick_counter, Linkage* linkage) |
| : jsgraph_(jsgraph), |
| zone_(zone), |
| might_need_revisit_(zone), |
| count_(jsgraph->graph()->NodeCount()), |
| info_(count_, zone), |
| #ifdef DEBUG |
| node_input_use_infos_(count_, InputUseInfos(zone), zone), |
| #endif |
| replacements_(zone), |
| changer_(changer), |
| revisit_queue_(zone), |
| traversal_nodes_(zone), |
| source_positions_(source_positions), |
| node_origins_(node_origins), |
| type_cache_(TypeCache::Get()), |
| op_typer_(broker, graph_zone()), |
| tick_counter_(tick_counter), |
| linkage_(linkage) { |
| } |
| |
| void ResetNodeInfoState() { |
| // Clean up for the next phase. |
| for (NodeInfo& info : info_) { |
| info.reset_state(); |
| } |
| } |
| |
| Type TypeOf(Node* node) { |
| Type type = GetInfo(node)->feedback_type(); |
| return type.IsInvalid() ? NodeProperties::GetType(node) : type; |
| } |
| |
| Type FeedbackTypeOf(Node* node) { |
| Type type = GetInfo(node)->feedback_type(); |
| return type.IsInvalid() ? Type::None() : type; |
| } |
| |
| Type TypePhi(Node* node) { |
| int arity = node->op()->ValueInputCount(); |
| Type type = FeedbackTypeOf(node->InputAt(0)); |
| for (int i = 1; i < arity; ++i) { |
| type = op_typer_.Merge(type, FeedbackTypeOf(node->InputAt(i))); |
| } |
| return type; |
| } |
| |
| Type TypeSelect(Node* node) { |
| return op_typer_.Merge(FeedbackTypeOf(node->InputAt(1)), |
| FeedbackTypeOf(node->InputAt(2))); |
| } |
| |
| bool UpdateFeedbackType(Node* node) { |
| if (node->op()->ValueOutputCount() == 0) return false; |
| |
| // For any non-phi node just wait until we get all inputs typed. We only |
| // allow untyped inputs for phi nodes because phis are the only places |
| // where cycles need to be broken. |
| if (node->opcode() != IrOpcode::kPhi) { |
| for (int i = 0; i < node->op()->ValueInputCount(); i++) { |
| if (GetInfo(node->InputAt(i))->feedback_type().IsInvalid()) { |
| return false; |
| } |
| } |
| } |
| |
| NodeInfo* info = GetInfo(node); |
| Type type = info->feedback_type(); |
| Type new_type = NodeProperties::GetType(node); |
| |
| // We preload these values here to avoid increasing the binary size too |
| // much, which happens if we inline the calls into the macros below. |
| Type input0_type; |
| if (node->InputCount() > 0) input0_type = FeedbackTypeOf(node->InputAt(0)); |
| Type input1_type; |
| if (node->InputCount() > 1) input1_type = FeedbackTypeOf(node->InputAt(1)); |
| |
| switch (node->opcode()) { |
| #define DECLARE_CASE(Name) \ |
| case IrOpcode::k##Name: { \ |
| new_type = op_typer_.Name(input0_type, input1_type); \ |
| break; \ |
| } |
| SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE) |
| DECLARE_CASE(SameValue) |
| #undef DECLARE_CASE |
| |
| #define DECLARE_CASE(Name) \ |
| case IrOpcode::k##Name: { \ |
| new_type = Type::Intersect(op_typer_.Name(input0_type, input1_type), \ |
| info->restriction_type(), graph_zone()); \ |
| break; \ |
| } |
| SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE) |
| SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_CASE) |
| #undef DECLARE_CASE |
| |
| #define DECLARE_CASE(Name) \ |
| case IrOpcode::k##Name: { \ |
| new_type = op_typer_.Name(input0_type); \ |
| break; \ |
| } |
| SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE) |
| #undef DECLARE_CASE |
| |
| #define DECLARE_CASE(Name) \ |
| case IrOpcode::k##Name: { \ |
| new_type = Type::Intersect(op_typer_.Name(input0_type), \ |
| info->restriction_type(), graph_zone()); \ |
| break; \ |
| } |
| SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_CASE) |
| #undef DECLARE_CASE |
| |
| case IrOpcode::kConvertReceiver: |
| new_type = op_typer_.ConvertReceiver(input0_type); |
| break; |
| |
| case IrOpcode::kPlainPrimitiveToNumber: |
| new_type = op_typer_.ToNumber(input0_type); |
| break; |
| |
| case IrOpcode::kCheckBounds: |
| new_type = |
| Type::Intersect(op_typer_.CheckBounds(input0_type, input1_type), |
| info->restriction_type(), graph_zone()); |
| break; |
| |
| case IrOpcode::kCheckFloat64Hole: |
| new_type = Type::Intersect(op_typer_.CheckFloat64Hole(input0_type), |
| info->restriction_type(), graph_zone()); |
| break; |
| |
| case IrOpcode::kCheckNumber: |
| new_type = Type::Intersect(op_typer_.CheckNumber(input0_type), |
| info->restriction_type(), graph_zone()); |
| break; |
| |
| case IrOpcode::kPhi: { |
| new_type = TypePhi(node); |
| if (!type.IsInvalid()) { |
| new_type = Weaken(node, type, new_type); |
| } |
| break; |
| } |
| |
| case IrOpcode::kConvertTaggedHoleToUndefined: |
| new_type = op_typer_.ConvertTaggedHoleToUndefined( |
| FeedbackTypeOf(node->InputAt(0))); |
| break; |
| |
| case IrOpcode::kTypeGuard: { |
| new_type = op_typer_.TypeTypeGuard(node->op(), |
| FeedbackTypeOf(node->InputAt(0))); |
| break; |
| } |
| |
| case IrOpcode::kSelect: { |
| new_type = TypeSelect(node); |
| break; |
| } |
| |
| default: |
| // Shortcut for operations that we do not handle. |
| if (type.IsInvalid()) { |
| GetInfo(node)->set_feedback_type(NodeProperties::GetType(node)); |
| return true; |
| } |
| return false; |
| } |
| // We need to guarantee that the feedback type is a subtype of the upper |
| // bound. Naively that should hold, but weakening can actually produce |
| // a bigger type if we are unlucky with ordering of phi typing. To be |
| // really sure, just intersect the upper bound with the feedback type. |
| new_type = Type::Intersect(GetUpperBound(node), new_type, graph_zone()); |
| |
| if (!type.IsInvalid() && new_type.Is(type)) return false; |
| GetInfo(node)->set_feedback_type(new_type); |
| if (FLAG_trace_representation) { |
| PrintNodeFeedbackType(node); |
| } |
| return true; |
| } |
| |
| void PrintNodeFeedbackType(Node* n) { |
| StdoutStream os; |
| os << "#" << n->id() << ":" << *n->op() << "("; |
| int j = 0; |
| for (Node* const i : n->inputs()) { |
| if (j++ > 0) os << ", "; |
| os << "#" << i->id() << ":" << i->op()->mnemonic(); |
| } |
| os << ")"; |
| if (NodeProperties::IsTyped(n)) { |
| Type static_type = NodeProperties::GetType(n); |
| os << " [Static type: " << static_type; |
| Type feedback_type = GetInfo(n)->feedback_type(); |
| if (!feedback_type.IsInvalid() && feedback_type != static_type) { |
| os << ", Feedback type: " << feedback_type; |
| } |
| os << "]"; |
| } |
| os << std::endl; |
| } |
| |
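|   // Widens the integer part of a phi's feedback type so that the Retype |
|   // fixpoint terminates: instead of growing a range by one step per iteration |
|   // (e.g. Range(0, 1), Range(0, 2), ...), {WeakenRange} jumps to a coarser |
|   // range, trading precision for quick convergence. |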
| Type Weaken(Node* node, Type previous_type, Type current_type) { |
|     // If the types have nothing to do with integers, return the current type. |
| Type const integer = type_cache_->kInteger; |
| if (!previous_type.Maybe(integer)) { |
| return current_type; |
| } |
| DCHECK(current_type.Maybe(integer)); |
| |
| Type current_integer = Type::Intersect(current_type, integer, graph_zone()); |
| DCHECK(!current_integer.IsNone()); |
| Type previous_integer = |
| Type::Intersect(previous_type, integer, graph_zone()); |
| DCHECK(!previous_integer.IsNone()); |
| |
| // Once we start weakening a node, we should always weaken. |
| if (!GetInfo(node)->weakened()) { |
| // Only weaken if there is range involved; we should converge quickly |
| // for all other types (the exception is a union of many constants, |
| // but we currently do not increase the number of constants in unions). |
| Type previous = previous_integer.GetRange(); |
| Type current = current_integer.GetRange(); |
| if (current.IsInvalid() || previous.IsInvalid()) { |
| return current_type; |
| } |
| // Range is involved => we are weakening. |
| GetInfo(node)->set_weakened(); |
| } |
| |
| return Type::Union(current_type, |
| op_typer_.WeakenRange(previous_integer, current_integer), |
| graph_zone()); |
| } |
| |
|   // Generates a post-order traversal of the nodes, rooted at End: each node |
|   // is appended after its inputs (except along cycles through phis). |
| void GenerateTraversal() { |
| ZoneStack<NodeState> stack(zone_); |
| |
| stack.push({graph()->end(), 0}); |
| GetInfo(graph()->end())->set_pushed(); |
| while (!stack.empty()) { |
| NodeState& current = stack.top(); |
| Node* node = current.node; |
| |
| // If there is an unvisited input, push it and continue with that node. |
| bool pushed_unvisited = false; |
| while (current.input_index < node->InputCount()) { |
| Node* input = node->InputAt(current.input_index); |
| NodeInfo* input_info = GetInfo(input); |
| current.input_index++; |
| if (input_info->unvisited()) { |
| input_info->set_pushed(); |
| stack.push({input, 0}); |
| pushed_unvisited = true; |
| break; |
| } else if (input_info->pushed()) { |
| // Optimization for the Retype phase. |
| // If we had already pushed (and not visited) an input, it means that |
| // the current node will be visited in the Retype phase before one of |
| // its inputs. If this happens, the current node might need to be |
| // revisited. |
| MarkAsPossibleRevisit(node, input); |
| } |
| } |
| |
| if (pushed_unvisited) continue; |
| |
| stack.pop(); |
| NodeInfo* info = GetInfo(node); |
| info->set_visited(); |
| |
| // Generate the traversal |
| traversal_nodes_.push_back(node); |
| } |
| } |
| |
| void PushNodeToRevisitIfVisited(Node* node) { |
| NodeInfo* info = GetInfo(node); |
| if (info->visited()) { |
| TRACE(" QUEUEING #%d: %s\n", node->id(), node->op()->mnemonic()); |
| info->set_queued(); |
| revisit_queue_.push(node); |
| } |
| } |
| |
|   // Tries to update the feedback type of the node, as well as setting its |
|   // machine representation (in VisitNode). Returns true iff the feedback type |
|   // of the node was changed. |
| bool RetypeNode(Node* node) { |
| NodeInfo* info = GetInfo(node); |
| info->set_visited(); |
| bool updated = UpdateFeedbackType(node); |
| TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic()); |
| VisitNode<RETYPE>(node, info->truncation(), nullptr); |
| TRACE(" ==> output %s\n", MachineReprToString(info->representation())); |
| return updated; |
| } |
| |
| // Visits the node and marks it as visited. Inside of VisitNode, we might |
| // change the truncation of one of our inputs (see EnqueueInput<PROPAGATE> for |
| // this). If we change the truncation of an already visited node, we will add |
| // it to the revisit queue. |
| void PropagateTruncation(Node* node) { |
| NodeInfo* info = GetInfo(node); |
| info->set_visited(); |
| TRACE(" visit #%d: %s (trunc: %s)\n", node->id(), node->op()->mnemonic(), |
| info->truncation().description()); |
| VisitNode<PROPAGATE>(node, info->truncation(), nullptr); |
| } |
| |
| // Backward propagation of truncations to a fixpoint. |
| void RunPropagatePhase() { |
| TRACE("--{Propagate phase}--\n"); |
| ResetNodeInfoState(); |
| DCHECK(revisit_queue_.empty()); |
| |
| // Process nodes in reverse post order, with End as the root. |
| for (auto it = traversal_nodes_.crbegin(); it != traversal_nodes_.crend(); |
| ++it) { |
| PropagateTruncation(*it); |
| |
| while (!revisit_queue_.empty()) { |
| Node* node = revisit_queue_.front(); |
| revisit_queue_.pop(); |
| PropagateTruncation(node); |
| } |
| } |
| } |
| |
| // Forward propagation of types from type feedback to a fixpoint. |
| void RunRetypePhase() { |
| TRACE("--{Retype phase}--\n"); |
| ResetNodeInfoState(); |
| DCHECK(revisit_queue_.empty()); |
| |
| for (auto it = traversal_nodes_.cbegin(); it != traversal_nodes_.cend(); |
| ++it) { |
| Node* node = *it; |
| if (!RetypeNode(node)) continue; |
| |
| auto revisit_it = might_need_revisit_.find(node); |
| if (revisit_it == might_need_revisit_.end()) continue; |
| |
| for (Node* const user : revisit_it->second) { |
| PushNodeToRevisitIfVisited(user); |
| } |
| |
| // Process the revisit queue. |
| while (!revisit_queue_.empty()) { |
| Node* revisit_node = revisit_queue_.front(); |
| revisit_queue_.pop(); |
| if (!RetypeNode(revisit_node)) continue; |
| // Here we need to check all uses since we can't easily know which |
| // nodes will need to be revisited due to having an input which was |
| // a revisited node. |
| for (Node* const user : revisit_node->uses()) { |
| PushNodeToRevisitIfVisited(user); |
| } |
| } |
| } |
| } |
| |
| // Lowering and change insertion phase. |
| void RunLowerPhase(SimplifiedLowering* lowering) { |
| TRACE("--{Lower phase}--\n"); |
| for (auto it = traversal_nodes_.cbegin(); it != traversal_nodes_.cend(); |
| ++it) { |
| Node* node = *it; |
| NodeInfo* info = GetInfo(node); |
| TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic()); |
| // Reuse {VisitNode()} so the representation rules are in one place. |
| SourcePositionTable::Scope scope( |
| source_positions_, source_positions_->GetSourcePosition(node)); |
| NodeOriginTable::Scope origin_scope(node_origins_, "simplified lowering", |
| node); |
| VisitNode<LOWER>(node, info->truncation(), lowering); |
| } |
| |
| // Perform the final replacements. |
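|     // {replacements_} holds (node, replacement) pairs appended by |
|     // DeferReplacement. A killed node may itself be recorded as the |
|     // replacement of a later pair, so those entries are redirected below. |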
| for (NodeVector::iterator i = replacements_.begin(); |
| i != replacements_.end(); ++i) { |
| Node* node = *i; |
| Node* replacement = *(++i); |
| node->ReplaceUses(replacement); |
| node->Kill(); |
| // We also need to replace the node in the rest of the vector. |
| for (NodeVector::iterator j = i + 1; j != replacements_.end(); ++j) { |
| ++j; |
| if (*j == node) *j = replacement; |
| } |
| } |
| } |
| |
| void Run(SimplifiedLowering* lowering) { |
| GenerateTraversal(); |
| RunPropagatePhase(); |
| RunRetypePhase(); |
| RunLowerPhase(lowering); |
| } |
| |
| // Just assert for Retype and Lower. Propagate specialized below. |
| template <Phase T> |
| void EnqueueInput(Node* use_node, int index, |
| UseInfo use_info = UseInfo::None()) { |
|     static_assert(retype<T>() || lower<T>(), |
|                   "This version of EnqueueInput has to be called in " |
|                   "the Retype or Lower phase."); |
| } |
| |
| template <Phase T> |
| static constexpr bool propagate() { |
| return T == PROPAGATE; |
| } |
| |
| template <Phase T> |
| static constexpr bool retype() { |
| return T == RETYPE; |
| } |
| |
| template <Phase T> |
| static constexpr bool lower() { |
| return T == LOWER; |
| } |
| |
| template <Phase T> |
| void SetOutput(Node* node, MachineRepresentation representation, |
| Type restriction_type = Type::Any()); |
| |
| Type GetUpperBound(Node* node) { return NodeProperties::GetType(node); } |
| |
| bool InputCannotBe(Node* node, Type type) { |
| DCHECK_EQ(1, node->op()->ValueInputCount()); |
| return !GetUpperBound(node->InputAt(0)).Maybe(type); |
| } |
| |
| bool InputIs(Node* node, Type type) { |
| DCHECK_EQ(1, node->op()->ValueInputCount()); |
| return GetUpperBound(node->InputAt(0)).Is(type); |
| } |
| |
| bool BothInputsAreSigned32(Node* node) { |
| return BothInputsAre(node, Type::Signed32()); |
| } |
| |
| bool BothInputsAreUnsigned32(Node* node) { |
| return BothInputsAre(node, Type::Unsigned32()); |
| } |
| |
| bool BothInputsAre(Node* node, Type type) { |
| DCHECK_EQ(2, node->op()->ValueInputCount()); |
| return GetUpperBound(node->InputAt(0)).Is(type) && |
| GetUpperBound(node->InputAt(1)).Is(type); |
| } |
| |
| bool IsNodeRepresentationTagged(Node* node) { |
| MachineRepresentation representation = GetInfo(node)->representation(); |
| return IsAnyTagged(representation); |
| } |
| |
| bool OneInputCannotBe(Node* node, Type type) { |
| DCHECK_EQ(2, node->op()->ValueInputCount()); |
| return !GetUpperBound(node->InputAt(0)).Maybe(type) || |
| !GetUpperBound(node->InputAt(1)).Maybe(type); |
| } |
| |
| void ChangeToDeadValue(Node* node, Node* effect, Node* control) { |
| DCHECK(TypeOf(node).IsNone()); |
| // If the node is unreachable, insert an Unreachable node and mark the |
| // value dead. |
| // TODO(jarin,tebbi) Find a way to unify/merge this insertion with |
| // InsertUnreachableIfNecessary. |
| Node* unreachable = effect = |
| graph()->NewNode(jsgraph_->common()->Unreachable(), effect, control); |
| const Operator* dead_value = |
| jsgraph_->common()->DeadValue(GetInfo(node)->representation()); |
| node->ReplaceInput(0, unreachable); |
| node->TrimInputCount(dead_value->ValueInputCount()); |
| ReplaceEffectControlUses(node, effect, control); |
| NodeProperties::ChangeOp(node, dead_value); |
| } |
| |
| void ChangeToPureOp(Node* node, const Operator* new_op) { |
| DCHECK(new_op->HasProperty(Operator::kPure)); |
| DCHECK_EQ(new_op->ValueInputCount(), node->op()->ValueInputCount()); |
| if (node->op()->EffectInputCount() > 0) { |
| DCHECK_LT(0, node->op()->ControlInputCount()); |
| Node* control = NodeProperties::GetControlInput(node); |
| Node* effect = NodeProperties::GetEffectInput(node); |
| if (TypeOf(node).IsNone()) { |
| ChangeToDeadValue(node, effect, control); |
| return; |
| } |
| // Rewire the effect and control chains. |
| node->TrimInputCount(new_op->ValueInputCount()); |
| ReplaceEffectControlUses(node, effect, control); |
| } else { |
| DCHECK_EQ(0, node->op()->ControlInputCount()); |
| } |
| NodeProperties::ChangeOp(node, new_op); |
| } |
| |
| void ChangeUnaryToPureBinaryOp(Node* node, const Operator* new_op, |
| int new_input_index, Node* new_input) { |
| DCHECK(new_op->HasProperty(Operator::kPure)); |
| DCHECK_EQ(new_op->ValueInputCount(), 2); |
| DCHECK_EQ(node->op()->ValueInputCount(), 1); |
| DCHECK_LE(0, new_input_index); |
| DCHECK_LE(new_input_index, 1); |
| if (node->op()->EffectInputCount() > 0) { |
| DCHECK_LT(0, node->op()->ControlInputCount()); |
| Node* control = NodeProperties::GetControlInput(node); |
| Node* effect = NodeProperties::GetEffectInput(node); |
| if (TypeOf(node).IsNone()) { |
| ChangeToDeadValue(node, effect, control); |
| return; |
| } |
| node->TrimInputCount(node->op()->ValueInputCount()); |
| ReplaceEffectControlUses(node, effect, control); |
| } else { |
| DCHECK_EQ(0, node->op()->ControlInputCount()); |
| } |
| node->InsertInput(jsgraph_->zone(), new_input_index, new_input); |
| NodeProperties::ChangeOp(node, new_op); |
| } |
| |
| // Converts input {index} of {node} according to given UseInfo {use}, |
|   // assuming the type of the input is {input_type}. If {input_type} is |
|   // invalid, the type is taken from the input node via |
|   // {TypeOf(node->InputAt(index))}. |
| void ConvertInput(Node* node, int index, UseInfo use, |
| Type input_type = Type::Invalid()) { |
| // In the change phase, insert a change before the use if necessary. |
| if (use.representation() == MachineRepresentation::kNone) |
| return; // No input requirement on the use. |
| Node* input = node->InputAt(index); |
| DCHECK_NOT_NULL(input); |
| NodeInfo* input_info = GetInfo(input); |
| MachineRepresentation input_rep = input_info->representation(); |
| if (input_rep != use.representation() || |
| use.type_check() != TypeCheckKind::kNone) { |
| // Output representation doesn't match usage. |
| TRACE(" change: #%d:%s(@%d #%d:%s) ", node->id(), node->op()->mnemonic(), |
| index, input->id(), input->op()->mnemonic()); |
| TRACE("from %s to %s:%s\n", |
| MachineReprToString(input_info->representation()), |
| MachineReprToString(use.representation()), |
| use.truncation().description()); |
| if (input_type.IsInvalid()) { |
| input_type = TypeOf(input); |
| } |
| Node* n = changer_->GetRepresentationFor(input, input_rep, input_type, |
| node, use); |
| node->ReplaceInput(index, n); |
| } |
| } |
| |
| template <Phase T> |
| void ProcessInput(Node* node, int index, UseInfo use); |
| |
| // Just assert for Retype and Lower. Propagate specialized below. |
| template <Phase T> |
| void ProcessRemainingInputs(Node* node, int index) { |
| static_assert(retype<T>() || lower<T>(), |
| "This version of ProcessRemainingInputs has to be called in " |
| "the Retype or Lower phase."); |
| DCHECK_GE(index, NodeProperties::PastValueIndex(node)); |
| DCHECK_GE(index, NodeProperties::PastContextIndex(node)); |
| } |
| |
|   // Marks {node} as needing a possible revisit: {node} is a use of {input}, |
|   // but will be visited before {input} in the Retype phase. |
| void MarkAsPossibleRevisit(Node* node, Node* input) { |
| auto it = might_need_revisit_.find(input); |
| if (it == might_need_revisit_.end()) { |
| it = might_need_revisit_.insert({input, ZoneVector<Node*>(zone())}).first; |
| } |
| it->second.push_back(node); |
| TRACE(" Marking #%d: %s as needing revisit due to #%d: %s\n", node->id(), |
| node->op()->mnemonic(), input->id(), input->op()->mnemonic()); |
| } |
| |
| // Just assert for Retype. Propagate and Lower specialized below. |
| template <Phase T> |
| void VisitInputs(Node* node) { |
| static_assert( |
| retype<T>(), |
| "This version of VisitInputs has to be called in the Retype phase."); |
| } |
| |
| template <Phase T> |
| void VisitReturn(Node* node) { |
| int first_effect_index = NodeProperties::FirstEffectIndex(node); |
| // Visit integer slot count to pop |
| ProcessInput<T>(node, 0, UseInfo::TruncatingWord32()); |
| |
| // Visit value, context and frame state inputs as tagged. |
| for (int i = 1; i < first_effect_index; i++) { |
| ProcessInput<T>(node, i, UseInfo::AnyTagged()); |
| } |
| // Only enqueue other inputs (effects, control). |
| for (int i = first_effect_index; i < node->InputCount(); i++) { |
| EnqueueInput<T>(node, i); |
| } |
| } |
| |
| // Helper for an unused node. |
| template <Phase T> |
| void VisitUnused(Node* node) { |
| int first_effect_index = NodeProperties::FirstEffectIndex(node); |
| for (int i = 0; i < first_effect_index; i++) { |
| ProcessInput<T>(node, i, UseInfo::None()); |
| } |
| ProcessRemainingInputs<T>(node, first_effect_index); |
| if (lower<T>()) Kill(node); |
| } |
| |
| // Helper for no-op node. |
| template <Phase T> |
| void VisitNoop(Node* node, Truncation truncation) { |
| if (truncation.IsUnused()) return VisitUnused<T>(node); |
| MachineRepresentation representation = |
| GetOutputInfoForPhi(node, TypeOf(node), truncation); |
| VisitUnop<T>(node, UseInfo(representation, truncation), representation); |
| if (lower<T>()) DeferReplacement(node, node->InputAt(0)); |
| } |
| |
| // Helper for binops of the R x L -> O variety. |
| template <Phase T> |
| void VisitBinop(Node* node, UseInfo left_use, UseInfo right_use, |
| MachineRepresentation output, |
| Type restriction_type = Type::Any()) { |
| DCHECK_EQ(2, node->op()->ValueInputCount()); |
| ProcessInput<T>(node, 0, left_use); |
| ProcessInput<T>(node, 1, right_use); |
| for (int i = 2; i < node->InputCount(); i++) { |
| EnqueueInput<T>(node, i); |
| } |
| SetOutput<T>(node, output, restriction_type); |
| } |
| |
| // Helper for binops of the I x I -> O variety. |
| template <Phase T> |
| void VisitBinop(Node* node, UseInfo input_use, MachineRepresentation output, |
| Type restriction_type = Type::Any()) { |
| VisitBinop<T>(node, input_use, input_use, output, restriction_type); |
| } |
| |
| template <Phase T> |
| void VisitSpeculativeInt32Binop(Node* node) { |
| DCHECK_EQ(2, node->op()->ValueInputCount()); |
| if (BothInputsAre(node, Type::NumberOrOddball())) { |
| return VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32); |
| } |
| NumberOperationHint hint = NumberOperationHintOf(node->op()); |
| return VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint), |
| MachineRepresentation::kWord32); |
| } |
| |
| // Helper for unops of the I -> O variety. |
| template <Phase T> |
| void VisitUnop(Node* node, UseInfo input_use, MachineRepresentation output, |
| Type restriction_type = Type::Any()) { |
| DCHECK_EQ(1, node->op()->ValueInputCount()); |
| ProcessInput<T>(node, 0, input_use); |
| ProcessRemainingInputs<T>(node, 1); |
| SetOutput<T>(node, output, restriction_type); |
| } |
| |
| // Helper for leaf nodes. |
| template <Phase T> |
| void VisitLeaf(Node* node, MachineRepresentation output) { |
| DCHECK_EQ(0, node->InputCount()); |
| SetOutput<T>(node, output); |
| } |
| |
| // Helpers for specific types of binops. |
| |
| template <Phase T> |
| void VisitFloat64Binop(Node* node) { |
| VisitBinop<T>(node, UseInfo::TruncatingFloat64(), |
| MachineRepresentation::kFloat64); |
| } |
| |
| template <Phase T> |
| void VisitInt64Binop(Node* node) { |
| VisitBinop<T>(node, UseInfo::Word64(), MachineRepresentation::kWord64); |
| } |
| |
| template <Phase T> |
| void VisitWord32TruncatingBinop(Node* node) { |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32); |
| } |
| |
| // Infer representation for phi-like nodes. |
| // The {node} parameter is only used to decide on the int64 representation. |
| // Once the type system supports an external pointer type, the {node} |
| // parameter can be removed. |
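|   // E.g. a value typed Signed32 or Unsigned32 gets kWord32, a Boolean gets |
|   // kBit, a general Number gets kFloat64, and anything not handled specially |
|   // falls back to kTagged. |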
| MachineRepresentation GetOutputInfoForPhi(Node* node, Type type, |
| Truncation use) { |
| // Compute the representation. |
| if (type.Is(Type::None())) { |
| return MachineRepresentation::kNone; |
| } else if (type.Is(Type::Signed32()) || type.Is(Type::Unsigned32())) { |
| return MachineRepresentation::kWord32; |
| } else if (type.Is(Type::NumberOrOddball()) && use.IsUsedAsWord32()) { |
| return MachineRepresentation::kWord32; |
| } else if (type.Is(Type::Boolean())) { |
| return MachineRepresentation::kBit; |
| } else if (type.Is(Type::NumberOrOddball()) && |
| use.TruncatesOddballAndBigIntToNumber()) { |
| return MachineRepresentation::kFloat64; |
| } else if (type.Is(Type::Union(Type::SignedSmall(), Type::NaN(), zone()))) { |
| // TODO(turbofan): For Phis that return either NaN or some Smi, it's |
| // beneficial to not go all the way to double, unless the uses are |
| // double uses. For tagging that just means some potentially expensive |
| // allocation code; we might want to do the same for -0 as well? |
| return MachineRepresentation::kTagged; |
| } else if (type.Is(Type::Number())) { |
| return MachineRepresentation::kFloat64; |
| } else if (type.Is(Type::BigInt()) && use.IsUsedAsWord64()) { |
| return MachineRepresentation::kWord64; |
| } else if (type.Is(Type::ExternalPointer()) || |
| type.Is(Type::SandboxedExternalPointer())) { |
| return MachineType::PointerRepresentation(); |
| } |
| return MachineRepresentation::kTagged; |
| } |
| |
| // Helper for handling selects. |
| template <Phase T> |
| void VisitSelect(Node* node, Truncation truncation, |
| SimplifiedLowering* lowering) { |
| DCHECK(TypeOf(node->InputAt(0)).Is(Type::Boolean())); |
| ProcessInput<T>(node, 0, UseInfo::Bool()); |
| |
| MachineRepresentation output = |
| GetOutputInfoForPhi(node, TypeOf(node), truncation); |
| SetOutput<T>(node, output); |
| |
| if (lower<T>()) { |
| // Update the select operator. |
| SelectParameters p = SelectParametersOf(node->op()); |
| if (output != p.representation()) { |
| NodeProperties::ChangeOp(node, |
| lowering->common()->Select(output, p.hint())); |
| } |
| } |
|     // Convert inputs to the output representation of this select, pass the |
|     // truncation along. |
| UseInfo input_use(output, truncation); |
| ProcessInput<T>(node, 1, input_use); |
| ProcessInput<T>(node, 2, input_use); |
| } |
| |
| // Helper for handling phis. |
| template <Phase T> |
| void VisitPhi(Node* node, Truncation truncation, |
| SimplifiedLowering* lowering) { |
| MachineRepresentation output = |
| GetOutputInfoForPhi(node, TypeOf(node), truncation); |
| // Only set the output representation if not running with type |
| // feedback. (Feedback typing will set the representation.) |
| SetOutput<T>(node, output); |
| |
| int values = node->op()->ValueInputCount(); |
| if (lower<T>()) { |
| // Update the phi operator. |
| if (output != PhiRepresentationOf(node->op())) { |
| NodeProperties::ChangeOp(node, lowering->common()->Phi(output, values)); |
| } |
| } |
| |
| // Convert inputs to the output representation of this phi, pass the |
| // truncation along. |
| UseInfo input_use(output, truncation); |
| for (int i = 0; i < node->InputCount(); i++) { |
| ProcessInput<T>(node, i, i < values ? input_use : UseInfo::None()); |
| } |
| } |
| |
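|   // Helper for ObjectIs<Type> predicates: if the static input type already |
|   // decides the answer, the node is lowered to the constant 1 or 0; otherwise |
|   // it is kept, with a tagged input use and a bit output. |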
| template <Phase T> |
| void VisitObjectIs(Node* node, Type type, SimplifiedLowering* lowering) { |
| Type const input_type = TypeOf(node->InputAt(0)); |
| if (input_type.Is(type)) { |
| VisitUnop<T>(node, UseInfo::None(), MachineRepresentation::kBit); |
| if (lower<T>()) { |
| DeferReplacement(node, lowering->jsgraph()->Int32Constant(1)); |
| } |
| } else { |
| VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit); |
| if (lower<T>() && !input_type.Maybe(type)) { |
| DeferReplacement(node, lowering->jsgraph()->Int32Constant(0)); |
| } |
| } |
| } |
| |
| template <Phase T> |
| void VisitCheck(Node* node, Type type, SimplifiedLowering* lowering) { |
| if (InputIs(node, type)) { |
| VisitUnop<T>(node, UseInfo::AnyTagged(), |
| MachineRepresentation::kTaggedPointer); |
| if (lower<T>()) DeferReplacement(node, node->InputAt(0)); |
| } else { |
| VisitUnop<T>(node, |
| UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()), |
| MachineRepresentation::kTaggedPointer); |
| } |
| } |
| |
| template <Phase T> |
| void VisitCall(Node* node, SimplifiedLowering* lowering) { |
| auto call_descriptor = CallDescriptorOf(node->op()); |
| int params = static_cast<int>(call_descriptor->ParameterCount()); |
| int value_input_count = node->op()->ValueInputCount(); |
| |
| DCHECK_GT(value_input_count, 0); |
| DCHECK_GE(value_input_count, params); |
| |
| // The target of the call. |
| ProcessInput<T>(node, 0, UseInfo::Any()); |
| |
| // For the parameters (indexes [1, ..., params]), propagate representation |
| // information from call descriptor. |
| for (int i = 1; i <= params; i++) { |
| ProcessInput<T>(node, i, |
| TruncatingUseInfoFromRepresentation( |
| call_descriptor->GetInputType(i).representation())); |
| } |
| |
| // Rest of the value inputs. |
| for (int i = params + 1; i < value_input_count; i++) { |
| ProcessInput<T>(node, i, UseInfo::AnyTagged()); |
| } |
| |
| // Effect and Control. |
| ProcessRemainingInputs<T>(node, value_input_count); |
| |
| if (call_descriptor->ReturnCount() > 0) { |
| SetOutput<T>(node, call_descriptor->GetReturnType(0).representation()); |
| } else { |
| SetOutput<T>(node, MachineRepresentation::kTagged); |
| } |
| } |
| |
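|   // JavaScript shift counts are taken modulo 32, so if the right-hand side is |
|   // not statically known to be in [0, 31], mask it with 0x1F before the |
|   // machine-level shift (e.g. x << 33 behaves like x << 1). |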
| void MaskShiftOperand(Node* node, Type rhs_type) { |
| if (!rhs_type.Is(type_cache_->kZeroToThirtyOne)) { |
| Node* const rhs = NodeProperties::GetValueInput(node, 1); |
| node->ReplaceInput(1, |
| graph()->NewNode(jsgraph_->machine()->Word32And(), rhs, |
| jsgraph_->Int32Constant(0x1F))); |
| } |
| } |
| |
| static MachineSemantic DeoptValueSemanticOf(Type type) { |
| // We only need signedness to do deopt correctly. |
| if (type.Is(Type::Signed32())) { |
| return MachineSemantic::kInt32; |
| } else if (type.Is(Type::Unsigned32())) { |
| return MachineSemantic::kUint32; |
| } else { |
| return MachineSemantic::kAny; |
| } |
| } |
| |
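|   // Computes the MachineType recorded in frame states for a value so that the |
|   // deoptimizer knows how to materialize it, e.g. a kWord32 value typed |
|   // Signed32 is recorded as MachineType::Int32(). |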
| static MachineType DeoptMachineTypeOf(MachineRepresentation rep, Type type) { |
| if (type.IsNone()) { |
| return MachineType::None(); |
| } |
| // Do not distinguish between various Tagged variations. |
| if (IsAnyTagged(rep)) { |
| return MachineType::AnyTagged(); |
| } |
| if (rep == MachineRepresentation::kWord64) { |
| if (type.Is(Type::BigInt())) { |
| return MachineType::AnyTagged(); |
| } |
| |
| DCHECK(type.Is(TypeCache::Get()->kSafeInteger)); |
| return MachineType(rep, MachineSemantic::kInt64); |
| } |
| MachineType machine_type(rep, DeoptValueSemanticOf(type)); |
| DCHECK(machine_type.representation() != MachineRepresentation::kWord32 || |
| machine_type.semantic() == MachineSemantic::kInt32 || |
| machine_type.semantic() == MachineSemantic::kUint32); |
| DCHECK(machine_type.representation() != MachineRepresentation::kBit || |
| type.Is(Type::Boolean())); |
| return machine_type; |
| } |
| |
| template <Phase T> |
| void VisitStateValues(Node* node) { |
| if (propagate<T>()) { |
| for (int i = 0; i < node->InputCount(); i++) { |
| // When lowering 64 bit BigInts to Word64 representation, we have to |
| // make sure they are rematerialized before deoptimization. By |
|         // propagating an AnyTagged use, the RepresentationChanger is going to |
| // insert the necessary conversions. |
| // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize |
| // truncated BigInts. |
| if (TypeOf(node->InputAt(i)).Is(Type::BigInt())) { |
| EnqueueInput<T>(node, i, UseInfo::AnyTagged()); |
| } else { |
| EnqueueInput<T>(node, i, UseInfo::Any()); |
| } |
| } |
| } else if (lower<T>()) { |
| Zone* zone = jsgraph_->zone(); |
| ZoneVector<MachineType>* types = |
| zone->New<ZoneVector<MachineType>>(node->InputCount(), zone); |
| for (int i = 0; i < node->InputCount(); i++) { |
| Node* input = node->InputAt(i); |
| // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize |
| // truncated BigInts. |
| if (TypeOf(input).Is(Type::BigInt())) { |
| ConvertInput(node, i, UseInfo::AnyTagged()); |
| } |
| |
| (*types)[i] = |
| DeoptMachineTypeOf(GetInfo(input)->representation(), TypeOf(input)); |
| } |
| SparseInputMask mask = SparseInputMaskOf(node->op()); |
| NodeProperties::ChangeOp( |
| node, jsgraph_->common()->TypedStateValues(types, mask)); |
| } |
| SetOutput<T>(node, MachineRepresentation::kTagged); |
| } |
| |
| template <Phase T> |
| void VisitFrameState(Node* node) { |
| DCHECK_EQ(5, node->op()->ValueInputCount()); |
| DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op())); |
| |
| ProcessInput<T>(node, 0, UseInfo::AnyTagged()); // Parameters. |
| ProcessInput<T>(node, 1, UseInfo::AnyTagged()); // Registers. |
| |
| // Accumulator is a special flower - we need to remember its type in |
| // a singleton typed-state-values node (as if it was a singleton |
| // state-values node). |
| Node* accumulator = node->InputAt(2); |
| if (propagate<T>()) { |
| // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize |
| // truncated BigInts. |
| if (TypeOf(accumulator).Is(Type::BigInt())) { |
| EnqueueInput<T>(node, 2, UseInfo::AnyTagged()); |
| } else { |
| EnqueueInput<T>(node, 2, UseInfo::Any()); |
| } |
| } else if (lower<T>()) { |
| // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize |
| // truncated BigInts. |
| if (TypeOf(accumulator).Is(Type::BigInt())) { |
| ConvertInput(node, 2, UseInfo::AnyTagged()); |
| } |
| Zone* zone = jsgraph_->zone(); |
| if (accumulator == jsgraph_->OptimizedOutConstant()) { |
| node->ReplaceInput(2, jsgraph_->SingleDeadTypedStateValues()); |
| } else { |
| ZoneVector<MachineType>* types = |
| zone->New<ZoneVector<MachineType>>(1, zone); |
| (*types)[0] = DeoptMachineTypeOf(GetInfo(accumulator)->representation(), |
| TypeOf(accumulator)); |
| |
| node->ReplaceInput( |
| 2, jsgraph_->graph()->NewNode(jsgraph_->common()->TypedStateValues( |
| types, SparseInputMask::Dense()), |
| node->InputAt(2))); |
| } |
| } |
| |
| ProcessInput<T>(node, 3, UseInfo::AnyTagged()); // Context. |
| ProcessInput<T>(node, 4, UseInfo::AnyTagged()); // Closure. |
| ProcessInput<T>(node, 5, UseInfo::AnyTagged()); // Outer frame state. |
| return SetOutput<T>(node, MachineRepresentation::kTagged); |
| } |
| |
| template <Phase T> |
| void VisitObjectState(Node* node) { |
| if (propagate<T>()) { |
| for (int i = 0; i < node->InputCount(); i++) { |
| // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize |
| // truncated BigInts. |
| if (TypeOf(node->InputAt(i)).Is(Type::BigInt())) { |
| EnqueueInput<T>(node, i, UseInfo::AnyTagged()); |
| } else { |
| EnqueueInput<T>(node, i, UseInfo::Any()); |
| } |
| } |
| } else if (lower<T>()) { |
| Zone* zone = jsgraph_->zone(); |
| ZoneVector<MachineType>* types = |
| zone->New<ZoneVector<MachineType>>(node->InputCount(), zone); |
| for (int i = 0; i < node->InputCount(); i++) { |
| Node* input = node->InputAt(i); |
| (*types)[i] = |
| DeoptMachineTypeOf(GetInfo(input)->representation(), TypeOf(input)); |
| // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize |
| // truncated BigInts. |
| if (TypeOf(node->InputAt(i)).Is(Type::BigInt())) { |
| ConvertInput(node, i, UseInfo::AnyTagged()); |
| } |
| } |
| NodeProperties::ChangeOp(node, jsgraph_->common()->TypedObjectState( |
| ObjectIdOf(node->op()), types)); |
| } |
| SetOutput<T>(node, MachineRepresentation::kTagged); |
| } |
| |
| const Operator* Int32Op(Node* node) { |
| return changer_->Int32OperatorFor(node->opcode()); |
| } |
| |
| const Operator* Int32OverflowOp(Node* node) { |
| return changer_->Int32OverflowOperatorFor(node->opcode()); |
| } |
| |
| const Operator* Int64Op(Node* node) { |
| return changer_->Int64OperatorFor(node->opcode()); |
| } |
| |
| const Operator* Uint32Op(Node* node) { |
| return changer_->Uint32OperatorFor(node->opcode()); |
| } |
| |
| const Operator* Uint32OverflowOp(Node* node) { |
| return changer_->Uint32OverflowOperatorFor(node->opcode()); |
| } |
| |
| const Operator* Float64Op(Node* node) { |
| return changer_->Float64OperatorFor(node->opcode()); |
| } |
| |
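|   // Picks the cheapest write barrier that is still safe for a tagged field |
|   // store: e.g. storing a Smi, an immortal immovable root, or storing into an |
|   // untagged base needs no barrier, while storing an arbitrary tagged value |
|   // needs the full barrier. |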
| WriteBarrierKind WriteBarrierKindFor( |
| BaseTaggedness base_taggedness, |
| MachineRepresentation field_representation, Type field_type, |
| MachineRepresentation value_representation, Node* value) { |
| if (base_taggedness == kTaggedBase && |
| CanBeTaggedPointer(field_representation)) { |
| Type value_type = NodeProperties::GetType(value); |
| if (value_representation == MachineRepresentation::kTaggedSigned) { |
| // Write barriers are only for stores of heap objects. |
| return kNoWriteBarrier; |
| } |
| if (field_type.Is(Type::BooleanOrNullOrUndefined()) || |
| value_type.Is(Type::BooleanOrNullOrUndefined())) { |
| // Write barriers are not necessary when storing true, false, null or |
| // undefined, because these special oddballs are always in the root set. |
| return kNoWriteBarrier; |
| } |
| if (value_type.IsHeapConstant()) { |
| RootIndex root_index; |
| const RootsTable& roots_table = jsgraph_->isolate()->roots_table(); |
| if (roots_table.IsRootHandle(value_type.AsHeapConstant()->Value(), |
| &root_index)) { |
| if (RootsTable::IsImmortalImmovable(root_index)) { |
| // Write barriers are unnecessary for immortal immovable roots. |
| return kNoWriteBarrier; |
| } |
| } |
| } |
| if (field_representation == MachineRepresentation::kTaggedPointer || |
| value_representation == MachineRepresentation::kTaggedPointer) { |
| // Write barriers for heap objects are cheaper. |
| return kPointerWriteBarrier; |
| } |
| NumberMatcher m(value); |
| if (m.HasResolvedValue()) { |
| if (IsSmiDouble(m.ResolvedValue())) { |
| // Storing a smi doesn't need a write barrier. |
| return kNoWriteBarrier; |
| } |
| // The NumberConstant will be represented as HeapNumber. |
| return kPointerWriteBarrier; |
| } |
| return kFullWriteBarrier; |
| } |
| return kNoWriteBarrier; |
| } |
| |
| WriteBarrierKind WriteBarrierKindFor( |
| BaseTaggedness base_taggedness, |
| MachineRepresentation field_representation, int field_offset, |
| Type field_type, MachineRepresentation value_representation, |
| Node* value) { |
| WriteBarrierKind write_barrier_kind = |
| WriteBarrierKindFor(base_taggedness, field_representation, field_type, |
| value_representation, value); |
| if (write_barrier_kind != kNoWriteBarrier) { |
| if (base_taggedness == kTaggedBase && |
| field_offset == HeapObject::kMapOffset) { |
| write_barrier_kind = kMapWriteBarrier; |
| } |
| } |
| return write_barrier_kind; |
| } |
| |
| Graph* graph() const { return jsgraph_->graph(); } |
| CommonOperatorBuilder* common() const { return jsgraph_->common(); } |
| SimplifiedOperatorBuilder* simplified() const { |
| return jsgraph_->simplified(); |
| } |
| |
| void LowerToCheckedInt32Mul(Node* node, Truncation truncation, |
| Type input0_type, Type input1_type) { |
| // If one of the inputs is positive and/or truncation is being applied, |
| // there is no need to return -0. |
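|     // E.g. -2 * 0 is -0 in JavaScript, so if the use distinguishes 0 from -0 |
|     // and neither input is known to be positive, CheckedInt32Mul has to |
|     // deoptimize when the result would be -0. |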
| CheckForMinusZeroMode mz_mode = |
| truncation.IdentifiesZeroAndMinusZero() || |
| IsSomePositiveOrderedNumber(input0_type) || |
| IsSomePositiveOrderedNumber(input1_type) |
| ? CheckForMinusZeroMode::kDontCheckForMinusZero |
| : CheckForMinusZeroMode::kCheckForMinusZero; |
| NodeProperties::ChangeOp(node, simplified()->CheckedInt32Mul(mz_mode)); |
| } |
| |
| void ChangeToInt32OverflowOp(Node* node) { |
| NodeProperties::ChangeOp(node, Int32OverflowOp(node)); |
| } |
| |
| void ChangeToUint32OverflowOp(Node* node) { |
| NodeProperties::ChangeOp(node, Uint32OverflowOp(node)); |
| } |
| |
| template <Phase T> |
| void VisitSpeculativeIntegerAdditiveOp(Node* node, Truncation truncation, |
| SimplifiedLowering* lowering) { |
| Type left_upper = GetUpperBound(node->InputAt(0)); |
| Type right_upper = GetUpperBound(node->InputAt(1)); |
| |
| if (left_upper.Is(type_cache_->kAdditiveSafeIntegerOrMinusZero) && |
| right_upper.Is(type_cache_->kAdditiveSafeIntegerOrMinusZero)) { |
| // Only eliminate the node if its typing rule can be satisfied, namely |
| // that a safe integer is produced. |
| if (truncation.IsUnused()) return VisitUnused<T>(node); |
| |
| // If we know how to interpret the result or if the users only care |
|       // about the low 32-bits, we can truncate to Word32 and do a wrapping |
| // addition. |
| if (GetUpperBound(node).Is(Type::Signed32()) || |
| GetUpperBound(node).Is(Type::Unsigned32()) || |
| truncation.IsUsedAsWord32()) { |
| // => Int32Add/Sub |
| VisitWord32TruncatingBinop<T>(node); |
| if (lower<T>()) ChangeToPureOp(node, Int32Op(node)); |
| return; |
| } |
| } |
| |
| // Try to use type feedback. |
| NumberOperationHint hint = NumberOperationHintOf(node->op()); |
| DCHECK(hint == NumberOperationHint::kSignedSmall || |
| hint == NumberOperationHint::kSigned32); |
| |
| Type left_feedback_type = TypeOf(node->InputAt(0)); |
| Type right_feedback_type = TypeOf(node->InputAt(1)); |
| |
| // Using Signed32 as restriction type amounts to promising there won't be |
| // signed overflow. This is incompatible with relying on a Word32 |
| // truncation in order to skip the overflow check. |
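|     // E.g. if the only use is a bitwise operation (a Word32 truncation), the |
|     // node is lowered to a wrapping Int32Add below, so its result may leave |
|     // the Signed32 range and the restriction must stay Type::Any(). |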
| Type const restriction = |
| truncation.IsUsedAsWord32() ? Type::Any() : Type::Signed32(); |
| |
| // Handle the case when no int32 checks on inputs are necessary (but |
| // an overflow check is needed on the output). Note that we do not |
| // have to do any check if at most one side can be minus zero. For |
| // subtraction we need to handle the case of -0 - 0 properly, since |
| // that can produce -0. |
| Type left_constraint_type = |
| node->opcode() == IrOpcode::kSpeculativeSafeIntegerAdd |
| ? Type::Signed32OrMinusZero() |
| : Type::Signed32(); |
| if (left_upper.Is(left_constraint_type) && |
| right_upper.Is(Type::Signed32OrMinusZero()) && |
| (left_upper.Is(Type::Signed32()) || right_upper.Is(Type::Signed32()))) { |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32, restriction); |
| } else { |
| // If the output's truncation is identify-zeros, we can pass it |
| // along. Moreover, if the operation is addition and we know the |
| // right-hand side is not minus zero, we do not have to distinguish |
| // between 0 and -0. |
| IdentifyZeros left_identify_zeros = truncation.identify_zeros(); |
| if (node->opcode() == IrOpcode::kSpeculativeSafeIntegerAdd && |
| !right_feedback_type.Maybe(Type::MinusZero())) { |
| left_identify_zeros = kIdentifyZeros; |
| } |
| UseInfo left_use = CheckedUseInfoAsWord32FromHint(hint, FeedbackSource(), |
| left_identify_zeros); |
| // For CheckedInt32Add and CheckedInt32Sub, we don't need to do |
| // a minus zero check for the right hand side, since we already |
| // know that the left hand side is a proper Signed32 value, |
| // potentially guarded by a check. |
| UseInfo right_use = CheckedUseInfoAsWord32FromHint(hint, FeedbackSource(), |
| kIdentifyZeros); |
| VisitBinop<T>(node, left_use, right_use, MachineRepresentation::kWord32, |
| restriction); |
| } |
| |
| if (lower<T>()) { |
| if (truncation.IsUsedAsWord32() || |
| !CanOverflowSigned32(node->op(), left_feedback_type, |
| right_feedback_type, type_cache_, |
| graph_zone())) { |
| ChangeToPureOp(node, Int32Op(node)); |
| |
| } else { |
| ChangeToInt32OverflowOp(node); |
| } |
| } |
| return; |
| } |
| |
| template <Phase T> |
| void VisitSpeculativeAdditiveOp(Node* node, Truncation truncation, |
| SimplifiedLowering* lowering) { |
| if (BothInputsAre(node, type_cache_->kAdditiveSafeIntegerOrMinusZero) && |
| (GetUpperBound(node).Is(Type::Signed32()) || |
| GetUpperBound(node).Is(Type::Unsigned32()) || |
| truncation.IsUsedAsWord32())) { |
| // => Int32Add/Sub |
| VisitWord32TruncatingBinop<T>(node); |
| if (lower<T>()) ChangeToPureOp(node, Int32Op(node)); |
| return; |
| } |
| |
| // default case => Float64Add/Sub |
| VisitBinop<T>(node, |
| UseInfo::CheckedNumberOrOddballAsFloat64(kDistinguishZeros, |
| FeedbackSource()), |
| MachineRepresentation::kFloat64, Type::Number()); |
| if (lower<T>()) { |
| ChangeToPureOp(node, Float64Op(node)); |
| } |
| return; |
| } |
| |
| template <Phase T> |
| void VisitSpeculativeNumberModulus(Node* node, Truncation truncation, |
| SimplifiedLowering* lowering) { |
| if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN()) && |
| (truncation.IsUsedAsWord32() || |
| NodeProperties::GetType(node).Is(Type::Unsigned32()))) { |
| // => unsigned Uint32Mod |
| VisitWord32TruncatingBinop<T>(node); |
| if (lower<T>()) DeferReplacement(node, lowering->Uint32Mod(node)); |
| return; |
| } |
| if (BothInputsAre(node, Type::Signed32OrMinusZeroOrNaN()) && |
| (truncation.IsUsedAsWord32() || |
| NodeProperties::GetType(node).Is(Type::Signed32()))) { |
| // => signed Int32Mod |
| VisitWord32TruncatingBinop<T>(node); |
| if (lower<T>()) DeferReplacement(node, lowering->Int32Mod(node)); |
| return; |
| } |
| |
| // Try to use type feedback. |
| NumberOperationHint hint = NumberOperationHintOf(node->op()); |
| |
| // Handle the case when no uint32 checks on inputs are necessary |
| // (but an overflow check is needed on the output). |
| if (BothInputsAreUnsigned32(node)) { |
| if (hint == NumberOperationHint::kSignedSmall || |
| hint == NumberOperationHint::kSigned32) { |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32, Type::Unsigned32()); |
| if (lower<T>()) ChangeToUint32OverflowOp(node); |
| return; |
| } |
| } |
| |
| // Handle the case when no int32 checks on inputs are necessary |
| // (but an overflow check is needed on the output). |
| if (BothInputsAre(node, Type::Signed32())) { |
| // If both the inputs and the feedback are int32, use the overflow op.
| if (hint == NumberOperationHint::kSignedSmall || |
| hint == NumberOperationHint::kSigned32) { |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32, Type::Signed32()); |
| if (lower<T>()) ChangeToInt32OverflowOp(node); |
| return; |
| } |
| } |
| |
| if (hint == NumberOperationHint::kSignedSmall || |
| hint == NumberOperationHint::kSigned32) { |
| // If the result is truncated, we only need to check the inputs. |
| // For the left hand side we just propagate the identify-zeros
| // mode of the {truncation}; for modulus the sign of the
| // right hand side doesn't matter anyway, so in particular there's
| // no observable difference between 0 and -0 in that case.
| UseInfo const lhs_use = CheckedUseInfoAsWord32FromHint( |
| hint, FeedbackSource(), truncation.identify_zeros()); |
| UseInfo const rhs_use = CheckedUseInfoAsWord32FromHint( |
| hint, FeedbackSource(), kIdentifyZeros); |
| if (truncation.IsUsedAsWord32()) { |
| VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kWord32); |
| if (lower<T>()) DeferReplacement(node, lowering->Int32Mod(node)); |
| } else if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN())) { |
| VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kWord32, |
| Type::Unsigned32()); |
| if (lower<T>()) ChangeToUint32OverflowOp(node); |
| } else { |
| VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kWord32, |
| Type::Signed32()); |
| if (lower<T>()) ChangeToInt32OverflowOp(node); |
| } |
| return; |
| } |
| |
| if (TypeOf(node->InputAt(0)).Is(Type::Unsigned32()) && |
| TypeOf(node->InputAt(1)).Is(Type::Unsigned32()) && |
| (truncation.IsUsedAsWord32() || |
| NodeProperties::GetType(node).Is(Type::Unsigned32()))) { |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32, Type::Number()); |
| if (lower<T>()) DeferReplacement(node, lowering->Uint32Mod(node)); |
| return; |
| } |
| if (TypeOf(node->InputAt(0)).Is(Type::Signed32()) && |
| TypeOf(node->InputAt(1)).Is(Type::Signed32()) && |
| (truncation.IsUsedAsWord32() || |
| NodeProperties::GetType(node).Is(Type::Signed32()))) { |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32, Type::Number()); |
| if (lower<T>()) DeferReplacement(node, lowering->Int32Mod(node)); |
| return; |
| } |
| |
| // default case => Float64Mod |
| // For the left hand side we just propagate the identify-zeros
| // mode of the {truncation}; for modulus the sign of the
| // right hand side doesn't matter anyway, so in particular there's
| // no observable difference between 0 and -0 in that case.
| UseInfo const lhs_use = UseInfo::CheckedNumberOrOddballAsFloat64( |
| truncation.identify_zeros(), FeedbackSource()); |
| UseInfo const rhs_use = UseInfo::CheckedNumberOrOddballAsFloat64( |
| kIdentifyZeros, FeedbackSource()); |
| VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kFloat64, |
| Type::Number()); |
| if (lower<T>()) ChangeToPureOp(node, Float64Op(node)); |
| return; |
| } |
| |
| // Just assert for Propagate and Retype; the specialization for Lower
| // is defined below.
| template <Phase T> |
| void InsertUnreachableIfNecessary(Node* node) { |
| static_assert(propagate<T>() || retype<T>(), |
| "This version of InsertUnreachableIfNecessary has to be " |
| "called in the Propagate or Retype phase."); |
| } |
| |
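| // Lowers CheckBounds to CheckedUint32Bounds or CheckedUint64Bounds,
| // depending on the index and length types; negative indices are mapped
| // into the upper unsigned range so that a single unsigned comparison also
| // rejects them.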
| template <Phase T> |
| void VisitCheckBounds(Node* node, SimplifiedLowering* lowering) { |
| CheckBoundsParameters const& p = CheckBoundsParametersOf(node->op()); |
| FeedbackSource const& feedback = p.check_parameters().feedback(); |
| Type const index_type = TypeOf(node->InputAt(0)); |
| Type const length_type = TypeOf(node->InputAt(1)); |
| |
| // Conversions, if requested and needed, will be handled by the |
| // representation changer, not by the lower-level Checked*Bounds operators. |
| CheckBoundsFlags new_flags = |
| p.flags().without(CheckBoundsFlag::kConvertStringAndMinusZero); |
| |
| if (length_type.Is(Type::Unsigned31())) { |
| if (index_type.Is(Type::Integral32()) || |
| (index_type.Is(Type::Integral32OrMinusZero()) && |
| p.flags() & CheckBoundsFlag::kConvertStringAndMinusZero)) { |
| // Map the values in the [-2^31,-1] range to the [2^31,2^32-1] range, |
| // which will be considered out-of-bounds because the {length_type} is |
| // limited to Unsigned31. This also converts -0 to 0. |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32); |
| if (lower<T>()) { |
| if (lowering->poisoning_level_ == |
| PoisoningMitigationLevel::kDontPoison && |
| (index_type.IsNone() || length_type.IsNone() || |
| (index_type.Min() >= 0.0 && |
| index_type.Max() < length_type.Min()))) { |
| // The bounds check is redundant if we already know that |
| // the index is within the bounds of [0.0, length).
| // TODO(neis): Move this into TypedOptimization? |
| new_flags |= CheckBoundsFlag::kAbortOnOutOfBounds; |
| } |
| NodeProperties::ChangeOp( |
| node, simplified()->CheckedUint32Bounds(feedback, new_flags)); |
| } |
| } else if (p.flags() & CheckBoundsFlag::kConvertStringAndMinusZero) { |
| VisitBinop<T>(node, UseInfo::CheckedTaggedAsArrayIndex(feedback), |
| UseInfo::Word(), MachineType::PointerRepresentation()); |
| if (lower<T>()) { |
| if (jsgraph_->machine()->Is64()) { |
| NodeProperties::ChangeOp( |
| node, simplified()->CheckedUint64Bounds(feedback, new_flags)); |
| } else { |
| NodeProperties::ChangeOp( |
| node, simplified()->CheckedUint32Bounds(feedback, new_flags)); |
| } |
| } |
| } else { |
| VisitBinop<T>( |
| node, UseInfo::CheckedSigned32AsWord32(kDistinguishZeros, feedback), |
| UseInfo::TruncatingWord32(), MachineRepresentation::kWord32); |
| if (lower<T>()) { |
| NodeProperties::ChangeOp( |
| node, simplified()->CheckedUint32Bounds(feedback, new_flags)); |
| } |
| } |
| } else { |
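| // The length is a positive safe integer that may exceed Unsigned31, so
| // both the index and the length have to be checked as 64-bit values.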
| CHECK(length_type.Is(type_cache_->kPositiveSafeInteger)); |
| IdentifyZeros zero_handling = |
| (p.flags() & CheckBoundsFlag::kConvertStringAndMinusZero) |
| ? kIdentifyZeros |
| : kDistinguishZeros; |
| VisitBinop<T>(node, |
| UseInfo::CheckedSigned64AsWord64(zero_handling, feedback), |
| UseInfo::Word64(), MachineRepresentation::kWord64); |
| if (lower<T>()) { |
| NodeProperties::ChangeOp( |
| node, simplified()->CheckedUint64Bounds(feedback, new_flags)); |
| } |
| } |
| } |
| |
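| // Returns the machine type corresponding to a fast API call's C type;
| // kVoid and kV8Value are represented as tagged values.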
| static MachineType MachineTypeFor(CTypeInfo::Type type) { |
| switch (type) { |
| case CTypeInfo::Type::kVoid: |
| return MachineType::AnyTagged(); |
| case CTypeInfo::Type::kBool: |
| return MachineType::Bool(); |
| case CTypeInfo::Type::kInt32: |
| return MachineType::Int32(); |
| case CTypeInfo::Type::kUint32: |
| return MachineType::Uint32(); |
| case CTypeInfo::Type::kInt64: |
| return MachineType::Int64(); |
| case CTypeInfo::Type::kUint64: |
| return MachineType::Uint64(); |
| case CTypeInfo::Type::kFloat32: |
| return MachineType::Float32(); |
| case CTypeInfo::Type::kFloat64: |
| return MachineType::Float64(); |
| case CTypeInfo::Type::kV8Value: |
| return MachineType::AnyTagged(); |
| } |
| } |
| |
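| // Returns the use info (and thereby the checked conversion) required for
| // a fast API call argument of the given C type.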
| UseInfo UseInfoForFastApiCallArgument(CTypeInfo::Type type, |
| FeedbackSource const& feedback) { |
| switch (type) { |
| case CTypeInfo::Type::kVoid: |
| UNREACHABLE(); |
| case CTypeInfo::Type::kBool: |
| return UseInfo::Bool(); |
| case CTypeInfo::Type::kInt32: |
| case CTypeInfo::Type::kUint32: |
| return UseInfo::CheckedNumberAsWord32(feedback); |
| // TODO(mslekova): We deopt for unsafe integers, but ultimately we want |
| // to make this less restrictive in order to stay on the fast path. |
| case CTypeInfo::Type::kInt64: |
| case CTypeInfo::Type::kUint64: |
| return UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, feedback); |
| case CTypeInfo::Type::kFloat32: |
| case CTypeInfo::Type::kFloat64: |
| return UseInfo::CheckedNumberAsFloat64(kDistinguishZeros, feedback); |
| case CTypeInfo::Type::kV8Value: |
| return UseInfo::AnyTagged(); |
| } |
| } |
| |
| static constexpr int kInitialArgumentsCount = 10; |
| |
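| // The value inputs of a FastApiCall node are laid out as: the fast-call
| // target, the C arguments, the call code of the slow call, the JS
| // arguments of the slow call, and finally the remaining inputs, which are
| // kept tagged.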
| template <Phase T> |
| void VisitFastApiCall(Node* node, SimplifiedLowering* lowering) { |
| FastApiCallParameters const& op_params = |
| FastApiCallParametersOf(node->op()); |
| const CFunctionInfo* c_signature = op_params.signature(); |
| const int c_arg_count = c_signature->ArgumentCount(); |
| CallDescriptor* call_descriptor = op_params.descriptor(); |
| int js_arg_count = static_cast<int>(call_descriptor->ParameterCount()); |
| const int value_input_count = node->op()->ValueInputCount(); |
| CHECK_EQ(FastApiCallNode::ArityForArgc(c_arg_count, js_arg_count), |
| value_input_count); |
| |
| base::SmallVector<UseInfo, kInitialArgumentsCount> arg_use_info( |
| c_arg_count); |
| // The target of the fast call. |
| ProcessInput<T>(node, 0, UseInfo::Word()); |
| // Propagate representation information from TypeInfo. |
| for (int i = 0; i < c_arg_count; i++) { |
| arg_use_info[i] = UseInfoForFastApiCallArgument( |
| c_signature->ArgumentInfo(i).GetType(), op_params.feedback()); |
| ProcessInput<T>(node, i + FastApiCallNode::kFastTargetInputCount, |
| arg_use_info[i]); |
| } |
| |
| // The call code for the slow call. |
| ProcessInput<T>(node, c_arg_count + FastApiCallNode::kFastTargetInputCount, |
| UseInfo::AnyTagged()); |
| for (int i = 1; i <= js_arg_count; i++) { |
| ProcessInput<T>(node, |
| c_arg_count + FastApiCallNode::kFastTargetInputCount + i, |
| TruncatingUseInfoFromRepresentation( |
| call_descriptor->GetInputType(i).representation())); |
| } |
| for (int i = c_arg_count + FastApiCallNode::kFastTargetInputCount + |
| js_arg_count; |
| i < value_input_count; ++i) { |
| ProcessInput<T>(node, i, UseInfo::AnyTagged()); |
| } |
| ProcessRemainingInputs<T>(node, value_input_count); |
| |
| MachineType return_type = |
| MachineTypeFor(c_signature->ReturnInfo().GetType()); |
| SetOutput<T>(node, return_type.representation()); |
| } |
| |
| // Dispatching routine for visiting the node {node} with the usage {use}. |
| // Depending on the operator, propagate new usage info to the inputs. |
| template <Phase T> |
| void VisitNode(Node* node, Truncation truncation, |
| SimplifiedLowering* lowering) { |
| tick_counter_->TickAndMaybeEnterSafepoint(); |
| |
| // Unconditionally eliminate unused pure nodes (only relevant if there's |
| // a pure operation in between two effectful ones, where the last one |
| // is unused). |
| // Note: We must not do this for constants, as they are cached and we |
| // would thus kill the cached {node} during lowering (i.e. replace all |
| // uses with Dead), but at that point some node lowering might have |
| // already taken the constant {node} from the cache (while it was not |
| // yet killed) and we would afterwards replace that use with Dead as well. |
| if (node->op()->ValueInputCount() > 0 && |
| node->op()->HasProperty(Operator::kPure) && truncation.IsUnused()) { |
| return VisitUnused<T>(node); |
| } |
| |
| if (lower<T>()) InsertUnreachableIfNecessary<T>(node); |
| |
| switch (node->opcode()) { |
| //------------------------------------------------------------------ |
| // Common operators. |
| //------------------------------------------------------------------ |
| case IrOpcode::kStart: |
| // We use Start as a terminator for the frame state chain, so even
| // though Start doesn't really produce a value, we have to say Tagged
| // here, otherwise the input conversion will fail.
| return VisitLeaf<T>(node, MachineRepresentation::kTagged); |
| case IrOpcode::kParameter: |
| return VisitUnop<T>(node, UseInfo::None(), |
| linkage() |
| ->GetParameterType(ParameterIndexOf(node->op())) |
| .representation()); |
| case IrOpcode::kInt32Constant: |
| return VisitLeaf<T>(node, MachineRepresentation::kWord32); |
| case IrOpcode::kInt64Constant: |
| return VisitLeaf<T>(node, MachineRepresentation::kWord64); |
| case IrOpcode::kExternalConstant: |
| return VisitLeaf<T>(node, MachineType::PointerRepresentation()); |
| case IrOpcode::kNumberConstant: { |
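| // Number constants in Smi range get a tagged-signed representation and
| // are replaced by the corresponding Smi-encoded word; all other number
| // constants stay tagged.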
| double const value = OpParameter<double>(node->op()); |
| int value_as_int; |
| if (DoubleToSmiInteger(value, &value_as_int)) { |
| VisitLeaf<T>(node, MachineRepresentation::kTaggedSigned); |
| if (lower<T>()) { |
| intptr_t smi = bit_cast<intptr_t>(Smi::FromInt(value_as_int)); |
| DeferReplacement(node, lowering->jsgraph()->IntPtrConstant(smi)); |
| } |
| return; |
| } |
| VisitLeaf<T>(node, MachineRepresentation::kTagged); |
| return; |
| } |
| case IrOpcode::kHeapConstant: |
| case IrOpcode::kDelayedStringConstant: |
| return VisitLeaf<T>(node, MachineRepresentation::kTaggedPointer); |
| case IrOpcode::kPointerConstant: { |
| VisitLeaf<T>(node, MachineType::PointerRepresentation()); |
| if (lower<T>()) { |
| intptr_t const value = OpParameter<intptr_t>(node->op()); |
| DeferReplacement(node, lowering->jsgraph()->IntPtrConstant(value)); |
| } |
| return; |
| } |
| |
| case IrOpcode::kBranch: { |
| DCHECK(TypeOf(node->InputAt(0)).Is(Type::Boolean())); |
| ProcessInput<T>(node, 0, UseInfo::Bool()); |
| EnqueueInput<T>(node, NodeProperties::FirstControlIndex(node)); |
| return; |
| } |
| case IrOpcode::kSwitch: |
| ProcessInput<T>(node, 0, UseInfo::TruncatingWord32()); |
| EnqueueInput<T>(node, NodeProperties::FirstControlIndex(node)); |
| return; |
| case IrOpcode::kSelect: |
| return VisitSelect<T>(node, truncation, lowering); |
| case IrOpcode::kPhi: |
| return VisitPhi<T>(node, truncation, lowering); |
| case IrOpcode::kCall: |
| return VisitCall<T>(node, lowering); |
| |
| //------------------------------------------------------------------ |
| // JavaScript operators. |
| //------------------------------------------------------------------ |
| case IrOpcode::kToBoolean: { |
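| // If all uses only need the boolean value, request the input as a bit
| // and replace the node by its input; otherwise the result stays a
| // tagged pointer.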
| if (truncation.IsUsedAsBool()) { |
| ProcessInput<T>(node, 0, UseInfo::Bool()); |
| SetOutput<T>(node, MachineRepresentation::kBit); |
| if (lower<T>()) DeferReplacement(node, node->InputAt(0)); |
| } else { |
| VisitInputs<T>(node); |
| SetOutput<T>(node, MachineRepresentation::kTaggedPointer); |
| } |
| return; |
| } |
| case IrOpcode::kJSToNumber: |
| case IrOpcode::kJSToNumberConvertBigInt: |
| case IrOpcode::kJSToNumeric: { |
| DCHECK(NodeProperties::GetType(node).Is(Type::Union( |
| Type::BigInt(), Type::NumberOrOddball(), graph()->zone()))); |
| VisitInputs<T>(node); |
| // TODO(bmeurer): Optimize somewhat based on input type? |
| if (truncation.IsUsedAsWord32()) { |
| SetOutput<T>(node, MachineRepresentation::kWord32); |
| if (lower<T>()) |
| lowering->DoJSToNumberOrNumericTruncatesToWord32(node, this); |
| } else if (truncation.TruncatesOddballAndBigIntToNumber()) { |
| SetOutput<T>(node, MachineRepresentation::kFloat64); |
| if (lower<T>()) |
| lowering->DoJSToNumberOrNumericTruncatesToFloat64(node, this); |
| } else { |
| SetOutput<T>(node, MachineRepresentation::kTagged); |
| } |
| return; |
| } |
| |
| //------------------------------------------------------------------ |
| // Simplified operators. |
| //------------------------------------------------------------------ |
| case IrOpcode::kBooleanNot: { |
| if (lower<T>()) { |
| NodeInfo* input_info = GetInfo(node->InputAt(0)); |
| if (input_info->representation() == MachineRepresentation::kBit) { |
| // BooleanNot(x: kRepBit) => Word32Equal(x, #0) |
| node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0)); |
| NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal()); |
| } else if (CanBeTaggedPointer(input_info->representation())) { |
| // BooleanNot(x: kRepTagged) => WordEqual(x, #false) |
| node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant()); |
| NodeProperties::ChangeOp(node, lowering->machine()->WordEqual()); |
| } else { |
| DCHECK(TypeOf(node->InputAt(0)).IsNone()); |
| DeferReplacement(node, lowering->jsgraph()->Int32Constant(0)); |
| } |
| } else { |
| // No input representation requirement; adapt during lowering. |
| ProcessInput<T>(node, 0, UseInfo::AnyTruncatingToBool()); |
| SetOutput<T>(node, MachineRepresentation::kBit); |
| } |
| return; |
| } |
| case IrOpcode::kNumberEqual: { |
| Type const lhs_type = TypeOf(node->InputAt(0)); |
| Type const rhs_type = TypeOf(node->InputAt(1)); |
| // Regular number comparisons in JavaScript generally identify zeros, |
| // so we always pass kIdentifyZeros for the inputs, and in addition |
| // we can truncate -0 to 0 for otherwise Unsigned32 or Signed32 inputs. |
| // For equality we also handle the case that one side is non-zero, in
| // which case we can also truncate NaN to 0 on the other side.
| if ((lhs_type.Is(Type::Unsigned32OrMinusZero()) && |
| rhs_type.Is(Type::Unsigned32OrMinusZero())) || |
| (lhs_type.Is(Type::Unsigned32OrMinusZeroOrNaN()) && |
| rhs_type.Is(Type::Unsigned32OrMinusZeroOrNaN()) && |
| OneInputCannotBe(node, type_cache_->kZeroish))) { |
| // => unsigned Int32Cmp |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kBit); |
| if (lower<T>()) NodeProperties::ChangeOp(node, Uint32Op(node)); |
| return; |
| } |
| if ((lhs_type.Is(Type::Signed32OrMinusZero()) && |
| rhs_type.Is(Type::Signed32OrMinusZero())) || |
| (lhs_type.Is(Type::Signed32OrMinusZeroOrNaN()) && |
| rhs_type.Is(Type::Signed32OrMinusZeroOrNaN()) && |
| OneInputCannotBe(node, type_cache_->kZeroish))) { |
| // => signed Int32Cmp |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kBit); |
| if (lower<T>()) NodeProperties::ChangeOp(node, Int32Op(node)); |
| return; |
| } |
| // => Float64Cmp |
| VisitBinop<T>(node, UseInfo::TruncatingFloat64(kIdentifyZeros), |
| MachineRepresentation::kBit); |
| if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node)); |
| return; |
| } |
| case IrOpcode::kNumberLessThan: |
| case IrOpcode::kNumberLessThanOrEqual: { |
| Type const lhs_type = TypeOf(node->InputAt(0)); |
| Type const rhs_type = TypeOf(node->InputAt(1)); |
| // Regular number comparisons in JavaScript generally identify zeros, |
| // so we always pass kIdentifyZeros for the inputs, and in addition |
| // we can truncate -0 to 0 for otherwise Unsigned32 or Signed32 inputs. |
| if (lhs_type.Is(Type::Unsigned32OrMinusZero()) && |
| rhs_type.Is(Type::Unsigned32OrMinusZero())) { |
| // => unsigned Int32Cmp |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kBit); |
| if (lower<T>()) NodeProperties::ChangeOp(node, Uint32Op(node)); |
| } else if (lhs_type.Is(Type::Signed32OrMinusZero()) && |
| rhs_type.Is(Type::Signed32OrMinusZero())) { |
| // => signed Int32Cmp |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kBit); |
| if (lower<T>()) NodeProperties::ChangeOp(node, Int32Op(node)); |
| } else { |
| // => Float64Cmp |
| VisitBinop<T>(node, UseInfo::TruncatingFloat64(kIdentifyZeros), |
| MachineRepresentation::kBit); |
| if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node)); |
| } |
| return; |
| } |
| |
| case IrOpcode::kSpeculativeSafeIntegerAdd: |
| case IrOpcode::kSpeculativeSafeIntegerSubtract: |
| return VisitSpeculativeIntegerAdditiveOp<T>(node, truncation, lowering); |
| |
| case IrOpcode::kSpeculativeNumberAdd: |
| case IrOpcode::kSpeculativeNumberSubtract: |
| return VisitSpeculativeAdditiveOp<T>(node, truncation, lowering); |
| |
| case IrOpcode::kSpeculativeNumberLessThan: |
| case IrOpcode::kSpeculativeNumberLessThanOrEqual: |
| case IrOpcode::kSpeculativeNumberEqual: { |
| Type const lhs_type = TypeOf(node->InputAt(0)); |
| Type const rhs_type = TypeOf(node->InputAt(1)); |
| // Regular number comparisons in JavaScript generally identify zeros, |
| // so we always pass kIdentifyZeros for the inputs, and in addition |
| // we can truncate -0 to 0 for otherwise Unsigned32 or Signed32 inputs. |
| if (lhs_type.Is(Type::Unsigned32OrMinusZero()) && |
| rhs_type.Is(Type::Unsigned32OrMinusZero())) { |
| // => unsigned Int32Cmp |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kBit); |
| if (lower<T>()) ChangeToPureOp(node, Uint32Op(node)); |
| return; |
| } else if (lhs_type.Is(Type::Signed32OrMinusZero()) && |
| rhs_type.Is(Type::Signed32OrMinusZero())) { |
| // => signed Int32Cmp |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kBit); |
| if (lower<T>()) ChangeToPureOp(node, Int32Op(node)); |
| return; |
| } |
| // Try to use type feedback. |
| NumberOperationHint hint = NumberOperationHintOf(node->op()); |
| switch (hint) { |
| case NumberOperationHint::kSigned32: |
| case NumberOperationHint::kSignedSmall: |
| if (propagate<T>()) { |
| VisitBinop<T>(node, |
| CheckedUseInfoAsWord32FromHint( |
| hint, FeedbackSource(), kIdentifyZeros), |
| MachineRepresentation::kBit); |
| } else if (retype<T>()) { |
| SetOutput<T>(node, MachineRepresentation::kBit, Type::Any()); |
| } else { |
| DCHECK(lower<T>()); |
| Node* lhs = node->InputAt(0); |
| Node* rhs = node->InputAt(1); |
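| // If both inputs already have a tagged representation, compare them
| // directly as tagged Smi values to avoid untagging; otherwise compare
| // them as checked Word32 values.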
| if (IsNodeRepresentationTagged(lhs) && |
| IsNodeRepresentationTagged(rhs)) { |
| VisitBinop<T>(node, |
| UseInfo::CheckedSignedSmallAsTaggedSigned( |
| FeedbackSource(), kIdentifyZeros), |
| MachineRepresentation::kBit); |
| ChangeToPureOp( |
| node, changer_->TaggedSignedOperatorFor(node->opcode())); |
| |
| } else { |
| VisitBinop<T>(node, |
| CheckedUseInfoAsWord32FromHint( |
| hint, FeedbackSource(), kIdentifyZeros), |
| MachineRepresentation::kBit); |
| ChangeToPureOp(node, Int32Op(node)); |
| } |
| } |
| return; |
| case NumberOperationHint::kSignedSmallInputs: |
| // This doesn't make sense for compare operations. |
| UNREACHABLE(); |
| case NumberOperationHint::kNumberOrOddball: |
| // Abstract and strict equality don't perform ToNumber conversions |
| // on Oddballs, so make sure we don't accidentally sneak in a
| // hint with Oddball feedback here. |
| DCHECK_NE(IrOpcode::kSpeculativeNumberEqual, node->opcode()); |
| V8_FALLTHROUGH; |
| case NumberOperationHint::kNumberOrBoolean: |
| case NumberOperationHint::kNumber: |
| VisitBinop<T>(node, |
| CheckedUseInfoAsFloat64FromHint( |
| hint, FeedbackSource(), kIdentifyZeros), |
| MachineRepresentation::kBit); |
| if (lower<T>()) ChangeToPureOp(node, Float64Op(node)); |
| return; |
| } |
| UNREACHABLE(); |
| return; |
| } |
| |
| case IrOpcode::kNumberAdd: |
| case IrOpcode::kNumberSubtract: { |
| if (TypeOf(node->InputAt(0)) |
| .Is(type_cache_->kAdditiveSafeIntegerOrMinusZero) && |
| TypeOf(node->InputAt(1)) |
| .Is(type_cache_->kAdditiveSafeIntegerOrMinusZero) && |
| (TypeOf(node).Is(Type::Signed32()) || |
| TypeOf(node).Is(Type::Unsigned32()) || |
| truncation.IsUsedAsWord32())) { |
| // => Int32Add/Sub |
| VisitWord32TruncatingBinop<T>(node); |
| if (lower<T>()) ChangeToPureOp(node, Int32Op(node)); |
| } else if (jsgraph_->machine()->Is64() && |
| BothInputsAre(node, type_cache_->kSafeInteger) && |
| GetUpperBound(node).Is(type_cache_->kSafeInteger)) { |
| // => Int64Add/Sub |
| VisitInt64Binop<T>(node); |
| if (lower<T>()) ChangeToPureOp(node, Int64Op(node)); |
| } else { |
| // => Float64Add/Sub |
| VisitFloat64Binop<T>(node); |
| if (lower<T>()) ChangeToPureOp(node, Float64Op(node)); |
| } |
| return; |
| } |
| case IrOpcode::kSpeculativeNumberMultiply: { |
| if (BothInputsAre(node, Type::Integral32()) && |
| (NodeProperties::GetType(node).Is(Type::Signed32()) || |
| NodeProperties::GetType(node).Is(Type::Unsigned32()) || |
| (truncation.IsUsedAsWord32() && |
| NodeProperties::GetType(node).Is( |
| type_cache_->kSafeIntegerOrMinusZero)))) { |
| // Multiply reduces to Int32Mul if the inputs are integers, and |
| // (a) the output is either known to be Signed32, or |
| // (b) the output is known to be Unsigned32, or |
| // (c) the uses are truncating and the result is in the safe |
| // integer range. |
| VisitWord32TruncatingBinop<T>(node); |
| if (lower<T>()) ChangeToPureOp(node, Int32Op(node)); |
| return; |
| } |
| // Try to use type feedback. |
| NumberOperationHint hint = NumberOperationHintOf(node->op()); |
| Type input0_type = TypeOf(node->InputAt(0)); |
| Type input1_type = TypeOf(node->InputAt(1)); |
| |
| // Handle the case when no int32 checks on inputs are necessary |
| // (but an overflow check is needed on the output). |
| if (BothInputsAre(node, Type::Signed32())) { |
| // If both inputs and feedback are int32, use the overflow op. |
| if (hint == NumberOperationHint::kSignedSmall || |
| hint == NumberOperationHint::kSigned32) { |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32, Type::Signed32()); |
| if (lower<T>()) { |
| LowerToCheckedInt32Mul(node, truncation, input0_type, |
| input1_type); |
| } |
| return; |
| } |
| } |
| |
| if (hint == NumberOperationHint::kSignedSmall || |
| hint == NumberOperationHint::kSigned32) { |
| VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint), |
| MachineRepresentation::kWord32, Type::Signed32()); |
| if (lower<T>()) { |
| LowerToCheckedInt32Mul(node, truncation, input0_type, input1_type); |
| } |
| return; |
| } |
| |
| // Checked float64 x float64 => float64 |
| VisitBinop<T>(node, |
| UseInfo::CheckedNumberOrOddballAsFloat64( |
| kDistinguishZeros, FeedbackSource()), |
| MachineRepresentation::kFloat64, Type::Number()); |
| if (lower<T>()) ChangeToPureOp(node, Float64Op(node)); |
| return; |
| } |
| case IrOpcode::kNumberMultiply: { |
| if (TypeOf(node->InputAt(0)).Is(Type::Integral32()) && |
| TypeOf(node->InputAt(1)).Is(Type::Integral32()) && |
| (TypeOf(node).Is(Type::Signed32()) || |
| TypeOf(node).Is(Type::Unsigned32()) || |
| (truncation.IsUsedAsWord32() && |
| TypeOf(node).Is(type_cache_->kSafeIntegerOrMinusZero)))) { |
| // Multiply reduces to Int32Mul if the inputs are integers, and |
| // (a) the output is either known to be Signed32, or |
| // (b) the output is known to be Unsigned32, or |
| // (c) the uses are truncating and the result is in the safe |
| // integer range. |
| VisitWord32TruncatingBinop<T>(node); |
| if (lower<T>()) ChangeToPureOp(node, Int32Op(node)); |
| return; |
| } |
| // Number x Number => Float64Mul |
| VisitFloat64Binop<T>(node); |
| if (lower<T>()) ChangeToPureOp(node, Float64Op(node)); |
| return; |
| } |
| case IrOpcode::kSpeculativeNumberDivide: { |
| if (BothInputsAreUnsigned32(node) && truncation.IsUsedAsWord32()) { |
| // => unsigned Uint32Div |
| VisitWord32TruncatingBinop<T>(node); |
| if (lower<T>()) DeferReplacement(node, lowering->Uint32Div(node)); |
| return; |
| } |
| if (BothInputsAreSigned32(node)) { |
| if (NodeProperties::GetType(node).Is(Type::Signed32())) { |
| // => signed Int32Div |
| VisitWord32TruncatingBinop<T>(node); |
| if (lower<T>()) DeferReplacement(node, lowering->Int32Div(node)); |
| return; |
| } |
| if (truncation.IsUsedAsWord32()) { |
| // => signed Int32Div |
| VisitWord32TruncatingBinop<T>(node); |
| if (lower<T>()) DeferReplacement(node, lowering->Int32Div(node)); |
| return; |
| } |
| } |
| |
| // Try to use type feedback. |
| NumberOperationHint hint = NumberOperationHintOf(node->op()); |
| |
| // Handle the case when no uint32 checks on inputs are necessary |
| // (but an overflow check is needed on the output). |
| if (BothInputsAreUnsigned32(node)) { |
| if (hint == NumberOperationHint::kSignedSmall || |
| hint == NumberOperationHint::kSigned32) { |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32, Type::Unsigned32()); |
| if (lower<T>()) ChangeToUint32OverflowOp(node); |
| return; |
| } |
| } |
| |
| // Handle the case when no int32 checks on inputs are necessary |
| // (but an overflow check is needed on the output). |
| if (BothInputsAreSigned32(node)) { |
| // If both the inputs and the feedback are int32, use the overflow op.
| if (hint == NumberOperationHint::kSignedSmall || |
| hint == NumberOperationHint::kSigned32) { |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32, Type::Signed32()); |
| if (lower<T>()) ChangeToInt32OverflowOp(node); |
| return; |
| } |
| } |
| |
| if (hint == NumberOperationHint::kSigned32 || |
| hint == NumberOperationHint::kSignedSmall || |
| hint == NumberOperationHint::kSignedSmallInputs) { |
| // If the result is truncated, we only need to check the inputs. |
| if (truncation.IsUsedAsWord32()) { |
| VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint), |
| MachineRepresentation::kWord32); |
| if (lower<T>()) DeferReplacement(node, lowering->Int32Div(node)); |
| return; |
| } else if (hint != NumberOperationHint::kSignedSmallInputs) { |
| VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint), |
| MachineRepresentation::kWord32, Type::Signed32()); |
| if (lower<T>()) ChangeToInt32OverflowOp(node); |
| return; |
| } |
| } |
| |
| // default case => Float64Div |
| VisitBinop<T>(node, |
| UseInfo::CheckedNumberOrOddballAsFloat64( |
| kDistinguishZeros, FeedbackSource()), |
| MachineRepresentation::kFloat64, Type::Number()); |
| if (lower<T>()) ChangeToPureOp(node, Float64Op(node)); |
| return; |
| } |
| case IrOpcode::kNumberDivide: { |
| if (TypeOf(node->InputAt(0)).Is(Type::Unsigned32()) && |
| TypeOf(node->InputAt(1)).Is(Type::Unsigned32()) && |
| (truncation.IsUsedAsWord32() || |
| TypeOf(node).Is(Type::Unsigned32()))) { |
| // => unsigned Uint32Div |
| VisitWord32TruncatingBinop<T>(node); |
| if (lower<T>()) DeferReplacement(node, lowering->Uint32Div(node)); |
| return; |
| } |
| if (TypeOf(node->InputAt(0)).Is(Type::Signed32()) && |
| TypeOf(node->InputAt(1)).Is(Type::Signed32()) && |
| (truncation.IsUsedAsWord32() || |
| TypeOf(node).Is(Type::Signed32()))) { |
| // => signed Int32Div |
| VisitWord32TruncatingBinop<T>(node); |
| if (lower<T>()) DeferReplacement(node, lowering->Int32Div(node)); |
| return; |
| } |
| // Number x Number => Float64Div |
| VisitFloat64Binop<T>(node); |
| if (lower<T>()) ChangeToPureOp(node, Float64Op(node)); |
| return; |
| } |
| case IrOpcode::kSpeculativeNumberModulus: |
| return VisitSpeculativeNumberModulus<T>(node, truncation, lowering); |
| case IrOpcode::kNumberModulus: { |
| Type const lhs_type = TypeOf(node->InputAt(0)); |
| Type const rhs_type = TypeOf(node->InputAt(1)); |
| if ((lhs_type.Is(Type::Unsigned32OrMinusZeroOrNaN()) && |
| rhs_type.Is(Type::Unsigned32OrMinusZeroOrNaN())) && |
| (truncation.IsUsedAsWord32() || |
| TypeOf(node).Is(Type::Unsigned32()))) { |
| // => unsigned Uint32Mod |
| VisitWord32TruncatingBinop<T>(node); |
| if (lower<T>()) DeferReplacement(node, lowering->Uint32Mod(node)); |
| return; |
| } |
| if ((lhs_type.Is(Type::Signed32OrMinusZeroOrNaN()) && |
| rhs_type.Is(Type::Signed32OrMinusZeroOrNaN())) && |
| (truncation.IsUsedAsWord32() || TypeOf(node).Is(Type::Signed32()) || |
| (truncation.IdentifiesZeroAndMinusZero() && |
| TypeOf(node).Is(Type::Signed32OrMinusZero())))) { |
| // => signed Int32Mod |
| VisitWord32TruncatingBinop<T>(node); |
| if (lower<T>()) DeferReplacement(node, lowering->Int32Mod(node)); |
| return; |
| } |
| // => Float64Mod |
| // For the left hand side we just propagate the identify-zeros
| // mode of the {truncation}; for modulus the sign of the
| // right hand side doesn't matter anyway, so in particular there's
| // no observable difference between 0 and -0 in that case.
| UseInfo const lhs_use = |
| UseInfo::TruncatingFloat64(truncation.identify_zeros()); |
| UseInfo const rhs_use = UseInfo::TruncatingFloat64(kIdentifyZeros); |
| VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kFloat64); |
| if (lower<T>()) ChangeToPureOp(node, Float64Op(node)); |
| return; |
| } |
| case IrOpcode::kNumberBitwiseOr: |
| case IrOpcode::kNumberBitwiseXor: |
| case IrOpcode::kNumberBitwiseAnd: { |
| VisitWord32TruncatingBinop<T>(node); |
| if (lower<T>()) NodeProperties::ChangeOp(node, Int32Op(node)); |
| return; |
| } |
| case IrOpcode::kSpeculativeNumberBitwiseOr: |
| case IrOpcode::kSpeculativeNumberBitwiseXor: |
| case IrOpcode::kSpeculativeNumberBitwiseAnd: |
| VisitSpeculativeInt32Binop<T>(node); |
| if (lower<T>()) { |
| ChangeToPureOp(node, Int32Op(node)); |
| } |
| return; |
| case IrOpcode::kNumberShiftLeft: { |
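| // JavaScript shift counts are taken modulo 32, so during lowering
| // MaskShiftOperand masks the shift count to its low five bits unless
| // {rhs_type} already guarantees a value in [0, 31].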
| Type rhs_type = GetUpperBound(node->InputAt(1)); |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32); |
| if (lower<T>()) { |
| MaskShiftOperand(node, rhs_type); |
| ChangeToPureOp(node, lowering->machine()->Word32Shl()); |
| } |
| return; |
| } |
| case IrOpcode::kSpeculativeNumberShiftLeft: { |
| if (BothInputsAre(node, Type::NumberOrOddball())) { |
| Type rhs_type = GetUpperBound(node->InputAt(1)); |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32); |
| if (lower<T>()) { |
| MaskShiftOperand(node, rhs_type); |
| ChangeToPureOp(node, lowering->machine()->Word32Shl()); |
| } |
| return; |
| } |
| NumberOperationHint hint = NumberOperationHintOf(node->op()); |
| Type rhs_type = GetUpperBound(node->InputAt(1)); |
| VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint), |
| MachineRepresentation::kWord32, Type::Signed32()); |
| if (lower<T>()) { |
| MaskShiftOperand(node, rhs_type); |
| ChangeToPureOp(node, lowering->machine()->Word32Shl()); |
| } |
| return; |
| } |
| case IrOpcode::kNumberShiftRight: { |
| Type rhs_type = GetUpperBound(node->InputAt(1)); |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32); |
| if (lower<T>()) { |
| MaskShiftOperand(node, rhs_type); |
| ChangeToPureOp(node, lowering->machine()->Word32Sar()); |
| } |
| return; |
| } |
| case IrOpcode::kSpeculativeNumberShiftRight: { |
| if (BothInputsAre(node, Type::NumberOrOddball())) { |
| Type rhs_type = GetUpperBound(node->InputAt(1)); |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32); |
| if (lower<T>()) { |
| MaskShiftOperand(node, rhs_type); |
| ChangeToPureOp(node, lowering->machine()->Word32Sar()); |
| } |
| return; |
| } |
| NumberOperationHint hint = NumberOperationHintOf(node->op()); |
| Type rhs_type = GetUpperBound(node->InputAt(1)); |
| VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint), |
| MachineRepresentation::kWord32, Type::Signed32()); |
| if (lower<T>()) { |
| MaskShiftOperand(node, rhs_type); |
| ChangeToPureOp(node, lowering->machine()->Word32Sar()); |
| } |
| return; |
| } |
| case IrOpcode::kNumberShiftRightLogical: { |
| Type rhs_type = GetUpperBound(node->InputAt(1)); |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32); |
| if (lower<T>()) { |
| MaskShiftOperand(node, rhs_type); |
| ChangeToPureOp(node, lowering->machine()->Word32Shr()); |
| } |
| return; |
| } |
| case IrOpcode::kSpeculativeNumberShiftRightLogical: { |
| NumberOperationHint hint = NumberOperationHintOf(node->op()); |
| Type rhs_type = GetUpperBound(node->InputAt(1)); |
| if (rhs_type.Is(type_cache_->kZeroish) && |
| (hint == NumberOperationHint::kSignedSmall || |
| hint == NumberOperationHint::kSigned32) && |
| !truncation.IsUsedAsWord32()) { |
| // The SignedSmall or Signed32 feedback means that the results that we |
| // have seen so far were of type Unsigned31. We speculate that this |
| // will continue to hold. Moreover, since the RHS is 0, the result |
| // will just be the (converted) LHS. |
| VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint), |
| MachineRepresentation::kWord32, Type::Unsigned31()); |
| if (lower<T>()) { |
| node->RemoveInput(1); |
| NodeProperties::ChangeOp( |
| node, simplified()->CheckedUint32ToInt32(FeedbackSource())); |
| } |
| return; |
| } |
| if (BothInputsAre(node, Type::NumberOrOddball())) { |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32); |
| if (lower<T>()) { |
| MaskShiftOperand(node, rhs_type); |
| ChangeToPureOp(node, lowering->machine()->Word32Shr()); |
| } |
| return; |
| } |
| VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint), |
| MachineRepresentation::kWord32, Type::Unsigned32()); |
| if (lower<T>()) { |
| MaskShiftOperand(node, rhs_type); |
| ChangeToPureOp(node, lowering->machine()->Word32Shr()); |
| } |
| return; |
| } |
| case IrOpcode::kNumberAbs: { |
| // NumberAbs maps both 0 and -0 to 0, so we can generally |
| // pass the kIdentifyZeros truncation to its input, and |
| // choose to ignore minus zero in all cases. |
| Type const input_type = TypeOf(node->InputAt(0)); |
| if (input_type.Is(Type::Unsigned32OrMinusZero())) { |
| VisitUnop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32); |
| if (lower<T>()) DeferReplacement(node, node->InputAt(0)); |
| } else if (input_type.Is(Type::Signed32OrMinusZero())) { |
| VisitUnop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32); |
| if (lower<T>()) DeferReplacement(node, lowering->Int32Abs(node)); |
| } else if (input_type.Is(type_cache_->kPositiveIntegerOrNaN)) { |
| VisitUnop<T>(node, UseInfo::TruncatingFloat64(kIdentifyZeros), |
| MachineRepresentation::kFloat64); |
| if (lower<T>()) DeferReplacement(node, node->InputAt(0)); |
| } else { |
| VisitUnop<T>(node, UseInfo::TruncatingFloat64(kIdentifyZeros), |
| MachineRepresentation::kFloat64); |
| if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node)); |
| } |
| return; |
| } |
| case IrOpcode::kNumberClz32: { |
| VisitUnop<T>(node, UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32); |
| if (lower<T>()) NodeProperties::ChangeOp(node, Uint32Op(node)); |
| return; |
| } |
| case IrOpcode::kNumberImul: { |
| VisitBinop<T>(node, UseInfo::TruncatingWord32(), |
| UseInfo::TruncatingWord32(), |
| MachineRepresentation::kWord32); |
| if (lower<T>()) NodeProperties::ChangeOp(node, Uint32Op(node)); |
| return; |
| } |
| case IrOpcode::kNumberFround: { |
| VisitUnop<T>(node, UseInfo::TruncatingFloat64(), |
| MachineRepresentation::kFloat32); |
| if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node)); |
| return; |
| } |
| case IrOpcode::kNumberMax: { |
| // It is safe to use the feedback types for left and right hand side |
| // here, since we can only narrow those types and thus we can only |
| // promise a more specific truncation. |
| // For NumberMax we generally propagate whether the truncation |
| // identifies zeros to the inputs, and we choose to ignore minus |
| // zero in those cases. |
| Type const lhs_type = TypeOf(node->InputAt(0)); |
| Type const rhs_type = TypeOf(node->InputAt(1)); |
| if ((lhs_type.Is(Type::Unsigned32()) && |
| rhs_type.Is(Type::Unsigned32())) || |
| (lhs_type.Is(Type::Unsigned32OrMinusZero()) && |
| rhs_type.Is(Type::Unsigned32OrMinusZero()) && |
| truncation.IdentifiesZeroAndMinusZero())) { |
| VisitWord32TruncatingBinop<T>(node); |
| if (lower<T>()) { |
| lowering->DoMax(node, lowering->machine()->Uint32LessThan(), |
| MachineRepresentation::kWord32); |
| } |
| } else if ((lhs_type.Is(Type::Signed32()) && |
| rhs_type.Is(Type::Signed32())) || |
| (lhs_type.Is(Type::Signed32OrMinusZero()) && |
| rhs_type.Is(Type::Signed32OrMinusZero()) && |
| truncation.IdentifiesZeroAndMinusZero())) { |
| VisitWord32TruncatingBinop<T>(node); |
| if (lower<T>()) { |
| lowering->DoMax(node, lowering->machine()->Int32LessThan(), |
| MachineRepresentation::kWord32); |
| } |
| } else if (jsgraph_->machine()->Is64() && |
| lhs_type.Is(type_cache_->kSafeInteger) && |
| rhs_type.Is(type_cache_->kSafeInteger)) { |
| VisitInt64Binop<T>(node); |
| if (lower<T>()) { |
| lowering->DoMax(node, lowering->machine()->Int64LessThan(), |
| MachineRepresentation::kWord64); |
| } |
| } else { |
| VisitBinop<T>(node, |
| UseInfo::TruncatingFloat64(truncation.identify_zeros()), |
| MachineRepresentation::kFloat64); |
| if (lower<T>()) { |
| // If the right hand side is not NaN, and the left hand side is
| // neither NaN nor -0 (when the difference between the zeros is
| // observable), we can do a simple floating point comparison here.
| if (lhs_type.Is(truncation.IdentifiesZeroAndMinusZero() |
| ? Type::OrderedNumber() |
| : Type::PlainNumber()) && |
| rhs_type.Is(Type::OrderedNumber())) { |
| lowering->DoMax(node, lowering->machine()->Float64LessThan(), |
| MachineRepresentation::kFloat64); |
| } else { |
| NodeProperties::ChangeOp(node, Float64Op(node)); |
| } |
| } |
| } |
| return; |
| } |
| case IrOpcode::kNumberMin: { |
| // It is safe to use the feedback types for left and right hand side |
| // here, since we can only narrow those types and thus we can only |
| |