blob: ce04e7bd411ab67a54bd226934a8ddb1b82db5c1 [file] [log] [blame]
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/backend/instruction-selector.h"
#include <limits>
#include "src/base/iterator.h"
#include "src/base/platform/wrappers.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/tick-counter.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/schedule.h"
#include "src/compiler/state-values-utils.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/wasm/simd-shuffle.h"
namespace v8 {
namespace internal {
namespace compiler {
// Constructs the selector for one compilation. All per-node side tables
// (defined/used bits, effect levels, virtual registers) are pre-sized to
// {node_count} so per-node lookups are plain vector indexing.
// {max_unoptimized_frame_height} and {max_pushed_argument_count} are
// caller-owned accumulators updated during selection.
InstructionSelector::InstructionSelector(
    Zone* zone, size_t node_count, Linkage* linkage,
    InstructionSequence* sequence, Schedule* schedule,
    SourcePositionTable* source_positions, Frame* frame,
    EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
    JSHeapBroker* broker, size_t* max_unoptimized_frame_height,
    size_t* max_pushed_argument_count, SourcePositionMode source_position_mode,
    Features features, EnableScheduling enable_scheduling,
    EnableRootsRelativeAddressing enable_roots_relative_addressing,
    PoisoningMitigationLevel poisoning_level, EnableTraceTurboJson trace_turbo)
    : zone_(zone),
      linkage_(linkage),
      sequence_(sequence),
      source_positions_(source_positions),
      source_position_mode_(source_position_mode),
      features_(features),
      schedule_(schedule),
      current_block_(nullptr),
      instructions_(zone),
      // NOTE(review): the continuation buffers and instr_origins_ use the
      // sequence's zone rather than {zone} — presumably their contents must
      // outlive instruction selection; confirm before changing.
      continuation_inputs_(sequence->zone()),
      continuation_outputs_(sequence->zone()),
      continuation_temps_(sequence->zone()),
      defined_(node_count, false, zone),
      used_(node_count, false, zone),
      effect_level_(node_count, 0, zone),
      virtual_registers_(node_count,
                         InstructionOperand::kInvalidVirtualRegister, zone),
      virtual_register_rename_(zone),
      scheduler_(nullptr),
      enable_scheduling_(enable_scheduling),
      enable_roots_relative_addressing_(enable_roots_relative_addressing),
      enable_switch_jump_table_(enable_switch_jump_table),
      poisoning_level_(poisoning_level),
      frame_(frame),
      instruction_selection_failed_(false),
      instr_origins_(sequence->zone()),
      trace_turbo_(trace_turbo),
      tick_counter_(tick_counter),
      broker_(broker),
      max_unoptimized_frame_height_(max_unoptimized_frame_height),
      max_pushed_argument_count_(max_pushed_argument_count)
#if V8_TARGET_ARCH_64_BIT
      ,
      phi_states_(node_count, Upper32BitsState::kNotYetChecked, zone)
#endif
{
  DCHECK_EQ(*max_unoptimized_frame_height, 0);  // Caller-initialized.
  instructions_.reserve(node_count);
  // Small initial capacities cover the common continuation shapes.
  continuation_inputs_.reserve(5);
  continuation_outputs_.reserve(2);
  if (trace_turbo_ == kEnableTraceTurboJson) {
    // {-1, 0} marks "no origin recorded yet" for every node.
    instr_origins_.assign(node_count, {-1, 0});
  }
}
// Drives the whole selection pass: marks loop-header phi inputs live,
// selects instructions block by block (iterating the RPO order backwards so
// uses are visited before definitions), and finally replays the buffered
// instructions forward into the InstructionSequence while applying
// virtual-register renames. Returns false if selection failed.
bool InstructionSelector::SelectInstructions() {
  // Mark the inputs of all phis in loop headers as used.
  BasicBlockVector* blocks = schedule()->rpo_order();
  for (auto const block : *blocks) {
    if (!block->IsLoopHeader()) continue;
    DCHECK_LE(2u, block->PredecessorCount());
    for (Node* const phi : *block) {
      if (phi->opcode() != IrOpcode::kPhi) continue;
      // Mark all inputs as used.
      for (Node* const input : phi->inputs()) {
        MarkAsUsed(input);
      }
    }
  }
  // Visit each basic block in post order.
  for (auto i = blocks->rbegin(); i != blocks->rend(); ++i) {
    VisitBlock(*i);
    if (instruction_selection_failed()) return false;
  }
  // Schedule the selected instructions.
  if (UseInstructionScheduling()) {
    scheduler_ = zone()->New<InstructionScheduler>(zone(), sequence());
  }
  for (auto const block : *blocks) {
    InstructionBlock* instruction_block =
        sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
    for (size_t i = 0; i < instruction_block->phis().size(); i++) {
      UpdateRenamesInPhi(instruction_block->PhiAt(i));
    }
    size_t end = instruction_block->code_end();
    size_t start = instruction_block->code_start();
    // Instructions were buffered back-to-front within each block, so the
    // block's {code_end} index precedes its {code_start} index.
    DCHECK_LE(end, start);
    StartBlock(RpoNumber::FromInt(block->rpo_number()));
    if (end != start) {
      // Replay in forward order; the entry at {end} is the terminator.
      while (start-- > end + 1) {
        UpdateRenames(instructions_[start]);
        AddInstruction(instructions_[start]);
      }
      UpdateRenames(instructions_[end]);
      AddTerminator(instructions_[end]);
    }
    EndBlock(RpoNumber::FromInt(block->rpo_number()));
  }
#if DEBUG
  sequence()->ValidateSSA();
#endif
  return true;
}
// Opens instruction block {rpo}, routing through the scheduler when
// instruction scheduling is enabled.
void InstructionSelector::StartBlock(RpoNumber rpo) {
  if (!UseInstructionScheduling()) {
    sequence()->StartBlock(rpo);
    return;
  }
  DCHECK_NOT_NULL(scheduler_);
  scheduler_->StartBlock(rpo);
}
// Closes instruction block {rpo}, routing through the scheduler when
// instruction scheduling is enabled.
void InstructionSelector::EndBlock(RpoNumber rpo) {
  if (!UseInstructionScheduling()) {
    sequence()->EndBlock(rpo);
    return;
  }
  DCHECK_NOT_NULL(scheduler_);
  scheduler_->EndBlock(rpo);
}
// Appends a block terminator. With scheduling enabled the scheduler tracks
// terminators specially; otherwise it is appended like any instruction.
void InstructionSelector::AddTerminator(Instruction* instr) {
  if (!UseInstructionScheduling()) {
    sequence()->AddInstruction(instr);
    return;
  }
  DCHECK_NOT_NULL(scheduler_);
  scheduler_->AddTerminator(instr);
}
// Appends a regular instruction to the scheduler or directly to the
// sequence, depending on whether instruction scheduling is enabled.
void InstructionSelector::AddInstruction(Instruction* instr) {
  if (!UseInstructionScheduling()) {
    sequence()->AddInstruction(instr);
    return;
  }
  DCHECK_NOT_NULL(scheduler_);
  scheduler_->AddInstruction(instr);
}
// Emit with zero inputs; the single output is dropped when invalid.
Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand output,
                                       size_t temp_count,
                                       InstructionOperand* temps) {
  size_t const outputs = output.IsInvalid() ? 0 : 1;
  return Emit(opcode, outputs, &output, 0, nullptr, temp_count, temps);
}
// Emit with one input; the single output is dropped when invalid.
Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand output,
                                       InstructionOperand a, size_t temp_count,
                                       InstructionOperand* temps) {
  InstructionOperand ins[] = {a};
  return Emit(opcode, output.IsInvalid() ? 0 : 1, &output, arraysize(ins), ins,
              temp_count, temps);
}
// Emit with two inputs; the single output is dropped when invalid.
Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand output,
                                       InstructionOperand a,
                                       InstructionOperand b, size_t temp_count,
                                       InstructionOperand* temps) {
  InstructionOperand ins[] = {a, b};
  return Emit(opcode, output.IsInvalid() ? 0 : 1, &output, arraysize(ins), ins,
              temp_count, temps);
}
// Emit with three inputs; the single output is dropped when invalid.
Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand output,
                                       InstructionOperand a,
                                       InstructionOperand b,
                                       InstructionOperand c, size_t temp_count,
                                       InstructionOperand* temps) {
  InstructionOperand ins[] = {a, b, c};
  return Emit(opcode, output.IsInvalid() ? 0 : 1, &output, arraysize(ins), ins,
              temp_count, temps);
}
// Emit with four inputs; the single output is dropped when invalid.
Instruction* InstructionSelector::Emit(
    InstructionCode opcode, InstructionOperand output, InstructionOperand a,
    InstructionOperand b, InstructionOperand c, InstructionOperand d,
    size_t temp_count, InstructionOperand* temps) {
  InstructionOperand ins[] = {a, b, c, d};
  return Emit(opcode, output.IsInvalid() ? 0 : 1, &output, arraysize(ins), ins,
              temp_count, temps);
}
// Emit with five inputs; the single output is dropped when invalid.
Instruction* InstructionSelector::Emit(
    InstructionCode opcode, InstructionOperand output, InstructionOperand a,
    InstructionOperand b, InstructionOperand c, InstructionOperand d,
    InstructionOperand e, size_t temp_count, InstructionOperand* temps) {
  InstructionOperand ins[] = {a, b, c, d, e};
  return Emit(opcode, output.IsInvalid() ? 0 : 1, &output, arraysize(ins), ins,
              temp_count, temps);
}
// Emit with six inputs; the single output is dropped when invalid.
Instruction* InstructionSelector::Emit(
    InstructionCode opcode, InstructionOperand output, InstructionOperand a,
    InstructionOperand b, InstructionOperand c, InstructionOperand d,
    InstructionOperand e, InstructionOperand f, size_t temp_count,
    InstructionOperand* temps) {
  InstructionOperand ins[] = {a, b, c, d, e, f};
  return Emit(opcode, output.IsInvalid() ? 0 : 1, &output, arraysize(ins), ins,
              temp_count, temps);
}
// Creates a new Instruction from the given operand lists and buffers it.
// Selection fails (returning nullptr) if any list exceeds the encodable
// maximum for an Instruction.
Instruction* InstructionSelector::Emit(
    InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
    size_t input_count, InstructionOperand* inputs, size_t temp_count,
    InstructionOperand* temps) {
  const bool exceeds_limits = output_count >= Instruction::kMaxOutputCount ||
                              input_count >= Instruction::kMaxInputCount ||
                              temp_count >= Instruction::kMaxTempCount;
  if (exceeds_limits) {
    set_instruction_selection_failed();
    return nullptr;
  }
  return Emit(Instruction::New(instruction_zone(), opcode, output_count,
                               outputs, input_count, inputs, temp_count,
                               temps));
}
// Buffers {instr}; instructions are flushed to the sequence (in corrected
// order) by SelectInstructions().
Instruction* InstructionSelector::Emit(Instruction* instr) {
  instructions_.push_back(instr);
  return instr;
}
// Returns true if {node} can be folded into ("covered by") the instruction
// selected for {user}.
bool InstructionSelector::CanCover(Node* user, Node* node) const {
  // 1. Covering is only possible within a single basic block.
  if (schedule()->block(user) != schedule()->block(node)) return false;
  // 2. A pure {node} is coverable exactly when {user} is its sole owner.
  if (node->op()->HasProperty(Operator::kPure)) return node->OwnedBy(user);
  // 3. An impure {node} must sit at the same effect level as {user}.
  if (GetEffectLevel(user) != GetEffectLevel(node)) return false;
  // 4. No node other than {user} may consume {node}'s value.
  for (Edge const use : node->use_edges()) {
    if (NodeProperties::IsValueEdge(use) && use.from() != user) return false;
  }
  return true;
}
// Returns true if {node_input} can be covered through the chain
// {user} -> {node} -> {node_input}.
bool InstructionSelector::CanCoverTransitively(Node* user, Node* node,
                                               Node* node_input) const {
  if (!CanCover(user, node) || !CanCover(node, node_input)) return false;
  // For an impure {node}, CanCover already matched effect levels, so
  // covering is transitive.
  if (!node->op()->HasProperty(Operator::kPure)) return true;
  // A pure {node} floats freely, so transitivity only holds if
  // {node_input} is pure too...
  if (node_input->op()->HasProperty(Operator::kPure)) return true;
  // ...or if {user} and {node_input} share an effect level.
  return GetEffectLevel(user) == GetEffectLevel(node_input);
}
// Returns true if {user} is the only node in its basic block that uses
// {node} (and both live in the same block).
bool InstructionSelector::IsOnlyUserOfNodeInSameBlock(Node* user,
                                                      Node* node) const {
  BasicBlock* const block = schedule()->block(user);
  if (schedule()->block(node) != block) return false;
  for (Edge const edge : node->use_edges()) {
    Node* const other = edge.from();
    if (other == user) continue;
    // Any other in-block use disqualifies {user}.
    if (schedule()->block(other) == block) return false;
  }
  return true;
}
// Rewrites every input operand of {instruction} whose virtual register has
// been renamed.
void InstructionSelector::UpdateRenames(Instruction* instruction) {
  size_t const input_count = instruction->InputCount();
  for (size_t index = 0; index < input_count; ++index) {
    TryRename(instruction->InputAt(index));
  }
}
// Applies virtual-register renames to a phi's operands. Phi operands are
// plain register numbers, so the rename is applied by value.
void InstructionSelector::UpdateRenamesInPhi(PhiInstruction* phi) {
  size_t const operand_count = phi->operands().size();
  for (size_t index = 0; index < operand_count; ++index) {
    int const original = phi->operands()[index];
    int const replacement = GetRename(original);
    if (replacement != original) phi->RenameInput(index, replacement);
  }
}
int InstructionSelector::GetRename(int virtual_register) {
int rename = virtual_register;
while (true) {
if (static_cast<size_t>(rename) >= virtual_register_rename_.size()) break;
int next = virtual_register_rename_[rename];
if (next == InstructionOperand::kInvalidVirtualRegister) {
break;
}
rename = next;
}
return rename;
}
// Replaces the virtual register of an unallocated operand with its rename
// target, if any. Non-unallocated operands carry no register to rewrite.
void InstructionSelector::TryRename(InstructionOperand* op) {
  if (!op->IsUnallocated()) return;
  UnallocatedOperand* operand = UnallocatedOperand::cast(op);
  int const current = operand->virtual_register();
  int const replacement = GetRename(current);
  if (replacement == current) return;
  *operand = UnallocatedOperand(*operand, replacement);
}
void InstructionSelector::SetRename(const Node* node, const Node* rename) {
int vreg = GetVirtualRegister(node);
if (static_cast<size_t>(vreg) >= virtual_register_rename_.size()) {
int invalid = InstructionOperand::kInvalidVirtualRegister;
virtual_register_rename_.resize(vreg + 1, invalid);
}
virtual_register_rename_[vreg] = GetVirtualRegister(rename);
}
int InstructionSelector::GetVirtualRegister(const Node* node) {
DCHECK_NOT_NULL(node);
size_t const id = node->id();
DCHECK_LT(id, virtual_registers_.size());
int virtual_register = virtual_registers_[id];
if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
virtual_register = sequence()->NextVirtualRegister();
virtual_registers_[id] = virtual_register;
}
return virtual_register;
}
// Snapshot of the node-id -> virtual-register mapping, skipping nodes that
// never received a register. Test-only helper.
const std::map<NodeId, int> InstructionSelector::GetVirtualRegistersForTesting()
    const {
  std::map<NodeId, int> result;
  for (size_t n = 0; n < virtual_registers_.size(); ++n) {
    int const vreg = virtual_registers_[n];
    if (vreg == InstructionOperand::kInvalidVirtualRegister) continue;
    result.emplace(static_cast<NodeId>(n), vreg);
  }
  return result;
}
// Returns whether {node} has already been marked as defined.
bool InstructionSelector::IsDefined(Node* node) const {
  DCHECK_NOT_NULL(node);
  size_t const index = node->id();
  DCHECK_LT(index, defined_.size());
  return defined_[index];
}
// Flags {node} as defined in the per-node side table.
void InstructionSelector::MarkAsDefined(Node* node) {
  DCHECK_NOT_NULL(node);
  size_t const index = node->id();
  DCHECK_LT(index, defined_.size());
  defined_[index] = true;
}
// Returns whether {node} is live: explicitly marked used, not eliminatable,
// or a Retain node (which must always be emitted).
bool InstructionSelector::IsUsed(Node* node) const {
  DCHECK_NOT_NULL(node);
  // TODO(bmeurer): This is a terrible monster hack, but we have to make sure
  // that the Retain is actually emitted, otherwise the GC will mess up.
  if (node->opcode() == IrOpcode::kRetain) return true;
  // Nodes without the Eliminatable property are unconditionally live.
  if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
  size_t const index = node->id();
  DCHECK_LT(index, used_.size());
  return used_[index];
}
// Flags {node} as used in the per-node side table.
void InstructionSelector::MarkAsUsed(Node* node) {
  DCHECK_NOT_NULL(node);
  size_t const index = node->id();
  DCHECK_LT(index, used_.size());
  used_[index] = true;
}
// Returns the effect level previously recorded for {node}.
int InstructionSelector::GetEffectLevel(Node* node) const {
  DCHECK_NOT_NULL(node);
  size_t const index = node->id();
  DCHECK_LT(index, effect_level_.size());
  return effect_level_[index];
}
// Effect level for {node} in the context of a flags continuation: for
// branches, use the level of the branch's control input (reached via the
// true block's first predecessor).
int InstructionSelector::GetEffectLevel(Node* node,
                                        FlagsContinuation* cont) const {
  if (cont->IsBranch()) {
    return GetEffectLevel(
        cont->true_block()->PredecessorAt(0)->control_input());
  }
  return GetEffectLevel(node);
}
// Records {effect_level} for {node} in the per-node side table.
void InstructionSelector::SetEffectLevel(Node* node, int effect_level) {
  DCHECK_NOT_NULL(node);
  size_t const index = node->id();
  DCHECK_LT(index, effect_level_.size());
  effect_level_[index] = effect_level;
}
// Decides whether {reference} may be addressed relative to kRootRegister.
bool InstructionSelector::CanAddressRelativeToRootsRegister(
    const ExternalReference& reference) const {
  // 1. The root register must be available and initialized.
  if (!CanUseRootsRegister()) return false;
  // 2. If root-relative offsets to arbitrary addresses are guaranteed not
  //    to change between code generation and execution, anything on the
  //    heap qualifies.
  if (enable_roots_relative_addressing_ == kEnableRootsRelativeAddressing) {
    return true;
  }
  // 3. Otherwise only targets with a fixed root-relative offset qualify.
  return TurboAssemblerBase::IsAddressableThroughRootRegister(isolate(),
                                                              reference);
}
// The incoming call descriptor decides whether kRootRegister holds a valid
// roots pointer in this function.
bool InstructionSelector::CanUseRootsRegister() const {
  return static_cast<bool>(linkage()->GetIncomingDescriptor()->flags() &
                           CallDescriptor::kCanUseRoots);
}
// Records {rep} for the virtual register carried by the (unallocated)
// operand {op}.
void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
                                               const InstructionOperand& op) {
  UnallocatedOperand unallocated = UnallocatedOperand::cast(op);
  int const vreg = unallocated.virtual_register();
  sequence()->MarkAsRepresentation(rep, vreg);
}
// Records {rep} for {node}'s virtual register (allocating one if needed).
void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
                                               Node* node) {
  int const vreg = GetVirtualRegister(node);
  sequence()->MarkAsRepresentation(rep, vreg);
}
namespace {

// Chooses the instruction operand that represents {input} inside a frame
// state. Constants become immediates; all other values become slot or
// register uses according to {kind}. A default-constructed (invalid)
// operand signals to the caller that the value is optimized out.
InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
                                   Node* input, FrameStateInputKind kind,
                                   MachineRepresentation rep) {
  if (rep == MachineRepresentation::kNone) {
    return g->TempImmediate(FrameStateDescriptor::kImpossibleValue);
  }
  switch (input->opcode()) {
    case IrOpcode::kInt32Constant:
    case IrOpcode::kInt64Constant:
    case IrOpcode::kNumberConstant:
    case IrOpcode::kFloat32Constant:
    case IrOpcode::kFloat64Constant:
    case IrOpcode::kDelayedStringConstant:
      return g->UseImmediate(input);
    case IrOpcode::kCompressedHeapConstant:
    case IrOpcode::kHeapConstant: {
      if (!CanBeTaggedOrCompressedPointer(rep)) {
        // If we have inconsistent static and dynamic types, e.g. if we
        // smi-check a string, we can get here with a heap object that
        // says it is a smi. In that case, we return an invalid instruction
        // operand, which will be interpreted as an optimized-out value.
        // TODO(jarin) Ideally, we should turn the current instruction
        // into an abort (we should never execute it).
        return InstructionOperand();
      }
      Handle<HeapObject> constant = HeapConstantOf(input->op());
      RootIndex root_index;
      if (isolate->roots_table().IsRootHandle(constant, &root_index) &&
          root_index == RootIndex::kOptimizedOut) {
        // For an optimized-out object we return an invalid instruction
        // operand, so that we take the fast path for optimized-out values.
        return InstructionOperand();
      }
      return g->UseImmediate(input);
    }
    case IrOpcode::kArgumentsElementsState:
    case IrOpcode::kArgumentsLengthState:
    case IrOpcode::kObjectState:
    case IrOpcode::kTypedObjectState:
      // These are handled by AddOperandToStateValueDescriptor before this
      // helper is reached.
      UNREACHABLE();
    default:
      switch (kind) {
        case FrameStateInputKind::kStackSlot:
          return g->UseUniqueSlot(input);
        case FrameStateInputKind::kAny:
          // Currently deopts "wrap" other operations, so the deopt's inputs
          // are potentially needed until the end of the deoptimising code.
          return g->UseAnyAtEnd(input);
      }
  }
  UNREACHABLE();
}

}  // namespace
// Tracks the objects appearing in a deopt frame state so that repeated
// occurrences can be referenced by id instead of being serialized again.
class StateObjectDeduplicator {
 public:
  explicit StateObjectDeduplicator(Zone* zone) : objects_(zone) {}
  static const size_t kNotDuplicated = SIZE_MAX;

  // Returns the id of a previously inserted object matching {node}, or
  // kNotDuplicated if this is its first occurrence.
  size_t GetObjectId(Node* node) {
    DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
           node->opcode() == IrOpcode::kObjectId ||
           node->opcode() == IrOpcode::kArgumentsElementsState);
    for (size_t id = 0; id < objects_.size(); ++id) {
      Node* const candidate = objects_[id];
      if (candidate == node) return id;
      // ObjectId nodes are the Turbofan way to express objects with the same
      // identity in the deopt info. So they should always be mapped to
      // previously appearing TypedObjectState nodes.
      if (HasObjectId(candidate) && HasObjectId(node) &&
          ObjectIdOf(candidate->op()) == ObjectIdOf(node->op())) {
        return id;
      }
    }
    DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
           node->opcode() == IrOpcode::kArgumentsElementsState);
    return kNotDuplicated;
  }

  // Appends {node} and returns its freshly assigned id.
  size_t InsertObject(Node* node) {
    DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
           node->opcode() == IrOpcode::kObjectId ||
           node->opcode() == IrOpcode::kArgumentsElementsState);
    size_t const fresh_id = objects_.size();
    objects_.push_back(node);
    return fresh_id;
  }

 private:
  static bool HasObjectId(Node* node) {
    return node->opcode() == IrOpcode::kTypedObjectState ||
           node->opcode() == IrOpcode::kObjectId;
  }

  ZoneVector<Node*> objects_;
};
// Serializes one frame-state value {input} into {values} (type/shape info)
// and {inputs} (instruction operands), recursing into object states.
// Returns the number of instruction operands added to inputs.
size_t InstructionSelector::AddOperandToStateValueDescriptor(
    StateValueList* values, InstructionOperandVector* inputs,
    OperandGenerator* g, StateObjectDeduplicator* deduplicator, Node* input,
    MachineType type, FrameStateInputKind kind, Zone* zone) {
  DCHECK_NOT_NULL(input);
  switch (input->opcode()) {
    case IrOpcode::kArgumentsElementsState: {
      values->PushArgumentsElements(ArgumentsStateTypeOf(input->op()));
      // The elements backing store of an arguments object participates in the
      // duplicate object counting, but can itself never appear duplicated.
      DCHECK_EQ(StateObjectDeduplicator::kNotDuplicated,
                deduplicator->GetObjectId(input));
      deduplicator->InsertObject(input);
      return 0;
    }
    case IrOpcode::kArgumentsLengthState: {
      values->PushArgumentsLength();
      return 0;
    }
    case IrOpcode::kObjectState:
      // Only the typed variant (kTypedObjectState) is expected here.
      UNREACHABLE();
    case IrOpcode::kTypedObjectState:
    case IrOpcode::kObjectId: {
      size_t id = deduplicator->GetObjectId(input);
      if (id == StateObjectDeduplicator::kNotDuplicated) {
        // First occurrence: register the object and recurse into its
        // fields, nesting their descriptors under a recursive field.
        DCHECK_EQ(IrOpcode::kTypedObjectState, input->opcode());
        size_t entries = 0;
        id = deduplicator->InsertObject(input);
        StateValueList* nested = values->PushRecursiveField(zone, id);
        int const input_count = input->op()->ValueInputCount();
        ZoneVector<MachineType> const* types = MachineTypesOf(input->op());
        for (int i = 0; i < input_count; ++i) {
          entries += AddOperandToStateValueDescriptor(
              nested, inputs, g, deduplicator, input->InputAt(i), types->at(i),
              kind, zone);
        }
        return entries;
      } else {
        // Deoptimizer counts duplicate objects for the running id, so we have
        // to push the input again.
        deduplicator->InsertObject(input);
        values->PushDuplicate(id);
        return 0;
      }
    }
    default: {
      InstructionOperand op =
          OperandForDeopt(isolate(), g, input, kind, type.representation());
      if (op.kind() == InstructionOperand::INVALID) {
        // Invalid operand means the value is impossible or optimized-out.
        values->PushOptimizedOut();
        return 0;
      } else {
        inputs->push_back(op);
        values->PushPlain(type);
        return 1;
      }
    }
  }
}
// Serializes all values reachable from one StateValues {node} into
// {values}/{inputs}, returning the number of instruction operands added.
size_t InstructionSelector::AddInputsToFrameStateDescriptor(
    StateValueList* values, InstructionOperandVector* inputs,
    OperandGenerator* g, StateObjectDeduplicator* deduplicator, Node* node,
    FrameStateInputKind kind, Zone* zone) {
  size_t entries = 0;
  StateValuesAccess::iterator it = StateValuesAccess(node).begin();
  // Take advantage of sparse nature of StateValuesAccess to skip over
  // multiple empty nodes at once pushing repeated OptimizedOuts all in
  // one go.
  while (!it.done()) {
    // AdvanceTillNotEmpty() returns how many empty slots were skipped;
    // record one OptimizedOut per skipped slot.
    values->PushOptimizedOut(it.AdvanceTillNotEmpty());
    if (it.done()) break;
    StateValuesAccess::TypedNode input_node = *it;
    entries += AddOperandToStateValueDescriptor(values, inputs, g, deduplicator,
                                                input_node.node,
                                                input_node.type, kind, zone);
    ++it;
  }
  return entries;
}
// Serializes an entire FrameState {state} (recursively including any outer
// frame states first) into {inputs} following the fixed order: function,
// parameters, context (if present), locals, stack.
// Returns the number of instruction operands added to inputs.
size_t InstructionSelector::AddInputsToFrameStateDescriptor(
    FrameStateDescriptor* descriptor, Node* state, OperandGenerator* g,
    StateObjectDeduplicator* deduplicator, InstructionOperandVector* inputs,
    FrameStateInputKind kind, Zone* zone) {
  DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());

  size_t entries = 0;
  size_t initial_size = inputs->size();
  USE(initial_size);  // initial_size is only used for debug.

  // Outer (caller) frame states are serialized before this frame's values.
  if (descriptor->outer_state()) {
    entries += AddInputsToFrameStateDescriptor(
        descriptor->outer_state(), state->InputAt(kFrameStateOuterStateInput),
        g, deduplicator, inputs, kind, zone);
  }

  Node* parameters = state->InputAt(kFrameStateParametersInput);
  Node* locals = state->InputAt(kFrameStateLocalsInput);
  Node* stack = state->InputAt(kFrameStateStackInput);
  Node* context = state->InputAt(kFrameStateContextInput);
  Node* function = state->InputAt(kFrameStateFunctionInput);

  // The descriptor's counts must agree with the actual state-value nodes.
  DCHECK_EQ(descriptor->parameters_count(),
            StateValuesAccess(parameters).size());
  DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
  DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());

  StateValueList* values_descriptor = descriptor->GetStateValueDescriptors();
  DCHECK_EQ(values_descriptor->size(), 0u);
  values_descriptor->ReserveSize(descriptor->GetSize());

  DCHECK_NOT_NULL(function);
  entries += AddOperandToStateValueDescriptor(
      values_descriptor, inputs, g, deduplicator, function,
      MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);

  entries += AddInputsToFrameStateDescriptor(
      values_descriptor, inputs, g, deduplicator, parameters, kind, zone);

  if (descriptor->HasContext()) {
    DCHECK_NOT_NULL(context);
    entries += AddOperandToStateValueDescriptor(
        values_descriptor, inputs, g, deduplicator, context,
        MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
  }

  entries += AddInputsToFrameStateDescriptor(values_descriptor, inputs, g,
                                             deduplicator, locals, kind, zone);
  entries += AddInputsToFrameStateDescriptor(values_descriptor, inputs, g,
                                             deduplicator, stack, kind, zone);
  DCHECK_EQ(initial_size + entries, inputs->size());
  return entries;
}
// Continuation-only emit: no explicit inputs or outputs.
Instruction* InstructionSelector::EmitWithContinuation(
    InstructionCode opcode, FlagsContinuation* cont) {
  return EmitWithContinuation(opcode, 0, nullptr, 0, nullptr, cont);
}
// One-input convenience overload.
Instruction* InstructionSelector::EmitWithContinuation(
    InstructionCode opcode, InstructionOperand a, FlagsContinuation* cont) {
  InstructionOperand ins[] = {a};
  return EmitWithContinuation(opcode, 0, nullptr, arraysize(ins), ins, cont);
}
// Two-input convenience overload.
Instruction* InstructionSelector::EmitWithContinuation(
    InstructionCode opcode, InstructionOperand a, InstructionOperand b,
    FlagsContinuation* cont) {
  InstructionOperand ins[] = {a, b};
  return EmitWithContinuation(opcode, 0, nullptr, arraysize(ins), ins, cont);
}
// Three-input convenience overload.
Instruction* InstructionSelector::EmitWithContinuation(
    InstructionCode opcode, InstructionOperand a, InstructionOperand b,
    InstructionOperand c, FlagsContinuation* cont) {
  InstructionOperand ins[] = {a, b, c};
  return EmitWithContinuation(opcode, 0, nullptr, arraysize(ins), ins, cont);
}
// Forwards to the full overload with an empty temp list.
Instruction* InstructionSelector::EmitWithContinuation(
    InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
    size_t input_count, InstructionOperand* inputs, FlagsContinuation* cont) {
  return EmitWithContinuation(opcode, output_count, outputs, input_count,
                              inputs, 0, nullptr, cont);
}
// Emits {opcode} combined with the flags continuation {cont}: the
// continuation's mode is encoded into the opcode, and its extra operands
// (branch labels, deoptimization arguments, a boolean result register, or
// a trap id) are appended to the caller-supplied operand lists.
Instruction* InstructionSelector::EmitWithContinuation(
    InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
    size_t input_count, InstructionOperand* inputs, size_t temp_count,
    InstructionOperand* temps, FlagsContinuation* cont) {
  OperandGenerator g(this);

  opcode = cont->Encode(opcode);

  // The continuation_* member vectors are reused across calls; reset them
  // and copy the caller-provided operands in.
  continuation_inputs_.resize(0);
  for (size_t i = 0; i < input_count; i++) {
    continuation_inputs_.push_back(inputs[i]);
  }

  continuation_outputs_.resize(0);
  for (size_t i = 0; i < output_count; i++) {
    continuation_outputs_.push_back(outputs[i]);
  }

  continuation_temps_.resize(0);
  for (size_t i = 0; i < temp_count; i++) {
    continuation_temps_.push_back(temps[i]);
  }

  if (cont->IsBranch()) {
    continuation_inputs_.push_back(g.Label(cont->true_block()));
    continuation_inputs_.push_back(g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    // Record the original input count so the deopt arguments appended below
    // can be located behind the regular inputs.
    opcode |= MiscField::encode(static_cast<int>(input_count));
    AppendDeoptimizeArguments(&continuation_inputs_, cont->kind(),
                              cont->reason(), cont->feedback(),
                              cont->frame_state());
  } else if (cont->IsSet()) {
    continuation_outputs_.push_back(g.DefineAsRegister(cont->result()));
  } else if (cont->IsTrap()) {
    int trap_id = static_cast<int>(cont->trap_id());
    continuation_inputs_.push_back(g.UseImmediate(trap_id));
  } else {
    DCHECK(cont->IsNone());
  }

  // Pass nullptr rather than &front() for empty vectors.
  size_t const emit_inputs_size = continuation_inputs_.size();
  auto* emit_inputs =
      emit_inputs_size ? &continuation_inputs_.front() : nullptr;
  size_t const emit_outputs_size = continuation_outputs_.size();
  auto* emit_outputs =
      emit_outputs_size ? &continuation_outputs_.front() : nullptr;
  size_t const emit_temps_size = continuation_temps_.size();
  auto* emit_temps = emit_temps_size ? &continuation_temps_.front() : nullptr;
  return Emit(opcode, emit_outputs_size, emit_outputs, emit_inputs_size,
              emit_inputs, emit_temps_size, emit_temps);
}
// Appends the deoptimization operands for {frame_state} to {args}: first an
// immediate holding the deoptimization entry id, then the serialized frame
// state inputs.
void InstructionSelector::AppendDeoptimizeArguments(
    InstructionOperandVector* args, DeoptimizeKind kind,
    DeoptimizeReason reason, FeedbackSource const& feedback,
    Node* frame_state) {
  // Lazy deopts do not come through this path.
  DCHECK_NE(DeoptimizeKind::kLazy, kind);
  OperandGenerator g(this);
  FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
  int const state_id =
      sequence()->AddDeoptimizationEntry(descriptor, kind, reason, feedback);
  args->push_back(g.TempImmediate(state_id));
  StateObjectDeduplicator deduplicator(instruction_zone());
  AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
                                  args, FrameStateInputKind::kAny,
                                  instruction_zone());
}
// An internal helper class for generating the operands to calls.
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
struct CallBuffer {
  CallBuffer(Zone* zone, const CallDescriptor* call_descriptor,
             FrameStateDescriptor* frame_state)
      : descriptor(call_descriptor),
        frame_state_descriptor(frame_state),
        output_nodes(zone),
        outputs(zone),
        instruction_args(zone),
        pushed_nodes(zone) {
    // Pre-size the vectors from the descriptor so filling them in
    // InitializeCallBuffer does not reallocate.
    output_nodes.reserve(call_descriptor->ReturnCount());
    outputs.reserve(call_descriptor->ReturnCount());
    pushed_nodes.reserve(input_count());
    instruction_args.reserve(input_count() + frame_state_value_count());
  }

  const CallDescriptor* descriptor;
  FrameStateDescriptor* frame_state_descriptor;  // May be nullptr.
  ZoneVector<PushParameter> output_nodes;  // One entry per return value.
  InstructionOperandVector outputs;
  InstructionOperandVector instruction_args;
  ZoneVector<PushParameter> pushed_nodes;

  size_t input_count() const { return descriptor->InputCount(); }

  size_t frame_state_count() const { return descriptor->FrameStateCount(); }

  // Total operand count contributed by the frame state, if any.
  size_t frame_state_value_count() const {
    return (frame_state_descriptor == nullptr)
               ? 0
               : (frame_state_descriptor->GetTotalSize() +
                  1);  // Include deopt id.
  }
};
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
CallBufferFlags flags,
bool is_tail_call,
int stack_param_delta) {
OperandGenerator g(this);
size_t ret_count = buffer->descriptor->ReturnCount();
DCHECK_LE(call->op()->ValueOutputCount(), ret_count);
DCHECK_EQ(
call->op()->ValueInputCount(),
static_cast<int>(buffer->input_count() + buffer->frame_state_count()));
if (ret_count > 0) {
// Collect the projections that represent multiple outputs from this call.
if (ret_count == 1) {
PushParameter result = {call, buffer->descriptor->GetReturnLocation(0)};
buffer->output_nodes.push_back(result);
} else {
buffer->output_nodes.resize(ret_count);
int stack_count = 0;
for (size_t i = 0; i < ret_count; ++i) {
LinkageLocation location = buffer->descriptor->GetReturnLocation(i);
buffer->output_nodes[i] = PushParameter(nullptr, location);
if (location.IsCallerFrameSlot()) {
stack_count += location.GetSizeInPointers();
}
}
for (Edge const edge : call->use_edges()) {
if (!NodeProperties::IsValueEdge(edge)) continue;
Node* node = edge.from();
DCHECK_EQ(IrOpcode::kProjection, node->opcode());
size_t const index = ProjectionIndexOf(node->op());
DCHECK_LT(index, buffer->output_nodes.size());
DCHECK(!buffer->output_nodes[index].node);
buffer->output_nodes[index].node = node;
}
frame_->EnsureReturnSlots(stack_count);
}
// Filter out the outputs that aren't live because no projection uses them.
size_t outputs_needed_by_framestate =
buffer->frame_state_descriptor == nullptr
? 0
: buffer->frame_state_descriptor->state_combine()
.ConsumedOutputCount();
for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
bool output_is_live = buffer->output_nodes[i].node != nullptr ||
i < outputs_needed_by_framestate;
if (output_is_live) {
LinkageLocation location = buffer->output_nodes[i].location;
MachineRepresentation rep = location.GetType().representation();
Node* output = buffer->output_nodes[i].node;
InstructionOperand op = output == nullptr
? g.TempLocation(location)
: g.DefineAsLocation(output, location);
MarkAsRepresentation(rep, op);
if (!UnallocatedOperand::cast(op).HasFixedSlotPolicy()) {
buffer->outputs.push_back(op);
buffer->output_nodes[i].node = nullptr;
}
}
}
}
// The first argument is always the callee code.
Node* callee = call->InputAt(0);
bool call_code_immediate = (flags & kCallCodeImmediate) != 0;
bool call_address_immediate = (flags & kCallAddressImmediate) != 0;
bool call_use_fixed_target_reg = (flags & kCallFixedTargetRegister) != 0;
switch (buffer->descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
// TODO(jgruber, v8:7449): The below is a hack to support tail-calls from
// JS-linkage callers with a register code target. The problem is that the
// code target register may be clobbered before the final jmp by
// AssemblePopArgumentsAdaptorFrame. As a more permanent fix we could
// entirely remove support for tail-calls from JS-linkage callers.
buffer->instruction_args.push_back(
(call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
? g.UseImmediate(callee)
: call_use_fixed_target_reg
? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
: is_tail_call ? g.UseUniqueRegister(callee)
: g.UseRegister(callee));
break;
case CallDescriptor::kCallAddress:
buffer->instruction_args.push_back(
(call_address_immediate &&
callee->opcode() == IrOpcode::kExternalConstant)
? g.UseImmediate(callee)
: call_use_fixed_target_reg
? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
: g.UseRegister(callee));
break;
case CallDescriptor::kCallWasmCapiFunction:
case CallDescriptor::kCallWasmFunction:
case CallDescriptor::kCallWasmImportWrapper:
buffer->instruction_args.push_back(
(call_address_immediate &&
(callee->opcode() == IrOpcode::kRelocatableInt64Constant ||
callee->opcode() == IrOpcode::kRelocatableInt32Constant))
? g.UseImmediate(callee)
: call_use_fixed_target_reg
? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
: g.UseRegister(callee));
break;
case CallDescriptor::kCallBuiltinPointer:
// The common case for builtin pointers is to have the target in a
// register. If we have a constant, we use a register anyway to simplify
// related code.
buffer->instruction_args.push_back(
call_use_fixed_target_reg
? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
: g.UseRegister(callee));
break;
case CallDescriptor::kCallJSFunction:
buffer->instruction_args.push_back(
g.UseLocation(callee, buffer->descriptor->GetInputLocation(0)));
break;
}
DCHECK_EQ(1u, buffer->instruction_args.size());
  // Argument 1 is used for the poison-alias index (encoded in a word-sized
  // immediate). This is the index of the operand that aliases with the poison
  // register, or -1 if there is no aliasing.
buffer->instruction_args.push_back(g.TempImmediate(-1));
const size_t poison_alias_index = 1;
DCHECK_EQ(buffer->instruction_args.size() - 1, poison_alias_index);
// If the call needs a frame state, we insert the state information as
// follows (n is the number of value inputs to the frame state):
// arg 2 : deoptimization id.
// arg 3 - arg (n + 2) : value inputs to the frame state.
size_t frame_state_entries = 0;
USE(frame_state_entries); // frame_state_entries is only used for debug.
if (buffer->frame_state_descriptor != nullptr) {
Node* frame_state =
call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
// If it was a syntactic tail call we need to drop the current frame and
// all the frames on top of it that are either an arguments adaptor frame
// or a tail caller frame.
if (is_tail_call) {
frame_state = NodeProperties::GetFrameStateInput(frame_state);
buffer->frame_state_descriptor =
buffer->frame_state_descriptor->outer_state();
while (buffer->frame_state_descriptor != nullptr &&
buffer->frame_state_descriptor->type() ==
FrameStateType::kArgumentsAdaptor) {
frame_state = NodeProperties::GetFrameStateInput(frame_state);
buffer->frame_state_descriptor =
buffer->frame_state_descriptor->outer_state();
}
}
int const state_id = sequence()->AddDeoptimizationEntry(
buffer->frame_state_descriptor, DeoptimizeKind::kLazy,
DeoptimizeReason::kUnknown, FeedbackSource());
buffer->instruction_args.push_back(g.TempImmediate(state_id));
StateObjectDeduplicator deduplicator(instruction_zone());
frame_state_entries =
1 + AddInputsToFrameStateDescriptor(
buffer->frame_state_descriptor, frame_state, &g, &deduplicator,
&buffer->instruction_args, FrameStateInputKind::kStackSlot,
instruction_zone());
DCHECK_EQ(2 + frame_state_entries, buffer->instruction_args.size());
}
size_t input_count = static_cast<size_t>(buffer->input_count());
// Split the arguments into pushed_nodes and instruction_args. Pushed
// arguments require an explicit push instruction before the call and do
// not appear as arguments to the call. Everything else ends up
// as an InstructionOperand argument to the call.
auto iter(call->inputs().begin());
size_t pushed_count = 0;
bool call_tail = (flags & kCallTail) != 0;
for (size_t index = 0; index < input_count; ++iter, ++index) {
DCHECK(iter != call->inputs().end());
DCHECK_NE(IrOpcode::kFrameState, (*iter)->op()->opcode());
if (index == 0) continue; // The first argument (callee) is already done.
LinkageLocation location = buffer->descriptor->GetInputLocation(index);
if (call_tail) {
location = LinkageLocation::ConvertToTailCallerLocation(
location, stack_param_delta);
}
InstructionOperand op = g.UseLocation(*iter, location);
UnallocatedOperand unallocated = UnallocatedOperand::cast(op);
if (unallocated.HasFixedSlotPolicy() && !call_tail) {
int stack_index = buffer->descriptor->GetStackIndexFromSlot(
unallocated.fixed_slot_index());
// This can insert empty slots before stack_index and will insert enough
// slots after stack_index to store the parameter.
if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
int num_slots = std::max(
1, (ElementSizeInBytes(location.GetType().representation()) /
kSystemPointerSize));
buffer->pushed_nodes.resize(stack_index + num_slots);
}
PushParameter param = {*iter, location};
buffer->pushed_nodes[stack_index] = param;
pushed_count++;
} else {
// If we do load poisoning and the linkage uses the poisoning register,
// then we request the input in memory location, and during code
// generation, we move the input to the register.
if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison &&
unallocated.HasFixedRegisterPolicy()) {
int reg = unallocated.fixed_register_index();
if (Register::from_code(reg) == kSpeculationPoisonRegister) {
buffer->instruction_args[poison_alias_index] = g.TempImmediate(
static_cast<int32_t>(buffer->instruction_args.size()));
op = g.UseRegisterOrSlotOrConstant(*iter);
}
}
buffer->instruction_args.push_back(op);
}
}
DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
frame_state_entries - 1);
if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK && call_tail &&
stack_param_delta != 0) {
// For tail calls that change the size of their parameter list and keep
// their return address on the stack, move the return address to just above
// the parameters.
LinkageLocation saved_return_location =
LinkageLocation::ForSavedCallerReturnAddress();
InstructionOperand return_address =
g.UsePointerLocation(LinkageLocation::ConvertToTailCallerLocation(
saved_return_location, stack_param_delta),
saved_return_location);
buffer->instruction_args.push_back(return_address);
}
}
// Returns true if the source position of {node} must be preserved in the
// generated code. Positions are kept for every node when the selector is
// configured with kAllSourcePositions; otherwise only for operations that
// can appear in stack traces or trap handling (calls, traps, protected
// memory accesses).
bool InstructionSelector::IsSourcePositionUsed(Node* node) {
  if (source_position_mode_ == kAllSourcePositions) return true;
  switch (node->opcode()) {
    case IrOpcode::kCall:
    case IrOpcode::kTrapIf:
    case IrOpcode::kTrapUnless:
    case IrOpcode::kProtectedLoad:
    case IrOpcode::kProtectedStore:
      return true;
    default:
      return false;
  }
}
// Generates code for all nodes in {block} and for its control exit, then
// records the resulting instruction range on the matching InstructionBlock.
void InstructionSelector::VisitBlock(BasicBlock* block) {
  DCHECK(!current_block_);
  current_block_ = block;
  // Number of instructions emitted into the per-selector buffer so far; used
  // to delimit what this block (and each individual node) contributed.
  auto current_num_instructions = [&] {
    DCHECK_GE(kMaxInt, instructions_.size());
    return static_cast<int>(instructions_.size());
  };
  int current_block_end = current_num_instructions();
  // First pass: assign every node an "effect level". The counter is bumped
  // at each node that may read or write memory (stores, calls, protected
  // accesses, load transforms, atomics, barriers), so two nodes share a level
  // iff no such effectful operation lies between them.
  // NOTE(review): presumably consumed by the architecture-specific matchers
  // to decide whether a load may be folded into its use -- not visible here.
  int effect_level = 0;
  for (Node* const node : *block) {
    SetEffectLevel(node, effect_level);
    if (node->opcode() == IrOpcode::kStore ||
        node->opcode() == IrOpcode::kUnalignedStore ||
        node->opcode() == IrOpcode::kCall ||
        node->opcode() == IrOpcode::kProtectedLoad ||
        node->opcode() == IrOpcode::kProtectedStore ||
        node->opcode() == IrOpcode::kLoadTransform ||
#define ADD_EFFECT_FOR_ATOMIC_OP(Opcode) \
  node->opcode() == IrOpcode::k##Opcode ||
        MACHINE_ATOMIC_OP_LIST(ADD_EFFECT_FOR_ATOMIC_OP)
#undef ADD_EFFECT_FOR_ATOMIC_OP
        node->opcode() == IrOpcode::kMemoryBarrier) {
      ++effect_level;
    }
  }
  // We visit the control first, then the nodes in the block, so the block's
  // control input should be on the same effect level as the last node.
  if (block->control_input() != nullptr) {
    SetEffectLevel(block->control_input(), effect_level);
  }
  // Reverses the instructions emitted since {instruction_start} (they are
  // generated top-down but scheduled bottom-up) and attaches {node}'s source
  // position to the first of them, if tracked and used. Returns false iff
  // instruction selection has failed.
  auto FinishEmittedInstructions = [&](Node* node, int instruction_start) {
    if (instruction_selection_failed()) return false;
    if (current_num_instructions() == instruction_start) return true;
    std::reverse(instructions_.begin() + instruction_start,
                 instructions_.end());
    if (!node) return true;
    if (!source_positions_) return true;
    SourcePosition source_position = source_positions_->GetSourcePosition(node);
    if (source_position.IsKnown() && IsSourcePositionUsed(node)) {
      sequence()->SetSourcePosition(instructions_[instruction_start],
                                    source_position);
    }
    return true;
  };
  // Generate code for the block control "top down", but schedule the code
  // "bottom up".
  VisitControl(block);
  if (!FinishEmittedInstructions(block->control_input(), current_block_end))
    return;
  // Visit code in reverse control flow order, because architecture-specific
  // matching may cover more than one node at a time.
  for (auto node : base::Reversed(*block)) {
    int current_node_end = current_num_instructions();
    // Skip nodes that are unused or already defined.
    if (IsUsed(node) && !IsDefined(node)) {
      // Generate code for this node "top down", but schedule the code "bottom
      // up".
      VisitNode(node);
      if (!FinishEmittedInstructions(node, current_node_end)) return;
    }
    // For --trace-turbo JSON output, remember which instruction indices this
    // node produced. The range is {end-of-node, start-of-node} because the
    // buffer grows while the block is being emitted in reverse.
    if (trace_turbo_ == kEnableTraceTurboJson) {
      instr_origins_[node->id()] = {current_num_instructions(),
                                    current_node_end};
    }
  }
  // We're done with the block.
  InstructionBlock* instruction_block =
      sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
  if (current_num_instructions() == current_block_end) {
    // Avoid empty block: insert a {kArchNop} instruction.
    Emit(Instruction::New(sequence()->zone(), kArchNop));
  }
  // Note: "start" is the current (higher) buffer index and "end" is where the
  // block began, because this block's instructions sit in the buffer in
  // reverse order. NOTE(review): presumably the pair is re-interpreted when
  // the buffer is flushed into the sequence -- confirm in the flush code.
  instruction_block->set_code_start(current_num_instructions());
  instruction_block->set_code_end(current_block_end);
  current_block_ = nullptr;
}
// Emits the instruction(s) for the control-flow exit of {block}: goto,
// call-with-exception-edge, tail call, branch, switch, return, deoptimize,
// or throw. Blocks without a control input (the exit block) emit nothing.
void InstructionSelector::VisitControl(BasicBlock* block) {
#ifdef DEBUG
  // SSA deconstruction requires targets of branches not to have phis.
  // Edge split form guarantees this property, but is more strict.
  if (block->SuccessorCount() > 1) {
    for (BasicBlock* const successor : block->successors()) {
      for (Node* const node : *successor) {
        if (IrOpcode::IsPhiOpcode(node->opcode())) {
          std::ostringstream str;
          str << "You might have specified merged variables for a label with "
              << "only one predecessor." << std::endl
              << "# Current Block: " << *successor << std::endl
              << "# Node: " << *node;
          FATAL("%s", str.str().c_str());
        }
      }
    }
  }
#endif
  Node* input = block->control_input();
  // Buffer size before emission; used below to record the instruction range
  // of the control node for --trace-turbo JSON output.
  int instruction_end = static_cast<int>(instructions_.size());
  switch (block->control()) {
    case BasicBlock::kGoto:
      VisitGoto(block->SuccessorAt(0));
      break;
    case BasicBlock::kCall: {
      DCHECK_EQ(IrOpcode::kCall, input->opcode());
      // A call with an exception edge: successor 0 is the normal
      // continuation, successor 1 the exception handler.
      BasicBlock* success = block->SuccessorAt(0);
      BasicBlock* exception = block->SuccessorAt(1);
      VisitCall(input, exception);
      VisitGoto(success);
      break;
    }
    case BasicBlock::kTailCall: {
      DCHECK_EQ(IrOpcode::kTailCall, input->opcode());
      VisitTailCall(input);
      break;
    }
    case BasicBlock::kBranch: {
      DCHECK_EQ(IrOpcode::kBranch, input->opcode());
      BasicBlock* tbranch = block->SuccessorAt(0);
      BasicBlock* fbranch = block->SuccessorAt(1);
      // Both arms lead to the same block: the condition is irrelevant, so a
      // plain jump suffices.
      if (tbranch == fbranch) {
        VisitGoto(tbranch);
      } else {
        VisitBranch(input, tbranch, fbranch);
      }
      break;
    }
    case BasicBlock::kSwitch: {
      DCHECK_EQ(IrOpcode::kSwitch, input->opcode());
      // Last successor must be {IfDefault}.
      BasicBlock* default_branch = block->successors().back();
      DCHECK_EQ(IrOpcode::kIfDefault, default_branch->front()->opcode());
      // All other successors must be {IfValue}s.
      // Collect the case values and track their min/max so the backend can
      // choose between a jump table and a binary search.
      int32_t min_value = std::numeric_limits<int32_t>::max();
      int32_t max_value = std::numeric_limits<int32_t>::min();
      size_t case_count = block->SuccessorCount() - 1;
      ZoneVector<CaseInfo> cases(case_count, zone());
      for (size_t i = 0; i < case_count; ++i) {
        BasicBlock* branch = block->SuccessorAt(i);
        const IfValueParameters& p = IfValueParametersOf(branch->front()->op());
        cases[i] = CaseInfo{p.value(), p.comparison_order(), branch};
        if (min_value > p.value()) min_value = p.value();
        if (max_value < p.value()) max_value = p.value();
      }
      SwitchInfo sw(cases, min_value, max_value, default_branch);
      VisitSwitch(input, sw);
      break;
    }
    case BasicBlock::kReturn: {
      DCHECK_EQ(IrOpcode::kReturn, input->opcode());
      VisitReturn(input);
      break;
    }
    case BasicBlock::kDeoptimize: {
      DeoptimizeParameters p = DeoptimizeParametersOf(input->op());
      Node* value = input->InputAt(0);
      VisitDeoptimize(p.kind(), p.reason(), p.feedback(), value);
      break;
    }
    case BasicBlock::kThrow:
      DCHECK_EQ(IrOpcode::kThrow, input->opcode());
      VisitThrow(input);
      break;
    case BasicBlock::kNone: {
      // Exit block doesn't have control.
      DCHECK_NULL(input);
      break;
    }
    default:
      UNREACHABLE();
  }
  // Record the instruction range emitted for the control node. The pair is
  // {after, before} because instructions for this block are buffered in
  // reverse order (see VisitBlock).
  if (trace_turbo_ == kEnableTraceTurboJson && input) {
    int instruction_start = static_cast<int>(instructions_.size());
    instr_origins_[input->id()] = {instruction_start, instruction_end};
  }
}
// Marks both result projections of a word32-pair operation (low word at
// index 0, high word at index 1) as word32 values, skipping any projection
// that has no users and therefore does not exist in the graph.
void InstructionSelector::MarkPairProjectionsAsWord32(Node* node) {
  for (size_t index : {0u, 1u}) {
    Node* const projection = NodeProperties::FindProjection(node, index);
    if (projection != nullptr) {
      MarkAsWord32(projection);
    }
  }
}
void InstructionSelector::VisitNode(Node* node) {
tick_counter_->TickAndMaybeEnterSafepoint();
DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
switch (node->opcode()) {
case IrOpcode::kStart:
case IrOpcode::kLoop:
case IrOpcode::kEnd:
case IrOpcode::kBranch:
case IrOpcode::kIfTrue:
case IrOpcode::kIfFalse:
case IrOpcode::kIfSuccess:
case IrOpcode::kSwitch:
case IrOpcode::kIfValue:
case IrOpcode::kIfDefault:
case IrOpcode::kEffectPhi:
case IrOpcode::kMerge:
case IrOpcode::kTerminate:
case IrOpcode::kBeginRegion:
// No code needed for these graph artifacts.
return;
case IrOpcode::kIfException:
return MarkAsTagged(node), VisitIfException(node);
case IrOpcode::kFinishRegion:
return MarkAsTagged(node), VisitFinishRegion(node);
case IrOpcode::kParameter: {
// Parameters should always be scheduled to the first block.
DCHECK_EQ(schedule()->block(node)->rpo_number(), 0);
MachineType type =
linkage()->GetParameterType(ParameterIndexOf(node->op()));
MarkAsRepresentation(type.representation(), node);
return VisitParameter(node);
}
case IrOpcode::kOsrValue:
return MarkAsTagged(node), VisitOsrValue(node);
case IrOpcode::kPhi: {
MachineRepresentation rep = PhiRepresentationOf(node->op());
if (rep == MachineRepresentation::kNone) return;
MarkAsRepresentation(rep, node);
return VisitPhi(node);
}
case IrOpcode::kProjection:
return VisitProjection(node);
case IrOpcode::kInt32Constant:
case IrOpcode::kInt64Constant:
case IrOpcode::kTaggedIndexConstant:
case IrOpcode::kExternalConstant:
case IrOpcode::kRelocatableInt32Constant:
case IrOpcode::kRelocatableInt64Constant:
return VisitConstant(node);
case IrOpcode::kFloat32Constant:
return MarkAsFloat32(node), VisitConstant(node);
case IrOpcode::kFloat64Constant:
return MarkAsFloat64(node), VisitConstant(node);
case IrOpcode::kHeapConstant:
return MarkAsTagged(node), VisitConstant(node);
case IrOpcode::kCompressedHeapConstant:
return MarkAsCompressed(node), VisitConstant(node);
case IrOpcode::kNumberConstant: {
double value = OpParameter<double>(node->op());
if (!IsSmiDouble(value)) MarkAsTagged(node);
return VisitConstant(node);
}
case IrOpcode::kDelayedStringConstant:
return MarkAsTagged(node), VisitConstant(node);
case IrOpcode::kCall:
return VisitCall(node);
case IrOpcode::kDeoptimizeIf:
return VisitDeoptimizeIf(node);
case IrOpcode::kDeoptimizeUnless:
return VisitDeoptimizeUnless(node);
case IrOpcode::kTrapIf:
return VisitTrapIf(node, TrapIdOf(node->op()));
case IrOpcode::kTrapUnless:
return VisitTrapUnless(node, TrapIdOf(node->op()));
case IrOpcode::kFrameState:
case IrOpcode::kStateValues:
case IrOpcode::kObjectState:
return;
case IrOpcode::kAbortCSAAssert:
VisitAbortCSAAssert(node);
return;
case IrOpcode::kDebugBreak:
VisitDebugBreak(node);
return;
case IrOpcode::kUnreachable:
VisitUnreachable(node);
return;
case IrOpcode::kStaticAssert:
VisitStaticAssert(node);
return;
case IrOpcode::kDeadValue:
VisitDeadValue(node);
return;
case IrOpcode::kComment:
VisitComment(node);
return;
case IrOpcode::kRetain:
VisitRetain(node);
return;
case IrOpcode::kLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitLoad(node);
}
case IrOpcode::kLoadTransform: {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
return VisitLoadTransform(node);
}
case IrOpcode::kLoadLane: {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
return VisitLoadLane(node);
}
case IrOpcode::kPoisonedLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitPoisonedLoad(node);
}
case IrOpcode::kStore:
return VisitStore(node);
case IrOpcode::kProtectedStore:
return VisitProtectedStore(node);
case IrOpcode::kStoreLane: {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
return VisitStoreLane(node);
}
case IrOpcode::kWord32And:
return MarkAsWord32(node), VisitWord32And(node);
case IrOpcode::kWord32Or:
return MarkAsWord32(node), VisitWord32Or(node);
case IrOpcode::kWord32Xor:
return MarkAsWord32(node), VisitWord32Xor(node);
case IrOpcode::kWord32Shl:
return MarkAsWord32(node), VisitWord32Shl(node);
case IrOpcode::kWord32Shr:
return MarkAsWord32(node), VisitWord32Shr(node);
case IrOpcode::kWord32Sar:
return MarkAsWord32(node), VisitWord32Sar(node);
case IrOpcode::kWord32Rol:
return MarkAsWord32(node), VisitWord32Rol(node);
case IrOpcode::kWord32Ror:
return MarkAsWord32(node), VisitWord32Ror(node);
case IrOpcode::kWord32Equal:
return VisitWord32Equal(node);
case IrOpcode::kWord32Clz:
return MarkAsWord32(node), VisitWord32Clz(node);
case IrOpcode::kWord32Ctz:
return MarkAsWord32(node), VisitWord32Ctz(node);
case IrOpcode::kWord32ReverseBits:
return MarkAsWord32(node), VisitWord32ReverseBits(node);
case IrOpcode::kWord32ReverseBytes:
return MarkAsWord32(node), VisitWord32ReverseBytes(node);
case IrOpcode::kInt32AbsWithOverflow:
return MarkAsWord32(node), VisitInt32AbsWithOverflow(node);
case IrOpcode::kWord32Popcnt:
return MarkAsWord32(node), VisitWord32Popcnt(node);
case IrOpcode::kWord64Popcnt:
return MarkAsWord32(node), VisitWord64Popcnt(node);
case IrOpcode::kWord64And:
return MarkAsWord64(node), VisitWord64And(node);
case IrOpcode::kWord64Or:
return MarkAsWord64(node), VisitWord64Or(node);
case IrOpcode::kWord64Xor:
return MarkAsWord64(node), VisitWord64Xor(node);
case IrOpcode::kWord64Shl:
return MarkAsWord64(node), VisitWord64Shl(node);
case IrOpcode::kWord64Shr:
return MarkAsWord64(node), VisitWord64Shr(node);
case IrOpcode::kWord64Sar:
return MarkAsWord64(node), VisitWord64Sar(node);
case IrOpcode::kWord64Rol:
return MarkAsWord64(node), VisitWord64Rol(node);
case IrOpcode::kWord64Ror:
return MarkAsWord64(node), VisitWord64Ror(node);
case IrOpcode::kWord64Clz:
return MarkAsWord64(node), VisitWord64Clz(node);
case IrOpcode::kWord64Ctz:
return MarkAsWord64(node), VisitWord64Ctz(node);
case IrOpcode::kWord64ReverseBits:
return MarkAsWord64(node), VisitWord64ReverseBits(node);
case IrOpcode::kWord64ReverseBytes:
return MarkAsWord64(node), VisitWord64ReverseBytes(node);
case IrOpcode::kSimd128ReverseBytes:
return MarkAsSimd128(node), VisitSimd128ReverseBytes(node);
case IrOpcode::kInt64AbsWithOverflow:
return MarkAsWord64(node), VisitInt64AbsWithOverflow(node);
case IrOpcode::kWord64Equal:
return VisitWord64Equal(node);
case IrOpcode::kInt32Add:
return MarkAsWord32(node), VisitInt32Add(node);
case IrOpcode::kInt32AddWithOverflow:
return MarkAsWord32(node), VisitInt32AddWithOverflow(node);
case IrOpcode::kInt32Sub:
return MarkAsWord32(node), VisitInt32Sub(node);
case IrOpcode::kInt32SubWithOverflow:
return VisitInt32SubWithOverflow(node);
case IrOpcode::kInt32Mul:
return MarkAsWord32(node), VisitInt32Mul(node);
case IrOpcode::kInt32MulWithOverflow:
return MarkAsWord32(node), VisitInt32MulWithOverflow(node);
case IrOpcode::kInt32MulHigh:
return VisitInt32MulHigh(node);
case IrOpcode::kInt32Div:
return MarkAsWord32(node), VisitInt32Div(node);
case IrOpcode::kInt32Mod:
return MarkAsWord32(node), VisitInt32Mod(node);
case IrOpcode::kInt32LessThan:
return VisitInt32LessThan(node);
case IrOpcode::kInt32LessThanOrEqual:
return VisitInt32LessThanOrEqual(node);
case IrOpcode::kUint32Div:
return MarkAsWord32(node), VisitUint32Div(node);
case IrOpcode::kUint32LessThan:
return VisitUint32LessThan(node);
case IrOpcode::kUint32LessThanOrEqual:
return VisitUint32LessThanOrEqual(node);
case IrOpcode::kUint32Mod:
return MarkAsWord32(node), VisitUint32Mod(node);
case IrOpcode::kUint32MulHigh:
return VisitUint32MulHigh(node);
case IrOpcode::kInt64Add:
return MarkAsWord64(node), VisitInt64Add(node);
case IrOpcode::kInt64AddWithOverflow:
return MarkAsWord64(node), VisitInt64AddWithOverflow(node);
case IrOpcode::kInt64Sub:
return MarkAsWord64(node), VisitInt64Sub(node);
case IrOpcode::kInt64SubWithOverflow:
return MarkAsWord64(node), VisitInt64SubWithOverflow(node);
case IrOpcode::kInt64Mul:
return MarkAsWord64(node), VisitInt64Mul(node);
case IrOpcode::kInt64Div:
return MarkAsWord64(node), VisitInt64Div(node);
case IrOpcode::kInt64Mod:
return MarkAsWord64(node), VisitInt64Mod(node);
case IrOpcode::kInt64LessThan:
return VisitInt64LessThan(node);
case IrOpcode::kInt64LessThanOrEqual:
return VisitInt64LessThanOrEqual(node);
case IrOpcode::kUint64Div:
return MarkAsWord64(node), VisitUint64Div(node);
case IrOpcode::kUint64LessThan:
return VisitUint64LessThan(node);
case IrOpcode::kUint64LessThanOrEqual:
return VisitUint64LessThanOrEqual(node);
case IrOpcode::kUint64Mod:
return MarkAsWord64(node), VisitUint64Mod(node);
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits:
return MarkAsRepresentation(MachineType::PointerRepresentation(), node),
VisitBitcastTaggedToWord(node);
case IrOpcode::kBitcastWordToTagged:
return MarkAsTagged(node), VisitBitcastWordToTagged(node);
case IrOpcode::kBitcastWordToTaggedSigned:
return MarkAsRepresentation(MachineRepresentation::kTaggedSigned, node),
EmitIdentity(node);
case IrOpcode::kChangeFloat32ToFloat64:
return MarkAsFloat64(node), VisitChangeFloat32ToFloat64(node);
case IrOpcode::kChangeInt32ToFloat64:
return MarkAsFloat64(node), VisitChangeInt32ToFloat64(node);
case IrOpcode::kChangeInt64ToFloat64:
return MarkAsFloat64(node), VisitChangeInt64ToFloat64(node);
case IrOpcode::kChangeUint32ToFloat64:
return MarkAsFloat64(node), VisitChangeUint32ToFloat64(node);
case IrOpcode::kChangeFloat64ToInt32:
return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
case IrOpcode::kChangeFloat64ToInt64:
return MarkAsWord64(node), VisitChangeFloat64ToInt64(node);
case IrOpcode::kChangeFloat64ToUint32:
return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
case IrOpcode::kChangeFloat64ToUint64:
return MarkAsWord64(node), VisitChangeFloat64ToUint64(node);
case IrOpcode::kFloat64SilenceNaN:
MarkAsFloat64(node);
if (CanProduceSignalingNaN(node->InputAt(0))) {
return VisitFloat64SilenceNaN(node);
} else {
return EmitIdentity(node);
}
case IrOpcode::kTruncateFloat64ToInt64:
return MarkAsWord64(node), VisitTruncateFloat64ToInt64(node);
case IrOpcode::kTruncateFloat64ToUint32:
return MarkAsWord32(node), VisitTruncateFloat64ToUint32(node);
case IrOpcode::kTruncateFloat32ToInt32:
return MarkAsWord32(node), VisitTruncateFloat32ToInt32(node);
case IrOpcode::kTruncateFloat32ToUint32:
return MarkAsWord32(node), VisitTruncateFloat32ToUint32(node);
case IrOpcode::kTryTruncateFloat32ToInt64:
return MarkAsWord64(node), VisitTryTruncateFloat32ToInt64(node);
case IrOpcode::kTryTruncateFloat64ToInt64:
return MarkAsWord64(node), VisitTryTruncateFloat64ToInt64(node);
case IrOpcode::kTryTruncateFloat32ToUint64:
return MarkAsWord64(node), VisitTryTruncateFloat32ToUint64(node);
case IrOpcode::kTryTruncateFloat64ToUint64:
return MarkAsWord64(node), VisitTryTruncateFloat64ToUint64(node);
case IrOpcode::kBitcastWord32ToWord64:
return MarkAsWord64(node), VisitBitcastWord32ToWord64(node);
case IrOpcode::kChangeInt32ToInt64:
return MarkAsWord64(node), VisitChangeInt32ToInt64(node);
case IrOpcode::kChangeUint32ToUint64:
return MarkAsWord64(node), VisitChangeUint32ToUint64(node);
case IrOpcode::kTruncateFloat64ToFloat32:
return MarkAsFloat32(node), VisitTruncateFloat64ToFloat32(node);
case IrOpcode::kTruncateFloat64ToWord32:
return MarkAsWord32(node), VisitTruncateFloat64ToWord32(node);
case IrOpcode::kTruncateInt64ToInt32:
return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
case IrOpcode::kRoundFloat64ToInt32:
return MarkAsWord32(node), VisitRoundFloat64ToInt32(node);
case IrOpcode::kRoundInt64ToFloat32:
return MarkAsFloat32(node), VisitRoundInt64ToFloat32(node);
case IrOpcode::kRoundInt32ToFloat32:
return MarkAsFloat32(node), VisitRoundInt32ToFloat32(node);
case IrOpcode::kRoundInt64ToFloat64:
return MarkAsFloat64(node), VisitRoundInt64ToFloat64(node);
case IrOpcode::kBitcastFloat32ToInt32:
return MarkAsWord32(node), VisitBitcastFloat32ToInt32(node);
case IrOpcode::kRoundUint32ToFloat32:
return MarkAsFloat32(node), VisitRoundUint32ToFloat32(node);
case IrOpcode::kRoundUint64ToFloat32:
return MarkAsFloat64(node), VisitRoundUint64ToFloat32(node);
case IrOpcode::kRoundUint64ToFloat64:
return MarkAsFloat64(node), VisitRoundUint64ToFloat64(node);
case IrOpcode::kBitcastFloat64ToInt64:
return MarkAsWord64(node), VisitBitcastFloat64ToInt64(node);
case IrOpcode::kBitcastInt32ToFloat32:
return MarkAsFloat32(node), VisitBitcastInt32ToFloat32(node);
case IrOpcode::kBitcastInt64ToFloat64:
return MarkAsFloat64(node), VisitBitcastInt64ToFloat64(node);
case IrOpcode::kFloat32Add:
return MarkAsFloat32(node), VisitFloat32Add(node);
case IrOpcode::kFloat32Sub:
return MarkAsFloat32(node), VisitFloat32Sub(node);
case IrOpcode::kFloat32Neg:
return MarkAsFloat32(node), VisitFloat32Neg(node);
case IrOpcode::kFloat32Mul:
return MarkAsFloat32(node), VisitFloat32Mul(node);
case IrOpcode::kFloat32Div:
return MarkAsFloat32(node), VisitFloat32Div(node);
case IrOpcode::kFloat32Abs:
return MarkAsFloat32(node), VisitFloat32Abs(node);
case IrOpcode::kFloat32Sqrt:
return MarkAsFloat32(node), VisitFloat32Sqrt(node);
case IrOpcode::kFloat32Equal:
return VisitFloat32Equal(node);
case IrOpcode::kFloat32LessThan:
return VisitFloat32LessThan(node);
case IrOpcode::kFloat32LessThanOrEqual:
return VisitFloat32LessThanOrEqual(node);
case IrOpcode::kFloat32Max:
return MarkAsFloat32(node), VisitFloat32Max(node);
case IrOpcode::kFloat32Min:
return MarkAsFloat32(node), VisitFloat32Min(node);
case IrOpcode::kFloat64Add:
return MarkAsFloat64(node), VisitFloat64Add(node);
case IrOpcode::kFloat64Sub:
return MarkAsFloat64(node), VisitFloat64Sub(node);
case IrOpcode::kFloat64Neg:
return MarkAsFloat64(node), VisitFloat64Neg(node);
case IrOpcode::kFloat64Mul:
return MarkAsFloat64(node), VisitFloat64Mul(node);
case IrOpcode::kFloat64Div:
return MarkAsFloat64(node), VisitFloat64Div(node);
case IrOpcode::kFloat64Mod:
return MarkAsFloat64(node), VisitFloat64Mod(node);
case IrOpcode::kFloat64Min:
return MarkAsFloat64(node), VisitFloat64Min(node);
case IrOpcode::kFloat64Max:
return MarkAsFloat64(node), VisitFloat64Max(node);
case IrOpcode::kFloat64Abs:
return MarkAsFloat64(node), VisitFloat64Abs(node);
case IrOpcode::kFloat64Acos:
return MarkAsFloat64(node), VisitFloat64Acos(node);
case IrOpcode::kFloat64Acosh:
return MarkAsFloat64(node), VisitFloat64Acosh(node);
case IrOpcode::kFloat64Asin:
return MarkAsFloat64(node), VisitFloat64Asin(node);
case IrOpcode::kFloat64Asinh:
return MarkAsFloat64(node), VisitFloat64Asinh(node);
case IrOpcode::kFloat64Atan:
return MarkAsFloat64(node), VisitFloat64Atan(node);
case IrOpcode::kFloat64Atanh:
return MarkAsFloat64(node), VisitFloat64Atanh(node);
case IrOpcode::kFloat64Atan2:
return MarkAsFloat64(node), VisitFloat64Atan2(node);
case IrOpcode::kFloat64Cbrt:
return MarkAsFloat64(node), VisitFloat64Cbrt(node);
case IrOpcode::kFloat64Cos:
return MarkAsFloat64(node), VisitFloat64Cos(node);
case IrOpcode::kFloat64Cosh:
return MarkAsFloat64(node), VisitFloat64Cosh(node);
case IrOpcode::kFloat64Exp:
return MarkAsFloat64(node), VisitFloat64Exp(node);
case IrOpcode::kFloat64Expm1:
return MarkAsFloat64(node), VisitFloat64Expm1(node);
case IrOpcode::kFloat64Log:
return MarkAsFloat64(node), VisitFloat64Log(node);
case IrOpcode::kFloat64Log1p:
return MarkAsFloat64(node), VisitFloat64Log1p(node);
case IrOpcode::kFloat64Log10:
return MarkAsFloat64(node), VisitFloat64Log10(node);
case IrOpcode::kFloat64Log2:
return MarkAsFloat64(node), VisitFloat64Log2(node);
case IrOpcode::kFloat64Pow:
return MarkAsFloat64(node), VisitFloat64Pow(node);
case IrOpcode::kFloat64Sin:
return MarkAsFloat64(node), VisitFloat64Sin(node);
case IrOpcode::kFloat64Sinh:
return MarkAsFloat64(node), VisitFloat64Sinh(node);
case IrOpcode::kFloat64Sqrt:
return MarkAsFloat64(node), VisitFloat64Sqrt(node);
case IrOpcode::kFloat64Tan:
return MarkAsFloat64(node), VisitFloat64Tan(node);
case IrOpcode::kFloat64Tanh:
return MarkAsFloat64(node), VisitFloat64Tanh(node);
case IrOpcode::kFloat64Equal:
return VisitFloat64Equal(node);
case IrOpcode::kFloat64LessThan:
return VisitFloat64LessThan(node);
case IrOpcode::kFloat64LessThanOrEqual:
return VisitFloat64LessThanOrEqual(node);
case IrOpcode::kFloat32RoundDown:
return MarkAsFloat32(node), VisitFloat32RoundDown(node);
case IrOpcode::kFloat64RoundDown:
return MarkAsFloat64(node), VisitFloat64RoundDown(node);
case IrOpcode::kFloat32RoundUp:
return MarkAsFloat32(node), VisitFloat32RoundUp(node);
case IrOpcode::kFloat64RoundUp:
return MarkAsFloat64(node), VisitFloat64RoundUp(node);
case IrOpcode::kFloat32RoundTruncate:
return MarkAsFloat32(node), VisitFloat32RoundTruncate(node);
case IrOpcode::kFloat64RoundTruncate:
return MarkAsFloat64(node), VisitFloat64RoundTruncate(node);
case IrOpcode::kFloat64RoundTiesAway:
return MarkAsFloat64(node), VisitFloat64RoundTiesAway(node);
case IrOpcode::kFloat32RoundTiesEven:
return MarkAsFloat32(node), VisitFloat32RoundTiesEven(node);
case IrOpcode::kFloat64RoundTiesEven:
return MarkAsFloat64(node), VisitFloat64RoundTiesEven(node);
case IrOpcode::kFloat64ExtractLowWord32:
return MarkAsWord32(node), VisitFloat64ExtractLowWord32(node);
case IrOpcode::kFloat64ExtractHighWord32:
return MarkAsWord32(node), VisitFloat64ExtractHighWord32(node);
case IrOpcode::kFloat64InsertLowWord32:
return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
case IrOpcode::kFloat64InsertHighWord32:
return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
case IrOpcode::kTaggedPoisonOnSpeculation:
return MarkAsTagged(node), VisitTaggedPoisonOnSpeculation(node);
case IrOpcode::kWord32PoisonOnSpeculation:
return MarkAsWord32(node), VisitWord32PoisonOnSpeculation(node);
case IrOpcode::kWord64PoisonOnSpeculation:
return MarkAsWord64(node), VisitWord64PoisonOnSpeculation(node);
case IrOpcode::kStackSlot:
return VisitStackSlot(node);
case IrOpcode::kStackPointerGreaterThan:
return VisitStackPointerGreaterThan(node);
case IrOpcode::kLoadStackCheckOffset:
return VisitLoadStackCheckOffset(node);
case IrOpcode::kLoadFramePointer:
return VisitLoadFramePointer(node);
case IrOpcode::kLoadParentFramePointer:
return VisitLoadParentFramePointer(node);
case IrOpcode::kUnalignedLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitUnalignedLoad(node);
}
case IrOpcode::kUnalignedStore:
return VisitUnalignedStore(node);
case IrOpcode::kInt32PairAdd:
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
return VisitInt32PairAdd(node);
case IrOpcode::kInt32PairSub:
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
return VisitInt32PairSub(node);
case IrOpcode::kInt32PairMul:
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
return VisitInt32PairMul(node);
case IrOpcode::kWord32PairShl:
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
return VisitWord32PairShl(node);
case IrOpcode::kWord32PairShr:
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
return VisitWord32PairShr(node);
case IrOpcode::kWord32PairSar:
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
return VisitWord32PairSar(node);
case IrOpcode::kMemoryBarrier:
return VisitMemoryBarrier(node);
case IrOpcode::kWord32AtomicLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitWord32AtomicLoad(node);
}
case IrOpcode::kWord64AtomicLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitWord64AtomicLoad(node);
}
case IrOpcode::kWord32AtomicStore:
return VisitWord32AtomicStore(node);
case IrOpcode::kWord64AtomicStore:
return VisitWord64AtomicStore(node);
case IrOpcode::kWord32AtomicPairStore:
return VisitWord32AtomicPairStore(node);
case IrOpcode::kWord32AtomicPairLoad: {
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
return VisitWord32AtomicPairLoad(node);
}
#define ATOMIC_CASE(name, rep) \
case IrOpcode::k##rep##Atomic##name: { \
MachineType type = AtomicOpType(node->op()); \
MarkAsRepresentation(type.representation(), node); \
return Visit##rep##Atomic##name(node); \
}
ATOMIC_CASE(Add, Word32)
ATOMIC_CASE(Add, Word64)
ATOMIC_CASE(Sub, Word32)
ATOMIC_CASE(Sub, Word64)
ATOMIC_CASE(And, Word32)
ATOMIC_CASE(And, Word64)
ATOMIC_CASE(Or, Word32)
ATOMIC_CASE(Or, Word64)
ATOMIC_CASE(Xor, Word32)
ATOMIC_CASE(Xor, Word64)
ATOMIC_CASE(Exchange, Word32)
ATOMIC_CASE(Exchange, Word64)
ATOMIC_CASE(CompareExchange, Word32)
ATOMIC_CASE(CompareExchange, Word64)
#undef ATOMIC_CASE
#define ATOMIC_CASE(name) \
case IrOpcode::kWord32AtomicPair##name: { \
MarkAsWord32(node); \
MarkPairProjectionsAsWord32(node); \
return VisitWord32AtomicPair##name(node); \
}
ATOMIC_CASE(Add)
ATOMIC_CASE(Sub)
ATOMIC_CASE(And)
ATOMIC_CASE(Or)
ATOMIC_CASE(Xor)
ATOMIC_CASE(Exchange)
ATOMIC_CASE(CompareExchange)
#undef ATOMIC_CASE
case IrOpcode::kProtectedLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitProtectedLoad(node);
}
case IrOpcode::kSignExtendWord8ToInt32:
return MarkAsWord32(node), VisitSignExtendWord8ToInt32(node);
case IrOpcode::kSignExtendWord16ToInt32:
return MarkAsWord32(node), VisitSignExtendWord16ToInt32(node);
case IrOpcode::kSignExtendWord8ToInt64:
return MarkAsWord64(node), VisitSignExtendWord8ToInt64(node);
case IrOpcode::kSignExtendWord16ToInt64:
return MarkAsWord64(node), VisitSignExtendWord16ToInt64(node);
case IrOpcode::kSignExtendWord32ToInt64:
return MarkAsWord64(node), VisitSignExtendWord32ToInt64(node);
case IrOpcode::kUnsafePointerAdd:
MarkAsRepresentation(MachineType::PointerRepresentation(), node);
return VisitUnsafePointerAdd(node);
case IrOpcode::kF64x2Splat:
return MarkAsSimd128(node), VisitF64x2Splat(node);
case IrOpcode::kF64x2ExtractLane:
return MarkAsFloat64(node), VisitF64x2ExtractLane(node);
case IrOpcode::kF64x2ReplaceLane:
return MarkAsSimd128(node), VisitF64x2ReplaceLane(node);
case IrOpcode::kF64x2Abs:
return MarkAsSimd128(node), VisitF64x2Abs(node);
case IrOpcode::kF64x2Neg:
return MarkAsSimd128(node), VisitF64x2Neg(node);
case IrOpcode::kF64x2Sqrt:
return MarkAsSimd128(node), VisitF64x2Sqrt(node);
case IrOpcode::kF64x2Add:
return MarkAsSimd128(node), VisitF64x2Add(node);
case IrOpcode::kF64x2Sub:
return MarkAsSimd128(node), VisitF64x2Sub(node);
case IrOpcode::kF64x2Mul:
return MarkAsSimd128(node), VisitF64x2Mul(node);
case IrOpcode::kF64x2Div:
return MarkAsSimd128(node), VisitF64x2Div(node);
case IrOpcode::kF64x2Min:
return MarkAsSimd128(node), VisitF64x2Min(node);
case IrOpcode::kF64x2Max:
return MarkAsSimd128(node), VisitF64x2Max(node);
case IrOpcode::kF64x2Eq:
return MarkAsSimd128(node), VisitF64x2Eq(node);
case IrOpcode::kF64x2Ne:
return MarkAsSimd128(node), VisitF64x2Ne(node);
case IrOpcode::kF64x2Lt:
return MarkAsSimd128(node), VisitF64x2Lt(node);
case IrOpcode::kF64x2Le:
return MarkAsSimd128(node), VisitF64x2Le(node);
case IrOpcode::kF64x2Qfma:
return MarkAsSimd128(node), VisitF64x2Qfma(node);
case IrOpcode::kF64x2Qfms:
return MarkAsSimd128(node), VisitF64x2Qfms(node);
case IrOpcode::kF64x2Pmin:
return MarkAsSimd128(node), VisitF64x2Pmin(node);
case IrOpcode::kF64x2Pmax:
return MarkAsSimd128(node), VisitF64x2Pmax(node);
case IrOpcode::kF64x2Ceil:
return MarkAsSimd128(node), VisitF64x2Ceil(node);
case IrOpcode::kF64x2Floor:
return MarkAsSimd128(node), VisitF64x2Floor(node);
case IrOpcode::kF64x2Trunc:
return MarkAsSimd128(node), VisitF64x2Trunc(node);
case IrOpcode::kF64x2NearestInt:
return MarkAsSimd128(node), VisitF64x2NearestInt(node);
case IrOpcode::kF32x4Splat:
return MarkAsSimd128(node), VisitF32x4Splat(node);
case IrOpcode::kF32x4ExtractLane:
return MarkAsFloat32(node), VisitF32x4ExtractLane(node);
case IrOpcode::kF32x4ReplaceLane:
return MarkAsSimd128(node), VisitF32x4ReplaceLane(node);
case IrOpcode::kF32x4SConvertI32x4:
return MarkAsSimd128(node), VisitF32x4SConvertI32x4(node);
case IrOpcode::kF32x4UConvertI32x4:
return MarkAsSimd128(node), VisitF32x4UConvertI32x4(node);
case IrOpcode::kF32x4Abs:
return MarkAsSimd128(node), VisitF32x4Abs(node);
case IrOpcode::kF32x4Neg:
return MarkAsSimd128(node), VisitF32x4Neg(node);
case IrOpcode::kF32x4Sqrt:
return MarkAsSimd128(node), VisitF32x4Sqrt(node);
case IrOpcode::kF32x4RecipApprox:
return MarkAsSimd128(node), VisitF32x4RecipApprox(node);
case IrOpcode::kF32x4RecipSqrtApprox:
return MarkAsSimd128(node), VisitF32x4RecipSqrtApprox(node);
case IrOpcode::kF32x4Add:
return MarkAsSimd128(node), VisitF32x4Add(node);
case IrOpcode::kF32x4AddHoriz:
return MarkAsSimd128(node), VisitF32x4AddHoriz(node);
case IrOpcode::kF32x4Sub:
return MarkAsSimd128(node), VisitF32x4Sub(node);
case IrOpcode::kF32x4Mul:
return MarkAsSimd128(node), VisitF32x4Mul(node);
case IrOpcode::kF32x4Div:
return MarkAsSimd128(node), VisitF32x4Div(node);
case IrOpcode::kF32x4Min:
return MarkAsSimd128(node), VisitF32x4Min(node);
case IrOpcode::kF32x4Max:
return MarkAsSimd128(node), VisitF32x4Max(node);
case IrOpcode::kF32x4Eq:
return MarkAsSimd128(node), VisitF32x4Eq(node);
case IrOpcode::kF32x4Ne:
return MarkAsSimd128(node), VisitF32x4Ne(node);
case IrOpcode::kF32x4Lt:
return MarkAsSimd128(node), VisitF32x4Lt(node);
case IrOpcode::kF32x4Le:
return MarkAsSimd128(node), VisitF32x4Le(node);
case IrOpcode::kF32x4Qfma:
return MarkAsSimd128(node), VisitF32x4Qfma(node);
case IrOpcode::kF32x4Qfms:
return MarkAsSimd128(node), VisitF32x4Qfms(node);
case IrOpcode::kF32x4Pmin:
return MarkAsSimd128(node), VisitF32x4Pmin(node);
case IrOpcode::kF32x4Pmax:
return MarkAsSimd128(node), VisitF32x4Pmax(node);
case IrOpcode::kF32x4Ceil:
return MarkAsSimd128(node), VisitF32x4Ceil(node);
case IrOpcode::kF32x4Floor:
return MarkAsSimd128(node), VisitF32x4Floor(node);
case IrOpcode::kF32x4Trunc:
return MarkAsSimd128(node), VisitF32x4Trunc(node);
case IrOpcode::kF32x4NearestInt:
return MarkAsSimd128(node), VisitF32x4NearestInt(node);
case IrOpcode::kI64x2Splat:
return MarkAsSimd128(node), VisitI64x2Splat(node);
case IrOpcode::kI64x2SplatI32Pair:
return MarkAsSimd128(node), VisitI64x2SplatI32Pair(node);
case IrOpcode::kI64x2ExtractLane:
return MarkAsWord64(node), VisitI64x2ExtractLane(node);
case IrOpcode::kI64x2ReplaceLane:
return MarkAsSimd128(node), VisitI64x2ReplaceLane(node);
case IrOpcode::kI64x2ReplaceLaneI32Pair:
return MarkAsSimd128(node), VisitI64x2ReplaceLaneI32Pair(node);
case IrOpcode::kI64x2Neg:
return MarkAsSimd128(node), VisitI64x2Neg(node);
case IrOpcode::kI64x2SConvertI32x4Low:
return MarkAsSimd128(node), VisitI64x2SConvertI32x4Low(node);
case IrOpcode::kI64x2SConvertI32x4High:
return MarkAsSimd128(node), VisitI64x2SConvertI32x4High(node);
case IrOpcode::kI64x2UConvertI32x4Low:
return MarkAsSimd128(node), VisitI64x2UConvertI32x4Low(node);
case IrOpcode::kI64x2UConvertI32x4High:
return MarkAsSimd128(node), VisitI64x2UConvertI32x4High(node);
case IrOpcode::kI64x2BitMask:
return MarkAsWord32(node), VisitI64x2BitMask(node);
case IrOpcode::kI64x2Shl:
return MarkAsSimd128(node), VisitI64x2Shl(node);
case IrOpcode::kI64x2ShrS:
return MarkAsSimd128(node), VisitI64x2ShrS(node);
case IrOpcode::kI64x2Add:
return MarkAsSimd128(node), VisitI64x2Add(node);
case IrOpcode::kI64x2Sub:
return MarkAsSimd128(node), VisitI64x2Sub(node);
case IrOpcode::kI64x2Mul:
return MarkAsSimd128(node), VisitI64x2Mul(node);
case IrOpcode::kI64x2Eq:
return MarkAsSimd128(node), VisitI64x2Eq(node);
case IrOpcode::kI64x2ShrU:
return MarkAsSimd128(node), VisitI64x2ShrU(node);
case IrOpcode::kI64x2ExtMulLowI32x4S:
return MarkAsSimd128(node), VisitI64x2ExtMulLowI32x4S(node);
case IrOpcode::kI64x2ExtMulHighI32x4S:
return MarkAsSimd128(node), VisitI64x2ExtMulHighI32x4S(node);
case IrOpcode::kI64x2ExtMulLowI32x4U:
return MarkAsSimd128(node), VisitI64x2ExtMulLowI32x4U(node);
case IrOpcode::kI64x2ExtMulHighI32x4U:
return MarkAsSimd128(node), VisitI64x2ExtMulHighI32x4U(node);
case IrOpcode::kI64x2SignSelect:
return MarkAsSimd128(node), VisitI64x2SignSelect(node);
case IrOpcode::kI32x4Splat:
return MarkAsSimd128(node), VisitI32x4Splat(node);
case IrOpcode::kI32x4ExtractLane:
return MarkAsWord32(node), VisitI32x4ExtractLane(node);
case IrOpcode::kI32x4ReplaceLane:
return MarkAsSimd128(node), VisitI32x4ReplaceLane(node);
case IrOpcode::kI32x4SConvertF32x4:
return MarkAsSimd128(node), VisitI32x4SConvertF32x4(node);
case IrOpcode::kI32x4SConvertI16x8Low:
return MarkAsSimd128(node), VisitI32x4SConvertI16x8Low(node);
case IrOpcode::kI32x4SConvertI16x8High:
return MarkAsSimd128(node), VisitI32x4SConvertI16x8High(node);
case IrOpcode::kI32x4Neg:
return MarkAsSimd128(node), VisitI32x4Neg(node);
case IrOpcode::kI32x4Shl:
return MarkAsSimd128(node), VisitI32x4Shl(node);
case IrOpcode::kI32x4ShrS:
return MarkAsSimd128(node), VisitI32x4ShrS(node);
case IrOpcode::kI32x4Add:
return MarkAsSimd128(node), VisitI32x4Add(node);
case IrOpcode::kI32x4AddHoriz:
return MarkAsSimd128(node), VisitI32x4AddHoriz(node);
case IrOpcode::kI32x4Sub:
return MarkAsSimd128(node), VisitI32x4Sub(node);
case IrOpcode::kI32x4Mul:
return MarkAsSimd128(node), VisitI32x4Mul(node);
case IrOpcode::kI32x4MinS:
return MarkAsSimd128(node), VisitI32x4MinS(node);
case IrOpcode::kI32x4MaxS:
return MarkAsSimd128(node), VisitI32x4MaxS(node);
case IrOpcode::kI32x4Eq:
return MarkAsSimd128(node), VisitI32x4Eq(node);
case IrOpcode::kI32x4Ne:
return MarkAsSimd128(node), VisitI32x4Ne(node);
case IrOpcode::kI32x4GtS:
return MarkAsSimd128(node), VisitI32x4GtS(node);
case IrOpcode::kI32x4GeS:
return MarkAsSimd128(node), VisitI32x4GeS(node);
case IrOpcode::kI32x4UConvertF32x4:
return MarkAsSimd128(node), VisitI32x4UConvertF32x4(node);
case IrOpcode::kI32x4UConvertI16x8Low:
return MarkAsSimd128(node), VisitI32x4UConvertI16x8Low(node);
case IrOpcode::kI32x4UConvertI16x8High:
return MarkAsSimd128(node), VisitI32x4UConvertI16x8High(node);
case IrOpcode::kI32x4ShrU:
return MarkAsSimd128(node), VisitI32x4ShrU(node);
case IrOpcode::kI32x4MinU:
return MarkAsSimd128(node), VisitI32x4MinU(node);
case IrOpcode::kI32x4MaxU:
return MarkAsSimd128(node), VisitI32x4MaxU(node);
case IrOpcode::kI32x4GtU:
return MarkAsSimd128(node), VisitI32x4GtU(node);
case IrOpcode::kI32x4GeU:
return MarkAsSimd128(node), VisitI32x4GeU(node);
case IrOpcode::kI32x4Abs:
return MarkAsSimd128(node), VisitI32x4Abs(node);
case IrOpcode::kI32x4BitMask:
return MarkAsWord32(node), VisitI32x4BitMask(node);
case IrOpcode::kI32x4DotI16x8S:
return MarkAsSimd128(node), VisitI32x4DotI16x8S(node);
case IrOpcode::kI32x4ExtMulLowI16x8S:
return MarkAsSimd128(node), VisitI32x4ExtMulLowI16x8S(node);
case IrOpcode::kI32x4ExtMulHighI16x8S:
return MarkAsSimd128(node), VisitI32x4ExtMulHighI16x8S(node);
case IrOpcode::kI32x4ExtMulLowI16x8U:
return MarkAsSimd128(node), VisitI32x4ExtMulLowI16x8U(node);
case IrOpcode::kI32x4ExtMulHighI16x8U:
return MarkAsSimd128(node), VisitI32x4ExtMulHighI16x8U(node);
case IrOpcode::kI32x4SignSelect:
return MarkAsSimd128(node), VisitI32x4SignSelect(node);
case IrOpcode::kI32x4ExtAddPairwiseI16x8S:
return MarkAsSimd128(node), VisitI32x4ExtAddPairwiseI16x8S(node);
case IrOpcode::kI32x4ExtAddPairwiseI16x8U:
return MarkAsSimd128(node), VisitI32x4ExtAddPairwiseI16x8U(node);
case IrOpcode::kI16x8Splat:
return MarkAsSimd128(node), VisitI16x8Splat(node);
case IrOpcode::kI16x8ExtractLaneU:
return MarkAsWord32(node), VisitI16x8ExtractLaneU(node);
case IrOpcode::kI16x8ExtractLaneS:
return MarkAsWord32(node), VisitI16x8ExtractLaneS(node);
case IrOpcode::kI16x8ReplaceLane:
return MarkAsSimd128(node), VisitI16x8ReplaceLane(node);
case IrOpcode::kI16x8SConvertI8x16Low:
return MarkAsSimd128(node), VisitI16x8SConvertI8x16Low(node);
case IrOpcode::kI16x8SConvertI8x16High:
return MarkAsSimd128(node), VisitI16x8SConvertI8x16High(node);
case IrOpcode::kI16x8Neg:
return MarkAsSimd128(node), VisitI16x8Neg(node);
case IrOpcode::kI16x8Shl:
return MarkAsSimd128(node), VisitI16x8Shl(node);
case IrOpcode::kI16x8ShrS:
return MarkAsSimd128(node), VisitI16x8ShrS(node);
case IrOpcode::kI16x8SConvertI32x4:
return MarkAsSimd128(node), VisitI16x8SConvertI32x4(node);
case IrOpcode::kI16x8Add:
return MarkAsSimd128(node), VisitI16x8Add(node);
case IrOpcode::kI16x8AddSatS:
return MarkAsSimd128(node), VisitI16x8AddSatS(node);
case IrOpcode::kI16x8AddHoriz:
return MarkAsSimd128(node), VisitI16x8AddHoriz(node);
case IrOpcode::kI16x8Sub:
return MarkAsSimd128(node), VisitI16x8Sub(node);
case IrOpcode::kI16x8SubSatS:
return MarkAsSimd128(node), VisitI16x8SubSatS(node);
case IrOpcode::kI16x8Mul:
return MarkAsSimd128(node), VisitI16x8Mul(node);
case IrOpcode::kI16x8MinS:
return MarkAsSimd128(node), VisitI16x8MinS(node);
case IrOpcode::kI16x8MaxS:
return MarkAsSimd128(node), VisitI16x8MaxS(node);
case IrOpcode::kI16x8Eq:
return MarkAsSimd128(node), VisitI16x8Eq(node);
case IrOpcode::kI16x8Ne:
return MarkAsSimd128(node), VisitI16x8Ne(node);
case IrOpcode::kI16x8GtS:
return MarkAsSimd128(node), VisitI16x8GtS(node);
case IrOpcode::kI16x8GeS:
return MarkAsSimd128(node), VisitI16x8GeS(node);
case IrOpcode::kI16x8UConvertI8x16Low:
return MarkAsSimd128(node), VisitI16x8UConvertI8x16Low(node);
case IrOpcode::kI16x8UConvertI8x16High:
return MarkAsSimd128(node), VisitI16x8UConvertI8x16High(node);
case IrOpcode::kI16x8ShrU:
return MarkAsSimd128(node), VisitI16x8ShrU(node);
case IrOpcode::kI16x8UConvertI32x4:
return MarkAsSimd128(node), VisitI16x8UConvertI32x4(node);
case IrOpcode::kI16x8AddSatU:
return MarkAsSimd128(node), VisitI16x8AddSatU(node);
case IrOpcode::kI16x8SubSatU:
return MarkAsSimd128(node), VisitI16x8SubSatU(node);
case IrOpcode::kI16x8MinU:
return MarkAsSimd128(node), VisitI16x8MinU(node);
case IrOpcode::kI16x8MaxU:
return MarkAsSimd128(node), VisitI16x8MaxU(node);
case IrOpcode::kI16x8GtU:
return MarkAsSimd128(node), VisitI16x8GtU(node);
case IrOpcode::kI16x8GeU:
return MarkAsSimd128(node), VisitI16x8GeU(node);
case IrOpcode::kI16x8RoundingAverageU:
return MarkAsSimd128(node), VisitI16x8RoundingAverageU(node);
case IrOpcode::kI16x8Q15MulRSatS:
return MarkAsSimd128(node), VisitI16x8Q15MulRSatS(node);
case IrOpcode::kI16x8Abs:
return MarkAsSimd128(node), VisitI16x8Abs(node);
case IrOpcode::kI16x8BitMask:
return MarkAsWord32(node), VisitI16x8BitMask(node);
case IrOpcode::kI16x8ExtMulLowI8x16S:
return MarkAsSimd128(node), VisitI16x8ExtMulLowI8x16S(node);
case IrOpcode::kI16x8ExtMulHighI8x16S:
return MarkAsSimd128(node), VisitI16x8ExtMulHighI8x16S(node);
case IrOpcode::kI16x8ExtMulLowI8x16U:
return MarkAsSimd128(node), VisitI16x8ExtMulLowI8x16U(node);
case IrOpcode::kI16x8ExtMulHighI8x16U:
return MarkAsSimd128(node), VisitI16x8ExtMulHighI8x16U(node);
case IrOpcode::kI16x8SignSelect:
return MarkAsSimd128(node), VisitI16x8SignSelect(node);
case IrOpcode::kI16x8ExtAddPairwiseI8x16S:
return MarkAsSimd128(node), VisitI16x8ExtAddPairwiseI8x16S(node);
case IrOpcode::kI16x8ExtAddPairwiseI8x16U:
return MarkAsSimd128(node), VisitI16x8ExtAddPairwiseI8x16U(node);
case IrOpcode::kI8x16Splat:
return MarkAsSimd128(node), VisitI8x16Splat(node);
case IrOpcode::kI8x16ExtractLaneU:
return MarkAsWord32(node), VisitI8x16ExtractLaneU(node);
case IrOpcode::kI8x16ExtractLaneS:
return MarkAsWord32(node), VisitI8x16ExtractLaneS(node);
case IrOpcode::kI8x16ReplaceLane:
return MarkAsSimd128(node), VisitI8x16ReplaceLane(node);
case IrOpcode::kI8x16Neg:
return MarkAsSimd128(node), VisitI8x16Neg(node);
case IrOpcode::kI8x16Shl:
return MarkAsSimd128(node), VisitI8x16Shl(node);
case IrOpcode::kI8x16ShrS:
return MarkAsSimd128(node), VisitI8x16ShrS(node);
case IrOpcode::kI8x16SConvertI16x8:
return MarkAsSimd128(node), VisitI8x16SConvertI16x8(node);
case IrOpcode::kI8x16Add:
return MarkAsSimd128(node), VisitI8x16Add(node);
case IrOpcode::kI8x16AddSatS:
return MarkAsSimd128(node), VisitI8x16AddSatS(node);
case IrOpcode::kI8x16Sub:
return MarkAsSimd128(node), VisitI8x16Sub(node);
case IrOpcode::kI8x16SubSatS:
return MarkAsSimd128(node), VisitI8x16SubSatS(node);
case IrOpcode::kI8x16Mul:
return MarkAsSimd128(node), VisitI8x16Mul(node);
case IrOpcode::kI8x16MinS:
return MarkAsSimd128(node), VisitI8x16MinS(node);
case IrOpcode::kI8x16MaxS:
return MarkAsSimd128(node), VisitI8x16MaxS(node);
case IrOpcode::kI8x16Eq:
return MarkAsSimd128(node), VisitI8x16Eq(node);
case IrOpcode::kI8x16Ne:
return MarkAsSimd128(node), VisitI8x16Ne(node);
case IrOpcode::kI8x16GtS:
return MarkAsSimd128(node), VisitI8x16GtS(node);
case IrOpcode::kI8x16GeS:
return MarkAsSimd128(node), VisitI8x16GeS(node);
case IrOpcode::kI8x16ShrU:
return MarkAsSimd128(node), VisitI8x16ShrU(node);
case IrOpcode::kI8x16UConvertI16x8:
return MarkAsSimd128(node), VisitI8x16UConvertI16x8(node);
case IrOpcode::kI8x16AddSatU:
return MarkAsSimd128(node), VisitI8x16AddSatU(node);
case IrOpcode::kI8x16SubSatU:
return MarkAsSimd128(node), VisitI8x16SubSatU(node);
case IrOpcode::kI8x16MinU:
return MarkAsSimd128(node), VisitI8x16MinU(node);
case IrOpcode::kI8x16MaxU:
return MarkAsSimd128(node), VisitI8x16MaxU(node);
case IrOpcode::kI8x16GtU:
return MarkAsSimd128(node), VisitI8x16GtU(node);
case IrOpcode::kI8x16GeU:
return MarkAsSimd128(node), VisitI8x16GeU(node);
case IrOpcode::kI8x16RoundingAverageU:
return MarkAsSimd128(node), VisitI8x16RoundingAverageU(node);
case IrOpcode::kI8x16Popcnt:
return MarkAsSimd128(node), VisitI8x16Popcnt(node);
case IrOpcode::kI8x16Abs:
return MarkAsSimd128(node), VisitI8x16Abs(node);
case IrOpcode::kI8x16BitMask:
return MarkAsWord32(node), VisitI8x16BitMask(node);
case IrOpcode::kI8x16SignSelect:
return MarkAsSimd128(node), VisitI8x16SignSelect(node);
case IrOpcode::kS128Const:
return MarkAsSimd128(node), VisitS128Const(node);
case IrOpcode::kS128Zero:
return MarkAsSimd128(node), VisitS128Zero(node);
case IrOpcode::kS128And:
return MarkAsSimd128(node), VisitS128And(node);
case IrOpcode::kS128Or:
return MarkAsSimd128(node), VisitS128Or(node);
case IrOpcode::kS128Xor:
return MarkAsSimd128(node), VisitS128Xor(node);
case IrOpcode::kS128Not:
return MarkAsSimd128(node), VisitS128Not(node);
case IrOpcode::kS128Select:
return MarkAsSimd128(node), VisitS128Select(node);
case IrOpcode::kS128AndNot:
return MarkAsSimd128(node), VisitS128AndNot(node);
case IrOpcode::kI8x16Swizzle:
return MarkAsSimd128(node), VisitI8x16Swizzle(node);
case IrOpcode::kI8x16Shuffle:
return MarkAsSimd128(node), VisitI8x16Shuffle(node);
case IrOpcode::kV32x4AnyTrue:
return MarkAsWord32(node), VisitV32x4AnyTrue(node);
case IrOpcode::kV32x4AllTrue:
return MarkAsWord32(node), VisitV32x4AllTrue(node);
case IrOpcode::kV16x8AnyTrue:
return MarkAsWord32(node), VisitV16x8AnyTrue(node);
case IrOpcode::kV16x8AllTrue:
return MarkAsWord32(node), VisitV16x8AllTrue(node);
case IrOpcode::kV8x16AnyTrue:
return MarkAsWord32(node), VisitV8x16AnyTrue(node);
case IrOpcode::kV8x16AllTrue:
return MarkAsWord32(node), VisitV8x16AllTrue(node);
default:</