| // Copyright 2017 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include "src/wasm/baseline/liftoff-compiler.h" |
| |
| #include "src/base/optional.h" |
| #include "src/codegen/assembler-inl.h" |
| // TODO(clemensb): Remove dependencies on compiler stuff. |
| #include "src/codegen/external-reference.h" |
| #include "src/codegen/interface-descriptors.h" |
| #include "src/codegen/machine-type.h" |
| #include "src/codegen/macro-assembler-inl.h" |
| #include "src/compiler/linkage.h" |
| #include "src/compiler/wasm-compiler.h" |
| #include "src/logging/counters.h" |
| #include "src/logging/log.h" |
| #include "src/objects/smi.h" |
| #include "src/tracing/trace-event.h" |
| #include "src/utils/ostreams.h" |
| #include "src/utils/utils.h" |
| #include "src/wasm/baseline/liftoff-assembler.h" |
| #include "src/wasm/function-body-decoder-impl.h" |
| #include "src/wasm/function-compiler.h" |
| #include "src/wasm/memory-tracing.h" |
| #include "src/wasm/object-access.h" |
| #include "src/wasm/simd-shuffle.h" |
| #include "src/wasm/wasm-debug.h" |
| #include "src/wasm/wasm-engine.h" |
| #include "src/wasm/wasm-linkage.h" |
| #include "src/wasm/wasm-objects.h" |
| #include "src/wasm/wasm-opcodes-inl.h" |
| |
| namespace v8 { |
| namespace internal { |
| namespace wasm { |
| |
| constexpr auto kRegister = LiftoffAssembler::VarState::kRegister; |
| constexpr auto kIntConst = LiftoffAssembler::VarState::kIntConst; |
| constexpr auto kStack = LiftoffAssembler::VarState::kStack; |
| |
| namespace { |
| |
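| // Shorthand for calls on the {LiftoffAssembler} member {asm_}; assembler |
| // calls below are written as {__ Foo(...)}. |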
| #define __ asm_. |
| |
| #define TRACE(...) \ |
| do { \ |
| if (FLAG_trace_liftoff) PrintF("[liftoff] " __VA_ARGS__); \ |
| } while (false) |
| |
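| // Offset of the named {WasmInstanceObject} field, converted via |
| // {ObjectAccess::ToTagged} for accesses through a tagged object pointer. |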
| #define WASM_INSTANCE_OBJECT_FIELD_OFFSET(name) \ |
| ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset) |
| |
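| // Compile-time check that a {WasmInstanceObject} field has the size we |
| // expect when loading it. |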
| template <int expected_size, int actual_size> |
| struct assert_field_size { |
| static_assert(expected_size == actual_size, |
| "field in WasmInstance does not have the expected size"); |
| static constexpr int size = actual_size; |
| }; |
| |
| #define WASM_INSTANCE_OBJECT_FIELD_SIZE(name) \ |
| FIELD_SIZE(WasmInstanceObject::k##name##Offset) |
| |
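| // Loads the named instance field into {dst}, statically asserting that |
| // {load_size} matches the actual field size. |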
| #define LOAD_INSTANCE_FIELD(dst, name, load_size) \ |
| __ LoadFromInstance(dst, WASM_INSTANCE_OBJECT_FIELD_OFFSET(name), \ |
| assert_field_size<WASM_INSTANCE_OBJECT_FIELD_SIZE(name), \ |
| load_size>::size); |
| |
| #define LOAD_TAGGED_PTR_INSTANCE_FIELD(dst, name) \ |
| static_assert(WASM_INSTANCE_OBJECT_FIELD_SIZE(name) == kTaggedSize, \ |
| "field in WasmInstance does not have the expected size"); \ |
| __ LoadTaggedPointerFromInstance(dst, \ |
| WASM_INSTANCE_OBJECT_FIELD_OFFSET(name)); |
| |
| #ifdef DEBUG |
| #define DEBUG_CODE_COMMENT(str) \ |
| do { \ |
| __ RecordComment(str); \ |
| } while (false) |
| #else |
| #define DEBUG_CODE_COMMENT(str) ((void)0) |
| #endif |
| |
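| // Load type and value type corresponding to the target's pointer size. |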
| constexpr LoadType::LoadTypeValue kPointerLoadType = |
| kSystemPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load; |
| |
| constexpr ValueType kPointerValueType = |
| kSystemPointerSize == 8 ? kWasmI64 : kWasmI32; |
| |
| #if V8_TARGET_ARCH_ARM64 |
| // On ARM64, the Assembler keeps track of pointers to Labels to resolve |
| // branches to distant targets. Moving labels would confuse the Assembler, |
| // so we store the label on the heap and keep a unique_ptr to it. |
| class MovableLabel { |
| public: |
| MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(MovableLabel); |
| MovableLabel() : label_(new Label()) {} |
| |
| Label* get() { return label_.get(); } |
| |
| private: |
| std::unique_ptr<Label> label_; |
| }; |
| #else |
| // On all other platforms, just store the Label directly. |
| class MovableLabel { |
| public: |
| MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(MovableLabel); |
| |
| Label* get() { return &label_; } |
| |
| private: |
| Label label_; |
| }; |
| #endif |
| |
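| // On 32-bit targets the call descriptor is lowered to operate on i32 values |
| // (see {GetI32WasmCallDescriptor}); 64-bit targets use it unchanged. |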
| compiler::CallDescriptor* GetLoweredCallDescriptor( |
| Zone* zone, compiler::CallDescriptor* call_desc) { |
| return kSystemPointerSize == 4 |
| ? compiler::GetI32WasmCallDescriptor(zone, call_desc) |
| : call_desc; |
| } |
| |
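| // Value types Liftoff can handle. The variant without reference types is |
| // used unless --experimental-liftoff-extern-ref is enabled. |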
| constexpr ValueType kSupportedTypesArr[] = { |
| kWasmI32, kWasmI64, kWasmF32, kWasmF64, |
| kWasmS128, kWasmExternRef, kWasmFuncRef}; |
| constexpr Vector<const ValueType> kSupportedTypes = |
| ArrayVector(kSupportedTypesArr); |
| |
| constexpr ValueType kSupportedTypesWithoutRefsArr[] = { |
| kWasmI32, kWasmI64, kWasmF32, kWasmF64, kWasmS128}; |
| constexpr Vector<const ValueType> kSupportedTypesWithoutRefs = |
| ArrayVector(kSupportedTypesWithoutRefsArr); |
| |
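| // Maps an i32 comparison opcode to the corresponding machine condition. |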
| constexpr Condition GetCompareCondition(WasmOpcode opcode) { |
| switch (opcode) { |
| case kExprI32Eq: |
| return kEqual; |
| case kExprI32Ne: |
| return kUnequal; |
| case kExprI32LtS: |
| return kSignedLessThan; |
| case kExprI32LtU: |
| return kUnsignedLessThan; |
| case kExprI32GtS: |
| return kSignedGreaterThan; |
| case kExprI32GtU: |
| return kUnsignedGreaterThan; |
| case kExprI32LeS: |
| return kSignedLessEqual; |
| case kExprI32LeU: |
| return kUnsignedLessEqual; |
| case kExprI32GeS: |
| return kSignedGreaterEqual; |
| case kExprI32GeU: |
| return kUnsignedGreaterEqual; |
| default: |
| #if V8_HAS_CXX14_CONSTEXPR |
| UNREACHABLE(); |
| #else |
| // We need to return something for old compilers here. |
| return kEqual; |
| #endif |
| } |
| } |
| |
| // Builds a {DebugSideTable}. |
| class DebugSideTableBuilder { |
| public: |
| enum AssumeSpilling { |
| // All register values will be spilled before the pc covered by the debug |
| // side table entry. Register slots will be marked as stack slots in the |
| // generated debug side table entry. |
| kAssumeSpilling, |
| // Register slots will be written out as they are. |
| kAllowRegisters, |
| // Register slots cannot appear since we already spilled. |
| kDidSpill |
| }; |
| |
| class EntryBuilder { |
| public: |
| explicit EntryBuilder(int pc_offset, |
| std::vector<DebugSideTable::Entry::Value> values) |
| : pc_offset_(pc_offset), values_(std::move(values)) {} |
| |
| DebugSideTable::Entry ToTableEntry() { |
| return DebugSideTable::Entry{pc_offset_, std::move(values_)}; |
| } |
| |
| int pc_offset() const { return pc_offset_; } |
| void set_pc_offset(int new_pc_offset) { pc_offset_ = new_pc_offset; } |
| |
| private: |
| int pc_offset_; |
| std::vector<DebugSideTable::Entry::Value> values_; |
| }; |
| |
| // Adds a new entry, and returns a pointer to a builder for modifying that |
| // entry ({stack_height} includes {num_locals}). |
| EntryBuilder* NewEntry(int pc_offset, int num_locals, int stack_height, |
| LiftoffAssembler::VarState* stack_state, |
| AssumeSpilling assume_spilling) { |
| DCHECK_LE(num_locals, stack_height); |
| // Record stack types. |
| std::vector<DebugSideTable::Entry::Value> values(stack_height); |
| for (int i = 0; i < stack_height; ++i) { |
| const auto& slot = stack_state[i]; |
| values[i].type = slot.type(); |
| values[i].stack_offset = slot.offset(); |
| switch (slot.loc()) { |
| case kIntConst: |
| values[i].kind = DebugSideTable::Entry::kConstant; |
| values[i].i32_const = slot.i32_const(); |
| break; |
| case kRegister: |
| DCHECK_NE(kDidSpill, assume_spilling); |
| if (assume_spilling == kAllowRegisters) { |
| values[i].kind = DebugSideTable::Entry::kRegister; |
| values[i].reg_code = slot.reg().liftoff_code(); |
| break; |
| } |
| DCHECK_EQ(kAssumeSpilling, assume_spilling); |
| V8_FALLTHROUGH; |
| case kStack: |
| values[i].kind = DebugSideTable::Entry::kStack; |
| values[i].stack_offset = slot.offset(); |
| break; |
| } |
| } |
| entries_.emplace_back(pc_offset, std::move(values)); |
| return &entries_.back(); |
| } |
| |
| void SetNumLocals(int num_locals) { |
| DCHECK_EQ(-1, num_locals_); |
| DCHECK_LE(0, num_locals); |
| num_locals_ = num_locals; |
| } |
| |
| std::unique_ptr<DebugSideTable> GenerateDebugSideTable() { |
| DCHECK_LE(0, num_locals_); |
| std::vector<DebugSideTable::Entry> entries; |
| entries.reserve(entries_.size()); |
| for (auto& entry : entries_) entries.push_back(entry.ToTableEntry()); |
| std::sort(entries.begin(), entries.end(), |
| [](DebugSideTable::Entry& a, DebugSideTable::Entry& b) { |
| return a.pc_offset() < b.pc_offset(); |
| }); |
| return std::make_unique<DebugSideTable>(num_locals_, std::move(entries)); |
| } |
| |
| private: |
| int num_locals_ = -1; |
| std::list<EntryBuilder> entries_; |
| }; |
| |
| class LiftoffCompiler { |
| public: |
| // TODO(clemensb): Make this a template parameter. |
| static constexpr Decoder::ValidateFlag validate = Decoder::kBooleanValidation; |
| |
| using Value = ValueBase<validate>; |
| |
| static constexpr auto kI32 = ValueType::kI32; |
| static constexpr auto kI64 = ValueType::kI64; |
| static constexpr auto kF32 = ValueType::kF32; |
| static constexpr auto kF64 = ValueType::kF64; |
| static constexpr auto kS128 = ValueType::kS128; |
| |
| struct ElseState { |
| MovableLabel label; |
| LiftoffAssembler::CacheState state; |
| }; |
| |
| struct Control : public ControlBase<Value, validate> { |
| std::unique_ptr<ElseState> else_state; |
| LiftoffAssembler::CacheState label_state; |
| MovableLabel label; |
| |
| MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(Control); |
| |
| template <typename... Args> |
| explicit Control(Args&&... args) V8_NOEXCEPT |
| : ControlBase(std::forward<Args>(args)...) {} |
| }; |
| |
| using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>; |
| |
| // For debugging, we need to spill registers before a trap or a stack check to |
| // be able to inspect them. |
| struct SpilledRegistersForInspection : public ZoneObject { |
| struct Entry { |
| int offset; |
| LiftoffRegister reg; |
| ValueType type; |
| }; |
| ZoneVector<Entry> entries; |
| |
| explicit SpilledRegistersForInspection(Zone* zone) : entries(zone) {} |
| }; |
| |
| struct OutOfLineCode { |
| MovableLabel label; |
| MovableLabel continuation; |
| WasmCode::RuntimeStubId stub; |
| WasmCodePosition position; |
| LiftoffRegList regs_to_save; |
| uint32_t pc; // for trap handler. |
| // These two pointers will only be used for debug code: |
| DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder; |
| SpilledRegistersForInspection* spilled_registers; |
| |
| // Named constructors: |
| static OutOfLineCode Trap( |
| WasmCode::RuntimeStubId s, WasmCodePosition pos, uint32_t pc, |
| DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder, |
| SpilledRegistersForInspection* spilled_registers) { |
| DCHECK_LT(0, pos); |
| return {{}, |
| {}, |
| s, |
| pos, |
| {}, |
| pc, |
| debug_sidetable_entry_builder, |
| spilled_registers}; |
| } |
| static OutOfLineCode StackCheck( |
| WasmCodePosition pos, LiftoffRegList regs_to_save, |
| SpilledRegistersForInspection* spilled_regs, |
| DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) { |
| return {{}, {}, WasmCode::kWasmStackGuard, pos, |
| regs_to_save, 0, debug_sidetable_entry_builder, spilled_regs}; |
| } |
| }; |
| |
| LiftoffCompiler(compiler::CallDescriptor* call_descriptor, |
| CompilationEnv* env, Zone* compilation_zone, |
| std::unique_ptr<AssemblerBuffer> buffer, |
| DebugSideTableBuilder* debug_sidetable_builder, |
| ForDebugging for_debugging, int func_index, |
| Vector<int> breakpoints = {}, int dead_breakpoint = 0) |
| : asm_(std::move(buffer)), |
| descriptor_( |
| GetLoweredCallDescriptor(compilation_zone, call_descriptor)), |
| env_(env), |
| debug_sidetable_builder_(debug_sidetable_builder), |
| for_debugging_(for_debugging), |
| func_index_(func_index), |
| out_of_line_code_(compilation_zone), |
| source_position_table_builder_(compilation_zone), |
| protected_instructions_(compilation_zone), |
| compilation_zone_(compilation_zone), |
| safepoint_table_builder_(compilation_zone_), |
| next_breakpoint_ptr_(breakpoints.begin()), |
| next_breakpoint_end_(breakpoints.end()), |
| dead_breakpoint_(dead_breakpoint) { |
| if (breakpoints.empty()) { |
| next_breakpoint_ptr_ = next_breakpoint_end_ = nullptr; |
| } |
| } |
| |
| bool did_bailout() const { return bailout_reason_ != kSuccess; } |
| LiftoffBailoutReason bailout_reason() const { return bailout_reason_; } |
| |
| void GetCode(CodeDesc* desc) { |
| asm_.GetCode(nullptr, desc, &safepoint_table_builder_, |
| Assembler::kNoHandlerTable); |
| } |
| |
| OwnedVector<uint8_t> GetSourcePositionTable() { |
| return source_position_table_builder_.ToSourcePositionTableVector(); |
| } |
| |
| OwnedVector<uint8_t> GetProtectedInstructionsData() const { |
| return OwnedVector<uint8_t>::Of( |
| Vector<const uint8_t>::cast(VectorOf(protected_instructions_))); |
| } |
| |
| uint32_t GetTotalFrameSlotCountForGC() const { |
| return __ GetTotalFrameSlotCountForGC(); |
| } |
| |
| void unsupported(FullDecoder* decoder, LiftoffBailoutReason reason, |
| const char* detail) { |
| DCHECK_NE(kSuccess, reason); |
| if (did_bailout()) return; |
| bailout_reason_ = reason; |
| TRACE("unsupported: %s\n", detail); |
| decoder->errorf(decoder->pc_offset(), "unsupported liftoff operation: %s", |
| detail); |
| UnuseLabels(decoder); |
| } |
| |
| bool DidAssemblerBailout(FullDecoder* decoder) { |
| if (decoder->failed() || !__ did_bailout()) return false; |
| unsupported(decoder, __ bailout_reason(), __ bailout_detail()); |
| return true; |
| } |
| |
| LiftoffBailoutReason BailoutReasonForType(ValueType type) { |
| switch (type.kind()) { |
| case ValueType::kS128: |
| return kSimd; |
| case ValueType::kOptRef: |
| case ValueType::kRef: |
| if (type.is_reference_to(HeapType::kExn)) { |
| return kExceptionHandling; |
| } else { |
| return kRefTypes; |
| } |
| case ValueType::kBottom: |
| return kMultiValue; |
| default: |
| return kOtherReason; |
| } |
| } |
| |
| bool CheckSupportedType(FullDecoder* decoder, |
| Vector<const ValueType> supported_types, |
| ValueType type, const char* context) { |
| // Special case for kWasmS128, which requires specific hardware support. |
| if (type == kWasmS128 && (!CpuFeatures::SupportsWasmSimd128())) { |
| unsupported(decoder, kSimd, "simd"); |
| return false; |
| } |
| // Check supported types. |
| for (ValueType supported : supported_types) { |
| if (type == supported) return true; |
| } |
| LiftoffBailoutReason bailout_reason = BailoutReasonForType(type); |
| EmbeddedVector<char, 128> buffer; |
| SNPrintF(buffer, "%s %s", type.name().c_str(), context); |
| unsupported(decoder, bailout_reason, buffer.begin()); |
| return false; |
| } |
| |
| int GetSafepointTableOffset() const { |
| return safepoint_table_builder_.GetCodeOffset(); |
| } |
| |
| void UnuseLabels(FullDecoder* decoder) { |
| #ifdef DEBUG |
| auto Unuse = [](Label* label) { |
| label->Unuse(); |
| label->UnuseNear(); |
| }; |
| // Unuse all labels now, otherwise their destructors will fire a DCHECK |
| // error if they were referenced before. |
| uint32_t control_depth = decoder ? decoder->control_depth() : 0; |
| for (uint32_t i = 0; i < control_depth; ++i) { |
| Control* c = decoder->control_at(i); |
| Unuse(c->label.get()); |
| if (c->else_state) Unuse(c->else_state->label.get()); |
| } |
| for (auto& ool : out_of_line_code_) Unuse(ool.label.get()); |
| #endif |
| } |
| |
| void StartFunction(FullDecoder* decoder) { |
| if (FLAG_trace_liftoff && !FLAG_trace_wasm_decoder) { |
| StdoutStream{} << "hint: add --trace-wasm-decoder to also see the wasm " |
| "instructions being decoded\n"; |
| } |
| int num_locals = decoder->num_locals(); |
| __ set_num_locals(num_locals); |
| for (int i = 0; i < num_locals; ++i) { |
| ValueType type = decoder->local_type(i); |
| __ set_local_type(i, type); |
| } |
| } |
| |
| // Returns the number of inputs processed (1 or 2). |
| uint32_t ProcessParameter(ValueType type, uint32_t input_idx) { |
| const bool needs_pair = needs_gp_reg_pair(type); |
| const ValueType reg_type = needs_pair ? kWasmI32 : type; |
| const RegClass rc = reg_class_for(reg_type); |
| |
| auto LoadToReg = [this, reg_type, rc](compiler::LinkageLocation location, |
| LiftoffRegList pinned) { |
| if (location.IsRegister()) { |
| DCHECK(!location.IsAnyRegister()); |
| return LiftoffRegister::from_external_code(rc, reg_type, |
| location.AsRegister()); |
| } |
| DCHECK(location.IsCallerFrameSlot()); |
| LiftoffRegister reg = __ GetUnusedRegister(rc, pinned); |
| __ LoadCallerFrameSlot(reg, -location.AsCallerFrameSlot(), reg_type); |
| return reg; |
| }; |
| |
| LiftoffRegister reg = |
| LoadToReg(descriptor_->GetInputLocation(input_idx), {}); |
| if (needs_pair) { |
| LiftoffRegister reg2 = |
| LoadToReg(descriptor_->GetInputLocation(input_idx + 1), |
| LiftoffRegList::ForRegs(reg)); |
| reg = LiftoffRegister::ForPair(reg.gp(), reg2.gp()); |
| } |
| __ PushRegister(type, reg); |
| |
| return needs_pair ? 2 : 1; |
| } |
| |
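| // Emits a stack check against the stack limit loaded from the instance; |
| // the slow path (a runtime stub call) is emitted as out-of-line code. |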
| void StackCheck(WasmCodePosition position) { |
| DEBUG_CODE_COMMENT("stack check"); |
| if (!FLAG_wasm_stack_checks || !env_->runtime_exception_support) return; |
| LiftoffRegList regs_to_save = __ cache_state()->used_registers; |
| SpilledRegistersForInspection* spilled_regs = nullptr; |
| Register limit_address = __ GetUnusedRegister(kGpReg, {}).gp(); |
| if (V8_UNLIKELY(for_debugging_)) { |
| regs_to_save = {}; |
| spilled_regs = GetSpilledRegistersForInspection(); |
| } |
| out_of_line_code_.push_back(OutOfLineCode::StackCheck( |
| position, regs_to_save, spilled_regs, |
| RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling))); |
| OutOfLineCode& ool = out_of_line_code_.back(); |
| LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize); |
| __ StackCheck(ool.label.get(), limit_address); |
| __ bind(ool.continuation.get()); |
| } |
| |
| bool SpillLocalsInitially(FullDecoder* decoder, uint32_t num_params) { |
| int actual_locals = __ num_locals() - num_params; |
| DCHECK_LE(0, actual_locals); |
| constexpr int kNumCacheRegisters = NumRegs(kLiftoffAssemblerGpCacheRegs); |
| // If we have many locals, we put them on the stack initially. This avoids |
| // having to spill them on merge points. Use of these initial values should |
| // be rare anyway. |
| if (actual_locals > kNumCacheRegisters / 2) return true; |
| // If there are locals which are not i32 or i64, we also spill all locals, |
| // because other types cannot be initialized to constants. |
| for (uint32_t param_idx = num_params; param_idx < __ num_locals(); |
| ++param_idx) { |
| ValueType type = decoder->local_type(param_idx); |
| if (type != kWasmI32 && type != kWasmI64) return true; |
| } |
| return false; |
| } |
| |
| void TierUpFunction(FullDecoder* decoder) { |
| __ CallRuntimeStub(WasmCode::kWasmTriggerTierUp); |
| DefineSafepoint(); |
| } |
| |
| void TraceFunctionEntry(FullDecoder* decoder) { |
| DEBUG_CODE_COMMENT("trace function entry"); |
| __ SpillAllRegisters(); |
| source_position_table_builder_.AddPosition( |
| __ pc_offset(), SourcePosition(decoder->position()), false); |
| __ CallRuntimeStub(WasmCode::kWasmTraceEnter); |
| DefineSafepoint(); |
| } |
| |
| void StartFunctionBody(FullDecoder* decoder, Control* block) { |
| for (uint32_t i = 0; i < __ num_locals(); ++i) { |
| if (!CheckSupportedType(decoder, |
| FLAG_experimental_liftoff_extern_ref |
| ? kSupportedTypes |
| : kSupportedTypesWithoutRefs, |
| __ local_type(i), "param")) |
| return; |
| } |
| |
| // Input 0 is the call target, the instance is at 1. |
| constexpr int kInstanceParameterIndex = 1; |
| // Store the instance parameter to a special stack slot. |
| compiler::LinkageLocation instance_loc = |
| descriptor_->GetInputLocation(kInstanceParameterIndex); |
| DCHECK(instance_loc.IsRegister()); |
| DCHECK(!instance_loc.IsAnyRegister()); |
| Register instance_reg = Register::from_code(instance_loc.AsRegister()); |
| DCHECK_EQ(kWasmInstanceRegister, instance_reg); |
| |
| // Parameter 0 is the instance parameter. |
| uint32_t num_params = |
| static_cast<uint32_t>(decoder->sig_->parameter_count()); |
| |
| __ CodeEntry(); |
| |
| DEBUG_CODE_COMMENT("enter frame"); |
| __ EnterFrame(StackFrame::WASM); |
| __ set_has_frame(true); |
| pc_offset_stack_frame_construction_ = __ PrepareStackFrame(); |
| // {PrepareStackFrame} is the first platform-specific assembler method. |
| // If this failed, we can bail out immediately, avoiding runtime overhead |
| // and potential failures because of other unimplemented methods. |
| // A platform implementing {PrepareStackFrame} must ensure that we can |
| // finish compilation without errors even if we hit unimplemented |
| // LiftoffAssembler methods. |
| if (DidAssemblerBailout(decoder)) return; |
| |
| // Process parameters. |
| if (num_params) DEBUG_CODE_COMMENT("process parameters"); |
| __ SpillInstance(instance_reg); |
| // Input 0 is the code target, 1 is the instance. First parameter at 2. |
| uint32_t input_idx = kInstanceParameterIndex + 1; |
| for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) { |
| input_idx += ProcessParameter(__ local_type(param_idx), input_idx); |
| } |
| int params_size = __ TopSpillOffset(); |
| DCHECK_EQ(input_idx, descriptor_->InputCount()); |
| |
| // Initialize locals beyond parameters. |
| if (num_params < __ num_locals()) DEBUG_CODE_COMMENT("init locals"); |
| if (SpillLocalsInitially(decoder, num_params)) { |
| for (uint32_t param_idx = num_params; param_idx < __ num_locals(); |
| ++param_idx) { |
| ValueType type = decoder->local_type(param_idx); |
| __ PushStack(type); |
| } |
| int spill_size = __ TopSpillOffset() - params_size; |
| __ FillStackSlotsWithZero(params_size, spill_size); |
| } else { |
| for (uint32_t param_idx = num_params; param_idx < __ num_locals(); |
| ++param_idx) { |
| ValueType type = decoder->local_type(param_idx); |
| __ PushConstant(type, int32_t{0}); |
| } |
| } |
| |
| if (FLAG_experimental_liftoff_extern_ref) { |
| // Initialize all reference type locals with ref.null. |
| for (uint32_t param_idx = num_params; param_idx < __ num_locals(); |
| ++param_idx) { |
| ValueType type = decoder->local_type(param_idx); |
| if (type.is_reference_type()) { |
| Register isolate_root = __ GetUnusedRegister(kGpReg, {}).gp(); |
| // We can reuse the isolate_root register as the result register. |
| Register result = isolate_root; |
| |
| LOAD_INSTANCE_FIELD(isolate_root, IsolateRoot, kSystemPointerSize); |
| __ LoadTaggedPointer( |
| result, isolate_root, no_reg, |
| IsolateData::root_slot_offset(RootIndex::kNullValue), {}); |
| __ Spill(__ cache_state()->stack_state.back().offset(), |
| LiftoffRegister(result), type); |
| } |
| } |
| } |
| DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height()); |
| |
| if (V8_UNLIKELY(debug_sidetable_builder_)) { |
| debug_sidetable_builder_->SetNumLocals(__ num_locals()); |
| } |
| |
| // The function-prologue stack check is associated with position 0, which |
| // is never a position of any instruction in the function. |
| StackCheck(0); |
| |
| if (FLAG_wasm_dynamic_tiering) { |
| // TODO(arobin): Avoid spilling registers unconditionally. |
| __ SpillAllRegisters(); |
| DEBUG_CODE_COMMENT("dynamic tiering"); |
| LiftoffRegList pinned; |
| |
| // Get the address of the number-of-calls array. |
| LiftoffRegister array_address = |
| pinned.set(__ GetUnusedRegister(kGpReg, pinned)); |
| LOAD_INSTANCE_FIELD(array_address.gp(), NumLiftoffFunctionCallsArray, |
| kSystemPointerSize); |
| |
| // Compute the correct offset in the array. |
| uint32_t offset = |
| kInt32Size * declared_function_index(env_->module, func_index_); |
| |
| // Get the number of calls and update it. |
| LiftoffRegister old_number_of_calls = |
| pinned.set(__ GetUnusedRegister(kGpReg, pinned)); |
| LiftoffRegister new_number_of_calls = |
| pinned.set(__ GetUnusedRegister(kGpReg, pinned)); |
| __ Load(old_number_of_calls, array_address.gp(), no_reg, offset, |
| LoadType::kI32Load, pinned); |
| __ emit_i32_addi(new_number_of_calls.gp(), old_number_of_calls.gp(), 1); |
| __ Store(array_address.gp(), no_reg, offset, new_number_of_calls, |
| StoreType::kI32Store, pinned); |
| |
| // Emit the runtime call if necessary. |
| Label no_tierup; |
| // Check if the number of calls is a power of 2. |
| __ emit_i32_and(old_number_of_calls.gp(), old_number_of_calls.gp(), |
| new_number_of_calls.gp()); |
| // Unary "unequal" means "different from zero". |
| __ emit_cond_jump(kUnequal, &no_tierup, kWasmI32, |
| old_number_of_calls.gp()); |
| TierUpFunction(decoder); |
| __ bind(&no_tierup); |
| } |
| |
| if (FLAG_trace_wasm) TraceFunctionEntry(decoder); |
| } |
| |
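| // Emits the code for one out-of-line entry: either a trap stub call or the |
| // stack-check slow path, including saving/restoring or spilling registers. |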
| void GenerateOutOfLineCode(OutOfLineCode* ool) { |
| DEBUG_CODE_COMMENT( |
| (std::string("out of line: ") + GetRuntimeStubName(ool->stub)).c_str()); |
| __ bind(ool->label.get()); |
| const bool is_stack_check = ool->stub == WasmCode::kWasmStackGuard; |
| const bool is_mem_out_of_bounds = |
| ool->stub == WasmCode::kThrowWasmTrapMemOutOfBounds; |
| |
| if (is_mem_out_of_bounds && env_->use_trap_handler) { |
| uint32_t pc = static_cast<uint32_t>(__ pc_offset()); |
| DCHECK_EQ(pc, __ pc_offset()); |
| protected_instructions_.emplace_back( |
| trap_handler::ProtectedInstructionData{ool->pc, pc}); |
| } |
| |
| if (!env_->runtime_exception_support) { |
| // We cannot test calls to the runtime in cctest/test-run-wasm. |
| // Therefore we emit a call to C here instead of a call to the runtime. |
| // In this mode, we never generate stack checks. |
| DCHECK(!is_stack_check); |
| __ CallTrapCallbackForTesting(); |
| DEBUG_CODE_COMMENT("leave frame"); |
| __ LeaveFrame(StackFrame::WASM); |
| __ DropStackSlotsAndRet( |
| static_cast<uint32_t>(descriptor_->StackParameterCount())); |
| return; |
| } |
| |
| // We cannot both push and spill registers. |
| DCHECK(ool->regs_to_save.is_empty() || ool->spilled_registers == nullptr); |
| if (!ool->regs_to_save.is_empty()) { |
| __ PushRegisters(ool->regs_to_save); |
| } else if (V8_UNLIKELY(ool->spilled_registers != nullptr)) { |
| for (auto& entry : ool->spilled_registers->entries) { |
| __ Spill(entry.offset, entry.reg, entry.type); |
| } |
| } |
| |
| source_position_table_builder_.AddPosition( |
| __ pc_offset(), SourcePosition(ool->position), true); |
| __ CallRuntimeStub(ool->stub); |
| // TODO(ahaas): Define a proper safepoint here. |
| safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt); |
| DCHECK_EQ(!debug_sidetable_builder_, !ool->debug_sidetable_entry_builder); |
| if (V8_UNLIKELY(ool->debug_sidetable_entry_builder)) { |
| ool->debug_sidetable_entry_builder->set_pc_offset(__ pc_offset()); |
| } |
| DCHECK_EQ(ool->continuation.get()->is_bound(), is_stack_check); |
| if (!ool->regs_to_save.is_empty()) __ PopRegisters(ool->regs_to_save); |
| if (is_stack_check) { |
| if (V8_UNLIKELY(ool->spilled_registers != nullptr)) { |
| DCHECK(for_debugging_); |
| for (auto& entry : ool->spilled_registers->entries) { |
| __ Fill(entry.reg, entry.offset, entry.type); |
| } |
| } |
| __ emit_jump(ool->continuation.get()); |
| } else { |
| __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap); |
| } |
| } |
| |
| void FinishFunction(FullDecoder* decoder) { |
| if (DidAssemblerBailout(decoder)) return; |
| for (OutOfLineCode& ool : out_of_line_code_) { |
| GenerateOutOfLineCode(&ool); |
| } |
| __ PatchPrepareStackFrame(pc_offset_stack_frame_construction_, |
| __ GetTotalFrameSize()); |
| __ FinishCode(); |
| safepoint_table_builder_.Emit(&asm_, __ GetTotalFrameSlotCountForGC()); |
| __ MaybeEmitOutOfLineConstantPool(); |
| // The previous calls may have also generated a bailout. |
| DidAssemblerBailout(decoder); |
| } |
| |
| void OnFirstError(FullDecoder* decoder) { |
| if (!did_bailout()) bailout_reason_ = kDecodeError; |
| UnuseLabels(decoder); |
| asm_.AbortCompilation(); |
| } |
| |
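| // Only called when compiling for debugging: emits breakpoints, handles |
| // stepping, and checks the "hook on function call" flag. |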
| V8_NOINLINE void EmitDebuggingInfo(FullDecoder* decoder, WasmOpcode opcode) { |
| DCHECK(for_debugging_); |
| if (!WasmOpcodes::IsBreakable(opcode)) return; |
| bool has_breakpoint = false; |
| if (next_breakpoint_ptr_) { |
| if (*next_breakpoint_ptr_ == 0) { |
| // A single breakpoint at offset 0 indicates stepping. |
| DCHECK_EQ(next_breakpoint_ptr_ + 1, next_breakpoint_end_); |
| has_breakpoint = true; |
| } else { |
| while (next_breakpoint_ptr_ != next_breakpoint_end_ && |
| *next_breakpoint_ptr_ < decoder->position()) { |
| // Skip unreachable breakpoints. |
| ++next_breakpoint_ptr_; |
| } |
| if (next_breakpoint_ptr_ == next_breakpoint_end_) { |
| next_breakpoint_ptr_ = next_breakpoint_end_ = nullptr; |
| } else if (*next_breakpoint_ptr_ == decoder->position()) { |
| has_breakpoint = true; |
| } |
| } |
| } |
| if (has_breakpoint) { |
| EmitBreakpoint(decoder); |
| // Once we have emitted a breakpoint, we don't need to check the "hook on |
| // function call" flag any more. |
| checked_hook_on_function_call_ = true; |
| } else if (!checked_hook_on_function_call_) { |
| checked_hook_on_function_call_ = true; |
| // Check the "hook on function call" flag. If set, trigger a break. |
| DEBUG_CODE_COMMENT("check hook on function call"); |
| Register flag = __ GetUnusedRegister(kGpReg, {}).gp(); |
| LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress, kSystemPointerSize); |
| Label no_break; |
| __ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U, {}); |
| // Unary "equal" means "equals zero". |
| __ emit_cond_jump(kEqual, &no_break, kWasmI32, flag); |
| EmitBreakpoint(decoder); |
| __ bind(&no_break); |
| } else if (dead_breakpoint_ == decoder->position()) { |
| DCHECK(!next_breakpoint_ptr_ || |
| *next_breakpoint_ptr_ != dead_breakpoint_); |
| // The top frame is paused at this position, but the breakpoint was |
| // removed. Adding a dead breakpoint here ensures that the source |
| // position exists, and that the offset to the return address is the |
| // same as in the old code. |
| Label cont; |
| __ emit_jump(&cont); |
| EmitBreakpoint(decoder); |
| __ bind(&cont); |
| } |
| } |
| |
| void NextInstruction(FullDecoder* decoder, WasmOpcode opcode) { |
| // Add a single check, so that the fast path can be inlined while |
| // {EmitDebuggingInfo} stays outlined. |
| if (V8_UNLIKELY(for_debugging_)) EmitDebuggingInfo(decoder, opcode); |
| TraceCacheState(decoder); |
| #ifdef DEBUG |
| SLOW_DCHECK(__ ValidateCacheState()); |
| if (WasmOpcodes::IsPrefixOpcode(opcode)) { |
| opcode = decoder->read_prefixed_opcode<Decoder::kFullValidation>( |
| decoder->pc()); |
| } |
| DEBUG_CODE_COMMENT(WasmOpcodes::OpcodeName(opcode)); |
| #endif |
| } |
| |
| void EmitBreakpoint(FullDecoder* decoder) { |
| DEBUG_CODE_COMMENT("breakpoint"); |
| DCHECK(for_debugging_); |
| source_position_table_builder_.AddPosition( |
| __ pc_offset(), SourcePosition(decoder->position()), true); |
| __ CallRuntimeStub(WasmCode::kWasmDebugBreak); |
| // TODO(ahaas): Define a proper safepoint here. |
| safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt); |
| RegisterDebugSideTableEntry(DebugSideTableBuilder::kAllowRegisters); |
| } |
| |
| void Block(FullDecoder* decoder, Control* block) {} |
| |
| void Loop(FullDecoder* decoder, Control* loop) { |
| // Before entering a loop, spill all locals to the stack, in order to free |
| // the cache registers, and to avoid unnecessarily reloading stack values |
| // into registers at branches. |
| // TODO(clemensb): Come up with a better strategy here, involving |
| // pre-analysis of the function. |
| __ SpillLocals(); |
| |
| __ PrepareLoopArgs(loop->start_merge.arity); |
| |
| // Loop labels bind at the beginning of the block. |
| __ bind(loop->label.get()); |
| |
| // Save the current cache state for the merge when jumping to this loop. |
| loop->label_state.Split(*__ cache_state()); |
| |
| // Execute a stack check in the loop header. |
| StackCheck(decoder->position()); |
| } |
| |
| void Try(FullDecoder* decoder, Control* block) { |
| unsupported(decoder, kExceptionHandling, "try"); |
| } |
| |
| void Catch(FullDecoder* decoder, Control* block, Value* exception) { |
| unsupported(decoder, kExceptionHandling, "catch"); |
| } |
| |
| void If(FullDecoder* decoder, const Value& cond, Control* if_block) { |
| DCHECK_EQ(if_block, decoder->control_at(0)); |
| DCHECK(if_block->is_if()); |
| |
| // Allocate the else state. |
| if_block->else_state = std::make_unique<ElseState>(); |
| |
| // Test the condition, jump to else if zero. |
| Register value = __ PopToRegister().gp(); |
| __ emit_cond_jump(kEqual, if_block->else_state->label.get(), kWasmI32, |
| value); |
| |
| // Store the state (after popping the value) for executing the else branch. |
| if_block->else_state->state.Split(*__ cache_state()); |
| } |
| |
| void FallThruTo(FullDecoder* decoder, Control* c) { |
| if (c->end_merge.reached) { |
| __ MergeFullStackWith(c->label_state, *__ cache_state()); |
| } else { |
| c->label_state.Split(*__ cache_state()); |
| } |
| TraceCacheState(decoder); |
| } |
| |
| void FinishOneArmedIf(FullDecoder* decoder, Control* c) { |
| DCHECK(c->is_onearmed_if()); |
| if (c->end_merge.reached) { |
| // Someone already merged to the end of the if. Merge both arms into that. |
| if (c->reachable()) { |
| // Merge the if state into the end state. |
| __ MergeFullStackWith(c->label_state, *__ cache_state()); |
| __ emit_jump(c->label.get()); |
| } |
| // Merge the else state into the end state. |
| __ bind(c->else_state->label.get()); |
| __ MergeFullStackWith(c->label_state, c->else_state->state); |
| __ cache_state()->Steal(c->label_state); |
| } else if (c->reachable()) { |
| // No merge yet at the end of the if, but we need to create a merge for |
| // both arms of this if. Thus init the merge point from the else state, |
| // then merge the if state into that. |
| DCHECK_EQ(c->start_merge.arity, c->end_merge.arity); |
| c->label_state.InitMerge(c->else_state->state, __ num_locals(), |
| c->start_merge.arity, c->stack_depth); |
| __ MergeFullStackWith(c->label_state, *__ cache_state()); |
| __ emit_jump(c->label.get()); |
| // Merge the else state into the end state. |
| __ bind(c->else_state->label.get()); |
| __ MergeFullStackWith(c->label_state, c->else_state->state); |
| __ cache_state()->Steal(c->label_state); |
| } else { |
| // No merge needed, just continue with the else state. |
| __ bind(c->else_state->label.get()); |
| __ cache_state()->Steal(c->else_state->state); |
| } |
| } |
| |
| void PopControl(FullDecoder* decoder, Control* c) { |
| if (c->is_loop()) return; // A loop just falls through. |
| if (c->is_onearmed_if()) { |
| // Special handling for one-armed ifs. |
| FinishOneArmedIf(decoder, c); |
| } else if (c->end_merge.reached) { |
| // There is a merge already. Merge our state into that, then continue with |
| // that state. |
| if (c->reachable()) { |
| __ MergeFullStackWith(c->label_state, *__ cache_state()); |
| } |
| __ cache_state()->Steal(c->label_state); |
| } else { |
| // No merge, just continue with our current state. |
| } |
| |
| if (!c->label.get()->is_bound()) __ bind(c->label.get()); |
| } |
| |
| void EndControl(FullDecoder* decoder, Control* c) {} |
| |
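| // Spills all cache registers and emits a call to a C function; |
| // {out_argument_type} is {kWasmStmt} if no stack slot is needed for an |
| // out-argument. |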
| void GenerateCCall(const LiftoffRegister* result_regs, const FunctionSig* sig, |
| ValueType out_argument_type, |
| const LiftoffRegister* arg_regs, |
| ExternalReference ext_ref) { |
| // Before making a call, spill all cache registers. |
| __ SpillAllRegisters(); |
| |
| // Store arguments on our stack, then align the stack for calling to C. |
| int param_bytes = 0; |
| for (ValueType param_type : sig->parameters()) { |
| param_bytes += param_type.element_size_bytes(); |
| } |
| int out_arg_bytes = out_argument_type == kWasmStmt |
| ? 0 |
| : out_argument_type.element_size_bytes(); |
| int stack_bytes = std::max(param_bytes, out_arg_bytes); |
| __ CallC(sig, arg_regs, result_regs, out_argument_type, stack_bytes, |
| ext_ref); |
| } |
| |
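| // {CallEmitFn} dispatches an emit function: plain callables are invoked |
| // directly, member function pointers are invoked on {asm_}, with |
| // {LiftoffRegister} arguments converted as needed (see below). |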
| template <typename EmitFn, typename... Args> |
| typename std::enable_if<!std::is_member_function_pointer<EmitFn>::value>::type |
| CallEmitFn(EmitFn fn, Args... args) { |
| fn(args...); |
| } |
| |
| template <typename EmitFn, typename... Args> |
| typename std::enable_if<std::is_member_function_pointer<EmitFn>::value>::type |
| CallEmitFn(EmitFn fn, Args... args) { |
| (asm_.*fn)(ConvertAssemblerArg(args)...); |
| } |
| |
| // Wrap a {LiftoffRegister} with implicit conversions to {Register} and |
| // {DoubleRegister}. |
| struct AssemblerRegisterConverter { |
| LiftoffRegister reg; |
| operator LiftoffRegister() { return reg; } |
| operator Register() { return reg.gp(); } |
| operator DoubleRegister() { return reg.fp(); } |
| }; |
| |
| // Convert {LiftoffRegister} to {AssemblerRegisterConverter}; other types |
| // stay unchanged. |
| template <typename T> |
| typename std::conditional<std::is_same<LiftoffRegister, T>::value, |
| AssemblerRegisterConverter, T>::type |
| ConvertAssemblerArg(T t) { |
| return {t}; |
| } |
| |
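| // Binds a fixed first argument to an emit function, e.g. the condition for |
| // the {emit_*_set_cond} instructions. |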
| template <typename EmitFn, typename ArgType> |
| struct EmitFnWithFirstArg { |
| EmitFn fn; |
| ArgType first_arg; |
| }; |
| |
| template <typename EmitFn, typename ArgType> |
| EmitFnWithFirstArg<EmitFn, ArgType> BindFirst(EmitFn fn, ArgType arg) { |
| return {fn, arg}; |
| } |
| |
| template <typename EmitFn, typename T, typename... Args> |
| void CallEmitFn(EmitFnWithFirstArg<EmitFn, T> bound_fn, Args... args) { |
| CallEmitFn(bound_fn.fn, bound_fn.first_arg, ConvertAssemblerArg(args)...); |
| } |
| |
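| // Pops the operand, picks a result register (reusing the source register |
| // if the register classes match), emits the op and pushes the result. |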
| template <ValueType::Kind src_type, ValueType::Kind result_type, class EmitFn> |
| void EmitUnOp(EmitFn fn) { |
| constexpr RegClass src_rc = reg_class_for(src_type); |
| constexpr RegClass result_rc = reg_class_for(result_type); |
| LiftoffRegister src = __ PopToRegister(); |
| LiftoffRegister dst = src_rc == result_rc |
| ? __ GetUnusedRegister(result_rc, {src}, {}) |
| : __ GetUnusedRegister(result_rc, {}); |
| CallEmitFn(fn, dst, src); |
| __ PushRegister(ValueType::Primitive(result_type), dst); |
| } |
| |
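| // Emits a float unary op; if the assembler has no native implementation, |
| // falls back to a C call through {fallback_fn}. |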
| template <ValueType::Kind type> |
| void EmitFloatUnOpWithCFallback( |
| bool (LiftoffAssembler::*emit_fn)(DoubleRegister, DoubleRegister), |
| ExternalReference (*fallback_fn)()) { |
| auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) { |
| if ((asm_.*emit_fn)(dst.fp(), src.fp())) return; |
| ExternalReference ext_ref = fallback_fn(); |
| ValueType sig_reps[] = {ValueType::Primitive(type)}; |
| FunctionSig sig(0, 1, sig_reps); |
| GenerateCCall(&dst, &sig, ValueType::Primitive(type), &src, ext_ref); |
| }; |
| EmitUnOp<type, type>(emit_with_c_fallback); |
| } |
| |
| enum TypeConversionTrapping : bool { kCanTrap = true, kNoTrap = false }; |
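| // Emits a type conversion. If the assembler has no native implementation, |
| // falls back to a C call via {fallback_fn}; trapping conversions get an |
| // out-of-line trap for unrepresentable values. |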
| template <ValueType::Kind dst_type, ValueType::Kind src_type, |
| TypeConversionTrapping can_trap> |
| void EmitTypeConversion(WasmOpcode opcode, ExternalReference (*fallback_fn)(), |
| WasmCodePosition trap_position) { |
| static constexpr RegClass src_rc = reg_class_for(src_type); |
| static constexpr RegClass dst_rc = reg_class_for(dst_type); |
| LiftoffRegister src = __ PopToRegister(); |
| LiftoffRegister dst = src_rc == dst_rc |
| ? __ GetUnusedRegister(dst_rc, {src}, {}) |
| : __ GetUnusedRegister(dst_rc, {}); |
| DCHECK_EQ(!!can_trap, trap_position > 0); |
| Label* trap = can_trap ? AddOutOfLineTrap( |
| trap_position, |
| WasmCode::kThrowWasmTrapFloatUnrepresentable) |
| : nullptr; |
| if (!__ emit_type_conversion(opcode, dst, src, trap)) { |
| DCHECK_NOT_NULL(fallback_fn); |
| ExternalReference ext_ref = fallback_fn(); |
| if (can_trap) { |
| // External references for potentially trapping conversions return int. |
| ValueType sig_reps[] = {kWasmI32, ValueType::Primitive(src_type)}; |
| FunctionSig sig(1, 1, sig_reps); |
| LiftoffRegister ret_reg = |
| __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst)); |
| LiftoffRegister dst_regs[] = {ret_reg, dst}; |
| GenerateCCall(dst_regs, &sig, ValueType::Primitive(dst_type), &src, |
| ext_ref); |
| __ emit_cond_jump(kEqual, trap, kWasmI32, ret_reg.gp()); |
| } else { |
| ValueType sig_reps[] = {ValueType::Primitive(src_type)}; |
| FunctionSig sig(0, 1, sig_reps); |
| GenerateCCall(&dst, &sig, ValueType::Primitive(dst_type), &src, |
| ext_ref); |
| } |
| } |
| __ PushRegister(ValueType::Primitive(dst_type), dst); |
| } |
| |
| void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value, |
| Value* result) { |
| #define CASE_I32_UNOP(opcode, fn) \ |
| case kExpr##opcode: \ |
| return EmitUnOp<kI32, kI32>(&LiftoffAssembler::emit_##fn); |
| #define CASE_I64_UNOP(opcode, fn) \ |
| case kExpr##opcode: \ |
| return EmitUnOp<kI64, kI64>(&LiftoffAssembler::emit_##fn); |
| #define CASE_FLOAT_UNOP(opcode, type, fn) \ |
| case kExpr##opcode: \ |
| return EmitUnOp<k##type, k##type>(&LiftoffAssembler::emit_##fn); |
| #define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, type, fn) \ |
| case kExpr##opcode: \ |
| return EmitFloatUnOpWithCFallback<k##type>(&LiftoffAssembler::emit_##fn, \ |
| &ExternalReference::wasm_##fn); |
| #define CASE_TYPE_CONVERSION(opcode, dst_type, src_type, ext_ref, can_trap) \ |
| case kExpr##opcode: \ |
| return EmitTypeConversion<k##dst_type, k##src_type, can_trap>( \ |
| kExpr##opcode, ext_ref, can_trap ? decoder->position() : 0); |
| switch (opcode) { |
| CASE_I32_UNOP(I32Clz, i32_clz) |
| CASE_I32_UNOP(I32Ctz, i32_ctz) |
| CASE_FLOAT_UNOP(F32Abs, F32, f32_abs) |
| CASE_FLOAT_UNOP(F32Neg, F32, f32_neg) |
| CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Ceil, F32, f32_ceil) |
| CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Floor, F32, f32_floor) |
| CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Trunc, F32, f32_trunc) |
| CASE_FLOAT_UNOP_WITH_CFALLBACK(F32NearestInt, F32, f32_nearest_int) |
| CASE_FLOAT_UNOP(F32Sqrt, F32, f32_sqrt) |
| CASE_FLOAT_UNOP(F64Abs, F64, f64_abs) |
| CASE_FLOAT_UNOP(F64Neg, F64, f64_neg) |
| CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Ceil, F64, f64_ceil) |
| CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Floor, F64, f64_floor) |
| CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Trunc, F64, f64_trunc) |
| CASE_FLOAT_UNOP_WITH_CFALLBACK(F64NearestInt, F64, f64_nearest_int) |
| CASE_FLOAT_UNOP(F64Sqrt, F64, f64_sqrt) |
| CASE_TYPE_CONVERSION(I32ConvertI64, I32, I64, nullptr, kNoTrap) |
| CASE_TYPE_CONVERSION(I32SConvertF32, I32, F32, nullptr, kCanTrap) |
| CASE_TYPE_CONVERSION(I32UConvertF32, I32, F32, nullptr, kCanTrap) |
| CASE_TYPE_CONVERSION(I32SConvertF64, I32, F64, nullptr, kCanTrap) |
| CASE_TYPE_CONVERSION(I32UConvertF64, I32, F64, nullptr, kCanTrap) |
| CASE_TYPE_CONVERSION(I32ReinterpretF32, I32, F32, nullptr, kNoTrap) |
| CASE_TYPE_CONVERSION(I64SConvertI32, I64, I32, nullptr, kNoTrap) |
| CASE_TYPE_CONVERSION(I64UConvertI32, I64, I32, nullptr, kNoTrap) |
| CASE_TYPE_CONVERSION(I64SConvertF32, I64, F32, |
| &ExternalReference::wasm_float32_to_int64, kCanTrap) |
| CASE_TYPE_CONVERSION(I64UConvertF32, I64, F32, |
| &ExternalReference::wasm_float32_to_uint64, kCanTrap) |
| CASE_TYPE_CONVERSION(I64SConvertF64, I64, F64, |
| &ExternalReference::wasm_float64_to_int64, kCanTrap) |
| CASE_TYPE_CONVERSION(I64UConvertF64, I64, F64, |
| &ExternalReference::wasm_float64_to_uint64, kCanTrap) |
| CASE_TYPE_CONVERSION(I64ReinterpretF64, I64, F64, nullptr, kNoTrap) |
| CASE_TYPE_CONVERSION(F32SConvertI32, F32, I32, nullptr, kNoTrap) |
| CASE_TYPE_CONVERSION(F32UConvertI32, F32, I32, nullptr, kNoTrap) |
| CASE_TYPE_CONVERSION(F32SConvertI64, F32, I64, |
| &ExternalReference::wasm_int64_to_float32, kNoTrap) |
| CASE_TYPE_CONVERSION(F32UConvertI64, F32, I64, |
| &ExternalReference::wasm_uint64_to_float32, kNoTrap) |
| CASE_TYPE_CONVERSION(F32ConvertF64, F32, F64, nullptr, kNoTrap) |
| CASE_TYPE_CONVERSION(F32ReinterpretI32, F32, I32, nullptr, kNoTrap) |
| CASE_TYPE_CONVERSION(F64SConvertI32, F64, I32, nullptr, kNoTrap) |
| CASE_TYPE_CONVERSION(F64UConvertI32, F64, I32, nullptr, kNoTrap) |
| CASE_TYPE_CONVERSION(F64SConvertI64, F64, I64, |
| &ExternalReference::wasm_int64_to_float64, kNoTrap) |
| CASE_TYPE_CONVERSION(F64UConvertI64, F64, I64, |
| &ExternalReference::wasm_uint64_to_float64, kNoTrap) |
| CASE_TYPE_CONVERSION(F64ConvertF32, F64, F32, nullptr, kNoTrap) |
| CASE_TYPE_CONVERSION(F64ReinterpretI64, F64, I64, nullptr, kNoTrap) |
| CASE_I32_UNOP(I32SExtendI8, i32_signextend_i8) |
| CASE_I32_UNOP(I32SExtendI16, i32_signextend_i16) |
| CASE_I64_UNOP(I64SExtendI8, i64_signextend_i8) |
| CASE_I64_UNOP(I64SExtendI16, i64_signextend_i16) |
| CASE_I64_UNOP(I64SExtendI32, i64_signextend_i32) |
| CASE_I64_UNOP(I64Clz, i64_clz) |
| CASE_I64_UNOP(I64Ctz, i64_ctz) |
| CASE_TYPE_CONVERSION(I32SConvertSatF32, I32, F32, nullptr, kNoTrap) |
| CASE_TYPE_CONVERSION(I32UConvertSatF32, I32, F32, nullptr, kNoTrap) |
| CASE_TYPE_CONVERSION(I32SConvertSatF64, I32, F64, nullptr, kNoTrap) |
| CASE_TYPE_CONVERSION(I32UConvertSatF64, I32, F64, nullptr, kNoTrap) |
| CASE_TYPE_CONVERSION(I64SConvertSatF32, I64, F32, |
| &ExternalReference::wasm_float32_to_int64_sat, |
| kNoTrap) |
| CASE_TYPE_CONVERSION(I64UConvertSatF32, I64, F32, |
| &ExternalReference::wasm_float32_to_uint64_sat, |
| kNoTrap) |
| CASE_TYPE_CONVERSION(I64SConvertSatF64, I64, F64, |
| &ExternalReference::wasm_float64_to_int64_sat, |
| kNoTrap) |
| CASE_TYPE_CONVERSION(I64UConvertSatF64, I64, F64, |
| &ExternalReference::wasm_float64_to_uint64_sat, |
| kNoTrap) |
| case kExprI32Eqz: |
| DCHECK(decoder->lookahead(0, kExprI32Eqz)); |
| if (decoder->lookahead(1, kExprBrIf) && !for_debugging_) { |
| DCHECK(!has_outstanding_op()); |
| outstanding_op_ = kExprI32Eqz; |
| break; |
| } |
| return EmitUnOp<kI32, kI32>(&LiftoffAssembler::emit_i32_eqz); |
| case kExprI64Eqz: |
| return EmitUnOp<kI64, kI32>(&LiftoffAssembler::emit_i64_eqz); |
| case kExprI32Popcnt: |
| return EmitUnOp<kI32, kI32>( |
| [=](LiftoffRegister dst, LiftoffRegister src) { |
| if (__ emit_i32_popcnt(dst.gp(), src.gp())) return; |
| ValueType sig_i_i_reps[] = {kWasmI32, kWasmI32}; |
| FunctionSig sig_i_i(1, 1, sig_i_i_reps); |
| GenerateCCall(&dst, &sig_i_i, kWasmStmt, &src, |
| ExternalReference::wasm_word32_popcnt()); |
| }); |
| case kExprI64Popcnt: |
| return EmitUnOp<kI64, kI64>( |
| [=](LiftoffRegister dst, LiftoffRegister src) { |
| if (__ emit_i64_popcnt(dst, src)) return; |
| // The C function returns i32. We will zero-extend later. |
| ValueType sig_i_l_reps[] = {kWasmI32, kWasmI64}; |
| FunctionSig sig_i_l(1, 1, sig_i_l_reps); |
| LiftoffRegister c_call_dst = kNeedI64RegPair ? dst.low() : dst; |
| GenerateCCall(&c_call_dst, &sig_i_l, kWasmStmt, &src, |
| ExternalReference::wasm_word64_popcnt()); |
| // Now zero-extend the result to i64. |
| __ emit_type_conversion(kExprI64UConvertI32, dst, c_call_dst, |
| nullptr); |
| }); |
| case kExprRefIsNull: |
| unsupported(decoder, kRefTypes, "ref_is_null"); |
| return; |
| default: |
| UNREACHABLE(); |
| } |
| #undef CASE_I32_UNOP |
| #undef CASE_I64_UNOP |
| #undef CASE_FLOAT_UNOP |
| #undef CASE_FLOAT_UNOP_WITH_CFALLBACK |
| #undef CASE_TYPE_CONVERSION |
| } |
| |
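| // Like {EmitBinOp}, but uses the immediate variant {fnImm} if the RHS on |
| // the value stack is a compile-time constant. |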
| template <ValueType::Kind src_type, ValueType::Kind result_type, |
| typename EmitFn, typename EmitFnImm> |
| void EmitBinOpImm(EmitFn fn, EmitFnImm fnImm) { |
| static constexpr RegClass src_rc = reg_class_for(src_type); |
| static constexpr RegClass result_rc = reg_class_for(result_type); |
| |
| LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back(); |
| // Check if the RHS is an immediate. |
| if (rhs_slot.is_const()) { |
| __ cache_state()->stack_state.pop_back(); |
| int32_t imm = rhs_slot.i32_const(); |
| |
| LiftoffRegister lhs = __ PopToRegister(); |
| // Either reuse {lhs} for {dst}, or choose a register (pair) which does |
| // not overlap, for easier code generation. |
| LiftoffRegList pinned = LiftoffRegList::ForRegs(lhs); |
| LiftoffRegister dst = src_rc == result_rc |
| ? __ GetUnusedRegister(result_rc, {lhs}, pinned) |
| : __ GetUnusedRegister(result_rc, pinned); |
| |
| CallEmitFn(fnImm, dst, lhs, imm); |
| __ PushRegister(ValueType::Primitive(result_type), dst); |
| } else { |
| // The RHS was not an immediate. |
| EmitBinOp<src_type, result_type>(fn); |
| } |
| } |
| |
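| // Pops both operands, picks a result register (reusing an input if the |
| // register classes match), emits the op and pushes the result. With |
| // {swap_lhs_rhs}, the operands are exchanged before emitting. |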
| template <ValueType::Kind src_type, ValueType::Kind result_type, |
| bool swap_lhs_rhs = false, typename EmitFn> |
| void EmitBinOp(EmitFn fn) { |
| static constexpr RegClass src_rc = reg_class_for(src_type); |
| static constexpr RegClass result_rc = reg_class_for(result_type); |
| LiftoffRegister rhs = __ PopToRegister(); |
| LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)); |
| LiftoffRegister dst = src_rc == result_rc |
| ? __ GetUnusedRegister(result_rc, {lhs, rhs}, {}) |
| : __ GetUnusedRegister(result_rc, {}); |
| |
| if (swap_lhs_rhs) std::swap(lhs, rhs); |
| |
| CallEmitFn(fn, dst, lhs, rhs); |
| __ PushRegister(ValueType::Primitive(result_type), dst); |
| } |
| |
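| // Emits a C call for i64 division/remainder when no native instruction is |
| // available. The C function signals division by zero by returning 0 and an |
| // unrepresentable result by returning -1, which is checked below. |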
| void EmitDivOrRem64CCall(LiftoffRegister dst, LiftoffRegister lhs, |
| LiftoffRegister rhs, ExternalReference ext_ref, |
| Label* trap_by_zero, |
| Label* trap_unrepresentable = nullptr) { |
| // Cannot emit native instructions; build a C call instead. |
| LiftoffRegister ret = |
| __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst)); |
| LiftoffRegister tmp = |
| __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst, ret)); |
| LiftoffRegister arg_regs[] = {lhs, rhs}; |
| LiftoffRegister result_regs[] = {ret, dst}; |
| ValueType sig_types[] = {kWasmI32, kWasmI64, kWasmI64}; |
| // <i64, i64> -> i32 (with i64 output argument) |
| FunctionSig sig(1, 2, sig_types); |
| GenerateCCall(result_regs, &sig, kWasmI64, arg_regs, ext_ref); |
| __ LoadConstant(tmp, WasmValue(int32_t{0})); |
| __ emit_cond_jump(kEqual, trap_by_zero, kWasmI32, ret.gp(), tmp.gp()); |
| if (trap_unrepresentable) { |
| __ LoadConstant(tmp, WasmValue(int32_t{-1})); |
| __ emit_cond_jump(kEqual, trap_unrepresentable, kWasmI32, ret.gp(), |
| tmp.gp()); |
| } |
| } |
| |
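| // For an i32 comparison immediately followed by br_if (and not compiling |
| // for debugging), the comparison is not emitted here but recorded in |
| // {outstanding_op_}, to be fused with the following conditional branch. |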
| template <WasmOpcode opcode> |
| void EmitI32CmpOp(FullDecoder* decoder) { |
| DCHECK(decoder->lookahead(0, opcode)); |
| if (decoder->lookahead(1, kExprBrIf) && !for_debugging_) { |
| DCHECK(!has_outstanding_op()); |
| outstanding_op_ = opcode; |
| return; |
| } |
| return EmitBinOp<kI32, kI32>(BindFirst(&LiftoffAssembler::emit_i32_set_cond, |
| GetCompareCondition(opcode))); |
| } |
| |
| void BinOp(FullDecoder* decoder, WasmOpcode opcode, const Value& lhs, |
| const Value& rhs, Value* result) { |
| #define CASE_I64_SHIFTOP(opcode, fn) \ |
| case kExpr##opcode: \ |
| return EmitBinOpImm<kI64, kI64>( \ |
| [=](LiftoffRegister dst, LiftoffRegister src, \ |
| LiftoffRegister amount) { \ |
| __ emit_##fn(dst, src, \ |
| amount.is_gp_pair() ? amount.low_gp() : amount.gp()); \ |
| }, \ |
| &LiftoffAssembler::emit_##fn##i); |
| #define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \ |
| case kExpr##opcode: \ |
| return EmitBinOp<k##type, k##type>( \ |
| [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \ |
| LiftoffRegister args[] = {lhs, rhs}; \ |
| auto ext_ref = ExternalReference::ext_ref_fn(); \ |
| ValueType sig_reps[] = {kWasm##type, kWasm##type, kWasm##type}; \ |
| const bool out_via_stack = kWasm##type == kWasmI64; \ |
| FunctionSig sig(out_via_stack ? 0 : 1, 2, sig_reps); \ |
| ValueType out_arg_type = out_via_stack ? kWasmI64 : kWasmStmt; \ |
| GenerateCCall(&dst, &sig, out_arg_type, args, ext_ref); \ |
| }); |
| switch (opcode) { |
| case kExprI32Add: |
| return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_add, |
| &LiftoffAssembler::emit_i32_addi); |
| case kExprI32Sub: |
| return EmitBinOp<kI32, kI32>(&LiftoffAssembler::emit_i32_sub); |
| case kExprI32Mul: |
| return EmitBinOp<kI32, kI32>(&LiftoffAssembler::emit_i32_mul); |
| case kExprI32And: |
| return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_and, |
| &LiftoffAssembler::emit_i32_andi); |
| case kExprI32Ior: |
| return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_or, |
| &LiftoffAssembler::emit_i32_ori); |
| case kExprI32Xor: |
| return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_xor, |
| &LiftoffAssembler::emit_i32_xori); |
| case kExprI32Eq: |
| return EmitI32CmpOp<kExprI32Eq>(decoder); |
| case kExprI32Ne: |
| return EmitI32CmpOp<kExprI32Ne>(decoder); |
| case kExprI32LtS: |
| return EmitI32CmpOp<kExprI32LtS>(decoder); |
| case kExprI32LtU: |
| return EmitI32CmpOp<kExprI32LtU>(decoder); |
| case kExprI32GtS: |
| return EmitI32CmpOp<kExprI32GtS>(decoder); |
| case kExprI32GtU: |
| return EmitI32CmpOp<kExprI32GtU>(decoder); |
| case kExprI32LeS: |
| return EmitI32CmpOp<kExprI32LeS>(decoder); |
| case kExprI32LeU: |
| return EmitI32CmpOp<kExprI32LeU>(decoder); |
| case kExprI32GeS: |
| return EmitI32CmpOp<kExprI32GeS>(decoder); |
| case kExprI32GeU: |
| return EmitI32CmpOp<kExprI32GeU>(decoder); |
| case kExprI64Add: |
| return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_add, |
| &LiftoffAssembler::emit_i64_addi); |
| case kExprI64Sub: |
| return EmitBinOp<kI64, kI64>(&LiftoffAssembler::emit_i64_sub); |
| case kExprI64Mul: |
| return EmitBinOp<kI64, kI64>(&LiftoffAssembler::emit_i64_mul); |
| case kExprI64And: |
| return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_and, |
| &LiftoffAssembler::emit_i64_andi); |
| case kExprI64Ior: |
| return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_or, |
| &LiftoffAssembler::emit_i64_ori); |
| case kExprI64Xor: |
| return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_xor, |
| &LiftoffAssembler::emit_i64_xori); |
| case kExprI64Eq: |
| return EmitBinOp<kI64, kI32>( |
| BindFirst(&LiftoffAssembler::emit_i64_set_cond, kEqual)); |
| case kExprI64Ne: |
| return EmitBinOp<kI64, kI32>( |
| BindFirst(&LiftoffAssembler::emit_i64_set_cond, kUnequal)); |
| case kExprI64LtS: |
| return EmitBinOp<kI64, kI32>( |
| BindFirst(&LiftoffAssembler::emit_i64_set_cond, kSignedLessThan)); |
| case kExprI64LtU: |
| return EmitBinOp<kI64, kI32>( |
| BindFirst(&LiftoffAssembler::emit_i64_set_cond, kUnsignedLessThan)); |
| case kExprI64GtS: |
| return EmitBinOp<kI64, kI32>(BindFirst( |
| &LiftoffAssembler::emit_i64_set_cond, kSignedGreaterThan)); |
| case kExprI64GtU: |
| return EmitBinOp<kI64, kI32>(BindFirst( |
| &LiftoffAssembler::emit_i64_set_cond, kUnsignedGreaterThan)); |
| case kExprI64LeS: |
| return EmitBinOp<kI64, kI32>( |
| BindFirst(&LiftoffAssembler::emit_i64_set_cond, kSignedLessEqual)); |
| case kExprI64LeU: |
| return EmitBinOp<kI64, kI32>(BindFirst( |
| &LiftoffAssembler::emit_i64_set_cond, kUnsignedLessEqual)); |
| case kExprI64GeS: |
| return EmitBinOp<kI64, kI32>(BindFirst( |
| &LiftoffAssembler::emit_i64_set_cond, kSignedGreaterEqual)); |
| case kExprI64GeU: |
| return EmitBinOp<kI64, kI32>(BindFirst( |
| &LiftoffAssembler::emit_i64_set_cond, kUnsignedGreaterEqual)); |
| case kExprF32Eq: |
| return EmitBinOp<kF32, kI32>( |
| BindFirst(&LiftoffAssembler::emit_f32_set_cond, kEqual)); |
| case kExprF32Ne: |
| return EmitBinOp<kF32, kI32>( |
| BindFirst(&LiftoffAssembler::emit_f32_set_cond, kUnequal)); |
| case kExprF32Lt: |
| return EmitBinOp<kF32, kI32>( |
| BindFirst(&LiftoffAssembler::emit_f32_set_cond, kUnsignedLessThan)); |
| case kExprF32Gt: |
| return EmitBinOp<kF32, kI32>(BindFirst( |
| &LiftoffAssembler::emit_f32_set_cond, kUnsignedGreaterThan)); |
| case kExprF32Le: |
| return EmitBinOp<kF32, kI32>(BindFirst( |
| &LiftoffAssembler::emit_f32_set_cond, kUnsignedLessEqual)); |
| case kExprF32Ge: |
| return EmitBinOp<kF32, kI32>(BindFirst( |
| &LiftoffAssembler::emit_f32_set_cond, kUnsignedGreaterEqual)); |
| case kExprF64Eq: |
| return EmitBinOp<kF64, kI32>( |
| BindFirst(&LiftoffAssembler::emit_f64_set_cond, kEqual)); |
| case kExprF64Ne: |
| return EmitBinOp<kF64, kI32>( |
| BindFirst(&LiftoffAssembler::emit_f64_set_cond, kUnequal)); |
| case kExprF64Lt: |
| return EmitBinOp<kF64, kI32>( |
| BindFirst(&LiftoffAssembler::emit_f64_set_cond, kUnsignedLessThan)); |
| case kExprF64Gt: |
| return EmitBinOp<kF64, kI32>(BindFirst( |
| &LiftoffAssembler::emit_f64_set_cond, kUnsignedGreaterThan)); |
| case kExprF64Le: |
| return EmitBinOp<kF64, kI32>(BindFirst( |
| &LiftoffAssembler::emit_f64_set_cond, kUnsignedLessEqual)); |
| case kExprF64Ge: |
| return EmitBinOp<kF64, kI32>(BindFirst( |
| &LiftoffAssembler::emit_f64_set_cond, kUnsignedGreaterEqual)); |
| case kExprI32Shl: |
| return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_shl, |
| &LiftoffAssembler::emit_i32_shli); |
| case kExprI32ShrS: |
| return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_sar, |
| &LiftoffAssembler::emit_i32_sari); |
| case kExprI32ShrU: |
| return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_shr, |
| &LiftoffAssembler::emit_i32_shri); |
| CASE_CCALL_BINOP(I32Rol, I32, wasm_word32_rol) |
| CASE_CCALL_BINOP(I32Ror, I32, wasm_word32_ror) |
| CASE_I64_SHIFTOP(I64Shl, i64_shl) |
| CASE_I64_SHIFTOP(I64ShrS, i64_sar) |
| CASE_I64_SHIFTOP(I64ShrU, i64_shr) |
| CASE_CCALL_BINOP(I64Rol, I64, wasm_word64_rol) |
| CASE_CCALL_BINOP(I64Ror, I64, wasm_word64_ror) |
| case kExprF32Add: |
| return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_add); |
| case kExprF32Sub: |
| return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_sub); |
| case kExprF32Mul: |
| return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_mul); |
| case kExprF32Div: |
| return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_div); |
| case kExprF32Min: |
| return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_min); |
| case kExprF32Max: |
| return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_max); |
| case kExprF32CopySign: |
| return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_copysign); |
| case kExprF64Add: |
| return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_add); |
| case kExprF64Sub: |
| return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_sub); |
| case kExprF64Mul: |
| return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_mul); |
| case kExprF64Div: |
| return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_div); |
| case kExprF64Min: |
| return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_min); |
| case kExprF64Max: |
| return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_max); |
| case kExprF64CopySign: |
| return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_copysign); |
| case kExprI32DivS: |
| return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst, |
| LiftoffRegister lhs, |
| LiftoffRegister rhs) { |
| WasmCodePosition position = decoder->position(); |
| AddOutOfLineTrap(position, WasmCode::kThrowWasmTrapDivByZero); |
| // Adding the second trap might invalidate the pointer returned for |
| // the first one, thus get both pointers afterwards. |
| AddOutOfLineTrap(position, |
| WasmCode::kThrowWasmTrapDivUnrepresentable); |
| Label* div_by_zero = out_of_line_code_.end()[-2].label.get(); |
| Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get(); |
| __ emit_i32_divs(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero, |
| div_unrepresentable); |
| }); |
| case kExprI32DivU: |
| return EmitBinOp<kI32, kI32>( |
| [this, decoder](LiftoffRegister dst, LiftoffRegister lhs, |
| LiftoffRegister rhs) { |
| Label* div_by_zero = AddOutOfLineTrap( |
| decoder->position(), WasmCode::kThrowWasmTrapDivByZero); |
| __ emit_i32_divu(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero); |
| }); |
| case kExprI32RemS: |
| return EmitBinOp<kI32, kI32>( |
| [this, decoder](LiftoffRegister dst, LiftoffRegister lhs, |
| LiftoffRegister rhs) { |
| Label* rem_by_zero = AddOutOfLineTrap( |
| decoder->position(), WasmCode::kThrowWasmTrapRemByZero); |
| __ emit_i32_rems(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero); |
| }); |
| case kExprI32RemU: |
| return EmitBinOp<kI32, kI32>( |
| [this, decoder](LiftoffRegister dst, LiftoffRegister lhs, |
| LiftoffRegister rhs) { |
| Label* rem_by_zero = AddOutOfLineTrap( |
| decoder->position(), WasmCode::kThrowWasmTrapRemByZero); |
| __ emit_i32_remu(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero); |
| }); |
| case kExprI64DivS: |
| return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst, |
| LiftoffRegister lhs, |
| LiftoffRegister rhs) { |
| WasmCodePosition position = decoder->position(); |
| AddOutOfLineTrap(position, WasmCode::kThrowWasmTrapDivByZero); |
| // Adding the second trap might invalidate the pointer returned for |
| // the first one, thus get both pointers afterwards. |
| AddOutOfLineTrap(position, |
| WasmCode::kThrowWasmTrapDivUnrepresentable); |
| Label* div_by_zero = out_of_line_code_.end()[-2].label.get(); |
| Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get(); |
| if (!__ emit_i64_divs(dst, lhs, rhs, div_by_zero, |
| div_unrepresentable)) { |
| ExternalReference ext_ref = ExternalReference::wasm_int64_div(); |
| EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero, |
| div_unrepresentable); |
| } |
| }); |
| case kExprI64DivU: |
| return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst, |
| LiftoffRegister lhs, |
| LiftoffRegister rhs) { |
| Label* div_by_zero = AddOutOfLineTrap( |
| decoder->position(), WasmCode::kThrowWasmTrapDivByZero); |
| if (!__ emit_i64_divu(dst, lhs, rhs, div_by_zero)) { |
| ExternalReference ext_ref = ExternalReference::wasm_uint64_div(); |
| EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero); |
| } |
| }); |
| case kExprI64RemS: |
| return EmitBinOp<kI64, kI64>( |
| [this, decoder](LiftoffRegister dst, LiftoffRegister lhs, |
| LiftoffRegister rhs) { |
| Label* rem_by_zero = AddOutOfLineTrap( |
| decoder->position(), WasmCode::kThrowWasmTrapRemByZero); |
| if (!__ emit_i64_rems(dst, lhs, rhs, rem_by_zero)) { |
| ExternalReference ext_ref = ExternalReference::wasm_int64_mod(); |
| EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero); |
| } |
| }); |
| case kExprI64RemU: |
| return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst, |
| LiftoffRegister lhs, |
| LiftoffRegister rhs) { |
| Label* rem_by_zero = AddOutOfLineTrap( |
| decoder->position(), WasmCode::kThrowWasmTrapRemByZero); |
| if (!__ emit_i64_remu(dst, lhs, rhs, rem_by_zero)) { |
| ExternalReference ext_ref = ExternalReference::wasm_uint64_mod(); |
| EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero); |
| } |
| }); |
| default: |
| UNREACHABLE(); |
| } |
| #undef CASE_I64_SHIFTOP |
| #undef CASE_CCALL_BINOP |
| } |
| |
| void I32Const(FullDecoder* decoder, Value* result, int32_t value) { |
| __ PushConstant(kWasmI32, value); |
| } |
| |
| void I64Const(FullDecoder* decoder, Value* result, int64_t value) { |
|     // The {VarState} stores constant values as int32_t, thus we only store |
|     // 64-bit constants in this field if they fit in an int32_t. Larger values |
|     // cannot be used as an immediate value anyway, so we can just as well put |
|     // them in a register immediately. |
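|     // (For example, a constant like 0x1'0000'0000 does not fit and is loaded |
|     // into a register below.) |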
| int32_t value_i32 = static_cast<int32_t>(value); |
| if (value_i32 == value) { |
| __ PushConstant(kWasmI64, value_i32); |
| } else { |
| LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64), {}); |
| __ LoadConstant(reg, WasmValue(value)); |
| __ PushRegister(kWasmI64, reg); |
| } |
| } |
| |
| void F32Const(FullDecoder* decoder, Value* result, float value) { |
| LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {}); |
| __ LoadConstant(reg, WasmValue(value)); |
| __ PushRegister(kWasmF32, reg); |
| } |
| |
| void F64Const(FullDecoder* decoder, Value* result, double value) { |
| LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {}); |
| __ LoadConstant(reg, WasmValue(value)); |
| __ PushRegister(kWasmF64, reg); |
| } |
| |
| void RefNull(FullDecoder* decoder, ValueType type, Value*) { |
| if (!FLAG_experimental_liftoff_extern_ref) { |
| unsupported(decoder, kRefTypes, "ref_null"); |
| return; |
| } |
| Register isolate_root = __ GetUnusedRegister(kGpReg, {}).gp(); |
|     // We can reuse the isolate_root register as the result register. |
| Register result = isolate_root; |
| |
| LOAD_INSTANCE_FIELD(isolate_root, IsolateRoot, kSystemPointerSize); |
| __ LoadTaggedPointer(result, isolate_root, no_reg, |
| IsolateData::root_slot_offset(RootIndex::kNullValue), |
| {}); |
| __ PushRegister(type, LiftoffRegister(result)); |
| } |
| |
| void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) { |
| unsupported(decoder, kRefTypes, "func"); |
| } |
| |
| void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) { |
| unsupported(decoder, kRefTypes, "ref.as_non_null"); |
| } |
| |
| void Drop(FullDecoder* decoder, const Value& value) { |
| auto& slot = __ cache_state()->stack_state.back(); |
|     // If the dropped slot contains a register, decrement its use count. |
| if (slot.is_reg()) __ cache_state()->dec_used(slot.reg()); |
| __ cache_state()->stack_state.pop_back(); |
| } |
| |
| void TraceFunctionExit(FullDecoder* decoder) { |
| DEBUG_CODE_COMMENT("trace function exit"); |
| // Before making the runtime call, spill all cache registers. |
| __ SpillAllRegisters(); |
| LiftoffRegList pinned; |
| // Get a register to hold the stack slot for the return value. |
| LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); |
| __ AllocateStackSlot(info.gp(), sizeof(int64_t)); |
| |
| // Store the return value if there is exactly one. Multiple return values |
| // are not handled yet. |
| size_t num_returns = decoder->sig_->return_count(); |
| if (num_returns == 1) { |
| ValueType return_type = decoder->sig_->GetReturn(0); |
| LiftoffRegister return_reg = |
| __ LoadToRegister(__ cache_state()->stack_state.back(), pinned); |
| __ Store(info.gp(), no_reg, 0, return_reg, |
| StoreType::ForValueType(return_type), pinned); |
| } |
| // Put the parameter in its place. |
| WasmTraceExitDescriptor descriptor; |
| DCHECK_EQ(0, descriptor.GetStackParameterCount()); |
| DCHECK_EQ(1, descriptor.GetRegisterParameterCount()); |
| Register param_reg = descriptor.GetRegisterParameter(0); |
| if (info.gp() != param_reg) { |
| __ Move(param_reg, info.gp(), LiftoffAssembler::kWasmIntPtr); |
| } |
| |
| source_position_table_builder_.AddPosition( |
| __ pc_offset(), SourcePosition(decoder->position()), false); |
| __ CallRuntimeStub(WasmCode::kWasmTraceExit); |
| DefineSafepoint(); |
| |
| __ DeallocateStackSlot(sizeof(int64_t)); |
| } |
| |
| void ReturnImpl(FullDecoder* decoder) { |
| if (FLAG_trace_wasm) TraceFunctionExit(decoder); |
| size_t num_returns = decoder->sig_->return_count(); |
| if (num_returns > 0) __ MoveToReturnLocations(decoder->sig_, descriptor_); |
| DEBUG_CODE_COMMENT("leave frame"); |
| __ LeaveFrame(StackFrame::WASM); |
| __ DropStackSlotsAndRet( |
| static_cast<uint32_t>(descriptor_->StackParameterCount())); |
| } |
| |
| void DoReturn(FullDecoder* decoder, Vector<Value> /*values*/) { |
| ReturnImpl(decoder); |
| } |
| |
| void LocalGet(FullDecoder* decoder, Value* result, |
| const LocalIndexImmediate<validate>& imm) { |
| auto local_slot = __ cache_state()->stack_state[imm.index]; |
| __ cache_state()->stack_state.emplace_back( |
| local_slot.type(), __ NextSpillOffset(local_slot.type())); |
| auto* slot = &__ cache_state()->stack_state.back(); |
| if (local_slot.is_reg()) { |
| __ cache_state()->inc_used(local_slot.reg()); |
| slot->MakeRegister(local_slot.reg()); |
| } else if (local_slot.is_const()) { |
| slot->MakeConstant(local_slot.i32_const()); |
| } else { |
| DCHECK(local_slot.is_stack()); |
| auto rc = reg_class_for(local_slot.type()); |
| LiftoffRegister reg = __ GetUnusedRegister(rc, {}); |
| __ cache_state()->inc_used(reg); |
| slot->MakeRegister(reg); |
| __ Fill(reg, local_slot.offset(), local_slot.type()); |
| } |
| } |
| |
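|   // Handle local.set when the source value lives on the stack: fill the |
|   // local's register directly if this local is its only user, otherwise load |
|   // the value into a fresh register and rebind the local's slot to it. |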
| void LocalSetFromStackSlot(LiftoffAssembler::VarState* dst_slot, |
| uint32_t local_index) { |
| auto& state = *__ cache_state(); |
| auto& src_slot = state.stack_state.back(); |
| ValueType type = dst_slot->type(); |
| if (dst_slot->is_reg()) { |
| LiftoffRegister slot_reg = dst_slot->reg(); |
| if (state.get_use_count(slot_reg) == 1) { |
| __ Fill(dst_slot->reg(), src_slot.offset(), type); |
| return; |
| } |
| state.dec_used(slot_reg); |
| dst_slot->MakeStack(); |
| } |
| DCHECK_EQ(type, __ local_type(local_index)); |
| RegClass rc = reg_class_for(type); |
| LiftoffRegister dst_reg = __ GetUnusedRegister(rc, {}); |
| __ Fill(dst_reg, src_slot.offset(), type); |
| *dst_slot = LiftoffAssembler::VarState(type, dst_reg, dst_slot->offset()); |
| __ cache_state()->inc_used(dst_reg); |
| } |
| |
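|   // Copy the value at the top of the value stack into the local slot |
|   // {local_index}. For local.tee ({is_tee} == true) the value also stays on |
|   // the stack, otherwise it is popped. |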
| void LocalSet(uint32_t local_index, bool is_tee) { |
| auto& state = *__ cache_state(); |
| auto& source_slot = state.stack_state.back(); |
| auto& target_slot = state.stack_state[local_index]; |
| switch (source_slot.loc()) { |
| case kRegister: |
| if (target_slot.is_reg()) state.dec_used(target_slot.reg()); |
| target_slot.Copy(source_slot); |
| if (is_tee) state.inc_used(target_slot.reg()); |
| break; |
| case kIntConst: |
| if (target_slot.is_reg()) state.dec_used(target_slot.reg()); |
| target_slot.Copy(source_slot); |
| break; |
| case kStack: |
| LocalSetFromStackSlot(&target_slot, local_index); |
| break; |
| } |
| if (!is_tee) __ cache_state()->stack_state.pop_back(); |
| } |
| |
| void LocalSet(FullDecoder* decoder, const Value& value, |
| const LocalIndexImmediate<validate>& imm) { |
| LocalSet(imm.index, false); |
| } |
| |
| void LocalTee(FullDecoder* decoder, const Value& value, Value* result, |
| const LocalIndexImmediate<validate>& imm) { |
| LocalSet(imm.index, true); |
| } |
| |
| void AllocateLocals(FullDecoder* decoder, Vector<Value> local_values) { |
| // TODO(7748): Introduce typed functions bailout reason |
| unsupported(decoder, kGC, "let"); |
| } |
| |
| void DeallocateLocals(FullDecoder* decoder, uint32_t count) { |
| // TODO(7748): Introduce typed functions bailout reason |
| unsupported(decoder, kGC, "let"); |
| } |
| |
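|   // Compute the base address register and offset for accessing {global}. |
|   // Imported mutable globals are accessed indirectly through the |
|   // ImportedMutableGlobals array, so their address is loaded at runtime and |
|   // {*offset} is set to 0; all other globals live at a static offset from the |
|   // instance's GlobalsStart. |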
| Register GetGlobalBaseAndOffset(const WasmGlobal* global, |
| LiftoffRegList* pinned, uint32_t* offset) { |
| Register addr = pinned->set(__ GetUnusedRegister(kGpReg, {})).gp(); |
| if (global->mutability && global->imported) { |
| LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize); |
| __ Load(LiftoffRegister(addr), addr, no_reg, |
| global->index * sizeof(Address), kPointerLoadType, *pinned); |
| *offset = 0; |
| } else { |
| LOAD_INSTANCE_FIELD(addr, GlobalsStart, kSystemPointerSize); |
| *offset = global->offset; |
| } |
| return addr; |
| } |
| |
| void GlobalGet(FullDecoder* decoder, Value* result, |
| const GlobalIndexImmediate<validate>& imm) { |
| const auto* global = &env_->module->globals[imm.index]; |
| if (!CheckSupportedType(decoder, |
| FLAG_experimental_liftoff_extern_ref |
| ? kSupportedTypes |
| : kSupportedTypesWithoutRefs, |
| global->type, "global")) { |
| return; |
| } |
| |
| if (global->type.is_reference_type()) { |
| if (global->mutability && global->imported) { |
| unsupported(decoder, kRefTypes, "imported mutable globals"); |
| return; |
| } |
| |
| LiftoffRegList pinned; |
| Register globals_buffer = |
| pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); |
| LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, TaggedGlobalsBuffer); |
| Register value = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); |
| __ LoadTaggedPointer(value, globals_buffer, no_reg, |
| wasm::ObjectAccess::ElementOffsetInTaggedFixedArray( |
| imm.global->offset), |
| pinned); |
| __ PushRegister(global->type, LiftoffRegister(value)); |
| return; |
| } |
| LiftoffRegList pinned; |
| uint32_t offset = 0; |
| Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset); |
| LiftoffRegister value = |
| pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned)); |
| LoadType type = LoadType::ForValueType(global->type); |
| __ Load(value, addr, no_reg, offset, type, pinned, nullptr, true); |
| __ PushRegister(global->type, value); |
| } |
| |
| void GlobalSet(FullDecoder* decoder, const Value& value, |
| const GlobalIndexImmediate<validate>& imm) { |
| auto* global = &env_->module->globals[imm.index]; |
| if (!CheckSupportedType(decoder, |
| FLAG_experimental_liftoff_extern_ref |
| ? kSupportedTypes |
| : kSupportedTypesWithoutRefs, |
| global->type, "global")) { |
| return; |
| } |
| |
| if (global->type.is_reference_type()) { |
| if (global->mutability && global->imported) { |
| unsupported(decoder, kRefTypes, "imported mutable globals"); |
| return; |
| } |
| |
| LiftoffRegList pinned; |
| Register globals_buffer = |
| pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); |
| LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, TaggedGlobalsBuffer); |
| LiftoffRegister value = pinned.set(__ PopToRegister(pinned)); |
| __ StoreTaggedPointer(globals_buffer, |
| wasm::ObjectAccess::ElementOffsetInTaggedFixedArray( |
| imm.global->offset), |
| value, pinned); |
| return; |
| } |
| LiftoffRegList pinned; |
| uint32_t offset = 0; |
| Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset); |
| LiftoffRegister reg = pinned.set(__ PopToRegister(pinned)); |
| StoreType type = StoreType::ForValueType(global->type); |
| __ Store(addr, no_reg, offset, reg, type, {}, nullptr, true); |
| } |
| |
| void TableGet(FullDecoder* decoder, const Value& index, Value* result, |
| const TableIndexImmediate<validate>& imm) { |
| unsupported(decoder, kRefTypes, "table_get"); |
| } |
| |
| void TableSet(FullDecoder* decoder, const Value& index, const Value& value, |
| const TableIndexImmediate<validate>& imm) { |
| unsupported(decoder, kRefTypes, "table_set"); |
| } |
| |
| void Unreachable(FullDecoder* decoder) { |
| Label* unreachable_label = AddOutOfLineTrap( |
| decoder->position(), WasmCode::kThrowWasmTrapUnreachable); |
| __ emit_jump(unreachable_label); |
| __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap); |
| } |
| |
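|   // Lower a {select}: pop the condition, the false value and the true value, |
|   // then use the assembler's conditional select if it supports one, or fall |
|   // back to an explicit compare-and-branch sequence otherwise. |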
| void Select(FullDecoder* decoder, const Value& cond, const Value& fval, |
| const Value& tval, Value* result) { |
| LiftoffRegList pinned; |
| Register condition = pinned.set(__ PopToRegister()).gp(); |
| ValueType type = __ cache_state()->stack_state.end()[-1].type(); |
| DCHECK_EQ(type, __ cache_state()->stack_state.end()[-2].type()); |
| LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned)); |
| LiftoffRegister true_value = __ PopToRegister(pinned); |
| LiftoffRegister dst = __ GetUnusedRegister(true_value.reg_class(), |
| {true_value, false_value}, {}); |
| if (!__ emit_select(dst, condition, true_value, false_value, type)) { |
| // Emit generic code (using branches) instead. |
| Label cont; |
| Label case_false; |
| __ emit_cond_jump(kEqual, &case_false, kWasmI32, condition); |
| if (dst != true_value) __ Move(dst, true_value, type); |
| __ emit_jump(&cont); |
| |
| __ bind(&case_false); |
| if (dst != false_value) __ Move(dst, false_value, type); |
| __ bind(&cont); |
| } |
| __ PushRegister(type, dst); |
| } |
| |
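|   // Unconditional branch: merge the current cache state into the target's |
|   // merge state (initializing it on the first branch to this target), then |
|   // jump to the target label. |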
| void BrImpl(Control* target) { |
| if (!target->br_merge()->reached) { |
| target->label_state.InitMerge(*__ cache_state(), __ num_locals(), |
| target->br_merge()->arity, |
| target->stack_depth); |
| } |
| __ MergeStackWith(target->label_state, target->br_merge()->arity); |
| __ jmp(target->label.get()); |
| } |
| |
| void Br(FullDecoder* decoder, Control* target) { BrImpl(target); } |
| |
| void BrOrRet(FullDecoder* decoder, uint32_t depth) { |
| if (depth == decoder->control_depth() - 1) { |
| ReturnImpl(decoder); |
| } else { |
| BrImpl(decoder->control_at(depth)); |
| } |
| } |
| |
| void BrIf(FullDecoder* decoder, const Value& /* cond */, uint32_t depth) { |
| // Before branching, materialize all constants. This avoids repeatedly |
| // materializing them for each conditional branch. |
| // TODO(clemensb): Do the same for br_table. |
| if (depth != decoder->control_depth() - 1) { |
| __ MaterializeMergedConstants( |
| decoder->control_at(depth)->br_merge()->arity); |
| } |
| |
| Label cont_false; |
| Register value = __ PopToRegister().gp(); |
| |
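|     // If an i32 comparison (or i32.eqz) from the previous instruction is still |
|     // outstanding, fuse it into the conditional branch instead of first |
|     // materializing a boolean result. |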
| if (!has_outstanding_op()) { |
| // Unary "equal" means "equals zero". |
| __ emit_cond_jump(kEqual, &cont_false, kWasmI32, value); |
| } else if (outstanding_op_ == kExprI32Eqz) { |
|       // Unary "unequal" means "not equal to zero". |
| __ emit_cond_jump(kUnequal, &cont_false, kWasmI32, value); |
| outstanding_op_ = kNoOutstandingOp; |
| } else { |
| // Otherwise, it's an i32 compare opcode. |
| Condition cond = NegateCondition(GetCompareCondition(outstanding_op_)); |
| Register rhs = value; |
| Register lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)).gp(); |
| __ emit_cond_jump(cond, &cont_false, kWasmI32, lhs, rhs); |
| outstanding_op_ = kNoOutstandingOp; |
| } |
| |
| BrOrRet(decoder, depth); |
| __ bind(&cont_false); |
| } |
| |
| // Generate a branch table case, potentially reusing previously generated |
| // stack transfer code. |
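|   // The first case branching to a given depth binds a label and emits the |
|   // actual branch (or return) code; later cases targeting the same depth |
|   // simply jump to that label. |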
| void GenerateBrCase(FullDecoder* decoder, uint32_t br_depth, |
| std::map<uint32_t, MovableLabel>* br_targets) { |
| MovableLabel& label = (*br_targets)[br_depth]; |
| if (label.get()->is_bound()) { |
| __ jmp(label.get()); |
| } else { |
| __ bind(label.get()); |
| BrOrRet(decoder, br_depth); |
| } |
| } |
| |
| // Generate a branch table for input in [min, max). |
| // TODO(wasm): Generate a real branch table (like TF TableSwitch). |
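|   // The table is lowered to a binary search: each recursive call compares |
|   // {value} against the midpoint of [min, max) and dispatches into the lower |
|   // or upper half. E.g. for a table of 4 entries the key is first compared |
|   // against 2, then against 1 or 3, so at most ceil(log2(table size)) |
|   // comparisons are emitted per lookup. |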
| void GenerateBrTable(FullDecoder* decoder, LiftoffRegister tmp, |
| LiftoffRegister value, uint32_t min, uint32_t max, |
| BranchTableIterator<validate>* table_iterator, |
| std::map<uint32_t, MovableLabel>* br_targets) { |
| DCHECK_LT(min, max); |
| // Check base case. |
| if (max == min + 1) { |
| DCHECK_EQ(min, table_iterator->cur_index()); |
| GenerateBrCase(decoder, table_iterator->next(), br_targets); |
| return; |
| } |
| |
| uint32_t split = min + (max - min) / 2; |
| Label upper_half; |
| __ LoadConstant(tmp, WasmValue(split)); |
| __ emit_cond_jump(kUnsignedGreaterEqual, &upper_half, kWasmI32, value.gp(), |
| tmp.gp()); |
| // Emit br table for lower half: |
| GenerateBrTable(decoder, tmp, value, min, split, table_iterator, |
| br_targets); |
| __ bind(&upper_half); |
| // table_iterator will trigger a DCHECK if we don't stop decoding now. |
| if (did_bailout()) return; |
| // Emit br table for upper half: |
| GenerateBrTable(decoder, tmp, value, split, max, table_iterator, |
| br_targets); |
| } |
| |
| void BrTable(FullDecoder* decoder, const BranchTableImmediate<validate>& imm, |
| const Value& key) { |
| LiftoffRegList pinned; |
| LiftoffRegister value = pinned.set(__ PopToRegister()); |
| BranchTableIterator<validate> table_iterator(decoder, imm); |
| std::map<uint32_t, MovableLabel> br_targets; |
| |
| if (imm.table_count > 0) { |
| LiftoffRegister tmp = __ GetUnusedRegister(kGpReg, pinned); |
| __ LoadConstant(tmp, WasmValue(uint32_t{imm.table_count})); |
| Label case_default; |
| __ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kWasmI32, |
| value.gp(), tmp.gp()); |
| |
| GenerateBrTable(decoder, tmp, value, 0, imm.table_count, &table_iterator, |
| &br_targets); |
| |
| __ bind(&case_default); |
| // table_iterator will trigger a DCHECK if we don't stop decoding now. |
| if (did_bailout()) return; |
| } |
| |
| // Generate the default case. |
| GenerateBrCase(decoder, table_iterator.next(), &br_targets); |
| DCHECK(!table_iterator.has_next()); |
| } |
| |
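|   // Reaching the {else} means the {then} branch fell through. Forward its |
|   // state to the {if}'s end merge and jump over the else body, then continue |
|   // with the cache state that was captured when entering the {if}. |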
| void Else(FullDecoder* decoder, Control* c) { |
| if (c->reachable()) { |
| if (!c->end_merge.reached) { |
| c->label_state.InitMerge(*__ cache_state(), __ num_locals(), |
| c->end_merge.arity, c->stack_depth); |
| } |
| __ MergeFullStackWith(c->label_state, *__ cache_state()); |
| __ emit_jump(c->label.get()); |
| } |
| __ bind(c->else_state->label.get()); |
| __ cache_state()->Steal(c->else_state->state); |
| } |
| |
| SpilledRegistersForInspection* GetSpilledRegistersForInspection() { |
| DCHECK(for_debugging_); |
| // If we are generating debugging code, we really need to spill all |
| // registers to make them inspectable when stopping at the trap. |
| auto* spilled = compilation_zone_->New<SpilledRegistersForInspection>( |
| compilation_zone_); |
| for (uint32_t i = 0, e = __ cache_state()->stack_height(); i < e; ++i) { |
| auto& slot = __ cache_state()->stack_state[i]; |
| if (!slot.is_reg()) continue; |
| spilled->entries.push_back(SpilledRegistersForInspection::Entry{ |
| slot.offset(), slot.reg(), slot.type()}); |
| } |
| return spilled; |
| } |
| |
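|   // Create a new out-of-line trap for {stub} at {position} and return the |
|   // label that jumps to it. Note that the returned label points into |
|   // {out_of_line_code_}, so it may be invalidated by a later call (see the |
|   // div/rem handling above). |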
| Label* AddOutOfLineTrap(WasmCodePosition position, |
| WasmCode::RuntimeStubId stub, uint32_t pc = 0) { |
| DCHECK(FLAG_wasm_bounds_checks); |
| |
| out_of_line_code_.push_back(OutOfLineCode::Trap( |
| stub, position, pc, |
| RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling), |
| V8_UNLIKELY(for_debugging_) ? GetSpilledRegistersForInspection() |
| : nullptr)); |
| return out_of_line_code_.back().label.get(); |
| } |
| |
| enum ForceCheck : bool { kDoForceCheck = true, kDontForceCheck = false }; |
| |
|   // Returns true if the memory access is statically known to be out of bounds |
|   // (in which case a jump to the trap was already generated); returns false |
|   // otherwise. |
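|   // The dynamic check is emitted in two parts: {end_offset} = {offset} + |
|   // {access_size} - 1 is compared against the memory size (this comparison is |
|   // skipped if {end_offset} is below the declared minimum memory size), and |
|   // the index is compared against {mem_size} - {end_offset}. |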
| bool BoundsCheckMem(FullDecoder* decoder, uint32_t access_size, |
| uint64_t offset, Register index, LiftoffRegList pinned, |
| ForceCheck force_check) { |
| // If the offset does not fit in a uintptr_t, this can never succeed on this |
| // machine. |
| const bool statically_oob = |
| offset > std::numeric_limits<uintptr_t>::max() || |
| !base::IsInBounds<uintptr_t>(offset, access_size, |
| env_->max_memory_size); |
| |
| if (!force_check && !statically_oob && |
| (!FLAG_wasm_bounds_checks || env_->use_trap_handler)) { |
| return false; |
| } |
| |
| // TODO(wasm): This adds protected instruction information for the jump |
| // instruction we are about to generate. It would be better to just not add |
| // protected instruction info when the pc is 0. |
| Label* trap_label = AddOutOfLineTrap( |
| decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds, |
| env_->use_trap_handler ? __ pc_offset() : 0); |
| |
| if (statically_oob) { |
| __ emit_jump(trap_label); |
| decoder->SetSucceedingCodeDynamicallyUnreachable(); |
| return true; |
| } |
| |
| uintptr_t end_offset = offset + access_size - 1u; |
| |
| // If the end offset is larger than the smallest memory, dynamically check |
| // the end offset against the actual memory size, which is not known at |
| // compile time. Otherwise, only one check is required (see below). |
| LiftoffRegister end_offset_reg = |
| pinned.set(__ GetUnusedRegister(kGpReg, pinned)); |
| Register mem_size = __ GetUnusedRegister(kGpReg, pinned).gp(); |
| LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize); |
| |
| __ LoadConstant(end_offset_reg, WasmValue::ForUintPtr(end_offset)); |
| |
| if (end_offset >= env_->min_memory_size) { |
| __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, |
| LiftoffAssembler::kWasmIntPtr, end_offset_reg.gp(), |
| mem_size); |
| } |
| |
| // Just reuse the end_offset register for computing the effective size. |
| LiftoffRegister effective_size_reg = end_offset_reg; |
| __ emit_ptrsize_sub(effective_size_reg.gp(), mem_size, end_offset_reg.gp()); |
| |
| __ emit_u32_to_intptr(index, index); |
| |
| __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, |
| LiftoffAssembler::kWasmIntPtr, index, |
| effective_size_reg.gp()); |
| return false; |
| } |
| |
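|   // Emit a check that the effective address {index} + {offset} is aligned to |
|   // {access_size}, i.e. that ((index + offset) & (access_size - 1)) == 0, and |
|   // trap on unaligned accesses. If the static {offset} is itself aligned, |
|   // checking the index alone is sufficient. |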
| void AlignmentCheckMem(FullDecoder* decoder, uint32_t access_size, |
| uint32_t offset, Register index, |
| LiftoffRegList pinned) { |
| Label* trap_label = AddOutOfLineTrap( |
| decoder->position(), WasmCode::kThrowWasmTrapUnalignedAccess, 0); |
| Register address = __ GetUnusedRegister(kGpReg, pinned).gp(); |
| |
| const uint32_t align_mask = access_size - 1; |
| if ((offset & align_mask) == 0) { |
| // If {offset} is aligned, we can produce faster code. |
| |
| // TODO(ahaas): On Intel, the "test" instruction implicitly computes the |
| // AND of two operands. We could introduce a new variant of |
|       // {emit_cond_jump} that uses the "test" instruction without the "and"; |
|       // that would also avoid the temp register here. |
| __ emit_i32_andi(address, index, align_mask); |
| __ emit_cond_jump(kUnequal, trap_label, kWasmI32, address); |
| return; |
| } |
| __ emit_i32_addi(address, index, offset); |
| __ emit_i32_andi(address, address, align_mask); |
| |
| __ emit_cond_jump(kUnequal, trap_label, kWasmI32, address); |
| } |
| |
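|   // Call the WasmTraceMemory runtime stub with a stack-allocated |
|   // MemoryTracingInfo struct describing this access (effective offset, |
|   // is_store flag and machine representation). |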
| void TraceMemoryOperation(bool is_store, MachineRepresentation rep, |
| Register index, uint32_t offset, |
| WasmCodePosition position) { |
| // Before making the runtime call, spill all cache registers. |
| __ SpillAllRegisters(); |
| |
| LiftoffRegList pinned = LiftoffRegList::ForRegs(index); |
| // Get one register for computing the effective offset (offset + index). |
| LiftoffRegister effective_offset = |
| pinned.set(__ GetUnusedRegister(kGpReg, pinned)); |
| __ LoadConstant(effective_offset, WasmValue(offset)); |
| __ emit_i32_add(effective_offset.gp(), effective_offset.gp(), index); |
| |
| // Get a register to hold the stack slot for MemoryTracingInfo. |
| LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); |
| // Allocate stack slot for MemoryTracingInfo. |
| __ AllocateStackSlot(info.gp(), sizeof(MemoryTracingInfo)); |
| |
| // Reuse the {effective_offset} register for all information to be stored in |
| // the MemoryTracingInfo struct. |
| LiftoffRegister data = effective_offset; |
| |
| // Now store all information into the MemoryTracingInfo struct. |
| if (kSystemPointerSize == 8) { |
| // Zero-extend the effective offset to u64. |
| CHECK(__ emit_type_conversion(kExprI64UConvertI32, data, effective_offset, |
| nullptr)); |
| } |
| __ Store( |
| info.gp(), no_reg, offsetof(MemoryTracingInfo, offset), data, |
| kSystemPointerSize == 8 ? StoreType::kI64Store : StoreType::kI32Store, |
| pinned); |
| __ LoadConstant(data, WasmValue(is_store ? 1 : 0)); |
| __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, is_store), data, |
| StoreType::kI32Store8, pinned); |
| __ LoadConstant(data, WasmValue(static_cast<int>(rep))); |
| __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, mem_rep), data, |
| StoreType::kI32Store8, pinned); |
| |
| WasmTraceMemoryDescriptor descriptor; |
| DCHECK_EQ(0, descriptor.GetStackParameterCount()); |
| DCHECK_EQ(1, descriptor.GetRegisterParameterCount()); |
| Register param_reg = descriptor.GetRegisterParameter(0); |
| if (info.gp() != param_reg) { |
| __ Move(param_reg, info.gp(), LiftoffAssembler::kWasmIntPtr); |
| } |
| |
| source_position_table_builder_.AddPosition(__ pc_offset(), |
| SourcePosition(position), false); |
| __ CallRuntimeStub(WasmCode::kWasmTraceMemory); |
| DefineSafepoint(); |
| |
| __ DeallocateStackSlot(sizeof(MemoryTracingInfo)); |
| } |
| |
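|   // If {FLAG_untrusted_code_mitigations} is enabled (and no trap handler is |
|   // used), mask the effective index: {index} += {offset}, then {index} &= |
|   // MemoryMask, so that even speculative out-of-bounds accesses stay within |
|   // the memory region. The offset is folded into the index, hence {*offset} |
|   // is reset to 0. |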
| Register AddMemoryMasking(Register index, uint32_t* offset, |
| LiftoffRegList* pinned) { |
| if (!FLAG_untrusted_code_mitigations || env_->use_trap_handler) { |
| return index; |
| } |
| DEBUG_CODE_COMMENT("mask memory index"); |
| // Make sure that we can overwrite {index}. |
| if (__ cache_state()->is_used(LiftoffRegister(index))) { |
| Register old_index = index; |
| pinned->clear(LiftoffRegister(old_index)); |
| index = pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp(); |
| if (index != old_index) __ Move(index, old_index, kWasmI32); |
| } |
| Register tmp = __ GetUnusedRegister(kGpReg, *pinned).gp(); |
| __ emit_ptrsize_addi(index, index, *offset); |
| LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize); |
| __ emit_ptrsize_and(index, index, tmp); |
| *offset = 0; |
| return index; |
| } |
| |
| void LoadMem(FullDecoder* decoder, LoadType type, |
| const MemoryAccessImmediate<validate>& imm, |
| const Value& index_val, Value* result) { |
| ValueType value_type = type.value_type(); |
| if (!CheckSupportedType(decoder, kSupportedTypes, value_type, "load")) |
| return; |
| LiftoffRegList pinned; |
| Register index = pinned.set(__ PopToRegister()).gp(); |
| if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned, |
| kDontForceCheck)) { |
| return; |
| } |
| uint32_t offset = imm.offset; |
| index = AddMemoryMasking(index, &offset, &pinned); |
| DEBUG_CODE_COMMENT("load from memory"); |
| Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); |
| LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize); |
| RegClass rc = reg_class_for(value_type); |
| LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned)); |
| uint32_t protected_load_pc = 0; |
| __ Load(value, addr, index, offset, type, pinned, &protected_load_pc, true); |
| if (env_->use_trap_handler) { |
| AddOutOfLineTrap(decoder->position(), |
| WasmCode::kThrowWasmTrapMemOutOfBounds, |
| protected_load_pc); |
| } |
| __ PushRegister(value_type, value); |
| |
| if (FLAG_trace_wasm_memory) { |
| TraceMemoryOperation(false, type.mem_type().representation(), index, |
| offset, decoder->position()); |
| } |
| } |
| |
| void LoadTransform(FullDecoder* decoder, LoadType type, |
| LoadTransformationKind transform, |
| const MemoryAccessImmediate<validate>& imm, |
| const Value& index_val, Value* result) { |
| // LoadTransform requires SIMD support, so check for it here. If |
| // unsupported, bailout and let TurboFan lower the code. |
| if (!CheckSupportedType(decoder, kSupportedTypes, kWasmS128, |
| "LoadTransform")) { |
| return; |
| } |
| |
| LiftoffRegList pinned; |
| Register index = pinned.set(__ PopToRegister()).gp(); |
|     // For load splats and load zero, LoadType is the size of the load. For |
|     // load extends, LoadType is the size of the lane, and the load always |
|     // reads 8 bytes. |
| uint32_t access_size = |
| transform == LoadTransformationKind::kExtend ? 8 : type.size(); |
| if (BoundsCheckMem(decoder, access_size, imm.offset, index, pinned, |
| kDontForceCheck)) { |
| return; |
| } |
| |
| uint32_t offset = imm.offset; |
| index = AddMemoryMasking(index, &offset, &pinned); |
| DEBUG_CODE_COMMENT("load with transformation"); |
| Register addr = __ GetUnusedRegister(kGpReg, pinned).gp(); |
| LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize); |
| LiftoffRegister value = __ GetUnusedRegister(reg_class_for(kS128), {}); |
| uint32_t protected_load_pc = 0; |
| __ LoadTransform(value, addr, index, offset, type, transform, |
| &protected_load_pc); |
| |
| if (env_->use_trap_handler) { |
| AddOutOfLineTrap(decoder->position(), |
| WasmCode::kThrowWasmTrapMemOutOfBounds, |
| protected_load_pc); |
| } |
| __ PushRegister(ValueType::Primitive(kS128), value); |
| |
| if (FLAG_trace_wasm_memory) { |
|       // Load extends are again special; they always read a full 64-bit word. |
| MachineRepresentation mem_rep = |
| transform == LoadTransformationKind::kExtend |
| ? MachineRepresentation::kWord64 |
| : type.mem_type().representation(); |
| TraceMemoryOperation(false, mem_rep, index, offset, decoder->position()); |
| } |
| } |
| |
| void LoadLane(FullDecoder* decoder, LoadType type, const Value& value, |
| const Value& index, const MemoryAccessImmediate<validate>& imm, |
| const uint8_t laneidx, Value* result) { |
| unsupported(decoder, kSimd, "simd load lane"); |
| } |
| |
| void StoreMem(FullDecoder* decoder, StoreType type, |
| const MemoryAccessImmediate<validate>& imm, |
| const Value& index_val, const Value& value_val) { |
| ValueType value_type = type.value_type(); |
| if (!CheckSupportedType(decoder, kSupportedTypes, value_type, "store")) |
| return; |
| LiftoffRegList pinned; |
| LiftoffRegister value = pinned.set(__ PopToRegister()); |
| Register index = pinned.set(__ PopToRegister(pinned)).gp(); |
| if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned, |
| kDontForceCheck)) { |
| return; |
| } |
| uint32_t offset = imm.offset; |
| index = AddMemoryMasking(index, &offset, &pinned); |
| DEBUG_CODE_COMMENT("store to memory"); |
| Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); |
| LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize); |
| uint32_t protected_store_pc = 0; |
| LiftoffRegList outer_pinned; |
| if (FLAG_trace_wasm_memory) outer_pinned.set(index); |
| __ Store(addr, index, offset, value, type, outer_pinned, |
| &protected_store_pc, true); |
| if (env_->use_trap_handler) { |
| AddOutOfLineTrap(decoder->position(), |
| WasmCode::kThrowWasmTrapMemOutOfBounds, |
| protected_store_pc); |
| } |
| if (FLAG_trace_wasm_memory) { |
| TraceMemoryOperation(true, type.mem_rep(), index, offset, |
| decoder->position()); |
| } |
| } |
| |
| void StoreLane(FullDecoder* decoder, StoreType type, |
| const MemoryAccessImmediate<validate>& imm, const Value& index, |
| const Value& value, const uint8_t laneidx) { |
|     unsupported(decoder, kSimd, "simd store lane"); |
| } |
| |
| void CurrentMemoryPages(FullDecoder* decoder, Value* result) { |
| Register mem_size = __ GetUnusedRegister(kGpReg, {}).gp(); |
| LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize); |
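|     // The instance stores the memory size in bytes; shift by the page size |
|     // (log2) to obtain the page count. |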
| __ emit_ptrsize_shri(mem_size, mem_size, kWasmPageSizeLog2); |
| __ PushRegister(kWasmI32, LiftoffRegister(mem_size)); |
| } |
| |
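|   // Implement {memory.grow} by calling the WasmMemoryGrow runtime stub: the |
|   // requested number of pages is passed in the stub's single register |
|   // parameter and the stub's i32 result is pushed onto the value stack. |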
| void MemoryGrow(FullDecoder* decoder, const Value& value, Value* result_val) { |
| // Pop the input, then spill all cache registers to make the runtime call. |
| LiftoffRegList pinned; |
| LiftoffRegister input = pinned.set(__ PopToRegister()); |
| __ SpillAllRegisters(); |
| |
| constexpr Register kGpReturnReg = kGpReturnRegisters[0]; |
| static_assert(kLiftoffAssemblerGpCacheRegs & kGpReturnReg.bit(), |
| "first return register is a cache register (needs more " |
| "complex code here otherwise)"); |
| LiftoffRegister result = pinned.set(LiftoffRegister(kGpReturnReg)); |
| |
| WasmMemoryGrowDescriptor descriptor; |
| DCHECK_EQ(0, descriptor.GetStackParameterCount()); |
| DCHECK_EQ(1, descriptor.GetRegisterParameterCount()); |
| DCHECK_EQ(kWasmI32.machine_type(), descriptor.GetParameterType(0)); |
| |
| Register param_reg = descriptor.GetRegisterParameter(0); |
| if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kWasmI32); |
| |
| __ CallRuntimeStub(WasmCode::kWasmMemoryGrow); |
| DefineSafepoint(); |
| RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill); |
| |
| if (kReturnRegister0 != result.gp()) { |
| __ Move(result.gp(), kReturnRegister0, kWasmI32); |
| } |
| |
| __ PushRegister(kWasmI32, result); |
| } |
| |
| DebugSideTableBuilder::EntryBuilder* RegisterDebugSideTableEntry( |
| DebugSideTableBuilder::AssumeSpilling assume_spilling) { |
| if (V8_LIKELY(!debug_sidetable_builder_)) return nullptr; |
| int stack_height = static_cast<int>(__ cache_state()->stack_height()); |
| return debug_sidetable_builder_->NewEntry( |
| __ pc_offset(), __ num_locals(), stack_height, |
| __ cache_state()->stack_state.begin(), assume_spilling); |
| } |
| |
| enum CallKind : bool { kReturnCall = true, kNoReturnCall = false }; |
| |
| void CallDirect(FullDecoder* decoder, |
| const CallFunctionImmediate<validate>& imm, |
| const Value args[], Value[]) { |
| CallDirect(decoder, imm, args, nullptr, kNoReturnCall); |
| } |
| |
| void CallIndirect(FullDecoder* decoder, const Value& index_val, |
| const CallIndirectImmediate<validate>& imm, |
| const Value args[], Value returns[]) { |
| CallIndirect(decoder, index_val, imm, kNoReturnCall); |
| } |
| |
| void CallRef(FullDecoder* decoder, const Value& func_ref, |
| const FunctionSig* sig, uint32_t sig_index, const Value args[], |
| Value returns[]) { |
| unsupported(decoder, kRefTypes, "call_ref"); |
| } |
| |
| void ReturnCall(FullDecoder* decoder, |
| const CallFunctionImmediate<validate>& imm, |
| const Value args[]) { |
| CallDirect(decoder, imm, args, nullptr, kReturnCall); |
| } |
| |
| void ReturnCallIndirect(FullDecoder* decoder, const Value& index_val, |
| const CallIndirectImmediate<validate>& imm, |
| const Value args[]) { |
| CallIndirect(decoder, index_val, imm, kReturnCall); |
| } |
| |
| void ReturnCallRef(FullDecoder* decoder, const Value& func_ref, |
| const FunctionSig* sig, uint32_t sig_index, |
| const Value args[]) { |
| unsupported(decoder, kRefTypes, "call_ref"); |
| } |
| |
| void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) { |
| unsupported(decoder, kRefTypes, "br_on_null"); |
| } |
| |
| template <ValueType::Kind src_type, ValueType::Kind result_type, |
| typename EmitFn> |
| void EmitTerOp(EmitFn fn) { |
| static constexpr RegClass src_rc = reg_class_for(src_type); |
| static constexpr RegClass result_rc = reg_class_for(result_type); |
| LiftoffRegister src3 = __ PopToRegister(); |
| LiftoffRegister src2 = __ PopToRegister(LiftoffRegList::ForRegs(src3)); |
| LiftoffRegister src1 = |
| __ PopToRegister(LiftoffRegList::ForRegs(src3, src2)); |
|     // Reusing src1 and src2 will complicate codegen for select for some |
|     // backends, so we allow only reusing src3 (the mask), and pin src1 and src2. |
| LiftoffRegister dst = |
| src_rc == result_rc |
| ? __ GetUnusedRegister(result_rc, {src3}, |
| LiftoffRegList::ForRegs(src1, src2)) |
| : __ GetUnusedRegister(result_rc, {}); |
| CallEmitFn(fn, dst, src1, src2, src3); |
| __ PushRegister(ValueType::Primitive(result_type), dst); |
| } |
| |
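|   // Emit a SIMD shift. If the shift amount is a compile-time constant, use |
|   // the immediate variant {fnImm}; otherwise use the register variant {fn}. |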
| template <typename EmitFn, typename EmitFnImm> |
| void EmitSimdShiftOp(EmitFn fn, EmitFnImm fnImm) { |
| static constexpr RegClass result_rc = reg_class_for(ValueType::kS128); |
| |
| LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back(); |
| // Check if the RHS is an immediate. |
| if (rhs_slot.is_const()) { |
| __ cache_state()->stack_state.pop_back(); |
| int32_t imm = rhs_slot.i32_const(); |
| |
| LiftoffRegister operand = __ PopToRegister(); |
| LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {}); |
| |
| CallEmitFn(fnImm, dst, operand, imm); |
| __ PushRegister(kWasmS128, dst); |
| } else { |
| LiftoffRegister count = __ PopToRegister(); |
| LiftoffRegister operand = __ PopToRegister(); |
| LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {}); |
| |
| CallEmitFn(fn, dst, operand, count); |
| __ PushRegister(kWasmS128, dst); |
| } |
| } |
| |
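|   // Emit a SIMD floating-point rounding operation inline if the assembler |
|   // supports it; otherwise fall back to a C call through {ext_ref}. |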
| void EmitSimdFloatRoundingOpWithCFallback( |
| bool (LiftoffAssembler::*emit_fn)(LiftoffRegister, LiftoffRegister), |
| ExternalReference (*ext_ref)()) { |
| static constexpr RegClass rc = reg_class_for(kWasmS128); |
| LiftoffRegister src = __ PopToRegister(); |
| LiftoffRegister dst = __ GetUnusedRegister(rc, {src}, {}); |
| if (!(asm_.*emit_fn)(dst, src)) { |
| // Return v128 via stack for ARM. |
| ValueType sig_v_s_reps[] = {kWasmS128}; |
| FunctionSig sig_v_s(0, 1, sig_v_s_reps); |
| GenerateCCall(&dst, &sig_v_s, kWasmS128, &src, ext_ref()); |
| } |
| __ PushRegister(kWasmS128, dst); |
| } |
| |
| void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args, |
| Value* result) { |
| if (!CpuFeatures::SupportsWasmSimd128()) { |
| return unsupported(decoder, kSimd, "simd"); |
| } |
| switch (opcode) { |
| case wasm::kExprI8x16Swizzle: |
| return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_swizzle); |
| case wasm::kExprI8x16Splat: |
| return EmitUnOp<kI32, kS128>(&LiftoffAssembler::emit_i8x16_splat); |
| case wasm::kExprI16x8Splat: |
| return EmitUnOp<kI32, kS128>(&LiftoffAssembler::emit_i16x8_splat); |
| case wasm::kExprI32x4Splat: |
| return EmitUnOp<kI32, kS128>(&LiftoffAssembler::emit_i32x4_splat); |
| case wasm::kExprI64x2Splat: |
| return EmitUnOp<kI64, kS128>(&LiftoffAssembler::emit_i64x2_splat); |
| case wasm::kExprF32x4Splat: |
| return EmitUnOp<kF32, kS128>(&LiftoffAssembler::emit_f32x4_splat); |
| case wasm::kExprF64x2Splat: |
| return EmitUnOp<kF64, kS128>(&LiftoffAssembler::emit_f64x2_splat); |
| case wasm::kExprI8x16Eq: |
| return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_eq); |
| case wasm::kExprI8x16Ne: |
| return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_ne); |
| case wasm::kExprI8x16LtS: |
| return EmitBinOp<kS128, kS128, true>( |
| &LiftoffAssembler::emit_i8x16_gt_s); |
| case wasm::kExprI8x16LtU: |
| return EmitBinOp<kS128, kS128, true>( |
| &LiftoffAssembler::emit_i8x16_gt_u); |
| case wasm::kExprI8x16GtS: |
| return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_gt_s); |
| case wasm::kExprI8x16GtU: |
| return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_gt_u); |
| case wasm::kExprI8x16LeS: |
| return EmitBinOp<kS128, kS128, true>( |
| &LiftoffAssembler::emit_i8x16_ge_s); |
| case wasm::kExprI8x16LeU: |
| return EmitBinOp<kS128, kS128, true>( |
| &LiftoffAssembler::emit_i8x16_ge_u); |
| case wasm::kExprI8x16GeS: |
| return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_ge_s); |
| case wasm::kExprI8x16GeU: |
| return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_ge_u); |
| case wasm::kExprI16x8Eq: |
| return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_eq); |
| case wasm::kExprI16x8Ne: |
| return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_ne); |
| case wasm::kExprI16x8LtS: |
| return EmitBinOp<kS128, kS128, true>( |
| &LiftoffAssembler::emit_i16x8_gt_s); |
| case wasm::kExprI16x8LtU: |
| return EmitBinOp<kS128, kS128, true>( |
| &LiftoffAssembler::emit_i16x8_gt_u); |
| case wasm::kExprI16x8GtS: |
| return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_gt_s); |
| case wasm::kExprI16x8GtU: |
| return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_gt_u); |
| case wasm::kExprI16x8LeS: |
| return EmitBinOp<kS128, kS128, true>( |
| &LiftoffAssembler::emit_i16x8_ge_s); |
| case wasm::kExprI16x8LeU: |
| return EmitBinOp<kS128, kS128, true>( |
| &LiftoffAssembler::emit_i16x8_ge_u); |
| case wasm::kExprI16x8GeS: |
| return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_ge_s); |
| case wasm::kExprI16x8GeU: |
| return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_ge_u); |
| case wasm::kExprI32x4Eq: |
| return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_eq); |
| case wasm::kExprI32x4Ne: |
| return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_ne); |
| case wasm::kExprI32x4LtS: |
| return EmitBinOp<kS128, kS128, true>( |
| &LiftoffAssembler:: |