| // Copyright 2014 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include "src/compiler/code-generator.h" |
| |
| #include "src/arm64/assembler-arm64-inl.h" |
| #include "src/arm64/macro-assembler-arm64-inl.h" |
| #include "src/compilation-info.h" |
| #include "src/compiler/code-generator-impl.h" |
| #include "src/compiler/gap-resolver.h" |
| #include "src/compiler/node-matchers.h" |
| #include "src/compiler/osr.h" |
| #include "src/frame-constants.h" |
| #include "src/heap/heap-inl.h" |
| |
| namespace v8 { |
| namespace internal { |
| namespace compiler { |
| |
| #define __ tasm()-> |
| |
| // Adds Arm64-specific methods to convert InstructionOperands. |
| class Arm64OperandConverter final : public InstructionOperandConverter { |
| public: |
| Arm64OperandConverter(CodeGenerator* gen, Instruction* instr) |
| : InstructionOperandConverter(gen, instr) {} |
| |
| DoubleRegister InputFloat32Register(size_t index) { |
| return InputDoubleRegister(index).S(); |
| } |
| |
| DoubleRegister InputFloat64Register(size_t index) { |
| return InputDoubleRegister(index); |
| } |
| |
| DoubleRegister InputSimd128Register(size_t index) { |
| return InputDoubleRegister(index).Q(); |
| } |
| |
| CPURegister InputFloat32OrZeroRegister(size_t index) { |
| if (instr_->InputAt(index)->IsImmediate()) { |
| DCHECK_EQ(0, bit_cast<int32_t>(InputFloat32(index))); |
| return wzr; |
| } |
| DCHECK(instr_->InputAt(index)->IsFPRegister()); |
| return InputDoubleRegister(index).S(); |
| } |
| |
| CPURegister InputFloat64OrZeroRegister(size_t index) { |
| if (instr_->InputAt(index)->IsImmediate()) { |
| DCHECK_EQ(0, bit_cast<int64_t>(InputDouble(index))); |
| return xzr; |
| } |
| DCHECK(instr_->InputAt(index)->IsDoubleRegister()); |
| return InputDoubleRegister(index); |
| } |
| |
| size_t OutputCount() { return instr_->OutputCount(); } |
| |
| DoubleRegister OutputFloat32Register() { return OutputDoubleRegister().S(); } |
| |
| DoubleRegister OutputFloat64Register() { return OutputDoubleRegister(); } |
| |
| DoubleRegister OutputSimd128Register() { return OutputDoubleRegister().Q(); } |
| |
| Register InputRegister32(size_t index) { |
| return ToRegister(instr_->InputAt(index)).W(); |
| } |
| |
| Register InputOrZeroRegister32(size_t index) { |
| DCHECK(instr_->InputAt(index)->IsRegister() || |
| (instr_->InputAt(index)->IsImmediate() && (InputInt32(index) == 0))); |
| if (instr_->InputAt(index)->IsImmediate()) { |
| return wzr; |
| } |
| return InputRegister32(index); |
| } |
| |
| Register InputRegister64(size_t index) { return InputRegister(index); } |
| |
| Register InputOrZeroRegister64(size_t index) { |
| DCHECK(instr_->InputAt(index)->IsRegister() || |
| (instr_->InputAt(index)->IsImmediate() && (InputInt64(index) == 0))); |
| if (instr_->InputAt(index)->IsImmediate()) { |
| return xzr; |
| } |
| return InputRegister64(index); |
| } |
| |
| Operand InputOperand(size_t index) { |
| return ToOperand(instr_->InputAt(index)); |
| } |
| |
| Operand InputOperand64(size_t index) { return InputOperand(index); } |
| |
| Operand InputOperand32(size_t index) { |
| return ToOperand32(instr_->InputAt(index)); |
| } |
| |
| Register OutputRegister64() { return OutputRegister(); } |
| |
| Register OutputRegister32() { return ToRegister(instr_->Output()).W(); } |
| |
| Register TempRegister32(size_t index) { |
| return ToRegister(instr_->TempAt(index)).W(); |
| } |
| |
| Operand InputOperand2_32(size_t index) { |
| switch (AddressingModeField::decode(instr_->opcode())) { |
| case kMode_None: |
| return InputOperand32(index); |
| case kMode_Operand2_R_LSL_I: |
| return Operand(InputRegister32(index), LSL, InputInt5(index + 1)); |
| case kMode_Operand2_R_LSR_I: |
| return Operand(InputRegister32(index), LSR, InputInt5(index + 1)); |
| case kMode_Operand2_R_ASR_I: |
| return Operand(InputRegister32(index), ASR, InputInt5(index + 1)); |
| case kMode_Operand2_R_ROR_I: |
| return Operand(InputRegister32(index), ROR, InputInt5(index + 1)); |
| case kMode_Operand2_R_UXTB: |
| return Operand(InputRegister32(index), UXTB); |
| case kMode_Operand2_R_UXTH: |
| return Operand(InputRegister32(index), UXTH); |
| case kMode_Operand2_R_SXTB: |
| return Operand(InputRegister32(index), SXTB); |
| case kMode_Operand2_R_SXTH: |
| return Operand(InputRegister32(index), SXTH); |
| case kMode_Operand2_R_SXTW: |
| return Operand(InputRegister32(index), SXTW); |
| case kMode_MRI: |
| case kMode_MRR: |
| break; |
| } |
| UNREACHABLE(); |
| } |
| |
| Operand InputOperand2_64(size_t index) { |
| switch (AddressingModeField::decode(instr_->opcode())) { |
| case kMode_None: |
| return InputOperand64(index); |
| case kMode_Operand2_R_LSL_I: |
| return Operand(InputRegister64(index), LSL, InputInt6(index + 1)); |
| case kMode_Operand2_R_LSR_I: |
| return Operand(InputRegister64(index), LSR, InputInt6(index + 1)); |
| case kMode_Operand2_R_ASR_I: |
| return Operand(InputRegister64(index), ASR, InputInt6(index + 1)); |
| case kMode_Operand2_R_ROR_I: |
| return Operand(InputRegister64(index), ROR, InputInt6(index + 1)); |
| case kMode_Operand2_R_UXTB: |
| return Operand(InputRegister64(index), UXTB); |
| case kMode_Operand2_R_UXTH: |
| return Operand(InputRegister64(index), UXTH); |
| case kMode_Operand2_R_SXTB: |
| return Operand(InputRegister64(index), SXTB); |
| case kMode_Operand2_R_SXTH: |
| return Operand(InputRegister64(index), SXTH); |
| case kMode_Operand2_R_SXTW: |
| return Operand(InputRegister64(index), SXTW); |
| case kMode_MRI: |
| case kMode_MRR: |
| break; |
| } |
| UNREACHABLE(); |
| } |
| |
| MemOperand MemoryOperand(size_t* first_index) { |
| const size_t index = *first_index; |
| switch (AddressingModeField::decode(instr_->opcode())) { |
| case kMode_None: |
| case kMode_Operand2_R_LSR_I: |
| case kMode_Operand2_R_ASR_I: |
| case kMode_Operand2_R_ROR_I: |
| case kMode_Operand2_R_UXTB: |
| case kMode_Operand2_R_UXTH: |
| case kMode_Operand2_R_SXTB: |
| case kMode_Operand2_R_SXTH: |
| case kMode_Operand2_R_SXTW: |
| break; |
| case kMode_Operand2_R_LSL_I: |
| *first_index += 3; |
| return MemOperand(InputRegister(index + 0), InputRegister(index + 1), |
| LSL, InputInt32(index + 2)); |
| case kMode_MRI: |
| *first_index += 2; |
| return MemOperand(InputRegister(index + 0), InputInt32(index + 1)); |
| case kMode_MRR: |
| *first_index += 2; |
| return MemOperand(InputRegister(index + 0), InputRegister(index + 1)); |
| } |
| UNREACHABLE(); |
| } |
| |
| MemOperand MemoryOperand(size_t first_index = 0) { |
| return MemoryOperand(&first_index); |
| } |
| |
| Operand ToOperand(InstructionOperand* op) { |
| if (op->IsRegister()) { |
| return Operand(ToRegister(op)); |
| } |
| return ToImmediate(op); |
| } |
| |
| Operand ToOperand32(InstructionOperand* op) { |
| if (op->IsRegister()) { |
| return Operand(ToRegister(op).W()); |
| } |
| return ToImmediate(op); |
| } |
| |
| Operand ToImmediate(InstructionOperand* operand) { |
| Constant constant = ToConstant(operand); |
| switch (constant.type()) { |
| case Constant::kInt32: |
| if (RelocInfo::IsWasmSizeReference(constant.rmode())) { |
| return Operand(constant.ToInt32(), constant.rmode()); |
| } else { |
| return Operand(constant.ToInt32()); |
| } |
| case Constant::kInt64: |
| if (RelocInfo::IsWasmPtrReference(constant.rmode())) { |
| return Operand(constant.ToInt64(), constant.rmode()); |
| } else { |
| DCHECK(!RelocInfo::IsWasmSizeReference(constant.rmode())); |
| return Operand(constant.ToInt64()); |
| } |
| case Constant::kFloat32: |
| return Operand(Operand::EmbeddedNumber(constant.ToFloat32())); |
| case Constant::kFloat64: |
| return Operand(Operand::EmbeddedNumber(constant.ToFloat64().value())); |
| case Constant::kExternalReference: |
| return Operand(constant.ToExternalReference()); |
| case Constant::kHeapObject: |
| return Operand(constant.ToHeapObject()); |
| case Constant::kRpoNumber: |
| UNREACHABLE(); // TODO(dcarney): RPO immediates on arm64. |
| break; |
| } |
| UNREACHABLE(); |
| } |
| |
| MemOperand ToMemOperand(InstructionOperand* op, TurboAssembler* tasm) const { |
| DCHECK_NOT_NULL(op); |
| DCHECK(op->IsStackSlot() || op->IsFPStackSlot()); |
| return SlotToMemOperand(AllocatedOperand::cast(op)->index(), tasm); |
| } |
| |
| MemOperand SlotToMemOperand(int slot, TurboAssembler* tasm) const { |
| FrameOffset offset = frame_access_state()->GetFrameOffset(slot); |
| if (offset.from_frame_pointer()) { |
| int from_sp = offset.offset() + frame_access_state()->GetSPToFPOffset(); |
| // Convert FP-offsets to SP-offsets if it results in better code. |
| if (Assembler::IsImmLSUnscaled(from_sp) || |
| Assembler::IsImmLSScaled(from_sp, 3)) { |
| offset = FrameOffset::FromStackPointer(from_sp); |
| } |
| } |
| return MemOperand(offset.from_stack_pointer() ? tasm->StackPointer() : fp, |
| offset.offset()); |
| } |
| }; |
| |
| |
| namespace { |
| |
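// Emits the out-of-line portion of a write barrier: the barrier is skipped
// when the value is a Smi (if allowed by the mode) or lives on a page that is
// not being tracked; otherwise the slot address is computed and the record
// write stub is called, preserving lr when no frame has been set up.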
| class OutOfLineRecordWrite final : public OutOfLineCode { |
| public: |
| OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand index, |
| Register value, Register scratch0, Register scratch1, |
| RecordWriteMode mode, |
| UnwindingInfoWriter* unwinding_info_writer) |
| : OutOfLineCode(gen), |
| object_(object), |
| index_(index), |
| value_(value), |
| scratch0_(scratch0), |
| scratch1_(scratch1), |
| mode_(mode), |
| must_save_lr_(!gen->frame_access_state()->has_frame()), |
| unwinding_info_writer_(unwinding_info_writer), |
| zone_(gen->zone()) {} |
| |
| void Generate() final { |
| if (mode_ > RecordWriteMode::kValueIsPointer) { |
| __ JumpIfSmi(value_, exit()); |
| } |
| __ CheckPageFlagClear(value_, scratch0_, |
| MemoryChunk::kPointersToHereAreInterestingMask, |
| exit()); |
| __ Add(scratch1_, object_, index_); |
| RememberedSetAction const remembered_set_action = |
| mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET |
| : OMIT_REMEMBERED_SET; |
| SaveFPRegsMode const save_fp_mode = |
| frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; |
| if (must_save_lr_) { |
| // We need to save and restore lr if the frame was elided. |
| __ Push(lr, padreg); |
| unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(), |
| __ StackPointer()); |
| } |
| __ CallRecordWriteStub(object_, scratch1_, remembered_set_action, |
| save_fp_mode); |
| if (must_save_lr_) { |
| __ Pop(padreg, lr); |
| unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset()); |
| } |
| } |
| |
| private: |
| Register const object_; |
| Operand const index_; |
| Register const value_; |
| Register const scratch0_; |
| Register const scratch1_; |
| RecordWriteMode const mode_; |
| bool must_save_lr_; |
| UnwindingInfoWriter* const unwinding_info_writer_; |
| Zone* zone_; |
| }; |
| |
| |
| Condition FlagsConditionToCondition(FlagsCondition condition) { |
| switch (condition) { |
| case kEqual: |
| return eq; |
| case kNotEqual: |
| return ne; |
| case kSignedLessThan: |
| return lt; |
| case kSignedGreaterThanOrEqual: |
| return ge; |
| case kSignedLessThanOrEqual: |
| return le; |
| case kSignedGreaterThan: |
| return gt; |
| case kUnsignedLessThan: |
| return lo; |
| case kUnsignedGreaterThanOrEqual: |
| return hs; |
| case kUnsignedLessThanOrEqual: |
| return ls; |
| case kUnsignedGreaterThan: |
| return hi; |
| case kFloatLessThanOrUnordered: |
| return lt; |
| case kFloatGreaterThanOrEqual: |
| return ge; |
| case kFloatLessThanOrEqual: |
| return ls; |
| case kFloatGreaterThanOrUnordered: |
| return hi; |
| case kFloatLessThan: |
| return lo; |
| case kFloatGreaterThanOrEqualOrUnordered: |
| return hs; |
| case kFloatLessThanOrEqualOrUnordered: |
| return le; |
| case kFloatGreaterThan: |
| return gt; |
| case kOverflow: |
| return vs; |
| case kNotOverflow: |
| return vc; |
| case kUnorderedEqual: |
| case kUnorderedNotEqual: |
| break; |
| case kPositiveOrZero: |
| return pl; |
| case kNegative: |
| return mi; |
| } |
| UNREACHABLE(); |
| } |
| |
| } // namespace |
| |
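// Helper for shift instructions: a register shift amount is passed through
// unchanged, while an immediate shift amount is reduced modulo the operand
// width before being emitted.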
| #define ASSEMBLE_SHIFT(asm_instr, width) \ |
| do { \ |
| if (instr->InputAt(1)->IsRegister()) { \ |
| __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), \ |
| i.InputRegister##width(1)); \ |
| } else { \ |
| uint32_t imm = \ |
| static_cast<uint32_t>(i.InputOperand##width(1).ImmediateValue()); \ |
| __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), \ |
| imm % (width)); \ |
| } \ |
| } while (0) |
| |
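// Computes the address (base + index) into the temp register and emits a
// load-acquire from it.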
| #define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \ |
| do { \ |
| __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ |
| __ asm_instr(i.OutputRegister32(), i.TempRegister(0)); \ |
| } while (0) |
| |
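// Computes the address (base + index) into the temp register and emits a
// store-release to it.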
| #define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \ |
| do { \ |
| __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ |
| __ asm_instr(i.InputRegister32(2), i.TempRegister(0)); \ |
| } while (0) |
| |
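// Exchange loop using exclusive accesses: load-acquire exclusive the old
// value, attempt a store-release exclusive of the new value, and retry until
// the store succeeds.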
| #define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \ |
| do { \ |
| Label exchange; \ |
| __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ |
| __ Bind(&exchange); \ |
| __ load_instr(i.OutputRegister32(), i.TempRegister(0)); \ |
| __ store_instr(i.TempRegister32(1), i.InputRegister32(2), \ |
| i.TempRegister(0)); \ |
| __ Cbnz(i.TempRegister32(1), &exchange); \ |
| } while (0) |
| |
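// Compare-exchange loop: load-acquire exclusive the current value, compare it
// against the expected value (extended with 'ext'), bail out on a mismatch,
// and otherwise store-release exclusive the new value, retrying on failure.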
| #define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr, ext) \ |
| do { \ |
| Label compareExchange; \ |
| Label exit; \ |
| __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ |
| __ Bind(&compareExchange); \ |
| __ load_instr(i.OutputRegister32(), i.TempRegister(0)); \ |
| __ Cmp(i.OutputRegister32(), Operand(i.InputRegister32(2), ext)); \ |
| __ B(ne, &exit); \ |
| __ store_instr(i.TempRegister32(1), i.InputRegister32(3), \ |
| i.TempRegister(0)); \ |
| __ Cbnz(i.TempRegister32(1), &compareExchange); \ |
| __ Bind(&exit); \ |
| } while (0) |
| |
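// Read-modify-write loop: load-acquire exclusive the old value (which is also
// the result), apply the binary operation into a temp, and retry the
// store-release exclusive until it succeeds.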
| #define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr) \ |
| do { \ |
| Label binop; \ |
| __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ |
| __ Bind(&binop); \ |
| __ load_instr(i.OutputRegister32(), i.TempRegister(0)); \ |
| __ bin_instr(i.TempRegister32(1), i.OutputRegister32(), \ |
| Operand(i.InputRegister32(2))); \ |
| __ store_instr(i.TempRegister32(2), i.TempRegister32(1), \ |
| i.TempRegister(0)); \ |
| __ Cbnz(i.TempRegister32(2), &binop); \ |
| } while (0) |
| |
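// These call the C implementations of the ieee754 math functions; all
// arguments and the result are passed in floating-point registers.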
| #define ASSEMBLE_IEEE754_BINOP(name) \ |
| do { \ |
| FrameScope scope(tasm(), StackFrame::MANUAL); \ |
| __ CallCFunction( \ |
| ExternalReference::ieee754_##name##_function(__ isolate()), 0, 2); \ |
| } while (0) |
| |
| #define ASSEMBLE_IEEE754_UNOP(name) \ |
| do { \ |
| FrameScope scope(tasm(), StackFrame::MANUAL); \ |
| __ CallCFunction( \ |
| ExternalReference::ieee754_##name##_function(__ isolate()), 0, 1); \ |
| } while (0) |
| |
| void CodeGenerator::AssembleDeconstructFrame() { |
| __ Mov(csp, fp); |
| __ Pop(fp, lr); |
| |
| unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset()); |
| } |
| |
| void CodeGenerator::AssemblePrepareTailCall() { |
| if (frame_access_state()->has_frame()) { |
| __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); |
| __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| } |
| frame_access_state()->SetFrameAccessToSP(); |
| } |
| |
| void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg, |
| Register scratch1, |
| Register scratch2, |
| Register scratch3) { |
| DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3)); |
| Label done; |
| |
| // Check if current frame is an arguments adaptor frame. |
| __ Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| __ Cmp(scratch1, |
| Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); |
| __ B(ne, &done); |
| |
  // Load the arguments count from the current arguments adaptor frame (note
  // that it does not include the receiver).
| Register caller_args_count_reg = scratch1; |
| __ Ldr(caller_args_count_reg, |
| MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| __ SmiUntag(caller_args_count_reg); |
| |
| ParameterCount callee_args_count(args_reg); |
| __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2, |
| scratch3); |
| __ bind(&done); |
| } |
| |
| namespace { |
| |
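// Grows or shrinks the stack so that the number of slots above the stack
// pointer matches what the tail call expects; the delta is required to be
// even so that sp stays 16-byte aligned.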
| void AdjustStackPointerForTailCall(TurboAssembler* tasm, |
| FrameAccessState* state, |
| int new_slot_above_sp, |
| bool allow_shrinkage = true) { |
| int current_sp_offset = state->GetSPToFPSlotCount() + |
| StandardFrameConstants::kFixedSlotCountAboveFp; |
| int stack_slot_delta = new_slot_above_sp - current_sp_offset; |
| DCHECK_EQ(stack_slot_delta % 2, 0); |
| if (stack_slot_delta > 0) { |
| tasm->Claim(stack_slot_delta); |
| state->IncreaseSPDelta(stack_slot_delta); |
| } else if (allow_shrinkage && stack_slot_delta < 0) { |
| tasm->Drop(-stack_slot_delta); |
| state->IncreaseSPDelta(stack_slot_delta); |
| } |
| } |
| |
| } // namespace |
| |
| void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, |
| int first_unused_stack_slot) { |
| AdjustStackPointerForTailCall(tasm(), frame_access_state(), |
| first_unused_stack_slot, false); |
| } |
| |
| void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, |
| int first_unused_stack_slot) { |
| DCHECK_EQ(first_unused_stack_slot % 2, 0); |
| AdjustStackPointerForTailCall(tasm(), frame_access_state(), |
| first_unused_stack_slot); |
| DCHECK(instr->IsTailCall()); |
| InstructionOperandConverter g(this, instr); |
| int optional_padding_slot = g.InputInt32(instr->InputCount() - 2); |
| if (optional_padding_slot % 2) { |
| __ Poke(padreg, optional_padding_slot * kPointerSize); |
| } |
| } |
| |
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we
// need to:
//    1. compute the offset of the {CodeDataContainer} from our current
//       location and load it;
//    2. read from memory the word that contains the flags of the referenced
//       {CodeDataContainer} object;
//    3. test kMarkedForDeoptimizationBit in those flags; and
//    4. jump to the builtin if the bit is set.
| void CodeGenerator::BailoutIfDeoptimized() { |
| UseScratchRegisterScope temps(tasm()); |
| Register scratch = temps.AcquireX(); |
| { |
| // Since we always emit a bailout check at the very beginning we can be |
| // certain that the distance between here and the {CodeDataContainer} is |
| // fixed and always in range of a load. |
| int data_container_offset = |
| (Code::kCodeDataContainerOffset - Code::kHeaderSize) - __ pc_offset(); |
| DCHECK_GE(0, data_container_offset); |
| DCHECK_EQ(0, data_container_offset % 4); |
| InstructionAccurateScope scope(tasm()); |
| __ ldr_pcrel(scratch, data_container_offset >> 2); |
| } |
| __ Ldr(scratch, |
| FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset)); |
| Label not_deoptimized; |
| __ Tbz(scratch, Code::kMarkedForDeoptimizationBit, ¬_deoptimized); |
| Handle<Code> code = isolate()->builtins()->builtin_handle( |
| Builtins::kCompileLazyDeoptimizedCode); |
| __ Jump(code, RelocInfo::CODE_TARGET); |
| __ Bind(¬_deoptimized); |
| } |
| |
| // Assembles an instruction after register allocation, producing machine code. |
| CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( |
| Instruction* instr) { |
| Arm64OperandConverter i(this, instr); |
| InstructionCode opcode = instr->opcode(); |
| ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode); |
| switch (arch_opcode) { |
| case kArchCallCodeObject: { |
| // We must not share code targets for calls to builtins for wasm code, as |
| // they might need to be patched individually. |
| internal::Assembler::BlockCodeTargetSharingScope scope; |
| if (info()->IsWasm()) scope.Open(tasm()); |
| |
| if (instr->InputAt(0)->IsImmediate()) { |
| __ Call(i.InputCode(0), RelocInfo::CODE_TARGET); |
| } else { |
| Register target = i.InputRegister(0); |
| __ Add(target, target, Code::kHeaderSize - kHeapObjectTag); |
| __ Call(target); |
| } |
| RecordCallPosition(instr); |
| frame_access_state()->ClearSPDelta(); |
| break; |
| } |
| case kArchCallWasmFunction: { |
| // We must not share code targets for calls to builtins for wasm code, as |
| // they might need to be patched individually. |
| internal::Assembler::BlockCodeTargetSharingScope scope; |
| if (info()->IsWasm()) scope.Open(tasm()); |
| |
| if (instr->InputAt(0)->IsImmediate()) { |
| Address wasm_code = reinterpret_cast<Address>( |
| i.ToConstant(instr->InputAt(0)).ToInt64()); |
| if (info()->IsWasm()) { |
| __ Call(wasm_code, RelocInfo::WASM_CALL); |
| } else { |
| __ Call(wasm_code, RelocInfo::JS_TO_WASM_CALL); |
| } |
| } else { |
| Register target = i.InputRegister(0); |
| __ Call(target); |
| } |
| RecordCallPosition(instr); |
| frame_access_state()->ClearSPDelta(); |
| break; |
| } |
| case kArchTailCallCodeObjectFromJSFunction: |
| case kArchTailCallCodeObject: { |
| // We must not share code targets for calls to builtins for wasm code, as |
| // they might need to be patched individually. |
| internal::Assembler::BlockCodeTargetSharingScope scope; |
| if (info()->IsWasm()) scope.Open(tasm()); |
| |
| if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) { |
| AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister, |
| i.TempRegister(0), i.TempRegister(1), |
| i.TempRegister(2)); |
| } |
| if (instr->InputAt(0)->IsImmediate()) { |
| __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET); |
| } else { |
| Register target = i.InputRegister(0); |
| __ Add(target, target, Code::kHeaderSize - kHeapObjectTag); |
| __ Jump(target); |
| } |
| unwinding_info_writer_.MarkBlockWillExit(); |
| frame_access_state()->ClearSPDelta(); |
| frame_access_state()->SetFrameAccessToDefault(); |
| break; |
| } |
| case kArchTailCallWasm: { |
| // We must not share code targets for calls to builtins for wasm code, as |
| // they might need to be patched individually. |
| internal::Assembler::BlockCodeTargetSharingScope scope; |
| if (info()->IsWasm()) scope.Open(tasm()); |
| |
| if (instr->InputAt(0)->IsImmediate()) { |
| Address wasm_code = reinterpret_cast<Address>( |
| i.ToConstant(instr->InputAt(0)).ToInt64()); |
| if (info()->IsWasm()) { |
| __ Jump(wasm_code, RelocInfo::WASM_CALL); |
| } else { |
| __ Jump(wasm_code, RelocInfo::JS_TO_WASM_CALL); |
| } |
| |
| } else { |
| Register target = i.InputRegister(0); |
| __ Jump(target); |
| } |
| unwinding_info_writer_.MarkBlockWillExit(); |
| frame_access_state()->ClearSPDelta(); |
| frame_access_state()->SetFrameAccessToDefault(); |
| break; |
| } |
| case kArchTailCallAddress: { |
| CHECK(!instr->InputAt(0)->IsImmediate()); |
| __ Jump(i.InputRegister(0)); |
| unwinding_info_writer_.MarkBlockWillExit(); |
| frame_access_state()->ClearSPDelta(); |
| frame_access_state()->SetFrameAccessToDefault(); |
| break; |
| } |
| case kArchCallJSFunction: { |
| Register func = i.InputRegister(0); |
| if (FLAG_debug_code) { |
| // Check the function's context matches the context argument. |
| UseScratchRegisterScope scope(tasm()); |
| Register temp = scope.AcquireX(); |
| __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset)); |
| __ cmp(cp, temp); |
| __ Assert(eq, AbortReason::kWrongFunctionContext); |
| } |
| __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeOffset)); |
| __ Add(x10, x10, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| __ Call(x10); |
| RecordCallPosition(instr); |
| frame_access_state()->ClearSPDelta(); |
| break; |
| } |
| case kArchPrepareCallCFunction: |
| // We don't need kArchPrepareCallCFunction on arm64 as the instruction |
| // selector has already performed a Claim to reserve space on the stack. |
| // Frame alignment is always 16 bytes, and the stack pointer is already |
      // 16-byte aligned, so we do not need to align the stack pointer by an
      // unknown value, and it is safe to continue accessing the frame via the
      // stack pointer.
| UNREACHABLE(); |
| break; |
| case kArchSaveCallerRegisters: { |
| fp_mode_ = |
| static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())); |
| DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs); |
| // kReturnRegister0 should have been saved before entering the stub. |
| int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0); |
| DCHECK_EQ(0, bytes % kPointerSize); |
| DCHECK_EQ(0, frame_access_state()->sp_delta()); |
| frame_access_state()->IncreaseSPDelta(bytes / kPointerSize); |
| DCHECK(!caller_registers_saved_); |
| caller_registers_saved_ = true; |
| break; |
| } |
| case kArchRestoreCallerRegisters: { |
| DCHECK(fp_mode_ == |
| static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()))); |
| DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs); |
| // Don't overwrite the returned value. |
| int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0); |
| frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize)); |
| DCHECK_EQ(0, frame_access_state()->sp_delta()); |
| DCHECK(caller_registers_saved_); |
| caller_registers_saved_ = false; |
| break; |
| } |
| case kArchPrepareTailCall: |
| AssemblePrepareTailCall(); |
| break; |
| case kArchCallCFunction: { |
| int const num_parameters = MiscField::decode(instr->opcode()); |
| if (instr->InputAt(0)->IsImmediate()) { |
| ExternalReference ref = i.InputExternalReference(0); |
| __ CallCFunction(ref, num_parameters, 0); |
| } else { |
| Register func = i.InputRegister(0); |
| __ CallCFunction(func, num_parameters, 0); |
| } |
| frame_access_state()->SetFrameAccessToDefault(); |
| // Ideally, we should decrement SP delta to match the change of stack |
| // pointer in CallCFunction. However, for certain architectures (e.g. |
      // ARM), there may be a stricter alignment requirement, causing the old
      // SP to be saved on the stack. In those cases, we cannot calculate the
      // SP delta statically.
| frame_access_state()->ClearSPDelta(); |
| if (caller_registers_saved_) { |
| // Need to re-sync SP delta introduced in kArchSaveCallerRegisters. |
| // Here, we assume the sequence to be: |
| // kArchSaveCallerRegisters; |
| // kArchCallCFunction; |
| // kArchRestoreCallerRegisters; |
| int bytes = |
| __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0); |
| frame_access_state()->IncreaseSPDelta(bytes / kPointerSize); |
| } |
| break; |
| } |
| case kArchJmp: |
| AssembleArchJump(i.InputRpo(0)); |
| break; |
| case kArchTableSwitch: |
| AssembleArchTableSwitch(instr); |
| break; |
| case kArchLookupSwitch: |
| AssembleArchLookupSwitch(instr); |
| break; |
| case kArchDebugAbort: |
| DCHECK(i.InputRegister(0).is(x1)); |
| if (!frame_access_state()->has_frame()) { |
| // We don't actually want to generate a pile of code for this, so just |
| // claim there is a stack frame, without generating one. |
| FrameScope scope(tasm(), StackFrame::NONE); |
| __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), |
| RelocInfo::CODE_TARGET); |
| } else { |
| __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), |
| RelocInfo::CODE_TARGET); |
| } |
| __ Debug("kArchDebugAbort", 0, BREAK); |
| break; |
| case kArchDebugBreak: |
| __ Debug("kArchDebugBreak", 0, BREAK); |
| break; |
| case kArchComment: { |
| Address comment_string = i.InputExternalReference(0).address(); |
| __ RecordComment(reinterpret_cast<const char*>(comment_string)); |
| break; |
| } |
| case kArchNop: |
| case kArchThrowTerminator: |
      // Don't emit code for nops.
| break; |
| case kArchDeoptimize: { |
| int deopt_state_id = |
| BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore()); |
| CodeGenResult result = |
| AssembleDeoptimizerCall(deopt_state_id, current_source_position_); |
| if (result != kSuccess) return result; |
| break; |
| } |
| case kArchRet: |
| AssembleReturn(instr->InputAt(0)); |
| break; |
| case kArchStackPointer: |
| __ mov(i.OutputRegister(), tasm()->StackPointer()); |
| break; |
| case kArchFramePointer: |
| __ mov(i.OutputRegister(), fp); |
| break; |
| case kArchParentFramePointer: |
| if (frame_access_state()->has_frame()) { |
| __ ldr(i.OutputRegister(), MemOperand(fp, 0)); |
| } else { |
| __ mov(i.OutputRegister(), fp); |
| } |
| break; |
| case kArchTruncateDoubleToI: |
| __ TruncateDoubleToIDelayed(zone(), i.OutputRegister(), |
| i.InputDoubleRegister(0)); |
| break; |
| case kArchStoreWithWriteBarrier: { |
| RecordWriteMode mode = |
| static_cast<RecordWriteMode>(MiscField::decode(instr->opcode())); |
| AddressingMode addressing_mode = |
| AddressingModeField::decode(instr->opcode()); |
| Register object = i.InputRegister(0); |
| Operand index(0); |
| if (addressing_mode == kMode_MRI) { |
| index = Operand(i.InputInt64(1)); |
| } else { |
| DCHECK_EQ(addressing_mode, kMode_MRR); |
| index = Operand(i.InputRegister(1)); |
| } |
| Register value = i.InputRegister(2); |
| Register scratch0 = i.TempRegister(0); |
| Register scratch1 = i.TempRegister(1); |
| auto ool = new (zone()) |
| OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1, |
| mode, &unwinding_info_writer_); |
| __ Str(value, MemOperand(object, index)); |
| __ CheckPageFlagSet(object, scratch0, |
| MemoryChunk::kPointersFromHereAreInterestingMask, |
| ool->entry()); |
| __ Bind(ool->exit()); |
| break; |
| } |
| case kArchStackSlot: { |
| FrameOffset offset = |
| frame_access_state()->GetFrameOffset(i.InputInt32(0)); |
| Register base = offset.from_stack_pointer() ? __ StackPointer() : fp; |
| __ Add(i.OutputRegister(0), base, Operand(offset.offset())); |
| break; |
| } |
| case kIeee754Float64Acos: |
| ASSEMBLE_IEEE754_UNOP(acos); |
| break; |
| case kIeee754Float64Acosh: |
| ASSEMBLE_IEEE754_UNOP(acosh); |
| break; |
| case kIeee754Float64Asin: |
| ASSEMBLE_IEEE754_UNOP(asin); |
| break; |
| case kIeee754Float64Asinh: |
| ASSEMBLE_IEEE754_UNOP(asinh); |
| break; |
| case kIeee754Float64Atan: |
| ASSEMBLE_IEEE754_UNOP(atan); |
| break; |
| case kIeee754Float64Atanh: |
| ASSEMBLE_IEEE754_UNOP(atanh); |
| break; |
| case kIeee754Float64Atan2: |
| ASSEMBLE_IEEE754_BINOP(atan2); |
| break; |
| case kIeee754Float64Cos: |
| ASSEMBLE_IEEE754_UNOP(cos); |
| break; |
| case kIeee754Float64Cosh: |
| ASSEMBLE_IEEE754_UNOP(cosh); |
| break; |
| case kIeee754Float64Cbrt: |
| ASSEMBLE_IEEE754_UNOP(cbrt); |
| break; |
| case kIeee754Float64Exp: |
| ASSEMBLE_IEEE754_UNOP(exp); |
| break; |
| case kIeee754Float64Expm1: |
| ASSEMBLE_IEEE754_UNOP(expm1); |
| break; |
| case kIeee754Float64Log: |
| ASSEMBLE_IEEE754_UNOP(log); |
| break; |
| case kIeee754Float64Log1p: |
| ASSEMBLE_IEEE754_UNOP(log1p); |
| break; |
| case kIeee754Float64Log2: |
| ASSEMBLE_IEEE754_UNOP(log2); |
| break; |
| case kIeee754Float64Log10: |
| ASSEMBLE_IEEE754_UNOP(log10); |
| break; |
| case kIeee754Float64Pow: { |
| __ CallStubDelayed(new (zone()) |
| MathPowStub(nullptr, MathPowStub::DOUBLE)); |
| break; |
| } |
| case kIeee754Float64Sin: |
| ASSEMBLE_IEEE754_UNOP(sin); |
| break; |
| case kIeee754Float64Sinh: |
| ASSEMBLE_IEEE754_UNOP(sinh); |
| break; |
| case kIeee754Float64Tan: |
| ASSEMBLE_IEEE754_UNOP(tan); |
| break; |
| case kIeee754Float64Tanh: |
| ASSEMBLE_IEEE754_UNOP(tanh); |
| break; |
| case kArm64Float32RoundDown: |
| __ Frintm(i.OutputFloat32Register(), i.InputFloat32Register(0)); |
| break; |
| case kArm64Float64RoundDown: |
| __ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| break; |
| case kArm64Float32RoundUp: |
| __ Frintp(i.OutputFloat32Register(), i.InputFloat32Register(0)); |
| break; |
| case kArm64Float64RoundUp: |
| __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| break; |
| case kArm64Float64RoundTiesAway: |
| __ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| break; |
| case kArm64Float32RoundTruncate: |
| __ Frintz(i.OutputFloat32Register(), i.InputFloat32Register(0)); |
| break; |
| case kArm64Float64RoundTruncate: |
| __ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| break; |
| case kArm64Float32RoundTiesEven: |
| __ Frintn(i.OutputFloat32Register(), i.InputFloat32Register(0)); |
| break; |
| case kArm64Float64RoundTiesEven: |
| __ Frintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| break; |
| case kArm64Add: |
| if (FlagsModeField::decode(opcode) != kFlags_none) { |
| __ Adds(i.OutputRegister(), i.InputOrZeroRegister64(0), |
| i.InputOperand2_64(1)); |
| } else { |
| __ Add(i.OutputRegister(), i.InputOrZeroRegister64(0), |
| i.InputOperand2_64(1)); |
| } |
| break; |
| case kArm64Add32: |
| if (FlagsModeField::decode(opcode) != kFlags_none) { |
| __ Adds(i.OutputRegister32(), i.InputOrZeroRegister32(0), |
| i.InputOperand2_32(1)); |
| } else { |
| __ Add(i.OutputRegister32(), i.InputOrZeroRegister32(0), |
| i.InputOperand2_32(1)); |
| } |
| break; |
| case kArm64And: |
| if (FlagsModeField::decode(opcode) != kFlags_none) { |
| // The ands instruction only sets N and Z, so only the following |
| // conditions make sense. |
| DCHECK(FlagsConditionField::decode(opcode) == kEqual || |
| FlagsConditionField::decode(opcode) == kNotEqual || |
| FlagsConditionField::decode(opcode) == kPositiveOrZero || |
| FlagsConditionField::decode(opcode) == kNegative); |
| __ Ands(i.OutputRegister(), i.InputOrZeroRegister64(0), |
| i.InputOperand2_64(1)); |
| } else { |
| __ And(i.OutputRegister(), i.InputOrZeroRegister64(0), |
| i.InputOperand2_64(1)); |
| } |
| break; |
| case kArm64And32: |
| if (FlagsModeField::decode(opcode) != kFlags_none) { |
| // The ands instruction only sets N and Z, so only the following |
| // conditions make sense. |
| DCHECK(FlagsConditionField::decode(opcode) == kEqual || |
| FlagsConditionField::decode(opcode) == kNotEqual || |
| FlagsConditionField::decode(opcode) == kPositiveOrZero || |
| FlagsConditionField::decode(opcode) == kNegative); |
| __ Ands(i.OutputRegister32(), i.InputOrZeroRegister32(0), |
| i.InputOperand2_32(1)); |
| } else { |
| __ And(i.OutputRegister32(), i.InputOrZeroRegister32(0), |
| i.InputOperand2_32(1)); |
| } |
| break; |
| case kArm64Bic: |
| __ Bic(i.OutputRegister(), i.InputOrZeroRegister64(0), |
| i.InputOperand2_64(1)); |
| break; |
| case kArm64Bic32: |
| __ Bic(i.OutputRegister32(), i.InputOrZeroRegister32(0), |
| i.InputOperand2_32(1)); |
| break; |
| case kArm64Mul: |
| __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); |
| break; |
| case kArm64Mul32: |
| __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1)); |
| break; |
| case kArm64Smull: |
| __ Smull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1)); |
| break; |
| case kArm64Umull: |
| __ Umull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1)); |
| break; |
| case kArm64Madd: |
| __ Madd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), |
| i.InputRegister(2)); |
| break; |
| case kArm64Madd32: |
| __ Madd(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1), |
| i.InputRegister32(2)); |
| break; |
| case kArm64Msub: |
| __ Msub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), |
| i.InputRegister(2)); |
| break; |
| case kArm64Msub32: |
| __ Msub(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1), |
| i.InputRegister32(2)); |
| break; |
| case kArm64Mneg: |
| __ Mneg(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); |
| break; |
| case kArm64Mneg32: |
| __ Mneg(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1)); |
| break; |
| case kArm64Idiv: |
| __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); |
| break; |
| case kArm64Idiv32: |
| __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1)); |
| break; |
| case kArm64Udiv: |
| __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); |
| break; |
| case kArm64Udiv32: |
| __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1)); |
| break; |
| case kArm64Imod: { |
| UseScratchRegisterScope scope(tasm()); |
| Register temp = scope.AcquireX(); |
| __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1)); |
| __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0)); |
| break; |
| } |
| case kArm64Imod32: { |
| UseScratchRegisterScope scope(tasm()); |
| Register temp = scope.AcquireW(); |
| __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1)); |
| __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1), |
| i.InputRegister32(0)); |
| break; |
| } |
| case kArm64Umod: { |
| UseScratchRegisterScope scope(tasm()); |
| Register temp = scope.AcquireX(); |
| __ Udiv(temp, i.InputRegister(0), i.InputRegister(1)); |
| __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0)); |
| break; |
| } |
| case kArm64Umod32: { |
| UseScratchRegisterScope scope(tasm()); |
| Register temp = scope.AcquireW(); |
| __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1)); |
| __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1), |
| i.InputRegister32(0)); |
| break; |
| } |
| case kArm64Not: |
| __ Mvn(i.OutputRegister(), i.InputOperand(0)); |
| break; |
| case kArm64Not32: |
| __ Mvn(i.OutputRegister32(), i.InputOperand32(0)); |
| break; |
| case kArm64Or: |
| __ Orr(i.OutputRegister(), i.InputOrZeroRegister64(0), |
| i.InputOperand2_64(1)); |
| break; |
| case kArm64Or32: |
| __ Orr(i.OutputRegister32(), i.InputOrZeroRegister32(0), |
| i.InputOperand2_32(1)); |
| break; |
| case kArm64Orn: |
| __ Orn(i.OutputRegister(), i.InputOrZeroRegister64(0), |
| i.InputOperand2_64(1)); |
| break; |
| case kArm64Orn32: |
| __ Orn(i.OutputRegister32(), i.InputOrZeroRegister32(0), |
| i.InputOperand2_32(1)); |
| break; |
| case kArm64Eor: |
| __ Eor(i.OutputRegister(), i.InputOrZeroRegister64(0), |
| i.InputOperand2_64(1)); |
| break; |
| case kArm64Eor32: |
| __ Eor(i.OutputRegister32(), i.InputOrZeroRegister32(0), |
| i.InputOperand2_32(1)); |
| break; |
| case kArm64Eon: |
| __ Eon(i.OutputRegister(), i.InputOrZeroRegister64(0), |
| i.InputOperand2_64(1)); |
| break; |
| case kArm64Eon32: |
| __ Eon(i.OutputRegister32(), i.InputOrZeroRegister32(0), |
| i.InputOperand2_32(1)); |
| break; |
| case kArm64Sub: |
| if (FlagsModeField::decode(opcode) != kFlags_none) { |
| __ Subs(i.OutputRegister(), i.InputOrZeroRegister64(0), |
| i.InputOperand2_64(1)); |
| } else { |
| __ Sub(i.OutputRegister(), i.InputOrZeroRegister64(0), |
| i.InputOperand2_64(1)); |
| } |
| break; |
| case kArm64Sub32: |
| if (FlagsModeField::decode(opcode) != kFlags_none) { |
| __ Subs(i.OutputRegister32(), i.InputOrZeroRegister32(0), |
| i.InputOperand2_32(1)); |
| } else { |
| __ Sub(i.OutputRegister32(), i.InputOrZeroRegister32(0), |
| i.InputOperand2_32(1)); |
| } |
| break; |
| case kArm64Lsl: |
| ASSEMBLE_SHIFT(Lsl, 64); |
| break; |
| case kArm64Lsl32: |
| ASSEMBLE_SHIFT(Lsl, 32); |
| break; |
| case kArm64Lsr: |
| ASSEMBLE_SHIFT(Lsr, 64); |
| break; |
| case kArm64Lsr32: |
| ASSEMBLE_SHIFT(Lsr, 32); |
| break; |
| case kArm64Asr: |
| ASSEMBLE_SHIFT(Asr, 64); |
| break; |
| case kArm64Asr32: |
| ASSEMBLE_SHIFT(Asr, 32); |
| break; |
| case kArm64Ror: |
| ASSEMBLE_SHIFT(Ror, 64); |
| break; |
| case kArm64Ror32: |
| ASSEMBLE_SHIFT(Ror, 32); |
| break; |
| case kArm64Mov32: |
| __ Mov(i.OutputRegister32(), i.InputRegister32(0)); |
| break; |
| case kArm64Sxtb32: |
| __ Sxtb(i.OutputRegister32(), i.InputRegister32(0)); |
| break; |
| case kArm64Sxth32: |
| __ Sxth(i.OutputRegister32(), i.InputRegister32(0)); |
| break; |
| case kArm64Sxtw: |
| __ Sxtw(i.OutputRegister(), i.InputRegister32(0)); |
| break; |
| case kArm64Sbfx32: |
| __ Sbfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1), |
| i.InputInt5(2)); |
| break; |
| case kArm64Ubfx: |
| __ Ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt6(1), |
| i.InputInt32(2)); |
| break; |
| case kArm64Ubfx32: |
| __ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1), |
| i.InputInt32(2)); |
| break; |
| case kArm64Ubfiz32: |
| __ Ubfiz(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1), |
| i.InputInt5(2)); |
| break; |
| case kArm64Bfi: |
| __ Bfi(i.OutputRegister(), i.InputRegister(1), i.InputInt6(2), |
| i.InputInt6(3)); |
| break; |
| case kArm64TestAndBranch32: |
| case kArm64TestAndBranch: |
| // Pseudo instructions turned into tbz/tbnz in AssembleArchBranch. |
| break; |
| case kArm64CompareAndBranch32: |
| case kArm64CompareAndBranch: |
      // Pseudo instructions turned into cbz/cbnz in AssembleArchBranch.
| break; |
| case kArm64Claim: { |
| int count = i.InputInt32(0); |
| DCHECK_EQ(count % 2, 0); |
| __ AssertCspAligned(); |
| if (count > 0) { |
| __ Claim(count); |
| frame_access_state()->IncreaseSPDelta(count); |
| } |
| break; |
| } |
| case kArm64Poke: { |
| Operand operand(i.InputInt32(1) * kPointerSize); |
| if (instr->InputAt(0)->IsSimd128Register()) { |
| __ Poke(i.InputSimd128Register(0), operand); |
| } else if (instr->InputAt(0)->IsFPRegister()) { |
| __ Poke(i.InputFloat64Register(0), operand); |
| } else { |
| __ Poke(i.InputOrZeroRegister64(0), operand); |
| } |
| break; |
| } |
| case kArm64PokePair: { |
| int slot = i.InputInt32(2) - 1; |
| if (instr->InputAt(0)->IsFPRegister()) { |
| __ PokePair(i.InputFloat64Register(1), i.InputFloat64Register(0), |
| slot * kPointerSize); |
| } else { |
| __ PokePair(i.InputRegister(1), i.InputRegister(0), |
| slot * kPointerSize); |
| } |
| break; |
| } |
| case kArm64Peek: { |
| int reverse_slot = i.InputInt32(0); |
| int offset = |
| FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot); |
| if (instr->OutputAt(0)->IsFPRegister()) { |
| LocationOperand* op = LocationOperand::cast(instr->OutputAt(0)); |
| if (op->representation() == MachineRepresentation::kFloat64) { |
| __ Ldr(i.OutputDoubleRegister(), MemOperand(fp, offset)); |
| } else { |
| DCHECK_EQ(MachineRepresentation::kFloat32, op->representation()); |
| __ Ldr(i.OutputFloatRegister(), MemOperand(fp, offset)); |
| } |
| } else { |
| __ Ldr(i.OutputRegister(), MemOperand(fp, offset)); |
| } |
| break; |
| } |
| case kArm64Clz: |
| __ Clz(i.OutputRegister64(), i.InputRegister64(0)); |
| break; |
| case kArm64Clz32: |
| __ Clz(i.OutputRegister32(), i.InputRegister32(0)); |
| break; |
| case kArm64Rbit: |
| __ Rbit(i.OutputRegister64(), i.InputRegister64(0)); |
| break; |
| case kArm64Rbit32: |
| __ Rbit(i.OutputRegister32(), i.InputRegister32(0)); |
| break; |
| case kArm64Cmp: |
| __ Cmp(i.InputOrZeroRegister64(0), i.InputOperand2_64(1)); |
| break; |
| case kArm64Cmp32: |
| __ Cmp(i.InputOrZeroRegister32(0), i.InputOperand2_32(1)); |
| break; |
| case kArm64Cmn: |
| __ Cmn(i.InputOrZeroRegister64(0), i.InputOperand2_64(1)); |
| break; |
| case kArm64Cmn32: |
| __ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1)); |
| break; |
| case kArm64Tst: |
| __ Tst(i.InputOrZeroRegister64(0), i.InputOperand2_64(1)); |
| break; |
| case kArm64Tst32: |
| __ Tst(i.InputOrZeroRegister32(0), i.InputOperand2_32(1)); |
| break; |
| case kArm64Float32Cmp: |
| if (instr->InputAt(1)->IsFPRegister()) { |
| __ Fcmp(i.InputFloat32Register(0), i.InputFloat32Register(1)); |
| } else { |
| DCHECK(instr->InputAt(1)->IsImmediate()); |
| // 0.0 is the only immediate supported by fcmp instructions. |
| DCHECK_EQ(0.0f, i.InputFloat32(1)); |
| __ Fcmp(i.InputFloat32Register(0), i.InputFloat32(1)); |
| } |
| break; |
| case kArm64Float32Add: |
| __ Fadd(i.OutputFloat32Register(), i.InputFloat32Register(0), |
| i.InputFloat32Register(1)); |
| break; |
| case kArm64Float32Sub: |
| __ Fsub(i.OutputFloat32Register(), i.InputFloat32Register(0), |
| i.InputFloat32Register(1)); |
| break; |
| case kArm64Float32Mul: |
| __ Fmul(i.OutputFloat32Register(), i.InputFloat32Register(0), |
| i.InputFloat32Register(1)); |
| break; |
| case kArm64Float32Div: |
| __ Fdiv(i.OutputFloat32Register(), i.InputFloat32Register(0), |
| i.InputFloat32Register(1)); |
| break; |
| case kArm64Float32Abs: |
| __ Fabs(i.OutputFloat32Register(), i.InputFloat32Register(0)); |
| break; |
| case kArm64Float32Neg: |
| __ Fneg(i.OutputFloat32Register(), i.InputFloat32Register(0)); |
| break; |
| case kArm64Float32Sqrt: |
| __ Fsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0)); |
| break; |
| case kArm64Float64Cmp: |
| if (instr->InputAt(1)->IsFPRegister()) { |
| __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); |
| } else { |
| DCHECK(instr->InputAt(1)->IsImmediate()); |
| // 0.0 is the only immediate supported by fcmp instructions. |
| DCHECK_EQ(0.0, i.InputDouble(1)); |
| __ Fcmp(i.InputDoubleRegister(0), i.InputDouble(1)); |
| } |
| break; |
| case kArm64Float64Add: |
| __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| break; |
| case kArm64Float64Sub: |
| __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| break; |
| case kArm64Float64Mul: |
| __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| break; |
| case kArm64Float64Div: |
| __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| break; |
| case kArm64Float64Mod: { |
| // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc |
| FrameScope scope(tasm(), StackFrame::MANUAL); |
| DCHECK(d0.is(i.InputDoubleRegister(0))); |
| DCHECK(d1.is(i.InputDoubleRegister(1))); |
| DCHECK(d0.is(i.OutputDoubleRegister())); |
| // TODO(dcarney): make sure this saves all relevant registers. |
| __ CallCFunction( |
| ExternalReference::mod_two_doubles_operation(__ isolate()), 0, 2); |
| break; |
| } |
| case kArm64Float32Max: { |
| __ Fmax(i.OutputFloat32Register(), i.InputFloat32Register(0), |
| i.InputFloat32Register(1)); |
| break; |
| } |
| case kArm64Float64Max: { |
| __ Fmax(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| break; |
| } |
| case kArm64Float32Min: { |
| __ Fmin(i.OutputFloat32Register(), i.InputFloat32Register(0), |
| i.InputFloat32Register(1)); |
| break; |
| } |
| case kArm64Float64Min: { |
| __ Fmin(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| break; |
| } |
| case kArm64Float64Abs: |
| __ Fabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| break; |
| case kArm64Float64Neg: |
| __ Fneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| break; |
| case kArm64Float64Sqrt: |
| __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| break; |
| case kArm64Float32ToFloat64: |
| __ Fcvt(i.OutputDoubleRegister(), i.InputDoubleRegister(0).S()); |
| break; |
| case kArm64Float64ToFloat32: |
| __ Fcvt(i.OutputDoubleRegister().S(), i.InputDoubleRegister(0)); |
| break; |
| case kArm64Float32ToInt32: |
| __ Fcvtzs(i.OutputRegister32(), i.InputFloat32Register(0)); |
| // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, |
| // because INT32_MIN allows easier out-of-bounds detection. |
| __ Cmn(i.OutputRegister32(), 1); |
| __ Csinc(i.OutputRegister32(), i.OutputRegister32(), i.OutputRegister32(), |
| vc); |
| break; |
| case kArm64Float64ToInt32: |
| __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0)); |
| break; |
| case kArm64Float32ToUint32: |
| __ Fcvtzu(i.OutputRegister32(), i.InputFloat32Register(0)); |
| // Avoid UINT32_MAX as an overflow indicator and use 0 instead, |
| // because 0 allows easier out-of-bounds detection. |
| __ Cmn(i.OutputRegister32(), 1); |
| __ Adc(i.OutputRegister32(), i.OutputRegister32(), Operand(0)); |
| break; |
| case kArm64Float64ToUint32: |
| __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0)); |
| break; |
| case kArm64Float32ToInt64: |
| __ Fcvtzs(i.OutputRegister64(), i.InputFloat32Register(0)); |
| if (i.OutputCount() > 1) { |
| __ Mov(i.OutputRegister(1), 1); |
| Label done; |
| __ Cmp(i.OutputRegister(0), 1); |
| __ Ccmp(i.OutputRegister(0), -1, VFlag, vc); |
| __ Fccmp(i.InputFloat32Register(0), i.InputFloat32Register(0), VFlag, |
| vc); |
| __ B(vc, &done); |
| __ Fcmp(i.InputFloat32Register(0), static_cast<float>(INT64_MIN)); |
| __ Cset(i.OutputRegister(1), eq); |
| __ Bind(&done); |
| } |
| break; |
| case kArm64Float64ToInt64: |
| __ Fcvtzs(i.OutputRegister(0), i.InputDoubleRegister(0)); |
| if (i.OutputCount() > 1) { |
| __ Mov(i.OutputRegister(1), 1); |
| Label done; |
| __ Cmp(i.OutputRegister(0), 1); |
| __ Ccmp(i.OutputRegister(0), -1, VFlag, vc); |
| __ Fccmp(i.InputDoubleRegister(0), i.InputDoubleRegister(0), VFlag, vc); |
| __ B(vc, &done); |
| __ Fcmp(i.InputDoubleRegister(0), static_cast<double>(INT64_MIN)); |
| __ Cset(i.OutputRegister(1), eq); |
| __ Bind(&done); |
| } |
| break; |
| case kArm64Float32ToUint64: |
| __ Fcvtzu(i.OutputRegister64(), i.InputFloat32Register(0)); |
| if (i.OutputCount() > 1) { |
| __ Fcmp(i.InputFloat32Register(0), -1.0); |
| __ Ccmp(i.OutputRegister(0), -1, ZFlag, gt); |
| __ Cset(i.OutputRegister(1), ne); |
| } |
| break; |
| case kArm64Float64ToUint64: |
| __ Fcvtzu(i.OutputRegister64(), i.InputDoubleRegister(0)); |
| if (i.OutputCount() > 1) { |
| __ Fcmp(i.InputDoubleRegister(0), -1.0); |
| __ Ccmp(i.OutputRegister(0), -1, ZFlag, gt); |
| __ Cset(i.OutputRegister(1), ne); |
| } |
| break; |
| case kArm64Int32ToFloat32: |
| __ Scvtf(i.OutputFloat32Register(), i.InputRegister32(0)); |
| break; |
| case kArm64Int32ToFloat64: |
| __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0)); |
| break; |
| case kArm64Int64ToFloat32: |
| __ Scvtf(i.OutputDoubleRegister().S(), i.InputRegister64(0)); |
| break; |
| case kArm64Int64ToFloat64: |
| __ Scvtf(i.OutputDoubleRegister(), i.InputRegister64(0)); |
| break; |
| case kArm64Uint32ToFloat32: |
| __ Ucvtf(i.OutputFloat32Register(), i.InputRegister32(0)); |
| break; |
| case kArm64Uint32ToFloat64: |
| __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0)); |
| break; |
| case kArm64Uint64ToFloat32: |
| __ Ucvtf(i.OutputDoubleRegister().S(), i.InputRegister64(0)); |
| break; |
| case kArm64Uint64ToFloat64: |
| __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister64(0)); |
| break; |
| case kArm64Float64ExtractLowWord32: |
| __ Fmov(i.OutputRegister32(), i.InputFloat32Register(0)); |
| break; |
| case kArm64Float64ExtractHighWord32: |
| __ Umov(i.OutputRegister32(), i.InputFloat64Register(0).V2S(), 1); |
| break; |
| case kArm64Float64InsertLowWord32: |
| DCHECK(i.OutputFloat64Register().Is(i.InputFloat64Register(0))); |
| __ Ins(i.OutputFloat64Register().V2S(), 0, i.InputRegister32(1)); |
| break; |
| case kArm64Float64InsertHighWord32: |
| DCHECK(i.OutputFloat64Register().Is(i.InputFloat64Register(0))); |
| __ Ins(i.OutputFloat64Register().V2S(), 1, i.InputRegister32(1)); |
| break; |
| case kArm64Float64MoveU64: |
| __ Fmov(i.OutputFloat64Register(), i.InputRegister(0)); |
| break; |
| case kArm64Float64SilenceNaN: |
| __ CanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| break; |
| case kArm64U64MoveFloat64: |
| __ Fmov(i.OutputRegister(), i.InputDoubleRegister(0)); |
| break; |
| case kArm64Ldrb: |
| __ Ldrb(i.OutputRegister(), i.MemoryOperand()); |
| break; |
| case kArm64Ldrsb: |
| __ Ldrsb(i.OutputRegister(), i.MemoryOperand()); |
| break; |
| case kArm64Strb: |
| __ Strb(i.InputOrZeroRegister64(0), i.MemoryOperand(1)); |
| break; |
| case kArm64Ldrh: |
| __ Ldrh(i.OutputRegister(), i.MemoryOperand()); |
| break; |
| case kArm64Ldrsh: |
| __ Ldrsh(i.OutputRegister(), i.MemoryOperand()); |
| break; |
| case kArm64Strh: |
| __ Strh(i.InputOrZeroRegister64(0), i.MemoryOperand(1)); |
| break; |
| case kArm64Ldrsw: |
| __ Ldrsw(i.OutputRegister(), i.MemoryOperand()); |
| break; |
| case kArm64LdrW: |
| __ Ldr(i.OutputRegister32(), i.MemoryOperand()); |
| break; |
| case kArm64StrW: |
| __ Str(i.InputOrZeroRegister32(0), i.MemoryOperand(1)); |
| break; |
| case kArm64Ldr: |
| __ Ldr(i.OutputRegister(), i.MemoryOperand()); |
| break; |
| case kArm64Str: |
| __ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1)); |
| break; |
| case kArm64LdrS: |
| __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand()); |
| break; |
| case kArm64StrS: |
| __ Str(i.InputFloat32OrZeroRegister(0), i.MemoryOperand(1)); |
| break; |
| case kArm64LdrD: |
| __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand()); |
| break; |
| case kArm64StrD: |
| __ Str(i.InputFloat64OrZeroRegister(0), i.MemoryOperand(1)); |
| break; |
| case kArm64LdrQ: |
| __ Ldr(i.OutputSimd128Register(), i.MemoryOperand()); |
| break; |
| case kArm64StrQ: |
| __ Str(i.InputSimd128Register(0), i.MemoryOperand(1)); |
| break; |
| case kAtomicLoadInt8: |
| ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb); |
| __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); |
| break; |
| case kAtomicLoadUint8: |
| ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb); |
| break; |
| case kAtomicLoadInt16: |
| ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh); |
| __ Sxth(i.OutputRegister(0), i.OutputRegister(0)); |
| break; |
| case kAtomicLoadUint16: |
| ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh); |
| break; |
| case kAtomicLoadWord32: |
| ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar); |
| break; |
| case kAtomicStoreWord8: |
| ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrb); |
| break; |
| case kAtomicStoreWord16: |
| ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrh); |
| break; |
| case kAtomicStoreWord32: |
| ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr); |
| break; |
| case kAtomicExchangeInt8: |
| ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb); |
| __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); |
| break; |
| case kAtomicExchangeUint8: |
| ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb); |
| break; |
| case kAtomicExchangeInt16: |
| ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh); |
| __ Sxth(i.OutputRegister(0), i.OutputRegister(0)); |
| break; |
| case kAtomicExchangeUint16: |
| ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh); |
| break; |
| case kAtomicExchangeWord32: |
| ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr); |
| break; |
| case kAtomicCompareExchangeInt8: |
| ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB); |
| __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); |
| break; |
| case kAtomicCompareExchangeUint8: |
| ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB); |
| break; |
| case kAtomicCompareExchangeInt16: |
| ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH); |
| __ Sxth(i.OutputRegister(0), i.OutputRegister(0)); |
| break; |
| case kAtomicCompareExchangeUint16: |
| ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH); |
| break; |
| case kAtomicCompareExchangeWord32: |
| ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTW); |
| break; |
| #define ATOMIC_BINOP_CASE(op, inst) \ |
| case kAtomic##op##Int8: \ |
| ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst); \ |
| __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \ |
| break; \ |
| case kAtomic##op##Uint8: \ |
| ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst); \ |
| break; \ |
| case kAtomic##op##Int16: \ |
| ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst); \ |
| __ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \ |
| break; \ |
| case kAtomic##op##Uint16: \ |
| ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst); \ |
| break; \ |
| case kAtomic##op##Word32: \ |
| ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst); \ |
| break; |
| ATOMIC_BINOP_CASE(Add, Add) |
| ATOMIC_BINOP_CASE(Sub, Sub) |
| ATOMIC_BINOP_CASE(And, And) |
| ATOMIC_BINOP_CASE(Or, Orr) |
| ATOMIC_BINOP_CASE(Xor, Eor) |
| #undef ATOMIC_BINOP_CASE |
| #undef ASSEMBLE_SHIFT |
| #undef ASSEMBLE_ATOMIC_LOAD_INTEGER |
| #undef ASSEMBLE_ATOMIC_STORE_INTEGER |
| #undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER |
| #undef ASSEMBLE_ATOMIC_BINOP |
| #undef ASSEMBLE_IEEE754_BINOP |
| #undef ASSEMBLE_IEEE754_UNOP |
| #undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER |
| |
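// Case helpers for SIMD operations that map onto a single instruction
// operating lane-wise on the given vector format (or widening from a narrower
// format).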
| #define SIMD_UNOP_CASE(Op, Instr, FORMAT) \ |
| case Op: \ |
| __ Instr(i.OutputSimd128Register().V##FORMAT(), \ |
| i.InputSimd128Register(0).V##FORMAT()); \ |
| break; |
| #define SIMD_WIDENING_UNOP_CASE(Op, Instr, WIDE, NARROW) \ |
| case Op: \ |
| __ Instr(i.OutputSimd128Register().V##WIDE(), \ |
| i.InputSimd128Register(0).V##NARROW()); \ |
| break; |
| #define SIMD_BINOP_CASE(Op, Instr, FORMAT) \ |
| case Op: \ |
| __ Instr(i.OutputSimd128Register().V##FORMAT(), \ |
| i.InputSimd128Register(0).V##FORMAT(), \ |
| i.InputSimd128Register(1).V##FORMAT()); \ |
| break; |
| |
| case kArm64F32x4Splat: { |
| __ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0); |
| break; |
| } |
| case kArm64F32x4ExtractLane: { |
| __ Mov(i.OutputSimd128Register().S(), i.InputSimd128Register(0).V4S(), |
| i.InputInt8(1)); |
| break; |
| } |
| case kArm64F32x4ReplaceLane: { |
| VRegister dst = i.OutputSimd128Register().V4S(), |
| src1 = i.InputSimd128Register(0).V4S(); |
| if (!dst.is(src1)) { |
| __ Mov(dst, src1); |
| } |
| __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).V4S(), 0); |
| break; |
| } |
| SIMD_UNOP_CASE(kArm64F32x4SConvertI32x4, Scvtf, 4S); |
| SIMD_UNOP_CASE(kArm64F32x4UConvertI32x4, Ucvtf, 4S); |
| SIMD_UNOP_CASE(kArm64F32x4Abs, Fabs, 4S); |
| SIMD_UNOP_CASE(kArm64F32x4Neg, Fneg, 4S); |
| SIMD_UNOP_CASE(kArm64F32x4RecipApprox, Frecpe, 4S); |
| SIMD_UNOP_CASE(kArm64F32x4RecipSqrtApprox, Frsqrte, 4S); |
| SIMD_BINOP_CASE(kArm64F32x4Add, Fadd, 4S); |
| SIMD_BINOP_CASE(kArm64F32x4AddHoriz, Faddp, 4S); |
| SIMD_BINOP_CASE(kArm64F32x4Sub, Fsub, 4S); |
| SIMD_BINOP_CASE(kArm64F32x4Mul, Fmul, 4S); |
| SIMD_BINOP_CASE(kArm64F32x4Min, Fmin, 4S); |
| SIMD_BINOP_CASE(kArm64F32x4Max, Fmax, 4S); |
| SIMD_BINOP_CASE(kArm64F32x4Eq, Fcmeq, 4S); |
| case kArm64F32x4Ne: { |
| VRegister dst = i.OutputSimd128Register().V4S(); |
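| // NEON has no "not equal" compare; compute equality and invert the mask. |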
| __ Fcmeq(dst, i.InputSimd128Register(0).V4S(), |
| i.InputSimd128Register(1).V4S()); |
| __ Mvn(dst, dst); |
| break; |
| } |
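| // Fcmlt/Fcmle only compare against zero, so "less than (or equal)" is |
| // computed as "greater than (or equal)" with the operands swapped. |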
| case kArm64F32x4Lt: { |
| __ Fcmgt(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(), |
| i.InputSimd128Register(0).V4S()); |
| break; |
| } |
| case kArm64F32x4Le: { |
| __ Fcmge(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(), |
| i.InputSimd128Register(0).V4S()); |
| break; |
| } |
| case kArm64I32x4Splat: { |
| __ Dup(i.OutputSimd128Register().V4S(), i.InputRegister32(0)); |
| break; |
| } |
| case kArm64I32x4ExtractLane: { |
| __ Mov(i.OutputRegister32(), i.InputSimd128Register(0).V4S(), |
| i.InputInt8(1)); |
| break; |
| } |
| case kArm64I32x4ReplaceLane: { |
| VRegister dst = i.OutputSimd128Register().V4S(), |
| src1 = i.InputSimd128Register(0).V4S(); |
| if (!dst.is(src1)) { |
| __ Mov(dst, src1); |
| } |
| __ Mov(dst, i.InputInt8(1), i.InputRegister32(2)); |
| break; |
| } |
| SIMD_UNOP_CASE(kArm64I32x4SConvertF32x4, Fcvtzs, 4S); |
| SIMD_WIDENING_UNOP_CASE(kArm64I32x4SConvertI16x8Low, Sxtl, 4S, 4H); |
| SIMD_WIDENING_UNOP_CASE(kArm64I32x4SConvertI16x8High, Sxtl2, 4S, 8H); |
| SIMD_UNOP_CASE(kArm64I32x4Neg, Neg, 4S); |
| case kArm64I32x4Shl: { |
| __ Shl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(), |
| i.InputInt5(1)); |
| break; |
| } |
| case kArm64I32x4ShrS: { |
| __ Sshr(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(), |
| i.InputInt5(1)); |
| break; |
| } |
| SIMD_BINOP_CASE(kArm64I32x4Add, Add, 4S); |
| SIMD_BINOP_CASE(kArm64I32x4AddHoriz, Addp, 4S); |
| SIMD_BINOP_CASE(kArm64I32x4Sub, Sub, 4S); |
| SIMD_BINOP_CASE(kArm64I32x4Mul, Mul, 4S); |
| SIMD_BINOP_CASE(kArm64I32x4MinS, Smin, 4S); |
| SIMD_BINOP_CASE(kArm64I32x4MaxS, Smax, 4S); |
| SIMD_BINOP_CASE(kArm64I32x4Eq, Cmeq, 4S); |
| case kArm64I32x4Ne: { |
| VRegister dst = i.OutputSimd128Register().V4S(); |
| __ Cmeq(dst, i.InputSimd128Register(0).V4S(), |
| i.InputSimd128Register(1).V4S()); |
| __ Mvn(dst, dst); |
| break; |
| } |
| SIMD_BINOP_CASE(kArm64I32x4GtS, Cmgt, 4S); |
| SIMD_BINOP_CASE(kArm64I32x4GeS, Cmge, 4S); |
| SIMD_UNOP_CASE(kArm64I32x4UConvertF32x4, Fcvtzu, 4S); |
| SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8Low, Uxtl, 4S, 4H); |
| SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8High, Uxtl2, 4S, 8H); |
| case kArm64I32x4ShrU: { |
| __ Ushr(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(), |
| i.InputInt5(1)); |
| break; |
| } |
| SIMD_BINOP_CASE(kArm64I32x4MinU, Umin, 4S); |
| SIMD_BINOP_CASE(kArm64I32x4MaxU, Umax, 4S); |
| SIMD_BINOP_CASE(kArm64I32x4GtU, Cmhi, 4S); |
| SIMD_BINOP_CASE(kArm64I32x4GeU, Cmhs, 4S); |
| case kArm64I16x8Splat: { |
| __ Dup(i.OutputSimd128Register().V8H(), i.InputRegister32(0)); |
| break; |
| } |
| case kArm64I16x8ExtractLane: { |
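| // Smov sign-extends the extracted 16-bit lane into the 32-bit result. |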
| __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(), |
| i.InputInt8(1)); |
| break; |
| } |
| case kArm64I16x8ReplaceLane: { |
| VRegister dst = i.OutputSimd128Register().V8H(), |
| src1 = i.InputSimd128Register(0).V8H(); |
| if (!dst.is(src1)) { |
| __ Mov(dst, src1); |
| } |
| __ Mov(dst, i.InputInt8(1), i.InputRegister32(2)); |
| break; |
| } |
| SIMD_WIDENING_UNOP_CASE(kArm64I16x8SConvertI8x16Low, Sxtl, 8H, 8B); |
| SIMD_WIDENING_UNOP_CASE(kArm64I16x8SConvertI8x16High, Sxtl2, 8H, 16B); |
| SIMD_UNOP_CASE(kArm64I16x8Neg, Neg, 8H); |
| case kArm64I16x8Shl: { |
| __ Shl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(), |
| i.InputInt5(1)); |
| break; |
| } |
| case kArm64I16x8ShrS: { |
| __ Sshr(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(), |
| i.InputInt5(1)); |
| break; |
| } |
| case kArm64I16x8SConvertI32x4: { |
| VRegister dst = i.OutputSimd128Register(), |
| src0 = i.InputSimd128Register(0), |
| src1 = i.InputSimd128Register(1); |
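| // Sqxtn overwrites the low half of dst before Sqxtn2 reads src1, so if |
| // dst aliases src1, narrow from a copy in a scratch register instead. |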
| UseScratchRegisterScope scope(tasm()); |
| VRegister temp = scope.AcquireV(kFormat4S); |
| if (dst.is(src1)) { |
| __ Mov(temp, src1.V4S()); |
| src1 = temp; |
| } |
| __ Sqxtn(dst.V4H(), src0.V4S()); |
| __ Sqxtn2(dst.V8H(), src1.V4S()); |
| break; |
| } |
| SIMD_BINOP_CASE(kArm64I16x8Add, Add, 8H); |
| SIMD_BINOP_CASE(kArm64I16x8AddSaturateS, Sqadd, 8H); |
| SIMD_BINOP_CASE(kArm64I16x8AddHoriz, Addp, 8H); |
| SIMD_BINOP_CASE(kArm64I16x8Sub, Sub, 8H); |
| SIMD_BINOP_CASE(kArm64I16x8SubSaturateS, Sqsub, 8H); |
| SIMD_BINOP_CASE(kArm64I16x8Mul, Mul, 8H); |
| SIMD_BINOP_CASE(kArm64I16x8MinS, Smin, 8H); |
| SIMD_BINOP_CASE(kArm64I16x8MaxS, Smax, 8H); |
| SIMD_BINOP_CASE(kArm64I16x8Eq, Cmeq, 8H); |
| case kArm64I16x8Ne: { |
| VRegister dst = i.OutputSimd128Register().V8H(); |
| __ Cmeq(dst, i.InputSimd128Register(0).V8H(), |
| i.InputSimd128Register(1).V8H()); |
| __ Mvn(dst, dst); |
| break; |
| } |
| SIMD_BINOP_CASE(kArm64I16x8GtS, Cmgt, 8H); |
| SIMD_BINOP_CASE(kArm64I16x8GeS, Cmge, 8H); |
| case kArm64I16x8UConvertI8x16Low: { |
| __ Uxtl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8B()); |
| break; |
| } |
| case kArm64I16x8UConvertI8x16High: { |
| __ Uxtl2(i.OutputSimd128Register().V8H(), |
| i.InputSimd128Register(0).V16B()); |
| break; |
| } |
| case kArm64I16x8ShrU: { |
| __ Ushr(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(), |
| i.InputInt5(1)); |
| break; |
| } |
| case kArm64I16x8UConvertI32x4: { |
| VRegister dst = i.OutputSimd128Register(), |
| src0 = i.InputSimd128Register(0), |
| src1 = i.InputSimd128Register(1); |
| UseScratchRegisterScope scope(tasm()); |
| VRegister temp = scope.AcquireV(kFormat4S); |
| if (dst.is(src1)) { |
| __ Mov(temp, src1.V4S()); |
| src1 = temp; |
| } |
| __ Uqxtn(dst.V4H(), src0.V4S()); |
| __ Uqxtn2(dst.V8H(), src1.V4S()); |
| break; |
| } |
| SIMD_BINOP_CASE(kArm64I16x8AddSaturateU, Uqadd, 8H); |
| SIMD_BINOP_CASE(kArm64I16x8SubSaturateU, Uqsub, 8H); |
| SIMD_BINOP_CASE(kArm64I16x8MinU, Umin, 8H); |
| SIMD_BINOP_CASE(kArm64I16x8MaxU, Umax, 8H); |
| SIMD_BINOP_CASE(kArm64I16x8GtU, Cmhi, 8H); |
| SIMD_BINOP_CASE(kArm64I16x8GeU, Cmhs, 8H); |
| case kArm64I8x16Splat: { |
| __ Dup(i.OutputSimd128Register().V16B(), i.InputRegister32(0)); |
| break; |
| } |
| case kArm64I8x16ExtractLane: { |
| __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(), |
| i.InputInt8(1)); |
| break; |
| } |
| case kArm64I8x16ReplaceLane: { |
| VRegister dst = i.OutputSimd128Register().V16B(), |
| src1 = i.InputSimd128Register(0).V16B(); |
| if (!dst.is(src1)) { |
| __ Mov(dst, src1); |
| } |
| __ Mov(dst, i.InputInt8(1), i.InputRegister32(2)); |
| break; |
| } |
| SIMD_UNOP_CASE(kArm64I8x16Neg, Neg, 16B); |
| case kArm64I8x16Shl: { |
| __ Shl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(), |
| i.InputInt5(1)); |
| break; |
| } |
| case kArm64I8x16ShrS: { |
| __ Sshr(i.OutputSimd128Register().V16B(), |
| i.InputSimd128Register(0).V16B(), i.InputInt5(1)); |
| break; |
| } |
| case kArm64I8x16SConvertI16x8: { |
| VRegister dst = i.OutputSimd128Register(), |
| src0 = i.InputSimd128Register(0), |
| src1 = i.InputSimd128Register(1); |
| UseScratchRegisterScope scope(tasm()); |
| VRegister temp = scope.AcquireV(kFormat8H); |
| if (dst.is(src1)) { |
| __ Mov(temp, src1.V8H()); |
| src1 = temp; |
| } |
| __ Sqxtn(dst.V8B(), src0.V8H()); |
| __ Sqxtn2(dst.V16B(), src1.V8H()); |
| break; |
| } |
| SIMD_BINOP_CASE(kArm64I8x16Add, Add, 16B); |
| SIMD_BINOP_CASE(kArm64I8x16AddSaturateS, Sqadd, 16B); |
| SIMD_BINOP_CASE(kArm64I8x16Sub, Sub, 16B); |
| SIMD_BINOP_CASE(kArm64I8x16SubSaturateS, Sqsub, 16B); |
| SIMD_BINOP_CASE(kArm64I8x16Mul, Mul, 16B); |
| SIMD_BINOP_CASE(kArm64I8x16MinS, Smin, 16B); |
| SIMD_BINOP_CASE(kArm64I8x16MaxS, Smax, 16B); |
| SIMD_BINOP_CASE(kArm64I8x16Eq, Cmeq, 16B); |
| case kArm64I8x16Ne: { |
| VRegister dst = i.OutputSimd128Register().V16B(); |
| __ Cmeq(dst, i.InputSimd128Register(0).V16B(), |
| i.InputSimd128Register(1).V16B()); |
| __ Mvn(dst, dst); |
| break; |
| } |
| SIMD_BINOP_CASE(kArm64I8x16GtS, Cmgt, 16B); |
| SIMD_BINOP_CASE(kArm64I8x16GeS, Cmge, 16B); |
| case kArm64I8x16ShrU: { |
| __ Ushr(i.OutputSimd128Register().V16B(), |
| i.InputSimd128Register(0).V16B(), i.InputInt5(1)); |
| break; |
| } |
| case kArm64I8x16UConvertI16x8: { |
| VRegister dst = i.OutputSimd128Register(), |
| src0 = i.InputSimd128Register(0), |
| src1 = i.InputSimd128Register(1); |
| UseScratchRegisterScope scope(tasm()); |
| VRegister temp = scope.AcquireV(kFormat8H); |
| if (dst.is(src1)) { |
| __ Mov(temp, src1.V8H()); |
| src1 = temp; |
| } |
| __ Uqxtn(dst.V8B(), src0.V8H()); |
| __ Uqxtn2(dst.V16B(), src1.V8H()); |
| break; |
| } |
| SIMD_BINOP_CASE(kArm64I8x16AddSaturateU, Uqadd, 16B); |
| SIMD_BINOP_CASE(kArm64I8x16SubSaturateU, Uqsub, 16B); |
| SIMD_BINOP_CASE(kArm64I8x16MinU, Umin, 16B); |
| SIMD_BINOP_CASE(kArm64I8x16MaxU, Umax, 16B); |
| SIMD_BINOP_CASE(kArm64I8x16GtU, Cmhi, 16B); |
| SIMD_BINOP_CASE(kArm64I8x16GeU, Cmhs, 16B); |
| case kArm64S128Zero: { |
| __ Movi(i.OutputSimd128Register().V16B(), 0); |
| break; |
| } |
| SIMD_BINOP_CASE(kArm64S128And, And, 16B); |
| SIMD_BINOP_CASE(kArm64S128Or, Orr, 16B); |
| SIMD_BINOP_CASE(kArm64S128Xor, Eor, 16B); |
| SIMD_UNOP_CASE(kArm64S128Not, Mvn, 16B); |
| case kArm64S128Dup: { |
| VRegister dst = i.OutputSimd128Register(), |
| src = i.InputSimd128Register(0); |
| int lanes = i.InputInt32(1); |
| int index = i.InputInt32(2); |
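| // The lane count selects the element size: 4x32, 8x16 or 16x8 bits. |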
| switch (lanes) { |
| case 4: |
| __ Dup(dst.V4S(), src.V4S(), index); |
| break; |
| case 8: |
| __ Dup(dst.V8H(), src.V8H(), index); |
| break; |
| case 16: |
| __ Dup(dst.V16B(), src.V16B(), index); |
| break; |
| default: |
| UNREACHABLE(); |
| break; |
| } |
| break; |
| } |
| case kArm64S128Select: { |
| VRegister dst = i.OutputSimd128Register().V16B(); |
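| // Bsl uses its destination as the selection mask, so the mask operand |
| // (input 0) must already be in the output register. |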
| DCHECK(dst.is(i.InputSimd128Register(0).V16B())); |
| __ Bsl(dst, i.InputSimd128Register(1).V16B(), |
| i.InputSimd128Register(2).V16B()); |
| break; |
| } |
| case kArm64S32x4Shuffle: { |
| Simd128Register dst = i.OutputSimd128Register().V4S(), |
| src0 = i.InputSimd128Register(0).V4S(), |
| src1 = i.InputSimd128Register(1).V4S(); |
| // Check for in-place shuffles. |
| // If dst == src0 == src1, then the shuffle is unary and we only use src0. |
| UseScratchRegisterScope scope(tasm()); |
| VRegister temp = scope.AcquireV(kFormat4S); |
| if (dst.is(src0)) { |
| __ Mov(temp, src0); |
| src0 = temp; |
| } else if (dst.is(src1)) { |
| __ Mov(temp, src1); |
| src1 = temp; |
| } |
| // Perform shuffle as a vmov per lane. |
| int32_t shuffle = i.InputInt32(2); |
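| // Each byte of the shuffle immediate selects one source lane; indices |
| // 4-7 refer to lanes of src1. |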
| for (int i = 0; i < 4; i++) { |
| VRegister src = src0; |
| int lane = shuffle & 0x7; |
| if (lane >= 4) { |
| src = src1; |
| lane &= 0x3; |
| } |
| __ Mov(dst, i, src, lane); |
| shuffle >>= 8; |
| } |
| break; |
| } |
| SIMD_BINOP_CASE(kArm64S32x4ZipLeft, Zip1, 4S); |
| SIMD_BINOP_CASE(kArm64S32x4ZipRight, Zip2, 4S); |
| SIMD_BINOP_CASE(kArm64S32x4UnzipLeft, Uzp1, 4S); |
| SIMD_BINOP_CASE(kArm64S32x4UnzipRight, Uzp2, 4S); |
| SIMD_BINOP_CASE(kArm64S32x4TransposeLeft, Trn1, 4S); |
| SIMD_BINOP_CASE(kArm64S32x4TransposeRight, Trn2, 4S); |
| SIMD_BINOP_CASE(kArm64S16x8ZipLeft, Zip1, 8H); |
| SIMD_BINOP_CASE(kArm64S16x8ZipRight, Zip2, 8H); |
| SIMD_BINOP_CASE(kArm64S16x8UnzipLeft, Uzp1, 8H); |
| SIMD_BINOP_CASE(kArm64S16x8UnzipRight, Uzp2, 8H); |
| SIMD_BINOP_CASE(kArm64S16x8TransposeLeft, Trn1, 8H); |
| SIMD_BINOP_CASE(kArm64S16x8TransposeRight, Trn2, 8H); |
| SIMD_BINOP_CASE(kArm64S8x16ZipLeft, Zip1, 16B); |
| SIMD_BINOP_CASE(kArm64S8x16ZipRight, Zip2, 16B); |
| SIMD_BINOP_CASE(kArm64S8x16UnzipLeft, Uzp1, 16B); |
| SIMD_BINOP_CASE(kArm64S8x16UnzipRight, Uzp2, 16B); |
| SIMD_BINOP_CASE(kArm64S8x16TransposeLeft, Trn1, 16B); |
| SIMD_BINOP_CASE(kArm64S8x16TransposeRight, Trn2, 16B); |
| case kArm64S8x16Concat: { |
| __ Ext(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(), |
| i.InputSimd128Register(1).V16B(), i.InputInt4(2)); |
| break; |
| } |
| case kArm64S8x16Shuffle: { |
| Simd128Register dst = i.OutputSimd128Register().V16B(), |
| src0 = i.InputSimd128Register(0).V16B(), |
| src1 = i.InputSimd128Register(1).V16B(); |
| // Unary shuffle table is in src0, binary shuffle table is in src0, src1, |
| // which must be consecutive. |
| int64_t mask = 0; |
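| // Mask each shuffle index byte to the size of the lookup table: 16 lanes |
| // for a unary shuffle, 32 for a binary one. |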
| if (src0.is(src1)) { |
| mask = 0x0F0F0F0F; |
| } else { |
| mask = 0x1F1F1F1F; |
| DCHECK(AreConsecutive(src0, src1)); |
| } |
| int64_t imm1 = |
| (i.InputInt32(2) & mask) | ((i.InputInt32(3) & mask) << 32); |
| int64_t imm2 = |
| (i.InputInt32(4) & mask) | ((i.InputInt32(5) & mask) << 32); |
| UseScratchRegisterScope scope(tasm()); |
| VRegister temp = scope.AcquireV(kFormat16B); |
| __ Movi(temp, imm2, imm1); |
| |
| if (src0.is(src1)) { |
| __ Tbl(dst, src0, temp.V16B()); |
| } else { |
| __ Tbl(dst, src0, src1, temp.V16B()); |
| } |
| break; |
| } |
| SIMD_UNOP_CASE(kArm64S32x2Reverse, Rev64, 4S); |
| SIMD_UNOP_CASE(kArm64S16x4Reverse, Rev64, 8H); |
| SIMD_UNOP_CASE(kArm64S16x2Reverse, Rev32, 8H); |
| SIMD_UNOP_CASE(kArm64S8x8Reverse, Rev64, 16B); |
| SIMD_UNOP_CASE(kArm64S8x4Reverse, Rev32, 16B); |
| SIMD_UNOP_CASE(kArm64S8x2Reverse, Rev16, 16B); |
| |
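| // Reduces all lanes with a horizontal unsigned min/max and moves the |
| // scalar result into a general-purpose register (used for AnyTrue/AllTrue). |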
| #define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT) \ |
| case Op: { \ |
| UseScratchRegisterScope scope(tasm()); \ |
| VRegister temp = scope.AcquireV(format); \ |
| __ Instr(temp, i.InputSimd128Register(0).V##FORMAT()); \ |
| __ Umov(i.OutputRegister32(), temp, 0); \ |
| break; \ |
| } |
| SIMD_REDUCE_OP_CASE(kArm64S1x4AnyTrue, Umaxv, kFormatS, 4S); |
| SIMD_REDUCE_OP_CASE(kArm64S1x4AllTrue, Uminv, kFormatS, 4S); |
| SIMD_REDUCE_OP_CASE(kArm64S1x8AnyTrue, Umaxv, kFormatH, 8H); |
| SIMD_REDUCE_OP_CASE(kArm64S1x8AllTrue, Uminv, kFormatH, 8H); |
| SIMD_REDUCE_OP_CASE(kArm64S1x16AnyTrue, Umaxv, kFormatB, 16B); |
| SIMD_REDUCE_OP_CASE(kArm64S1x16AllTrue, Uminv, kFormatB, 16B); |
| } |
| return kSuccess; |
| } // NOLINT(readability/fn_size) |
| |
| #undef SIMD_UNOP_CASE |
| #undef SIMD_WIDENING_UNOP_CASE |
| #undef SIMD_BINOP_CASE |
| #undef SIMD_REDUCE_OP_CASE |
| |
| // Assemble branches after this instruction. |
| void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { |
| Arm64OperandConverter i(this, instr); |
| Label* tlabel = branch->true_label; |
| Label* flabel = branch->false_label; |
| FlagsCondition condition = branch->condition; |
| ArchOpcode opcode = instr->arch_opcode(); |
| |
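| // Cbz/Cbnz and Tbz/Tbnz fuse the compare or bit test with the branch, so |
| // these opcodes are handled separately from the generic condition codes. |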
| if (opcode == kArm64CompareAndBranch32) { |
| switch (condition) { |
| case kEqual: |
| __ Cbz(i.InputRegister32(0), tlabel); |
| break; |
| case kNotEqual: |
| __ Cbnz(i.InputRegister32(0), tlabel); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| } else if (opcode == kArm64CompareAndBranch) { |
| switch (condition) { |
| case kEqual: |
| __ Cbz(i.InputRegister64(0), tlabel); |
| break; |
| case kNotEqual: |
| __ Cbnz(i.InputRegister64(0), tlabel); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| } else if (opcode == kArm64TestAndBranch32) { |
| switch (condition) { |
| case kEqual: |
| __ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel); |
| break; |
| case kNotEqual: |
| __ Tbnz(i.InputRegister32(0), i.InputInt5(1), tlabel); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| } else if (opcode == kArm64TestAndBranch) { |
| switch (condition) { |
| case kEqual: |
| __ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel); |
| break; |
| case kNotEqual: |
| __ Tbnz(i.InputRegister64(0), i.InputInt6(1), tlabel); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| } else { |
| Condition cc = FlagsConditionToCondition(condition); |
| __ B(cc, tlabel); |
| } |
| if (!branch->fallthru) __ B(flabel); // no fallthru to flabel. |
| } |
| |
| void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr, |
| BranchInfo* branch) { |
| AssembleArchBranch(instr, branch); |
| } |
| |
| void CodeGenerator::AssembleArchJump(RpoNumber target) { |
| if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target)); |
| } |
| |
| void CodeGenerator::AssembleArchTrap(Instruction* instr, |
| FlagsCondition condition) { |
| class OutOfLineTrap final : public OutOfLineCode { |
| public: |
| OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr) |
| : OutOfLineCode(gen), |
| frame_elided_(frame_elided), |
| instr_(instr), |
| gen_(gen) {} |
| void Generate() final { |
| Arm64OperandConverter i(gen_, instr_); |
| Builtins::Name trap_id = |
| static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1)); |
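| // When the frame was elided, enter a WASM frame for the duration of the |
| // trap call so it runs with a valid frame. |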
| bool old_has_frame = __ has_frame(); |
| if (frame_elided_) { |
| __ set_has_frame(true); |
| __ EnterFrame(StackFrame::WASM_COMPILED); |
| } |
| GenerateCallToTrap(trap_id); |
| if (frame_elided_) { |
| __ set_has_frame(old_has_frame); |
| } |
| } |
| |
| private: |
| void GenerateCallToTrap(Builtins::Name trap_id) { |
| if (trap_id == Builtins::builtin_count) { |
| // We cannot test calls to the runtime in cctest/test-run-wasm. |
| // Therefore we emit a call to C here instead of a call to the runtime. |
| __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing( |
| __ isolate()), |
| 0); |
| __ LeaveFrame(StackFrame::WASM_COMPILED); |
| CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor(); |
| int pop_count = static_cast<int>(descriptor->StackParameterCount()); |
| pop_count += (pop_count & 1); // align |
| __ Drop(pop_count); |
| __ Ret(); |
| } else { |
| DCHECK(csp.Is(__ StackPointer())); |
| gen_->AssembleSourcePosition(instr_); |
| __ Call(__ isolate()->builtins()->builtin_handle(trap_id), |
| RelocInfo::CODE_TARGET); |
| ReferenceMap* reference_map = |
| new (gen_->zone()) ReferenceMap(gen_->zone()); |
| gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0, |
| Safepoint::kNoLazyDeopt); |
| if (FLAG_debug_code) { |
| // The trap code should never return. |
| __ Brk(0); |
| } |
| } |
| } |
| bool frame_elided_; |
| Instruction* instr_; |
| CodeGenerator* gen_; |
| }; |
| bool frame_elided = !frame_access_state()->has_frame(); |
| auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr); |
| Label* tlabel = ool->entry(); |
| Condition cc = FlagsConditionToCondition(condition); |
| __ B(cc, tlabel); |
| } |
| |
| // Assemble boolean materializations after this instruction. |
| void CodeGenerator::AssembleArchBoolean(Instruction* instr, |
| FlagsCondition condition) { |
| Arm64OperandConverter i(this, instr); |
| |
| // Materialize a full 64-bit 1 or 0 value. The result register is always the |
| // last output of the instruction. |
| DCHECK_NE(0u, instr->OutputCount()); |
| Register reg = i.OutputRegister(instr->OutputCount() - 1); |
| Condition cc = FlagsConditionToCondition(condition); |
| __ Cset(reg, cc); |
| } |
| |
| |
| void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) { |
| Arm64OperandConverter i(this, instr); |
| Register input = i.InputRegister32(0); |
| for (size_t index = 2; index < instr->InputCount(); index += 2) { |
| __ Cmp(input, i.InputInt32(index + 0)); |
| __ B(eq, GetLabel(i.InputRpo(index + 1))); |
| } |
| AssembleArchJump(i.InputRpo(1)); |
| } |
| |
| |
| void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) { |
| Arm64OperandConverter i(this, instr); |
| UseScratchRegisterScope scope(tasm()); |
| Register input = i.InputRegister32(0); |
| Register temp = scope.AcquireX(); |
| size_t const case_count = instr->InputCount() - 2; |
| Label table; |
| __ Cmp(input, case_count); |
| __ B(hs, GetLabel(i.InputRpo(1))); |
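| // Out-of-range indices branch to the default target above. The table is |
| // a sequence of 4-byte branch instructions, so the index is scaled by 4 |
| // (UXTW #2) when added to the table address. |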
| __ Adr(temp, &table); |
| __ Add(temp, temp, Operand(input, UXTW, 2)); |
| __ Br(temp); |
| __ StartBlockPools(); |
| __ Bind(&table); |
| for (size_t index = 0; index < case_count; ++index) { |
| __ B(GetLabel(i.InputRpo(index + 2))); |
| } |
| __ EndBlockPools(); |
| } |
| |
| void CodeGenerator::FinishFrame(Frame* frame) { |
| frame->AlignFrame(16); |
| CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); |
| |
| // Save FP registers. |
| CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits, |
| descriptor->CalleeSavedFPRegisters()); |
| int saved_count = saves_fp.Count(); |
| if (saved_count != 0) { |
| DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedV().list()); |
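| // An even number of registers is saved so that the stack stays 16-byte |
| // aligned. |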
| DCHECK_EQ(saved_count % 2, 0); |
| frame->AllocateSavedCalleeRegisterSlots(saved_count * |
| (kDoubleSize / kPointerSize)); |
| } |
| |
| CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits, |
| descriptor->CalleeSavedRegisters()); |
| saved_count = saves.Count(); |
| if (saved_count != 0) { |
| DCHECK_EQ(saved_count % 2, 0); |
| frame->AllocateSavedCalleeRegisterSlots(saved_count); |
| } |
| } |
| |
| void CodeGenerator::AssembleConstructFrame() { |
| CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); |
| __ AssertCspAligned(); |
| |
| // The frame has been previously padded in CodeGenerator::FinishFrame(). |
| DCHECK_EQ(frame()->GetTotalFrameSlotCount() % 2, 0); |
| int shrink_slots = |
| frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize(); |
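| // Slots still to be claimed on top of the fixed frame; callee-saved and |
| // return slots are subtracted again below before the frame is built. |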
| |
| CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits, |
| descriptor->CalleeSavedRegisters()); |
| CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits, |
| descriptor->CalleeSavedFPRegisters()); |
| // The number of slots for returns has to be even to ensure the correct stack |
| // alignment. |
| const int returns = RoundUp(frame()->GetReturnSlotCount(), 2); |
| |
| if (frame_access_state()->has_frame()) { |
| // Link the frame |
| if (descriptor->IsJSFunctionCall()) { |
| __ Prologue(); |
| } else { |
| __ Push(lr, fp); |
| __ Mov(fp, __ StackPointer()); |
| } |
| unwinding_info_writer_.MarkFrameConstructed(__ pc_offset()); |
| |
| // Create OSR entry if applicable |
| if (info()->is_osr()) { |
| // TurboFan OSR-compiled functions cannot be entered directly. |
| __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction); |
| |
| // Unoptimized code jumps directly to this entrypoint while the |
| // unoptimized frame is still on the stack. Optimized code uses OSR values |
| // directly from the unoptimized frame. Thus, all that needs to be done is |
| // to allocate the remaining stack slots. |
| if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --"); |
| osr_pc_offset_ = __ pc_offset(); |
| shrink_slots -= osr_helper()->UnoptimizedFrameSlots(); |
| } |
| |
| if (info()->IsWasm() && shrink_slots > 128) { |
| // For WebAssembly functions with big frames we have to do the stack |
| // overflow check before we construct the frame. Otherwise we may not |
| // have enough space on the stack to call the runtime to handle the stack |
| // overflow. |
| Label done; |
| // If the frame is bigger than the stack, we throw the stack overflow |
| // exception unconditionally. Thereby we can avoid the integer overflow |
| // check in the condition code. |
| if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) { |
| UseScratchRegisterScope scope(tasm()); |
| Register scratch = scope.AcquireX(); |
| __ Mov(scratch, Operand(ExternalReference::address_of_real_stack_limit( |
| __ isolate()))); |
| __ Ldr(scratch, MemOperand(scratch)); |
| __ Add(scratch, scratch, shrink_slots * kPointerSize); |
| __ Cmp(__ StackPointer(), scratch); |
| __ B(hs, &done); |
| } |
| |
| if (!frame_access_state()->has_frame()) { |
| __ set_has_frame(true); |
| // There is no need to leave the frame, we will not return from the |
| // runtime call. |
| __ EnterFrame(StackFrame::WASM_COMPILED); |
| } |
| DCHECK(__ StackPointer().Is(csp)); |
| __ AssertStackConsistency(); |
| __ Mov(cp, Smi::kZero); |
| __ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow); |
| // We come from WebAssembly, there are no references for the GC. |
| ReferenceMap* reference_map = new (zone()) ReferenceMap(zone()); |
| RecordSafepoint(reference_map, Safepoint::kSimple, 0, |
| Safepoint::kNoLazyDeopt); |
| if (FLAG_debug_code) { |
| __ Brk(0); |
| } |
| __ AssertStackConsistency(); |
| __ Bind(&done); |
| } |
| |
| // Skip callee-saved slots, which are pushed below. |
| shrink_slots -= saves.Count(); |
| shrink_slots -= saves_fp.Count(); |
| shrink_slots -= returns; |
| |
| // Build the remainder of the frame, including accounting for and filling |
| // in frame-specific header information, i.e. claiming the extra slot that |
| // other platforms explicitly push for STUB (code object) frames and frames |
| // recording their argument count. |
| switch (descriptor->kind()) { |
| case CallDescriptor::kCallJSFunction: |
| if (descriptor->PushArgumentCount()) { |
| __ Claim(shrink_slots + 1); // Claim extra slot for argc. |
| __ Str(kJavaScriptCallArgCountRegister, |
| MemOperand(fp, OptimizedBuiltinFrameConstants::kArgCOffset)); |
| } else { |
| __ Claim(shrink_slots); |
| } |
| break; |
| case CallDescriptor::kCallCodeObject: |
| case CallDescriptor::kCallWasmFunction: { |
| UseScratchRegisterScope temps(tasm()); |
| __ Claim(shrink_slots + 1); // Claim extra slot for frame type marker. |
| Register scratch = temps.AcquireX(); |
| __ Mov(scratch, |
| StackFrame::TypeToMarker(info()->GetOutputStackFrameType())); |
| __ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset)); |
| } break; |
| case CallDescriptor::kCallAddress: |
| __ Claim(shrink_slots); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
| // Save FP registers. |
| DCHECK_IMPLIES(saves_fp.Count() != 0, |
| saves_fp.list() == CPURegList::GetCalleeSavedV().list()); |
| __ PushCPURegList(saves_fp); |
| |
| // Save registers. |
| // TODO(palfia): TF save list is not in sync with |
| // CPURegList::GetCalleeSaved(): x30 is missing. |
| // DCHECK(saves.list() == CPURegList::GetCalleeSaved().list()); |
| __ PushCPURegList(saves); |
| |
| if (returns != 0) { |
| __ Claim(returns); |
| } |
| } |
| |
| void CodeGenerator::AssembleReturn(InstructionOperand* pop) { |
| CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); |
| |
| const int returns = RoundUp(frame()->GetReturnSlotCount(), 2); |
| |
| if (returns != 0) { |
| __ Drop(returns); |
| } |
| |
| // Restore registers. |
| CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits, |
| descriptor->CalleeSavedRegisters()); |
| __ PopCPURegList(saves); |
| |
| // Restore fp registers. |
| CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits, |
| descriptor->CalleeSavedFPRegisters()); |
| __ PopCPURegList(saves_fp); |
| |
| unwinding_info_writer_.MarkBlockWillExit(); |
| |
| Arm64OperandConverter g(this, nullptr); |
| int pop_count = static_cast<int>(descriptor->StackParameterCount()); |
| if (descriptor->IsCFunctionCall()) { |
| AssembleDeconstructFrame(); |
| } else if (frame_access_state()->has_frame()) { |
| // Canonicalize JSFunction return sites for now unless they have a |
| // variable number of stack slot pops. |
| if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) { |
| if (return_label_.is_bound()) { |
| __ B(&return_label_); |
| return; |
| } else { |
| __ Bind(&return_label_); |
| AssembleDeconstructFrame(); |
| } |
| } else { |
| AssembleDeconstructFrame(); |
| } |
| } |
| |
| if (pop->IsImmediate()) { |
| pop_count += g.ToConstant(pop).ToInt32(); |
| __ DropArguments(pop_count); |
| } else { |
| Register pop_reg = g.ToRegister(pop); |
| __ Add(pop_reg, pop_reg, pop_count); |
| __ DropArguments(pop_reg); |
| } |
| |
| __ AssertCspAligned(); |
| __ Ret(); |
| } |
| |
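| // Force emission of any pending constant pool entries before the code is |
| // finalized. |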
| void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); } |
| |
| void CodeGenerator::AssembleMove(InstructionOperand* source, |
| InstructionOperand* destination) { |
| Arm64OperandConverter g(this, nullptr); |
| // Dispatch on the source and destination operand kinds. Not all |
| // combinations are possible. |
| if (source->IsRegister()) { |
| DCHECK(destination->IsRegister() || destination->IsStackSlot()); |
| Register src = g.ToRegister(source); |
| if (destination->IsRegister()) { |
| __ Mov(g.ToRegister(destination), src); |
| } else { |
| __ Str(src, g.ToMemOperand(destination, tasm())); |
| } |
| } else if (source->IsStackSlot()) { |
| MemOperand src = g.ToMemOperand(source, tasm()); |
| DCHECK(destination->IsRegister() || destination->IsStackSlot()); |
| if (destination->IsRegister()) { |
| __ Ldr(g.ToRegister(destination), src); |
| } else { |
| UseScratchRegisterScope scope(tasm()); |
| Register temp = scope.AcquireX(); |
| __ Ldr(temp, src); |
| __ Str(temp, g.ToMemOperand(destination, tasm())); |
| } |
| } else if (source->IsConstant()) { |
| Constant src = g.ToConstant(ConstantOperand::cast(source)); |
| if (destination->IsRegister() || destination->IsStackSlot()) { |
| UseScratchRegisterScope scope(tasm()); |
| Register dst = destination->IsRegister() ? g.ToRegister(destination) |
| : scope.AcquireX(); |
| if (src.type() == Constant::kHeapObject) { |
| Handle<HeapObject> src_object = src.ToHeapObject(); |
| Heap::RootListIndex index; |
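| // Heap constants that are roots are loaded via the root list instead of |
| // embedding the object pointer in the code. |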
| if (IsMaterializableFromRoot(src_object, &index)) { |
| __ LoadRoot(dst, index); |
| } else { |
| __ Mov(dst, src_object); |
| } |
| } else { |
| __ Mov(dst, g.ToImmediate(source)); |
| } |
| if (destination->IsStackSlot()) { |
| __ Str(dst, g.ToMemOperand(destination, tasm())); |
| } |
| } else if (src.type() == Constant::kFloat32) { |
| if (destination->IsFPRegister()) { |
| VRegister dst = g.ToDoubleRegister(destination).S(); |
| __ Fmov(dst, src.ToFloat32()); |
| } else { |
| DCHECK(destination->IsFPStackSlot()); |
| if (bit_cast<int32_t>(src.ToFloat32()) == 0) { |
| __ Str(wzr, g.ToMemOperand(destination, tasm())); |
| } else { |
| UseScratchRegisterScope scope(tasm()); |
| VRegister temp = scope.AcquireS(); |
| __ Fmov(temp, src.ToFloat32()); |
| __ Str(temp, g.ToMemOperand(destination, tasm())); |
| } |
| } |
| } else { |
| DCHECK_EQ(Constant::kFloat64, src.type()); |
| if (destination->IsFPRegister()) { |
| VRegister dst = g.ToDoubleRegister(destination); |
| __ Fmov(dst, src.ToFloat64().value()); |
| } else { |
| DCHECK(destination->IsFPStackSlot()); |
| if (src.ToFloat64().AsUint64() == 0) { |
| __ Str(xzr, g.ToMemOperand(destination, tasm())); |
| } else { |
| UseScratchRegisterScope scope(tasm()); |
| VRegister temp = scope.AcquireD(); |
| __ Fmov(temp, src.ToFloat64().value()); |
| __ Str(temp, g.ToMemOperand(destination, tasm())); |
| } |
| } |
| } |
| } else if (source->IsFPRegister()) { |
| VRegister src = g.ToDoubleRegister(source); |
| if (destination->IsFPRegister()) { |
| VRegister dst = g.ToDoubleRegister(destination); |
| if (destination->IsSimd128Register()) { |
| __ Mov(dst.Q(), src.Q()); |
| } else { |
| __ Mov(dst, src); |
| } |
| } else { |
| DCHECK(destination->IsFPStackSlot()); |
| MemOperand dst = g.ToMemOperand(destination, tasm()); |
| if (destination->IsSimd128StackSlot()) { |
| __ Str(src.Q(), dst); |
| } else { |
| __ Str(src, dst); |
| } |
| } |
| } else if (source->IsFPStackSlot()) { |
| DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot()); |
| MemOperand src = g.ToMemOperand(source, tasm()); |
| if (destination->IsFPRegister()) { |
| VRegister dst = g.ToDoubleRegister(destination); |
| if (destination->IsSimd128Register()) { |
| __ Ldr(dst.Q(), src); |
| } else { |
| __ Ldr(dst, src); |
| } |
| } else { |
| UseScratchRegisterScope scope(tasm()); |
| VRegister temp = scope.AcquireD(); |
| MemOperand dst = g.ToMemOperand(destination, tasm()); |
| if (destination->IsSimd128StackSlot()) { |
| __ Ldr(temp.Q(), src); |
| __ Str(temp.Q(), dst); |
| } else { |
| __ Ldr(temp, src); |
| __ Str(temp, dst); |
| } |
| } |
| } else { |
| UNREACHABLE(); |
| } |
| } |
| |
| |
| void CodeGenerator::AssembleSwap(InstructionOperand* source, |
| InstructionOperand* destination) { |
| Arm64OperandConverter g(this, nullptr); |
| // Dispatch on the source and destination operand kinds. Not all |
| // combinations are possible. |
| if (source->IsRegister()) { |
| // Register-register. |
| UseScratchRegisterScope scope(tasm()); |
| Register temp = scope.AcquireX(); |
| Register src = g.ToRegister(source); |
| if (destination->IsRegister()) { |
| Register dst = g.ToRegister(destination); |
| __ Mov(temp, src); |
| __ Mov(src, dst); |
| __ Mov(dst, temp); |
| } else { |
| DCHECK(destination->IsStackSlot()); |
| MemOperand dst = g.ToMemOperand(destination, tasm()); |
| __ Mov(temp, src); |
| __ Ldr(src, dst); |
| __ Str(temp, dst); |
| } |
| } else if (source->IsStackSlot() || source->IsFPStackSlot()) { |
| UseScratchRegisterScope scope(tasm()); |
| VRegister temp_0 = scope.AcquireD(); |
| VRegister temp_1 = scope.AcquireD(); |
| MemOperand src = g.ToMemOperand(source, tasm()); |
| MemOperand dst = g.ToMemOperand(destination, tasm()); |
| if (source->IsSimd128StackSlot()) { |
| __ Ldr(temp_0.Q(), src); |
| __ Ldr(temp_1.Q(), dst); |
| __ Str(temp_0.Q(), dst); |
| __ Str(temp_1.Q(), src); |
| } else { |
| __ Ldr(temp_0, src); |
| __ Ldr(temp_1, dst); |
| __ Str(temp_0, dst); |
| __ Str(temp_1, src); |
| } |
| } else if (source->IsFPRegister()) { |
| UseScratchRegisterScope scope(tasm()); |
| VRegister temp = scope.AcquireD(); |
| VRegister src = g.ToDoubleRegister(source); |
| if (destination->IsFPRegister()) { |
| VRegister dst = g.ToDoubleRegister(destination); |
| if (source->IsSimd128Register()) { |
| __ Mov(temp.Q(), src.Q()); |
| __ Mov(src.Q(), dst.Q()); |
| __ Mov(dst.Q(), temp.Q()); |
| } else { |
| __ Mov(temp, src); |
| __ Mov(src, dst); |
| __ Mov(dst, temp); |
| } |
| } else { |
| DCHECK( |