| // Copyright 2014 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include "src/codegen/assembler-inl.h" |
| #include "src/codegen/callable.h" |
| #include "src/codegen/macro-assembler.h" |
| #include "src/codegen/mips64/constants-mips64.h" |
| #include "src/codegen/optimized-compilation-info.h" |
| #include "src/compiler/backend/code-generator-impl.h" |
| #include "src/compiler/backend/code-generator.h" |
| #include "src/compiler/backend/gap-resolver.h" |
| #include "src/compiler/node-matchers.h" |
| #include "src/compiler/osr.h" |
| #include "src/heap/heap-inl.h" // crbug.com/v8/8499 |
| #include "src/wasm/wasm-code-manager.h" |
| |
| namespace v8 { |
| namespace internal { |
| namespace compiler { |
| |
| #define __ tasm()-> |
| |
| // TODO(plind): consider renaming these macros. |
| #define TRACE_MSG(msg) \ |
| PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \ |
| __LINE__) |
| |
| #define TRACE_UNIMPL() \ |
| PrintF("UNIMPLEMENTED code_generator_mips64: %s at line %d\n", __FUNCTION__, \ |
| __LINE__) |
| |
| // Adds MIPS64-specific methods for converting InstructionOperands. |
| class MipsOperandConverter final : public InstructionOperandConverter { |
| public: |
| MipsOperandConverter(CodeGenerator* gen, Instruction* instr) |
| : InstructionOperandConverter(gen, instr) {} |
| |
| FloatRegister OutputSingleRegister(size_t index = 0) { |
| return ToSingleRegister(instr_->OutputAt(index)); |
| } |
| |
| FloatRegister InputSingleRegister(size_t index) { |
| return ToSingleRegister(instr_->InputAt(index)); |
| } |
| |
| FloatRegister ToSingleRegister(InstructionOperand* op) { |
| // The single- (float) and double-precision register namespaces are the |
| // same on MIPS64; both are typedefs of FPURegister. |
| return ToDoubleRegister(op); |
| } |
| |
| Register InputOrZeroRegister(size_t index) { |
| if (instr_->InputAt(index)->IsImmediate()) { |
| DCHECK_EQ(0, InputInt32(index)); |
| return zero_reg; |
| } |
| return InputRegister(index); |
| } |
| |
| DoubleRegister InputOrZeroDoubleRegister(size_t index) { |
| if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero; |
| |
| return InputDoubleRegister(index); |
| } |
| |
| DoubleRegister InputOrZeroSingleRegister(size_t index) { |
| if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero; |
| |
| return InputSingleRegister(index); |
| } |
| |
| Operand InputImmediate(size_t index) { |
| Constant constant = ToConstant(instr_->InputAt(index)); |
| switch (constant.type()) { |
| case Constant::kInt32: |
| return Operand(constant.ToInt32()); |
| case Constant::kInt64: |
| return Operand(constant.ToInt64()); |
| case Constant::kFloat32: |
| return Operand::EmbeddedNumber(constant.ToFloat32()); |
| case Constant::kFloat64: |
| return Operand::EmbeddedNumber(constant.ToFloat64().value()); |
| case Constant::kExternalReference: |
| case Constant::kCompressedHeapObject: |
| case Constant::kHeapObject: |
| // TODO(plind): Maybe we should handle ExtRef & HeapObj here? Maybe it is |
| // not done on arm due to the constant pool? |
| break; |
| case Constant::kDelayedStringConstant: |
| return Operand::EmbeddedStringConstant( |
| constant.ToDelayedStringConstant()); |
| case Constant::kRpoNumber: |
| UNREACHABLE(); // TODO(titzer): RPO immediates on mips? |
| break; |
| } |
| UNREACHABLE(); |
| } |
| |
| Operand InputOperand(size_t index) { |
| InstructionOperand* op = instr_->InputAt(index); |
| if (op->IsRegister()) { |
| return Operand(ToRegister(op)); |
| } |
| return InputImmediate(index); |
| } |
| |
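| // Decodes the instruction's addressing mode and materializes a MemOperand |
| // from the inputs starting at *first_index. kMode_MRI consumes two inputs |
| // (base register plus int32 immediate offset) and advances *first_index |
| // past them; kMode_MRR is not implemented yet. |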
| MemOperand MemoryOperand(size_t* first_index) { |
| const size_t index = *first_index; |
| switch (AddressingModeField::decode(instr_->opcode())) { |
| case kMode_None: |
| break; |
| case kMode_MRI: |
| *first_index += 2; |
| return MemOperand(InputRegister(index + 0), InputInt32(index + 1)); |
| case kMode_MRR: |
| // TODO(plind): r6 address mode, to be implemented ... |
| UNREACHABLE(); |
| } |
| UNREACHABLE(); |
| } |
| |
| MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); } |
| |
| MemOperand ToMemOperand(InstructionOperand* op) const { |
| DCHECK_NOT_NULL(op); |
| DCHECK(op->IsStackSlot() || op->IsFPStackSlot()); |
| return SlotToMemOperand(AllocatedOperand::cast(op)->index()); |
| } |
| |
| MemOperand SlotToMemOperand(int slot) const { |
| FrameOffset offset = frame_access_state()->GetFrameOffset(slot); |
| return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset()); |
| } |
| }; |
| |
| static inline bool HasRegisterInput(Instruction* instr, size_t index) { |
| return instr->InputAt(index)->IsRegister(); |
| } |
| |
| namespace { |
| |
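| // Out-of-line slow path of the record-write barrier: it bails out early for |
| // Smi values (when the mode allows it) and for values whose page is not |
| // marked as interesting; otherwise it recomputes the slot address and calls |
| // the record-write stub, saving and restoring ra if the frame was elided. |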
| class OutOfLineRecordWrite final : public OutOfLineCode { |
| public: |
| OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index, |
| Register value, Register scratch0, Register scratch1, |
| RecordWriteMode mode, StubCallMode stub_mode) |
| : OutOfLineCode(gen), |
| object_(object), |
| index_(index), |
| value_(value), |
| scratch0_(scratch0), |
| scratch1_(scratch1), |
| mode_(mode), |
| stub_mode_(stub_mode), |
| must_save_lr_(!gen->frame_access_state()->has_frame()), |
| zone_(gen->zone()) {} |
| |
| void Generate() final { |
| if (mode_ > RecordWriteMode::kValueIsPointer) { |
| __ JumpIfSmi(value_, exit()); |
| } |
| __ CheckPageFlag(value_, scratch0_, |
| MemoryChunk::kPointersToHereAreInterestingMask, eq, |
| exit()); |
| __ Daddu(scratch1_, object_, index_); |
| RememberedSetAction const remembered_set_action = |
| mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET |
| : OMIT_REMEMBERED_SET; |
| SaveFPRegsMode const save_fp_mode = |
| frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; |
| if (must_save_lr_) { |
| // We need to save and restore ra if the frame was elided. |
| __ Push(ra); |
| } |
| if (mode_ == RecordWriteMode::kValueIsEphemeronKey) { |
| __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode); |
| } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) { |
| // A direct call to a wasm runtime stub defined in this module. |
| // Just encode the stub index. This will be patched when the code |
| // is added to the native module and copied into wasm code space. |
| __ CallRecordWriteStub(object_, scratch1_, remembered_set_action, |
| save_fp_mode, wasm::WasmCode::kWasmRecordWrite); |
| } else { |
| __ CallRecordWriteStub(object_, scratch1_, remembered_set_action, |
| save_fp_mode); |
| } |
| if (must_save_lr_) { |
| __ Pop(ra); |
| } |
| } |
| |
| private: |
| Register const object_; |
| Register const index_; |
| Register const value_; |
| Register const scratch0_; |
| Register const scratch1_; |
| RecordWriteMode const mode_; |
| StubCallMode const stub_mode_; |
| bool must_save_lr_; |
| Zone* zone_; |
| }; |
| |
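| // Defines an OutOfLineCode subclass whose Generate() simply forwards to the |
| // corresponding TurboAssembler helper (e.g. OutOfLineFloat32Max calls |
| // Float32MaxOutOfLine). The float min/max fast paths below branch here for |
| // the hard cases, such as NaN inputs. |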
| #define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T) \ |
| class ool_name final : public OutOfLineCode { \ |
| public: \ |
| ool_name(CodeGenerator* gen, T dst, T src1, T src2) \ |
| : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \ |
| \ |
| void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); } \ |
| \ |
| private: \ |
| T const dst_; \ |
| T const src1_; \ |
| T const src2_; \ |
| } |
| |
| CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister); |
| CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister); |
| CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister); |
| CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister); |
| |
| #undef CREATE_OOL_CLASS |
| |
| Condition FlagsConditionToConditionCmp(FlagsCondition condition) { |
| switch (condition) { |
| case kEqual: |
| return eq; |
| case kNotEqual: |
| return ne; |
| case kSignedLessThan: |
| return lt; |
| case kSignedGreaterThanOrEqual: |
| return ge; |
| case kSignedLessThanOrEqual: |
| return le; |
| case kSignedGreaterThan: |
| return gt; |
| case kUnsignedLessThan: |
| return lo; |
| case kUnsignedGreaterThanOrEqual: |
| return hs; |
| case kUnsignedLessThanOrEqual: |
| return ls; |
| case kUnsignedGreaterThan: |
| return hi; |
| case kUnorderedEqual: |
| case kUnorderedNotEqual: |
| break; |
| default: |
| break; |
| } |
| UNREACHABLE(); |
| } |
| |
| Condition FlagsConditionToConditionTst(FlagsCondition condition) { |
| switch (condition) { |
| case kNotEqual: |
| return ne; |
| case kEqual: |
| return eq; |
| default: |
| break; |
| } |
| UNREACHABLE(); |
| } |
| |
| Condition FlagsConditionToConditionOvf(FlagsCondition condition) { |
| switch (condition) { |
| case kOverflow: |
| return ne; |
| case kNotOverflow: |
| return eq; |
| default: |
| break; |
| } |
| UNREACHABLE(); |
| } |
| |
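| // Maps a FlagsCondition to an FPU compare condition plus a branch polarity. |
| // The FPU only provides the "ordered" predicates, so e.g. kNotEqual is |
| // encoded as EQ with |predicate| = false: the caller then branches on the |
| // compare flag being clear instead of set. |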
| FPUCondition FlagsConditionToConditionCmpFPU( |
| bool& predicate, // NOLINT(runtime/references) |
| FlagsCondition condition) { |
| switch (condition) { |
| case kEqual: |
| predicate = true; |
| return EQ; |
| case kNotEqual: |
| predicate = false; |
| return EQ; |
| case kUnsignedLessThan: |
| predicate = true; |
| return OLT; |
| case kUnsignedGreaterThanOrEqual: |
| predicate = false; |
| return OLT; |
| case kUnsignedLessThanOrEqual: |
| predicate = true; |
| return OLE; |
| case kUnsignedGreaterThan: |
| predicate = false; |
| return OLE; |
| case kUnorderedEqual: |
| case kUnorderedNotEqual: |
| predicate = true; |
| break; |
| default: |
| predicate = true; |
| break; |
| } |
| UNREACHABLE(); |
| } |
| |
| void EmitWordLoadPoisoningIfNeeded( |
| CodeGenerator* codegen, InstructionCode opcode, Instruction* instr, |
| MipsOperandConverter& i) { // NOLINT(runtime/references) |
| const MemoryAccessMode access_mode = |
| static_cast<MemoryAccessMode>(MiscField::decode(opcode)); |
| if (access_mode == kMemoryAccessPoisoned) { |
| Register value = i.OutputRegister(); |
| codegen->tasm()->And(value, value, kSpeculationPoisonRegister); |
| } |
| } |
| |
| } // namespace |
| |
| #define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \ |
| do { \ |
| __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \ |
| __ sync(); \ |
| } while (0) |
| |
| #define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \ |
| do { \ |
| __ sync(); \ |
| __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \ |
| __ sync(); \ |
| } while (0) |
| |
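| // Emits a load-linked/store-conditional retry loop. The store conditional |
| // writes 1 to its source register on success and 0 on failure, so the |
| // final branch retries until the read-modify-write happened atomically; |
| // the sync instructions act as full memory barriers. For an atomic word |
| // add the emitted sequence is roughly: |
| // daddu t0, base, offset |
| // sync |
| // loop: ll out, 0(t0) |
| // addu t1, out, value |
| // sc t1, 0(t0) |
| // beq t1, zero_reg, loop |
| // sync |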
| #define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \ |
| do { \ |
| Label binop; \ |
| __ Daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ |
| __ sync(); \ |
| __ bind(&binop); \ |
| __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ |
| __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \ |
| Operand(i.InputRegister(2))); \ |
| __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ |
| __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \ |
| __ sync(); \ |
| } while (0) |
| |
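| // Subword (8- or 16-bit) variant of the LL/SC loop above. LL/SC only works |
| // on naturally aligned words, so the address is split into an aligned base |
| // (Dsubu) and a byte offset (andi with 0x3 or 0x7), which sll by 3 turns |
| // into a bit offset (e.g. byte 5 of a doubleword -> bit offset 40); |
| // ExtractBits/InsertBits then update only the target subword within the |
| // linked word. |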
| #define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \ |
| size, bin_instr, representation) \ |
| do { \ |
| Label binop; \ |
| __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ |
| if (representation == 32) { \ |
| __ andi(i.TempRegister(3), i.TempRegister(0), 0x3); \ |
| } else { \ |
| DCHECK_EQ(representation, 64); \ |
| __ andi(i.TempRegister(3), i.TempRegister(0), 0x7); \ |
| } \ |
| __ Dsubu(i.TempRegister(0), i.TempRegister(0), \ |
| Operand(i.TempRegister(3))); \ |
| __ sll(i.TempRegister(3), i.TempRegister(3), 3); \ |
| __ sync(); \ |
| __ bind(&binop); \ |
| __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ |
| __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \ |
| size, sign_extend); \ |
| __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \ |
| Operand(i.InputRegister(2))); \ |
| __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \ |
| size); \ |
| __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ |
| __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \ |
| __ sync(); \ |
| } while (0) |
| |
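| // Atomic exchange: the same LL/SC retry loop, except the new value is moved |
| // into place unchanged instead of being combined with the old value. |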
| #define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_linked, store_conditional) \ |
| do { \ |
| Label exchange; \ |
| __ sync(); \ |
| __ bind(&exchange); \ |
| __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ |
| __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ |
| __ mov(i.TempRegister(1), i.InputRegister(2)); \ |
| __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ |
| __ BranchShort(&exchange, eq, i.TempRegister(1), Operand(zero_reg)); \ |
| __ sync(); \ |
| } while (0) |
| |
| #define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT( \ |
| load_linked, store_conditional, sign_extend, size, representation) \ |
| do { \ |
| Label exchange; \ |
| __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ |
| if (representation == 32) { \ |
| __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \ |
| } else { \ |
| DCHECK_EQ(representation, 64); \ |
| __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \ |
| } \ |
| __ Dsubu(i.TempRegister(0), i.TempRegister(0), \ |
| Operand(i.TempRegister(1))); \ |
| __ sll(i.TempRegister(1), i.TempRegister(1), 3); \ |
| __ sync(); \ |
| __ bind(&exchange); \ |
| __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ |
| __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ |
| size, sign_extend); \ |
| __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \ |
| size); \ |
| __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ |
| __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg)); \ |
| __ sync(); \ |
| } while (0) |
| |
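| // Atomic compare-exchange: load the current value, bail out to |exit| if it |
| // differs from the expected value in InputRegister(2), otherwise try to |
| // store the replacement and retry until the store conditional succeeds. |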
| #define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \ |
| store_conditional) \ |
| do { \ |
| Label compareExchange; \ |
| Label exit; \ |
| __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ |
| __ sync(); \ |
| __ bind(&compareExchange); \ |
| __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ |
| __ BranchShort(&exit, ne, i.InputRegister(2), \ |
| Operand(i.OutputRegister(0))); \ |
| __ mov(i.TempRegister(2), i.InputRegister(3)); \ |
| __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ |
| __ BranchShort(&compareExchange, eq, i.TempRegister(2), \ |
| Operand(zero_reg)); \ |
| __ bind(&exit); \ |
| __ sync(); \ |
| } while (0) |
| |
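| // Subword compare-exchange. The loaded subword is sign-extended from its |
| // position in the word and the expected value is sign-extended from bit 0, |
| // so the comparison operates on consistently extended values. |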
| #define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \ |
| load_linked, store_conditional, sign_extend, size, representation) \ |
| do { \ |
| Label compareExchange; \ |
| Label exit; \ |
| __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ |
| if (representation == 32) { \ |
| __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \ |
| } else { \ |
| DCHECK_EQ(representation, 64); \ |
| __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \ |
| } \ |
| __ Dsubu(i.TempRegister(0), i.TempRegister(0), \ |
| Operand(i.TempRegister(1))); \ |
| __ sll(i.TempRegister(1), i.TempRegister(1), 3); \ |
| __ sync(); \ |
| __ bind(&compareExchange); \ |
| __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ |
| __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ |
| size, sign_extend); \ |
| __ ExtractBits(i.InputRegister(2), i.InputRegister(2), zero_reg, size, \ |
| sign_extend); \ |
| __ BranchShort(&exit, ne, i.InputRegister(2), \ |
| Operand(i.OutputRegister(0))); \ |
| __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \ |
| size); \ |
| __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ |
| __ BranchShort(&compareExchange, eq, i.TempRegister(2), \ |
| Operand(zero_reg)); \ |
| __ bind(&exit); \ |
| __ sync(); \ |
| } while (0) |
| |
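| // Calls one of the ieee754 runtime functions through the C ABI, passing no |
| // integer arguments and two (binop) or one (unop) double arguments, and |
| // reads the result back from the double result register. |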
| #define ASSEMBLE_IEEE754_BINOP(name) \ |
| do { \ |
| FrameScope scope(tasm(), StackFrame::MANUAL); \ |
| __ PrepareCallCFunction(0, 2, kScratchReg); \ |
| __ MovToFloatParameters(i.InputDoubleRegister(0), \ |
| i.InputDoubleRegister(1)); \ |
| __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \ |
| /* Move the result into the double result register. */ \ |
| __ MovFromFloatResult(i.OutputDoubleRegister()); \ |
| } while (0) |
| |
| #define ASSEMBLE_IEEE754_UNOP(name) \ |
| do { \ |
| FrameScope scope(tasm(), StackFrame::MANUAL); \ |
| __ PrepareCallCFunction(0, 1, kScratchReg); \ |
| __ MovToFloatParameter(i.InputDoubleRegister(0)); \ |
| __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ |
| /* Move the result into the double result register. */ \ |
| __ MovFromFloatResult(i.OutputDoubleRegister()); \ |
| } while (0) |
| |
| void CodeGenerator::AssembleDeconstructFrame() { |
| __ mov(sp, fp); |
| __ Pop(ra, fp); |
| } |
| |
| void CodeGenerator::AssemblePrepareTailCall() { |
| if (frame_access_state()->has_frame()) { |
| __ Ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); |
| __ Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| } |
| frame_access_state()->SetFrameAccessToSP(); |
| } |
| |
| void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg, |
| Register scratch1, |
| Register scratch2, |
| Register scratch3) { |
| DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3)); |
| Label done; |
| |
| // Check if current frame is an arguments adaptor frame. |
| __ Ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| __ Branch(&done, ne, scratch3, |
| Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); |
| |
| // Load the arguments count from the current arguments adaptor frame (note |
| // that it does not include the receiver). |
| Register caller_args_count_reg = scratch1; |
| __ Ld(caller_args_count_reg, |
| MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| __ SmiUntag(caller_args_count_reg); |
| |
| ParameterCount callee_args_count(args_reg); |
| __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2, |
| scratch3); |
| __ bind(&done); |
| } |
| |
| namespace { |
| |
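| // Grows or (optionally) shrinks the stack so that sp lands just below the |
| // first unused slot before a tail call, keeping the FrameAccessState's SP |
| // delta in sync with the adjustment. |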
| void AdjustStackPointerForTailCall(TurboAssembler* tasm, |
| FrameAccessState* state, |
| int new_slot_above_sp, |
| bool allow_shrinkage = true) { |
| int current_sp_offset = state->GetSPToFPSlotCount() + |
| StandardFrameConstants::kFixedSlotCountAboveFp; |
| int stack_slot_delta = new_slot_above_sp - current_sp_offset; |
| if (stack_slot_delta > 0) { |
| tasm->Dsubu(sp, sp, stack_slot_delta * kSystemPointerSize); |
| state->IncreaseSPDelta(stack_slot_delta); |
| } else if (allow_shrinkage && stack_slot_delta < 0) { |
| tasm->Daddu(sp, sp, -stack_slot_delta * kSystemPointerSize); |
| state->IncreaseSPDelta(stack_slot_delta); |
| } |
| } |
| |
| } // namespace |
| |
| void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, |
| int first_unused_stack_slot) { |
| AdjustStackPointerForTailCall(tasm(), frame_access_state(), |
| first_unused_stack_slot, false); |
| } |
| |
| void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, |
| int first_unused_stack_slot) { |
| AdjustStackPointerForTailCall(tasm(), frame_access_state(), |
| first_unused_stack_slot); |
| } |
| |
| // Check that {kJavaScriptCallCodeStartRegister} is correct. |
| void CodeGenerator::AssembleCodeStartRegisterCheck() { |
| __ ComputeCodeStartAddress(kScratchReg); |
| __ Assert(eq, AbortReason::kWrongFunctionCodeStart, |
| kJavaScriptCallCodeStartRegister, Operand(kScratchReg)); |
| } |
| |
| // Check if the code object is marked for deoptimization. If it is, jump to |
| // the CompileLazyDeoptimizedCode builtin. In order to do this we need to: |
| // 1. read from memory the word that contains the marking bit, which can be |
| // found in the flags of the referenced {CodeDataContainer} object; |
| // 2. test kMarkedForDeoptimizationBit in those flags; and |
| // 3. if it is not zero, jump to the builtin. |
| void CodeGenerator::BailoutIfDeoptimized() { |
| int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize; |
| __ Ld(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset)); |
| __ Lw(kScratchReg, |
| FieldMemOperand(kScratchReg, |
| CodeDataContainer::kKindSpecificFlagsOffset)); |
| __ And(kScratchReg, kScratchReg, |
| Operand(1 << Code::kMarkedForDeoptimizationBit)); |
| __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode), |
| RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg)); |
| } |
| |
| void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() { |
| // Calculate a mask which has all bits set in the normal case, but has all |
| // bits cleared if we are speculatively executing the wrong PC. |
| // difference = (current - expected) | (expected - current) |
| // poison = ~(difference >> (kBitsPerSystemPointer - 1)) |
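| // If the PC is correct, both differences are zero, the arithmetic shift |
| // produces 0, and nor turns that into an all-ones mask (a no-op when |
| // ANDed). Otherwise one difference is negative, the shift smears its sign |
| // bit across the word, and nor yields an all-zero poison mask. |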
| __ ComputeCodeStartAddress(kScratchReg); |
| __ Move(kSpeculationPoisonRegister, kScratchReg); |
| __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister, |
| kJavaScriptCallCodeStartRegister); |
| __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister, |
| kScratchReg); |
| __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister, |
| kJavaScriptCallCodeStartRegister); |
| __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister, |
| kBitsPerSystemPointer - 1); |
| __ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister, |
| kSpeculationPoisonRegister); |
| } |
| |
| void CodeGenerator::AssembleRegisterArgumentPoisoning() { |
| __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister); |
| __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister); |
| __ And(sp, sp, kSpeculationPoisonRegister); |
| } |
| |
| // Assembles an instruction after register allocation, producing machine code. |
| CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( |
| Instruction* instr) { |
| MipsOperandConverter i(this, instr); |
| InstructionCode opcode = instr->opcode(); |
| ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode); |
| switch (arch_opcode) { |
| case kArchCallCodeObject: { |
| if (instr->InputAt(0)->IsImmediate()) { |
| __ Call(i.InputCode(0), RelocInfo::CODE_TARGET); |
| } else { |
| Register reg = i.InputRegister(0); |
| DCHECK_IMPLIES( |
| HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister), |
| reg == kJavaScriptCallCodeStartRegister); |
| __ daddiu(reg, reg, Code::kHeaderSize - kHeapObjectTag); |
| __ Call(reg); |
| } |
| RecordCallPosition(instr); |
| frame_access_state()->ClearSPDelta(); |
| break; |
| } |
| case kArchCallBuiltinPointer: { |
| DCHECK(!instr->InputAt(0)->IsImmediate()); |
| Register builtin_index = i.InputRegister(0); |
| __ CallBuiltinByIndex(builtin_index); |
| RecordCallPosition(instr); |
| frame_access_state()->ClearSPDelta(); |
| break; |
| } |
| case kArchCallWasmFunction: { |
| if (instr->InputAt(0)->IsImmediate()) { |
| Constant constant = i.ToConstant(instr->InputAt(0)); |
| Address wasm_code = static_cast<Address>(constant.ToInt64()); |
| __ Call(wasm_code, constant.rmode()); |
| } else { |
| __ daddiu(kScratchReg, i.InputRegister(0), 0); |
| __ Call(kScratchReg); |
| } |
| RecordCallPosition(instr); |
| frame_access_state()->ClearSPDelta(); |
| break; |
| } |
| case kArchTailCallCodeObjectFromJSFunction: |
| case kArchTailCallCodeObject: { |
| if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) { |
| AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister, |
| i.TempRegister(0), i.TempRegister(1), |
| i.TempRegister(2)); |
| } |
| if (instr->InputAt(0)->IsImmediate()) { |
| __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET); |
| } else { |
| Register reg = i.InputRegister(0); |
| DCHECK_IMPLIES( |
| HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister), |
| reg == kJavaScriptCallCodeStartRegister); |
| __ daddiu(reg, reg, Code::kHeaderSize - kHeapObjectTag); |
| __ Jump(reg); |
| } |
| frame_access_state()->ClearSPDelta(); |
| frame_access_state()->SetFrameAccessToDefault(); |
| break; |
| } |
| case kArchTailCallWasm: { |
| if (instr->InputAt(0)->IsImmediate()) { |
| Constant constant = i.ToConstant(instr->InputAt(0)); |
| Address wasm_code = static_cast<Address>(constant.ToInt64()); |
| __ Jump(wasm_code, constant.rmode()); |
| } else { |
| __ daddiu(kScratchReg, i.InputRegister(0), 0); |
| __ Jump(kScratchReg); |
| } |
| frame_access_state()->ClearSPDelta(); |
| frame_access_state()->SetFrameAccessToDefault(); |
| break; |
| } |
| case kArchTailCallAddress: { |
| CHECK(!instr->InputAt(0)->IsImmediate()); |
| Register reg = i.InputRegister(0); |
| DCHECK_IMPLIES( |
| HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister), |
| reg == kJavaScriptCallCodeStartRegister); |
| __ Jump(reg); |
| frame_access_state()->ClearSPDelta(); |
| frame_access_state()->SetFrameAccessToDefault(); |
| break; |
| } |
| case kArchCallJSFunction: { |
| Register func = i.InputRegister(0); |
| if (FLAG_debug_code) { |
| // Check the function's context matches the context argument. |
| __ Ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset)); |
| __ Assert(eq, AbortReason::kWrongFunctionContext, cp, |
| Operand(kScratchReg)); |
| } |
| static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); |
| __ Ld(a2, FieldMemOperand(func, JSFunction::kCodeOffset)); |
| __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| __ Call(a2); |
| RecordCallPosition(instr); |
| frame_access_state()->ClearSPDelta(); |
| break; |
| } |
| case kArchPrepareCallCFunction: { |
| int const num_parameters = MiscField::decode(instr->opcode()); |
| __ PrepareCallCFunction(num_parameters, kScratchReg); |
| // Frame alignment requires using FP-relative frame addressing. |
| frame_access_state()->SetFrameAccessToFP(); |
| break; |
| } |
| case kArchSaveCallerRegisters: { |
| fp_mode_ = |
| static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())); |
| DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs); |
| // kReturnRegister0 should have been saved before entering the stub. |
| int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0); |
| DCHECK(IsAligned(bytes, kSystemPointerSize)); |
| DCHECK_EQ(0, frame_access_state()->sp_delta()); |
| frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize); |
| DCHECK(!caller_registers_saved_); |
| caller_registers_saved_ = true; |
| break; |
| } |
| case kArchRestoreCallerRegisters: { |
| DCHECK(fp_mode_ == |
| static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()))); |
| DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs); |
| // Don't overwrite the returned value. |
| int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0); |
| frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize)); |
| DCHECK_EQ(0, frame_access_state()->sp_delta()); |
| DCHECK(caller_registers_saved_); |
| caller_registers_saved_ = false; |
| break; |
| } |
| case kArchPrepareTailCall: |
| AssemblePrepareTailCall(); |
| break; |
| case kArchCallCFunction: { |
| int const num_parameters = MiscField::decode(instr->opcode()); |
| Label return_location; |
| if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) { |
| // Put the return address in a stack slot. |
| __ LoadAddress(kScratchReg, &return_location); |
| __ sd(kScratchReg, |
| MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); |
| } |
| if (instr->InputAt(0)->IsImmediate()) { |
| ExternalReference ref = i.InputExternalReference(0); |
| __ CallCFunction(ref, num_parameters); |
| } else { |
| Register func = i.InputRegister(0); |
| __ CallCFunction(func, num_parameters); |
| } |
| __ bind(&return_location); |
| RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt); |
| frame_access_state()->SetFrameAccessToDefault(); |
| // Ideally, we should decrement SP delta to match the change of stack |
| // pointer in CallCFunction. However, for certain architectures (e.g. |
| // ARM), there may be more strict alignment requirement, causing old SP |
| // to be saved on the stack. In those cases, we can not calculate the SP |
| // delta statically. |
| frame_access_state()->ClearSPDelta(); |
| if (caller_registers_saved_) { |
| // Need to re-sync SP delta introduced in kArchSaveCallerRegisters. |
| // Here, we assume the sequence to be: |
| // kArchSaveCallerRegisters; |
| // kArchCallCFunction; |
| // kArchRestoreCallerRegisters; |
| int bytes = |
| __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0); |
| frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize); |
| } |
| break; |
| } |
| case kArchJmp: |
| AssembleArchJump(i.InputRpo(0)); |
| break; |
| case kArchBinarySearchSwitch: |
| AssembleArchBinarySearchSwitch(instr); |
| break; |
| case kArchLookupSwitch: |
| AssembleArchLookupSwitch(instr); |
| break; |
| case kArchTableSwitch: |
| AssembleArchTableSwitch(instr); |
| break; |
| case kArchAbortCSAAssert: |
| DCHECK(i.InputRegister(0) == a0); |
| { |
| // We don't actually want to generate a pile of code for this, so just |
| // claim there is a stack frame, without generating one. |
| FrameScope scope(tasm(), StackFrame::NONE); |
| __ Call( |
| isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert), |
| RelocInfo::CODE_TARGET); |
| } |
| __ stop(); |
| break; |
| case kArchDebugBreak: |
| __ stop(); |
| break; |
| case kArchComment: |
| __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0))); |
| break; |
| case kArchNop: |
| case kArchThrowTerminator: |
| // don't emit code for nops. |
| break; |
| case kArchDeoptimize: { |
| int deopt_state_id = |
| BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore()); |
| CodeGenResult result = |
| AssembleDeoptimizerCall(deopt_state_id, current_source_position_); |
| if (result != kSuccess) return result; |
| break; |
| } |
| case kArchRet: |
| AssembleReturn(instr->InputAt(0)); |
| break; |
| case kArchStackPointer: |
| __ mov(i.OutputRegister(), sp); |
| break; |
| case kArchFramePointer: |
| __ mov(i.OutputRegister(), fp); |
| break; |
| case kArchParentFramePointer: |
| if (frame_access_state()->has_frame()) { |
| __ Ld(i.OutputRegister(), MemOperand(fp, 0)); |
| } else { |
| __ mov(i.OutputRegister(), fp); |
| } |
| break; |
| case kArchTruncateDoubleToI: |
| __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(), |
| i.InputDoubleRegister(0), DetermineStubCallMode()); |
| break; |
| case kArchStoreWithWriteBarrier: { |
| RecordWriteMode mode = |
| static_cast<RecordWriteMode>(MiscField::decode(instr->opcode())); |
| Register object = i.InputRegister(0); |
| Register index = i.InputRegister(1); |
| Register value = i.InputRegister(2); |
| Register scratch0 = i.TempRegister(0); |
| Register scratch1 = i.TempRegister(1); |
| auto ool = new (zone()) |
| OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1, |
| mode, DetermineStubCallMode()); |
| __ Daddu(kScratchReg, object, index); |
| __ Sd(value, MemOperand(kScratchReg)); |
| __ CheckPageFlag(object, scratch0, |
| MemoryChunk::kPointersFromHereAreInterestingMask, ne, |
| ool->entry()); |
| __ bind(ool->exit()); |
| break; |
| } |
| case kArchStackSlot: { |
| FrameOffset offset = |
| frame_access_state()->GetFrameOffset(i.InputInt32(0)); |
| Register base_reg = offset.from_stack_pointer() ? sp : fp; |
| __ Daddu(i.OutputRegister(), base_reg, Operand(offset.offset())); |
| int alignment = i.InputInt32(1); |
| DCHECK(alignment == 0 || alignment == 4 || alignment == 8 || |
| alignment == 16); |
| if (FLAG_debug_code && alignment > 0) { |
| // Verify that the output register is properly aligned. |
| __ And(kScratchReg, i.OutputRegister(), |
| Operand(kSystemPointerSize - 1)); |
| __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg, |
| Operand(zero_reg)); |
| } |
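| // If the slot address is not aligned as requested, round the result up by |
| // the distance to the next aligned boundary. |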
| if (alignment == 2 * kSystemPointerSize) { |
| Label done; |
| __ Daddu(kScratchReg, base_reg, Operand(offset.offset())); |
| __ And(kScratchReg, kScratchReg, Operand(alignment - 1)); |
| __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg)); |
| __ Daddu(i.OutputRegister(), i.OutputRegister(), kSystemPointerSize); |
| __ bind(&done); |
| } else if (alignment > 2 * kSystemPointerSize) { |
| Label done; |
| __ Daddu(kScratchReg, base_reg, Operand(offset.offset())); |
| __ And(kScratchReg, kScratchReg, Operand(alignment - 1)); |
| __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg)); |
| __ li(kScratchReg2, alignment); |
| __ Dsubu(kScratchReg2, kScratchReg2, Operand(kScratchReg)); |
| __ Daddu(i.OutputRegister(), i.OutputRegister(), kScratchReg2); |
| __ bind(&done); |
| } |
| |
| break; |
| } |
| case kArchWordPoisonOnSpeculation: |
| __ And(i.OutputRegister(), i.InputRegister(0), |
| kSpeculationPoisonRegister); |
| break; |
| case kIeee754Float64Acos: |
| ASSEMBLE_IEEE754_UNOP(acos); |
| break; |
| case kIeee754Float64Acosh: |
| ASSEMBLE_IEEE754_UNOP(acosh); |
| break; |
| case kIeee754Float64Asin: |
| ASSEMBLE_IEEE754_UNOP(asin); |
| break; |
| case kIeee754Float64Asinh: |
| ASSEMBLE_IEEE754_UNOP(asinh); |
| break; |
| case kIeee754Float64Atan: |
| ASSEMBLE_IEEE754_UNOP(atan); |
| break; |
| case kIeee754Float64Atanh: |
| ASSEMBLE_IEEE754_UNOP(atanh); |
| break; |
| case kIeee754Float64Atan2: |
| ASSEMBLE_IEEE754_BINOP(atan2); |
| break; |
| case kIeee754Float64Cos: |
| ASSEMBLE_IEEE754_UNOP(cos); |
| break; |
| case kIeee754Float64Cosh: |
| ASSEMBLE_IEEE754_UNOP(cosh); |
| break; |
| case kIeee754Float64Cbrt: |
| ASSEMBLE_IEEE754_UNOP(cbrt); |
| break; |
| case kIeee754Float64Exp: |
| ASSEMBLE_IEEE754_UNOP(exp); |
| break; |
| case kIeee754Float64Expm1: |
| ASSEMBLE_IEEE754_UNOP(expm1); |
| break; |
| case kIeee754Float64Log: |
| ASSEMBLE_IEEE754_UNOP(log); |
| break; |
| case kIeee754Float64Log1p: |
| ASSEMBLE_IEEE754_UNOP(log1p); |
| break; |
| case kIeee754Float64Log2: |
| ASSEMBLE_IEEE754_UNOP(log2); |
| break; |
| case kIeee754Float64Log10: |
| ASSEMBLE_IEEE754_UNOP(log10); |
| break; |
| case kIeee754Float64Pow: |
| ASSEMBLE_IEEE754_BINOP(pow); |
| break; |
| case kIeee754Float64Sin: |
| ASSEMBLE_IEEE754_UNOP(sin); |
| break; |
| case kIeee754Float64Sinh: |
| ASSEMBLE_IEEE754_UNOP(sinh); |
| break; |
| case kIeee754Float64Tan: |
| ASSEMBLE_IEEE754_UNOP(tan); |
| break; |
| case kIeee754Float64Tanh: |
| ASSEMBLE_IEEE754_UNOP(tanh); |
| break; |
| case kMips64Add: |
| __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| break; |
| case kMips64Dadd: |
| __ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| break; |
| case kMips64DaddOvf: |
| __ DaddOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), |
| kScratchReg); |
| break; |
| case kMips64Sub: |
| __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| break; |
| case kMips64Dsub: |
| __ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| break; |
| case kMips64DsubOvf: |
| __ DsubOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), |
| kScratchReg); |
| break; |
| case kMips64Mul: |
| __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| break; |
| case kMips64MulOvf: |
| __ MulOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), |
| kScratchReg); |
| break; |
| case kMips64MulHigh: |
| __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| break; |
| case kMips64MulHighU: |
| __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| break; |
| case kMips64DMulHigh: |
| __ Dmulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| break; |
| case kMips64Div: |
| __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| if (kArchVariant == kMips64r6) { |
| __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); |
| } else { |
| __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1)); |
| } |
| break; |
| case kMips64DivU: |
| __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| if (kArchVariant == kMips64r6) { |
| __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); |
| } else { |
| __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1)); |
| } |
| break; |
| case kMips64Mod: |
| __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| break; |
| case kMips64ModU: |
| __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| break; |
| case kMips64Dmul: |
| __ Dmul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| break; |
| case kMips64Ddiv: |
| __ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| if (kArchVariant == kMips64r6) { |
| __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); |
| } else { |
| __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1)); |
| } |
| break; |
| case kMips64DdivU: |
| __ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| if (kArchVariant == kMips64r6) { |
| __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); |
| } else { |
| __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1)); |
| } |
| break; |
| case kMips64Dmod: |
| __ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| break; |
| case kMips64DmodU: |
| __ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| break; |
| case kMips64Dlsa: |
| DCHECK(instr->InputAt(2)->IsImmediate()); |
| __ Dlsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), |
| i.InputInt8(2)); |
| break; |
| case kMips64Lsa: |
| DCHECK(instr->InputAt(2)->IsImmediate()); |
| __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), |
| i.InputInt8(2)); |
| break; |
| case kMips64And: |
| __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| break; |
| case kMips64And32: |
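| // On MIPS64, sll with a zero shift amount sign-extends the low 32 bits, |
| // canonicalizing the operands as 32-bit values before the bitwise op. |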
| if (instr->InputAt(1)->IsRegister()) { |
| __ sll(i.InputRegister(0), i.InputRegister(0), 0x0); |
| __ sll(i.InputRegister(1), i.InputRegister(1), 0x0); |
| __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| } else { |
| __ sll(i.InputRegister(0), i.InputRegister(0), 0x0); |
| __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| } |
| break; |
| case kMips64Or: |
| __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| break; |
| case kMips64Or32: |
| if (instr->InputAt(1)->IsRegister()) { |
| __ sll(i.InputRegister(0), i.InputRegister(0), 0x0); |
| __ sll(i.InputRegister(1), i.InputRegister(1), 0x0); |
| __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| } else { |
| __ sll(i.InputRegister(0), i.InputRegister(0), 0x0); |
| __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| } |
| break; |
| case kMips64Nor: |
| if (instr->InputAt(1)->IsRegister()) { |
| __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| } else { |
| DCHECK_EQ(0, i.InputOperand(1).immediate()); |
| __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg); |
| } |
| break; |
| case kMips64Nor32: |
| if (instr->InputAt(1)->IsRegister()) { |
| __ sll(i.InputRegister(0), i.InputRegister(0), 0x0); |
| __ sll(i.InputRegister(1), i.InputRegister(1), 0x0); |
| __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| } else { |
| DCHECK_EQ(0, i.InputOperand(1).immediate()); |
| __ sll(i.InputRegister(0), i.InputRegister(0), 0x0); |
| __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg); |
| } |
| break; |
| case kMips64Xor: |
| __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| break; |
| case kMips64Xor32: |
| if (instr->InputAt(1)->IsRegister()) { |
| __ sll(i.InputRegister(0), i.InputRegister(0), 0x0); |
| __ sll(i.InputRegister(1), i.InputRegister(1), 0x0); |
| __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| } else { |
| __ sll(i.InputRegister(0), i.InputRegister(0), 0x0); |
| __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| } |
| break; |
| case kMips64Clz: |
| __ Clz(i.OutputRegister(), i.InputRegister(0)); |
| break; |
| case kMips64Dclz: |
| __ dclz(i.OutputRegister(), i.InputRegister(0)); |
| break; |
| case kMips64Ctz: { |
| Register src = i.InputRegister(0); |
| Register dst = i.OutputRegister(); |
| __ Ctz(dst, src); |
| } break; |
| case kMips64Dctz: { |
| Register src = i.InputRegister(0); |
| Register dst = i.OutputRegister(); |
| __ Dctz(dst, src); |
| } break; |
| case kMips64Popcnt: { |
| Register src = i.InputRegister(0); |
| Register dst = i.OutputRegister(); |
| __ Popcnt(dst, src); |
| } break; |
| case kMips64Dpopcnt: { |
| Register src = i.InputRegister(0); |
| Register dst = i.OutputRegister(); |
| __ Dpopcnt(dst, src); |
| } break; |
| case kMips64Shl: |
| if (instr->InputAt(1)->IsRegister()) { |
| __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); |
| } else { |
| int64_t imm = i.InputOperand(1).immediate(); |
| __ sll(i.OutputRegister(), i.InputRegister(0), |
| static_cast<uint16_t>(imm)); |
| } |
| break; |
| case kMips64Shr: |
| if (instr->InputAt(1)->IsRegister()) { |
| __ sll(i.InputRegister(0), i.InputRegister(0), 0x0); |
| __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); |
| } else { |
| int64_t imm = i.InputOperand(1).immediate(); |
| __ sll(i.InputRegister(0), i.InputRegister(0), 0x0); |
| __ srl(i.OutputRegister(), i.InputRegister(0), |
| static_cast<uint16_t>(imm)); |
| } |
| break; |
| case kMips64Sar: |
| if (instr->InputAt(1)->IsRegister()) { |
| __ sll(i.InputRegister(0), i.InputRegister(0), 0x0); |
| __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); |
| } else { |
| int64_t imm = i.InputOperand(1).immediate(); |
| __ sll(i.InputRegister(0), i.InputRegister(0), 0x0); |
| __ sra(i.OutputRegister(), i.InputRegister(0), |
| static_cast<uint16_t>(imm)); |
| } |
| break; |
| case kMips64Ext: |
| __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), |
| i.InputInt8(2)); |
| break; |
| case kMips64Ins: |
| if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) { |
| __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2)); |
| } else { |
| __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), |
| i.InputInt8(2)); |
| } |
| break; |
| case kMips64Dext: { |
| __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), |
| i.InputInt8(2)); |
| break; |
| } |
| case kMips64Dins: |
| if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) { |
| __ Dins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2)); |
| } else { |
| __ Dins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), |
| i.InputInt8(2)); |
| } |
| break; |
| case kMips64Dshl: |
| if (instr->InputAt(1)->IsRegister()) { |
| __ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); |
| } else { |
| int64_t imm = i.InputOperand(1).immediate(); |
| if (imm < 32) { |
| __ dsll(i.OutputRegister(), i.InputRegister(0), |
| static_cast<uint16_t>(imm)); |
| } else { |
| __ dsll32(i.OutputRegister(), i.InputRegister(0), |
| static_cast<uint16_t>(imm - 32)); |
| } |
| } |
| break; |
| case kMips64Dshr: |
| if (instr->InputAt(1)->IsRegister()) { |
| __ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); |
| } else { |
| int64_t imm = i.InputOperand(1).immediate(); |
| if (imm < 32) { |
| __ dsrl(i.OutputRegister(), i.InputRegister(0), |
| static_cast<uint16_t>(imm)); |
| } else { |
| __ dsrl32(i.OutputRegister(), i.InputRegister(0), |
| static_cast<uint16_t>(imm - 32)); |
| } |
| } |
| break; |
| case kMips64Dsar: |
| if (instr->InputAt(1)->IsRegister()) { |
| __ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); |
| } else { |
| int64_t imm = i.InputOperand(1).immediate(); |
| if (imm < 32) { |
| __ dsra(i.OutputRegister(), i.InputRegister(0), imm); |
| } else { |
| __ dsra32(i.OutputRegister(), i.InputRegister(0), imm - 32); |
| } |
| } |
| break; |
| case kMips64Ror: |
| __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| break; |
| case kMips64Dror: |
| __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); |
| break; |
| case kMips64Tst: |
| __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1)); |
| // Pseudo-instruction used for cmp/branch. No opcode emitted here. |
| break; |
| case kMips64Cmp: |
| // Pseudo-instruction used for cmp/branch. No opcode emitted here. |
| break; |
| case kMips64Mov: |
| // TODO(plind): Should we combine mov/li like this, or use separate instr? |
| // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType |
| if (HasRegisterInput(instr, 0)) { |
| __ mov(i.OutputRegister(), i.InputRegister(0)); |
| } else { |
| __ li(i.OutputRegister(), i.InputOperand(0)); |
| } |
| break; |
| |
| case kMips64CmpS: { |
| FPURegister left = i.InputOrZeroSingleRegister(0); |
| FPURegister right = i.InputOrZeroSingleRegister(1); |
| bool predicate; |
| FPUCondition cc = |
| FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition()); |
| |
| if ((left == kDoubleRegZero || right == kDoubleRegZero) && |
| !__ IsDoubleZeroRegSet()) { |
| __ Move(kDoubleRegZero, 0.0); |
| } |
| |
| __ CompareF32(cc, left, right); |
| } break; |
| case kMips64AddS: |
| // TODO(plind): add special case: combine mult & add. |
| __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| break; |
| case kMips64SubS: |
| __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| break; |
| case kMips64MulS: |
| // TODO(plind): add special case: right op is -1.0, see arm port. |
| __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| break; |
| case kMips64DivS: |
| __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| break; |
| case kMips64ModS: { |
| // TODO(bmeurer): We should really get rid of this special instruction, |
| // and generate a CallAddress instruction instead. |
| FrameScope scope(tasm(), StackFrame::MANUAL); |
| __ PrepareCallCFunction(0, 2, kScratchReg); |
| __ MovToFloatParameters(i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate()) |
| __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); |
| // Move the result into the double result register. |
| __ MovFromFloatResult(i.OutputSingleRegister()); |
| break; |
| } |
| case kMips64AbsS: |
| __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); |
| break; |
| case kMips64NegS: |
| __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); |
| break; |
| case kMips64SqrtS: { |
| __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| break; |
| } |
| case kMips64MaxS: |
| __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| break; |
| case kMips64MinS: |
| __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| break; |
| case kMips64CmpD: { |
| FPURegister left = i.InputOrZeroDoubleRegister(0); |
| FPURegister right = i.InputOrZeroDoubleRegister(1); |
| bool predicate; |
| FPUCondition cc = |
| FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition()); |
| if ((left == kDoubleRegZero || right == kDoubleRegZero) && |
| !__ IsDoubleZeroRegSet()) { |
| __ Move(kDoubleRegZero, 0.0); |
| } |
| __ CompareF64(cc, left, right); |
| } break; |
| case kMips64AddD: |
| // TODO(plind): add special case: combine mult & add. |
| __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| break; |
| case kMips64SubD: |
| __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| break; |
| case kMips64MulD: |
| // TODO(plind): add special case: right op is -1.0, see arm port. |
| __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| break; |
| case kMips64DivD: |
| __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| break; |
| case kMips64ModD: { |
| // TODO(bmeurer): We should really get rid of this special instruction, |
| // and generate a CallAddress instruction instead. |
| FrameScope scope(tasm(), StackFrame::MANUAL); |
| __ PrepareCallCFunction(0, 2, kScratchReg); |
| __ MovToFloatParameters(i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); |
| // Move the result into the double result register. |
| __ MovFromFloatResult(i.OutputDoubleRegister()); |
| break; |
| } |
| case kMips64AbsD: |
| __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| break; |
| case kMips64NegD: |
| __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| break; |
| case kMips64SqrtD: { |
| __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| break; |
| } |
| case kMips64MaxD: |
| __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| break; |
| case kMips64MinD: |
| __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| i.InputDoubleRegister(1)); |
| break; |
| case kMips64Float64RoundDown: { |
| __ Floor_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| break; |
| } |
| case kMips64Float32RoundDown: { |
| __ Floor_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); |
| break; |
| } |
| case kMips64Float64RoundTruncate: { |
| __ Trunc_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| break; |
| } |
| case kMips64Float32RoundTruncate: { |
| __ Trunc_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); |
| break; |
| } |
| case kMips64Float64RoundUp: { |
| __ Ceil_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| break; |
| } |
| case kMips64Float32RoundUp: { |
| __ Ceil_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); |
| break; |
| } |
| case kMips64Float64RoundTiesEven: { |
| __ Round_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| break; |
| } |
| case kMips64Float32RoundTiesEven: { |
| __ Round_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); |
| break; |
| } |
| case kMips64Float32Max: { |
| FPURegister dst = i.OutputSingleRegister(); |
| FPURegister src1 = i.InputSingleRegister(0); |
| FPURegister src2 = i.InputSingleRegister(1); |
| auto ool = new (zone()) OutOfLineFloat32Max(this, dst, src1, src2); |
| __ Float32Max(dst, src1, src2, ool->entry()); |
| __ bind(ool->exit()); |
| break; |
| } |
| case kMips64Float64Max: { |
| FPURegister dst = i.OutputDoubleRegister(); |
| FPURegister src1 = i.InputDoubleRegister(0); |
| FPURegister src2 = i.InputDoubleRegister(1); |
| auto ool = new (zone()) OutOfLineFloat64Max(this, dst, src1, src2); |
| __ Float64Max(dst, src1, src2, ool->entry()); |
| __ bind(ool->exit()); |
| break; |
| } |
| case kMips64Float32Min: { |
| FPURegister dst = i.OutputSingleRegister(); |
| FPURegister src1 = i.InputSingleRegister(0); |
| FPURegister src2 = i.InputSingleRegister(1); |
| auto ool = new (zone()) OutOfLineFloat32Min(this, dst, src1, src2); |
| __ Float32Min(dst, src1, src2, ool->entry()); |
| __ bind(ool->exit()); |
| break; |
| } |
| case kMips64Float64Min: { |
| FPURegister dst = i.OutputDoubleRegister(); |
| FPURegister src1 = i.InputDoubleRegister(0); |
| FPURegister src2 = i.InputDoubleRegister(1); |
| auto ool = new (zone()) OutOfLineFloat64Min(this, dst, src1, src2); |
| __ Float64Min(dst, src1, src2, ool->entry()); |
| __ bind(ool->exit()); |
| break; |
| } |
| case kMips64Float64SilenceNaN: |
| __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| break; |
| case kMips64CvtSD: |
| __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0)); |
| break; |
| case kMips64CvtDS: |
| __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0)); |
| break; |
| case kMips64CvtDW: { |
| FPURegister scratch = kScratchDoubleReg; |
| __ mtc1(i.InputRegister(0), scratch); |
| __ cvt_d_w(i.OutputDoubleRegister(), scratch); |
| break; |
| } |
| case kMips64CvtSW: { |
| FPURegister scratch = kScratchDoubleReg; |
| __ mtc1(i.InputRegister(0), scratch); |
| __ cvt_s_w(i.OutputDoubleRegister(), scratch); |
| break; |
| } |
| case kMips64CvtSUw: { |
| __ Cvt_s_uw(i.OutputDoubleRegister(), i.InputRegister(0)); |
| break; |
| } |
| case kMips64CvtSL: { |
| FPURegister scratch = kScratchDoubleReg; |
| __ dmtc1(i.InputRegister(0), scratch); |
| __ cvt_s_l(i.OutputDoubleRegister(), scratch); |
| break; |
| } |
| case kMips64CvtDL: { |
| FPURegister scratch = kScratchDoubleReg; |
| __ dmtc1(i.InputRegister(0), scratch); |
| __ cvt_d_l(i.OutputDoubleRegister(), scratch); |
| break; |
| } |
| case kMips64CvtDUw: { |
| __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0)); |
| break; |
| } |
| case kMips64CvtDUl: { |
| __ Cvt_d_ul(i.OutputDoubleRegister(), i.InputRegister(0)); |
| break; |
| } |
| case kMips64CvtSUl: { |
| __ Cvt_s_ul(i.OutputDoubleRegister(), i.InputRegister(0)); |
| break; |
| } |
| case kMips64FloorWD: { |
| FPURegister scratch = kScratchDoubleReg; |
| __ floor_w_d(scratch, i.InputDoubleRegister(0)); |
| __ mfc1(i.OutputRegister(), scratch); |
| break; |
| } |
| case kMips64CeilWD: { |
| FPURegister scratch = kScratchDoubleReg; |
| __ ceil_w_d(scratch, i.InputDoubleRegister(0)); |
| __ mfc1(i.OutputRegister(), scratch); |
| break; |
| } |
| case kMips64RoundWD: { |
| FPURegister scratch = kScratchDoubleReg; |
| __ round_w_d(scratch, i.InputDoubleRegister(0)); |
| __ mfc1(i.OutputRegister(), scratch); |
| break; |
| } |
| case kMips64TruncWD: { |
| FPURegister scratch = kScratchDoubleReg; |
| // Other arches use round to zero here, so we follow. |
| __ trunc_w_d(scratch, i.InputDoubleRegister(0)); |
| __ mfc1(i.OutputRegister(), scratch); |
| break; |
| } |
| case kMips64FloorWS: { |
| FPURegister scratch = kScratchDoubleReg; |
| __ floor_w_s(scratch, i.InputDoubleRegister(0)); |
| __ mfc1(i.OutputRegister(), scratch); |
| break; |
| } |
| case kMips64CeilWS: { |
| FPURegister scratch = kScratchDoubleReg; |
| __ ceil_w_s(scratch, i.InputDoubleRegister(0)); |
| __ mfc1(i.OutputRegister(), scratch); |
| break; |
| } |
| case kMips64RoundWS: { |
| FPURegister scratch = kScratchDoubleReg; |
| __ round_w_s(scratch, i.InputDoubleRegister(0)); |
| __ mfc1(i.OutputRegister(), scratch); |
| break; |
| } |
| case kMips64TruncWS: { |
| FPURegister scratch = kScratchDoubleReg; |
| __ trunc_w_s(scratch, i.InputDoubleRegister(0)); |
| __ mfc1(i.OutputRegister(), scratch); |
| // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, |
| // because INT32_MIN allows easier out-of-bounds detection. |
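| // out + 1 wraps around to INT32_MIN exactly when out == INT32_MAX, which |
| // the signed comparison detects; Movn then replaces the result with |
| // kScratchReg (INT32_MIN). |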
| __ addiu(kScratchReg, i.OutputRegister(), 1); |
| __ slt(kScratchReg2, kScratchReg, i.OutputRegister()); |
| __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2); |
| break; |
| } |
| case kMips64TruncLS: { |
| FPURegister scratch = kScratchDoubleReg; |
| Register tmp_fcsr = kScratchReg; |
| Register result = kScratchReg2; |
| |
| bool load_status = instr->OutputCount() > 1; |
| if (load_status) { |
| // Save FCSR. |
| __ cfc1(tmp_fcsr, FCSR); |
| // Clear FPU flags. |
| __ ctc1(zero_reg, FCSR); |
| } |
| // Other arches use round to zero here, so we follow. |
| __ trunc_l_s(scratch, i.InputDoubleRegister(0)); |
| __ dmfc1(i.OutputRegister(), scratch); |
| if (load_status) { |
| __ cfc1(result, FCSR); |
| // Check for overflow and NaNs. |
| __ andi(result, result, |
| (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask)); |
| __ Slt(result, zero_reg, result); |
| __ xori(result, result, 1); |
| __ mov(i.OutputRegister(1), result); |
| // Restore FCSR. |
| __ ctc1(tmp_fcsr, FCSR); |
| } |
| break; |
| } |
| case kMips64TruncLD: { |
| FPURegister scratch = kScratchDoubleReg; |
| Register tmp_fcsr = kScratchReg; |
| Register result = kScratchReg2; |
| |
| bool load_status = instr->OutputCount() > 1; |
| if (load_status) { |
| // Save FCSR. |
| __ cfc1(tmp_fcsr, FCSR); |
| // Clear FPU flags. |
| __ ctc1(zero_reg, FCSR); |
| } |
| // Other arches use round to zero here, so we follow. |
| __ trunc_l_d(scratch, i.InputDoubleRegister(0)); |
| __ dmfc1(i.OutputRegister(0), scratch); |
| if (load_status) { |
| __ cfc1(result, FCSR); |
| // Check for overflow and NaNs. |
| __ andi(result, result, |
| (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask)); |
| __ Slt(result, zero_reg, result); |
| __ xori(result, result, 1); |
| __ mov(i.OutputRegister(1), result); |
| // Restore FCSR. |
| __ ctc1(tmp_fcsr, FCSR); |
| } |
| break; |
| } |
| case kMips64TruncUwD: { |
| FPURegister scratch = kScratchDoubleReg; |
| __ Trunc_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), scratch); |
| break; |
| } |
| case kMips64TruncUwS: { |
| FPURegister scratch = kScratchDoubleReg; |
| __ Trunc_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch); |
| // Avoid UINT32_MAX as an overflow indicator and use 0 instead, |
| // because 0 allows easier out-of-bounds detection. |
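| // Trunc_uw_s yields UINT32_MAX on overflow; adding 1 wraps that to 0 in |
| // kScratchReg, and Movz clears the output when the wrap occurred. |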
| __ addiu(kScratchReg, i.OutputRegister(), 1); |
| __ Movz(i.OutputRegister(), zero_reg, kScratchReg); |
| break; |
| } |
| case kMips64TruncUlS: { |
| FPURegister scratch = kScratchDoubleReg; |
| Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; |
| __ Trunc_ul_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch, |
| result); |
| break; |
| } |
| case kMips64TruncUlD: { |
| FPURegister scratch = kScratchDoubleReg; |
| Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; |
| __ Trunc_ul_d(i.OutputRegister(0), i.InputDoubleRegister(0), scratch, |
| result); |
| break; |
| } |
| case kMips64BitcastDL: |
| __ dmfc1(i.OutputRegister(), i.InputDoubleRegister(0)); |
| break; |
| case kMips64BitcastLD: |
| __ dmtc1(i.InputRegister(0), i.OutputDoubleRegister()); |
| break; |
| case kMips64Float64ExtractLowWord32: |
| __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0)); |
| break; |
| case kMips64Float64ExtractHighWord32: |
| __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0)); |
| break; |
| case kMips64Float64InsertLowWord32: |
| __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1)); |
| break; |
| case kMips64Float64InsertHighWord32: |
| __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1)); |
| break; |
| // ... more basic instructions ... |
| |
| case kMips64Seb: |
| __ seb(i.OutputRegister(), i.InputRegister(0)); |
| break; |
| case kMips64Seh: |
| __ seh(i.OutputRegister(), i.InputRegister(0)); |
| break; |
| case kMips64Lbu: |
| __ Lbu(i.OutputRegister(), i.MemoryOperand()); |
| EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); |
| break; |
| case kMips64Lb: |
| __ Lb(i.OutputRegister(), i.MemoryOperand()); |
| EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); |
| break; |
| case kMips64Sb: |
| __ Sb(i.InputOrZeroRegister(2), i.MemoryOperand()); |
| break; |
| case kMips64Lhu: |
| __ Lhu(i.OutputRegister(), i.MemoryOperand()); |
| EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); |
| break; |
| case kMips64Ulhu: |
| __ Ulhu(i.OutputRegister(), i.MemoryOperand()); |
| EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); |
| break; |
| case kMips64Lh: |
| __ Lh(i.OutputRegister(), i.MemoryOperand()); |
| EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); |
| break; |
| case kMips64Ulh: |
| __ Ulh(i.OutputRegister(), i.MemoryOperand()); |
| EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); |
| break; |
| case kMips64Sh: |
| __ Sh(i.InputOrZeroRegister(2), i.MemoryOperand()); |
| break; |
| case kMips64Ush: |
| __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg); |
| break; |
| case kMips64Lw: |
| __ Lw(i.OutputRegister(), i.MemoryOperand()); |
| EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); |
| break; |
| case kMips64Ulw: |
| __ Ulw(i.OutputRegister(), i.MemoryOperand()); |
| EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); |
| break; |
| case kMips64Lwu: |
| __ Lwu(i.OutputRegister(), i.MemoryOperand()); |
| EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); |
| break; |
| case kMips64Ulwu: |
| __ Ulwu(i.OutputRegister(), i.MemoryOperand()); |
| EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); |
| break; |
| case kMips64Ld: |
| __ Ld(i.OutputRegister(), i.MemoryOperand()); |
| EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); |
| break; |
| case kMips64Uld: |
| __ Uld(i.OutputRegister(), i.MemoryOperand()); |
| EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); |
| break; |
| case kMips64Sw: |
| __ Sw(i.InputOrZeroRegister(2), i.MemoryOperand()); |
| break; |
| case kMips64Usw: |
| __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand()); |
| break; |
| case kMips64Sd: |
| __ Sd(i.InputOrZeroRegister(2), i.MemoryOperand()); |
| break; |
| case kMips64Usd: |
| __ Usd(i.InputOrZeroRegister(2), i.MemoryOperand()); |
| break; |
| case kMips64Lwc1: { |
| __ Lwc1(i.OutputSingleRegister(), i.MemoryOperand()); |
| break; |
| } |
| case kMips64Ulwc1: { |
| __ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg); |
| break; |
| } |
| case kMips64Swc1: { |
| size_t index = 0; |
| MemOperand operand = i.MemoryOperand(&index); |
| FPURegister ft = i.InputOrZeroSingleRegister(index); |
| if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { |
| __ Move(kDoubleRegZero, 0.0); |
| } |
| __ Swc1(ft, operand); |
| break; |
| } |
| case kMips64Uswc1: { |
| size_t index = 0; |
| MemOperand operand = i.MemoryOperand(&index); |
| FPURegister ft = i.InputOrZeroSingleRegister(index); |
| if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { |
| __ Move(kDoubleRegZero, 0.0); |
| } |
| __ Uswc1(ft, operand, kScratchReg); |
| break; |
| } |
| case kMips64Ldc1: |
| __ Ldc1(i.OutputDoubleRegister(), i.MemoryOperand()); |
| break; |
| case kMips64Uldc1: |
| __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg); |
| break; |
| case kMips64Sdc1: { |
| FPURegister ft = i.InputOrZeroDoubleRegister(2); |
| if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { |
| __ Move(kDoubleRegZero, 0.0); |
| } |
| __ Sdc1(ft, i.MemoryOperand()); |
| break; |
| } |
| case kMips64Usdc1: { |
| FPURegister ft = i.InputOrZeroDoubleRegister(2); |
| if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { |
| __ Move(kDoubleRegZero, 0.0); |
| } |
| __ Usdc1(ft, i.MemoryOperand(), kScratchReg); |
| break; |
| } |
| case kMips64Sync: { |
| __ sync(); |
| break; |
| } |
| case kMips64Push: |
| if (instr->InputAt(0)->IsFPRegister()) { |
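| // Store the double just below the stack pointer, then move sp down to |
| // cover it. |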
| __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize)); |
| __ Subu(sp, sp, Operand(kDoubleSize)); |
| frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize); |
| } else { |
| __ Push(i.InputRegister(0)); |
| frame_access_state()->IncreaseSPDelta(1); |
| } |
| break; |
| case kMips64Peek: { |
| // The incoming value is 0-based, but we need a 1-based value. |
| int reverse_slot = i.InputInt32(0) + 1; |
| int offset = |
| FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot); |
| if (instr->OutputAt(0)->IsFPRegister()) { |
| LocationOperand* op = LocationOperand::cast(instr->OutputAt(0)); |
| if (op->representation() == MachineRepresentation::kFloat64) { |
| __ Ldc1(i.OutputDoubleRegister(), MemOperand(fp, offset)); |
| } else { |
| DCHECK_EQ(op->representation(), MachineRepresentation::kFloat32); |
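| // Float32 spills occupy a full doubleword slot; load the 32-bit value |
| // from its less significant word, whose offset is endianness dependent. |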
| __ lwc1( |
| i.OutputSingleRegister(0), |
| MemOperand(fp, offset + kLessSignificantWordInDoublewordOffset)); |
| } |
| } else { |
| __ Ld(i.OutputRegister(0), MemOperand(fp, offset)); |
| } |
| break; |
| } |
| case kMips64StackClaim: { |
| __ Dsubu(sp, sp, Operand(i.InputInt32(0))); |
| frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / |
| kSystemPointerSize); |
| break; |
| } |
| case kMips64StoreToStackSlot: { |
| if (instr->InputAt(0)->IsFPRegister()) { |
| if (instr->InputAt(0)->IsSimd128Register()) { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
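| // st_b stores the full 128-bit register byte by byte, which is |
| // lane-shape agnostic. |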
| __ st_b(i.InputSimd128Register(0), MemOperand(sp, i.InputInt32(1))); |
| } else { |
| __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1))); |
| } |
| } else { |
| __ Sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1))); |
| } |
| break; |
| } |
| case kMips64ByteSwap64: { |
| __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 8); |
| break; |
| } |
| case kMips64ByteSwap32: { |
| __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4); |
| break; |
| } |
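| // The ASSEMBLE_ATOMIC_* macros, defined earlier in this file, wrap the |
| // given load/store in sync barriers; the exchange, compare-exchange and |
| // binop variants build ll/sc (or lld/scd) retry loops. |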
| case kWord32AtomicLoadInt8: |
| ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb); |
| break; |
| case kWord32AtomicLoadUint8: |
| ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu); |
| break; |
| case kWord32AtomicLoadInt16: |
| ASSEMBLE_ATOMIC_LOAD_INTEGER(Lh); |
| break; |
| case kWord32AtomicLoadUint16: |
| ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu); |
| break; |
| case kWord32AtomicLoadWord32: |
| ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw); |
| break; |
| case kMips64Word64AtomicLoadUint8: |
| ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu); |
| break; |
| case kMips64Word64AtomicLoadUint16: |
| ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu); |
| break; |
| case kMips64Word64AtomicLoadUint32: |
| ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu); |
| break; |
| case kMips64Word64AtomicLoadUint64: |
| ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld); |
| break; |
| case kWord32AtomicStoreWord8: |
| ASSEMBLE_ATOMIC_STORE_INTEGER(Sb); |
| break; |
| case kWord32AtomicStoreWord16: |
| ASSEMBLE_ATOMIC_STORE_INTEGER(Sh); |
| break; |
| case kWord32AtomicStoreWord32: |
| ASSEMBLE_ATOMIC_STORE_INTEGER(Sw); |
| break; |
| case kMips64Word64AtomicStoreWord8: |
| ASSEMBLE_ATOMIC_STORE_INTEGER(Sb); |
| break; |
| case kMips64Word64AtomicStoreWord16: |
| ASSEMBLE_ATOMIC_STORE_INTEGER(Sh); |
| break; |
| case kMips64Word64AtomicStoreWord32: |
| ASSEMBLE_ATOMIC_STORE_INTEGER(Sw); |
| break; |
| case kMips64Word64AtomicStoreWord64: |
| ASSEMBLE_ATOMIC_STORE_INTEGER(Sd); |
| break; |
| case kWord32AtomicExchangeInt8: |
| ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32); |
| break; |
| case kWord32AtomicExchangeUint8: |
| ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32); |
| break; |
| case kWord32AtomicExchangeInt16: |
| ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32); |
| break; |
| case kWord32AtomicExchangeUint16: |
| ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32); |
| break; |
| case kWord32AtomicExchangeWord32: |
| ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc); |
| break; |
| case kMips64Word64AtomicExchangeUint8: |
| ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64); |
| break; |
| case kMips64Word64AtomicExchangeUint16: |
| ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64); |
| break; |
| case kMips64Word64AtomicExchangeUint32: |
| ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64); |
| break; |
| case kMips64Word64AtomicExchangeUint64: |
| ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd); |
| break; |
| case kWord32AtomicCompareExchangeInt8: |
| ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32); |
| break; |
| case kWord32AtomicCompareExchangeUint8: |
| ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32); |
| break; |
| case kWord32AtomicCompareExchangeInt16: |
| ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32); |
| break; |
| case kWord32AtomicCompareExchangeUint16: |
| ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32); |
| break; |
| case kWord32AtomicCompareExchangeWord32: |
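| // On MIPS64, sll with shift amount 0 sign-extends the 32-bit expected |
| // value so it compares equal to the sign-extended word loaded by Ll. |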
| __ sll(i.InputRegister(2), i.InputRegister(2), 0); |
| ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc); |
| break; |
| case kMips64Word64AtomicCompareExchangeUint8: |
| ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64); |
| break; |
| case kMips64Word64AtomicCompareExchangeUint16: |
| ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64); |
| break; |
| case kMips64Word64AtomicCompareExchangeUint32: |
| ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64); |
| break; |
| case kMips64Word64AtomicCompareExchangeUint64: |
| ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd); |
| break; |
| #define ATOMIC_BINOP_CASE(op, inst) \ |
| case kWord32Atomic##op##Int8: \ |
| ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst, 32); \ |
| break; \ |
| case kWord32Atomic##op##Uint8: \ |
| ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst, 32); \ |
| break; \ |
| case kWord32Atomic##op##Int16: \ |
| ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst, 32); \ |
| break; \ |
| case kWord32Atomic##op##Uint16: \ |
| ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst, 32); \ |
| break; \ |
| case kWord32Atomic##op##Word32: \ |
| ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst); \ |
| break; |
| ATOMIC_BINOP_CASE(Add, Addu) |
| ATOMIC_BINOP_CASE(Sub, Subu) |
| ATOMIC_BINOP_CASE(And, And) |
| ATOMIC_BINOP_CASE(Or, Or) |
| ATOMIC_BINOP_CASE(Xor, Xor) |
| #undef ATOMIC_BINOP_CASE |
| #define ATOMIC_BINOP_CASE(op, inst) \ |
| case kMips64Word64Atomic##op##Uint8: \ |
| ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst, 64); \ |
| break; \ |
| case kMips64Word64Atomic##op##Uint16: \ |
| ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst, 64); \ |
| break; \ |
| case kMips64Word64Atomic##op##Uint32: \ |
| ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst, 64); \ |
| break; \ |
| case kMips64Word64Atomic##op##Uint64: \ |
| ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst); \ |
| break; |
| ATOMIC_BINOP_CASE(Add, Daddu) |
| ATOMIC_BINOP_CASE(Sub, Dsubu) |
| ATOMIC_BINOP_CASE(And, And) |
| ATOMIC_BINOP_CASE(Or, Or) |
| ATOMIC_BINOP_CASE(Xor, Xor) |
| #undef ATOMIC_BINOP_CASE |
| case kMips64AssertEqual: |
| __ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()), |
| i.InputRegister(0), Operand(i.InputRegister(1))); |
| break; |
| case kMips64S128Zero: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
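| // XORing the register with itself zeroes all 128 bits. |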
| __ xor_v(i.OutputSimd128Register(), i.OutputSimd128Register(), |
| i.OutputSimd128Register()); |
| break; |
| } |
| case kMips64I32x4Splat: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ fill_w(i.OutputSimd128Register(), i.InputRegister(0)); |
| break; |
| } |
| case kMips64I32x4ExtractLane: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0), |
| i.InputInt8(1)); |
| break; |
| } |
| case kMips64I32x4ReplaceLane: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| Simd128Register src = i.InputSimd128Register(0); |
| Simd128Register dst = i.OutputSimd128Register(); |
| if (src != dst) { |
| __ move_v(dst, src); |
| } |
| __ insert_w(dst, i.InputInt8(1), i.InputRegister(2)); |
| break; |
| } |
| case kMips64I32x4Add: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I32x4Sub: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ subv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64F32x4Splat: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
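| // fill_w replicates a GPR lane, so the float travels through |
| // kScratchReg; F32x4ExtractLane/ReplaceLane use the same detour. |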
| __ FmoveLow(kScratchReg, i.InputSingleRegister(0)); |
| __ fill_w(i.OutputSimd128Register(), kScratchReg); |
| break; |
| } |
| case kMips64F32x4ExtractLane: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1)); |
| __ FmoveLow(i.OutputSingleRegister(), kScratchReg); |
| break; |
| } |
| case kMips64F32x4ReplaceLane: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| Simd128Register src = i.InputSimd128Register(0); |
| Simd128Register dst = i.OutputSimd128Register(); |
| if (src != dst) { |
| __ move_v(dst, src); |
| } |
| __ FmoveLow(kScratchReg, i.InputSingleRegister(2)); |
| __ insert_w(dst, i.InputInt8(1), kScratchReg); |
| break; |
| } |
| case kMips64F32x4SConvertI32x4: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64F32x4UConvertI32x4: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64I32x4Mul: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I32x4MaxS: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I32x4MinS: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ min_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I32x4Eq: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I32x4Ne: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| Simd128Register dst = i.OutputSimd128Register(); |
| __ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1)); |
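| // nor_v(dst, dst, dst) is bitwise NOT, inverting the equality mask; |
| // the I16x8Ne and I8x16Ne cases below use the same pattern. |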
| __ nor_v(dst, dst, dst); |
| break; |
| } |
| case kMips64I32x4Shl: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ slli_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputInt5(1)); |
| break; |
| } |
| case kMips64I32x4ShrS: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ srai_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputInt5(1)); |
| break; |
| } |
| case kMips64I32x4ShrU: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ srli_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputInt5(1)); |
| break; |
| } |
| case kMips64I32x4MaxU: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I32x4MinU: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64S128Select: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| DCHECK(i.OutputSimd128Register() == i.InputSimd128Register(0)); |
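| // bsel_v uses dst as the bit mask: set bits select from input 1 and |
| // clear bits from input 2, so dst must alias the mask (input 0). |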
| __ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64F32x4Abs: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31); |
| break; |
| } |
| case kMips64F32x4Neg: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31); |
| break; |
| } |
| case kMips64F32x4RecipApprox: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64F32x4RecipSqrtApprox: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ frsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64F32x4Add: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64F32x4Sub: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64F32x4Mul: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64F32x4Max: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64F32x4Min: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ fmin_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64F32x4Eq: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64F32x4Ne: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ fcne_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64F32x4Lt: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64F32x4Le: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I32x4SConvertF32x4: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64I32x4UConvertF32x4: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64I32x4Neg: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
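| // Negate by subtracting from a zeroed register: 0 - x == -x. The |
| // I16x8Neg and I8x16Neg cases below do the same per lane width. |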
| __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); |
| __ subv_w(i.OutputSimd128Register(), kSimd128RegZero, |
| i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64I32x4GtS: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
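| // MSA has only "less than" compares, so a > b (and a >= b below) is |
| // computed as b < a (b <= a) by swapping the operands. |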
| __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1), |
| i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64I32x4GeS: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1), |
| i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64I32x4GtU: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1), |
| i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64I32x4GeU: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1), |
| i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64I16x8Splat: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ fill_h(i.OutputSimd128Register(), i.InputRegister(0)); |
| break; |
| } |
| case kMips64I16x8ExtractLane: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0), |
| i.InputInt8(1)); |
| break; |
| } |
| case kMips64I16x8ReplaceLane: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| Simd128Register src = i.InputSimd128Register(0); |
| Simd128Register dst = i.OutputSimd128Register(); |
| if (src != dst) { |
| __ move_v(dst, src); |
| } |
| __ insert_h(dst, i.InputInt8(1), i.InputRegister(2)); |
| break; |
| } |
| case kMips64I16x8Neg: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); |
| __ subv_h(i.OutputSimd128Register(), kSimd128RegZero, |
| i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64I16x8Shl: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputInt4(1)); |
| break; |
| } |
| case kMips64I16x8ShrS: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputInt4(1)); |
| break; |
| } |
| case kMips64I16x8ShrU: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputInt4(1)); |
| break; |
| } |
| case kMips64I16x8Add: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I16x8AddSaturateS: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I16x8Sub: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I16x8SubSaturateS: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I16x8Mul: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I16x8MaxS: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I16x8MinS: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I16x8Eq: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I16x8Ne: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| Simd128Register dst = i.OutputSimd128Register(); |
| __ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1)); |
| __ nor_v(dst, dst, dst); |
| break; |
| } |
| case kMips64I16x8GtS: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1), |
| i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64I16x8GeS: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1), |
| i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64I16x8AddSaturateU: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I16x8SubSaturateU: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I16x8MaxU: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I16x8MinU: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I16x8GtU: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1), |
| i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64I16x8GeU: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1), |
| i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64I8x16Splat: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ fill_b(i.OutputSimd128Register(), i.InputRegister(0)); |
| break; |
| } |
| case kMips64I8x16ExtractLane: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0), |
| i.InputInt8(1)); |
| break; |
| } |
| case kMips64I8x16ReplaceLane: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| Simd128Register src = i.InputSimd128Register(0); |
| Simd128Register dst = i.OutputSimd128Register(); |
| if (src != dst) { |
| __ move_v(dst, src); |
| } |
| __ insert_b(dst, i.InputInt8(1), i.InputRegister(2)); |
| break; |
| } |
| case kMips64I8x16Neg: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); |
| __ subv_b(i.OutputSimd128Register(), kSimd128RegZero, |
| i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64I8x16Shl: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputInt3(1)); |
| break; |
| } |
| case kMips64I8x16ShrS: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputInt3(1)); |
| break; |
| } |
| case kMips64I8x16Add: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ addv_b(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I8x16AddSaturateS: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I8x16Sub: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ subv_b(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I8x16SubSaturateS: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I8x16Mul: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ mulv_b(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I8x16MaxS: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I8x16MinS: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ min_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I8x16Eq: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ ceq_b(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I8x16Ne: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| Simd128Register dst = i.OutputSimd128Register(); |
| __ ceq_b(dst, i.InputSimd128Register(0), i.InputSimd128Register(1)); |
| __ nor_v(dst, dst, dst); |
| break; |
| } |
| case kMips64I8x16GtS: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ clt_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1), |
| i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64I8x16GeS: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ cle_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1), |
| i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64I8x16ShrU: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ srli_b(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputInt3(1)); |
| break; |
| } |
| case kMips64I8x16AddSaturateU: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I8x16SubSaturateU: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I8x16MaxU: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ max_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I8x16MinU: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ min_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64I8x16GtU: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ clt_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1), |
| i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64I8x16GeU: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ cle_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1), |
| i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64S128And: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64S128Or: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ or_v(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64S128Xor: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ xor_v(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(1)); |
| break; |
| } |
| case kMips64S128Not: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ nor_v(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| i.InputSimd128Register(0)); |
| break; |
| } |
| case kMips64S1x4AnyTrue: |
| case kMips64S1x8AnyTrue: |
| case kMips64S1x16AnyTrue: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| Register dst = i.OutputRegister(); |
| Label all_false; |
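| // The li in the delay slot executes whether or not the branch is taken, |
| // so dst is 0 when every lane is zero and is overwritten with -1 on the |
| // fall-through path otherwise. |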
| __ BranchMSA(&all_false, MSA_BRANCH_V, all_zero, |
| i.InputSimd128Register(0), USE_DELAY_SLOT); |
| __ li(dst, 0l); // branch delay slot |
| __ li(dst, -1); |
| __ bind(&all_false); |
| break; |
| } |
| case kMips64S1x4AllTrue: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| Register dst = i.OutputRegister(); |
| Label all_true; |
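| // Same delay-slot trick with the sense inverted: dst stays -1 only if |
| // the all-lanes-non-zero branch is taken. |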
| __ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero, |
| i.InputSimd128Register(0), USE_DELAY_SLOT); |
| __ li(dst, -1); // branch delay slot |
| __ li(dst, 0l); |
| __ bind(&all_true); |
| break; |
| } |
| case kMips64S1x8AllTrue: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| Register dst = i.OutputRegister(); |
| Label all_true; |
| __ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero, |
| i.InputSimd128Register(0), USE_DELAY_SLOT); |
| __ li(dst, -1); // branch delay slot |
| __ li(dst, 0l); |
| __ bind(&all_true); |
| break; |
| } |
| case kMips64S1x16AllTrue: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| Register dst = i.OutputRegister(); |
| Label all_true; |
| __ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero, |
| i.InputSimd128Register(0), USE_DELAY_SLOT); |
| __ li(dst, -1); // branch delay slot |
| __ li(dst, 0l); |
| __ bind(&all_true); |
| break; |
| } |
| case kMips64MsaLd: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ ld_b(i.OutputSimd128Register(), i.MemoryOperand()); |
| break; |
| } |
| case kMips64MsaSt: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| __ st_b(i.InputSimd128Register(2), i.MemoryOperand()); |
| break; |
| } |
| case kMips64S32x4InterleaveRight: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| Simd128Register dst = i.OutputSimd128Register(), |
| src0 = i.InputSimd128Register(0), |
| src1 = i.InputSimd128Register(1); |
| // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0] |
| // dst = [5, 1, 4, 0] |
| __ ilvr_w(dst, src1, src0); |
| break; |
| } |
| case kMips64S32x4InterleaveLeft: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| Simd128Register dst = i.OutputSimd128Register(), |
| src0 = i.InputSimd128Register(0), |
| src1 = i.InputSimd128Register(1); |
| // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0] |
| // dst = [7, 3, 6, 2] |
| __ ilvl_w(dst, src1, src0); |
| break; |
| } |
| case kMips64S32x4PackEven: { |
| CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); |
| Simd128Register dst = i.OutputSimd128Register(), |
| src0 = i.InputSimd128Register(0), |
| src1 = i.InputSimd128Register(1); |
| // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0] |
| |