| // Copyright 2012 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include <limits.h> // For LONG_MIN, LONG_MAX. |
| |
| #if V8_TARGET_ARCH_MIPS |
| |
| #include "src/base/bits.h" |
| #include "src/base/division-by-constant.h" |
| #include "src/bootstrapper.h" |
| #include "src/callable.h" |
| #include "src/code-stubs.h" |
| #include "src/debug/debug.h" |
| #include "src/external-reference-table.h" |
| #include "src/frames-inl.h" |
| #include "src/mips/assembler-mips-inl.h" |
| #include "src/mips/macro-assembler-mips.h" |
| #include "src/register-configuration.h" |
| #include "src/runtime/runtime.h" |
| |
| namespace v8 { |
| namespace internal { |
| |
| MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size, |
| CodeObjectRequired create_code_object) |
| : TurboAssembler(isolate, buffer, size, create_code_object) {} |
| |
| TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size, |
| CodeObjectRequired create_code_object) |
| : Assembler(isolate, buffer, buffer_size), |
| isolate_(isolate), |
| has_double_zero_reg_set_(false) { |
| if (create_code_object == CodeObjectRequired::kYes) { |
| code_object_ = |
| Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate); |
| } |
| } |
| |
| int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, |
| Register exclusion1, |
| Register exclusion2, |
| Register exclusion3) const { |
| int bytes = 0; |
| RegList exclusions = 0; |
| if (exclusion1 != no_reg) { |
| exclusions |= exclusion1.bit(); |
| if (exclusion2 != no_reg) { |
| exclusions |= exclusion2.bit(); |
| if (exclusion3 != no_reg) { |
| exclusions |= exclusion3.bit(); |
| } |
| } |
| } |
| |
| RegList list = kJSCallerSaved & ~exclusions; |
| bytes += NumRegs(list) * kPointerSize; |
| |
| if (fp_mode == kSaveFPRegs) { |
| bytes += NumRegs(kCallerSavedFPU) * kDoubleSize; |
| } |
| |
| return bytes; |
| } |
| |
| int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, |
| Register exclusion2, Register exclusion3) { |
| int bytes = 0; |
| RegList exclusions = 0; |
| if (exclusion1 != no_reg) { |
| exclusions |= exclusion1.bit(); |
| if (exclusion2 != no_reg) { |
| exclusions |= exclusion2.bit(); |
| if (exclusion3 != no_reg) { |
| exclusions |= exclusion3.bit(); |
| } |
| } |
| } |
| |
| RegList list = kJSCallerSaved & ~exclusions; |
| MultiPush(list); |
| bytes += NumRegs(list) * kPointerSize; |
| |
| if (fp_mode == kSaveFPRegs) { |
| MultiPushFPU(kCallerSavedFPU); |
| bytes += NumRegs(kCallerSavedFPU) * kDoubleSize; |
| } |
| |
| return bytes; |
| } |
| |
| int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, |
| Register exclusion2, Register exclusion3) { |
| int bytes = 0; |
| if (fp_mode == kSaveFPRegs) { |
| MultiPopFPU(kCallerSavedFPU); |
| bytes += NumRegs(kCallerSavedFPU) * kDoubleSize; |
| } |
| |
| RegList exclusions = 0; |
| if (exclusion1 != no_reg) { |
| exclusions |= exclusion1.bit(); |
| if (exclusion2 != no_reg) { |
| exclusions |= exclusion2.bit(); |
| if (exclusion3 != no_reg) { |
| exclusions |= exclusion3.bit(); |
| } |
| } |
| } |
| |
| RegList list = kJSCallerSaved & ~exclusions; |
| MultiPop(list); |
| bytes += NumRegs(list) * kPointerSize; |
| |
| return bytes; |
| } |
| |
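| // Loads a value from the roots array, which is addressed relative to the |
| // root register (s6) on MIPS. |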
| void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) { |
| lw(destination, MemOperand(s6, index << kPointerSizeLog2)); |
| } |
| |
| void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index, |
| Condition cond, Register src1, |
| const Operand& src2) { |
| Branch(2, NegateCondition(cond), src1, src2); |
| lw(destination, MemOperand(s6, index << kPointerSizeLog2)); |
| } |
| |
| |
| void TurboAssembler::PushCommonFrame(Register marker_reg) { |
| if (marker_reg.is_valid()) { |
| Push(ra, fp, marker_reg); |
| Addu(fp, sp, Operand(kPointerSize)); |
| } else { |
| Push(ra, fp); |
| mov(fp, sp); |
| } |
| } |
| |
| void TurboAssembler::PushStandardFrame(Register function_reg) { |
| int offset = -StandardFrameConstants::kContextOffset; |
| if (function_reg.is_valid()) { |
| Push(ra, fp, cp, function_reg); |
| offset += kPointerSize; |
| } else { |
| Push(ra, fp, cp); |
| } |
| Addu(fp, sp, Operand(offset)); |
| } |
| |
| // Push and pop all registers that can hold pointers. |
| void MacroAssembler::PushSafepointRegisters() { |
| // Safepoints expect a block of kNumSafepointRegisters values on the |
| // stack, so adjust the stack for unsaved registers. |
| const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; |
| DCHECK_GE(num_unsaved, 0); |
| if (num_unsaved > 0) { |
| Subu(sp, sp, Operand(num_unsaved * kPointerSize)); |
| } |
| MultiPush(kSafepointSavedRegisters); |
| } |
| |
| |
| void MacroAssembler::PopSafepointRegisters() { |
| const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; |
| MultiPop(kSafepointSavedRegisters); |
| if (num_unsaved > 0) { |
| Addu(sp, sp, Operand(num_unsaved * kPointerSize)); |
| } |
| } |
| |
| int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { |
| // The registers are pushed starting with the highest encoding, |
| // which means that lowest encodings are closest to the stack pointer. |
| return kSafepointRegisterStackIndexMap[reg_code]; |
| } |
| |
| |
| // Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved) |
| // The register 'object' contains a heap object pointer. The heap object |
| // tag is shifted away. |
| void MacroAssembler::RecordWriteField(Register object, int offset, |
| Register value, Register dst, |
| RAStatus ra_status, |
| SaveFPRegsMode save_fp, |
| RememberedSetAction remembered_set_action, |
| SmiCheck smi_check) { |
| DCHECK(!AreAliased(value, dst, t8, object)); |
| // First, check if a write barrier is even needed. The tests below |
| // catch stores of Smis. |
| Label done; |
| |
| // Skip barrier if writing a smi. |
| if (smi_check == INLINE_SMI_CHECK) { |
| JumpIfSmi(value, &done); |
| } |
| |
| // Although the object register is tagged, the offset is relative to the |
| // start of the object, so the offset must be a multiple of kPointerSize. |
| DCHECK(IsAligned(offset, kPointerSize)); |
| |
| Addu(dst, object, Operand(offset - kHeapObjectTag)); |
| if (emit_debug_code()) { |
| Label ok; |
| And(t8, dst, Operand(kPointerSize - 1)); |
| Branch(&ok, eq, t8, Operand(zero_reg)); |
| stop("Unaligned cell in write barrier"); |
| bind(&ok); |
| } |
| |
| RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action, |
| OMIT_SMI_CHECK); |
| |
| bind(&done); |
| |
| // Clobber clobbered input registers when running with the debug-code flag |
| // turned on to provoke errors. |
| if (emit_debug_code()) { |
| li(value, Operand(bit_cast<int32_t>(kZapValue + 4))); |
| li(dst, Operand(bit_cast<int32_t>(kZapValue + 8))); |
| } |
| } |
| |
| void TurboAssembler::SaveRegisters(RegList registers) { |
| DCHECK_GT(NumRegs(registers), 0); |
| RegList regs = 0; |
| for (int i = 0; i < Register::kNumRegisters; ++i) { |
| if ((registers >> i) & 1u) { |
| regs |= Register::from_code(i).bit(); |
| } |
| } |
| MultiPush(regs); |
| } |
| |
| void TurboAssembler::RestoreRegisters(RegList registers) { |
| DCHECK_GT(NumRegs(registers), 0); |
| RegList regs = 0; |
| for (int i = 0; i < Register::kNumRegisters; ++i) { |
| if ((registers >> i) & 1u) { |
| regs |= Register::from_code(i).bit(); |
| } |
| } |
| MultiPop(regs); |
| } |
| |
| void TurboAssembler::CallRecordWriteStub( |
| Register object, Register address, |
| RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) { |
| // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode, |
| // i.e. always emit the remembered set and save FP registers in |
| // RecordWriteStub. If a large performance regression is observed, we should |
| // use these values to avoid unnecessary work. |
| |
| Callable const callable = |
| Builtins::CallableFor(isolate(), Builtins::kRecordWrite); |
| RegList registers = callable.descriptor().allocatable_registers(); |
| |
| SaveRegisters(registers); |
| Register object_parameter(callable.descriptor().GetRegisterParameter( |
| RecordWriteDescriptor::kObject)); |
| Register slot_parameter( |
| callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot)); |
| Register isolate_parameter(callable.descriptor().GetRegisterParameter( |
| RecordWriteDescriptor::kIsolate)); |
| Register remembered_set_parameter(callable.descriptor().GetRegisterParameter( |
| RecordWriteDescriptor::kRememberedSet)); |
| Register fp_mode_parameter(callable.descriptor().GetRegisterParameter( |
| RecordWriteDescriptor::kFPMode)); |
| |
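| // Transfer object and address into the stub's parameter registers via the |
| // stack, so the values arrive intact even if they alias those registers. |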
| Push(object); |
| Push(address); |
| |
| Pop(slot_parameter); |
| Pop(object_parameter); |
| |
| li(isolate_parameter, Operand(ExternalReference::isolate_address(isolate()))); |
| Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action)); |
| Move(fp_mode_parameter, Smi::FromEnum(fp_mode)); |
| Call(callable.code(), RelocInfo::CODE_TARGET); |
| |
| RestoreRegisters(registers); |
| } |
| |
| // Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved) |
| // The register 'object' contains a heap object pointer. The heap object |
| // tag is shifted away. |
| void MacroAssembler::RecordWrite(Register object, Register address, |
| Register value, RAStatus ra_status, |
| SaveFPRegsMode fp_mode, |
| RememberedSetAction remembered_set_action, |
| SmiCheck smi_check) { |
| DCHECK(!AreAliased(object, address, value, t8)); |
| DCHECK(!AreAliased(object, address, value, t9)); |
| |
| if (emit_debug_code()) { |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| lw(scratch, MemOperand(address)); |
| Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch, |
| Operand(value)); |
| } |
| |
| if (remembered_set_action == OMIT_REMEMBERED_SET && |
| !FLAG_incremental_marking) { |
| return; |
| } |
| |
| // First, check if a write barrier is even needed. The tests below |
| // catch stores of smis and stores into the young generation. |
| Label done; |
| |
| if (smi_check == INLINE_SMI_CHECK) { |
| DCHECK_EQ(0, kSmiTag); |
| JumpIfSmi(value, &done); |
| } |
| |
| CheckPageFlag(value, |
| value, // Used as scratch. |
| MemoryChunk::kPointersToHereAreInterestingMask, eq, &done); |
| CheckPageFlag(object, |
| value, // Used as scratch. |
| MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done); |
| |
| // Record the actual write. |
| if (ra_status == kRAHasNotBeenSaved) { |
| push(ra); |
| } |
| CallRecordWriteStub(object, address, remembered_set_action, fp_mode); |
| if (ra_status == kRAHasNotBeenSaved) { |
| pop(ra); |
| } |
| |
| bind(&done); |
| |
| { |
| // Count number of write barriers in generated code. |
| isolate()->counters()->write_barriers_static()->Increment(); |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, |
| scratch, value); |
| } |
| |
| // Clobber clobbered registers when running with the debug-code flag |
| // turned on to provoke errors. |
| if (emit_debug_code()) { |
| li(address, Operand(bit_cast<int32_t>(kZapValue + 12))); |
| li(value, Operand(bit_cast<int32_t>(kZapValue + 16))); |
| } |
| } |
| |
| // --------------------------------------------------------------------------- |
| // Instruction macros. |
| |
| void TurboAssembler::Addu(Register rd, Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| addu(rd, rs, rt.rm()); |
| } else { |
| if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) { |
| addiu(rd, rs, rt.immediate()); |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| addu(rd, rs, scratch); |
| } |
| } |
| } |
| |
| void TurboAssembler::Subu(Register rd, Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| subu(rd, rs, rt.rm()); |
| } else { |
| if (is_int16(-rt.immediate()) && !MustUseReg(rt.rmode())) { |
| addiu(rd, rs, -rt.immediate()); // No subiu instr, use addiu(x, y, -imm). |
| } else if (!(-rt.immediate() & kHiMask) && !MustUseReg(rt.rmode())) { |
| // Use load -imm and addu for cases where loading -imm generates one |
| // instruction. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, -rt.immediate()); |
| addu(rd, rs, scratch); |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| subu(rd, rs, scratch); |
| } |
| } |
| } |
| |
| void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| if (IsMipsArchVariant(kLoongson)) { |
| mult(rs, rt.rm()); |
| mflo(rd); |
| } else { |
| mul(rd, rs, rt.rm()); |
| } |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| if (IsMipsArchVariant(kLoongson)) { |
| mult(rs, scratch); |
| mflo(rd); |
| } else { |
| mul(rd, rs, scratch); |
| } |
| } |
| } |
| |
| void TurboAssembler::Mul(Register rd_hi, Register rd_lo, Register rs, |
| const Operand& rt) { |
| if (rt.is_reg()) { |
| if (!IsMipsArchVariant(kMips32r6)) { |
| mult(rs, rt.rm()); |
| mflo(rd_lo); |
| mfhi(rd_hi); |
| } else { |
| if (rd_lo == rs) { |
| DCHECK(rd_hi != rs); |
| DCHECK(rd_hi != rt.rm() && rd_lo != rt.rm()); |
| muh(rd_hi, rs, rt.rm()); |
| mul(rd_lo, rs, rt.rm()); |
| } else { |
| DCHECK(rd_hi != rt.rm() && rd_lo != rt.rm()); |
| mul(rd_lo, rs, rt.rm()); |
| muh(rd_hi, rs, rt.rm()); |
| } |
| } |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| if (!IsMipsArchVariant(kMips32r6)) { |
| mult(rs, scratch); |
| mflo(rd_lo); |
| mfhi(rd_hi); |
| } else { |
| if (rd_lo == rs) { |
| DCHECK(rd_hi != rs); |
| DCHECK(rd_hi != scratch && rd_lo != scratch); |
| muh(rd_hi, rs, scratch); |
| mul(rd_lo, rs, scratch); |
| } else { |
| DCHECK(rd_hi != scratch && rd_lo != scratch); |
| mul(rd_lo, rs, scratch); |
| muh(rd_hi, rs, scratch); |
| } |
| } |
| } |
| } |
| |
| void TurboAssembler::Mulu(Register rd_hi, Register rd_lo, Register rs, |
| const Operand& rt) { |
| Register reg = no_reg; |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| if (rt.is_reg()) { |
| reg = rt.rm(); |
| } else { |
| DCHECK(rs != scratch); |
| reg = scratch; |
| li(reg, rt); |
| } |
| |
| if (!IsMipsArchVariant(kMips32r6)) { |
| multu(rs, reg); |
| mflo(rd_lo); |
| mfhi(rd_hi); |
| } else { |
| if (rd_lo == rs) { |
| DCHECK(rd_hi != rs); |
| DCHECK(rd_hi != reg && rd_lo != reg); |
| muhu(rd_hi, rs, reg); |
| mulu(rd_lo, rs, reg); |
| } else { |
| DCHECK(rd_hi != reg && rd_lo != reg); |
| mulu(rd_lo, rs, reg); |
| muhu(rd_hi, rs, reg); |
| } |
| } |
| } |
| |
| void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| if (!IsMipsArchVariant(kMips32r6)) { |
| mult(rs, rt.rm()); |
| mfhi(rd); |
| } else { |
| muh(rd, rs, rt.rm()); |
| } |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| if (!IsMipsArchVariant(kMips32r6)) { |
| mult(rs, scratch); |
| mfhi(rd); |
| } else { |
| muh(rd, rs, scratch); |
| } |
| } |
| } |
| |
| void TurboAssembler::Mult(Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| mult(rs, rt.rm()); |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| mult(rs, scratch); |
| } |
| } |
| |
| void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| if (!IsMipsArchVariant(kMips32r6)) { |
| multu(rs, rt.rm()); |
| mfhi(rd); |
| } else { |
| muhu(rd, rs, rt.rm()); |
| } |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| if (!IsMipsArchVariant(kMips32r6)) { |
| multu(rs, scratch); |
| mfhi(rd); |
| } else { |
| muhu(rd, rs, scratch); |
| } |
| } |
| } |
| |
| void TurboAssembler::Multu(Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| multu(rs, rt.rm()); |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| multu(rs, scratch); |
| } |
| } |
| |
| void TurboAssembler::Div(Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| div(rs, rt.rm()); |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| div(rs, scratch); |
| } |
| } |
| |
| void TurboAssembler::Div(Register rem, Register res, Register rs, |
| const Operand& rt) { |
| if (rt.is_reg()) { |
| if (!IsMipsArchVariant(kMips32r6)) { |
| div(rs, rt.rm()); |
| mflo(res); |
| mfhi(rem); |
| } else { |
| div(res, rs, rt.rm()); |
| mod(rem, rs, rt.rm()); |
| } |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| if (!IsMipsArchVariant(kMips32r6)) { |
| div(rs, scratch); |
| mflo(res); |
| mfhi(rem); |
| } else { |
| div(res, rs, scratch); |
| mod(rem, rs, scratch); |
| } |
| } |
| } |
| |
| void TurboAssembler::Div(Register res, Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| if (!IsMipsArchVariant(kMips32r6)) { |
| div(rs, rt.rm()); |
| mflo(res); |
| } else { |
| div(res, rs, rt.rm()); |
| } |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| if (!IsMipsArchVariant(kMips32r6)) { |
| div(rs, scratch); |
| mflo(res); |
| } else { |
| div(res, rs, scratch); |
| } |
| } |
| } |
| |
| void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| if (!IsMipsArchVariant(kMips32r6)) { |
| div(rs, rt.rm()); |
| mfhi(rd); |
| } else { |
| mod(rd, rs, rt.rm()); |
| } |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| if (!IsMipsArchVariant(kMips32r6)) { |
| div(rs, scratch); |
| mfhi(rd); |
| } else { |
| mod(rd, rs, scratch); |
| } |
| } |
| } |
| |
| void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| if (!IsMipsArchVariant(kMips32r6)) { |
| divu(rs, rt.rm()); |
| mfhi(rd); |
| } else { |
| modu(rd, rs, rt.rm()); |
| } |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| if (!IsMipsArchVariant(kMips32r6)) { |
| divu(rs, scratch); |
| mfhi(rd); |
| } else { |
| modu(rd, rs, scratch); |
| } |
| } |
| } |
| |
| void TurboAssembler::Divu(Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| divu(rs, rt.rm()); |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| divu(rs, scratch); |
| } |
| } |
| |
| void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| if (!IsMipsArchVariant(kMips32r6)) { |
| divu(rs, rt.rm()); |
| mflo(res); |
| } else { |
| divu(res, rs, rt.rm()); |
| } |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| if (!IsMipsArchVariant(kMips32r6)) { |
| divu(rs, scratch); |
| mflo(res); |
| } else { |
| divu(res, rs, scratch); |
| } |
| } |
| } |
| |
| void TurboAssembler::And(Register rd, Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| and_(rd, rs, rt.rm()); |
| } else { |
| if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) { |
| andi(rd, rs, rt.immediate()); |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| and_(rd, rs, scratch); |
| } |
| } |
| } |
| |
| void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| or_(rd, rs, rt.rm()); |
| } else { |
| if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) { |
| ori(rd, rs, rt.immediate()); |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| or_(rd, rs, scratch); |
| } |
| } |
| } |
| |
| void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| xor_(rd, rs, rt.rm()); |
| } else { |
| if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) { |
| xori(rd, rs, rt.immediate()); |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| xor_(rd, rs, scratch); |
| } |
| } |
| } |
| |
| void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| nor(rd, rs, rt.rm()); |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| nor(rd, rs, scratch); |
| } |
| } |
| |
| void TurboAssembler::Neg(Register rs, const Operand& rt) { |
| subu(rs, zero_reg, rt.rm()); |
| } |
| |
| void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| slt(rd, rs, rt.rm()); |
| } else { |
| if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) { |
| slti(rd, rs, rt.immediate()); |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = rd == at ? t8 : temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| slt(rd, rs, scratch); |
| } |
| } |
| } |
| |
| void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| sltu(rd, rs, rt.rm()); |
| } else { |
| const uint32_t int16_min = std::numeric_limits<int16_t>::min(); |
| if (is_uint15(rt.immediate()) && !MustUseReg(rt.rmode())) { |
| // Imm range is: [0, 32767]. |
| sltiu(rd, rs, rt.immediate()); |
| } else if (is_uint15(rt.immediate() - int16_min) && |
| !MustUseReg(rt.rmode())) { |
| // Imm range is: [max_unsigned-32767,max_unsigned]. |
| sltiu(rd, rs, static_cast<uint16_t>(rt.immediate())); |
| } else { |
| // li handles the relocation. |
| UseScratchRegisterScope temps(this); |
| Register scratch = rd == at ? t8 : temps.Acquire(); |
| DCHECK(rs != scratch); |
| li(scratch, rt); |
| sltu(rd, rs, scratch); |
| } |
| } |
| } |
| |
| void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) { |
| if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { |
| if (rt.is_reg()) { |
| rotrv(rd, rs, rt.rm()); |
| } else { |
| rotr(rd, rs, rt.immediate() & 0x1F); |
| } |
| } else { |
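| // Without the r2/r6 rotate instructions, synthesize the rotate as |
| // (rs >> n) | (rs << (32 - n)). |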
| if (rt.is_reg()) { |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; |
| subu(scratch, zero_reg, rt.rm()); |
| sllv(scratch, rs, scratch); |
| srlv(rd, rs, rt.rm()); |
| or_(rd, rd, scratch); |
| } else { |
| if (rt.immediate() == 0) { |
| srl(rd, rs, 0); |
| } else { |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| srl(scratch, rs, rt.immediate() & 0x1F); |
| sll(rd, rs, (0x20 - (rt.immediate() & 0x1F)) & 0x1F); |
| or_(rd, rd, scratch); |
| } |
| } |
| } |
| } |
| |
| |
| void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) { |
| if (IsMipsArchVariant(kLoongson)) { |
| lw(zero_reg, rs); |
| } else { |
| pref(hint, rs); |
| } |
| } |
| |
| void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa, |
| Register scratch) { |
| DCHECK(sa >= 1 && sa <= 31); |
| if (IsMipsArchVariant(kMips32r6) && sa <= 4) { |
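| // r6 lsa encodes shift amounts 1..4 as sa - 1 in its two-bit field. |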
| lsa(rd, rt, rs, sa - 1); |
| } else { |
| Register tmp = rd == rt ? scratch : rd; |
| DCHECK(tmp != rt); |
| sll(tmp, rs, sa); |
| Addu(rd, rt, tmp); |
| } |
| } |
| |
| void TurboAssembler::Bovc(Register rs, Register rt, Label* L) { |
| if (is_trampoline_emitted()) { |
| Label skip; |
| bnvc(rs, rt, &skip); |
| BranchLong(L, PROTECT); |
| bind(&skip); |
| } else { |
| bovc(rs, rt, L); |
| } |
| } |
| |
| void TurboAssembler::Bnvc(Register rs, Register rt, Label* L) { |
| if (is_trampoline_emitted()) { |
| Label skip; |
| bovc(rs, rt, &skip); |
| BranchLong(L, PROTECT); |
| bind(&skip); |
| } else { |
| bnvc(rs, rt, L); |
| } |
| } |
| |
| // ------------Pseudo-instructions------------- |
| |
| // Word Swap Byte |
| void TurboAssembler::ByteSwapSigned(Register dest, Register src, |
| int operand_size) { |
| DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4); |
| |
| if (operand_size == 2) { |
| Seh(src, src); |
| } else if (operand_size == 1) { |
| Seb(src, src); |
| } |
| // No need to do any preparation if operand_size is 4 |
| |
| if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { |
| wsbh(dest, src); |
| rotr(dest, dest, 16); |
| } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) { |
| Register tmp = t0; |
| Register tmp2 = t1; |
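| // Assemble the byte-reversed word one byte at a time: byte 0 -> byte 3, |
| // byte 1 -> byte 2, byte 2 -> byte 1, byte 3 -> byte 0. |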
| |
| andi(tmp2, src, 0xFF); |
| sll(tmp2, tmp2, 24); |
| or_(tmp, zero_reg, tmp2); |
| |
| andi(tmp2, src, 0xFF00); |
| sll(tmp2, tmp2, 8); |
| or_(tmp, tmp, tmp2); |
| |
| srl(src, src, 8); |
| andi(tmp2, src, 0xFF00); |
| or_(tmp, tmp, tmp2); |
| |
| srl(src, src, 16); |
| andi(tmp2, src, 0xFF); |
| or_(tmp, tmp, tmp2); |
| |
| or_(dest, tmp, zero_reg); |
| } |
| } |
| |
| void TurboAssembler::ByteSwapUnsigned(Register dest, Register src, |
| int operand_size) { |
| DCHECK(operand_size == 1 || operand_size == 2); |
| |
| if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { |
| if (operand_size == 1) { |
| andi(src, src, 0xFF); |
| } else { |
| andi(src, src, 0xFFFF); |
| } |
| |
| wsbh(dest, src); |
| rotr(dest, dest, 16); |
| } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) { |
| if (operand_size == 1) { |
| sll(src, src, 24); |
| } else { |
| Register tmp = t0; |
| |
| andi(tmp, src, 0xFF00); |
| sll(src, src, 24); |
| sll(tmp, tmp, 8); |
| or_(dest, tmp, src); |
| } |
| } |
| } |
| |
| void TurboAssembler::Ulw(Register rd, const MemOperand& rs) { |
| DCHECK(rd != at); |
| DCHECK(rs.rm() != at); |
| if (IsMipsArchVariant(kMips32r6)) { |
| lw(rd, rs); |
| } else { |
| DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || |
| IsMipsArchVariant(kLoongson)); |
| DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3); |
| MemOperand source = rs; |
| // Adjust offset for two accesses and check if offset + 3 fits into int16_t. |
| AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3); |
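| // lwr/lwl assemble the word from its two unaligned halves; when rd is also |
| // the base register, build the result in a scratch register so the base is |
| // not clobbered by the first partial load. |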
| if (rd != source.rm()) { |
| lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset)); |
| lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset)); |
| } else { |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| lwr(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset)); |
| lwl(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset)); |
| mov(rd, scratch); |
| } |
| } |
| } |
| |
| void TurboAssembler::Usw(Register rd, const MemOperand& rs) { |
| DCHECK(rd != at); |
| DCHECK(rs.rm() != at); |
| DCHECK(rd != rs.rm()); |
| if (IsMipsArchVariant(kMips32r6)) { |
| sw(rd, rs); |
| } else { |
| DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || |
| IsMipsArchVariant(kLoongson)); |
| DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3); |
| MemOperand source = rs; |
| // Adjust offset for two accesses and check if offset + 3 fits into int16_t. |
| AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3); |
| swr(rd, MemOperand(source.rm(), source.offset() + kMipsSwrOffset)); |
| swl(rd, MemOperand(source.rm(), source.offset() + kMipsSwlOffset)); |
| } |
| } |
| |
| void TurboAssembler::Ulh(Register rd, const MemOperand& rs) { |
| DCHECK(rd != at); |
| DCHECK(rs.rm() != at); |
| if (IsMipsArchVariant(kMips32r6)) { |
| lh(rd, rs); |
| } else { |
| DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || |
| IsMipsArchVariant(kLoongson)); |
| MemOperand source = rs; |
| // Adjust offset for two accesses and check if offset + 1 fits into int16_t. |
| AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1); |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| if (source.rm() == scratch) { |
| #if defined(V8_TARGET_LITTLE_ENDIAN) |
| lb(rd, MemOperand(source.rm(), source.offset() + 1)); |
| lbu(scratch, source); |
| #elif defined(V8_TARGET_BIG_ENDIAN) |
| lb(rd, source); |
| lbu(scratch, MemOperand(source.rm(), source.offset() + 1)); |
| #endif |
| } else { |
| #if defined(V8_TARGET_LITTLE_ENDIAN) |
| lbu(scratch, source); |
| lb(rd, MemOperand(source.rm(), source.offset() + 1)); |
| #elif defined(V8_TARGET_BIG_ENDIAN) |
| lbu(scratch, MemOperand(source.rm(), source.offset() + 1)); |
| lb(rd, source); |
| #endif |
| } |
| sll(rd, rd, 8); |
| or_(rd, rd, scratch); |
| } |
| } |
| |
| void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) { |
| DCHECK(rd != at); |
| DCHECK(rs.rm() != at); |
| if (IsMipsArchVariant(kMips32r6)) { |
| lhu(rd, rs); |
| } else { |
| DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || |
| IsMipsArchVariant(kLoongson)); |
| MemOperand source = rs; |
| // Adjust offset for two accesses and check if offset + 1 fits into int16_t. |
| AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1); |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| if (source.rm() == scratch) { |
| #if defined(V8_TARGET_LITTLE_ENDIAN) |
| lbu(rd, MemOperand(source.rm(), source.offset() + 1)); |
| lbu(scratch, source); |
| #elif defined(V8_TARGET_BIG_ENDIAN) |
| lbu(rd, source); |
| lbu(scratch, MemOperand(source.rm(), source.offset() + 1)); |
| #endif |
| } else { |
| #if defined(V8_TARGET_LITTLE_ENDIAN) |
| lbu(scratch, source); |
| lbu(rd, MemOperand(source.rm(), source.offset() + 1)); |
| #elif defined(V8_TARGET_BIG_ENDIAN) |
| lbu(scratch, MemOperand(source.rm(), source.offset() + 1)); |
| lbu(rd, source); |
| #endif |
| } |
| sll(rd, rd, 8); |
| or_(rd, rd, scratch); |
| } |
| } |
| |
| void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) { |
| DCHECK(rd != at); |
| DCHECK(rs.rm() != at); |
| DCHECK(rs.rm() != scratch); |
| DCHECK(scratch != at); |
| if (IsMipsArchVariant(kMips32r6)) { |
| sh(rd, rs); |
| } else { |
| DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || |
| IsMipsArchVariant(kLoongson)); |
| MemOperand source = rs; |
| // Adjust offset for two accesses and check if offset + 1 fits into int16_t. |
| AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1); |
| |
| if (scratch != rd) { |
| mov(scratch, rd); |
| } |
| |
| #if defined(V8_TARGET_LITTLE_ENDIAN) |
| sb(scratch, source); |
| srl(scratch, scratch, 8); |
| sb(scratch, MemOperand(source.rm(), source.offset() + 1)); |
| #elif defined(V8_TARGET_BIG_ENDIAN) |
| sb(scratch, MemOperand(source.rm(), source.offset() + 1)); |
| srl(scratch, scratch, 8); |
| sb(scratch, source); |
| #endif |
| } |
| } |
| |
| void TurboAssembler::Ulwc1(FPURegister fd, const MemOperand& rs, |
| Register scratch) { |
| if (IsMipsArchVariant(kMips32r6)) { |
| lwc1(fd, rs); |
| } else { |
| DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || |
| IsMipsArchVariant(kLoongson)); |
| Ulw(scratch, rs); |
| mtc1(scratch, fd); |
| } |
| } |
| |
| void TurboAssembler::Uswc1(FPURegister fd, const MemOperand& rs, |
| Register scratch) { |
| if (IsMipsArchVariant(kMips32r6)) { |
| swc1(fd, rs); |
| } else { |
| DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || |
| IsMipsArchVariant(kLoongson)); |
| mfc1(scratch, fd); |
| Usw(scratch, rs); |
| } |
| } |
| |
| void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs, |
| Register scratch) { |
| DCHECK(scratch != at); |
| if (IsMipsArchVariant(kMips32r6)) { |
| Ldc1(fd, rs); |
| } else { |
| DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || |
| IsMipsArchVariant(kLoongson)); |
| Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset)); |
| mtc1(scratch, fd); |
| Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset)); |
| Mthc1(scratch, fd); |
| } |
| } |
| |
| void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs, |
| Register scratch) { |
| DCHECK(scratch != at); |
| if (IsMipsArchVariant(kMips32r6)) { |
| Sdc1(fd, rs); |
| } else { |
| DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || |
| IsMipsArchVariant(kLoongson)); |
| mfc1(scratch, fd); |
| Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset)); |
| Mfhc1(scratch, fd); |
| Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset)); |
| } |
| } |
| |
| void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) { |
| // Workaround for non-8-byte alignment of HeapNumber: convert the 64-bit |
| // load into two 32-bit loads. |
| DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4); |
| MemOperand tmp = src; |
| AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES); |
| lwc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset)); |
| if (IsFp32Mode()) { // fp32 mode. |
| FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1); |
| lwc1(nextfpreg, |
| MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset)); |
| } else { |
| DCHECK(IsFp64Mode() || IsFpxxMode()); |
| // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6 |
| DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(src.rm() != scratch); |
| lw(scratch, MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset)); |
| Mthc1(scratch, fd); |
| } |
| } |
| |
| void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) { |
| // Workaround for non-8-byte alignment of HeapNumber: convert the 64-bit |
| // store into two 32-bit stores. |
| DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4); |
| MemOperand tmp = src; |
| AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES); |
| swc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset)); |
| if (IsFp32Mode()) { // fp32 mode. |
| FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1); |
| swc1(nextfpreg, |
| MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset)); |
| } else { |
| DCHECK(IsFp64Mode() || IsFpxxMode()); |
| // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6 |
| DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); |
| DCHECK(src.rm() != t8); |
| Mfhc1(t8, fd); |
| sw(t8, MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset)); |
| } |
| } |
| |
| void TurboAssembler::Ll(Register rd, const MemOperand& rs) { |
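| // r6 ll accepts only a 9-bit signed offset; pre-r6 encodings allow 16 bits. |
| // Larger offsets are materialized into a scratch base register first. |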
| bool is_one_instruction = IsMipsArchVariant(kMips32r6) |
| ? is_int9(rs.offset()) |
| : is_int16(rs.offset()); |
| if (is_one_instruction) { |
| ll(rd, rs); |
| } else { |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| li(scratch, rs.offset()); |
| addu(scratch, scratch, rs.rm()); |
| ll(rd, MemOperand(scratch, 0)); |
| } |
| } |
| |
| void TurboAssembler::Sc(Register rd, const MemOperand& rs) { |
| bool is_one_instruction = IsMipsArchVariant(kMips32r6) |
| ? is_int9(rs.offset()) |
| : is_int16(rs.offset()); |
| if (is_one_instruction) { |
| sc(rd, rs); |
| } else { |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| li(scratch, rs.offset()); |
| addu(scratch, scratch, rs.rm()); |
| sc(rd, MemOperand(scratch, 0)); |
| } |
| } |
| |
| void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) { |
| li(dst, Operand(value), mode); |
| } |
| |
| void TurboAssembler::li(Register rd, Operand j, LiFlags mode) { |
| DCHECK(!j.is_reg()); |
| BlockTrampolinePoolScope block_trampoline_pool(this); |
| if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) { |
| // Normal load of an immediate value which does not need Relocation Info. |
| if (is_int16(j.immediate())) { |
| addiu(rd, zero_reg, j.immediate()); |
| } else if (!(j.immediate() & kHiMask)) { |
| ori(rd, zero_reg, j.immediate()); |
| } else { |
| lui(rd, (j.immediate() >> kLuiShift) & kImm16Mask); |
| if (j.immediate() & kImm16Mask) { |
| ori(rd, rd, (j.immediate() & kImm16Mask)); |
| } |
| } |
| } else { |
| int32_t immediate; |
| if (j.IsHeapObjectRequest()) { |
| RequestHeapObject(j.heap_object_request()); |
| immediate = 0; |
| } else { |
| immediate = j.immediate(); |
| } |
| |
| if (MustUseReg(j.rmode())) { |
| RecordRelocInfo(j.rmode(), immediate); |
| } |
| // We always need the same number of instructions as we may need to patch |
| // this code to load another value which may need 2 instructions to load. |
| |
| lui(rd, (immediate >> kLuiShift) & kImm16Mask); |
| ori(rd, rd, (immediate & kImm16Mask)); |
| } |
| } |
| |
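| // Registers are pushed in descending code order, so the lowest-numbered |
| // register ends up closest to the stack pointer (see |
| // SafepointRegisterStackIndex). |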
| void TurboAssembler::MultiPush(RegList regs) { |
| int16_t num_to_push = base::bits::CountPopulation(regs); |
| int16_t stack_offset = num_to_push * kPointerSize; |
| |
| Subu(sp, sp, Operand(stack_offset)); |
| for (int16_t i = kNumRegisters - 1; i >= 0; i--) { |
| if ((regs & (1 << i)) != 0) { |
| stack_offset -= kPointerSize; |
| sw(ToRegister(i), MemOperand(sp, stack_offset)); |
| } |
| } |
| } |
| |
| |
| void TurboAssembler::MultiPop(RegList regs) { |
| int16_t stack_offset = 0; |
| |
| for (int16_t i = 0; i < kNumRegisters; i++) { |
| if ((regs & (1 << i)) != 0) { |
| lw(ToRegister(i), MemOperand(sp, stack_offset)); |
| stack_offset += kPointerSize; |
| } |
| } |
| addiu(sp, sp, stack_offset); |
| } |
| |
| |
| void TurboAssembler::MultiPushFPU(RegList regs) { |
| int16_t num_to_push = base::bits::CountPopulation(regs); |
| int16_t stack_offset = num_to_push * kDoubleSize; |
| |
| Subu(sp, sp, Operand(stack_offset)); |
| for (int16_t i = kNumRegisters - 1; i >= 0; i--) { |
| if ((regs & (1 << i)) != 0) { |
| stack_offset -= kDoubleSize; |
| Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); |
| } |
| } |
| } |
| |
| |
| void TurboAssembler::MultiPopFPU(RegList regs) { |
| int16_t stack_offset = 0; |
| |
| for (int16_t i = 0; i < kNumRegisters; i++) { |
| if ((regs & (1 << i)) != 0) { |
| Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); |
| stack_offset += kDoubleSize; |
| } |
| } |
| addiu(sp, sp, stack_offset); |
| } |
| |
| |
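| // 64-bit addition of register pairs: the carry out of the low word is |
| // recovered with an unsigned compare of the low-word sum against one of its |
| // operands. |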
| void TurboAssembler::AddPair(Register dst_low, Register dst_high, |
| Register left_low, Register left_high, |
| Register right_low, Register right_high) { |
| Register kScratchReg = s3; |
| if (left_low == right_low) { |
| // Special case for left = right and the sum potentially overwriting both |
| // left and right. |
| Slt(kScratchReg, left_low, zero_reg); |
| Addu(dst_low, left_low, right_low); |
| } else { |
| Addu(dst_low, left_low, right_low); |
| // If the sum overwrites right, left remains unchanged, otherwise right |
| // remains unchanged. |
| Sltu(kScratchReg, dst_low, (dst_low == right_low) ? left_low : right_low); |
| } |
| Addu(dst_high, left_high, right_high); |
| Addu(dst_high, dst_high, kScratchReg); |
| } |
| |
| void TurboAssembler::SubPair(Register dst_low, Register dst_high, |
| Register left_low, Register left_high, |
| Register right_low, Register right_high) { |
| Register kScratchReg = s3; |
| Sltu(kScratchReg, left_low, right_low); |
| Subu(dst_low, left_low, right_low); |
| Subu(dst_high, left_high, right_high); |
| Subu(dst_high, dst_high, kScratchReg); |
| } |
| |
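| // 64-bit shift left by a variable amount. sllv/srlv use only the low five |
| // bits of the shift, so the bits shifted out of the low word are ORed into |
| // the high word, and bit 5 of the shift selects the >= 32 case where the |
| // low word moves into the high word. |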
| void TurboAssembler::ShlPair(Register dst_low, Register dst_high, |
| Register src_low, Register src_high, |
| Register shift) { |
| Label done; |
| Register kScratchReg = s3; |
| Register kScratchReg2 = s4; |
| And(shift, shift, 0x3F); |
| sllv(dst_low, src_low, shift); |
| Nor(kScratchReg2, zero_reg, shift); |
| srl(kScratchReg, src_low, 1); |
| srlv(kScratchReg, kScratchReg, kScratchReg2); |
| sllv(dst_high, src_high, shift); |
| Or(dst_high, dst_high, kScratchReg); |
| And(kScratchReg, shift, 32); |
| if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { |
| Branch(&done, eq, kScratchReg, Operand(zero_reg)); |
| mov(dst_high, dst_low); |
| mov(dst_low, zero_reg); |
| } else { |
| movn(dst_high, dst_low, kScratchReg); |
| movn(dst_low, zero_reg, kScratchReg); |
| } |
| bind(&done); |
| } |
| |
| void TurboAssembler::ShlPair(Register dst_low, Register dst_high, |
| Register src_low, Register src_high, |
| uint32_t shift) { |
| Register kScratchReg = s3; |
| shift = shift & 0x3F; |
| if (shift == 0) { |
| mov(dst_low, src_low); |
| mov(dst_high, src_high); |
| } else if (shift < 32) { |
| if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { |
| srl(dst_high, src_low, 32 - shift); |
| Ins(dst_high, src_high, shift, 32 - shift); |
| sll(dst_low, src_low, shift); |
| } else { |
| sll(dst_high, src_high, shift); |
| sll(dst_low, src_low, shift); |
| srl(kScratchReg, src_low, 32 - shift); |
| Or(dst_high, dst_high, kScratchReg); |
| } |
| } else if (shift == 32) { |
| mov(dst_low, zero_reg); |
| mov(dst_high, src_low); |
| } else { |
| shift = shift - 32; |
| mov(dst_low, zero_reg); |
| sll(dst_high, src_low, shift); |
| } |
| } |
| |
| void TurboAssembler::ShrPair(Register dst_low, Register dst_high, |
| Register src_low, Register src_high, |
| Register shift) { |
| Label done; |
| Register kScratchReg = s3; |
| Register kScratchReg2 = s4; |
| And(shift, shift, 0x3F); |
| srlv(dst_high, src_high, shift); |
| Nor(kScratchReg2, zero_reg, shift); |
| sll(kScratchReg, src_high, 1); |
| sllv(kScratchReg, kScratchReg, kScratchReg2); |
| srlv(dst_low, src_low, shift); |
| Or(dst_low, dst_low, kScratchReg); |
| And(kScratchReg, shift, 32); |
| if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { |
| Branch(&done, eq, kScratchReg, Operand(zero_reg)); |
| mov(dst_low, dst_high); |
| mov(dst_high, zero_reg); |
| } else { |
| movn(dst_low, dst_high, kScratchReg); |
| movn(dst_high, zero_reg, kScratchReg); |
| } |
| bind(&done); |
| } |
| |
| void TurboAssembler::ShrPair(Register dst_low, Register dst_high, |
| Register src_low, Register src_high, |
| uint32_t shift) { |
| Register kScratchReg = s3; |
| shift = shift & 0x3F; |
| if (shift == 0) { |
| mov(dst_low, src_low); |
| mov(dst_high, src_high); |
| } else if (shift < 32) { |
| if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { |
| srl(dst_low, src_low, shift); |
| Ins(dst_low, src_high, 32 - shift, shift); |
| srl(dst_high, src_high, shift); |
| } else { |
| srl(dst_high, src_high, shift); |
| srl(dst_low, src_low, shift); |
| shift = 32 - shift; |
| sll(kScratchReg, src_high, shift); |
| Or(dst_low, dst_low, kScratchReg); |
| } |
| } else if (shift == 32) { |
| mov(dst_high, zero_reg); |
| mov(dst_low, src_high); |
| } else { |
| shift = shift - 32; |
| mov(dst_high, zero_reg); |
| srl(dst_low, src_high, shift); |
| } |
| } |
| |
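| // 64-bit arithmetic shift right by a variable amount; for shifts >= 32 the |
| // low word receives the shifted high word and the high word is filled with |
| // the sign bit. |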
| void TurboAssembler::SarPair(Register dst_low, Register dst_high, |
| Register src_low, Register src_high, |
| Register shift) { |
| Label done; |
| Register kScratchReg = s3; |
| Register kScratchReg2 = s4; |
| And(shift, shift, 0x3F); |
| srav(dst_high, src_high, shift); |
| Nor(kScratchReg2, zero_reg, shift); |
| sll(kScratchReg, src_high, 1); |
| sllv(kScratchReg, kScratchReg, kScratchReg2); |
| srlv(dst_low, src_low, shift); |
| Or(dst_low, dst_low, kScratchReg); |
| And(kScratchReg, shift, 32); |
| Branch(&done, eq, kScratchReg, Operand(zero_reg)); |
| mov(dst_low, dst_high); |
| sra(dst_high, dst_high, 31); |
| bind(&done); |
| } |
| |
| void TurboAssembler::SarPair(Register dst_low, Register dst_high, |
| Register src_low, Register src_high, |
| uint32_t shift) { |
| Register kScratchReg = s3; |
| shift = shift & 0x3F; |
| if (shift == 0) { |
| mov(dst_low, src_low); |
| mov(dst_high, src_high); |
| } else if (shift < 32) { |
| if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { |
| srl(dst_low, src_low, shift); |
| Ins(dst_low, src_high, 32 - shift, shift); |
| sra(dst_high, src_high, shift); |
| } else { |
| sra(dst_high, src_high, shift); |
| srl(dst_low, src_low, shift); |
| shift = 32 - shift; |
| sll(kScratchReg, src_high, shift); |
| Or(dst_low, dst_low, kScratchReg); |
| } |
| } else if (shift == 32) { |
| sra(dst_high, src_high, 31); |
| mov(dst_low, src_high); |
| } else { |
| shift = shift - 32; |
| sra(dst_high, src_high, 31); |
| sra(dst_low, src_high, shift); |
| } |
| } |
| |
| void TurboAssembler::Ext(Register rt, Register rs, uint16_t pos, |
| uint16_t size) { |
| DCHECK_LT(pos, 32); |
| DCHECK_LT(pos + size, 33); |
| |
| if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { |
| ext_(rt, rs, pos, size); |
| } else { |
| // Move rs to rt and shift it left then right to get the |
| // desired bitfield on the right side and zeroes on the left. |
| int shift_left = 32 - (pos + size); |
| sll(rt, rs, shift_left); // Acts as a move if shift_left == 0. |
| |
| int shift_right = 32 - size; |
| if (shift_right > 0) { |
| srl(rt, rt, shift_right); |
| } |
| } |
| } |
| |
| void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos, |
| uint16_t size) { |
| DCHECK_LT(pos, 32); |
| DCHECK_LE(pos + size, 32); |
| DCHECK_NE(size, 0); |
| |
| if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { |
| ins_(rt, rs, pos, size); |
| } else { |
| DCHECK(rt != t8 && rs != t8); |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
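| // Build a mask of 'size' ones in scratch, isolate the low 'size' bits of rs |
| // in t8, then clear the target field in rt and merge the two. |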
| Subu(scratch, zero_reg, Operand(1)); |
| srl(scratch, scratch, 32 - size); |
| and_(t8, rs, scratch); |
| sll(t8, t8, pos); |
| sll(scratch, scratch, pos); |
| nor(scratch, scratch, zero_reg); |
| and_(scratch, rt, scratch); |
| or_(rt, t8, scratch); |
| } |
| } |
| |
| void TurboAssembler::ExtractBits(Register dest, Register source, Register pos, |
| int size, bool sign_extend) { |
| srav(dest, source, pos); |
| Ext(dest, dest, 0, size); |
| if (size == 8) { |
| if (sign_extend) { |
| Seb(dest, dest); |
| } |
| } else if (size == 16) { |
| if (sign_extend) { |
| Seh(dest, dest); |
| } |
| } else { |
| UNREACHABLE(); |
| } |
| } |
| |
| void TurboAssembler::InsertBits(Register dest, Register source, Register pos, |
| int size) { |
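| // Rotate the field at 'pos' down to bit 0, insert there, then rotate back |
| // by 32 - pos. |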
| Ror(dest, dest, pos); |
| Ins(dest, source, 0, size); |
| { |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| Subu(scratch, pos, Operand(32)); |
| Neg(scratch, Operand(scratch)); |
| Ror(dest, dest, scratch); |
| } |
| } |
| |
| void TurboAssembler::Seb(Register rd, Register rt) { |
| if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { |
| seb(rd, rt); |
| } else { |
| DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)); |
| sll(rd, rt, 24); |
| sra(rd, rd, 24); |
| } |
| } |
| |
| void TurboAssembler::Seh(Register rd, Register rt) { |
| if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { |
| seh(rd, rt); |
| } else { |
| DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)); |
| sll(rd, rt, 16); |
| sra(rd, rd, 16); |
| } |
| } |
| |
| void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) { |
| if (IsMipsArchVariant(kMips32r6)) { |
| // r6 neg_s changes the sign for NaN-like operands as well. |
| neg_s(fd, fs); |
| } else { |
| DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || |
| IsMipsArchVariant(kLoongson)); |
| Label is_nan, done; |
| Register scratch1 = t8; |
| Register scratch2 = t9; |
| BranchF32(nullptr, &is_nan, eq, fs, fs); |
| Branch(USE_DELAY_SLOT, &done); |
| // For NaN input, neg_s will return the same NaN value, |
| // while the sign has to be changed separately. |
| neg_s(fd, fs); // In delay slot. |
| bind(&is_nan); |
| mfc1(scratch1, fs); |
| li(scratch2, kBinary32SignMask); |
| Xor(scratch1, scratch1, scratch2); |
| mtc1(scratch1, fd); |
| bind(&done); |
| } |
| } |
| |
| void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) { |
| if (IsMipsArchVariant(kMips32r6)) { |
| // r6 neg_d changes the sign for NaN-like operands as well. |
| neg_d(fd, fs); |
| } else { |
| DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || |
| IsMipsArchVariant(kLoongson)); |
| Label is_nan, done; |
| Register scratch1 = t8; |
| Register scratch2 = t9; |
| BranchF64(nullptr, &is_nan, eq, fs, fs); |
| Branch(USE_DELAY_SLOT, &done); |
| // For NaN input, neg_d will return the same NaN value, |
| // while the sign has to be changed separately. |
| neg_d(fd, fs); // In delay slot. |
| bind(&is_nan); |
| Mfhc1(scratch1, fs); |
| li(scratch2, HeapNumber::kSignMask); |
| Xor(scratch1, scratch1, scratch2); |
| Mthc1(scratch1, fd); |
| bind(&done); |
| } |
| } |
| |
| void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs, |
| FPURegister scratch) { |
| // In FP64Mode we do conversion from long. |
| if (IsFp64Mode()) { |
| mtc1(rs, scratch); |
| Mthc1(zero_reg, scratch); |
| cvt_d_l(fd, scratch); |
| } else { |
| // Convert rs to a FP value in fd. |
| DCHECK(fd != scratch); |
| DCHECK(rs != at); |
| |
| Label msb_clear, conversion_done; |
| // For a value which is < 2^31, regard it as a signed positive word. |
| Branch(&msb_clear, ge, rs, Operand(zero_reg), USE_DELAY_SLOT); |
| mtc1(rs, fd); |
| { |
| UseScratchRegisterScope temps(this); |
| Register scratch1 = temps.Acquire(); |
| li(scratch1, 0x41F00000); // FP value: 2^32. |
| |
| // For unsigned inputs > 2^31, we convert to double as a signed int32, |
| // then add 2^32 to move it back to an unsigned value in range 2^31..2^32-1. |
| mtc1(zero_reg, scratch); |
| Mthc1(scratch1, scratch); |
| } |
| |
| cvt_d_w(fd, fd); |
| |
| Branch(USE_DELAY_SLOT, &conversion_done); |
| add_d(fd, fd, scratch); |
| |
| bind(&msb_clear); |
| cvt_d_w(fd, fd); |
| |
| bind(&conversion_done); |
| } |
| } |
| |
| void TurboAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs, |
| FPURegister scratch) { |
| Trunc_uw_d(fs, t8, scratch); |
| mtc1(t8, fd); |
| } |
| |
| void TurboAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs, |
| FPURegister scratch) { |
| Trunc_uw_s(fs, t8, scratch); |
| mtc1(t8, fd); |
| } |
| |
| void TurboAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) { |
| if (IsMipsArchVariant(kLoongson) && fd == fs) { |
| Mfhc1(t8, fs); |
| trunc_w_d(fd, fs); |
| Mthc1(t8, fs); |
| } else { |
| trunc_w_d(fd, fs); |
| } |
| } |
| |
| void TurboAssembler::Round_w_d(FPURegister fd, FPURegister fs) { |
| if (IsMipsArchVariant(kLoongson) && fd == fs) { |
| Mfhc1(t8, fs); |
| round_w_d(fd, fs); |
| Mthc1(t8, fs); |
| } else { |
| round_w_d(fd, fs); |
| } |
| } |
| |
| void TurboAssembler::Floor_w_d(FPURegister fd, FPURegister fs) { |
| if (IsMipsArchVariant(kLoongson) && fd == fs) { |
| Mfhc1(t8, fs); |
| floor_w_d(fd, fs); |
| Mthc1(t8, fs); |
| } else { |
| floor_w_d(fd, fs); |
| } |
| } |
| |
| void TurboAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) { |
| if (IsMipsArchVariant(kLoongson) && fd == fs) { |
| Mfhc1(t8, fs); |
| ceil_w_d(fd, fs); |
| Mthc1(t8, fs); |
| } else { |
| ceil_w_d(fd, fs); |
| } |
| } |
| |
| void TurboAssembler::Trunc_uw_d(FPURegister fd, Register rs, |
| FPURegister scratch) { |
| DCHECK(fd != scratch); |
| DCHECK(rs != at); |
| |
| { |
| // Load 2^31 into scratch as its double representation. |
| UseScratchRegisterScope temps(this); |
| Register scratch1 = temps.Acquire(); |
| li(scratch1, 0x41E00000); |
| mtc1(zero_reg, scratch); |
| Mthc1(scratch1, scratch); |
| } |
| // Test if scratch > fd. |
| // If fd < 2^31 we can convert it normally. |
| Label simple_convert; |
| BranchF(&simple_convert, nullptr, lt, fd, scratch); |
| |
| // First we subtract 2^31 from fd, then trunc it to rs |
| // and add 2^31 to rs. |
| sub_d(scratch, fd, scratch); |
| trunc_w_d(scratch, scratch); |
| mfc1(rs, scratch); |
| Or(rs, rs, 1 << 31); |
| |
| Label done; |
| Branch(&done); |
| // Simple conversion. |
| bind(&simple_convert); |
| trunc_w_d(scratch, fd); |
| mfc1(rs, scratch); |
| |
| bind(&done); |
| } |
| |
| void TurboAssembler::Trunc_uw_s(FPURegister fd, Register rs, |
| FPURegister scratch) { |
| DCHECK(fd != scratch); |
| DCHECK(rs != at); |
| |
| { |
| // Load 2^31 into scratch as its float representation. |
| UseScratchRegisterScope temps(this); |
| Register scratch1 = temps.Acquire(); |
| li(scratch1, 0x4F000000); |
| mtc1(scratch1, scratch); |
| } |
| // Test if scratch > fd. |
| // If fd < 2^31 we can convert it normally. |
| Label simple_convert; |
| BranchF32(&simple_convert, nullptr, lt, fd, scratch); |
| |
| // First we subtract 2^31 from fd, then trunc it to rs |
| // and add 2^31 to rs. |
| sub_s(scratch, fd, scratch); |
| trunc_w_s(scratch, scratch); |
| mfc1(rs, scratch); |
| Or(rs, rs, 1 << 31); |
| |
| Label done; |
| Branch(&done); |
| // Simple conversion. |
| bind(&simple_convert); |
| trunc_w_s(scratch, fd); |
| mfc1(rs, scratch); |
| |
| bind(&done); |
| } |
| |
| void TurboAssembler::Mthc1(Register rt, FPURegister fs) { |
| if (IsFp32Mode()) { |
| mtc1(rt, fs.high()); |
| } else { |
| DCHECK(IsFp64Mode() || IsFpxxMode()); |
| DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); |
| mthc1(rt, fs); |
| } |
| } |
| |
| void TurboAssembler::Mfhc1(Register rt, FPURegister fs) { |
| if (IsFp32Mode()) { |
| mfc1(rt, fs.high()); |
| } else { |
| DCHECK(IsFp64Mode() || IsFpxxMode()); |
| DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); |
| mfhc1(rt, fs); |
| } |
| } |
| |
| void TurboAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, |
| FPURegister ft, FPURegister scratch) { |
| if (IsMipsArchVariant(kMips32r2)) { |
| madd_s(fd, fr, fs, ft); |
| } else { |
| DCHECK(fr != scratch && fs != scratch && ft != scratch); |
| mul_s(scratch, fs, ft); |
| add_s(fd, fr, scratch); |
| } |
| } |
| |
| void TurboAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, |
| FPURegister ft, FPURegister scratch) { |
| if (IsMipsArchVariant(kMips32r2)) { |
| madd_d(fd, fr, fs, ft); |
| } else { |
| DCHECK(fr != scratch && fs != scratch && ft != scratch); |
| mul_d(scratch, fs, ft); |
| add_d(fd, fr, scratch); |
| } |
| } |
| |
| void TurboAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, |
| FPURegister ft, FPURegister scratch) { |
| if (IsMipsArchVariant(kMips32r2)) { |
| msub_s(fd, fr, fs, ft); |
| } else { |
| DCHECK(fr != scratch && fs != scratch && ft != scratch); |
| mul_s(scratch, fs, ft); |
| sub_s(fd, scratch, fr); |
| } |
| } |
| |
| void TurboAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, |
| FPURegister ft, FPURegister scratch) { |
| if (IsMipsArchVariant(kMips32r2)) { |
| msub_d(fd, fr, fs, ft); |
| } else { |
| DCHECK(fr != scratch && fs != scratch && ft != scratch); |
| mul_d(scratch, fs, ft); |
| sub_d(fd, scratch, fr); |
| } |
| } |
| |
| void TurboAssembler::BranchFCommon(SecondaryField sizeField, Label* target, |
| Label* nan, Condition cond, FPURegister cmp1, |
| FPURegister cmp2, BranchDelaySlot bd) { |
| { |
| BlockTrampolinePoolScope block_trampoline_pool(this); |
| if (cond == al) { |
| Branch(bd, target); |
| return; |
| } |
| |
| if (IsMipsArchVariant(kMips32r6)) { |
| sizeField = sizeField == D ? L : W; |
| } |
| DCHECK(nan || target); |
| // Check for unordered (NaN) cases. |
| if (nan) { |
| bool long_branch = |
| nan->is_bound() ? !is_near(nan) : is_trampoline_emitted(); |
| if (!IsMipsArchVariant(kMips32r6)) { |
| if (long_branch) { |
| Label skip; |
| c(UN, sizeField, cmp1, cmp2); |
| bc1f(&skip); |
| nop(); |
| BranchLong(nan, bd); |
| bind(&skip); |
| } else { |
| c(UN, sizeField, cmp1, cmp2); |
| bc1t(nan); |
| if (bd == PROTECT) { |
| nop(); |
| } |
| } |
| } else { |
| // Use kDoubleCompareReg for comparison result. It has to be unavailable |
| // to lithium register allocator. |
| DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg); |
| if (long_branch) { |
| Label skip; |
| cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2); |
| bc1eqz(&skip, kDoubleCompareReg); |
| nop(); |
| BranchLong(nan, bd); |
| bind(&skip); |
| } else { |
| cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2); |
| bc1nez(nan, kDoubleCompareReg); |
| if (bd == PROTECT) { |
| nop(); |
| } |
| } |
| } |
| } |
| |
| if (target) { |
| bool long_branch = |
| target->is_bound() ? !is_near(target) : is_trampoline_emitted(); |
| if (long_branch) { |
| Label skip; |
| Condition neg_cond = NegateFpuCondition(cond); |
| BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd); |
| BranchLong(target, bd); |
| bind(&skip); |
| } else { |
| BranchShortF(sizeField, target, cond, cmp1, cmp2, bd); |
| } |
| } |
| } |
| } |
| |
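// Emits the short-range part of a floating-point branch. Conditions that
// have no direct FPU predicate are formed from the complementary one, e.g.
// 'gt' is implemented as a ULE compare followed by a branch on false
// (bc1f/bc1eqz). The branch delay slot is protected with a nop when
// requested.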
| void TurboAssembler::BranchShortF(SecondaryField sizeField, Label* target, |
| Condition cc, FPURegister cmp1, |
| FPURegister cmp2, BranchDelaySlot bd) { |
| if (!IsMipsArchVariant(kMips32r6)) { |
| BlockTrampolinePoolScope block_trampoline_pool(this); |
| if (target) { |
| // Here NaN cases were either handled by this function or are assumed to |
| // have been handled by the caller. |
| switch (cc) { |
| case lt: |
| c(OLT, sizeField, cmp1, cmp2); |
| bc1t(target); |
| break; |
| case ult: |
| c(ULT, sizeField, cmp1, cmp2); |
| bc1t(target); |
| break; |
| case gt: |
| c(ULE, sizeField, cmp1, cmp2); |
| bc1f(target); |
| break; |
| case ugt: |
| c(OLE, sizeField, cmp1, cmp2); |
| bc1f(target); |
| break; |
| case ge: |
| c(ULT, sizeField, cmp1, cmp2); |
| bc1f(target); |
| break; |
| case uge: |
| c(OLT, sizeField, cmp1, cmp2); |
| bc1f(target); |
| break; |
| case le: |
| c(OLE, sizeField, cmp1, cmp2); |
| bc1t(target); |
| break; |
| case ule: |
| c(ULE, sizeField, cmp1, cmp2); |
| bc1t(target); |
| break; |
| case eq: |
| c(EQ, sizeField, cmp1, cmp2); |
| bc1t(target); |
| break; |
| case ueq: |
| c(UEQ, sizeField, cmp1, cmp2); |
| bc1t(target); |
| break; |
| case ne: // Unordered or not equal. |
| c(EQ, sizeField, cmp1, cmp2); |
| bc1f(target); |
| break; |
| case ogl: |
| c(UEQ, sizeField, cmp1, cmp2); |
| bc1f(target); |
| break; |
| default: |
| CHECK(0); |
| } |
| } |
| } else { |
| BlockTrampolinePoolScope block_trampoline_pool(this); |
| if (target) { |
// Here NaN cases were either handled by this function or are assumed to
// have been handled by the caller.
// Unsigned conditions are treated as their signed counterparts.
// Use kDoubleCompareReg for the comparison result; it is valid in fp64
// (FR = 1) mode, which is implied for mips32r6.
| DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg); |
| switch (cc) { |
| case lt: |
| cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2); |
| bc1nez(target, kDoubleCompareReg); |
| break; |
| case ult: |
| cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2); |
| bc1nez(target, kDoubleCompareReg); |
| break; |
| case gt: |
| cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2); |
| bc1eqz(target, kDoubleCompareReg); |
| break; |
| case ugt: |
| cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2); |
| bc1eqz(target, kDoubleCompareReg); |
| break; |
| case ge: |
| cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2); |
| bc1eqz(target, kDoubleCompareReg); |
| break; |
| case uge: |
| cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2); |
| bc1eqz(target, kDoubleCompareReg); |
| break; |
| case le: |
| cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2); |
| bc1nez(target, kDoubleCompareReg); |
| break; |
| case ule: |
| cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2); |
| bc1nez(target, kDoubleCompareReg); |
| break; |
| case eq: |
| cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2); |
| bc1nez(target, kDoubleCompareReg); |
| break; |
| case ueq: |
| cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2); |
| bc1nez(target, kDoubleCompareReg); |
| break; |
| case ne: |
| cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2); |
| bc1eqz(target, kDoubleCompareReg); |
| break; |
| case ogl: |
| cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2); |
| bc1eqz(target, kDoubleCompareReg); |
| break; |
| default: |
| CHECK(0); |
| } |
| } |
| } |
| if (bd == PROTECT) { |
| nop(); |
| } |
| } |
| |
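// Branches on an MSA condition (all/any elements zero or non-zero) of
// register 'wt'. If 'target' is out of short-branch range, the condition is
// negated and a long branch is emitted instead.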
| void TurboAssembler::BranchMSA(Label* target, MSABranchDF df, |
| MSABranchCondition cond, MSARegister wt, |
| BranchDelaySlot bd) { |
| { |
| BlockTrampolinePoolScope block_trampoline_pool(this); |
| |
| if (target) { |
| bool long_branch = |
| target->is_bound() ? !is_near(target) : is_trampoline_emitted(); |
| if (long_branch) { |
| Label skip; |
| MSABranchCondition neg_cond = NegateMSABranchCondition(cond); |
| BranchShortMSA(df, &skip, neg_cond, wt, bd); |
| BranchLong(target, bd); |
| bind(&skip); |
| } else { |
| BranchShortMSA(df, target, cond, wt, bd); |
| } |
| } |
| } |
| } |
| |
| void TurboAssembler::BranchShortMSA(MSABranchDF df, Label* target, |
| MSABranchCondition cond, MSARegister wt, |
| BranchDelaySlot bd) { |
| if (IsMipsArchVariant(kMips32r6)) { |
| BlockTrampolinePoolScope block_trampoline_pool(this); |
| if (target) { |
| switch (cond) { |
| case all_not_zero: |
| switch (df) { |
| case MSA_BRANCH_D: |
| bnz_d(wt, target); |
| break; |
| case MSA_BRANCH_W: |
| bnz_w(wt, target); |
| break; |
| case MSA_BRANCH_H: |
| bnz_h(wt, target); |
| break; |
| case MSA_BRANCH_B: |
| default: |
| bnz_b(wt, target); |
| } |
| break; |
| case one_elem_not_zero: |
| bnz_v(wt, target); |
| break; |
| case one_elem_zero: |
| switch (df) { |
| case MSA_BRANCH_D: |
| bz_d(wt, target); |
| break; |
| case MSA_BRANCH_W: |
| bz_w(wt, target); |
| break; |
| case MSA_BRANCH_H: |
| bz_h(wt, target); |
| break; |
| case MSA_BRANCH_B: |
| default: |
| bz_b(wt, target); |
| } |
| break; |
| case all_zero: |
| bz_v(wt, target); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| } |
| } |
| if (bd == PROTECT) { |
| nop(); |
| } |
| } |
| |
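// Writes 'src_low' into the low 32 bits of 'dst'. In FP64/FPXX mode the
// high word is saved with mfhc1 and restored with mthc1 so that it is
// preserved across the mtc1.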
| void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) { |
| if (IsFp32Mode()) { |
| mtc1(src_low, dst); |
| } else { |
| DCHECK(IsFp64Mode() || IsFpxxMode()); |
| DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| DCHECK(src_low != scratch); |
| mfhc1(scratch, dst); |
| mtc1(src_low, dst); |
| mthc1(scratch, dst); |
| } |
| } |
| |
| void TurboAssembler::Move(FPURegister dst, float imm) { |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| li(scratch, Operand(bit_cast<int32_t>(imm))); |
| mtc1(scratch, dst); |
| } |
| |
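// Loads a double immediate by splitting its IEEE-754 bit pattern into the
// low and high 32-bit halves and moving them into the FPU register. +/-0.0
// reuse kDoubleRegZero once it has been initialized.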
| void TurboAssembler::Move(FPURegister dst, double imm) { |
| int64_t imm_bits = bit_cast<int64_t>(imm); |
| // Handle special values first. |
| if (imm_bits == bit_cast<int64_t>(0.0) && has_double_zero_reg_set_) { |
| mov_d(dst, kDoubleRegZero); |
| } else if (imm_bits == bit_cast<int64_t>(-0.0) && has_double_zero_reg_set_) { |
| Neg_d(dst, kDoubleRegZero); |
| } else { |
| uint32_t lo, hi; |
| DoubleAsTwoUInt32(imm, &lo, &hi); |
// Move the low part of the double into the lower FPU register of the
// register pair.
| if (lo != 0) { |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| li(scratch, Operand(lo)); |
| mtc1(scratch, dst); |
| } else { |
| mtc1(zero_reg, dst); |
| } |
// Move the high part of the double into the higher FPU register of the
// register pair.
| if (hi != 0) { |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| li(scratch, Operand(hi)); |
| Mthc1(scratch, dst); |
| } else { |
| Mthc1(zero_reg, dst); |
| } |
| if (dst == kDoubleRegZero) has_double_zero_reg_set_ = true; |
| } |
| } |
| |
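// Conditional moves: Movz moves rs to rd when rt is zero, Movn when rt is
// non-zero. Loongson and MIPS32r6 lack movz/movn, so the move is emulated
// with a branch around a mov.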
| void TurboAssembler::Movz(Register rd, Register rs, Register rt) { |
| if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { |
| Label done; |
| Branch(&done, ne, rt, Operand(zero_reg)); |
| mov(rd, rs); |
| bind(&done); |
| } else { |
| movz(rd, rs, rt); |
| } |
| } |
| |
| void TurboAssembler::Movn(Register rd, Register rs, Register rt) { |
| if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { |
| Label done; |
| Branch(&done, eq, rt, Operand(zero_reg)); |
| mov(rd, rs); |
| bind(&done); |
| } else { |
| movn(rd, rs, rt); |
| } |
| } |
| |
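// Movt/Movf conditionally move rs to rd when an FPU condition code bit is
// set (Movt) or clear (Movf). Loongson has no movt/movf, so the FCSR is
// read and the condition bit tested manually.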
| void TurboAssembler::Movt(Register rd, Register rs, uint16_t cc) { |
| if (IsMipsArchVariant(kLoongson)) { |
// Tests an FP condition code and then conditionally moves rs to rd.
| // We do not currently use any FPU cc bit other than bit 0. |
| DCHECK_EQ(cc, 0); |
| DCHECK(rs != t8 && rd != t8); |
| Label done; |
| Register scratch = t8; |
// To perform the test we need to fetch the contents of the FCSR register
// and then test its cc (floating point condition code) bit (for cc = 0
// this is bit 23 of the FCSR).
| cfc1(scratch, FCSR); |
// For the MIPS I, II and III architectures, the contents of scratch are
// UNPREDICTABLE for the instruction immediately following CFC1.
| nop(); |
| srl(scratch, scratch, 16); |
| andi(scratch, scratch, 0x0080); |
| Branch(&done, eq, scratch, Operand(zero_reg)); |
| mov(rd, rs); |
| bind(&done); |
| } else { |
| movt(rd, rs, cc); |
| } |
| } |
| |
| void TurboAssembler::Movf(Register rd, Register rs, uint16_t cc) { |
| if (IsMipsArchVariant(kLoongson)) { |
// Tests an FP condition code and then conditionally moves rs to rd.
| // We do not currently use any FPU cc bit other than bit 0. |
| DCHECK_EQ(cc, 0); |
| DCHECK(rs != t8 && rd != t8); |
| Label done; |
| Register scratch = t8; |
// To perform the test we need to fetch the contents of the FCSR register
// and then test its cc (floating point condition code) bit (for cc = 0
// this is bit 23 of the FCSR).
| cfc1(scratch, FCSR); |
// For the MIPS I, II and III architectures, the contents of scratch are
// UNPREDICTABLE for the instruction immediately following CFC1.
| nop(); |
| srl(scratch, scratch, 16); |
| andi(scratch, scratch, 0x0080); |
| Branch(&done, ne, scratch, Operand(zero_reg)); |
| mov(rd, rs); |
| bind(&done); |
| } else { |
| movf(rd, rs, cc); |
| } |
| } |
| |
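// Counts leading zeros of rs into rd. Loongson has no clz instruction, so
// the count is computed by testing bits from the most significant end with
// a shifting mask.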
| void TurboAssembler::Clz(Register rd, Register rs) { |
| if (IsMipsArchVariant(kLoongson)) { |
| DCHECK(rd != t8 && rd != t9 && rs != t8 && rs != t9); |
| Register mask = t8; |
| Register scratch = t9; |
| Label loop, end; |
| { |
| UseScratchRegisterScope temps(this); |
| Register scratch1 = temps.Acquire(); |
| mov(scratch1, rs); |
| mov(rd, zero_reg); |
| lui(mask, 0x8000); |
| bind(&loop); |
| and_(scratch, scratch1, mask); |
| } |
| Branch(&end, ne, scratch, Operand(zero_reg)); |
| addiu(rd, rd, 1); |
| Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT); |
| srl(mask, mask, 1); |
| bind(&end); |
| } else { |
| clz(rd, rs); |
| } |
| } |
| |
| |
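// Converts 'double_input' to a 32-bit integer in 'result' using the given
// rounding mode. Values that round-trip exactly through cvt.w.d take a fast
// path; otherwise the conversion is done with FPU exceptions disabled and
// the relevant FCSR flags are reported in 'except_flag' (zero means no
// exception of interest occurred).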
| void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode, |
| Register result, |
| DoubleRegister double_input, |
| Register scratch, |
| DoubleRegister double_scratch, |
| Register except_flag, |
| CheckForInexactConversion check_inexact) { |
| DCHECK(result != scratch); |
| DCHECK(double_input != double_scratch); |
| DCHECK(except_flag != scratch); |
| |
| Label done; |
| |
| // Clear the except flag (0 = no exception) |
| mov(except_flag, zero_reg); |
| |
| // Test for values that can be exactly represented as a signed 32-bit integer. |
| cvt_w_d(double_scratch, double_input); |
| mfc1(result, double_scratch); |
| cvt_d_w(double_scratch, double_scratch); |
| BranchF(&done, nullptr, eq, double_input, double_scratch); |
| |
| int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions. |
| |
| if (check_inexact == kDontCheckForInexactConversion) { |
| // Ignore inexact exceptions. |
| except_mask &= ~kFCSRInexactFlagMask; |
| } |
| |
| // Save FCSR. |
| cfc1(scratch, FCSR); |
| // Disable FPU exceptions. |
| ctc1(zero_reg, FCSR); |
| |
| // Do operation based on rounding mode. |
| switch (rounding_mode) { |
| case kRoundToNearest: |
| Round_w_d(double_scratch, double_input); |
| break; |
| case kRoundToZero: |
| Trunc_w_d(double_scratch, double_input); |
| break; |
| case kRoundToPlusInf: |
| Ceil_w_d(double_scratch, double_input); |
| break; |
| case kRoundToMinusInf: |
| Floor_w_d(double_scratch, double_input); |
| break; |
| } // End of switch-statement. |
| |
| // Retrieve FCSR. |
| cfc1(except_flag, FCSR); |
| // Restore FCSR. |
| ctc1(scratch, FCSR); |
| // Move the converted value into the result register. |
| mfc1(result, double_scratch); |
| |
| // Check for fpu exceptions. |
| And(except_flag, except_flag, Operand(except_mask)); |
| |
| bind(&done); |
| } |
| |
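// Attempts a fast truncation of 'double_input' to a signed 32-bit integer.
// FCSR flags are cleared before the trunc and checked afterwards; on
// success (no overflow, underflow or invalid-operation flag) control jumps
// to 'done' with the value in 'result'.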
| void TurboAssembler::TryInlineTruncateDoubleToI(Register result, |
| DoubleRegister double_input, |
| Label* done) { |
| DoubleRegister single_scratch = kLithiumScratchDouble.low(); |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| Register scratch2 = t9; |
| |
| // Clear cumulative exception flags and save the FCSR. |
| cfc1(scratch2, FCSR); |
| ctc1(zero_reg, FCSR); |
| // Try a conversion to a signed integer. |
| trunc_w_d(single_scratch, double_input); |
| mfc1(result, single_scratch); |
| // Retrieve and restore the FCSR. |
| cfc1(scratch, FCSR); |
| ctc1(scratch2, FCSR); |
| // Check for overflow and NaNs. |
| And(scratch, |
| scratch, |
| kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask); |
| // If we had no exceptions we are done. |
| Branch(done, eq, scratch, Operand(zero_reg)); |
| } |
| |
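// Truncates 'double_input' to a signed 32-bit integer. The inline fast path
// is tried first; if it fails, the input is spilled to the stack and the
// DoubleToIStub is called.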
| void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result, |
| DoubleRegister double_input) { |
| Label done; |
| |
| TryInlineTruncateDoubleToI(result, double_input, &done); |
| |
// If we fell through, the inline version didn't succeed, so call the stub.
| push(ra); |
| Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack. |
| Sdc1(double_input, MemOperand(sp, 0)); |
| |
| CallStubDelayed(new (zone) DoubleToIStub(nullptr, result)); |
| |
| Addu(sp, sp, Operand(kDoubleSize)); |
| pop(ra); |
| |
| bind(&done); |
| } |
| |
// Emulated conditional branches do not emit a nop in the branch delay slot.
| // |
| // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. |
| #define BRANCH_ARGS_CHECK(cond, rs, rt) \ |
| DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \ |
| (cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg))) |
| |
| void TurboAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) { |
| DCHECK(IsMipsArchVariant(kMips32r6) ? is_int26(offset) : is_int16(offset)); |
| BranchShort(offset, bdslot); |
| } |
| |
| void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs, |
| const Operand& rt, BranchDelaySlot bdslot) { |
| bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot); |
| DCHECK(is_near); |
| USE(is_near); |
| } |
| |
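// Branches to L, using a short branch when the label is known to be in
// range (or no trampoline has been emitted yet) and a long branch
// otherwise.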
| void TurboAssembler::Branch(Label* L, BranchDelaySlot bdslot) { |
| if (L->is_bound()) { |
| if (is_near_branch(L)) { |
| BranchShort(L, bdslot); |
| } else { |
| BranchLong(L, bdslot); |
| } |
| } else { |
| if (is_trampoline_emitted()) { |
| BranchLong(L, bdslot); |
| } else { |
| BranchShort(L, bdslot); |
| } |
| } |
| } |
| |
| void TurboAssembler::Branch(Label* L, Condition cond, Register rs, |
| const Operand& rt, BranchDelaySlot bdslot) { |
| if (L->is_bound()) { |
| if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) { |
| if (cond != cc_always) { |
| Label skip; |
| Condition neg_cond = NegateCondition(cond); |
| BranchShort(&skip, neg_cond, rs, rt); |
| BranchLong(L, bdslot); |
| bind(&skip); |
| } else { |
| BranchLong(L, bdslot); |
| } |
| } |
| } else { |
| if (is_trampoline_emitted()) { |
| if (cond != cc_always) { |
| Label skip; |
| Condition neg_cond = NegateCondition(cond); |
| BranchShort(&skip, neg_cond, rs, rt); |
| BranchLong(L, bdslot); |
| bind(&skip); |
| } else { |
| BranchLong(L, bdslot); |
| } |
| } else { |
| BranchShort(L, cond, rs, rt, bdslot); |
| } |
| } |
| } |
| |
| void TurboAssembler::Branch(Label* L, Condition cond, Register rs, |
| Heap::RootListIndex index, BranchDelaySlot bdslot) { |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.Acquire(); |
| LoadRoot(scratch, index); |
| Branch(L, cond, rs, Operand(scratch), bdslot); |
| } |
| |
| void TurboAssembler::BranchShortHelper(int16_t offset, Label* L, |
| BranchDelaySlot bdslot) { |
| DCHECK(L == nullptr || offset == 0); |
| offset = GetOffset(offset, L, OffsetSize::kOffset16); |
| b(offset); |
| |
| // Emit a nop in the branch delay slot if required. |
| if (bdslot == PROTECT) |
| nop(); |
| } |
| |
| void TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L) { |
| DCHECK(L == nullptr || offset == 0); |
| offset = GetOffset(offset, L, OffsetSize::kOffset26); |
| bc(offset); |
| } |
| |
| void TurboAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) { |
| if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { |
| DCHECK(is_int26(offset)); |
| BranchShortHelperR6(offset, nullptr); |
| } else { |
| DCHECK(is_int16(offset)); |
| BranchShortHelper(offset, nullptr, bdslot); |
| } |
| } |
| |
| void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) { |
| if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { |
| BranchShortHelperR6(0, L); |
| } else { |
| BranchShortHelper(0, L, bdslot); |
| } |
| } |
| |
| |
| static inline bool IsZero(const Operand& rt) { |
| if (rt.is_reg()) { |
| return rt.rm() == zero_reg; |
| } else { |
| return rt.immediate() == 0; |
| } |
| } |
| |
| int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) { |
| if (L) { |
| offset = branch_offset_helper(L, bits) >> 2; |
| } else { |
| DCHECK(is_intn(offset, bits)); |
| } |
| return offset; |
| } |
| |
| Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt, |
| Register scratch) { |
| Register r2 = no_reg; |
| if (rt.is_reg()) { |
| r2 = rt.rm(); |
| } else { |
| r2 = scratch; |
| li(r2, rt); |
| } |
| |
| return r2; |
| } |
| |
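// The CalculateOffset overloads return false if the label is not reachable
// with a branch offset of 'bits' bits; otherwise they store the encoded
// word offset in 'offset'. The second overload also materializes 'rt' into
// 'scratch' so it can be used as a register operand.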
| bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, |
| OffsetSize bits) { |
| if (!is_near(L, bits)) return false; |
| offset = GetOffset(offset, L, bits); |
| return true; |
| } |
| |
| bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, OffsetSize bits, |
| Register& scratch, const Operand& rt) { |
| if (!is_near(L, bits)) return false; |
| scratch = GetRtAsRegisterHelper(rt, scratch); |
| offset = GetOffset(offset, L, bits); |
| return true; |
| } |
| |
| bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, |
| Condition cond, Register rs, |
| const Operand& rt) { |
| DCHECK(L == nullptr || offset == 0); |
| UseScratchRegisterScope temps(this); |
| Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; |
| |
// Be careful to always use shifted_branch_offset only just before the
// branch instruction, as the location will be remembered for patching the
// target.
| { |
| BlockTrampolinePoolScope block_trampoline_pool(this); |
| switch (cond) { |
| case cc_always: |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; |
| bc(offset); |
| break; |
| case eq: |
| if (rt.is_reg() && rs.code() == rt.rm().code()) { |
// Pre R6 beq is used here to make the code patchable. Otherwise bc
// should be used, which has no condition field and so is not patchable.
| if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) |
| return false; |
| beq(rs, scratch, offset); |
| nop(); |
| } else if (IsZero(rt)) { |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false; |
| beqzc(rs, offset); |
| } else { |
| // We don't want any other register but scratch clobbered. |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) |
| return false; |
| beqc(rs, scratch, offset); |
| } |
| break; |
| case ne: |
| if (rt.is_reg() && rs.code() == rt.rm().code()) { |
| // Pre R6 bne is used here to make the code patchable. Otherwise we |
| // should not generate any instruction. |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) |
| return false; |
| bne(rs, scratch, offset); |
| nop(); |
| } else if (IsZero(rt)) { |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false; |
| bnezc(rs, offset); |
| } else { |
| // We don't want any other register but scratch clobbered. |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) |
| return false; |
| bnec(rs, scratch, offset); |
| } |
| break; |
| |
| // Signed comparison. |
| case greater: |
| // rs > rt |
| if (rt.is_reg() && rs.code() == rt.rm().code()) { |
| break; // No code needs to be emitted. |
| } else if (rs == zero_reg) { |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) |
| return false; |
| bltzc(scratch, offset); |
| } else if (IsZero(rt)) { |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; |
| bgtzc(rs, offset); |
| } else { |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) |
| return false; |
| DCHECK(rs != scratch); |
| bltc(scratch, rs, offset); |
| } |
| break; |
| case greater_equal: |
| // rs >= rt |
| if (rt.is_reg() && rs.code() == rt.rm().code()) { |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; |
| bc(offset); |
| } else if (rs == zero_reg) { |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) |
| return false; |
| blezc(scratch, offset); |
| } else if (IsZero(rt)) { |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; |
| bgezc(rs, offset); |
| } else { |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) |
| return false; |
| DCHECK(rs != scratch); |
| bgec(rs, scratch, offset); |
| } |
| break; |
| case less: |
| // rs < rt |
| if (rt.is_reg() && rs.code() == rt.rm().code()) { |
| break; // No code needs to be emitted. |
| } else if (rs == zero_reg) { |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) |
| return false; |
| bgtzc(scratch, offset); |
| } else if (IsZero(rt)) { |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; |
| bltzc(rs, offset); |
| } else { |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) |
| return false; |
| DCHECK(rs != scratch); |
| bltc(rs, scratch, offset); |
| } |
| break; |
| case less_equal: |
| // rs <= rt |
| if (rt.is_reg() && rs.code() == rt.rm().code()) { |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; |
| bc(offset); |
| } else if (rs == zero_reg) { |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) |
| return false; |
| bgezc(scratch, offset); |
| } else if (IsZero(rt)) { |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; |
| blezc(rs, offset); |
| } else { |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) |
| return false; |
| DCHECK(rs != scratch); |
| bgec(scratch, rs, offset); |
| } |
| break; |
| |
| // Unsigned comparison. |
| case Ugreater: |
| // rs > rt |
| if (rt.is_reg() && rs.code() == rt.rm().code()) { |
| break; // No code needs to be emitted. |
| } else if (rs == zero_reg) { |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt)) |
| return false; |
| bnezc(scratch, offset); |
| } else if (IsZero(rt)) { |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false; |
| bnezc(rs, offset); |
| } else { |
| if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) |
| return false; |
|