| /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
| * vim: set ts=8 sts=4 et sw=4 tw=99: |
| * This Source Code Form is subject to the terms of the Mozilla Public |
| * License, v. 2.0. If a copy of the MPL was not distributed with this |
| * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
| |
| #include "jit/arm/MacroAssembler-arm.h" |
| |
| #include "mozilla/Casting.h" |
| #include "mozilla/DebugOnly.h" |
| #include "mozilla/MathAlgorithms.h" |
| |
| #include "jit/arm/Simulator-arm.h" |
| #include "jit/Bailouts.h" |
| #include "jit/BaselineFrame.h" |
| #include "jit/JitFrames.h" |
| #include "jit/MacroAssembler.h" |
| #include "jit/MoveEmitter.h" |
| |
| #include "jit/MacroAssembler-inl.h" |
| |
| using namespace js; |
| using namespace jit; |
| |
| using mozilla::Abs; |
| using mozilla::BitwiseCast; |
| |
| bool |
| isValueDTRDCandidate(ValueOperand& val) |
| { |
| // In order to be used for a DTRD memory function, the two target registers |
| // need to be a) Adjacent, with the tag larger than the payload, and b) |
| // Aligned to a multiple of two. |
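|     // For example, the pair (payload = r2, type = r3) qualifies, whereas |
|     // (payload = r1, type = r2) does not, since r1 is odd. |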
| if ((val.typeReg().code() != (val.payloadReg().code() + 1))) |
| return false; |
| if ((val.payloadReg().code() & 1) != 0) |
| return false; |
| return true; |
| } |
| |
| void |
| MacroAssemblerARM::convertBoolToInt32(Register source, Register dest) |
| { |
| // Note that C++ bool is only 1 byte, so zero extend it to clear the |
| // higher-order bits. |
| ma_and(Imm32(0xff), source, dest); |
| } |
| |
| void |
| MacroAssemblerARM::convertInt32ToDouble(Register src, FloatRegister dest_) |
| { |
| // Direct conversions aren't possible. |
| VFPRegister dest = VFPRegister(dest_); |
| as_vxfer(src, InvalidReg, dest.sintOverlay(), CoreToFloat); |
| as_vcvt(dest, dest.sintOverlay()); |
| } |
| |
| void |
| MacroAssemblerARM::convertInt32ToDouble(const Address& src, FloatRegister dest) |
| { |
| ScratchDoubleScope scratch(asMasm()); |
| ma_vldr(src, scratch); |
| as_vcvt(dest, VFPRegister(scratch).sintOverlay()); |
| } |
| |
| void |
| MacroAssemblerARM::convertInt32ToDouble(const BaseIndex& src, FloatRegister dest) |
| { |
| Register base = src.base; |
| uint32_t scale = Imm32::ShiftOf(src.scale).value; |
| |
| ScratchRegisterScope scratch(asMasm()); |
| |
| if (src.offset != 0) { |
| ma_mov(base, scratch); |
| base = scratch; |
| ma_add(Imm32(src.offset), base); |
| } |
| ma_ldr(DTRAddr(base, DtrRegImmShift(src.index, LSL, scale)), scratch); |
| convertInt32ToDouble(scratch, dest); |
| } |
| |
| void |
| MacroAssemblerARM::convertUInt32ToDouble(Register src, FloatRegister dest_) |
| { |
| // Direct conversions aren't possible. |
| VFPRegister dest = VFPRegister(dest_); |
| as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat); |
| as_vcvt(dest, dest.uintOverlay()); |
| } |
| |
| static const double TO_DOUBLE_HIGH_SCALE = 0x100000000; |
| |
| void |
| MacroAssemblerARMCompat::convertUInt64ToDouble(Register64 src, Register temp, FloatRegister dest) |
| { |
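|     // Compute dest = double(high) * 2^32 + double(low). Each 32-bit half is |
|     // exactly representable as a double, so only the final addition can |
|     // round. TO_DOUBLE_HIGH_SCALE (2^32) provides the scale factor. |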
| convertUInt32ToDouble(src.high, dest); |
| movePtr(ImmPtr(&TO_DOUBLE_HIGH_SCALE), ScratchRegister); |
| loadDouble(Address(ScratchRegister, 0), ScratchDoubleReg); |
| mulDouble(ScratchDoubleReg, dest); |
| convertUInt32ToDouble(src.low, ScratchDoubleReg); |
| addDouble(ScratchDoubleReg, dest); |
| } |
| |
| void |
| MacroAssemblerARM::convertUInt32ToFloat32(Register src, FloatRegister dest_) |
| { |
| // Direct conversions aren't possible. |
| VFPRegister dest = VFPRegister(dest_); |
| as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat); |
| as_vcvt(VFPRegister(dest).singleOverlay(), dest.uintOverlay()); |
| } |
| |
| void MacroAssemblerARM::convertDoubleToFloat32(FloatRegister src, FloatRegister dest, |
| Condition c) |
| { |
| as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src), false, c); |
| } |
| |
| // There are two options for implementing emitTruncateDouble: |
| // |
| // 1. Convert the floating point value to an integer. If it did not fit, it |
| //    was clamped to INT_MIN/INT_MAX, which we can test for. NOTE: if the |
| //    value really was supposed to be INT_MAX / INT_MIN then it will be wrong. |
| // |
| // 2. Convert the floating point value to an integer. If it did not fit, one |
| //    or two bits were set in the FPSCR. Check those. |
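| // For example, with option 1 the input 2147483648.0 converts (by clamping) |
| // to INT32_MAX, so comparing the result against INT32_MAX / INT32_MIN |
| // catches out-of-range inputs, at the cost of also flagging genuine |
| // INT32_MAX / INT32_MIN results. |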
| void |
| MacroAssemblerARM::branchTruncateDouble(FloatRegister src, Register dest, Label* fail) |
| { |
| ScratchDoubleScope scratch(asMasm()); |
| FloatRegister scratchSIntReg = scratch.sintOverlay(); |
| |
| ma_vcvt_F64_I32(src, scratchSIntReg); |
| ma_vxfer(scratchSIntReg, dest); |
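|     // Bail if the result is INT32_MAX or INT32_MIN, i.e. if the conversion |
|     // may have clamped (option 1 above). |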
| ma_cmp(dest, Imm32(0x7fffffff)); |
| ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual); |
| ma_b(fail, Assembler::Equal); |
| } |
| |
| // Checks whether a double is representable as a 32-bit integer. If so, the |
| // integer is written to the output register. Otherwise, a bailout is taken to |
| // the given snapshot. This function overwrites the scratch float register. |
| void |
| MacroAssemblerARM::convertDoubleToInt32(FloatRegister src, Register dest, |
| Label* fail, bool negativeZeroCheck) |
| { |
|     // Convert the floating point value to an integer. If it did not fit, |
|     // then converting it *back* to a double will give a different value, |
|     // which we can test for. |
| ScratchDoubleScope scratchDouble(asMasm()); |
| FloatRegister scratchSIntReg = scratchDouble.sintOverlay(); |
| |
| ma_vcvt_F64_I32(src, scratchSIntReg); |
| // Move the value into the dest register. |
| ma_vxfer(scratchSIntReg, dest); |
| ma_vcvt_I32_F64(scratchSIntReg, scratchDouble); |
| ma_vcmp(src, scratchDouble); |
| as_vmrs(pc); |
| ma_b(fail, Assembler::VFP_NotEqualOrUnordered); |
| |
| if (negativeZeroCheck) { |
| ma_cmp(dest, Imm32(0)); |
|         // Test and bail for -0.0 when the integer result is 0: move the top |
|         // word of the double into the output reg; if it is 0x80000000, then |
|         // the original value was -0.0. |
| as_vxfer(dest, InvalidReg, src, FloatToCore, Assembler::Equal, 1); |
| ma_cmp(dest, Imm32(0x80000000), Assembler::Equal); |
| ma_b(fail, Assembler::Equal); |
| } |
| } |
| |
| // Checks whether a float32 is representable as a 32-bit integer. If so, the |
| // integer is written to the output register. Otherwise, a bailout is taken to |
| // the given snapshot. This function overwrites the scratch float register. |
| void |
| MacroAssemblerARM::convertFloat32ToInt32(FloatRegister src, Register dest, |
| Label* fail, bool negativeZeroCheck) |
| { |
|     // Converting the floating point value to an integer and then converting |
|     // it back to a float32 is not a sufficient check on its own: float to |
|     // int32 conversions clamp, so e.g. float(INT32_MAX + 1) gets converted |
|     // to INT32_MAX, which converts right back to float(INT32_MAX + 1) and |
|     // the round trip compares equal. If the result is one of the clamped |
|     // values, we just bail out (see below). |
| ScratchFloat32Scope scratchFloat(asMasm()); |
| |
| FloatRegister ScratchSIntReg = scratchFloat.sintOverlay(); |
| ma_vcvt_F32_I32(src, ScratchSIntReg); |
| |
| // Store the result |
| ma_vxfer(ScratchSIntReg, dest); |
| |
| ma_vcvt_I32_F32(ScratchSIntReg, scratchFloat); |
| ma_vcmp(src, scratchFloat); |
| as_vmrs(pc); |
| ma_b(fail, Assembler::VFP_NotEqualOrUnordered); |
| |
| // Bail out in the clamped cases. |
| ma_cmp(dest, Imm32(0x7fffffff)); |
| ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual); |
| ma_b(fail, Assembler::Equal); |
| |
| if (negativeZeroCheck) { |
| ma_cmp(dest, Imm32(0)); |
|         // Test and bail for -0.0 when the integer result is 0: move the |
|         // float bits into the output reg; if they are 0x80000000, then the |
|         // original value was -0.0. |
| as_vxfer(dest, InvalidReg, VFPRegister(src).singleOverlay(), FloatToCore, Assembler::Equal, 0); |
| ma_cmp(dest, Imm32(0x80000000), Assembler::Equal); |
| ma_b(fail, Assembler::Equal); |
| } |
| } |
| |
| void |
| MacroAssemblerARM::convertFloat32ToDouble(FloatRegister src, FloatRegister dest) |
| { |
| MOZ_ASSERT(dest.isDouble()); |
| MOZ_ASSERT(src.isSingle()); |
| as_vcvt(VFPRegister(dest), VFPRegister(src).singleOverlay()); |
| } |
| |
| void |
| MacroAssemblerARM::branchTruncateFloat32(FloatRegister src, Register dest, Label* fail) |
| { |
| ScratchFloat32Scope scratch(asMasm()); |
| ma_vcvt_F32_I32(src, scratch.sintOverlay()); |
| ma_vxfer(scratch, dest); |
| ma_cmp(dest, Imm32(0x7fffffff)); |
| ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual); |
| ma_b(fail, Assembler::Equal); |
| } |
| |
| void |
| MacroAssemblerARM::convertInt32ToFloat32(Register src, FloatRegister dest) |
| { |
| // Direct conversions aren't possible. |
| as_vxfer(src, InvalidReg, dest.sintOverlay(), CoreToFloat); |
| as_vcvt(dest.singleOverlay(), dest.sintOverlay()); |
| } |
| |
| void |
| MacroAssemblerARM::convertInt32ToFloat32(const Address& src, FloatRegister dest) |
| { |
| ScratchFloat32Scope scratch(asMasm()); |
| ma_vldr(src, scratch); |
| as_vcvt(dest, VFPRegister(scratch).sintOverlay()); |
| } |
| |
| void |
| MacroAssemblerARM::addDouble(FloatRegister src, FloatRegister dest) |
| { |
| ma_vadd(dest, src, dest); |
| } |
| |
| void |
| MacroAssemblerARM::subDouble(FloatRegister src, FloatRegister dest) |
| { |
| ma_vsub(dest, src, dest); |
| } |
| |
| void |
| MacroAssemblerARM::mulDouble(FloatRegister src, FloatRegister dest) |
| { |
| ma_vmul(dest, src, dest); |
| } |
| |
| void |
| MacroAssemblerARM::divDouble(FloatRegister src, FloatRegister dest) |
| { |
| ma_vdiv(dest, src, dest); |
| } |
| |
| void |
| MacroAssemblerARM::negateDouble(FloatRegister reg) |
| { |
| ma_vneg(reg, reg); |
| } |
| |
| void |
| MacroAssemblerARM::inc64(AbsoluteAddress dest) |
| { |
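|     // Spill r0/r1, load the 64-bit counter at 'dest' into r0/r1, add 1 with |
|     // carry into the high word, store the result back, then restore r0/r1. |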
| ScratchRegisterScope scratch(asMasm()); |
| |
| ma_strd(r0, r1, EDtrAddr(sp, EDtrOffImm(-8)), PreIndex); |
| |
| ma_mov(Imm32((int32_t)dest.addr), scratch); |
| ma_ldrd(EDtrAddr(scratch, EDtrOffImm(0)), r0, r1); |
| |
| ma_add(Imm32(1), r0, SetCC); |
| ma_adc(Imm32(0), r1, LeaveCC); |
| |
| ma_strd(r0, r1, EDtrAddr(scratch, EDtrOffImm(0))); |
| ma_ldrd(EDtrAddr(sp, EDtrOffImm(8)), r0, r1, PostIndex); |
| } |
| |
| bool |
| MacroAssemblerARM::alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op, |
| SBit s, Condition c) |
| { |
| if ((s == SetCC && ! condsAreSafe(op)) || !can_dbl(op)) |
| return false; |
| |
| ALUOp interop = getDestVariant(op); |
| Imm8::TwoImm8mData both = Imm8::EncodeTwoImms(imm.value); |
| if (both.fst.invalid) |
| return false; |
| |
|     // For the most part, there is no good reason to set the condition codes |
|     // for the first instruction. We could do better things if the second |
|     // instruction doesn't have a dest, such as checking for overflow by |
|     // performing the first operation and skipping the second if the first |
|     // overflowed, which preserves the overflow condition code. |
|     // Unfortunately, that is horribly brittle. |
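|     // For example (illustrative), 0x10001 is not a valid imm8m, but it |
|     // splits into 0x10000 and 0x1, so 'add dest, src1, #0x10001' becomes: |
|     //   add dest, src1, #0x10000 |
|     //   add dest, dest, #0x1 |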
| as_alu(dest, src1, Operand2(both.fst), interop, LeaveCC, c); |
| as_alu(dest, dest, Operand2(both.snd), op, s, c); |
| return true; |
| } |
| |
| void |
| MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest, |
| ALUOp op, SBit s, Condition c) |
| { |
| // As it turns out, if you ask for a compare-like instruction you *probably* |
| // want it to set condition codes. |
| if (dest == InvalidReg) |
| MOZ_ASSERT(s == SetCC); |
| |
| // The operator gives us the ability to determine how this can be used. |
| Imm8 imm8 = Imm8(imm.value); |
| // One instruction: If we can encode it using an imm8m, then do so. |
| if (!imm8.invalid) { |
| as_alu(dest, src1, imm8, op, s, c); |
| return; |
| } |
| |
| // One instruction, negated: |
| Imm32 negImm = imm; |
| Register negDest; |
| ALUOp negOp = ALUNeg(op, dest, &negImm, &negDest); |
| Imm8 negImm8 = Imm8(negImm.value); |
|     // 'add r1, r2, -15' can be replaced with 'sub r1, r2, 15'. For bonus |
|     // points, dest can be replaced too (nearly always invalid => ScratchRegister). |
|     // This is useful if we wish to negate tst: tst has an invalid (i.e. |
|     // unused) dest, but its negation, bic, *requires* a dest. We can |
|     // accommodate, but it will need to clobber *something*, and the scratch |
|     // register isn't being used, so... |
| if (negOp != OpInvalid && !negImm8.invalid) { |
| as_alu(negDest, src1, negImm8, negOp, s, c); |
| return; |
| } |
| |
| if (HasMOVWT()) { |
| // If the operation is a move-a-like then we can try to use movw to move |
| // the bits into the destination. Otherwise, we'll need to fall back on |
| // a multi-instruction format :( |
| // movw/movt does not set condition codes, so don't hold your breath. |
| if (s == LeaveCC && (op == OpMov || op == OpMvn)) { |
|             // ARMv7 supports movw/movt. movw zero-extends its 16 bit argument, |
|             // so we can set the register this way. movt leaves the bottom 16 |
|             // bits intact, so it cannot set a register to a constant on its own. |
| if (op == OpMov && ((imm.value & ~ 0xffff) == 0)) { |
| MOZ_ASSERT(src1 == InvalidReg); |
| as_movw(dest, Imm16((uint16_t)imm.value), c); |
| return; |
| } |
| |
| // If they asked for a mvn rfoo, imm, where ~imm fits into 16 bits |
| // then do it. |
| if (op == OpMvn && (((~imm.value) & ~ 0xffff) == 0)) { |
| MOZ_ASSERT(src1 == InvalidReg); |
| as_movw(dest, Imm16((uint16_t)~imm.value), c); |
| return; |
| } |
| |
|             // TODO: constant dedup may enable us to emit 'add dest, r0, 23' |
|             // *if* we are attempting to load a constant that looks similar to |
|             // one that already exists. If it can't be done with a single movw, |
|             // then we *need* two instructions; since this must be some sort of |
|             // a move operation, we can just use a movw/movt pair and get the |
|             // whole thing done in two moves. This does not work for ops like |
|             // add, since we'd need to do: movw tmp; movt tmp; add dest, tmp, |
|             // src1. |
| if (op == OpMvn) |
| imm.value = ~imm.value; |
| as_movw(dest, Imm16(imm.value & 0xffff), c); |
| as_movt(dest, Imm16((imm.value >> 16) & 0xffff), c); |
| return; |
| } |
|         // If we weren't doing a move-like op, a 16 bit immediate would |
|         // require 2 instructions. In the same amount of space, and in less |
|         // time, we can do two 8 bit operations, reusing the dest register, e.g. |
|         //   movw tmp, 0xffff; add dest, src, tmp ror 4 |
|         // vs. |
|         //   add dest, src, 0xff0; add dest, dest, 0xf000000f |
|         // |
|         // It turns out that there are some immediates that we miss with the |
|         // second approach. A sample value is 'add dest, src, 0x1fffe': this |
|         // could be done with 'movw tmp, 0xffff; add dest, src, tmp lsl 1', |
|         // but since imm8m rotations only come in even amounts, it cannot be |
|         // encoded as two imm8m's. We try the two-imm8m encoding first, since |
|         // it is faster: both operations should take 1 cycle, whereas |
|         // 'add dest, src, tmp ror 4' takes two cycles to execute. |
| } |
| |
|     // Either a) this isn't ARMv7, or b) this isn't a move. Start by |
|     // attempting to generate a two instruction form. Some things cannot be |
|     // made into two-instruction forms correctly, namely 'adds dest, src, 0xffff'. |
|     // Since we want the condition codes (and don't know which ones will be |
|     // checked), we need to assume that the overflow flag will be checked, and |
|     // 'add{,s} dest, src, 0xff00; add{,s} dest, dest, 0xff' is not guaranteed |
|     // to set the overflow flag the same way as the (theoretical) one |
|     // instruction variant. |
| if (alu_dbl(src1, imm, dest, op, s, c)) |
| return; |
| |
| // And try with its negative. |
| if (negOp != OpInvalid && |
| alu_dbl(src1, negImm, negDest, negOp, s, c)) |
| return; |
| |
| // Often this code is called with dest as the ScratchRegister. The register |
| // is logically owned by the caller after this call. |
| const Register& scratch = ScratchRegister; |
| MOZ_ASSERT(src1 != scratch); |
| #ifdef DEBUG |
| if (dest != scratch) { |
| // If the destination register is not the scratch register, double check |
| // that the current function does not erase the content of the scratch |
| // register. |
| ScratchRegisterScope assertScratch(asMasm()); |
| } |
| #endif |
| |
|     // Well, damn. We can use two 16 bit movs and then do the op, or we can |
|     // do a single load from a constant pool and then the op. |
| if (HasMOVWT()) { |
| // Try to load the immediate into a scratch register then use that |
| as_movw(scratch, Imm16(imm.value & 0xffff), c); |
| if ((imm.value >> 16) != 0) |
| as_movt(scratch, Imm16((imm.value >> 16) & 0xffff), c); |
| } else { |
| // Going to have to use a load. If the operation is a move, then just |
| // move it into the destination register |
| if (op == OpMov) { |
| as_Imm32Pool(dest, imm.value, c); |
| return; |
| } else { |
| // If this isn't just going into a register, then stick it in a |
| // temp, and then proceed. |
| as_Imm32Pool(scratch, imm.value, c); |
| } |
| } |
| as_alu(dest, src1, O2Reg(scratch), op, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_alu(Register src1, Operand op2, Register dest, ALUOp op, |
| SBit s, Assembler::Condition c) |
| { |
| MOZ_ASSERT(op2.getTag() == Operand::OP2); |
| as_alu(dest, src1, op2.toOp2(), op, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_alu(Register src1, Operand2 op2, Register dest, ALUOp op, SBit s, Condition c) |
| { |
| as_alu(dest, src1, op2, op, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_nop() |
| { |
| as_nop(); |
| } |
| |
| void |
| MacroAssemblerARM::ma_movPatchable(Imm32 imm_, Register dest, Assembler::Condition c, |
| RelocStyle rs) |
| { |
| int32_t imm = imm_.value; |
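|     // L_MOVWT emits a movw/movt pair; L_LDR emits a single pc-relative load |
|     // from a constant pool. Either way the immediate can be patched later |
|     // (see ma_mov_patch below). |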
| switch(rs) { |
| case L_MOVWT: |
| as_movw(dest, Imm16(imm & 0xffff), c); |
| as_movt(dest, Imm16(imm >> 16 & 0xffff), c); |
| break; |
| case L_LDR: |
| as_Imm32Pool(dest, imm, c); |
| break; |
| } |
| } |
| |
| void |
| MacroAssemblerARM::ma_movPatchable(ImmPtr imm, Register dest, Assembler::Condition c, |
| RelocStyle rs) |
| { |
| ma_movPatchable(Imm32(int32_t(imm.value)), dest, c, rs); |
| } |
| |
| /* static */ void |
| MacroAssemblerARM::ma_mov_patch(Imm32 imm_, Register dest, Assembler::Condition c, |
| RelocStyle rs, Instruction* i) |
| { |
| MOZ_ASSERT(i); |
| int32_t imm = imm_.value; |
| |
| // Make sure the current instruction is not an artificial guard inserted |
| // by the assembler buffer. |
| i = i->skipPool(); |
| |
| switch(rs) { |
| case L_MOVWT: |
| Assembler::as_movw_patch(dest, Imm16(imm & 0xffff), c, i); |
| i = i->next(); |
| Assembler::as_movt_patch(dest, Imm16(imm >> 16 & 0xffff), c, i); |
| break; |
| case L_LDR: |
| Assembler::WritePoolEntry(i, c, imm); |
| break; |
| } |
| } |
| |
| /* static */ void |
| MacroAssemblerARM::ma_mov_patch(ImmPtr imm, Register dest, Assembler::Condition c, |
| RelocStyle rs, Instruction* i) |
| { |
| ma_mov_patch(Imm32(int32_t(imm.value)), dest, c, rs, i); |
| } |
| |
| void |
| MacroAssemblerARM::ma_mov(Register src, Register dest, SBit s, Assembler::Condition c) |
| { |
| if (s == SetCC || dest != src) |
| as_mov(dest, O2Reg(src), s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_mov(Imm32 imm, Register dest, |
| SBit s, Assembler::Condition c) |
| { |
| ma_alu(InvalidReg, imm, dest, OpMov, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_mov(ImmWord imm, Register dest, |
| SBit s, Assembler::Condition c) |
| { |
| ma_alu(InvalidReg, Imm32(imm.value), dest, OpMov, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_mov(ImmGCPtr ptr, Register dest) |
| { |
|     // Unlike the x86/x64 version, the data relocation has to be written |
|     // before the pointer-recovering instructions are emitted, not after. |
| writeDataRelocation(ptr); |
| RelocStyle rs; |
| if (HasMOVWT()) |
| rs = L_MOVWT; |
| else |
| rs = L_LDR; |
| |
| ma_movPatchable(Imm32(uintptr_t(ptr.value)), dest, Always, rs); |
| } |
| |
| // Shifts (just a move with a shifting op2) |
| void |
| MacroAssemblerARM::ma_lsl(Imm32 shift, Register src, Register dst) |
| { |
| as_mov(dst, lsl(src, shift.value)); |
| } |
| |
| void |
| MacroAssemblerARM::ma_lsr(Imm32 shift, Register src, Register dst) |
| { |
| as_mov(dst, lsr(src, shift.value)); |
| } |
| |
| void |
| MacroAssemblerARM::ma_asr(Imm32 shift, Register src, Register dst) |
| { |
| as_mov(dst, asr(src, shift.value)); |
| } |
| |
| void |
| MacroAssemblerARM::ma_ror(Imm32 shift, Register src, Register dst) |
| { |
| as_mov(dst, ror(src, shift.value)); |
| } |
| |
| void |
| MacroAssemblerARM::ma_rol(Imm32 shift, Register src, Register dst) |
| { |
| as_mov(dst, rol(src, shift.value)); |
| } |
| |
| // Shifts (just a move with a shifting op2) |
| void |
| MacroAssemblerARM::ma_lsl(Register shift, Register src, Register dst) |
| { |
| as_mov(dst, lsl(src, shift)); |
| } |
| |
| void |
| MacroAssemblerARM::ma_lsr(Register shift, Register src, Register dst) |
| { |
| as_mov(dst, lsr(src, shift)); |
| } |
| |
| void |
| MacroAssemblerARM::ma_asr(Register shift, Register src, Register dst) |
| { |
| as_mov(dst, asr(src, shift)); |
| } |
| |
| void |
| MacroAssemblerARM::ma_ror(Register shift, Register src, Register dst) |
| { |
| as_mov(dst, ror(src, shift)); |
| } |
| |
| void |
| MacroAssemblerARM::ma_rol(Register shift, Register src, Register dst) |
| { |
| ScratchRegisterScope scratch(asMasm()); |
| ma_rsb(shift, Imm32(32), scratch); |
| as_mov(dst, ror(src, scratch)); |
| } |
| |
| // Move not (dest <- ~src) |
| void |
| MacroAssemblerARM::ma_mvn(Imm32 imm, Register dest, SBit s, Assembler::Condition c) |
| { |
| ma_alu(InvalidReg, imm, dest, OpMvn, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_mvn(Register src1, Register dest, SBit s, Assembler::Condition c) |
| { |
| as_alu(dest, InvalidReg, O2Reg(src1), OpMvn, s, c); |
| } |
| |
| // Negate (dest <- -src), src is a register, rather than a general op2. |
| void |
| MacroAssemblerARM::ma_neg(Register src1, Register dest, SBit s, Assembler::Condition c) |
| { |
| as_rsb(dest, src1, Imm8(0), s, c); |
| } |
| |
| // And. |
| void |
| MacroAssemblerARM::ma_and(Register src, Register dest, SBit s, Assembler::Condition c) |
| { |
|     ma_and(dest, src, dest, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_and(Register src1, Register src2, Register dest, |
| SBit s, Assembler::Condition c) |
| { |
| as_and(dest, src1, O2Reg(src2), s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_and(Imm32 imm, Register dest, SBit s, Assembler::Condition c) |
| { |
| ma_alu(dest, imm, dest, OpAnd, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_and(Imm32 imm, Register src1, Register dest, |
| SBit s, Assembler::Condition c) |
| { |
| ma_alu(src1, imm, dest, OpAnd, s, c); |
| } |
| |
| // Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2). |
| void |
| MacroAssemblerARM::ma_bic(Imm32 imm, Register dest, SBit s, Assembler::Condition c) |
| { |
| ma_alu(dest, imm, dest, OpBic, s, c); |
| } |
| |
| // Exclusive or. |
| void |
| MacroAssemblerARM::ma_eor(Register src, Register dest, SBit s, Assembler::Condition c) |
| { |
| ma_eor(dest, src, dest, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_eor(Register src1, Register src2, Register dest, |
| SBit s, Assembler::Condition c) |
| { |
| as_eor(dest, src1, O2Reg(src2), s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_eor(Imm32 imm, Register dest, SBit s, Assembler::Condition c) |
| { |
| ma_alu(dest, imm, dest, OpEor, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_eor(Imm32 imm, Register src1, Register dest, |
| SBit s, Assembler::Condition c) |
| { |
| ma_alu(src1, imm, dest, OpEor, s, c); |
| } |
| |
| // Or. |
| void |
| MacroAssemblerARM::ma_orr(Register src, Register dest, SBit s, Assembler::Condition c) |
| { |
| ma_orr(dest, src, dest, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_orr(Register src1, Register src2, Register dest, |
| SBit s, Assembler::Condition c) |
| { |
| as_orr(dest, src1, O2Reg(src2), s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_orr(Imm32 imm, Register dest, SBit s, Assembler::Condition c) |
| { |
| ma_alu(dest, imm, dest, OpOrr, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_orr(Imm32 imm, Register src1, Register dest, |
| SBit s, Assembler::Condition c) |
| { |
| ma_alu(src1, imm, dest, OpOrr, s, c); |
| } |
| |
| // Arithmetic-based ops. |
| // Add with carry. |
| void |
| MacroAssemblerARM::ma_adc(Imm32 imm, Register dest, SBit s, Condition c) |
| { |
| ma_alu(dest, imm, dest, OpAdc, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_adc(Register src, Register dest, SBit s, Condition c) |
| { |
| as_alu(dest, dest, O2Reg(src), OpAdc, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_adc(Register src1, Register src2, Register dest, SBit s, Condition c) |
| { |
| as_alu(dest, src1, O2Reg(src2), OpAdc, s, c); |
| } |
| |
| // Add. |
| void |
| MacroAssemblerARM::ma_add(Imm32 imm, Register dest, SBit s, Condition c) |
| { |
| ma_alu(dest, imm, dest, OpAdd, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_add(Register src1, Register dest, SBit s, Condition c) |
| { |
| ma_alu(dest, O2Reg(src1), dest, OpAdd, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_add(Register src1, Register src2, Register dest, SBit s, Condition c) |
| { |
| as_alu(dest, src1, O2Reg(src2), OpAdd, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_add(Register src1, Operand op, Register dest, SBit s, Condition c) |
| { |
| ma_alu(src1, op, dest, OpAdd, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_add(Register src1, Imm32 op, Register dest, SBit s, Condition c) |
| { |
| ma_alu(src1, op, dest, OpAdd, s, c); |
| } |
| |
| // Subtract with carry. |
| void |
| MacroAssemblerARM::ma_sbc(Imm32 imm, Register dest, SBit s, Condition c) |
| { |
| ma_alu(dest, imm, dest, OpSbc, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_sbc(Register src1, Register dest, SBit s, Condition c) |
| { |
| as_alu(dest, dest, O2Reg(src1), OpSbc, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_sbc(Register src1, Register src2, Register dest, SBit s, Condition c) |
| { |
| as_alu(dest, src1, O2Reg(src2), OpSbc, s, c); |
| } |
| |
| // Subtract. |
| void |
| MacroAssemblerARM::ma_sub(Imm32 imm, Register dest, SBit s, Condition c) |
| { |
| ma_alu(dest, imm, dest, OpSub, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_sub(Register src1, Register dest, SBit s, Condition c) |
| { |
| ma_alu(dest, Operand(src1), dest, OpSub, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_sub(Register src1, Register src2, Register dest, SBit s, Condition c) |
| { |
| ma_alu(src1, Operand(src2), dest, OpSub, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_sub(Register src1, Operand op, Register dest, SBit s, Condition c) |
| { |
| ma_alu(src1, op, dest, OpSub, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_sub(Register src1, Imm32 op, Register dest, SBit s, Condition c) |
| { |
| ma_alu(src1, op, dest, OpSub, s, c); |
| } |
| |
| // Reverse subtract. |
| void |
| MacroAssemblerARM::ma_rsb(Imm32 imm, Register dest, SBit s, Condition c) |
| { |
| ma_alu(dest, imm, dest, OpRsb, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_rsb(Register src1, Register dest, SBit s, Condition c) |
| { |
|     as_alu(dest, dest, O2Reg(src1), OpRsb, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_rsb(Register src1, Register src2, Register dest, SBit s, Condition c) |
| { |
| as_alu(dest, src1, O2Reg(src2), OpRsb, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_rsb(Register src1, Imm32 op2, Register dest, SBit s, Condition c) |
| { |
| ma_alu(src1, op2, dest, OpRsb, s, c); |
| } |
| |
| // Reverse subtract with carry. |
| void |
| MacroAssemblerARM::ma_rsc(Imm32 imm, Register dest, SBit s, Condition c) |
| { |
| ma_alu(dest, imm, dest, OpRsc, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_rsc(Register src1, Register dest, SBit s, Condition c) |
| { |
| as_alu(dest, dest, O2Reg(src1), OpRsc, s, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_rsc(Register src1, Register src2, Register dest, SBit s, Condition c) |
| { |
| as_alu(dest, src1, O2Reg(src2), OpRsc, s, c); |
| } |
| |
| // Compares/tests. |
| // Compare negative (sets condition codes as src1 + src2 would). |
| void |
| MacroAssemblerARM::ma_cmn(Register src1, Imm32 imm, Condition c) |
| { |
| ma_alu(src1, imm, InvalidReg, OpCmn, SetCC, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_cmn(Register src1, Register src2, Condition c) |
| { |
| as_alu(InvalidReg, src2, O2Reg(src1), OpCmn, SetCC, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_cmn(Register src1, Operand op, Condition c) |
| { |
| MOZ_CRASH("Feature NYI"); |
| } |
| |
| // Compare (src1 - src2). |
| void |
| MacroAssemblerARM::ma_cmp(Register src1, Imm32 imm, Condition c) |
| { |
| ma_alu(src1, imm, InvalidReg, OpCmp, SetCC, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_cmp(Register src1, ImmWord ptr, Condition c) |
| { |
| ma_cmp(src1, Imm32(ptr.value), c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_cmp(Register src1, ImmGCPtr ptr, Condition c) |
| { |
| ScratchRegisterScope scratch(asMasm()); |
| ma_mov(ptr, scratch); |
| ma_cmp(src1, scratch, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_cmp(Register src1, Operand op, Condition c) |
| { |
| switch (op.getTag()) { |
| case Operand::OP2: |
| as_cmp(src1, op.toOp2(), c); |
| break; |
| case Operand::MEM: { |
| ScratchRegisterScope scratch(asMasm()); |
| ma_ldr(op.toAddress(), scratch); |
| as_cmp(src1, O2Reg(scratch), c); |
| break; |
| } |
| default: |
| MOZ_CRASH("trying to compare FP and integer registers"); |
| } |
| } |
| |
| void |
| MacroAssemblerARM::ma_cmp(Register src1, Register src2, Condition c) |
| { |
| as_cmp(src1, O2Reg(src2), c); |
| } |
| |
| // Test for equality, (src1 ^ src2). |
| void |
| MacroAssemblerARM::ma_teq(Register src1, Imm32 imm, Condition c) |
| { |
| ma_alu(src1, imm, InvalidReg, OpTeq, SetCC, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_teq(Register src1, Register src2, Condition c) |
| { |
| as_tst(src1, O2Reg(src2), c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_teq(Register src1, Operand op, Condition c) |
| { |
| as_teq(src1, op.toOp2(), c); |
| } |
| |
| // Test (src1 & src2). |
| void |
| MacroAssemblerARM::ma_tst(Register src1, Imm32 imm, Condition c) |
| { |
| ma_alu(src1, imm, InvalidReg, OpTst, SetCC, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_tst(Register src1, Register src2, Condition c) |
| { |
| as_tst(src1, O2Reg(src2), c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_tst(Register src1, Operand op, Condition c) |
| { |
| as_tst(src1, op.toOp2(), c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_mul(Register src1, Register src2, Register dest) |
| { |
| as_mul(dest, src1, src2); |
| } |
| |
| void |
| MacroAssemblerARM::ma_mul(Register src1, Imm32 imm, Register dest) |
| { |
| ScratchRegisterScope scratch(asMasm()); |
| ma_mov(imm, scratch); |
| as_mul(dest, src1, scratch); |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARM::ma_check_mul(Register src1, Register src2, Register dest, Condition cond) |
| { |
| ScratchRegisterScope scratch(asMasm()); |
| |
| // TODO: this operation is illegal on armv6 and earlier if src2 == |
| // ScratchRegister or src2 == dest. |
| if (cond == Equal || cond == NotEqual) { |
| as_smull(scratch, dest, src1, src2, SetCC); |
| return cond; |
| } |
| |
| if (cond == Overflow) { |
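|         // smull leaves the high half of the 64-bit product in scratch and |
|         // the low half in dest. The product fits in 32 bits iff the high |
|         // half equals the sign extension of the low half, hence the |
|         // comparison against asr(dest, 31). |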
| as_smull(scratch, dest, src1, src2); |
| as_cmp(scratch, asr(dest, 31)); |
| return NotEqual; |
| } |
| |
| MOZ_CRASH("Condition NYI"); |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARM::ma_check_mul(Register src1, Imm32 imm, Register dest, Condition cond) |
| { |
| ScratchRegisterScope scratch(asMasm()); |
| |
| ma_mov(imm, scratch); |
| if (cond == Equal || cond == NotEqual) { |
| as_smull(scratch, dest, scratch, src1, SetCC); |
| return cond; |
| } |
| |
| if (cond == Overflow) { |
| as_smull(scratch, dest, scratch, src1); |
| as_cmp(scratch, asr(dest, 31)); |
| return NotEqual; |
| } |
| |
| MOZ_CRASH("Condition NYI"); |
| } |
| |
| void |
| MacroAssemblerARM::ma_mod_mask(Register src, Register dest, Register hold, Register tmp, |
| int32_t shift) |
| { |
|     // We wish to compute x % ((1 << y) - 1) for a known constant, y. |
| // |
| // 1. Let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit dividend as |
| // a number in base b, namely c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n |
| // |
| // 2. Since both addition and multiplication commute with modulus: |
| // x % C == (c_0 + c_1*b + ... + c_n*b^n) % C == |
| // (c_0 % C) + (c_1%C) * (b % C) + (c_2 % C) * (b^2 % C)... |
| // |
|     // 3. Since b == C + 1, b % C == 1, and b^n % C == 1, the whole thing |
|     //    simplifies to: (c_0 + c_1 + c_2 + ... + c_n) % C |
| // |
| // Each c_n can easily be computed by a shift/bitextract, and the modulus |
| // can be maintained by simply subtracting by C whenever the number gets |
| // over C. |
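|     // Worked example (illustrative): with shift = 4, C = 15 and the base-16 |
|     // digits of x = 0x1234 sum to 1 + 2 + 3 + 4 = 10, which matches |
|     // 0x1234 % 15 == 10. |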
| int32_t mask = (1 << shift) - 1; |
| Label head; |
| |
|     // Register 'hold' holds -1 if the value was negative, 1 otherwise. The |
|     // 'tmp' register holds the remaining bits that have not yet been |
|     // processed. The second scratch register serves as a temporary location |
|     // to store extracted bits into, and also holds the trial subtraction as |
|     // a temp value. 'dest' is the accumulator (and holds the final result). |
|     // |
|     // Move the whole value into tmp, setting the condition codes so we can |
|     // muck with them later. |
|     // |
|     // Note that we cannot use ScratchRegister in place of tmp here, as ma_and |
|     // below on certain architectures moves the mask into ScratchRegister |
|     // before performing the bitwise and. |
| as_mov(tmp, O2Reg(src), SetCC); |
| // Zero out the dest. |
| ma_mov(Imm32(0), dest); |
| // Set the hold appropriately. |
| ma_mov(Imm32(1), hold); |
| ma_mov(Imm32(-1), hold, LeaveCC, Signed); |
| ma_rsb(Imm32(0), tmp, SetCC, Signed); |
| |
| // Begin the main loop. |
| bind(&head); |
| { |
| AutoRegisterScope scratch2(asMasm(), secondScratchReg_); |
| |
| // Extract the bottom bits into lr. |
| ma_and(Imm32(mask), tmp, scratch2); |
| // Add those bits to the accumulator. |
| ma_add(scratch2, dest, dest); |
|         // Do a trial subtraction. This is the same operation as cmp, but we |
|         // store the result. |
|         ma_sub(dest, Imm32(mask), scratch2, SetCC); |
|         // If (sum - C) >= 0, store sum - C back into sum, thus performing a |
|         // modulus. |
| ma_mov(scratch2, dest, LeaveCC, NotSigned); |
| // Get rid of the bits that we extracted before, and set the condition codes. |
| as_mov(tmp, lsr(tmp, shift), SetCC); |
| // If the shift produced zero, finish, otherwise, continue in the loop. |
| ma_b(&head, NonZero); |
| } |
| |
|     // Check the hold to see if we need to negate the result. Hold can only be |
|     // 1 or -1, so this will never set the 0 flag. |
|     ma_cmp(hold, Imm32(0)); |
|     // If the hold was negative, negate the result to be in line with what JS |
|     // wants; this will set the condition codes if we try to negate. |
|     ma_rsb(Imm32(0), dest, SetCC, Signed); |
|     // Since the Zero flag is not set by the compare, we can *only* set the |
|     // Zero flag in the rsb, so Zero is set iff we negated zero (e.g. the |
|     // result of the computation was -0.0). |
| } |
| |
| void |
| MacroAssemblerARM::ma_smod(Register num, Register div, Register dest) |
| { |
| ScratchRegisterScope scratch(asMasm()); |
| as_sdiv(scratch, num, div); |
| as_mls(dest, num, scratch, div); |
| } |
| |
| void |
| MacroAssemblerARM::ma_umod(Register num, Register div, Register dest) |
| { |
| ScratchRegisterScope scratch(asMasm()); |
| as_udiv(scratch, num, div); |
| as_mls(dest, num, scratch, div); |
| } |
| |
| // Division |
| void |
| MacroAssemblerARM::ma_sdiv(Register num, Register div, Register dest, Condition cond) |
| { |
| as_sdiv(dest, num, div, cond); |
| } |
| |
| void |
| MacroAssemblerARM::ma_udiv(Register num, Register div, Register dest, Condition cond) |
| { |
| as_udiv(dest, num, div, cond); |
| } |
| |
| // Miscellaneous instructions. |
| void |
| MacroAssemblerARM::ma_clz(Register src, Register dest, Condition cond) |
| { |
| as_clz(dest, src, cond); |
| } |
| |
| // Memory. |
| // Shortcut for when we know we're transferring 32 bits of data. |
| void |
| MacroAssemblerARM::ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt, |
| Index mode, Assembler::Condition cc) |
| { |
| ma_dataTransferN(ls, 32, true, rn, offset, rt, mode, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_dtr(LoadStore ls, Register rn, Register rm, Register rt, |
| Index mode, Assembler::Condition cc) |
| { |
| MOZ_CRASH("Feature NYI"); |
| } |
| |
| void |
| MacroAssemblerARM::ma_str(Register rt, DTRAddr addr, Index mode, Condition cc) |
| { |
| as_dtr(IsStore, 32, mode, rt, addr, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_dtr(LoadStore ls, Register rt, const Address& addr, Index mode, Condition cc) |
| { |
| ma_dataTransferN(ls, 32, true, addr.base, Imm32(addr.offset), rt, mode, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_str(Register rt, const Address& addr, Index mode, Condition cc) |
| { |
| ma_dtr(IsStore, rt, addr, mode, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_strd(Register rt, DebugOnly<Register> rt2, EDtrAddr addr, Index mode, Condition cc) |
| { |
| MOZ_ASSERT((rt.code() & 1) == 0); |
| MOZ_ASSERT(rt2.value.code() == rt.code() + 1); |
| as_extdtr(IsStore, 64, true, mode, rt, addr, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_ldr(DTRAddr addr, Register rt, Index mode, Condition cc) |
| { |
| as_dtr(IsLoad, 32, mode, rt, addr, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_ldr(const Address& addr, Register rt, Index mode, Condition cc) |
| { |
| ma_dtr(IsLoad, rt, addr, mode, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_ldrb(DTRAddr addr, Register rt, Index mode, Condition cc) |
| { |
| as_dtr(IsLoad, 8, mode, rt, addr, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_ldrsh(EDtrAddr addr, Register rt, Index mode, Condition cc) |
| { |
| as_extdtr(IsLoad, 16, true, mode, rt, addr, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_ldrh(EDtrAddr addr, Register rt, Index mode, Condition cc) |
| { |
| as_extdtr(IsLoad, 16, false, mode, rt, addr, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_ldrsb(EDtrAddr addr, Register rt, Index mode, Condition cc) |
| { |
| as_extdtr(IsLoad, 8, true, mode, rt, addr, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_ldrd(EDtrAddr addr, Register rt, DebugOnly<Register> rt2, |
| Index mode, Condition cc) |
| { |
| MOZ_ASSERT((rt.code() & 1) == 0); |
| MOZ_ASSERT(rt2.value.code() == rt.code() + 1); |
| as_extdtr(IsLoad, 64, true, mode, rt, addr, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_strh(Register rt, EDtrAddr addr, Index mode, Condition cc) |
| { |
| as_extdtr(IsStore, 16, false, mode, rt, addr, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_strb(Register rt, DTRAddr addr, Index mode, Condition cc) |
| { |
| as_dtr(IsStore, 8, mode, rt, addr, cc); |
| } |
| |
| // Specialty for moving N bits of data, where n == 8,16,32,64. |
| BufferOffset |
| MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned, |
| Register rn, Register rm, Register rt, |
| Index mode, Assembler::Condition cc, unsigned shiftAmount) |
| { |
| if (size == 32 || (size == 8 && !IsSigned)) |
| return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrRegImmShift(rm, LSL, shiftAmount)), cc); |
| |
| ScratchRegisterScope scratch(asMasm()); |
| |
| if (shiftAmount != 0) { |
| MOZ_ASSERT(rn != scratch); |
| MOZ_ASSERT(rt != scratch); |
| ma_lsl(Imm32(shiftAmount), rm, scratch); |
| rm = scratch; |
| } |
| |
| return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(rm)), cc); |
| } |
| |
| BufferOffset |
| MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned, |
| Register rn, Imm32 offset, Register rt, |
| Index mode, Assembler::Condition cc) |
| { |
| int off = offset.value; |
| |
| // We can encode this as a standard ldr. |
| if (size == 32 || (size == 8 && !IsSigned) ) { |
| if (off < 4096 && off > -4096) { |
|             // This encodes as a single instruction; emulating the addressing |
|             // mode's behavior with a multi-instruction sequence is not necessary. |
| return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrOffImm(off)), cc); |
| } |
| |
|         // We cannot encode this offset in a single ldr. For mode == Offset, |
|         // try to encode it as |add scratch, base, imm; ldr dest, [scratch, +offset]|. |
|         // This does not work for mode == PreIndex or mode == PostIndex. |
|         // PreIndex is simple: just do the add into the base register first, |
|         // then do a PreIndex'ed load. PostIndexed loads can be tricky. |
|         // Normally, doing the load with an index of 0 and then doing an add |
|         // would work, but if the destination is the PC, we don't get to |
|         // execute the instruction after the branch, which would leave the |
|         // base register not updated correctly. Explicitly handle this case, |
|         // without doing anything fancy, then handle all of the other cases. |
| |
| // mode == Offset |
| // add scratch, base, offset_hi |
| // ldr dest, [scratch, +offset_lo] |
| // |
| // mode == PreIndex |
| // add base, base, offset_hi |
| // ldr dest, [base, +offset_lo]! |
| // |
| // mode == PostIndex, dest == pc |
| // ldr scratch, [base] |
| // add base, base, offset_hi |
| // add base, base, offset_lo |
| // mov dest, scratch |
| // PostIndex with the pc as the destination needs to be handled |
| // specially, since in the code below, the write into 'dest' is going to |
| // alter the control flow, so the following instruction would never get |
| // emitted. |
| // |
| // mode == PostIndex, dest != pc |
| // ldr dest, [base], offset_lo |
| // add base, base, offset_hi |
| |
| if (rt == pc && mode == PostIndex && ls == IsLoad) { |
| ScratchRegisterScope scratch(asMasm()); |
| ma_mov(rn, scratch); |
| ma_alu(rn, offset, rn, OpAdd); |
| return as_dtr(IsLoad, size, Offset, pc, DTRAddr(scratch, DtrOffImm(0)), cc); |
| } |
| |
| // Often this code is called with rt as the ScratchRegister. |
| // The register is logically owned by the caller, so we cannot ask |
| // for exclusive ownership here. If full checking is desired, |
| // this function should take an explicit scratch register argument. |
| const Register& scratch = ScratchRegister; |
| MOZ_ASSERT(rn != scratch); |
| |
| int bottom = off & 0xfff; |
| int neg_bottom = 0x1000 - bottom; |
| // For a regular offset, base == ScratchRegister does what we want. |
| // Modify the scratch register, leaving the actual base unscathed. |
| Register base = scratch; |
| // For the preindex case, we want to just re-use rn as the base |
| // register, so when the base register is updated *before* the load, rn |
| // is updated. |
| if (mode == PreIndex) |
| base = rn; |
| MOZ_ASSERT(mode != PostIndex); |
| // At this point, both off - bottom and off + neg_bottom will be |
| // reasonable-ish quantities. |
| // |
| // Note a neg_bottom of 0x1000 can not be encoded as an immediate |
| // negative offset in the instruction and this occurs when bottom is |
| // zero, so this case is guarded against below. |
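|         // For example (illustrative), off = -0x1234 gives bottom = 0xdcc and |
|         // off - bottom = -0x2000, so we emit: |
|         //   sub scratch, rn, #0x2000 |
|         //   ldr rt, [scratch, #+0xdcc] |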
| if (off < 0) { |
| Operand2 sub_off = Imm8(-(off - bottom)); // sub_off = bottom - off |
| if (!sub_off.invalid) { |
| // - sub_off = off - bottom |
| as_sub(scratch, rn, sub_off, LeaveCC, cc); |
| return as_dtr(ls, size, Offset, rt, DTRAddr(scratch, DtrOffImm(bottom)), cc); |
| } |
| |
| // sub_off = -neg_bottom - off |
| sub_off = Imm8(-(off + neg_bottom)); |
| if (!sub_off.invalid && bottom != 0) { |
| // Guarded against by: bottom != 0 |
| MOZ_ASSERT(neg_bottom < 0x1000); |
| // - sub_off = neg_bottom + off |
| as_sub(scratch, rn, sub_off, LeaveCC, cc); |
| return as_dtr(ls, size, Offset, rt, DTRAddr(scratch, DtrOffImm(-neg_bottom)), cc); |
| } |
| } else { |
| // sub_off = off - bottom |
| Operand2 sub_off = Imm8(off - bottom); |
| if (!sub_off.invalid) { |
| // sub_off = off - bottom |
| as_add(scratch, rn, sub_off, LeaveCC, cc); |
| return as_dtr(ls, size, Offset, rt, DTRAddr(scratch, DtrOffImm(bottom)), cc); |
| } |
| |
| // sub_off = neg_bottom + off |
| sub_off = Imm8(off + neg_bottom); |
| if (!sub_off.invalid && bottom != 0) { |
| // Guarded against by: bottom != 0 |
| MOZ_ASSERT(neg_bottom < 0x1000); |
| // sub_off = neg_bottom + off |
| as_add(scratch, rn, sub_off, LeaveCC, cc); |
| return as_dtr(ls, size, Offset, rt, DTRAddr(scratch, DtrOffImm(-neg_bottom)), cc); |
| } |
| } |
| |
| ma_mov(offset, scratch); |
| return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrRegImmShift(scratch, LSL, 0))); |
| } else { |
| ScratchRegisterScope scratch(asMasm()); |
| |
| // Should attempt to use the extended load/store instructions. |
| if (off < 256 && off > -256) |
| return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffImm(off)), cc); |
| |
| // We cannot encode this offset in a single extldr. Try to encode it as |
| // an add scratch, base, imm; extldr dest, [scratch, +offset]. |
| int bottom = off & 0xff; |
| int neg_bottom = 0x100 - bottom; |
| // At this point, both off - bottom and off + neg_bottom will be |
| // reasonable-ish quantities. |
| // |
| // Note a neg_bottom of 0x100 can not be encoded as an immediate |
| // negative offset in the instruction and this occurs when bottom is |
| // zero, so this case is guarded against below. |
| if (off < 0) { |
| // sub_off = bottom - off |
| Operand2 sub_off = Imm8(-(off - bottom)); |
| if (!sub_off.invalid) { |
| // - sub_off = off - bottom |
| as_sub(scratch, rn, sub_off, LeaveCC, cc); |
| return as_extdtr(ls, size, IsSigned, Offset, rt, |
| EDtrAddr(scratch, EDtrOffImm(bottom)), |
| cc); |
| } |
| // sub_off = -neg_bottom - off |
| sub_off = Imm8(-(off + neg_bottom)); |
| if (!sub_off.invalid && bottom != 0) { |
| // Guarded against by: bottom != 0 |
| MOZ_ASSERT(neg_bottom < 0x100); |
| // - sub_off = neg_bottom + off |
| as_sub(scratch, rn, sub_off, LeaveCC, cc); |
| return as_extdtr(ls, size, IsSigned, Offset, rt, |
| EDtrAddr(scratch, EDtrOffImm(-neg_bottom)), |
| cc); |
| } |
| } else { |
| // sub_off = off - bottom |
| Operand2 sub_off = Imm8(off - bottom); |
| if (!sub_off.invalid) { |
| // sub_off = off - bottom |
| as_add(scratch, rn, sub_off, LeaveCC, cc); |
| return as_extdtr(ls, size, IsSigned, Offset, rt, |
| EDtrAddr(scratch, EDtrOffImm(bottom)), |
| cc); |
| } |
| // sub_off = neg_bottom + off |
| sub_off = Imm8(off + neg_bottom); |
| if (!sub_off.invalid && bottom != 0) { |
| // Guarded against by: bottom != 0 |
| MOZ_ASSERT(neg_bottom < 0x100); |
| // sub_off = neg_bottom + off |
| as_add(scratch, rn, sub_off, LeaveCC, cc); |
| return as_extdtr(ls, size, IsSigned, Offset, rt, |
| EDtrAddr(scratch, EDtrOffImm(-neg_bottom)), |
| cc); |
| } |
| } |
| ma_mov(offset, scratch); |
| return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(scratch)), cc); |
| } |
| } |
| |
| void |
| MacroAssemblerARM::ma_pop(Register r) |
| { |
| ma_dtr(IsLoad, sp, Imm32(4), r, PostIndex); |
| } |
| |
| void |
| MacroAssemblerARM::ma_push(Register r) |
| { |
| // Pushing sp is not well defined: use two instructions. |
| if (r == sp) { |
| ScratchRegisterScope scratch(asMasm()); |
| ma_mov(sp, scratch); |
| ma_dtr(IsStore, sp, Imm32(-4), scratch, PreIndex); |
| return; |
| } |
| |
| ma_dtr(IsStore, sp, Imm32(-4), r, PreIndex); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vpop(VFPRegister r) |
| { |
| startFloatTransferM(IsLoad, sp, IA, WriteBack); |
| transferFloatReg(r); |
| finishFloatTransfer(); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vpush(VFPRegister r) |
| { |
| startFloatTransferM(IsStore, sp, DB, WriteBack); |
| transferFloatReg(r); |
| finishFloatTransfer(); |
| } |
| |
| // Barriers |
| void |
| MacroAssemblerARM::ma_dmb(BarrierOption option) |
| { |
| if (HasDMBDSBISB()) |
| as_dmb(option); |
| else |
| as_dmb_trap(); |
| } |
| |
| void |
| MacroAssemblerARM::ma_dsb(BarrierOption option) |
| { |
| if (HasDMBDSBISB()) |
| as_dsb(option); |
| else |
| as_dsb_trap(); |
| } |
| |
| // Branches when done from within arm-specific code. |
| BufferOffset |
| MacroAssemblerARM::ma_b(Label* dest, Assembler::Condition c) |
| { |
| return as_b(dest, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_bx(Register dest, Assembler::Condition c) |
| { |
| as_bx(dest, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_b(void* target, Assembler::Condition c) |
| { |
| // An immediate pool is used for easier patching. |
| as_Imm32Pool(pc, uint32_t(target), c); |
| } |
| |
| // This is almost NEVER necessary: we'll basically never be calling a label, |
| // except possibly in the crazy bailout-table case. |
| void |
| MacroAssemblerARM::ma_bl(Label* dest, Assembler::Condition c) |
| { |
| as_bl(dest, c); |
| } |
| |
| void |
| MacroAssemblerARM::ma_blx(Register reg, Assembler::Condition c) |
| { |
| as_blx(reg, c); |
| } |
| |
| // VFP/ALU |
| void |
| MacroAssemblerARM::ma_vadd(FloatRegister src1, FloatRegister src2, FloatRegister dst) |
| { |
| as_vadd(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2)); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vadd_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst) |
| { |
| as_vadd(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(), |
| VFPRegister(src2).singleOverlay()); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vsub(FloatRegister src1, FloatRegister src2, FloatRegister dst) |
| { |
| as_vsub(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2)); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vsub_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst) |
| { |
| as_vsub(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(), |
| VFPRegister(src2).singleOverlay()); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vmul(FloatRegister src1, FloatRegister src2, FloatRegister dst) |
| { |
| as_vmul(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2)); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vmul_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst) |
| { |
| as_vmul(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(), |
| VFPRegister(src2).singleOverlay()); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vdiv(FloatRegister src1, FloatRegister src2, FloatRegister dst) |
| { |
| as_vdiv(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2)); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vdiv_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst) |
| { |
| as_vdiv(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(), |
| VFPRegister(src2).singleOverlay()); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vmov(FloatRegister src, FloatRegister dest, Condition cc) |
| { |
| as_vmov(dest, src, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vmov_f32(FloatRegister src, FloatRegister dest, Condition cc) |
| { |
| as_vmov(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vneg(FloatRegister src, FloatRegister dest, Condition cc) |
| { |
| as_vneg(dest, src, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vneg_f32(FloatRegister src, FloatRegister dest, Condition cc) |
| { |
| as_vneg(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vabs(FloatRegister src, FloatRegister dest, Condition cc) |
| { |
| as_vabs(dest, src, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vabs_f32(FloatRegister src, FloatRegister dest, Condition cc) |
| { |
| as_vabs(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vsqrt(FloatRegister src, FloatRegister dest, Condition cc) |
| { |
| as_vsqrt(dest, src, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vsqrt_f32(FloatRegister src, FloatRegister dest, Condition cc) |
| { |
| as_vsqrt(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc); |
| } |
| |
| static inline uint32_t |
| DoubleHighWord(const double value) |
| { |
| return static_cast<uint32_t>(BitwiseCast<uint64_t>(value) >> 32); |
| } |
| |
| static inline uint32_t |
| DoubleLowWord(const double value) |
| { |
| return BitwiseCast<uint64_t>(value) & uint32_t(0xffffffff); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vimm(double value, FloatRegister dest, Condition cc) |
| { |
| if (HasVFPv3()) { |
| if (DoubleLowWord(value) == 0) { |
| if (DoubleHighWord(value) == 0) { |
| // To zero a register, load 1.0, then execute dN <- dN - dN |
| as_vimm(dest, VFPImm::One, cc); |
| as_vsub(dest, dest, dest, cc); |
| return; |
| } |
| |
| VFPImm enc(DoubleHighWord(value)); |
| if (enc.isValid()) { |
| as_vimm(dest, enc, cc); |
| return; |
| } |
| |
| } |
| } |
| // Fall back to putting the value in a pool. |
| as_FImm64Pool(dest, value, cc); |
| } |
| |
| static inline uint32_t |
| Float32Word(const float value) |
| { |
| return BitwiseCast<uint32_t>(value); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vimm_f32(float value, FloatRegister dest, Condition cc) |
| { |
| VFPRegister vd = VFPRegister(dest).singleOverlay(); |
| if (HasVFPv3()) { |
| if (Float32Word(value) == 0) { |
| // To zero a register, load 1.0, then execute sN <- sN - sN. |
| as_vimm(vd, VFPImm::One, cc); |
| as_vsub(vd, vd, vd, cc); |
| return; |
| } |
| |
|         // Note that the vimm immediate float32 instruction encoding differs |
|         // from the vimm immediate double encoding, but this difference matches |
|         // the difference in the floating point formats, so it is possible to |
|         // convert the float32 to a double and then use the double encoding |
|         // paths. It is still necessary to first check that the low word of |
|         // the resulting double is zero, because some float32 values set these |
|         // bits and this cannot be ignored. |
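|         // For example (illustrative), 0.5f converts to the double 0.5, whose |
|         // low word is zero and whose high word is a valid VFP immediate |
|         // encoding, so a single vmov immediate suffices. |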
| double doubleValue = value; |
| if (DoubleLowWord(value) == 0) { |
| VFPImm enc(DoubleHighWord(doubleValue)); |
| if (enc.isValid()) { |
| as_vimm(vd, enc, cc); |
| return; |
| } |
| } |
| } |
| // Fall back to putting the value in a pool. |
| as_FImm32Pool(vd, value, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vcmp(FloatRegister src1, FloatRegister src2, Condition cc) |
| { |
| as_vcmp(VFPRegister(src1), VFPRegister(src2), cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vcmp_f32(FloatRegister src1, FloatRegister src2, Condition cc) |
| { |
| as_vcmp(VFPRegister(src1).singleOverlay(), VFPRegister(src2).singleOverlay(), cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vcmpz(FloatRegister src1, Condition cc) |
| { |
| as_vcmpz(VFPRegister(src1), cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vcmpz_f32(FloatRegister src1, Condition cc) |
| { |
| as_vcmpz(VFPRegister(src1).singleOverlay(), cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vcvt_F64_I32(FloatRegister src, FloatRegister dest, Condition cc) |
| { |
| MOZ_ASSERT(src.isDouble()); |
| MOZ_ASSERT(dest.isSInt()); |
| as_vcvt(dest, src, false, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vcvt_F64_U32(FloatRegister src, FloatRegister dest, Condition cc) |
| { |
| MOZ_ASSERT(src.isDouble()); |
| MOZ_ASSERT(dest.isUInt()); |
| as_vcvt(dest, src, false, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vcvt_I32_F64(FloatRegister src, FloatRegister dest, Condition cc) |
| { |
| MOZ_ASSERT(src.isSInt()); |
| MOZ_ASSERT(dest.isDouble()); |
| as_vcvt(dest, src, false, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vcvt_U32_F64(FloatRegister src, FloatRegister dest, Condition cc) |
| { |
| MOZ_ASSERT(src.isUInt()); |
| MOZ_ASSERT(dest.isDouble()); |
| as_vcvt(dest, src, false, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vcvt_F32_I32(FloatRegister src, FloatRegister dest, Condition cc) |
| { |
| MOZ_ASSERT(src.isSingle()); |
| MOZ_ASSERT(dest.isSInt()); |
| as_vcvt(VFPRegister(dest).sintOverlay(), VFPRegister(src).singleOverlay(), false, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vcvt_F32_U32(FloatRegister src, FloatRegister dest, Condition cc) |
| { |
| MOZ_ASSERT(src.isSingle()); |
| MOZ_ASSERT(dest.isUInt()); |
| as_vcvt(VFPRegister(dest).uintOverlay(), VFPRegister(src).singleOverlay(), false, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vcvt_I32_F32(FloatRegister src, FloatRegister dest, Condition cc) |
| { |
| MOZ_ASSERT(src.isSInt()); |
| MOZ_ASSERT(dest.isSingle()); |
| as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src).sintOverlay(), false, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vcvt_U32_F32(FloatRegister src, FloatRegister dest, Condition cc) |
| { |
| MOZ_ASSERT(src.isUInt()); |
| MOZ_ASSERT(dest.isSingle()); |
| as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src).uintOverlay(), false, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vxfer(FloatRegister src, Register dest, Condition cc) |
| { |
| as_vxfer(dest, InvalidReg, VFPRegister(src).singleOverlay(), FloatToCore, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vxfer(FloatRegister src, Register dest1, Register dest2, Condition cc) |
| { |
| as_vxfer(dest1, dest2, VFPRegister(src), FloatToCore, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vxfer(Register src, FloatRegister dest, Condition cc) |
| { |
| as_vxfer(src, InvalidReg, VFPRegister(dest).singleOverlay(), CoreToFloat, cc); |
| } |
| |
| void |
| MacroAssemblerARM::ma_vxfer(Register src1, Register src2, FloatRegister dest, Condition cc) |
| { |
| as_vxfer(src1, src2, VFPRegister(dest), CoreToFloat, cc); |
| } |
| |
| BufferOffset |
| MacroAssemblerARM::ma_vdtr(LoadStore ls, const Address& addr, VFPRegister rt, Condition cc) |
| { |
| int off = addr.offset; |
| MOZ_ASSERT((off & 3) == 0); |
| Register base = addr.base; |
| if (off > -1024 && off < 1024) |
| return as_vdtr(ls, rt, Operand(addr).toVFPAddr(), cc); |
| |
| ScratchRegisterScope scratch(asMasm()); |
| |
| // We cannot encode this offset in a single vldr/vstr. Try to encode it as |
| // an add scratch, base, imm; vldr dest, [scratch, +offset]. |
| int bottom = off & (0xff << 2); |
| int neg_bottom = (0x100 << 2) - bottom; |
| // At this point, both off - bottom and off + neg_bottom will be |
| // reasonable-ish quantities. |
| // |
| // Note that a neg_bottom of 0x400 cannot be encoded as an immediate |
| // negative offset in the instruction; this occurs when bottom is zero, so |
| // that case is guarded against below. |
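| // |
| // For example (illustrative only), off = 0x1234 gives bottom = 0x234 and |
| // off - bottom = 0x1000, which is a valid ARM immediate, so we emit |
| // add scratch, base, #0x1000 followed by a vldr/vstr at [scratch, #+0x234]. |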
| if (off < 0) { |
| // sub_off = bottom - off |
| Operand2 sub_off = Imm8(-(off - bottom)); |
| if (!sub_off.invalid) { |
| // - sub_off = off - bottom |
| as_sub(scratch, base, sub_off, LeaveCC, cc); |
| return as_vdtr(ls, rt, VFPAddr(scratch, VFPOffImm(bottom)), cc); |
| } |
| // sub_off = -neg_bottom - off |
| sub_off = Imm8(-(off + neg_bottom)); |
| if (!sub_off.invalid && bottom != 0) { |
| // Guarded against by: bottom != 0 |
| MOZ_ASSERT(neg_bottom < 0x400); |
| // - sub_off = neg_bottom + off |
| as_sub(scratch, base, sub_off, LeaveCC, cc); |
| return as_vdtr(ls, rt, VFPAddr(scratch, VFPOffImm(-neg_bottom)), cc); |
| } |
| } else { |
| // sub_off = off - bottom |
| Operand2 sub_off = Imm8(off - bottom); |
| if (!sub_off.invalid) { |
| // scratch = base + (off - bottom) |
| as_add(scratch, base, sub_off, LeaveCC, cc); |
| return as_vdtr(ls, rt, VFPAddr(scratch, VFPOffImm(bottom)), cc); |
| } |
| // sub_off = neg_bottom + off |
| sub_off = Imm8(off + neg_bottom); |
| if (!sub_off.invalid && bottom != 0) { |
| // Guarded against by: bottom != 0 |
| MOZ_ASSERT(neg_bottom < 0x400); |
| // scratch = base + (off + neg_bottom) |
| as_add(scratch, base, sub_off, LeaveCC, cc); |
| return as_vdtr(ls, rt, VFPAddr(scratch, VFPOffImm(-neg_bottom)), cc); |
| } |
| } |
| ma_add(base, Imm32(off), scratch, LeaveCC, cc); |
| return as_vdtr(ls, rt, VFPAddr(scratch, VFPOffImm(0)), cc); |
| } |
| |
| BufferOffset |
| MacroAssemblerARM::ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc) |
| { |
| return as_vdtr(IsLoad, dest, addr, cc); |
| } |
| |
| BufferOffset |
| MacroAssemblerARM::ma_vldr(const Address& addr, VFPRegister dest, Condition cc) |
| { |
| return ma_vdtr(IsLoad, addr, dest, cc); |
| } |
| |
| BufferOffset |
| MacroAssemblerARM::ma_vldr(VFPRegister src, Register base, Register index, int32_t shift, Condition cc) |
| { |
| ScratchRegisterScope scratch(asMasm()); |
| as_add(scratch, base, lsl(index, shift), LeaveCC, cc); |
| return ma_vldr(Address(scratch, 0), src, cc); |
| } |
| |
| BufferOffset |
| MacroAssemblerARM::ma_vstr(VFPRegister src, VFPAddr addr, Condition cc) |
| { |
| return as_vdtr(IsStore, src, addr, cc); |
| } |
| |
| BufferOffset |
| MacroAssemblerARM::ma_vstr(VFPRegister src, const Address& addr, Condition cc) |
| { |
| return ma_vdtr(IsStore, addr, src, cc); |
| } |
| |
| BufferOffset |
| MacroAssemblerARM::ma_vstr(VFPRegister src, Register base, Register index, int32_t shift, |
| int32_t offset, Condition cc) |
| { |
| ScratchRegisterScope scratch(asMasm()); |
| as_add(scratch, base, lsl(index, shift), LeaveCC, cc); |
| return ma_vstr(src, Address(scratch, offset), cc); |
| } |
| |
| bool |
| MacroAssemblerARMCompat::buildOOLFakeExitFrame(void* fakeReturnAddr) |
| { |
| DebugOnly<uint32_t> initialDepth = asMasm().framePushed(); |
| uint32_t descriptor = MakeFrameDescriptor(asMasm().framePushed(), JitFrame_IonJS); |
| |
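| // Push what a real call would have left on the stack: the caller's frame |
| // descriptor followed by a (fake) return address. |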
| asMasm().Push(Imm32(descriptor)); // descriptor_ |
| asMasm().Push(ImmPtr(fakeReturnAddr)); |
| |
| return true; |
| } |
| |
| void |
| MacroAssembler::alignFrameForICArguments(AfterICSaveLive& aic) |
| { |
| // Exists for MIPS compatibility. |
| } |
| |
| void |
| MacroAssembler::restoreFrameAlignmentForICArguments(AfterICSaveLive& aic) |
| { |
| // Exists for MIPS compatibility. |
| } |
| |
| void |
| MacroAssemblerARMCompat::add32(Register src, Register dest) |
| { |
| ma_add(src, dest, SetCC); |
| } |
| |
| void |
| MacroAssemblerARMCompat::add32(Imm32 imm, Register dest) |
| { |
| ma_add(imm, dest, SetCC); |
| } |
| |
| void |
| MacroAssemblerARMCompat::add32(Imm32 imm, const Address& dest) |
| { |
| ScratchRegisterScope scratch(asMasm()); |
| load32(dest, scratch); |
| ma_add(imm, scratch, SetCC); |
| store32(scratch, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::addPtr(Register src, Register dest) |
| { |
| ma_add(src, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::addPtr(const Address& src, Register dest) |
| { |
| ScratchRegisterScope scratch(asMasm()); |
| load32(src, scratch); |
| ma_add(scratch, dest, SetCC); |
| } |
| |
| void |
| MacroAssemblerARMCompat::move32(Imm32 imm, Register dest) |
| { |
| ma_mov(imm, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::move32(Register src, Register dest) |
| { |
| ma_mov(src, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::movePtr(Register src, Register dest) |
| { |
| ma_mov(src, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::movePtr(ImmWord imm, Register dest) |
| { |
| ma_mov(Imm32(imm.value), dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::movePtr(ImmGCPtr imm, Register dest) |
| { |
| ma_mov(imm, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::movePtr(ImmPtr imm, Register dest) |
| { |
| movePtr(ImmWord(uintptr_t(imm.value)), dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::movePtr(wasm::SymbolicAddress imm, Register dest) |
| { |
| RelocStyle rs; |
| if (HasMOVWT()) |
| rs = L_MOVWT; |
| else |
| rs = L_LDR; |
| |
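| // Record the location so the placeholder emitted below can be patched with |
| // the resolved address later; -1 is just a dummy value. |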
| append(AsmJSAbsoluteLink(CodeOffset(currentOffset()), imm)); |
| ma_movPatchable(Imm32(-1), dest, Always, rs); |
| } |
| |
| void |
| MacroAssemblerARMCompat::load8ZeroExtend(const Address& address, Register dest) |
| { |
| ma_dataTransferN(IsLoad, 8, false, address.base, Imm32(address.offset), dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::load8ZeroExtend(const BaseIndex& src, Register dest) |
| { |
| Register base = src.base; |
| uint32_t scale = Imm32::ShiftOf(src.scale).value; |
| |
| if (src.offset == 0) { |
| ma_ldrb(DTRAddr(base, DtrRegImmShift(src.index, LSL, scale)), dest); |
| } else { |
| ScratchRegisterScope scratch(asMasm()); |
| ma_add(base, Imm32(src.offset), scratch); |
| ma_ldrb(DTRAddr(scratch, DtrRegImmShift(src.index, LSL, scale)), dest); |
| } |
| } |
| |
| void |
| MacroAssemblerARMCompat::load8SignExtend(const Address& address, Register dest) |
| { |
| ma_dataTransferN(IsLoad, 8, true, address.base, Imm32(address.offset), dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::load8SignExtend(const BaseIndex& src, Register dest) |
| { |
| Register index = src.index; |
| |
| ScratchRegisterScope scratch(asMasm()); |
| |
| // ARMv7 does not have LSL on an index register with an extended load. |
| if (src.scale != TimesOne) { |
| ma_lsl(Imm32::ShiftOf(src.scale), index, scratch); |
| index = scratch; |
| } |
| |
| if (src.offset != 0) { |
| if (index != scratch) { |
| ma_mov(index, scratch); |
| index = scratch; |
| } |
| ma_add(Imm32(src.offset), index); |
| } |
| ma_ldrsb(EDtrAddr(src.base, EDtrOffReg(index)), dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::load16ZeroExtend(const Address& address, Register dest) |
| { |
| ma_dataTransferN(IsLoad, 16, false, address.base, Imm32(address.offset), dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::load16ZeroExtend(const BaseIndex& src, Register dest) |
| { |
| Register index = src.index; |
| |
| ScratchRegisterScope scratch(asMasm()); |
| |
| // ARMv7 does not have LSL on an index register with an extended load. |
| if (src.scale != TimesOne) { |
| ma_lsl(Imm32::ShiftOf(src.scale), index, scratch); |
| index = scratch; |
| } |
| |
| if (src.offset != 0) { |
| if (index != scratch) { |
| ma_mov(index, scratch); |
| index = scratch; |
| } |
| ma_add(Imm32(src.offset), index); |
| } |
| ma_ldrh(EDtrAddr(src.base, EDtrOffReg(index)), dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::load16SignExtend(const Address& address, Register dest) |
| { |
| ma_dataTransferN(IsLoad, 16, true, address.base, Imm32(address.offset), dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::load16SignExtend(const BaseIndex& src, Register dest) |
| { |
| Register index = src.index; |
| |
| ScratchRegisterScope scratch(asMasm()); |
| |
| // ARMv7 does not have LSL on an index register with an extended load. |
| if (src.scale != TimesOne) { |
| ma_lsl(Imm32::ShiftOf(src.scale), index, scratch); |
| index = scratch; |
| } |
| |
| if (src.offset != 0) { |
| if (index != scratch) { |
| ma_mov(index, scratch); |
| index = scratch; |
| } |
| ma_add(Imm32(src.offset), index); |
| } |
| ma_ldrsh(EDtrAddr(src.base, EDtrOffReg(index)), dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::load32(const Address& address, Register dest) |
| { |
| loadPtr(address, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::load32(const BaseIndex& address, Register dest) |
| { |
| loadPtr(address, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::load32(AbsoluteAddress address, Register dest) |
| { |
| loadPtr(address, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::loadPtr(const Address& address, Register dest) |
| { |
| ma_ldr(address, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::loadPtr(const BaseIndex& src, Register dest) |
| { |
| Register base = src.base; |
| uint32_t scale = Imm32::ShiftOf(src.scale).value; |
| |
| if (src.offset != 0) { |
| ScratchRegisterScope scratch(asMasm()); |
| ma_mov(base, scratch); |
| ma_add(Imm32(src.offset), scratch); |
| ma_ldr(DTRAddr(scratch, DtrRegImmShift(src.index, LSL, scale)), dest); |
| return; |
| } |
| |
| ma_ldr(DTRAddr(base, DtrRegImmShift(src.index, LSL, scale)), dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::loadPtr(AbsoluteAddress address, Register dest) |
| { |
| MOZ_ASSERT(dest != pc); // Use dest as a scratch register. |
| movePtr(ImmWord(uintptr_t(address.addr)), dest); |
| loadPtr(Address(dest, 0), dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::loadPtr(wasm::SymbolicAddress address, Register dest) |
| { |
| MOZ_ASSERT(dest != pc); // Use dest as a scratch register. |
| movePtr(address, dest); |
| loadPtr(Address(dest, 0), dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::loadPrivate(const Address& address, Register dest) |
| { |
| ma_ldr(ToPayload(address), dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::loadDouble(const Address& address, FloatRegister dest) |
| { |
| ma_vldr(address, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::loadDouble(const BaseIndex& src, FloatRegister dest) |
| { |
| // VFP instructions don't even support register Base + register Index modes, |
| // so just add the index, then handle the offset like normal. |
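| // The sequence emitted is roughly: add scratch, base, index LSL #scale; |
| // vldr dest, [scratch, #offset]. |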
| Register base = src.base; |
| Register index = src.index; |
| uint32_t scale = Imm32::ShiftOf(src.scale).value; |
| int32_t offset = src.offset; |
| |
| ScratchRegisterScope scratch(asMasm()); |
| as_add(scratch, base, lsl(index, scale)); |
| ma_vldr(Address(scratch, offset), dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::loadFloatAsDouble(const Address& address, FloatRegister dest) |
| { |
| VFPRegister rt = dest; |
| ma_vldr(address, rt.singleOverlay()); |
| as_vcvt(rt, rt.singleOverlay()); |
| } |
| |
| void |
| MacroAssemblerARMCompat::loadFloatAsDouble(const BaseIndex& src, FloatRegister dest) |
| { |
| // VFP instructions don't even support register Base + register Index modes, |
| // so just add the index, then handle the offset like normal. |
| Register base = src.base; |
| Register index = src.index; |
| uint32_t scale = Imm32::ShiftOf(src.scale).value; |
| int32_t offset = src.offset; |
| VFPRegister rt = dest; |
| |
| ScratchRegisterScope scratch(asMasm()); |
| as_add(scratch, base, lsl(index, scale)); |
| ma_vldr(Address(scratch, offset), rt.singleOverlay()); |
| as_vcvt(rt, rt.singleOverlay()); |
| } |
| |
| void |
| MacroAssemblerARMCompat::loadFloat32(const Address& address, FloatRegister dest) |
| { |
| ma_vldr(address, VFPRegister(dest).singleOverlay()); |
| } |
| |
| void |
| MacroAssemblerARMCompat::loadFloat32(const BaseIndex& src, FloatRegister dest) |
| { |
| // VFP instructions don't even support register Base + register Index modes, |
| // so just add the index, then handle the offset like normal. |
| Register base = src.base; |
| Register index = src.index; |
| uint32_t scale = Imm32::ShiftOf(src.scale).value; |
| int32_t offset = src.offset; |
| |
| ScratchRegisterScope scratch(asMasm()); |
| as_add(scratch, base, lsl(index, scale)); |
| ma_vldr(Address(scratch, offset), VFPRegister(dest).singleOverlay()); |
| } |
| |
| void |
| MacroAssemblerARMCompat::store8(Imm32 imm, const Address& address) |
| { |
| AutoRegisterScope scratch2(asMasm(), secondScratchReg_); |
| ma_mov(imm, scratch2); |
| store8(scratch2, address); |
| } |
| |
| void |
| MacroAssemblerARMCompat::store8(Register src, const Address& address) |
| { |
| ma_dataTransferN(IsStore, 8, false, address.base, Imm32(address.offset), src); |
| } |
| |
| void |
| MacroAssemblerARMCompat::store8(Imm32 imm, const BaseIndex& dest) |
| { |
| AutoRegisterScope scratch2(asMasm(), secondScratchReg_); |
| ma_mov(imm, scratch2); |
| store8(scratch2, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::store8(Register src, const BaseIndex& dest) |
| { |
| Register base = dest.base; |
| uint32_t scale = Imm32::ShiftOf(dest.scale).value; |
| |
| ScratchRegisterScope scratch(asMasm()); |
| |
| if (dest.offset != 0) { |
| ma_add(base, Imm32(dest.offset), scratch); |
| base = scratch; |
| } |
| ma_strb(src, DTRAddr(base, DtrRegImmShift(dest.index, LSL, scale))); |
| } |
| |
| void |
| MacroAssemblerARMCompat::store16(Imm32 imm, const Address& address) |
| { |
| AutoRegisterScope scratch2(asMasm(), secondScratchReg_); |
| ma_mov(imm, scratch2); |
| store16(scratch2, address); |
| } |
| |
| void |
| MacroAssemblerARMCompat::store16(Register src, const Address& address) |
| { |
| ma_dataTransferN(IsStore, 16, false, address.base, Imm32(address.offset), src); |
| } |
| |
| void |
| MacroAssemblerARMCompat::store16(Imm32 imm, const BaseIndex& dest) |
| { |
| AutoRegisterScope scratch2(asMasm(), secondScratchReg_); |
| ma_mov(imm, scratch2); |
| store16(scratch2, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::store16(Register src, const BaseIndex& address) |
| { |
| Register index = address.index; |
| |
| ScratchRegisterScope scratch(asMasm()); |
| |
| // ARMv7 does not have LSL on an index register with an extended load. |
| if (address.scale != TimesOne) { |
| ma_lsl(Imm32::ShiftOf(address.scale), index, scratch); |
| index = scratch; |
| } |
| |
| if (address.offset != 0) { |
| ma_add(index, Imm32(address.offset), scratch); |
| index = scratch; |
| } |
| ma_strh(src, EDtrAddr(address.base, EDtrOffReg(index))); |
| } |
| |
| void |
| MacroAssemblerARMCompat::store32(Register src, AbsoluteAddress address) |
| { |
| storePtr(src, address); |
| } |
| |
| void |
| MacroAssemblerARMCompat::store32(Register src, const Address& address) |
| { |
| storePtr(src, address); |
| } |
| |
| void |
| MacroAssemblerARMCompat::store32(Imm32 src, const Address& address) |
| { |
| AutoRegisterScope scratch2(asMasm(), secondScratchReg_); |
| move32(src, scratch2); |
| storePtr(scratch2, address); |
| } |
| |
| void |
| MacroAssemblerARMCompat::store32(Imm32 imm, const BaseIndex& dest) |
| { |
| ScratchRegisterScope scratch(asMasm()); |
| ma_mov(imm, scratch); |
| store32(scratch, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::store32(Register src, const BaseIndex& dest) |
| { |
| Register base = dest.base; |
| uint32_t scale = Imm32::ShiftOf(dest.scale).value; |
| |
| AutoRegisterScope scratch2(asMasm(), secondScratchReg_); |
| |
| if (dest.offset != 0) { |
| ma_add(base, Imm32(dest.offset), scratch2); |
| base = scratch2; |
| } |
| ma_str(src, DTRAddr(base, DtrRegImmShift(dest.index, LSL, scale))); |
| } |
| |
| void |
| MacroAssemblerARMCompat::store32_NoSecondScratch(Imm32 src, const Address& address) |
| { |
| // move32() needs to use the ScratchRegister internally, but there is no additional |
| // scratch register available since this function forbids use of the second one. |
| move32(src, ScratchRegister); |
| storePtr(ScratchRegister, address); |
| } |
| |
| template <typename T> |
| void |
| MacroAssemblerARMCompat::storePtr(ImmWord imm, T address) |
| { |
| ScratchRegisterScope scratch(asMasm()); |
| movePtr(imm, scratch); |
| storePtr(scratch, address); |
| } |
| |
| template void MacroAssemblerARMCompat::storePtr<Address>(ImmWord imm, Address address); |
| template void MacroAssemblerARMCompat::storePtr<BaseIndex>(ImmWord imm, BaseIndex address); |
| |
| template <typename T> |
| void |
| MacroAssemblerARMCompat::storePtr(ImmPtr imm, T address) |
| { |
| storePtr(ImmWord(uintptr_t(imm.value)), address); |
| } |
| |
| template void MacroAssemblerARMCompat::storePtr<Address>(ImmPtr imm, Address address); |
| template void MacroAssemblerARMCompat::storePtr<BaseIndex>(ImmPtr imm, BaseIndex address); |
| |
| template <typename T> |
| void |
| MacroAssemblerARMCompat::storePtr(ImmGCPtr imm, T address) |
| { |
| ScratchRegisterScope scratch(asMasm()); |
| movePtr(imm, scratch); |
| storePtr(scratch, address); |
| } |
| |
| template void MacroAssemblerARMCompat::storePtr<Address>(ImmGCPtr imm, Address address); |
| template void MacroAssemblerARMCompat::storePtr<BaseIndex>(ImmGCPtr imm, BaseIndex address); |
| |
| void |
| MacroAssemblerARMCompat::storePtr(Register src, const Address& address) |
| { |
| ma_str(src, address); |
| } |
| |
| void |
| MacroAssemblerARMCompat::storePtr(Register src, const BaseIndex& address) |
| { |
| store32(src, address); |
| } |
| |
| void |
| MacroAssemblerARMCompat::storePtr(Register src, AbsoluteAddress dest) |
| { |
| ScratchRegisterScope scratch(asMasm()); |
| movePtr(ImmWord(uintptr_t(dest.addr)), scratch); |
| storePtr(src, Address(scratch, 0)); |
| } |
| |
| // Note: this function clobbers the input register. |
| void |
| MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) |
| { |
| if (HasVFPv3()) { |
| Label notSplit; |
| { |
| ScratchDoubleScope scratchDouble(*this); |
| MOZ_ASSERT(input != scratchDouble); |
| ma_vimm(0.5, scratchDouble); |
| |
| ma_vadd(input, scratchDouble, scratchDouble); |
| // Convert the double into an unsigned fixed point value with 24 bits of |
| // precision. The resulting number will look like 0xII.DDDDDD |
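| // (For example, 5.25 + 0.5 = 5.75 becomes 0x05.C00000 in this format.) |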
| as_vcvtFixed(scratchDouble, false, 24, true); |
| } |
| |
| // Move the fixed point value into an integer register. |
| { |
| ScratchFloat32Scope scratchFloat(*this); |
| as_vxfer(output, InvalidReg, scratchFloat.uintOverlay(), FloatToCore); |
| } |
| |
| // See if this value *might* have been an exact integer after adding |
| // 0.5. This tests the 1/2 through 1/16,777,216th places, but 0.5 needs |
| // to be tested out to the 1/140,737,488,355,328th place. |
| ma_tst(output, Imm32(0x00ffffff)); |
| // Convert to a uint8 by shifting out all of the fraction bits. |
| ma_lsr(Imm32(24), output, output); |
| // If any of the bottom 24 bits were non-zero, then we're good, since |
| // this number can't be exactly XX.0 |
| ma_b(&notSplit, NonZero); |
| { |
| ScratchRegisterScope scratch(*this); |
| as_vxfer(scratch, InvalidReg, input, FloatToCore); |
| ma_cmp(scratch, Imm32(0)); |
| } |
| // If the lower 32 bits of the double were 0, then this was an exact number, |
| // and it should be even. |
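| // E.g. an input of 4.5 produced 5 above; its low mantissa word is zero, so |
| // bit 0 is cleared here and the result rounds to the even value 4. |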
| ma_bic(Imm32(1), output, LeaveCC, Zero); |
| bind(&notSplit); |
| } else { |
| ScratchDoubleScope scratchDouble(*this); |
| MOZ_ASSERT(input != scratchDouble); |
| ma_vimm(0.5, scratchDouble); |
| |
| Label outOfRange; |
| ma_vcmpz(input); |
| // Do the add, in place so we can reference it later. |
| ma_vadd(input, scratchDouble, input); |
| // Do the conversion to an integer. |
| as_vcvt(VFPRegister(scratchDouble).uintOverlay(), VFPRegister(input)); |
| // Copy the converted value out. |
| as_vxfer(output, InvalidReg, scratchDouble, FloatToCore); |
| as_vmrs(pc); |
| ma_mov(Imm32(0), output, LeaveCC, Overflow); // NaN => 0 |
| ma_b(&outOfRange, Overflow); // NaN |
| ma_cmp(output, Imm32(0xff)); |
| ma_mov(Imm32(0xff), output, LeaveCC, Above); |
| ma_b(&outOfRange, Above); |
| // Convert it back to see if we got the same value back. |
| as_vcvt(scratchDouble, VFPRegister(scratchDouble).uintOverlay()); |
| // Do the check. |
| as_vcmp(scratchDouble, input); |
| as_vmrs(pc); |
| ma_bic(Imm32(1), output, LeaveCC, Zero); |
| bind(&outOfRange); |
| } |
| } |
| |
| void |
| MacroAssemblerARMCompat::cmp32(Register lhs, Imm32 rhs) |
| { |
| MOZ_ASSERT(lhs != ScratchRegister); |
| ma_cmp(lhs, rhs); |
| } |
| |
| void |
| MacroAssemblerARMCompat::cmp32(const Operand& lhs, Register rhs) |
| { |
| ma_cmp(lhs.toReg(), rhs); |
| } |
| |
| void |
| MacroAssemblerARMCompat::cmp32(const Operand& lhs, Imm32 rhs) |
| { |
| MOZ_ASSERT(lhs.toReg() != ScratchRegister); |
| ma_cmp(lhs.toReg(), rhs); |
| } |
| |
| void |
| MacroAssemblerARMCompat::cmp32(Register lhs, Register rhs) |
| { |
| ma_cmp(lhs, rhs); |
| } |
| |
| void |
| MacroAssemblerARMCompat::cmpPtr(Register lhs, ImmWord rhs) |
| { |
| MOZ_ASSERT(lhs != ScratchRegister); |
| ma_cmp(lhs, Imm32(rhs.value)); |
| } |
| |
| void |
| MacroAssemblerARMCompat::cmpPtr(Register lhs, ImmPtr rhs) |
| { |
| return cmpPtr(lhs, ImmWord(uintptr_t(rhs.value))); |
| } |
| |
| void |
| MacroAssemblerARMCompat::cmpPtr(Register lhs, Register rhs) |
| { |
| ma_cmp(lhs, rhs); |
| } |
| |
| void |
| MacroAssemblerARMCompat::cmpPtr(Register lhs, ImmGCPtr rhs) |
| { |
| ma_cmp(lhs, rhs); |
| } |
| |
| void |
| MacroAssemblerARMCompat::cmpPtr(Register lhs, Imm32 rhs) |
| { |
| ma_cmp(lhs, rhs); |
| } |
| |
| void |
| MacroAssemblerARMCompat::cmpPtr(const Address& lhs, Register rhs) |
| { |
| ScratchRegisterScope scratch(asMasm()); |
| loadPtr(lhs, scratch); |
| cmpPtr(scratch, rhs); |
| } |
| |
| void |
| MacroAssemblerARMCompat::cmpPtr(const Address& lhs, ImmWord rhs) |
| { |
| AutoRegisterScope scratch2(asMasm(), secondScratchReg_); |
| loadPtr(lhs, scratch2); |
| ma_cmp(scratch2, Imm32(rhs.value)); |
| } |
| |
| void |
| MacroAssemblerARMCompat::cmpPtr(const Address& lhs, ImmPtr rhs) |
| { |
| cmpPtr(lhs, ImmWord(uintptr_t(rhs.value))); |
| } |
| |
| void |
| MacroAssemblerARMCompat::cmpPtr(const Address& lhs, ImmGCPtr rhs) |
| { |
| AutoRegisterScope scratch2(asMasm(), secondScratchReg_); |
| loadPtr(lhs, scratch2); |
| ma_cmp(scratch2, rhs); |
| } |
| |
| void |
| MacroAssemblerARMCompat::cmpPtr(const Address& lhs, Imm32 rhs) |
| { |
| AutoRegisterScope scratch2(asMasm(), secondScratchReg_); |
| loadPtr(lhs, scratch2); |
| ma_cmp(scratch2, rhs); |
| } |
| |
| void |
| MacroAssemblerARMCompat::setStackArg(Register reg, uint32_t arg) |
| { |
| ma_dataTransferN(IsStore, 32, true, sp, Imm32(arg * sizeof(intptr_t)), reg); |
| } |
| |
| void |
| MacroAssemblerARMCompat::subPtr(Imm32 imm, const Register dest) |
| { |
| ma_sub(imm, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::subPtr(const Address& addr, const Register dest) |
| { |
| ScratchRegisterScope scratch(asMasm()); |
| loadPtr(addr, scratch); |
| ma_sub(scratch, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::subPtr(Register src, Register dest) |
| { |
| ma_sub(src, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::subPtr(Register src, const Address& dest) |
| { |
| ScratchRegisterScope scratch(asMasm()); |
| loadPtr(dest, scratch); |
| ma_sub(src, scratch); |
| storePtr(scratch, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::addPtr(Imm32 imm, const Register dest) |
| { |
| ma_add(imm, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::addPtr(Imm32 imm, const Address& dest) |
| { |
| ScratchRegisterScope scratch(asMasm()); |
| loadPtr(dest, scratch); |
| addPtr(imm, scratch); |
| storePtr(scratch, dest); |
| } |
| |
| void |
| MacroAssemblerARMCompat::compareDouble(FloatRegister lhs, FloatRegister rhs) |
| { |
| // Compare the doubles, setting vector status flags. |
| if (rhs.isMissing()) |
| ma_vcmpz(lhs); |
| else |
| ma_vcmp(lhs, rhs); |
| |
| // Move vector status bits to normal status flags. |
| as_vmrs(pc); |
| } |
| |
| void |
| MacroAssemblerARMCompat::branchDouble(DoubleCondition cond, FloatRegister lhs, |
| FloatRegister rhs, Label* label) |
| { |
| compareDouble(lhs, rhs); |
| |
| if (cond == DoubleNotEqual) { |
| // Force the unordered cases not to jump. |
| Label unordered; |
| ma_b(&unordered, VFP_Unordered); |
| ma_b(label, VFP_NotEqualOrUnordered); |
| bind(&unordered); |
| return; |
| } |
| |
| if (cond == DoubleEqualOrUnordered) { |
| ma_b(label, VFP_Unordered); |
| ma_b(label, VFP_Equal); |
| return; |
| } |
| |
| ma_b(label, ConditionFromDoubleCondition(cond)); |
| } |
| |
| void |
| MacroAssemblerARMCompat::compareFloat(FloatRegister lhs, FloatRegister rhs) |
| { |
| // Compare the floats, setting vector status flags. |
| if (rhs.isMissing()) |
| as_vcmpz(VFPRegister(lhs).singleOverlay()); |
| else |
| as_vcmp(VFPRegister(lhs).singleOverlay(), VFPRegister(rhs).singleOverlay()); |
| |
| // Move vector status bits to normal status flags. |
| as_vmrs(pc); |
| } |
| |
| void |
| MacroAssemblerARMCompat::branchFloat(DoubleCondition cond, FloatRegister lhs, |
| FloatRegister rhs, Label* label) |
| { |
| compareFloat(lhs, rhs); |
| |
| if (cond == DoubleNotEqual) { |
| // Force the unordered cases not to jump. |
| Label unordered; |
| ma_b(&unordered, VFP_Unordered); |
| ma_b(label, VFP_NotEqualOrUnordered); |
| bind(&unordered); |
| return; |
| } |
| |
| if (cond == DoubleEqualOrUnordered) { |
| ma_b(label, VFP_Unordered); |
| ma_b(label, VFP_Equal); |
| return; |
| } |
| |
| ma_b(label, ConditionFromDoubleCondition(cond)); |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testInt32(Assembler::Condition cond, const ValueOperand& value) |
| { |
| MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); |
| ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_INT32)); |
| return cond; |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testBoolean(Assembler::Condition cond, const ValueOperand& value) |
| { |
| MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); |
| ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN)); |
| return cond; |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testDouble(Assembler::Condition cond, const ValueOperand& value) |
| { |
| MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); |
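| // Doubles are boxed values whose tag word compares below JSVAL_TAG_CLEAR, |
| // so Equal becomes an unsigned Below test. |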
| Assembler::Condition actual = (cond == Equal) ? Below : AboveOrEqual; |
| ma_cmp(value.typeReg(), ImmTag(JSVAL_TAG_CLEAR)); |
| return actual; |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testNull(Assembler::Condition cond, const ValueOperand& value) |
| { |
| MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); |
| ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_NULL)); |
| return cond; |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testUndefined(Assembler::Condition cond, const ValueOperand& value) |
| { |
| MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); |
| ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_UNDEFINED)); |
| return cond; |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testString(Assembler::Condition cond, const ValueOperand& value) |
| { |
| return testString(cond, value.typeReg()); |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testSymbol(Assembler::Condition cond, const ValueOperand& value) |
| { |
| return testSymbol(cond, value.typeReg()); |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testObject(Assembler::Condition cond, const ValueOperand& value) |
| { |
| return testObject(cond, value.typeReg()); |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testNumber(Assembler::Condition cond, const ValueOperand& value) |
| { |
| return testNumber(cond, value.typeReg()); |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testMagic(Assembler::Condition cond, const ValueOperand& value) |
| { |
| return testMagic(cond, value.typeReg()); |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testPrimitive(Assembler::Condition cond, const ValueOperand& value) |
| { |
| return testPrimitive(cond, value.typeReg()); |
| } |
| |
| // Register-based tests. |
| Assembler::Condition |
| MacroAssemblerARMCompat::testInt32(Assembler::Condition cond, Register tag) |
| { |
| MOZ_ASSERT(cond == Equal || cond == NotEqual); |
| ma_cmp(tag, ImmTag(JSVAL_TAG_INT32)); |
| return cond; |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testBoolean(Assembler::Condition cond, Register tag) |
| { |
| MOZ_ASSERT(cond == Equal || cond == NotEqual); |
| ma_cmp(tag, ImmTag(JSVAL_TAG_BOOLEAN)); |
| return cond; |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testNull(Assembler::Condition cond, Register tag) |
| { |
| MOZ_ASSERT(cond == Equal || cond == NotEqual); |
| ma_cmp(tag, ImmTag(JSVAL_TAG_NULL)); |
| return cond; |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testUndefined(Assembler::Condition cond, Register tag) |
| { |
| MOZ_ASSERT(cond == Equal || cond == NotEqual); |
| ma_cmp(tag, ImmTag(JSVAL_TAG_UNDEFINED)); |
| return cond; |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testString(Assembler::Condition cond, Register tag) |
| { |
| MOZ_ASSERT(cond == Equal || cond == NotEqual); |
| ma_cmp(tag, ImmTag(JSVAL_TAG_STRING)); |
| return cond; |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testSymbol(Assembler::Condition cond, Register tag) |
| { |
| MOZ_ASSERT(cond == Equal || cond == NotEqual); |
| ma_cmp(tag, ImmTag(JSVAL_TAG_SYMBOL)); |
| return cond; |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testObject(Assembler::Condition cond, Register tag) |
| { |
| MOZ_ASSERT(cond == Equal || cond == NotEqual); |
| ma_cmp(tag, ImmTag(JSVAL_TAG_OBJECT)); |
| return cond; |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testMagic(Assembler::Condition cond, Register tag) |
| { |
| MOZ_ASSERT(cond == Equal || cond == NotEqual); |
| ma_cmp(tag, ImmTag(JSVAL_TAG_MAGIC)); |
| return cond; |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testPrimitive(Assembler::Condition cond, Register tag) |
| { |
| MOZ_ASSERT(cond == Equal || cond == NotEqual); |
| ma_cmp(tag, ImmTag(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET)); |
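| // Primitive tags compare strictly below this bound, so Equal maps to an |
| // unsigned Below test. |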
| return cond == Equal ? Below : AboveOrEqual; |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testGCThing(Assembler::Condition cond, const Address& address) |
| { |
| MOZ_ASSERT(cond == Equal || cond == NotEqual); |
| ScratchRegisterScope scratch(asMasm()); |
| extractTag(address, scratch); |
| ma_cmp(scratch, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET)); |
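| // GC-thing tags start at this lower bound, so Equal maps to AboveOrEqual. |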
| return cond == Equal ? AboveOrEqual : Below; |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testMagic(Assembler::Condition cond, const Address& address) |
| { |
| MOZ_ASSERT(cond == Equal || cond == NotEqual); |
| ScratchRegisterScope scratch(asMasm()); |
| extractTag(address, scratch); |
| ma_cmp(scratch, ImmTag(JSVAL_TAG_MAGIC)); |
| return cond; |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testInt32(Assembler::Condition cond, const Address& address) |
| { |
| MOZ_ASSERT(cond == Equal || cond == NotEqual); |
| ScratchRegisterScope scratch(asMasm()); |
| extractTag(address, scratch); |
| ma_cmp(scratch, ImmTag(JSVAL_TAG_INT32)); |
| return cond; |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testDouble(Condition cond, const Address& address) |
| { |
| MOZ_ASSERT(cond == Equal || cond == NotEqual); |
| ScratchRegisterScope scratch(asMasm()); |
| extractTag(address, scratch); |
| return testDouble(cond, scratch); |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testBoolean(Condition cond, const Address& address) |
| { |
| MOZ_ASSERT(cond == Equal || cond == NotEqual); |
| ScratchRegisterScope scratch(asMasm()); |
| extractTag(address, scratch); |
| return testBoolean(cond, scratch); |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testNull(Condition cond, const Address& address) |
| { |
| MOZ_ASSERT(cond == Equal || cond == NotEqual); |
| ScratchRegisterScope scratch(asMasm()); |
| extractTag(address, scratch); |
| return testNull(cond, scratch); |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testUndefined(Condition cond, const Address& address) |
| { |
| MOZ_ASSERT(cond == Equal || cond == NotEqual); |
| ScratchRegisterScope scratch(asMasm()); |
| extractTag(address, scratch); |
| return testUndefined(cond, scratch); |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testString(Condition cond, const Address& address) |
| { |
| MOZ_ASSERT(cond == Equal || cond == NotEqual); |
| ScratchRegisterScope scratch(asMasm()); |
| extractTag(address, scratch); |
| return testString(cond, scratch); |
| } |
| |
| Assembler::Condition |
| MacroAssemblerARMCompat::testSymbol(Condition cond, const Address |