| // Copyright 2015, ARM Limited |
| // All rights reserved. |
| // |
| // Redistribution and use in source and binary forms, with or without |
| // modification, are permitted provided that the following conditions are met: |
| // |
| // * Redistributions of source code must retain the above copyright notice, |
| // this list of conditions and the following disclaimer. |
| // * Redistributions in binary form must reproduce the above copyright notice, |
| // this list of conditions and the following disclaimer in the documentation |
| // and/or other materials provided with the distribution. |
| // * Neither the name of ARM Limited nor the names of its contributors may be |
| // used to endorse or promote products derived from this software without |
| // specific prior written permission. |
| // |
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
| // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
| // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
| // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE |
| // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
| // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER |
| // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, |
| // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| |
| #include "jit/arm64/vixl/Assembler-vixl.h" |
| |
| #include <cmath> |
| |
| #include "jit/arm64/vixl/MacroAssembler-vixl.h" |
| |
| namespace vixl { |
| |
| // CPURegList utilities. |
| CPURegister CPURegList::PopLowestIndex() { |
| if (IsEmpty()) { |
| return NoCPUReg; |
| } |
| int index = CountTrailingZeros(list_); |
  VIXL_ASSERT((static_cast<RegList>(1) << index) & list_);
| Remove(index); |
| return CPURegister(index, size_, type_); |
| } |
| |
| |
| CPURegister CPURegList::PopHighestIndex() { |
| VIXL_ASSERT(IsValid()); |
| if (IsEmpty()) { |
| return NoCPUReg; |
| } |
| int index = CountLeadingZeros(list_); |
| index = kRegListSizeInBits - 1 - index; |
  VIXL_ASSERT((static_cast<RegList>(1) << index) & list_);
| Remove(index); |
| return CPURegister(index, size_, type_); |
| } |
| |
| |
| bool CPURegList::IsValid() const { |
| if ((type_ == CPURegister::kRegister) || |
| (type_ == CPURegister::kVRegister)) { |
| bool is_valid = true; |
| // Try to create a CPURegister for each element in the list. |
| for (int i = 0; i < kRegListSizeInBits; i++) { |
| if (((list_ >> i) & 1) != 0) { |
| is_valid &= CPURegister(i, size_, type_).IsValid(); |
| } |
| } |
| return is_valid; |
| } else if (type_ == CPURegister::kNoRegister) { |
| // We can't use IsEmpty here because that asserts IsValid(). |
| return list_ == 0; |
| } else { |
| return false; |
| } |
| } |
| |
| |
| void CPURegList::RemoveCalleeSaved() { |
| if (type() == CPURegister::kRegister) { |
| Remove(GetCalleeSaved(RegisterSizeInBits())); |
| } else if (type() == CPURegister::kVRegister) { |
| Remove(GetCalleeSavedV(RegisterSizeInBits())); |
| } else { |
| VIXL_ASSERT(type() == CPURegister::kNoRegister); |
| VIXL_ASSERT(IsEmpty()); |
| // The list must already be empty, so do nothing. |
| } |
| } |
| |
| |
| CPURegList CPURegList::Union(const CPURegList& list_1, |
| const CPURegList& list_2, |
| const CPURegList& list_3) { |
| return Union(list_1, Union(list_2, list_3)); |
| } |
| |
| |
| CPURegList CPURegList::Union(const CPURegList& list_1, |
| const CPURegList& list_2, |
| const CPURegList& list_3, |
| const CPURegList& list_4) { |
| return Union(Union(list_1, list_2), Union(list_3, list_4)); |
| } |
| |
| |
| CPURegList CPURegList::Intersection(const CPURegList& list_1, |
| const CPURegList& list_2, |
| const CPURegList& list_3) { |
| return Intersection(list_1, Intersection(list_2, list_3)); |
| } |
| |
| |
| CPURegList CPURegList::Intersection(const CPURegList& list_1, |
| const CPURegList& list_2, |
| const CPURegList& list_3, |
| const CPURegList& list_4) { |
| return Intersection(Intersection(list_1, list_2), |
| Intersection(list_3, list_4)); |
| } |
| |
| |
| CPURegList CPURegList::GetCalleeSaved(unsigned size) { |
| return CPURegList(CPURegister::kRegister, size, 19, 29); |
| } |
| |
| |
| CPURegList CPURegList::GetCalleeSavedV(unsigned size) { |
| return CPURegList(CPURegister::kVRegister, size, 8, 15); |
| } |
| |
| |
| CPURegList CPURegList::GetCallerSaved(unsigned size) { |
| // Registers x0-x18 and lr (x30) are caller-saved. |
| CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18); |
  // Do not use the lr alias here: as a global constant it may not have been
  // initialised yet (static initialisation order fiasco) for users of this list.
| list.Combine(Register(30, kXRegSize)); |
| return list; |
| } |
| |
| |
| CPURegList CPURegList::GetCallerSavedV(unsigned size) { |
| // Registers d0-d7 and d16-d31 are caller-saved. |
| CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7); |
| list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31)); |
| return list; |
| } |
| |
| |
| const CPURegList kCalleeSaved = CPURegList::GetCalleeSaved(); |
| const CPURegList kCalleeSavedV = CPURegList::GetCalleeSavedV(); |
| const CPURegList kCallerSaved = CPURegList::GetCallerSaved(); |
| const CPURegList kCallerSavedV = CPURegList::GetCallerSavedV(); |
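

// Illustrative use of the list utilities above (a sketch, not used in this
// file): walking the caller-saved X registers from lowest to highest code.
// Register names such as x0 refer to the usual VIXL aliases.
//
//   CPURegList list = CPURegList::GetCallerSaved(kXRegSize);
//   while (!list.IsEmpty()) {
//     CPURegister reg = list.PopLowestIndex();  // x0, x1, ..., x18, then lr.
//     // ... save or restore 'reg' ...
//   }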
| |
| |
| // Registers. |
| #define WREG(n) w##n, |
| const Register Register::wregisters[] = { |
| REGISTER_CODE_LIST(WREG) |
| }; |
| #undef WREG |
| |
| #define XREG(n) x##n, |
| const Register Register::xregisters[] = { |
| REGISTER_CODE_LIST(XREG) |
| }; |
| #undef XREG |
| |
| #define BREG(n) b##n, |
| const VRegister VRegister::bregisters[] = { |
| REGISTER_CODE_LIST(BREG) |
| }; |
| #undef BREG |
| |
| #define HREG(n) h##n, |
| const VRegister VRegister::hregisters[] = { |
| REGISTER_CODE_LIST(HREG) |
| }; |
| #undef HREG |
| |
| #define SREG(n) s##n, |
| const VRegister VRegister::sregisters[] = { |
| REGISTER_CODE_LIST(SREG) |
| }; |
| #undef SREG |
| |
| #define DREG(n) d##n, |
| const VRegister VRegister::dregisters[] = { |
| REGISTER_CODE_LIST(DREG) |
| }; |
| #undef DREG |
| |
| #define QREG(n) q##n, |
| const VRegister VRegister::qregisters[] = { |
| REGISTER_CODE_LIST(QREG) |
| }; |
| #undef QREG |
| |
| #define VREG(n) v##n, |
| const VRegister VRegister::vregisters[] = { |
| REGISTER_CODE_LIST(VREG) |
| }; |
| #undef VREG |
| |
| |
| const Register& Register::WRegFromCode(unsigned code) { |
| if (code == kSPRegInternalCode) { |
| return wsp; |
| } else { |
| VIXL_ASSERT(code < kNumberOfRegisters); |
| return wregisters[code]; |
| } |
| } |
| |
| |
| const Register& Register::XRegFromCode(unsigned code) { |
| if (code == kSPRegInternalCode) { |
| return sp; |
| } else { |
| VIXL_ASSERT(code < kNumberOfRegisters); |
| return xregisters[code]; |
| } |
| } |
| |
| |
| const VRegister& VRegister::BRegFromCode(unsigned code) { |
| VIXL_ASSERT(code < kNumberOfVRegisters); |
| return bregisters[code]; |
| } |
| |
| |
| const VRegister& VRegister::HRegFromCode(unsigned code) { |
| VIXL_ASSERT(code < kNumberOfVRegisters); |
| return hregisters[code]; |
| } |
| |
| |
| const VRegister& VRegister::SRegFromCode(unsigned code) { |
| VIXL_ASSERT(code < kNumberOfVRegisters); |
| return sregisters[code]; |
| } |
| |
| |
| const VRegister& VRegister::DRegFromCode(unsigned code) { |
| VIXL_ASSERT(code < kNumberOfVRegisters); |
| return dregisters[code]; |
| } |
| |
| |
| const VRegister& VRegister::QRegFromCode(unsigned code) { |
| VIXL_ASSERT(code < kNumberOfVRegisters); |
| return qregisters[code]; |
| } |
| |
| |
| const VRegister& VRegister::VRegFromCode(unsigned code) { |
| VIXL_ASSERT(code < kNumberOfVRegisters); |
| return vregisters[code]; |
| } |
| |
| |
| const Register& CPURegister::W() const { |
| VIXL_ASSERT(IsValidRegister()); |
| return Register::WRegFromCode(code_); |
| } |
| |
| |
| const Register& CPURegister::X() const { |
| VIXL_ASSERT(IsValidRegister()); |
| return Register::XRegFromCode(code_); |
| } |
| |
| |
| const VRegister& CPURegister::B() const { |
| VIXL_ASSERT(IsValidVRegister()); |
| return VRegister::BRegFromCode(code_); |
| } |
| |
| |
| const VRegister& CPURegister::H() const { |
| VIXL_ASSERT(IsValidVRegister()); |
| return VRegister::HRegFromCode(code_); |
| } |
| |
| |
| const VRegister& CPURegister::S() const { |
| VIXL_ASSERT(IsValidVRegister()); |
| return VRegister::SRegFromCode(code_); |
| } |
| |
| |
| const VRegister& CPURegister::D() const { |
| VIXL_ASSERT(IsValidVRegister()); |
| return VRegister::DRegFromCode(code_); |
| } |
| |
| |
| const VRegister& CPURegister::Q() const { |
| VIXL_ASSERT(IsValidVRegister()); |
| return VRegister::QRegFromCode(code_); |
| } |
| |
| |
| const VRegister& CPURegister::V() const { |
| VIXL_ASSERT(IsValidVRegister()); |
| return VRegister::VRegFromCode(code_); |
| } |
| |
| |
| // Operand. |
| Operand::Operand(int64_t immediate) |
| : immediate_(immediate), |
| reg_(NoReg), |
| shift_(NO_SHIFT), |
| extend_(NO_EXTEND), |
| shift_amount_(0) {} |
| |
| |
| Operand::Operand(Register reg, Shift shift, unsigned shift_amount) |
| : reg_(reg), |
| shift_(shift), |
| extend_(NO_EXTEND), |
| shift_amount_(shift_amount) { |
| VIXL_ASSERT(shift != MSL); |
| VIXL_ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize)); |
| VIXL_ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize)); |
| VIXL_ASSERT(!reg.IsSP()); |
| } |
| |
| |
| Operand::Operand(Register reg, Extend extend, unsigned shift_amount) |
| : reg_(reg), |
| shift_(NO_SHIFT), |
| extend_(extend), |
| shift_amount_(shift_amount) { |
| VIXL_ASSERT(reg.IsValid()); |
| VIXL_ASSERT(shift_amount <= 4); |
| VIXL_ASSERT(!reg.IsSP()); |
| |
| // Extend modes SXTX and UXTX require a 64-bit register. |
| VIXL_ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX))); |
| } |
| |
| |
| bool Operand::IsImmediate() const { |
| return reg_.Is(NoReg); |
| } |
| |
| |
| bool Operand::IsShiftedRegister() const { |
| return reg_.IsValid() && (shift_ != NO_SHIFT); |
| } |
| |
| |
| bool Operand::IsExtendedRegister() const { |
| return reg_.IsValid() && (extend_ != NO_EXTEND); |
| } |
| |
| |
| bool Operand::IsZero() const { |
| if (IsImmediate()) { |
| return immediate() == 0; |
| } else { |
| return reg().IsZero(); |
| } |
| } |
| |
| |
| Operand Operand::ToExtendedRegister() const { |
| VIXL_ASSERT(IsShiftedRegister()); |
| VIXL_ASSERT((shift_ == LSL) && (shift_amount_ <= 4)); |
| return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_); |
| } |
| |
| |
| // MemOperand |
| MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode) |
| : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode) { |
| VIXL_ASSERT(base.Is64Bits() && !base.IsZero()); |
| } |
| |
| |
| MemOperand::MemOperand(Register base, |
| Register regoffset, |
| Extend extend, |
| unsigned shift_amount) |
| : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset), |
| shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) { |
| VIXL_ASSERT(base.Is64Bits() && !base.IsZero()); |
| VIXL_ASSERT(!regoffset.IsSP()); |
| VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX)); |
| |
| // SXTX extend mode requires a 64-bit offset register. |
| VIXL_ASSERT(regoffset.Is64Bits() || (extend != SXTX)); |
| } |
| |
| |
| MemOperand::MemOperand(Register base, |
| Register regoffset, |
| Shift shift, |
| unsigned shift_amount) |
| : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset), |
| shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) { |
| VIXL_ASSERT(base.Is64Bits() && !base.IsZero()); |
| VIXL_ASSERT(regoffset.Is64Bits() && !regoffset.IsSP()); |
| VIXL_ASSERT(shift == LSL); |
| } |
| |
| |
| MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode) |
| : base_(base), regoffset_(NoReg), addrmode_(addrmode) { |
| VIXL_ASSERT(base.Is64Bits() && !base.IsZero()); |
| |
| if (offset.IsImmediate()) { |
| offset_ = offset.immediate(); |
| } else if (offset.IsShiftedRegister()) { |
| VIXL_ASSERT((addrmode == Offset) || (addrmode == PostIndex)); |
| |
| regoffset_ = offset.reg(); |
| shift_ = offset.shift(); |
| shift_amount_ = offset.shift_amount(); |
| |
| extend_ = NO_EXTEND; |
| offset_ = 0; |
| |
| // These assertions match those in the shifted-register constructor. |
| VIXL_ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP()); |
| VIXL_ASSERT(shift_ == LSL); |
| } else { |
| VIXL_ASSERT(offset.IsExtendedRegister()); |
| VIXL_ASSERT(addrmode == Offset); |
| |
| regoffset_ = offset.reg(); |
| extend_ = offset.extend(); |
| shift_amount_ = offset.shift_amount(); |
| |
| shift_ = NO_SHIFT; |
| offset_ = 0; |
| |
| // These assertions match those in the extended-register constructor. |
| VIXL_ASSERT(!regoffset_.IsSP()); |
| VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX)); |
| VIXL_ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX))); |
| } |
| } |
| |
| |
| bool MemOperand::IsImmediateOffset() const { |
| return (addrmode_ == Offset) && regoffset_.Is(NoReg); |
| } |
| |
| |
| bool MemOperand::IsRegisterOffset() const { |
| return (addrmode_ == Offset) && !regoffset_.Is(NoReg); |
| } |
| |
| |
| bool MemOperand::IsPreIndex() const { |
| return addrmode_ == PreIndex; |
| } |
| |
| |
| bool MemOperand::IsPostIndex() const { |
| return addrmode_ == PostIndex; |
| } |
| |
| |
| void MemOperand::AddOffset(int64_t offset) { |
| VIXL_ASSERT(IsImmediateOffset()); |
| offset_ += offset; |
| } |
| |
| |
| // Assembler |
| Assembler::Assembler(PositionIndependentCodeOption pic) |
| : pic_(pic) { |
| } |
| |
| |
| // Code generation. |
| void Assembler::br(const Register& xn) { |
| VIXL_ASSERT(xn.Is64Bits()); |
| Emit(BR | Rn(xn)); |
| } |
| |
| |
| void Assembler::blr(const Register& xn) { |
| VIXL_ASSERT(xn.Is64Bits()); |
| Emit(BLR | Rn(xn)); |
| } |
| |
| |
| void Assembler::ret(const Register& xn) { |
| VIXL_ASSERT(xn.Is64Bits()); |
| Emit(RET | Rn(xn)); |
| } |
| |
| |
| void Assembler::NEONTable(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm, |
| NEONTableOp op) { |
| VIXL_ASSERT(vd.Is16B() || vd.Is8B()); |
| VIXL_ASSERT(vn.Is16B()); |
| VIXL_ASSERT(AreSameFormat(vd, vm)); |
| Emit(op | (vd.IsQ() ? NEON_Q : 0) | Rm(vm) | Rn(vn) | Rd(vd)); |
| } |
| |
| |
| void Assembler::tbl(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm) { |
| NEONTable(vd, vn, vm, NEON_TBL_1v); |
| } |
| |
| |
| void Assembler::tbl(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vn2, |
| const VRegister& vm) { |
| USE(vn2); |
| VIXL_ASSERT(AreSameFormat(vn, vn2)); |
| VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters)); |
| |
| NEONTable(vd, vn, vm, NEON_TBL_2v); |
| } |
| |
| |
| void Assembler::tbl(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vn2, |
| const VRegister& vn3, |
| const VRegister& vm) { |
| USE(vn2, vn3); |
| VIXL_ASSERT(AreSameFormat(vn, vn2, vn3)); |
| VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters)); |
| VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters)); |
| |
| NEONTable(vd, vn, vm, NEON_TBL_3v); |
| } |
| |
| |
| void Assembler::tbl(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vn2, |
| const VRegister& vn3, |
| const VRegister& vn4, |
| const VRegister& vm) { |
| USE(vn2, vn3, vn4); |
| VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4)); |
| VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters)); |
| VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters)); |
| VIXL_ASSERT(vn4.code() == ((vn.code() + 3) % kNumberOfVRegisters)); |
| |
| NEONTable(vd, vn, vm, NEON_TBL_4v); |
| } |
| |
| |
| void Assembler::tbx(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm) { |
| NEONTable(vd, vn, vm, NEON_TBX_1v); |
| } |
| |
| |
| void Assembler::tbx(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vn2, |
| const VRegister& vm) { |
| USE(vn2); |
| VIXL_ASSERT(AreSameFormat(vn, vn2)); |
| VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters)); |
| |
| NEONTable(vd, vn, vm, NEON_TBX_2v); |
| } |
| |
| |
| void Assembler::tbx(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vn2, |
| const VRegister& vn3, |
| const VRegister& vm) { |
| USE(vn2, vn3); |
| VIXL_ASSERT(AreSameFormat(vn, vn2, vn3)); |
| VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters)); |
| VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters)); |
| |
| NEONTable(vd, vn, vm, NEON_TBX_3v); |
| } |
| |
| |
| void Assembler::tbx(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vn2, |
| const VRegister& vn3, |
| const VRegister& vn4, |
| const VRegister& vm) { |
| USE(vn2, vn3, vn4); |
| VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4)); |
| VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters)); |
| VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters)); |
| VIXL_ASSERT(vn4.code() == ((vn.code() + 3) % kNumberOfVRegisters)); |
| |
| NEONTable(vd, vn, vm, NEON_TBX_4v); |
| } |
| |
| |
| void Assembler::add(const Register& rd, |
| const Register& rn, |
| const Operand& operand) { |
| AddSub(rd, rn, operand, LeaveFlags, ADD); |
| } |
| |
| |
| void Assembler::adds(const Register& rd, |
| const Register& rn, |
| const Operand& operand) { |
| AddSub(rd, rn, operand, SetFlags, ADD); |
| } |
| |
| |
| void Assembler::cmn(const Register& rn, |
| const Operand& operand) { |
| Register zr = AppropriateZeroRegFor(rn); |
| adds(zr, rn, operand); |
| } |
| |
| |
| void Assembler::sub(const Register& rd, |
| const Register& rn, |
| const Operand& operand) { |
| AddSub(rd, rn, operand, LeaveFlags, SUB); |
| } |
| |
| |
| void Assembler::subs(const Register& rd, |
| const Register& rn, |
| const Operand& operand) { |
| AddSub(rd, rn, operand, SetFlags, SUB); |
| } |
| |
| |
| void Assembler::cmp(const Register& rn, const Operand& operand) { |
| Register zr = AppropriateZeroRegFor(rn); |
| subs(zr, rn, operand); |
| } |
| |
| |
| void Assembler::neg(const Register& rd, const Operand& operand) { |
| Register zr = AppropriateZeroRegFor(rd); |
| sub(rd, zr, operand); |
| } |
| |
| |
| void Assembler::negs(const Register& rd, const Operand& operand) { |
| Register zr = AppropriateZeroRegFor(rd); |
| subs(rd, zr, operand); |
| } |
| |
| |
| void Assembler::adc(const Register& rd, |
| const Register& rn, |
| const Operand& operand) { |
| AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC); |
| } |
| |
| |
| void Assembler::adcs(const Register& rd, |
| const Register& rn, |
| const Operand& operand) { |
| AddSubWithCarry(rd, rn, operand, SetFlags, ADC); |
| } |
| |
| |
| void Assembler::sbc(const Register& rd, |
| const Register& rn, |
| const Operand& operand) { |
| AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC); |
| } |
| |
| |
| void Assembler::sbcs(const Register& rd, |
| const Register& rn, |
| const Operand& operand) { |
| AddSubWithCarry(rd, rn, operand, SetFlags, SBC); |
| } |
| |
| |
| void Assembler::ngc(const Register& rd, const Operand& operand) { |
| Register zr = AppropriateZeroRegFor(rd); |
| sbc(rd, zr, operand); |
| } |
| |
| |
| void Assembler::ngcs(const Register& rd, const Operand& operand) { |
| Register zr = AppropriateZeroRegFor(rd); |
| sbcs(rd, zr, operand); |
| } |
| |
| |
| // Logical instructions. |
| void Assembler::and_(const Register& rd, |
| const Register& rn, |
| const Operand& operand) { |
| Logical(rd, rn, operand, AND); |
| } |
| |
| |
| void Assembler::bic(const Register& rd, |
| const Register& rn, |
| const Operand& operand) { |
| Logical(rd, rn, operand, BIC); |
| } |
| |
| |
| void Assembler::bics(const Register& rd, |
| const Register& rn, |
| const Operand& operand) { |
| Logical(rd, rn, operand, BICS); |
| } |
| |
| |
| void Assembler::orr(const Register& rd, |
| const Register& rn, |
| const Operand& operand) { |
| Logical(rd, rn, operand, ORR); |
| } |
| |
| |
| void Assembler::orn(const Register& rd, |
| const Register& rn, |
| const Operand& operand) { |
| Logical(rd, rn, operand, ORN); |
| } |
| |
| |
| void Assembler::eor(const Register& rd, |
| const Register& rn, |
| const Operand& operand) { |
| Logical(rd, rn, operand, EOR); |
| } |
| |
| |
| void Assembler::eon(const Register& rd, |
| const Register& rn, |
| const Operand& operand) { |
| Logical(rd, rn, operand, EON); |
| } |
| |
| |
| void Assembler::lslv(const Register& rd, |
| const Register& rn, |
| const Register& rm) { |
| VIXL_ASSERT(rd.size() == rn.size()); |
| VIXL_ASSERT(rd.size() == rm.size()); |
| Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::lsrv(const Register& rd, |
| const Register& rn, |
| const Register& rm) { |
| VIXL_ASSERT(rd.size() == rn.size()); |
| VIXL_ASSERT(rd.size() == rm.size()); |
| Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::asrv(const Register& rd, |
| const Register& rn, |
| const Register& rm) { |
| VIXL_ASSERT(rd.size() == rn.size()); |
| VIXL_ASSERT(rd.size() == rm.size()); |
| Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::rorv(const Register& rd, |
| const Register& rn, |
| const Register& rm) { |
| VIXL_ASSERT(rd.size() == rn.size()); |
| VIXL_ASSERT(rd.size() == rm.size()); |
| Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| // Bitfield operations. |
| void Assembler::bfm(const Register& rd, |
| const Register& rn, |
| unsigned immr, |
| unsigned imms) { |
| VIXL_ASSERT(rd.size() == rn.size()); |
| Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); |
| Emit(SF(rd) | BFM | N | |
| ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::sbfm(const Register& rd, |
| const Register& rn, |
| unsigned immr, |
| unsigned imms) { |
| VIXL_ASSERT(rd.Is64Bits() || rn.Is32Bits()); |
| Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); |
| Emit(SF(rd) | SBFM | N | |
| ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::ubfm(const Register& rd, |
| const Register& rn, |
| unsigned immr, |
| unsigned imms) { |
| VIXL_ASSERT(rd.size() == rn.size()); |
| Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); |
| Emit(SF(rd) | UBFM | N | |
| ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::extr(const Register& rd, |
| const Register& rn, |
| const Register& rm, |
| unsigned lsb) { |
| VIXL_ASSERT(rd.size() == rn.size()); |
| VIXL_ASSERT(rd.size() == rm.size()); |
| Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); |
| Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rn.size()) | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::csel(const Register& rd, |
| const Register& rn, |
| const Register& rm, |
| Condition cond) { |
| ConditionalSelect(rd, rn, rm, cond, CSEL); |
| } |
| |
| |
| void Assembler::csinc(const Register& rd, |
| const Register& rn, |
| const Register& rm, |
| Condition cond) { |
| ConditionalSelect(rd, rn, rm, cond, CSINC); |
| } |
| |
| |
| void Assembler::csinv(const Register& rd, |
| const Register& rn, |
| const Register& rm, |
| Condition cond) { |
| ConditionalSelect(rd, rn, rm, cond, CSINV); |
| } |
| |
| |
| void Assembler::csneg(const Register& rd, |
| const Register& rn, |
| const Register& rm, |
| Condition cond) { |
| ConditionalSelect(rd, rn, rm, cond, CSNEG); |
| } |
| |
| |
| void Assembler::cset(const Register &rd, Condition cond) { |
| VIXL_ASSERT((cond != al) && (cond != nv)); |
| Register zr = AppropriateZeroRegFor(rd); |
| csinc(rd, zr, zr, InvertCondition(cond)); |
| } |
| |
| |
| void Assembler::csetm(const Register &rd, Condition cond) { |
| VIXL_ASSERT((cond != al) && (cond != nv)); |
| Register zr = AppropriateZeroRegFor(rd); |
| csinv(rd, zr, zr, InvertCondition(cond)); |
| } |
| |
| |
| void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) { |
| VIXL_ASSERT((cond != al) && (cond != nv)); |
| csinc(rd, rn, rn, InvertCondition(cond)); |
| } |
| |
| |
| void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) { |
| VIXL_ASSERT((cond != al) && (cond != nv)); |
| csinv(rd, rn, rn, InvertCondition(cond)); |
| } |
| |
| |
| void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) { |
| VIXL_ASSERT((cond != al) && (cond != nv)); |
| csneg(rd, rn, rn, InvertCondition(cond)); |
| } |
| |
| |
| void Assembler::ConditionalSelect(const Register& rd, |
| const Register& rn, |
| const Register& rm, |
| Condition cond, |
| ConditionalSelectOp op) { |
| VIXL_ASSERT(rd.size() == rn.size()); |
| VIXL_ASSERT(rd.size() == rm.size()); |
| Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::ccmn(const Register& rn, |
| const Operand& operand, |
| StatusFlags nzcv, |
| Condition cond) { |
| ConditionalCompare(rn, operand, nzcv, cond, CCMN); |
| } |
| |
| |
| void Assembler::ccmp(const Register& rn, |
| const Operand& operand, |
| StatusFlags nzcv, |
| Condition cond) { |
| ConditionalCompare(rn, operand, nzcv, cond, CCMP); |
| } |
| |
| |
| void Assembler::DataProcessing3Source(const Register& rd, |
| const Register& rn, |
| const Register& rm, |
| const Register& ra, |
| DataProcessing3SourceOp op) { |
| Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::crc32b(const Register& rd, |
| const Register& rn, |
| const Register& rm) { |
| VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits()); |
| Emit(SF(rm) | Rm(rm) | CRC32B | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::crc32h(const Register& rd, |
| const Register& rn, |
| const Register& rm) { |
| VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits()); |
| Emit(SF(rm) | Rm(rm) | CRC32H | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::crc32w(const Register& rd, |
| const Register& rn, |
| const Register& rm) { |
| VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits()); |
| Emit(SF(rm) | Rm(rm) | CRC32W | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::crc32x(const Register& rd, |
| const Register& rn, |
| const Register& rm) { |
| VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is64Bits()); |
| Emit(SF(rm) | Rm(rm) | CRC32X | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::crc32cb(const Register& rd, |
| const Register& rn, |
| const Register& rm) { |
| VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits()); |
| Emit(SF(rm) | Rm(rm) | CRC32CB | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::crc32ch(const Register& rd, |
| const Register& rn, |
| const Register& rm) { |
| VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits()); |
| Emit(SF(rm) | Rm(rm) | CRC32CH | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::crc32cw(const Register& rd, |
| const Register& rn, |
| const Register& rm) { |
| VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits()); |
| Emit(SF(rm) | Rm(rm) | CRC32CW | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::crc32cx(const Register& rd, |
| const Register& rn, |
| const Register& rm) { |
| VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is64Bits()); |
| Emit(SF(rm) | Rm(rm) | CRC32CX | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::mul(const Register& rd, |
| const Register& rn, |
| const Register& rm) { |
| VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm)); |
| DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MADD); |
| } |
| |
| |
| void Assembler::madd(const Register& rd, |
| const Register& rn, |
| const Register& rm, |
| const Register& ra) { |
| DataProcessing3Source(rd, rn, rm, ra, MADD); |
| } |
| |
| |
| void Assembler::mneg(const Register& rd, |
| const Register& rn, |
| const Register& rm) { |
| VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm)); |
| DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MSUB); |
| } |
| |
| |
| void Assembler::msub(const Register& rd, |
| const Register& rn, |
| const Register& rm, |
| const Register& ra) { |
| DataProcessing3Source(rd, rn, rm, ra, MSUB); |
| } |
| |
| |
| void Assembler::umaddl(const Register& rd, |
| const Register& rn, |
| const Register& rm, |
| const Register& ra) { |
| VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits()); |
| VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits()); |
| DataProcessing3Source(rd, rn, rm, ra, UMADDL_x); |
| } |
| |
| |
| void Assembler::smaddl(const Register& rd, |
| const Register& rn, |
| const Register& rm, |
| const Register& ra) { |
| VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits()); |
| VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits()); |
| DataProcessing3Source(rd, rn, rm, ra, SMADDL_x); |
| } |
| |
| |
| void Assembler::umsubl(const Register& rd, |
| const Register& rn, |
| const Register& rm, |
| const Register& ra) { |
| VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits()); |
| VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits()); |
| DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x); |
| } |
| |
| |
| void Assembler::smsubl(const Register& rd, |
| const Register& rn, |
| const Register& rm, |
| const Register& ra) { |
| VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits()); |
| VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits()); |
| DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x); |
| } |
| |
| |
| void Assembler::smull(const Register& rd, |
| const Register& rn, |
| const Register& rm) { |
| VIXL_ASSERT(rd.Is64Bits()); |
| VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits()); |
| DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x); |
| } |
| |
| |
| void Assembler::sdiv(const Register& rd, |
| const Register& rn, |
| const Register& rm) { |
| VIXL_ASSERT(rd.size() == rn.size()); |
| VIXL_ASSERT(rd.size() == rm.size()); |
| Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::smulh(const Register& xd, |
| const Register& xn, |
| const Register& xm) { |
| VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits()); |
| DataProcessing3Source(xd, xn, xm, xzr, SMULH_x); |
| } |
| |
| |
| void Assembler::umulh(const Register& xd, |
| const Register& xn, |
| const Register& xm) { |
| VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits()); |
| DataProcessing3Source(xd, xn, xm, xzr, UMULH_x); |
| } |
| |
| |
| void Assembler::udiv(const Register& rd, |
| const Register& rn, |
| const Register& rm) { |
| VIXL_ASSERT(rd.size() == rn.size()); |
| VIXL_ASSERT(rd.size() == rm.size()); |
| Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::rbit(const Register& rd, |
| const Register& rn) { |
| DataProcessing1Source(rd, rn, RBIT); |
| } |
| |
| |
| void Assembler::rev16(const Register& rd, |
| const Register& rn) { |
| DataProcessing1Source(rd, rn, REV16); |
| } |
| |
| |
| void Assembler::rev32(const Register& rd, |
| const Register& rn) { |
| VIXL_ASSERT(rd.Is64Bits()); |
| DataProcessing1Source(rd, rn, REV); |
| } |
| |
| |
| void Assembler::rev(const Register& rd, |
| const Register& rn) { |
| DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w); |
| } |
| |
| |
| void Assembler::clz(const Register& rd, |
| const Register& rn) { |
| DataProcessing1Source(rd, rn, CLZ); |
| } |
| |
| |
| void Assembler::cls(const Register& rd, |
| const Register& rn) { |
| DataProcessing1Source(rd, rn, CLS); |
| } |
| |
| |
| void Assembler::ldp(const CPURegister& rt, |
| const CPURegister& rt2, |
| const MemOperand& src) { |
| LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2)); |
| } |
| |
| |
| void Assembler::stp(const CPURegister& rt, |
| const CPURegister& rt2, |
| const MemOperand& dst) { |
| LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2)); |
| } |
| |
| |
| void Assembler::ldpsw(const Register& rt, |
| const Register& rt2, |
| const MemOperand& src) { |
| VIXL_ASSERT(rt.Is64Bits()); |
| LoadStorePair(rt, rt2, src, LDPSW_x); |
| } |
| |
| |
| void Assembler::LoadStorePair(const CPURegister& rt, |
| const CPURegister& rt2, |
| const MemOperand& addr, |
| LoadStorePairOp op) { |
| // 'rt' and 'rt2' can only be aliased for stores. |
| VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2)); |
| VIXL_ASSERT(AreSameSizeAndType(rt, rt2)); |
| VIXL_ASSERT(IsImmLSPair(addr.offset(), CalcLSPairDataSize(op))); |
| |
| int offset = static_cast<int>(addr.offset()); |
| Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | |
| ImmLSPair(offset, CalcLSPairDataSize(op)); |
| |
| Instr addrmodeop; |
| if (addr.IsImmediateOffset()) { |
| addrmodeop = LoadStorePairOffsetFixed; |
| } else { |
| VIXL_ASSERT(addr.offset() != 0); |
| if (addr.IsPreIndex()) { |
| addrmodeop = LoadStorePairPreIndexFixed; |
| } else { |
| VIXL_ASSERT(addr.IsPostIndex()); |
| addrmodeop = LoadStorePairPostIndexFixed; |
| } |
| } |
| Emit(addrmodeop | memop); |
| } |
| |
| |
| void Assembler::ldnp(const CPURegister& rt, |
| const CPURegister& rt2, |
| const MemOperand& src) { |
| LoadStorePairNonTemporal(rt, rt2, src, |
| LoadPairNonTemporalOpFor(rt, rt2)); |
| } |
| |
| |
| void Assembler::stnp(const CPURegister& rt, |
| const CPURegister& rt2, |
| const MemOperand& dst) { |
| LoadStorePairNonTemporal(rt, rt2, dst, |
| StorePairNonTemporalOpFor(rt, rt2)); |
| } |
| |
| |
| void Assembler::LoadStorePairNonTemporal(const CPURegister& rt, |
| const CPURegister& rt2, |
| const MemOperand& addr, |
| LoadStorePairNonTemporalOp op) { |
| VIXL_ASSERT(!rt.Is(rt2)); |
| VIXL_ASSERT(AreSameSizeAndType(rt, rt2)); |
| VIXL_ASSERT(addr.IsImmediateOffset()); |
| |
| unsigned size = CalcLSPairDataSize( |
| static_cast<LoadStorePairOp>(op & LoadStorePairMask)); |
| VIXL_ASSERT(IsImmLSPair(addr.offset(), size)); |
| int offset = static_cast<int>(addr.offset()); |
| Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | ImmLSPair(offset, size)); |
| } |
| |
| |
| // Memory instructions. |
| void Assembler::ldrb(const Register& rt, const MemOperand& src, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(option != RequireUnscaledOffset); |
| VIXL_ASSERT(option != PreferUnscaledOffset); |
| LoadStore(rt, src, LDRB_w, option); |
| } |
| |
| |
| void Assembler::strb(const Register& rt, const MemOperand& dst, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(option != RequireUnscaledOffset); |
| VIXL_ASSERT(option != PreferUnscaledOffset); |
| LoadStore(rt, dst, STRB_w, option); |
| } |
| |
| |
| void Assembler::ldrsb(const Register& rt, const MemOperand& src, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(option != RequireUnscaledOffset); |
| VIXL_ASSERT(option != PreferUnscaledOffset); |
| LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option); |
| } |
| |
| |
| void Assembler::ldrh(const Register& rt, const MemOperand& src, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(option != RequireUnscaledOffset); |
| VIXL_ASSERT(option != PreferUnscaledOffset); |
| LoadStore(rt, src, LDRH_w, option); |
| } |
| |
| |
| void Assembler::strh(const Register& rt, const MemOperand& dst, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(option != RequireUnscaledOffset); |
| VIXL_ASSERT(option != PreferUnscaledOffset); |
| LoadStore(rt, dst, STRH_w, option); |
| } |
| |
| |
| void Assembler::ldrsh(const Register& rt, const MemOperand& src, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(option != RequireUnscaledOffset); |
| VIXL_ASSERT(option != PreferUnscaledOffset); |
| LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w, option); |
| } |
| |
| |
| void Assembler::ldr(const CPURegister& rt, const MemOperand& src, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(option != RequireUnscaledOffset); |
| VIXL_ASSERT(option != PreferUnscaledOffset); |
| LoadStore(rt, src, LoadOpFor(rt), option); |
| } |
| |
| |
| void Assembler::str(const CPURegister& rt, const MemOperand& dst, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(option != RequireUnscaledOffset); |
| VIXL_ASSERT(option != PreferUnscaledOffset); |
| LoadStore(rt, dst, StoreOpFor(rt), option); |
| } |
| |
| |
| void Assembler::ldrsw(const Register& rt, const MemOperand& src, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(rt.Is64Bits()); |
| VIXL_ASSERT(option != RequireUnscaledOffset); |
| VIXL_ASSERT(option != PreferUnscaledOffset); |
| LoadStore(rt, src, LDRSW_x, option); |
| } |
| |
| |
| void Assembler::ldurb(const Register& rt, const MemOperand& src, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(option != RequireScaledOffset); |
| VIXL_ASSERT(option != PreferScaledOffset); |
| LoadStore(rt, src, LDRB_w, option); |
| } |
| |
| |
| void Assembler::sturb(const Register& rt, const MemOperand& dst, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(option != RequireScaledOffset); |
| VIXL_ASSERT(option != PreferScaledOffset); |
| LoadStore(rt, dst, STRB_w, option); |
| } |
| |
| |
| void Assembler::ldursb(const Register& rt, const MemOperand& src, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(option != RequireScaledOffset); |
| VIXL_ASSERT(option != PreferScaledOffset); |
| LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option); |
| } |
| |
| |
| void Assembler::ldurh(const Register& rt, const MemOperand& src, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(option != RequireScaledOffset); |
| VIXL_ASSERT(option != PreferScaledOffset); |
| LoadStore(rt, src, LDRH_w, option); |
| } |
| |
| |
| void Assembler::sturh(const Register& rt, const MemOperand& dst, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(option != RequireScaledOffset); |
| VIXL_ASSERT(option != PreferScaledOffset); |
| LoadStore(rt, dst, STRH_w, option); |
| } |
| |
| |
| void Assembler::ldursh(const Register& rt, const MemOperand& src, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(option != RequireScaledOffset); |
| VIXL_ASSERT(option != PreferScaledOffset); |
| LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w, option); |
| } |
| |
| |
| void Assembler::ldur(const CPURegister& rt, const MemOperand& src, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(option != RequireScaledOffset); |
| VIXL_ASSERT(option != PreferScaledOffset); |
| LoadStore(rt, src, LoadOpFor(rt), option); |
| } |
| |
| |
| void Assembler::stur(const CPURegister& rt, const MemOperand& dst, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(option != RequireScaledOffset); |
| VIXL_ASSERT(option != PreferScaledOffset); |
| LoadStore(rt, dst, StoreOpFor(rt), option); |
| } |
| |
| |
| void Assembler::ldursw(const Register& rt, const MemOperand& src, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(rt.Is64Bits()); |
| VIXL_ASSERT(option != RequireScaledOffset); |
| VIXL_ASSERT(option != PreferScaledOffset); |
| LoadStore(rt, src, LDRSW_x, option); |
| } |
| |
| |
| void Assembler::ldrsw(const Register& rt, int imm19) { |
| Emit(LDRSW_x_lit | ImmLLiteral(imm19) | Rt(rt)); |
| } |
| |
| |
| void Assembler::ldr(const CPURegister& rt, int imm19) { |
| LoadLiteralOp op = LoadLiteralOpFor(rt); |
| Emit(op | ImmLLiteral(imm19) | Rt(rt)); |
| } |
| |
| |
| void Assembler::prfm(PrefetchOperation op, int imm19) { |
| Emit(PRFM_lit | ImmPrefetchOperation(op) | ImmLLiteral(imm19)); |
| } |
| |
| |
| // Exclusive-access instructions. |
| void Assembler::stxrb(const Register& rs, |
| const Register& rt, |
| const MemOperand& dst) { |
| VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); |
| Emit(STXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base())); |
| } |
| |
| |
| void Assembler::stxrh(const Register& rs, |
| const Register& rt, |
| const MemOperand& dst) { |
| VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); |
| Emit(STXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base())); |
| } |
| |
| |
| void Assembler::stxr(const Register& rs, |
| const Register& rt, |
| const MemOperand& dst) { |
| VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); |
| LoadStoreExclusive op = rt.Is64Bits() ? STXR_x : STXR_w; |
| Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base())); |
| } |
| |
| |
| void Assembler::ldxrb(const Register& rt, |
| const MemOperand& src) { |
| VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); |
| Emit(LDXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base())); |
| } |
| |
| |
| void Assembler::ldxrh(const Register& rt, |
| const MemOperand& src) { |
| VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); |
| Emit(LDXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base())); |
| } |
| |
| |
| void Assembler::ldxr(const Register& rt, |
| const MemOperand& src) { |
| VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); |
| LoadStoreExclusive op = rt.Is64Bits() ? LDXR_x : LDXR_w; |
| Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base())); |
| } |
| |
| |
| void Assembler::stxp(const Register& rs, |
| const Register& rt, |
| const Register& rt2, |
| const MemOperand& dst) { |
| VIXL_ASSERT(rt.size() == rt2.size()); |
| VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); |
| LoadStoreExclusive op = rt.Is64Bits() ? STXP_x : STXP_w; |
| Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.base())); |
| } |
| |
| |
| void Assembler::ldxp(const Register& rt, |
| const Register& rt2, |
| const MemOperand& src) { |
| VIXL_ASSERT(rt.size() == rt2.size()); |
| VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); |
| LoadStoreExclusive op = rt.Is64Bits() ? LDXP_x : LDXP_w; |
| Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.base())); |
| } |
| |
| |
| void Assembler::stlxrb(const Register& rs, |
| const Register& rt, |
| const MemOperand& dst) { |
| VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); |
| Emit(STLXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base())); |
| } |
| |
| |
| void Assembler::stlxrh(const Register& rs, |
| const Register& rt, |
| const MemOperand& dst) { |
| VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); |
| Emit(STLXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base())); |
| } |
| |
| |
| void Assembler::stlxr(const Register& rs, |
| const Register& rt, |
| const MemOperand& dst) { |
| VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); |
| LoadStoreExclusive op = rt.Is64Bits() ? STLXR_x : STLXR_w; |
| Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base())); |
| } |
| |
| |
| void Assembler::ldaxrb(const Register& rt, |
| const MemOperand& src) { |
| VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); |
| Emit(LDAXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base())); |
| } |
| |
| |
| void Assembler::ldaxrh(const Register& rt, |
| const MemOperand& src) { |
| VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); |
| Emit(LDAXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base())); |
| } |
| |
| |
| void Assembler::ldaxr(const Register& rt, |
| const MemOperand& src) { |
| VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); |
| LoadStoreExclusive op = rt.Is64Bits() ? LDAXR_x : LDAXR_w; |
| Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base())); |
| } |
| |
| |
| void Assembler::stlxp(const Register& rs, |
| const Register& rt, |
| const Register& rt2, |
| const MemOperand& dst) { |
| VIXL_ASSERT(rt.size() == rt2.size()); |
| VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); |
| LoadStoreExclusive op = rt.Is64Bits() ? STLXP_x : STLXP_w; |
| Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.base())); |
| } |
| |
| |
| void Assembler::ldaxp(const Register& rt, |
| const Register& rt2, |
| const MemOperand& src) { |
| VIXL_ASSERT(rt.size() == rt2.size()); |
| VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); |
| LoadStoreExclusive op = rt.Is64Bits() ? LDAXP_x : LDAXP_w; |
| Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.base())); |
| } |
| |
| |
| void Assembler::stlrb(const Register& rt, |
| const MemOperand& dst) { |
| VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); |
| Emit(STLRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.base())); |
| } |
| |
| |
| void Assembler::stlrh(const Register& rt, |
| const MemOperand& dst) { |
| VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); |
| Emit(STLRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.base())); |
| } |
| |
| |
| void Assembler::stlr(const Register& rt, |
| const MemOperand& dst) { |
| VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0)); |
| LoadStoreExclusive op = rt.Is64Bits() ? STLR_x : STLR_w; |
| Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.base())); |
| } |
| |
| |
| void Assembler::ldarb(const Register& rt, |
| const MemOperand& src) { |
| VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); |
| Emit(LDARB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base())); |
| } |
| |
| |
| void Assembler::ldarh(const Register& rt, |
| const MemOperand& src) { |
| VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); |
| Emit(LDARH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base())); |
| } |
| |
| |
| void Assembler::ldar(const Register& rt, |
| const MemOperand& src) { |
| VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0)); |
| LoadStoreExclusive op = rt.Is64Bits() ? LDAR_x : LDAR_w; |
| Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base())); |
| } |
| |
| |
| void Assembler::prfm(PrefetchOperation op, const MemOperand& address, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(option != RequireUnscaledOffset); |
| VIXL_ASSERT(option != PreferUnscaledOffset); |
| Prefetch(op, address, option); |
| } |
| |
| |
| void Assembler::prfum(PrefetchOperation op, const MemOperand& address, |
| LoadStoreScalingOption option) { |
| VIXL_ASSERT(option != RequireScaledOffset); |
| VIXL_ASSERT(option != PreferScaledOffset); |
| Prefetch(op, address, option); |
| } |
| |
| |
| void Assembler::sys(int op1, int crn, int crm, int op2, const Register& rt) { |
| Emit(SYS | ImmSysOp1(op1) | CRn(crn) | CRm(crm) | ImmSysOp2(op2) | Rt(rt)); |
| } |
| |
| |
| void Assembler::sys(int op, const Register& rt) { |
| Emit(SYS | SysOp(op) | Rt(rt)); |
| } |
| |
| |
| void Assembler::dc(DataCacheOp op, const Register& rt) { |
| VIXL_ASSERT((op == CVAC) || (op == CVAU) || (op == CIVAC) || (op == ZVA)); |
| sys(op, rt); |
| } |
| |
| |
| void Assembler::ic(InstructionCacheOp op, const Register& rt) { |
| VIXL_ASSERT(op == IVAU); |
| sys(op, rt); |
| } |
| |
| |
| // NEON structure loads and stores. |
| Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) { |
| Instr addr_field = RnSP(addr.base()); |
| |
| if (addr.IsPostIndex()) { |
| VIXL_STATIC_ASSERT(NEONLoadStoreMultiStructPostIndex == |
| static_cast<NEONLoadStoreMultiStructPostIndexOp>( |
| NEONLoadStoreSingleStructPostIndex)); |
| |
| addr_field |= NEONLoadStoreMultiStructPostIndex; |
| if (addr.offset() == 0) { |
| addr_field |= RmNot31(addr.regoffset()); |
| } else { |
| // The immediate post index addressing mode is indicated by rm = 31. |
| // The immediate is implied by the number of vector registers used. |
| addr_field |= (0x1f << Rm_offset); |
| } |
| } else { |
| VIXL_ASSERT(addr.IsImmediateOffset() && (addr.offset() == 0)); |
| } |
| return addr_field; |
| } |
| |
| void Assembler::LoadStoreStructVerify(const VRegister& vt, |
| const MemOperand& addr, |
| Instr op) { |
| #ifdef DEBUG |
  // Assert that the addressing mode is either offset (with an immediate of 0),
  // post-index by an immediate equal to the size of the register list, or
  // post-index by a value held in a core register.
| if (addr.IsImmediateOffset()) { |
| VIXL_ASSERT(addr.offset() == 0); |
| } else { |
| int offset = vt.SizeInBytes(); |
| switch (op) { |
| case NEON_LD1_1v: |
| case NEON_ST1_1v: |
| offset *= 1; break; |
| case NEONLoadStoreSingleStructLoad1: |
| case NEONLoadStoreSingleStructStore1: |
| case NEON_LD1R: |
| offset = (offset / vt.lanes()) * 1; break; |
| |
| case NEON_LD1_2v: |
| case NEON_ST1_2v: |
| case NEON_LD2: |
| case NEON_ST2: |
| offset *= 2; |
| break; |
| case NEONLoadStoreSingleStructLoad2: |
| case NEONLoadStoreSingleStructStore2: |
| case NEON_LD2R: |
| offset = (offset / vt.lanes()) * 2; break; |
| |
| case NEON_LD1_3v: |
| case NEON_ST1_3v: |
| case NEON_LD3: |
| case NEON_ST3: |
| offset *= 3; break; |
| case NEONLoadStoreSingleStructLoad3: |
| case NEONLoadStoreSingleStructStore3: |
| case NEON_LD3R: |
| offset = (offset / vt.lanes()) * 3; break; |
| |
| case NEON_LD1_4v: |
| case NEON_ST1_4v: |
| case NEON_LD4: |
| case NEON_ST4: |
| offset *= 4; break; |
| case NEONLoadStoreSingleStructLoad4: |
| case NEONLoadStoreSingleStructStore4: |
| case NEON_LD4R: |
| offset = (offset / vt.lanes()) * 4; break; |
| default: |
| VIXL_UNREACHABLE(); |
| } |
| VIXL_ASSERT(!addr.regoffset().Is(NoReg) || |
| addr.offset() == offset); |
| } |
| #else |
| USE(vt, addr, op); |
| #endif |
| } |
| |
| void Assembler::LoadStoreStruct(const VRegister& vt, |
| const MemOperand& addr, |
| NEONLoadStoreMultiStructOp op) { |
| LoadStoreStructVerify(vt, addr, op); |
| VIXL_ASSERT(vt.IsVector() || vt.Is1D()); |
| Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt)); |
| } |
| |
| |
| void Assembler::LoadStoreStructSingleAllLanes(const VRegister& vt, |
| const MemOperand& addr, |
| NEONLoadStoreSingleStructOp op) { |
| LoadStoreStructVerify(vt, addr, op); |
| Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt)); |
| } |
| |
| |
| void Assembler::ld1(const VRegister& vt, |
| const MemOperand& src) { |
| LoadStoreStruct(vt, src, NEON_LD1_1v); |
| } |
| |
| |
| void Assembler::ld1(const VRegister& vt, |
| const VRegister& vt2, |
| const MemOperand& src) { |
| USE(vt2); |
| VIXL_ASSERT(AreSameFormat(vt, vt2)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2)); |
| LoadStoreStruct(vt, src, NEON_LD1_2v); |
| } |
| |
| |
| void Assembler::ld1(const VRegister& vt, |
| const VRegister& vt2, |
| const VRegister& vt3, |
| const MemOperand& src) { |
| USE(vt2, vt3); |
| VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); |
| LoadStoreStruct(vt, src, NEON_LD1_3v); |
| } |
| |
| |
| void Assembler::ld1(const VRegister& vt, |
| const VRegister& vt2, |
| const VRegister& vt3, |
| const VRegister& vt4, |
| const MemOperand& src) { |
| USE(vt2, vt3, vt4); |
| VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); |
| LoadStoreStruct(vt, src, NEON_LD1_4v); |
| } |
| |
| |
| void Assembler::ld2(const VRegister& vt, |
| const VRegister& vt2, |
| const MemOperand& src) { |
| USE(vt2); |
| VIXL_ASSERT(AreSameFormat(vt, vt2)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2)); |
| LoadStoreStruct(vt, src, NEON_LD2); |
| } |
| |
| |
| void Assembler::ld2(const VRegister& vt, |
| const VRegister& vt2, |
| int lane, |
| const MemOperand& src) { |
| USE(vt2); |
| VIXL_ASSERT(AreSameFormat(vt, vt2)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2)); |
| LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad2); |
| } |
| |
| |
| void Assembler::ld2r(const VRegister& vt, |
| const VRegister& vt2, |
| const MemOperand& src) { |
| USE(vt2); |
| VIXL_ASSERT(AreSameFormat(vt, vt2)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2)); |
| LoadStoreStructSingleAllLanes(vt, src, NEON_LD2R); |
| } |
| |
| |
| void Assembler::ld3(const VRegister& vt, |
| const VRegister& vt2, |
| const VRegister& vt3, |
| const MemOperand& src) { |
| USE(vt2, vt3); |
| VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); |
| LoadStoreStruct(vt, src, NEON_LD3); |
| } |
| |
| |
| void Assembler::ld3(const VRegister& vt, |
| const VRegister& vt2, |
| const VRegister& vt3, |
| int lane, |
| const MemOperand& src) { |
| USE(vt2, vt3); |
| VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); |
| LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad3); |
| } |
| |
| |
| void Assembler::ld3r(const VRegister& vt, |
| const VRegister& vt2, |
| const VRegister& vt3, |
| const MemOperand& src) { |
| USE(vt2, vt3); |
| VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); |
| LoadStoreStructSingleAllLanes(vt, src, NEON_LD3R); |
| } |
| |
| |
| void Assembler::ld4(const VRegister& vt, |
| const VRegister& vt2, |
| const VRegister& vt3, |
| const VRegister& vt4, |
| const MemOperand& src) { |
| USE(vt2, vt3, vt4); |
| VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); |
| LoadStoreStruct(vt, src, NEON_LD4); |
| } |
| |
| |
| void Assembler::ld4(const VRegister& vt, |
| const VRegister& vt2, |
| const VRegister& vt3, |
| const VRegister& vt4, |
| int lane, |
| const MemOperand& src) { |
| USE(vt2, vt3, vt4); |
| VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); |
| LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad4); |
| } |
| |
| |
| void Assembler::ld4r(const VRegister& vt, |
| const VRegister& vt2, |
| const VRegister& vt3, |
| const VRegister& vt4, |
| const MemOperand& src) { |
| USE(vt2, vt3, vt4); |
| VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); |
| LoadStoreStructSingleAllLanes(vt, src, NEON_LD4R); |
| } |
| |
| |
| void Assembler::st1(const VRegister& vt, |
| const MemOperand& src) { |
| LoadStoreStruct(vt, src, NEON_ST1_1v); |
| } |
| |
| |
| void Assembler::st1(const VRegister& vt, |
| const VRegister& vt2, |
| const MemOperand& src) { |
| USE(vt2); |
| VIXL_ASSERT(AreSameFormat(vt, vt2)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2)); |
| LoadStoreStruct(vt, src, NEON_ST1_2v); |
| } |
| |
| |
| void Assembler::st1(const VRegister& vt, |
| const VRegister& vt2, |
| const VRegister& vt3, |
| const MemOperand& src) { |
| USE(vt2, vt3); |
| VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); |
| LoadStoreStruct(vt, src, NEON_ST1_3v); |
| } |
| |
| |
| void Assembler::st1(const VRegister& vt, |
| const VRegister& vt2, |
| const VRegister& vt3, |
| const VRegister& vt4, |
| const MemOperand& src) { |
| USE(vt2, vt3, vt4); |
| VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); |
| LoadStoreStruct(vt, src, NEON_ST1_4v); |
| } |
| |
| |
| void Assembler::st2(const VRegister& vt, |
| const VRegister& vt2, |
| const MemOperand& dst) { |
| USE(vt2); |
| VIXL_ASSERT(AreSameFormat(vt, vt2)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2)); |
| LoadStoreStruct(vt, dst, NEON_ST2); |
| } |
| |
| |
| void Assembler::st2(const VRegister& vt, |
| const VRegister& vt2, |
| int lane, |
| const MemOperand& dst) { |
| USE(vt2); |
| VIXL_ASSERT(AreSameFormat(vt, vt2)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2)); |
| LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore2); |
| } |
| |
| |
| void Assembler::st3(const VRegister& vt, |
| const VRegister& vt2, |
| const VRegister& vt3, |
| const MemOperand& dst) { |
| USE(vt2, vt3); |
| VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); |
| LoadStoreStruct(vt, dst, NEON_ST3); |
| } |
| |
| |
| void Assembler::st3(const VRegister& vt, |
| const VRegister& vt2, |
| const VRegister& vt3, |
| int lane, |
| const MemOperand& dst) { |
| USE(vt2, vt3); |
| VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); |
| LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore3); |
| } |
| |
| |
| void Assembler::st4(const VRegister& vt, |
| const VRegister& vt2, |
| const VRegister& vt3, |
| const VRegister& vt4, |
| const MemOperand& dst) { |
| USE(vt2, vt3, vt4); |
| VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); |
| LoadStoreStruct(vt, dst, NEON_ST4); |
| } |
| |
| |
| void Assembler::st4(const VRegister& vt, |
| const VRegister& vt2, |
| const VRegister& vt3, |
| const VRegister& vt4, |
| int lane, |
| const MemOperand& dst) { |
| USE(vt2, vt3, vt4); |
| VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); |
| VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); |
| LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore4); |
| } |
| |
| |
| void Assembler::LoadStoreStructSingle(const VRegister& vt, |
| uint32_t lane, |
| const MemOperand& addr, |
| NEONLoadStoreSingleStructOp op) { |
| LoadStoreStructVerify(vt, addr, op); |
| |
| // We support vt arguments of the form vt.VxT() or vt.T(), where x is the |
| // number of lanes, and T is b, h, s or d. |
| unsigned lane_size = vt.LaneSizeInBytes(); |
| VIXL_ASSERT(lane < (kQRegSizeInBytes / lane_size)); |
| |
| // Lane size is encoded in the opcode field. Lane index is encoded in the Q, |
| // S and size fields. |
| lane *= lane_size; |
| if (lane_size == 8) lane++; |
| |
| Instr size = (lane << NEONLSSize_offset) & NEONLSSize_mask; |
| Instr s = (lane << (NEONS_offset - 2)) & NEONS_mask; |
| Instr q = (lane << (NEONQ_offset - 3)) & NEONQ_mask; |
| |
| Instr instr = op; |
| switch (lane_size) { |
| case 1: instr |= NEONLoadStoreSingle_b; break; |
| case 2: instr |= NEONLoadStoreSingle_h; break; |
| case 4: instr |= NEONLoadStoreSingle_s; break; |
| default: |
| VIXL_ASSERT(lane_size == 8); |
| instr |= NEONLoadStoreSingle_d; |
| } |
| |
| Emit(instr | LoadStoreStructAddrModeField(addr) | q | size | s | Rt(vt)); |
| } |
| |
| |
| void Assembler::ld1(const VRegister& vt, |
| int lane, |
| const MemOperand& src) { |
| LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad1); |
| } |
| |
| |
| void Assembler::ld1r(const VRegister& vt, |
| const MemOperand& src) { |
| LoadStoreStructSingleAllLanes(vt, src, NEON_LD1R); |
| } |
| |
| |
| void Assembler::st1(const VRegister& vt, |
| int lane, |
| const MemOperand& dst) { |
| LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore1); |
| } |
| |
| |
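| // '3 different' long form: the destination element size is twice the source |
| // element size (for example, 4H sources producing a 4S destination). |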
| void Assembler::NEON3DifferentL(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm, |
| NEON3DifferentOp vop) { |
| VIXL_ASSERT(AreSameFormat(vn, vm)); |
| VIXL_ASSERT((vn.Is1H() && vd.Is1S()) || |
| (vn.Is1S() && vd.Is1D()) || |
| (vn.Is8B() && vd.Is8H()) || |
| (vn.Is4H() && vd.Is4S()) || |
| (vn.Is2S() && vd.Is2D()) || |
| (vn.Is16B() && vd.Is8H()) || |
| (vn.Is8H() && vd.Is4S()) || |
| (vn.Is4S() && vd.Is2D())); |
| Instr format, op = vop; |
| if (vd.IsScalar()) { |
| op |= NEON_Q | NEONScalar; |
| format = SFormat(vn); |
| } else { |
| format = VFormat(vn); |
| } |
| Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd)); |
| } |
| |
| |
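| // '3 different' wide form: vd and vn use the wide element size while vm |
| // provides the narrow second operand (for example, 4S + 4H -> 4S). |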
| void Assembler::NEON3DifferentW(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm, |
| NEON3DifferentOp vop) { |
| VIXL_ASSERT(AreSameFormat(vd, vn)); |
| VIXL_ASSERT((vm.Is8B() && vd.Is8H()) || |
| (vm.Is4H() && vd.Is4S()) || |
| (vm.Is2S() && vd.Is2D()) || |
| (vm.Is16B() && vd.Is8H()) || |
| (vm.Is8H() && vd.Is4S()) || |
| (vm.Is4S() && vd.Is2D())); |
| Emit(VFormat(vm) | vop | Rm(vm) | Rn(vn) | Rd(vd)); |
| } |
| |
| |
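| // '3 different' narrowing ('high narrow') form: vd uses half the element |
| // size of vn and vm (for example, 4S + 4S -> 4H). |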
| void Assembler::NEON3DifferentHN(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm, |
| NEON3DifferentOp vop) { |
| VIXL_ASSERT(AreSameFormat(vm, vn)); |
| VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || |
| (vd.Is4H() && vn.Is4S()) || |
| (vd.Is2S() && vn.Is2D()) || |
| (vd.Is16B() && vn.Is8H()) || |
| (vd.Is8H() && vn.Is4S()) || |
| (vd.Is4S() && vn.Is2D())); |
| Emit(VFormat(vd) | vop | Rm(vm) | Rn(vn) | Rd(vd)); |
| } |
| |
| |
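| // Each V(name, opcode, assertion) entry below is expanded by the |
| // DEFINE_ASM_FUNC macro that follows into an Assembler method which checks |
| // the assertion and emits the long-form instruction. For example, the smull |
| // entry expands, in effect, to: |
| // |
| //   void Assembler::smull(const VRegister& vd, |
| //                         const VRegister& vn, |
| //                         const VRegister& vm) { |
| //     VIXL_ASSERT(vn.IsVector() && vn.IsD()); |
| //     NEON3DifferentL(vd, vn, vm, NEON_SMULL); |
| //   } |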
| #define NEON_3DIFF_LONG_LIST(V) \ |
| V(pmull, NEON_PMULL, vn.IsVector() && vn.Is8B()) \ |
| V(pmull2, NEON_PMULL2, vn.IsVector() && vn.Is16B()) \ |
| V(saddl, NEON_SADDL, vn.IsVector() && vn.IsD()) \ |
| V(saddl2, NEON_SADDL2, vn.IsVector() && vn.IsQ()) \ |
| V(sabal, NEON_SABAL, vn.IsVector() && vn.IsD()) \ |
| V(sabal2, NEON_SABAL2, vn.IsVector() && vn.IsQ()) \ |
| V(uabal, NEON_UABAL, vn.IsVector() && vn.IsD()) \ |
| V(uabal2, NEON_UABAL2, vn.IsVector() && vn.IsQ()) \ |
| V(sabdl, NEON_SABDL, vn.IsVector() && vn.IsD()) \ |
| V(sabdl2, NEON_SABDL2, vn.IsVector() && vn.IsQ()) \ |
| V(uabdl, NEON_UABDL, vn.IsVector() && vn.IsD()) \ |
| V(uabdl2, NEON_UABDL2, vn.IsVector() && vn.IsQ()) \ |
| V(smlal, NEON_SMLAL, vn.IsVector() && vn.IsD()) \ |
| V(smlal2, NEON_SMLAL2, vn.IsVector() && vn.IsQ()) \ |
| V(umlal, NEON_UMLAL, vn.IsVector() && vn.IsD()) \ |
| V(umlal2, NEON_UMLAL2, vn.IsVector() && vn.IsQ()) \ |
| V(smlsl, NEON_SMLSL, vn.IsVector() && vn.IsD()) \ |
| V(smlsl2, NEON_SMLSL2, vn.IsVector() && vn.IsQ()) \ |
| V(umlsl, NEON_UMLSL, vn.IsVector() && vn.IsD()) \ |
| V(umlsl2, NEON_UMLSL2, vn.IsVector() && vn.IsQ()) \ |
| V(smull, NEON_SMULL, vn.IsVector() && vn.IsD()) \ |
| V(smull2, NEON_SMULL2, vn.IsVector() && vn.IsQ()) \ |
| V(umull, NEON_UMULL, vn.IsVector() && vn.IsD()) \ |
| V(umull2, NEON_UMULL2, vn.IsVector() && vn.IsQ()) \ |
| V(ssubl, NEON_SSUBL, vn.IsVector() && vn.IsD()) \ |
| V(ssubl2, NEON_SSUBL2, vn.IsVector() && vn.IsQ()) \ |
| V(uaddl, NEON_UADDL, vn.IsVector() && vn.IsD()) \ |
| V(uaddl2, NEON_UADDL2, vn.IsVector() && vn.IsQ()) \ |
| V(usubl, NEON_USUBL, vn.IsVector() && vn.IsD()) \ |
| V(usubl2, NEON_USUBL2, vn.IsVector() && vn.IsQ()) \ |
| V(sqdmlal, NEON_SQDMLAL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \ |
| V(sqdmlal2, NEON_SQDMLAL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \ |
| V(sqdmlsl, NEON_SQDMLSL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \ |
| V(sqdmlsl2, NEON_SQDMLSL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \ |
| V(sqdmull, NEON_SQDMULL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \ |
| V(sqdmull2, NEON_SQDMULL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) |
| |
| |
| #define DEFINE_ASM_FUNC(FN, OP, AS) \ |
| void Assembler::FN(const VRegister& vd, \ |
| const VRegister& vn, \ |
| const VRegister& vm) { \ |
| VIXL_ASSERT(AS); \ |
| NEON3DifferentL(vd, vn, vm, OP); \ |
| } |
| NEON_3DIFF_LONG_LIST(DEFINE_ASM_FUNC) |
| #undef DEFINE_ASM_FUNC |
| |
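| // The narrowing ('high narrow') instructions follow the same expansion |
| // pattern, dispatching to NEON3DifferentHN instead. |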
| #define NEON_3DIFF_HN_LIST(V) \ |
| V(addhn, NEON_ADDHN, vd.IsD()) \ |
| V(addhn2, NEON_ADDHN2, vd.IsQ()) \ |
| V(raddhn, NEON_RADDHN, vd.IsD()) \ |
| V(raddhn2, NEON_RADDHN2, vd.IsQ()) \ |
| V(subhn, NEON_SUBHN, vd.IsD()) \ |
| V(subhn2, NEON_SUBHN2, vd.IsQ()) \ |
| V(rsubhn, NEON_RSUBHN, vd.IsD()) \ |
| V(rsubhn2, NEON_RSUBHN2, vd.IsQ()) |
| |
| #define DEFINE_ASM_FUNC(FN, OP, AS) \ |
| void Assembler::FN(const VRegister& vd, \ |
| const VRegister& vn, \ |
| const VRegister& vm) { \ |
| VIXL_ASSERT(AS); \ |
| NEON3DifferentHN(vd, vn, vm, OP); \ |
| } |
| NEON_3DIFF_HN_LIST(DEFINE_ASM_FUNC) |
| #undef DEFINE_ASM_FUNC |
| |
| void Assembler::uaddw(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm) { |
| VIXL_ASSERT(vm.IsD()); |
| NEON3DifferentW(vd, vn, vm, NEON_UADDW); |
| } |
| |
| |
| void Assembler::uaddw2(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm) { |
| VIXL_ASSERT(vm.IsQ()); |
| NEON3DifferentW(vd, vn, vm, NEON_UADDW2); |
| } |
| |
| |
| void Assembler::saddw(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm) { |
| VIXL_ASSERT(vm.IsD()); |
| NEON3DifferentW(vd, vn, vm, NEON_SADDW); |
| } |
| |
| |
| void Assembler::saddw2(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm) { |
| VIXL_ASSERT(vm.IsQ()); |
| NEON3DifferentW(vd, vn, vm, NEON_SADDW2); |
| } |
| |
| |
| void Assembler::usubw(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm) { |
| VIXL_ASSERT(vm.IsD()); |
| NEON3DifferentW(vd, vn, vm, NEON_USUBW); |
| } |
| |
| |
| void Assembler::usubw2(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm) { |
| VIXL_ASSERT(vm.IsQ()); |
| NEON3DifferentW(vd, vn, vm, NEON_USUBW2); |
| } |
| |
| |
| void Assembler::ssubw(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm) { |
| VIXL_ASSERT(vm.IsD()); |
| NEON3DifferentW(vd, vn, vm, NEON_SSUBW); |
| } |
| |
| |
| void Assembler::ssubw2(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm) { |
| VIXL_ASSERT(vm.IsQ()); |
| NEON3DifferentW(vd, vn, vm, NEON_SSUBW2); |
| } |
| |
| |
| void Assembler::mov(const Register& rd, const Register& rm) { |
| // Moves involving the stack pointer are encoded as 'add' with an immediate |
| // of zero, because register code 31 selects zr rather than sp in the 'orr' |
| // encoding. All other moves are encoded as 'orr' with the zero register as |
| // the first operand. |
| if (rd.IsSP() || rm.IsSP()) { |
| add(rd, rm, 0); |
| } else { |
| orr(rd, AppropriateZeroRegFor(rd), rm); |
| } |
| } |
| |
| |
| void Assembler::mvn(const Register& rd, const Operand& operand) { |
| orn(rd, AppropriateZeroRegFor(rd), operand); |
| } |
| |
| |
| void Assembler::mrs(const Register& rt, SystemRegister sysreg) { |
| VIXL_ASSERT(rt.Is64Bits()); |
| Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt)); |
| } |
| |
| |
| void Assembler::msr(SystemRegister sysreg, const Register& rt) { |
| VIXL_ASSERT(rt.Is64Bits()); |
| Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg)); |
| } |
| |
| |
| void Assembler::clrex(int imm4) { |
| Emit(CLREX | CRm(imm4)); |
| } |
| |
| |
| void Assembler::dmb(BarrierDomain domain, BarrierType type) { |
| Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type)); |
| } |
| |
| |
| void Assembler::dsb(BarrierDomain domain, BarrierType type) { |
| Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type)); |
| } |
| |
| |
| void Assembler::isb() { |
| Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll)); |
| } |
| |
| |
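| // FP immediates must be representable in the 8-bit modified-immediate form; |
| // FP64ToImm8/FP32ToImm8 and ImmFP64/ImmFP32 compute that encoding. The |
| // vector forms reuse the NEON modified-immediate encoding with cmode 0xf. |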
| void Assembler::fmov(const VRegister& vd, double imm) { |
| if (vd.IsScalar()) { |
| VIXL_ASSERT(vd.Is1D()); |
| Emit(FMOV_d_imm | Rd(vd) | ImmFP64(imm)); |
| } else { |
| VIXL_ASSERT(vd.Is2D()); |
| Instr op = NEONModifiedImmediate_MOVI | NEONModifiedImmediateOpBit; |
| Instr q = NEON_Q; |
| uint32_t encoded_imm = FP64ToImm8(imm); |
| Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd)); |
| } |
| } |
| |
| |
| void Assembler::fmov(const VRegister& vd, float imm) { |
| if (vd.IsScalar()) { |
| VIXL_ASSERT(vd.Is1S()); |
| Emit(FMOV_s_imm | Rd(vd) | ImmFP32(imm)); |
| } else { |
| VIXL_ASSERT(vd.Is2S() || vd.Is4S()); |
| Instr op = NEONModifiedImmediate_MOVI; |
| Instr q = vd.Is4S() ? NEON_Q : 0; |
| uint32_t encoded_imm = FP32ToImm8(imm); |
| Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd)); |
| } |
| } |
| |
| |
| void Assembler::fmov(const Register& rd, const VRegister& vn) { |
| VIXL_ASSERT(vn.Is1S() || vn.Is1D()); |
| VIXL_ASSERT(rd.size() == vn.size()); |
| FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd; |
| Emit(op | Rd(rd) | Rn(vn)); |
| } |
| |
| |
| void Assembler::fmov(const VRegister& vd, const Register& rn) { |
| VIXL_ASSERT(vd.Is1S() || vd.Is1D()); |
| VIXL_ASSERT(vd.size() == rn.size()); |
| FPIntegerConvertOp op = vd.Is32Bits() ? FMOV_sw : FMOV_dx; |
| Emit(op | Rd(vd) | Rn(rn)); |
| } |
| |
| |
| void Assembler::fmov(const VRegister& vd, const VRegister& vn) { |
| VIXL_ASSERT(vd.Is1S() || vd.Is1D()); |
| VIXL_ASSERT(vd.IsSameFormat(vn)); |
| Emit(FPType(vd) | FMOV | Rd(vd) | Rn(vn)); |
| } |
| |
| |
| void Assembler::fmov(const VRegister& vd, int index, const Register& rn) { |
| VIXL_ASSERT((index == 1) && vd.Is1D() && rn.IsX()); |
| USE(index); |
| Emit(FMOV_d1_x | Rd(vd) | Rn(rn)); |
| } |
| |
| |
| void Assembler::fmov(const Register& rd, const VRegister& vn, int index) { |
| VIXL_ASSERT((index == 1) && vn.Is1D() && rd.IsX()); |
| USE(index); |
| Emit(FMOV_x_d1 | Rd(rd) | Rn(vn)); |
| } |
| |
| |
| void Assembler::fmadd(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm, |
| const VRegister& va) { |
| FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FMADD_s : FMADD_d); |
| } |
| |
| |
| void Assembler::fmsub(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm, |
| const VRegister& va) { |
| FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FMSUB_s : FMSUB_d); |
| } |
| |
| |
| void Assembler::fnmadd(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm, |
| const VRegister& va) { |
| FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FNMADD_s : FNMADD_d); |
| } |
| |
| |
| void Assembler::fnmsub(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm, |
| const VRegister& va) { |
| FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FNMSUB_s : FNMSUB_d); |
| } |
| |
| |
| void Assembler::fnmul(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm) { |
| VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm)); |
| Instr op = vd.Is1S() ? FNMUL_s : FNMUL_d; |
| Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd)); |
| } |
| |
| |
| void Assembler::FPCompareMacro(const VRegister& vn, |
| double value, |
| FPTrapFlags trap) { |
| USE(value); |
| // Although the fcmp{e} instructions can strictly only take an immediate |
| // value of +0.0, we don't need to check for -0.0 because the sign of 0.0 |
| // doesn't affect the result of the comparison. |
| VIXL_ASSERT(value == 0.0); |
| VIXL_ASSERT(vn.Is1S() || vn.Is1D()); |
| Instr op = (trap == EnableTrap) ? FCMPE_zero : FCMP_zero; |
| Emit(FPType(vn) | op | Rn(vn)); |
| } |
| |
| |
| void Assembler::FPCompareMacro(const VRegister& vn, |
| const VRegister& vm, |
| FPTrapFlags trap) { |
| VIXL_ASSERT(vn.Is1S() || vn.Is1D()); |
| VIXL_ASSERT(vn.IsSameSizeAndType(vm)); |
| Instr op = (trap == EnableTrap) ? FCMPE : FCMP; |
| Emit(FPType(vn) | op | Rm(vm) | Rn(vn)); |
| } |
| |
| |
| void Assembler::fcmp(const VRegister& vn, |
| const VRegister& vm) { |
| FPCompareMacro(vn, vm, DisableTrap); |
| } |
| |
| |
| void Assembler::fcmpe(const VRegister& vn, |
| const VRegister& vm) { |
| FPCompareMacro(vn, vm, EnableTrap); |
| } |
| |
| |
| void Assembler::fcmp(const VRegister& vn, |
| double value) { |
| FPCompareMacro(vn, value, DisableTrap); |
| } |
| |
| |
| void Assembler::fcmpe(const VRegister& vn, |
| double value) { |
| FPCompareMacro(vn, value, EnableTrap); |
| } |
| |
| |
| void Assembler::FPCCompareMacro(const VRegister& vn, |
| const VRegister& vm, |
| StatusFlags nzcv, |
| Condition cond, |
| FPTrapFlags trap) { |
| VIXL_ASSERT(vn.Is1S() || vn.Is1D()); |
| VIXL_ASSERT(vn.IsSameSizeAndType(vm)); |
| Instr op = (trap == EnableTrap) ? FCCMPE : FCCMP; |
| Emit(FPType(vn) | op | Rm(vm) | Cond(cond) | Rn(vn) | Nzcv(nzcv)); |
| } |
| |
| |
| void Assembler::fccmp(const VRegister& vn, |
| const VRegister& vm, |
| StatusFlags nzcv, |
| Condition cond) { |
| FPCCompareMacro(vn, vm, nzcv, cond, DisableTrap); |
| } |
| |
| |
| void Assembler::fccmpe(const VRegister& vn, |
| const VRegister& vm, |
| StatusFlags nzcv, |
| Condition cond) { |
| FPCCompareMacro(vn, vm, nzcv, cond, EnableTrap); |
| } |
| |
| |
| void Assembler::fcsel(const VRegister& vd, |
| const VRegister& vn, |
| const VRegister& vm, |
| Condition cond) { |
| VIXL_ASSERT(vd.Is1S() || vd.Is1D()); |
| VIXL_ASSERT(AreSameFormat(vd, vn, vm)); |
| Emit(FPType(vd) | FCSEL | Rm(vm) | Cond(cond) | Rn(vn) | Rd(vd)); |
| } |
| |
| |
| void Assembler::NEONFPConvertToInt(const Register& rd, |
| const VRegister& vn, |
| Instr op) { |
| Emit(SF(rd) | FPType(vn) | op | Rn(vn) | Rd(rd)); |
| } |
| |
| |
| void Assembler::NEONFPConvertToInt(const VRegister& vd, |
| const VRegister& vn, |
| Instr op) { |
| if (vn.IsScalar()) { |
| VIXL_ASSERT((vd.Is1S() && vn.Is1S()) || (vd.Is1D() && vn.Is1D())); |
| op |= NEON_Q | NEONScalar; |
| } |
| Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd)); |
| } |
| |
| |
| void Assembler::fcvt(const VRegister& vd, |
| const VRegister& vn) { |
| FPDataProcessing1SourceOp op; |
| if (vd.Is1D()) { |
| VIXL_ASSERT(vn.Is1S() || vn.Is1H()); |
| op = vn.Is1S() ? FCVT_ds : FCVT_dh; |
| } else if (vd.Is1S()) { |
| VIXL_ASSERT(vn.Is1D() || vn.Is1H()); |
| op = vn.Is1D() ? FCVT_sd : FCVT_sh; |
| } else { |
| VIXL_ASSERT(vd.Is1H()); |
| VIXL_ASSERT(vn.Is1D() || vn.Is1S()); |
| op = vn.Is1D() ? FCVT_hd : FCVT_hs; |
| } |
| FPDataProcessing1Source(vd, vn, op); |
| } |
| |
| |
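| // fcvtl/fcvtl2 widen the lower/upper half of vn to the next precision; |
| // fcvtn/fcvtn2 narrow vn into the lower/upper half of vd. |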
| void Assembler::fcvtl(const VRegister& vd, |
| const VRegister& vn) { |
| VIXL_ASSERT((vd.Is4S() && vn.Is4H()) || |
| (vd.Is2D() && vn.Is2S())); |
| Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0; |
| Emit(format | NEON_FCVTL | Rn(vn) | Rd(vd)); |
| } |
| |
| |
| void Assembler::fcvtl2(const VRegister& vd, |
| const VRegister& vn) { |
| VIXL_ASSERT((vd.Is4S() && vn.Is8H()) || |
| (vd.Is2D() && vn.Is4S())); |
| Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0; |
| Emit(NEON_Q | format | NEON_FCVTL | Rn(vn) | Rd(vd)); |
| } |
| |
| |
| void Assembler::fcvtn(const VRegister& vd, |
| const VRegister& vn) { |
| VIXL_ASSERT((vn.Is4S() && vd.Is4H()) || |
| (vn.Is2D() && vd.Is2S())); |
| Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0; |
| Emit(format | NEON_FCVTN | Rn(vn) | Rd(vd)); |
| } |
| |
| |
| void Assembler::fcvtn2(const VRegister& vd, |
| const VRegister& vn) { |
| VIXL_ASSERT((vn.Is4S() && vd.Is8H()) || |
| (vn.Is2D() && vd.Is4S())); |
| Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0; |
| Emit(NEON_Q | format | NEON_FCVTN | Rn(vn) | Rd(vd)); |
| } |
| |
| |
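| // fcvtxn narrows double to single precision using the round-to-odd mode, |
| // which avoids double rounding when the result is rounded again later. |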
| void Assembler::fcvtxn(const VRegister& vd, |
| const VRegister& vn) { |
| Instr format = 1 << NEONSize_offset; |
| if (vd.IsScalar()) { |
| VIXL_ASSERT(vd.Is1S() && vn.Is1D()); |
| Emit(format | NEON_FCVTXN_scalar | Rn(vn) | Rd(vd)); |
| } else { |
| VIXL_ASSERT(vd.Is2S() && vn.Is2D()); |
| Emit(format | NEON_FCVTXN | Rn(vn) | Rd(vd)); |
| } |
| } |
| |
| |
| void Assembler::fcvtxn2(const VRegister& vd, |
| const VRegister& vn) { |
| VIXL_ASSERT(vd.Is4S() && vn.Is2D()); |
| Instr format = 1 << NEONSize_offset; |
| Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd)); |
| } |
| |
| |
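| // Each entry below generates both a scalar form (general register |
| // destination) and a vector form of an FP-to-integer convert with a fixed |
| // rounding mode: N (to nearest, ties to even), P (towards +infinity), |
| // M (towards -infinity) and A (to nearest, ties away from zero), each in |
| // signed (S) and unsigned (U) flavours. |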
| #define NEON_FP2REGMISC_FCVT_LIST(V) \ |
| V(fcvtnu, NEON_FCVTNU, FCVTNU) \ |
| V(fcvtns, NEON_FCVTNS, FCVTNS) \ |
| V(fcvtpu, NEON_FCVTPU, FCVTPU) \ |
| V(fcvtps, NEON_FCVTPS, FCVTPS) \ |
| V(fcvtmu, NEON_FCVTMU, FCVTMU) \ |
| V(fcvtms, NEON_FCVTMS, FCVTMS) \ |
| V(fcvtau, NEON_FCVTAU, FCVTAU) \ |
| V(fcvtas, NEON_FCVTAS, FCVTAS) |
| |
| #define DEFINE_ASM_FUNCS(FN, VEC_OP, SCA_OP) \ |
| void Assembler::FN(const Register& rd, \ |
| const VRegister& vn) { \ |
| NEONFPConvertToInt(rd, vn, SCA_OP); \ |
| } \ |
| void Assembler::FN(const VRegister& vd, \ |
| const VRegister& vn) { \ |
| NEONFPConvertToInt(vd, vn, VEC_OP); \ |
| } |
| NEON_FP2REGMISC_FCVT_LIST(DEFINE_ASM_FUNCS) |
| #undef DEFINE_ASM_FUNCS |
| |
| |
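| // A non-zero fbits selects the fixed-point form: the integer result has |
| // fbits fractional bits, and the scale field encodes 64 - fbits. |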
| void Assembler::fcvtzs(const Register& rd, |
| const VRegister& vn, |
| int fbits) { |
| VIXL_ASSERT(vn.Is1S() || vn.Is1D()); |
| VIXL_ASSERT((fbits >= 0) && (fbits <= rd.SizeInBits())); |
| if (fbits == 0) { |
| Emit(SF(rd) | FPType(vn) | FCVTZS | Rn(vn) | Rd(rd)); |
| } else { |
| Emit(SF(rd) | FPType(vn) | FCVTZS_fixed | FPScale(64 - fbits) | Rn(vn) | |
| Rd(rd)); |
| } |
| } |
| |
| |
| void Assembler::fcvtzs(const VRegister& vd, |
| const VRegister& vn, |
| int fbits) { |
| VIXL_ASSERT(fbits >= 0); |
| if (fbits == 0) { |
| NEONFP2RegMisc(vd, vn, NEON_FCVTZS); |
| } else { |
| VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S()); |
| NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZS_imm); |
| } |
| } |
| |
| |
| void Assembler::fcvtzu(const Register& rd, |
| const VRegister& vn, |
| int fbits) { |
| VIXL_ASSERT(vn.Is1S() || vn.Is1D()); |
| VIXL_ASSERT((fbits >= 0) && (fbits <= rd.SizeInBits())); |
| if (fbits == 0) { |
| Emit(SF(rd) | FPType(vn) | FCVTZU | Rn(vn) | Rd(rd)); |
| } else { |
| Emit(SF(rd) | FPType(vn) | FCVTZU_fixed | FPScale(64 - fbits) | Rn(vn) | |
| Rd(rd)); |
| } |
| } |
| |
| |
| void Assembler::fcvtzu(const VRegister& vd, |
| const VRegister& vn, |
| int fbits) { |
| VIXL_ASSERT(fbits >= 0); |
| if (fbits == 0) { |
| NEONFP2RegMisc(vd, vn, NEON_FCVTZU); |
| } else { |
| VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S()); |
| NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZU_imm); |
| } |
| } |
| |
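| |
| // Unsigned integer or fixed-point to floating-point convert; as above, a |
| // non-zero fbits gives the number of fractional bits in the source. |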
| void Assembler::ucvtf(const VRegister& vd, |
| const VRegister& vn, |
| int fbits) { |
| VIXL_ASSERT(fbits >= 0); |
| if (fbits == 0) { |
| NEONFP2RegMisc(vd, vn, NEON_UCVTF); |
| } else { |
| VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S()); |
| NEONShiftRightImmediate(vd, vn, fbits, NEON_UCVTF_imm); |
| } |
| } |
|