//===-- PPCISelDAGToDAG.cpp - PPC --pattern matching inst selector --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a pattern matching instruction selector for PowerPC,
// converting from a legalized dag to a PPC dag.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/PPCMCTargetDesc.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCISelLowering.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <new>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "ppc-codegen"

STATISTIC(NumSextSetcc,
          "Number of (sext(setcc)) nodes expanded into GPR sequence.");
STATISTIC(NumZextSetcc,
          "Number of (zext(setcc)) nodes expanded into GPR sequence.");
STATISTIC(SignExtensionsAdded,
          "Number of sign extensions for compare inputs added.");
STATISTIC(ZeroExtensionsAdded,
          "Number of zero extensions for compare inputs added.");
STATISTIC(NumLogicOpsOnComparison,
          "Number of logical ops on i1 values calculated in GPR.");
STATISTIC(OmittedForNonExtendUses,
          "Number of compares not eliminated as they have non-extending uses.");

// FIXME: Remove this once the bug has been fixed!
cl::opt<bool> ANDIGlueBug("expose-ppc-andi-glue-bug",
                          cl::desc("expose the ANDI glue bug on PPC"), cl::Hidden);

static cl::opt<bool>
    UseBitPermRewriter("ppc-use-bit-perm-rewriter", cl::init(true),
                       cl::desc("use aggressive ppc isel for bit permutations"),
                       cl::Hidden);
static cl::opt<bool> BPermRewriterNoMasking(
    "ppc-bit-perm-rewriter-stress-rotates",
    cl::desc("stress rotate selection in aggressive ppc isel for "
             "bit permutations"),
    cl::Hidden);

static cl::opt<bool> EnableBranchHint(
    "ppc-use-branch-hint", cl::init(true),
    cl::desc("Enable static hinting of branches on ppc"),
    cl::Hidden);

static cl::opt<bool> EnableTLSOpt(
    "ppc-tls-opt", cl::init(true),
    cl::desc("Enable tls optimization peephole"),
    cl::Hidden);

enum ICmpInGPRType { ICGPR_All, ICGPR_None, ICGPR_I32, ICGPR_I64,
                     ICGPR_NonExtIn, ICGPR_Zext, ICGPR_Sext, ICGPR_ZextI32,
                     ICGPR_SextI32, ICGPR_ZextI64, ICGPR_SextI64 };

static cl::opt<ICmpInGPRType> CmpInGPR(
    "ppc-gpr-icmps", cl::Hidden, cl::init(ICGPR_All),
    cl::desc("Specify the types of comparisons to emit GPR-only code for."),
    cl::values(clEnumValN(ICGPR_None, "none", "Do not modify integer comparisons."),
               clEnumValN(ICGPR_All, "all", "All possible int comparisons in GPRs."),
               clEnumValN(ICGPR_I32, "i32", "Only i32 comparisons in GPRs."),
               clEnumValN(ICGPR_I64, "i64", "Only i64 comparisons in GPRs."),
               clEnumValN(ICGPR_NonExtIn, "nonextin",
                          "Only comparisons where inputs don't need [sz]ext."),
               clEnumValN(ICGPR_Zext, "zext", "Only comparisons with zext result."),
               clEnumValN(ICGPR_ZextI32, "zexti32",
                          "Only i32 comparisons with zext result."),
               clEnumValN(ICGPR_ZextI64, "zexti64",
                          "Only i64 comparisons with zext result."),
               clEnumValN(ICGPR_Sext, "sext", "Only comparisons with sext result."),
               clEnumValN(ICGPR_SextI32, "sexti32",
                          "Only i32 comparisons with sext result."),
               clEnumValN(ICGPR_SextI64, "sexti64",
                          "Only i64 comparisons with sext result.")));
namespace {

//===--------------------------------------------------------------------===//
/// PPCDAGToDAGISel - PPC specific code to select PPC machine
/// instructions for SelectionDAG operations.
///
class PPCDAGToDAGISel : public SelectionDAGISel {
  const PPCTargetMachine &TM;
  const PPCSubtarget *PPCSubTarget;
  const PPCTargetLowering *PPCLowering;
  unsigned GlobalBaseReg;

public:
  explicit PPCDAGToDAGISel(PPCTargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel), TM(tm) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Make sure we re-emit a set of the global base reg if necessary
    GlobalBaseReg = 0;
    PPCSubTarget = &MF.getSubtarget<PPCSubtarget>();
    PPCLowering = PPCSubTarget->getTargetLowering();
    SelectionDAGISel::runOnMachineFunction(MF);

    if (!PPCSubTarget->isSVR4ABI())
      InsertVRSaveCode(MF);

    return true;
  }

  void PreprocessISelDAG() override;
  void PostprocessISelDAG() override;

  /// getI16Imm - Return a target constant with the specified value, of type
  /// i16.
  inline SDValue getI16Imm(unsigned Imm, const SDLoc &dl) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i16);
  }

  /// getI32Imm - Return a target constant with the specified value, of type
  /// i32.
  inline SDValue getI32Imm(unsigned Imm, const SDLoc &dl) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
  }

  /// getI64Imm - Return a target constant with the specified value, of type
  /// i64.
  inline SDValue getI64Imm(uint64_t Imm, const SDLoc &dl) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i64);
  }

  /// getSmallIPtrImm - Return a target constant of pointer type.
  inline SDValue getSmallIPtrImm(unsigned Imm, const SDLoc &dl) {
    return CurDAG->getTargetConstant(
        Imm, dl, PPCLowering->getPointerTy(CurDAG->getDataLayout()));
  }

  /// isRotateAndMask - Returns true if Mask and Shift can be folded into a
  /// single rotate-and-mask instruction.
  static bool isRotateAndMask(SDNode *N, unsigned Mask, bool isShiftMask,
                              unsigned &SH, unsigned &MB, unsigned &ME);

  /// getGlobalBaseReg - insert code into the entry mbb to materialize the PIC
  /// base register. Return the virtual register that holds this value.
  SDNode *getGlobalBaseReg();

  void selectFrameIndex(SDNode *SN, SDNode *N, unsigned Offset = 0);

  // Select - Convert the specified operand from a target-independent to a
  // target-specific node if it hasn't already been changed.
  void Select(SDNode *N) override;

  bool tryBitfieldInsert(SDNode *N);
  bool tryBitPermutation(SDNode *N);
  bool tryIntCompareInGPR(SDNode *N);

  // tryTLSXFormLoad - Convert an ISD::LOAD fed by a PPCISD::ADD_TLS into
  // an X-Form load instruction with the offset being a relocation coming from
  // the PPCISD::ADD_TLS.
  bool tryTLSXFormLoad(LoadSDNode *N);
  // tryTLSXFormStore - Convert an ISD::STORE fed by a PPCISD::ADD_TLS into
  // an X-Form store instruction with the offset being a relocation coming from
  // the PPCISD::ADD_TLS.
  bool tryTLSXFormStore(StoreSDNode *N);
  /// SelectCC - Select a comparison of the specified values with the
  /// specified condition code, returning the CR# of the expression.
  SDValue SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                   const SDLoc &dl);

  /// SelectAddrImm - Returns true if the address N can be represented by
  /// a base register plus a signed 16-bit displacement [r+imm].
  bool SelectAddrImm(SDValue N, SDValue &Disp,
                     SDValue &Base) {
    return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, 0);
  }

  /// SelectAddrImmOffs - Return true if the operand is valid for a preinc
  /// immediate field. Note that the operand at this point is already the
  /// result of a prior SelectAddressRegImm call.
  bool SelectAddrImmOffs(SDValue N, SDValue &Out) const {
    if (N.getOpcode() == ISD::TargetConstant ||
        N.getOpcode() == ISD::TargetGlobalAddress) {
      Out = N;
      return true;
    }

    return false;
  }

  /// SelectAddrIdx - Given the specified address, check to see if it can be
  /// represented as an indexed [r+r] operation. Returns false if it can
  /// be represented by [r+imm], which is preferred.
  bool SelectAddrIdx(SDValue N, SDValue &Base, SDValue &Index) {
    return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG);
  }

  /// SelectAddrIdxOnly - Given the specified address, force it to be
  /// represented as an indexed [r+r] operation.
  bool SelectAddrIdxOnly(SDValue N, SDValue &Base, SDValue &Index) {
    return PPCLowering->SelectAddressRegRegOnly(N, Base, Index, *CurDAG);
  }

  /// SelectAddrImmX4 - Returns true if the address N can be represented by
  /// a base register plus a signed 16-bit displacement that is a multiple of 4.
  /// Suitable for use by STD and friends.
  bool SelectAddrImmX4(SDValue N, SDValue &Disp, SDValue &Base) {
    return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, 4);
  }

  bool SelectAddrImmX16(SDValue N, SDValue &Disp, SDValue &Base) {
    return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, 16);
  }

  // Select an address into a single register.
  bool SelectAddr(SDValue N, SDValue &Base) {
    Base = N;
    return true;
  }

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions. It is always correct to compute the value into
  /// a register. The case of adding a (possibly relocatable) constant to a
  /// register can be improved, but it is wrong to substitute Reg+Reg for
  /// Reg in an asm, because the load or store opcode would have to change.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override {
    switch(ConstraintID) {
    default:
      errs() << "ConstraintID: " << ConstraintID << "\n";
      llvm_unreachable("Unexpected asm memory constraint");
    case InlineAsm::Constraint_es:
    case InlineAsm::Constraint_i:
    case InlineAsm::Constraint_m:
    case InlineAsm::Constraint_o:
    case InlineAsm::Constraint_Q:
    case InlineAsm::Constraint_Z:
    case InlineAsm::Constraint_Zy:
      // We need to make sure that this one operand does not end up in r0
      // (because we might end up lowering this as 0(%op)).
      const TargetRegisterInfo *TRI = PPCSubTarget->getRegisterInfo();
      const TargetRegisterClass *TRC = TRI->getPointerRegClass(*MF, /*Kind=*/1);
      SDLoc dl(Op);
      SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i32);
      SDValue NewOp =
        SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                       dl, Op.getValueType(),
                                       Op, RC), 0);

      OutOps.push_back(NewOp);
      return false;
    }
    return true;
  }

  void InsertVRSaveCode(MachineFunction &MF);

  StringRef getPassName() const override {
    return "PowerPC DAG->DAG Pattern Instruction Selection";
  }

// Include the pieces autogenerated from the target description.
#include "PPCGenDAGISel.inc"

private:
  bool trySETCC(SDNode *N);

  void PeepholePPC64();
  void PeepholePPC64ZExt();
  void PeepholeCROps();

  SDValue combineToCMPB(SDNode *N);
  void foldBoolExts(SDValue &Res, SDNode *&N);

  bool AllUsersSelectZero(SDNode *N);
  void SwapAllSelectUsers(SDNode *N);

  bool isOffsetMultipleOf(SDNode *N, unsigned Val) const;
  void transferMemOperands(SDNode *N, SDNode *Result);
  MachineSDNode *flipSignBit(const SDValue &N, SDNode **SignBit = nullptr);
};

} // end anonymous namespace

/// InsertVRSaveCode - Once the entire function has been instruction selected,
/// all virtual registers are created and all machine instructions are built,
/// check to see if we need to save/restore VRSAVE. If so, do it.
void PPCDAGToDAGISel::InsertVRSaveCode(MachineFunction &Fn) {
  // Check to see if this function uses vector registers, which means we have to
  // save and restore the VRSAVE register and update it with the regs we use.
  //
  // In this case, there will be virtual registers of vector type created
  // by the scheduler. Detect them now.
  bool HasVectorVReg = false;
  for (unsigned i = 0, e = RegInfo->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    if (RegInfo->getRegClass(Reg) == &PPC::VRRCRegClass) {
      HasVectorVReg = true;
      break;
    }
  }
  if (!HasVectorVReg) return; // nothing to do.

  // If we have a vector register, we want to emit code into the entry and exit
  // blocks to save and restore the VRSAVE register. We do this here (instead
  // of marking all vector instructions as clobbering VRSAVE) for two reasons:
  //
  // 1. This (trivially) reduces the load on the register allocator, by not
  //    having to represent the live range of the VRSAVE register.
  // 2. This (more significantly) allows us to create a temporary virtual
  //    register to hold the saved VRSAVE value, allowing this temporary to be
  //    register allocated, instead of forcing it to be spilled to the stack.

  // Create two vregs - one to hold the VRSAVE register that is live-in to the
  // function and one for the value after having bits or'd into it.
  unsigned InVRSAVE = RegInfo->createVirtualRegister(&PPC::GPRCRegClass);
  unsigned UpdatedVRSAVE = RegInfo->createVirtualRegister(&PPC::GPRCRegClass);

  const TargetInstrInfo &TII = *PPCSubTarget->getInstrInfo();
  MachineBasicBlock &EntryBB = *Fn.begin();
  DebugLoc dl;
  // Emit the following code into the entry block:
  // InVRSAVE = MFVRSAVE
  // UpdatedVRSAVE = UPDATE_VRSAVE InVRSAVE
  // MTVRSAVE UpdatedVRSAVE
  MachineBasicBlock::iterator IP = EntryBB.begin(); // Insert Point
  BuildMI(EntryBB, IP, dl, TII.get(PPC::MFVRSAVE), InVRSAVE);
  BuildMI(EntryBB, IP, dl, TII.get(PPC::UPDATE_VRSAVE),
          UpdatedVRSAVE).addReg(InVRSAVE);
  BuildMI(EntryBB, IP, dl, TII.get(PPC::MTVRSAVE)).addReg(UpdatedVRSAVE);

  // Find all return blocks, outputting a restore in each epilog.
  for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
    if (BB->isReturnBlock()) {
      IP = BB->end(); --IP;

      // Skip over all terminator instructions, which are part of the return
      // sequence.
      MachineBasicBlock::iterator I2 = IP;
      while (I2 != BB->begin() && (--I2)->isTerminator())
        IP = I2;

      // Emit: MTVRSAVE InVRSave
      BuildMI(*BB, IP, dl, TII.get(PPC::MTVRSAVE)).addReg(InVRSAVE);
    }
  }
}

/// getGlobalBaseReg - Output the instructions required to put the
/// base address to use for accessing globals into a register.
///
SDNode *PPCDAGToDAGISel::getGlobalBaseReg() {
  if (!GlobalBaseReg) {
    const TargetInstrInfo &TII = *PPCSubTarget->getInstrInfo();
    // Insert the set of GlobalBaseReg into the first MBB of the function
    MachineBasicBlock &FirstMBB = MF->front();
    MachineBasicBlock::iterator MBBI = FirstMBB.begin();
    const Module *M = MF->getFunction().getParent();
    DebugLoc dl;

    if (PPCLowering->getPointerTy(CurDAG->getDataLayout()) == MVT::i32) {
      if (PPCSubTarget->isTargetELF()) {
        GlobalBaseReg = PPC::R30;
        if (M->getPICLevel() == PICLevel::SmallPIC) {
          BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MoveGOTtoLR));
          BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
          MF->getInfo<PPCFunctionInfo>()->setUsesPICBase(true);
        } else {
          BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR));
          BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
          unsigned TempReg = RegInfo->createVirtualRegister(&PPC::GPRCRegClass);
          BuildMI(FirstMBB, MBBI, dl,
                  TII.get(PPC::UpdateGBR), GlobalBaseReg)
              .addReg(TempReg, RegState::Define).addReg(GlobalBaseReg);
          MF->getInfo<PPCFunctionInfo>()->setUsesPICBase(true);
        }
      } else {
        GlobalBaseReg =
          RegInfo->createVirtualRegister(&PPC::GPRC_and_GPRC_NOR0RegClass);
        BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR));
        BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
      }
    } else {
      // We must ensure that this sequence is dominated by the prologue.
      // FIXME: This is a bit of a big hammer since we don't get the benefits
      // of shrink-wrapping whenever we emit this instruction. Considering
      // this is used in any function where we emit a jump table, this may be
      // a significant limitation. We should consider inserting this in the
      // block where it is used and then commoning this sequence up if it
      // appears in multiple places.
      // Note: on ISA 3.0 cores, we can use lnia (addpcis) instead of
      // MovePCtoLR8.
      MF->getInfo<PPCFunctionInfo>()->setShrinkWrapDisabled(true);
      GlobalBaseReg = RegInfo->createVirtualRegister(&PPC::G8RC_and_G8RC_NOX0RegClass);
      BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR8));
      BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR8), GlobalBaseReg);
    }
  }
  return CurDAG->getRegister(GlobalBaseReg,
                             PPCLowering->getPointerTy(CurDAG->getDataLayout()))
      .getNode();
}

/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
/// operand. If so, Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
    Imm = cast<ConstantSDNode>(N)->getZExtValue();
    return true;
  }
  return false;
}

/// isInt64Immediate - This method tests to see if the node is a 64-bit constant
/// operand. If so, Imm will receive the 64-bit value.
static bool isInt64Immediate(SDNode *N, uint64_t &Imm) {
  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i64) {
    Imm = cast<ConstantSDNode>(N)->getZExtValue();
    return true;
  }
  return false;
}

// isInt32Immediate - This method tests to see if the value is a 32-bit constant
// operand. If so, Imm will receive the 32-bit value.
static bool isInt32Immediate(SDValue N, unsigned &Imm) {
  return isInt32Immediate(N.getNode(), Imm);
}

/// isInt64Immediate - This method tests to see if the value is a 64-bit
/// constant operand. If so, Imm will receive the 64-bit value.
static bool isInt64Immediate(SDValue N, uint64_t &Imm) {
  return isInt64Immediate(N.getNode(), Imm);
}

static unsigned getBranchHint(unsigned PCC, FunctionLoweringInfo *FuncInfo,
                              const SDValue &DestMBB) {
  assert(isa<BasicBlockSDNode>(DestMBB));

  if (!FuncInfo->BPI) return PPC::BR_NO_HINT;

  const BasicBlock *BB = FuncInfo->MBB->getBasicBlock();
  const TerminatorInst *BBTerm = BB->getTerminator();

  if (BBTerm->getNumSuccessors() != 2) return PPC::BR_NO_HINT;

  const BasicBlock *TBB = BBTerm->getSuccessor(0);
  const BasicBlock *FBB = BBTerm->getSuccessor(1);

  auto TProb = FuncInfo->BPI->getEdgeProbability(BB, TBB);
  auto FProb = FuncInfo->BPI->getEdgeProbability(BB, FBB);

  // We only want to handle cases that are easy to predict statically, e.g. a
  // C++ throw statement that is very likely not taken, or a call to a function
  // that never returns, e.g. stdlib exit(). So we set a Threshold to filter
  // out the unwanted cases.
  //
  // Below is the LLVM branch weight table; we only want to handle cases 1
  // and 2.
  //
  // Case                   Taken:Nontaken   Example
  // 1. Unreachable         1048575:1        C++ throw, stdlib exit()
  // 2. Invoke-terminating  1:1048575
  // 3. Coldblock           4:64             __builtin_expect
  // 4. Loop Branch         124:4            For loop
  // 5. PH/ZH/FPH           20:12
  const uint32_t Threshold = 10000;

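  // A worked illustration (added here, not from the original source): for an
  // unreachable edge weighted 1:1048575 the hot/cold ratio is roughly 2^20,
  // which passes the Threshold test below, while __builtin_expect's 4:64
  // weighting is only a 16x ratio and is filtered out.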
  if (std::max(TProb, FProb) / Threshold < std::min(TProb, FProb))
    return PPC::BR_NO_HINT;

  LLVM_DEBUG(dbgs() << "Use branch hint for '" << FuncInfo->Fn->getName()
                    << "::" << BB->getName() << "'\n"
                    << " -> " << TBB->getName() << ": " << TProb << "\n"
                    << " -> " << FBB->getName() << ": " << FProb << "\n");

  const BasicBlockSDNode *BBDN = cast<BasicBlockSDNode>(DestMBB);

  // If the destination basic block is the false successor (FBB), swap the
  // branch probabilities, because we want 'TProb' to stand for the probability
  // of branching to the destination basic block.
  if (BBDN->getBasicBlock()->getBasicBlock() != TBB)
    std::swap(TProb, FProb);

  return (TProb > FProb) ? PPC::BR_TAKEN_HINT : PPC::BR_NONTAKEN_HINT;
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so, Imm will receive the 32-bit value.
static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
  return N->getOpcode() == Opc
         && isInt32Immediate(N->getOperand(1).getNode(), Imm);
}

void PPCDAGToDAGISel::selectFrameIndex(SDNode *SN, SDNode *N, unsigned Offset) {
  SDLoc dl(SN);
  int FI = cast<FrameIndexSDNode>(N)->getIndex();
  SDValue TFI = CurDAG->getTargetFrameIndex(FI, N->getValueType(0));
  unsigned Opc = N->getValueType(0) == MVT::i32 ? PPC::ADDI : PPC::ADDI8;
  if (SN->hasOneUse())
    CurDAG->SelectNodeTo(SN, Opc, N->getValueType(0), TFI,
                         getSmallIPtrImm(Offset, dl));
  else
    ReplaceNode(SN, CurDAG->getMachineNode(Opc, dl, N->getValueType(0), TFI,
                                           getSmallIPtrImm(Offset, dl)));
}

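// An illustrative sketch (added, not from the original source): for
// N = (x << 3) with Mask = 0xFFFFFFF8, the shift makes no mask bit
// indeterminate, so this returns SH = 3, MB = 0, ME = 28, and the caller can
// select a single rlwinm (rotate left 3, keep PPC bits 0 through 28).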
bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask,
                                      bool isShiftMask, unsigned &SH,
                                      unsigned &MB, unsigned &ME) {
  // Don't even go down this path for i64, since different logic will be
  // necessary for rldicl/rldicr/rldimi.
  if (N->getValueType(0) != MVT::i32)
    return false;

  unsigned Shift = 32;
  unsigned Indeterminate = ~0; // bit mask marking indeterminate results
  unsigned Opcode = N->getOpcode();
  if (N->getNumOperands() != 2 ||
      !isInt32Immediate(N->getOperand(1).getNode(), Shift) || (Shift > 31))
    return false;

  if (Opcode == ISD::SHL) {
    // apply shift left to mask if it comes first
    if (isShiftMask) Mask = Mask << Shift;
    // determine which bits are made indeterminate by shift
    Indeterminate = ~(0xFFFFFFFFu << Shift);
  } else if (Opcode == ISD::SRL) {
    // apply shift right to mask if it comes first
    if (isShiftMask) Mask = Mask >> Shift;
    // determine which bits are made indeterminate by shift
    Indeterminate = ~(0xFFFFFFFFu >> Shift);
    // adjust for the left rotate
    Shift = 32 - Shift;
  } else if (Opcode == ISD::ROTL) {
    Indeterminate = 0;
  } else {
    return false;
  }

  // if the mask doesn't intersect any Indeterminate bits
  if (Mask && !(Mask & Indeterminate)) {
    SH = Shift & 31;
    // make sure the mask is still a mask (wrap arounds may not be)
    return isRunOfOnes(Mask, MB, ME);
  }
  return false;
}

bool PPCDAGToDAGISel::tryTLSXFormStore(StoreSDNode *ST) {
  SDValue Base = ST->getBasePtr();
  if (Base.getOpcode() != PPCISD::ADD_TLS)
    return false;
  SDValue Offset = ST->getOffset();
  if (!Offset.isUndef())
    return false;

  SDLoc dl(ST);
  EVT MemVT = ST->getMemoryVT();
  EVT RegVT = ST->getValue().getValueType();

  unsigned Opcode;
  switch (MemVT.getSimpleVT().SimpleTy) {
  default:
    return false;
  case MVT::i8: {
    Opcode = (RegVT == MVT::i32) ? PPC::STBXTLS_32 : PPC::STBXTLS;
    break;
  }
  case MVT::i16: {
    Opcode = (RegVT == MVT::i32) ? PPC::STHXTLS_32 : PPC::STHXTLS;
    break;
  }
  case MVT::i32: {
    Opcode = (RegVT == MVT::i32) ? PPC::STWXTLS_32 : PPC::STWXTLS;
    break;
  }
  case MVT::i64: {
    Opcode = PPC::STDXTLS;
    break;
  }
  }
  SDValue Chain = ST->getChain();
  SDVTList VTs = ST->getVTList();
  SDValue Ops[] = {ST->getValue(), Base.getOperand(0), Base.getOperand(1),
                   Chain};
  SDNode *MN = CurDAG->getMachineNode(Opcode, dl, VTs, Ops);
  transferMemOperands(ST, MN);
  ReplaceNode(ST, MN);
  return true;
}

bool PPCDAGToDAGISel::tryTLSXFormLoad(LoadSDNode *LD) {
  SDValue Base = LD->getBasePtr();
  if (Base.getOpcode() != PPCISD::ADD_TLS)
    return false;
  SDValue Offset = LD->getOffset();
  if (!Offset.isUndef())
    return false;

  SDLoc dl(LD);
  EVT MemVT = LD->getMemoryVT();
  EVT RegVT = LD->getValueType(0);
  unsigned Opcode;
  switch (MemVT.getSimpleVT().SimpleTy) {
  default:
    return false;
  case MVT::i8: {
    Opcode = (RegVT == MVT::i32) ? PPC::LBZXTLS_32 : PPC::LBZXTLS;
    break;
  }
  case MVT::i16: {
    Opcode = (RegVT == MVT::i32) ? PPC::LHZXTLS_32 : PPC::LHZXTLS;
    break;
  }
  case MVT::i32: {
    Opcode = (RegVT == MVT::i32) ? PPC::LWZXTLS_32 : PPC::LWZXTLS;
    break;
  }
  case MVT::i64: {
    Opcode = PPC::LDXTLS;
    break;
  }
  }
  SDValue Chain = LD->getChain();
  SDVTList VTs = LD->getVTList();
  SDValue Ops[] = {Base.getOperand(0), Base.getOperand(1), Chain};
  SDNode *MN = CurDAG->getMachineNode(Opcode, dl, VTs, Ops);
  transferMemOperands(LD, MN);
  ReplaceNode(LD, MN);
  return true;
}

/// Turn an or of two masked values into the rotate left word immediate then
/// mask insert (rlwimi) instruction.
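/// For example (an added illustration): (a & 0xFFFFFF00) | (b & 0x000000FF)
/// can be selected as "rlwimi a, b, 0, 24, 31", inserting the low byte of b
/// into a with one instruction instead of two ands and an or.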
bool PPCDAGToDAGISel::tryBitfieldInsert(SDNode *N) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDLoc dl(N);

  KnownBits LKnown, RKnown;
  CurDAG->computeKnownBits(Op0, LKnown);
  CurDAG->computeKnownBits(Op1, RKnown);

  unsigned TargetMask = LKnown.Zero.getZExtValue();
  unsigned InsertMask = RKnown.Zero.getZExtValue();

  if ((TargetMask | InsertMask) == 0xFFFFFFFF) {
    unsigned Op0Opc = Op0.getOpcode();
    unsigned Op1Opc = Op1.getOpcode();
    unsigned Value, SH = 0;
    TargetMask = ~TargetMask;
    InsertMask = ~InsertMask;

    // If the LHS has a foldable shift and the RHS does not, then swap it to the
    // RHS so that we can fold the shift into the insert.
    if (Op0Opc == ISD::AND && Op1Opc == ISD::AND) {
      if (Op0.getOperand(0).getOpcode() == ISD::SHL ||
          Op0.getOperand(0).getOpcode() == ISD::SRL) {
        if (Op1.getOperand(0).getOpcode() != ISD::SHL &&
            Op1.getOperand(0).getOpcode() != ISD::SRL) {
          std::swap(Op0, Op1);
          std::swap(Op0Opc, Op1Opc);
          std::swap(TargetMask, InsertMask);
        }
      }
    } else if (Op0Opc == ISD::SHL || Op0Opc == ISD::SRL) {
      if (Op1Opc == ISD::AND && Op1.getOperand(0).getOpcode() != ISD::SHL &&
          Op1.getOperand(0).getOpcode() != ISD::SRL) {
        std::swap(Op0, Op1);
        std::swap(Op0Opc, Op1Opc);
        std::swap(TargetMask, InsertMask);
      }
    }

    unsigned MB, ME;
    if (isRunOfOnes(InsertMask, MB, ME)) {
      if ((Op1Opc == ISD::SHL || Op1Opc == ISD::SRL) &&
          isInt32Immediate(Op1.getOperand(1), Value)) {
        Op1 = Op1.getOperand(0);
        SH = (Op1Opc == ISD::SHL) ? Value : 32 - Value;
      }
      if (Op1Opc == ISD::AND) {
        // The AND mask might not be a constant, and we need to make sure that
        // if we're going to fold the masking with the insert, all bits not
        // known to be zero in the mask are known to be one.
        KnownBits MKnown;
        CurDAG->computeKnownBits(Op1.getOperand(1), MKnown);
        bool CanFoldMask = InsertMask == MKnown.One.getZExtValue();

        unsigned SHOpc = Op1.getOperand(0).getOpcode();
        if ((SHOpc == ISD::SHL || SHOpc == ISD::SRL) && CanFoldMask &&
            isInt32Immediate(Op1.getOperand(0).getOperand(1), Value)) {
          // Note that Value must be in range here (less than 32) because
          // otherwise there would not be any bits set in InsertMask.
          Op1 = Op1.getOperand(0).getOperand(0);
          SH = (SHOpc == ISD::SHL) ? Value : 32 - Value;
        }
      }

      SH &= 31;
      SDValue Ops[] = { Op0, Op1, getI32Imm(SH, dl), getI32Imm(MB, dl),
                        getI32Imm(ME, dl) };
      ReplaceNode(N, CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops));
      return true;
    }
  }
  return false;
}

// Predict the number of instructions that would be generated by calling
// selectI64Imm(N).
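// An illustrative walk-through (added, not from the original source):
// Imm = 0x0000123456780000 has 19 trailing zeros, and the shifted value
// 0x2468ACF still needs LIS8 + ORI8, plus one RLDICR to shift it back into
// place: 3 instructions in total.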
static unsigned selectI64ImmInstrCountDirect(int64_t Imm) {
  // Assume no remaining bits.
  unsigned Remainder = 0;
  // Assume no shift required.
  unsigned Shift = 0;

  // If it can't be represented as a 32 bit value.
  if (!isInt<32>(Imm)) {
    Shift = countTrailingZeros<uint64_t>(Imm);
    int64_t ImmSh = static_cast<uint64_t>(Imm) >> Shift;

    // If the shifted value fits 32 bits.
    if (isInt<32>(ImmSh)) {
      // Go with the shifted value.
      Imm = ImmSh;
    } else {
      // Still stuck with a 64 bit value.
      Remainder = Imm;
      Shift = 32;
      Imm >>= 32;
    }
  }

  // Intermediate operand.
  unsigned Result = 0;

  // Handle first 32 bits.
  unsigned Lo = Imm & 0xFFFF;

  // Simple value.
  if (isInt<16>(Imm)) {
    // Just the Lo bits.
    ++Result;
  } else if (Lo) {
    // Handle the Hi bits and Lo bits.
    Result += 2;
  } else {
    // Just the Hi bits.
    ++Result;
  }

  // If no shift, we're done.
  if (!Shift) return Result;

  // If Hi word == Lo word,
  // we can use rldimi to insert the Lo word into Hi word.
  if ((unsigned)(Imm & 0xFFFFFFFF) == Remainder) {
    ++Result;
    return Result;
  }

  // Shift for next step if the upper 32-bits were not zero.
  if (Imm)
    ++Result;

  // Add in the last bits as required.
  if ((Remainder >> 16) & 0xFFFF)
    ++Result;
  if (Remainder & 0xFFFF)
    ++Result;

  return Result;
}

static uint64_t Rot64(uint64_t Imm, unsigned R) {
  return (Imm << R) | (Imm >> (64 - R));
}

static unsigned selectI64ImmInstrCount(int64_t Imm) {
  unsigned Count = selectI64ImmInstrCountDirect(Imm);

  // If the instruction count is 1 or 2, we do not need further analysis
  // since rotate + load constant requires at least 2 instructions.
  if (Count <= 2)
    return Count;

  for (unsigned r = 1; r < 63; ++r) {
    uint64_t RImm = Rot64(Imm, r);
    unsigned RCount = selectI64ImmInstrCountDirect(RImm) + 1;
    Count = std::min(Count, RCount);

    // See comments in selectI64Imm for an explanation of the logic below.
    unsigned LS = findLastSet(RImm);
    if (LS != r-1)
      continue;

    uint64_t OnesMask = -(int64_t) (UINT64_C(1) << (LS+1));
    uint64_t RImmWithOnes = RImm | OnesMask;

    RCount = selectI64ImmInstrCountDirect(RImmWithOnes) + 1;
    Count = std::min(Count, RCount);
  }

  return Count;
}

// Select a 64-bit constant. For cost-modeling purposes,
// selectI64ImmInstrCountDirect (above) needs to be kept in sync with this
// function.
static SDNode *selectI64ImmDirect(SelectionDAG *CurDAG, const SDLoc &dl,
                                  int64_t Imm) {
  // Assume no remaining bits.
  unsigned Remainder = 0;
  // Assume no shift required.
  unsigned Shift = 0;

  // If it can't be represented as a 32 bit value.
  if (!isInt<32>(Imm)) {
    Shift = countTrailingZeros<uint64_t>(Imm);
    int64_t ImmSh = static_cast<uint64_t>(Imm) >> Shift;

    // If the shifted value fits 32 bits.
    if (isInt<32>(ImmSh)) {
      // Go with the shifted value.
      Imm = ImmSh;
    } else {
      // Still stuck with a 64 bit value.
      Remainder = Imm;
      Shift = 32;
      Imm >>= 32;
    }
  }

  // Intermediate operand.
  SDNode *Result;

  // Handle first 32 bits.
  unsigned Lo = Imm & 0xFFFF;
  unsigned Hi = (Imm >> 16) & 0xFFFF;

  auto getI32Imm = [CurDAG, dl](unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
  };

  // Simple value.
  if (isInt<16>(Imm)) {
    uint64_t SextImm = SignExtend64(Lo, 16);
    SDValue SDImm = CurDAG->getTargetConstant(SextImm, dl, MVT::i64);
    // Just the Lo bits.
    Result = CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64, SDImm);
  } else if (Lo) {
    // Handle the Hi bits.
    unsigned OpC = Hi ? PPC::LIS8 : PPC::LI8;
    Result = CurDAG->getMachineNode(OpC, dl, MVT::i64, getI32Imm(Hi));
    // And Lo bits.
    Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64,
                                    SDValue(Result, 0), getI32Imm(Lo));
  } else {
    // Just the Hi bits.
    Result = CurDAG->getMachineNode(PPC::LIS8, dl, MVT::i64, getI32Imm(Hi));
  }

  // If no shift, we're done.
  if (!Shift) return Result;

  // If Hi word == Lo word,
  // we can use rldimi to insert the Lo word into Hi word.
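  // (Added illustration: 0x1234567812345678 becomes LIS8/ORI8 to build
  // 0x12345678, then a single RLDIMI replicates it into the high word.)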
  if ((unsigned)(Imm & 0xFFFFFFFF) == Remainder) {
    SDValue Ops[] =
      { SDValue(Result, 0), SDValue(Result, 0), getI32Imm(Shift), getI32Imm(0)};
    return CurDAG->getMachineNode(PPC::RLDIMI, dl, MVT::i64, Ops);
  }

  // Shift for next step if the upper 32-bits were not zero.
  if (Imm) {
    Result = CurDAG->getMachineNode(PPC::RLDICR, dl, MVT::i64,
                                    SDValue(Result, 0),
                                    getI32Imm(Shift),
                                    getI32Imm(63 - Shift));
  }

  // Add in the last bits as required.
  if ((Hi = (Remainder >> 16) & 0xFFFF)) {
    Result = CurDAG->getMachineNode(PPC::ORIS8, dl, MVT::i64,
                                    SDValue(Result, 0), getI32Imm(Hi));
  }
  if ((Lo = Remainder & 0xFFFF)) {
    Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64,
                                    SDValue(Result, 0), getI32Imm(Lo));
  }

  return Result;
}

static SDNode *selectI64Imm(SelectionDAG *CurDAG, const SDLoc &dl,
                            int64_t Imm) {
  unsigned Count = selectI64ImmInstrCountDirect(Imm);

  // If the instruction count is 1 or 2, we do not need further analysis
  // since rotate + load constant requires at least 2 instructions.
  if (Count <= 2)
    return selectI64ImmDirect(CurDAG, dl, Imm);

  unsigned RMin = 0;

  int64_t MatImm;
  unsigned MaskEnd;

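  // An added illustration: Imm = 0x8000000000000001 costs 3 instructions
  // directly, but rotating left by r = 1 gives 0x3, a single LI8; undoing the
  // rotation with RLDICR (shift 63) materializes it in 2 instructions.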
  for (unsigned r = 1; r < 63; ++r) {
    uint64_t RImm = Rot64(Imm, r);
    unsigned RCount = selectI64ImmInstrCountDirect(RImm) + 1;
    if (RCount < Count) {
      Count = RCount;
      RMin = r;
      MatImm = RImm;
      MaskEnd = 63;
    }

    // If the immediate to generate has many trailing zeros, it might be
    // worthwhile to generate a rotated value with too many leading ones
    // (because that's free with li/lis's sign-extension semantics), and then
    // mask them off after rotation.

    unsigned LS = findLastSet(RImm);
    // We're adding (63-LS) higher-order ones, and we expect to mask them off
    // after performing the inverse rotation by (64-r). So we need that:
    // 63-LS == 64-r => LS == r-1
    if (LS != r-1)
      continue;

    uint64_t OnesMask = -(int64_t) (UINT64_C(1) << (LS+1));
    uint64_t RImmWithOnes = RImm | OnesMask;

    RCount = selectI64ImmInstrCountDirect(RImmWithOnes) + 1;
    if (RCount < Count) {
      Count = RCount;
      RMin = r;
      MatImm = RImmWithOnes;
      MaskEnd = LS;
    }
  }

  if (!RMin)
    return selectI64ImmDirect(CurDAG, dl, Imm);

  auto getI32Imm = [CurDAG, dl](unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
  };

  SDValue Val = SDValue(selectI64ImmDirect(CurDAG, dl, MatImm), 0);
  return CurDAG->getMachineNode(PPC::RLDICR, dl, MVT::i64, Val,
                                getI32Imm(64 - RMin), getI32Imm(MaskEnd));
}

static unsigned allUsesTruncate(SelectionDAG *CurDAG, SDNode *N) {
  unsigned MaxTruncation = 0;
  // Cannot use range-based for loop here as we need the actual use (i.e. we
  // need the operand number corresponding to the use). A range-based for
  // will unbox the use and provide an SDNode*.
  for (SDNode::use_iterator Use = N->use_begin(), UseEnd = N->use_end();
       Use != UseEnd; ++Use) {
    unsigned Opc =
      Use->isMachineOpcode() ? Use->getMachineOpcode() : Use->getOpcode();
    switch (Opc) {
    default: return 0;
    case ISD::TRUNCATE:
      if (Use->isMachineOpcode())
        return 0;
      MaxTruncation =
        std::max(MaxTruncation, Use->getValueType(0).getSizeInBits());
      continue;
    case ISD::STORE: {
      if (Use->isMachineOpcode())
        return 0;
      StoreSDNode *STN = cast<StoreSDNode>(*Use);
      unsigned MemVTSize = STN->getMemoryVT().getSizeInBits();
      if (MemVTSize == 64 || Use.getOperandNo() != 0)
        return 0;
      MaxTruncation = std::max(MaxTruncation, MemVTSize);
      continue;
    }
    case PPC::STW8:
    case PPC::STWX8:
    case PPC::STWU8:
    case PPC::STWUX8:
      if (Use.getOperandNo() != 0)
        return 0;
      MaxTruncation = std::max(MaxTruncation, 32u);
      continue;
    case PPC::STH8:
    case PPC::STHX8:
    case PPC::STHU8:
    case PPC::STHUX8:
      if (Use.getOperandNo() != 0)
        return 0;
      MaxTruncation = std::max(MaxTruncation, 16u);
      continue;
    case PPC::STB8:
    case PPC::STBX8:
    case PPC::STBU8:
    case PPC::STBUX8:
      if (Use.getOperandNo() != 0)
        return 0;
      MaxTruncation = std::max(MaxTruncation, 8u);
      continue;
    }
  }
  return MaxTruncation;
}

// Select a 64-bit constant.
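// For instance (an added illustration): if Imm = 0x123400007FFF and every use
// truncates to i16, SignExtend64(Imm, 16) = 0x7FFF fits in 16 bits, so a
// single LI8 suffices even though the full constant would cost several
// instructions.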
static SDNode *selectI64Imm(SelectionDAG *CurDAG, SDNode *N) {
  SDLoc dl(N);

  // Get 64 bit value.
  int64_t Imm = cast<ConstantSDNode>(N)->getZExtValue();
  if (unsigned MinSize = allUsesTruncate(CurDAG, N)) {
    uint64_t SextImm = SignExtend64(Imm, MinSize);
    SDValue SDImm = CurDAG->getTargetConstant(SextImm, dl, MVT::i64);
    if (isInt<16>(SextImm))
      return CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64, SDImm);
  }
  return selectI64Imm(CurDAG, dl, Imm);
}

namespace {

class BitPermutationSelector {
  struct ValueBit {
    SDValue V;

    // The bit number in the value, using a convention where bit 0 is the
    // lowest-order bit.
    unsigned Idx;

    enum Kind {
      ConstZero,
      Variable
    } K;

    ValueBit(SDValue V, unsigned I, Kind K = Variable)
      : V(V), Idx(I), K(K) {}
    ValueBit(Kind K = Variable)
      : V(SDValue(nullptr, 0)), Idx(UINT32_MAX), K(K) {}

    bool isZero() const {
      return K == ConstZero;
    }

    bool hasValue() const {
      return K == Variable;
    }

    SDValue getValue() const {
      assert(hasValue() && "Cannot get the value of a constant bit");
      return V;
    }

    unsigned getValueBitIndex() const {
      assert(hasValue() && "Cannot get the value bit index of a constant bit");
      return Idx;
    }
  };

  // A bit group has the same underlying value and the same rotate factor.
  struct BitGroup {
    SDValue V;
    unsigned RLAmt;
    unsigned StartIdx, EndIdx;

    // This rotation amount assumes that the lower 32 bits of the quantity are
    // replicated in the high 32 bits by the rotation operator (which is done
    // by rlwinm and friends in 64-bit mode).
    bool Repl32;
    // Did converting to Repl32 == true change the rotation factor? If it did,
    // it decreased it by 32.
    bool Repl32CR;
    // Was this group coalesced after setting Repl32 to true?
    bool Repl32Coalesced;

    BitGroup(SDValue V, unsigned R, unsigned S, unsigned E)
      : V(V), RLAmt(R), StartIdx(S), EndIdx(E), Repl32(false), Repl32CR(false),
        Repl32Coalesced(false) {
      LLVM_DEBUG(dbgs() << "\tbit group for " << V.getNode() << " RLAmt = " << R
                        << " [" << S << ", " << E << "]\n");
    }
  };

  // Information on each (Value, RLAmt) pair (like the number of groups
  // associated with each) used to choose the lowering method.
  struct ValueRotInfo {
    SDValue V;
    unsigned RLAmt = std::numeric_limits<unsigned>::max();
    unsigned NumGroups = 0;
    unsigned FirstGroupStartIdx = std::numeric_limits<unsigned>::max();
    bool Repl32 = false;

    ValueRotInfo() = default;

    // For sorting (in reverse order) by NumGroups, and then by
    // FirstGroupStartIdx.
    bool operator < (const ValueRotInfo &Other) const {
      // We need to sort so that the non-Repl32 come first because, when we're
      // doing masking, the Repl32 bit groups might be subsumed into the 64-bit
      // masking operation.
      if (Repl32 < Other.Repl32)
        return true;
      else if (Repl32 > Other.Repl32)
        return false;
      else if (NumGroups > Other.NumGroups)
        return true;
      else if (NumGroups < Other.NumGroups)
        return false;
      else if (RLAmt == 0 && Other.RLAmt != 0)
        return true;
      else if (RLAmt != 0 && Other.RLAmt == 0)
        return false;
      else if (FirstGroupStartIdx < Other.FirstGroupStartIdx)
        return true;
      return false;
    }
  };

  using ValueBitsMemoizedValue = std::pair<bool, SmallVector<ValueBit, 64>>;
  using ValueBitsMemoizer =
      DenseMap<SDValue, std::unique_ptr<ValueBitsMemoizedValue>>;
  ValueBitsMemoizer Memoizer;

  // Return a pair of bool and a SmallVector pointer to a memoization entry.
  // The bool is true if something interesting was deduced, and false if we're
  // providing only a generic representation of V (or something else likewise
  // uninteresting for instruction selection) through the SmallVector.
  std::pair<bool, SmallVector<ValueBit, 64> *> getValueBits(SDValue V,
                                                            unsigned NumBits) {
    auto &ValueEntry = Memoizer[V];
    if (ValueEntry)
      return std::make_pair(ValueEntry->first, &ValueEntry->second);
    ValueEntry.reset(new ValueBitsMemoizedValue());
    bool &Interesting = ValueEntry->first;
    SmallVector<ValueBit, 64> &Bits = ValueEntry->second;
    Bits.resize(NumBits);

    switch (V.getOpcode()) {
    default: break;
    case ISD::ROTL:
      if (isa<ConstantSDNode>(V.getOperand(1))) {
        unsigned RotAmt = V.getConstantOperandVal(1);

        const auto &LHSBits = *getValueBits(V.getOperand(0), NumBits).second;

        for (unsigned i = 0; i < NumBits; ++i)
          Bits[i] = LHSBits[i < RotAmt ? i + (NumBits - RotAmt) : i - RotAmt];
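        // E.g., with NumBits = 32 and RotAmt = 8, result bit 8 takes source
        // bit 0 and result bit 0 takes source bit 24 (an added illustration).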

        return std::make_pair(Interesting = true, &Bits);
      }
      break;
    case ISD::SHL:
      if (isa<ConstantSDNode>(V.getOperand(1))) {
        unsigned ShiftAmt = V.getConstantOperandVal(1);

        const auto &LHSBits = *getValueBits(V.getOperand(0), NumBits).second;

        for (unsigned i = ShiftAmt; i < NumBits; ++i)
          Bits[i] = LHSBits[i - ShiftAmt];

        for (unsigned i = 0; i < ShiftAmt; ++i)
          Bits[i] = ValueBit(ValueBit::ConstZero);

        return std::make_pair(Interesting = true, &Bits);
      }
      break;
    case ISD::SRL:
      if (isa<ConstantSDNode>(V.getOperand(1))) {
        unsigned ShiftAmt = V.getConstantOperandVal(1);

        const auto &LHSBits = *getValueBits(V.getOperand(0), NumBits).second;

        for (unsigned i = 0; i < NumBits - ShiftAmt; ++i)
          Bits[i] = LHSBits[i + ShiftAmt];

        for (unsigned i = NumBits - ShiftAmt; i < NumBits; ++i)
          Bits[i] = ValueBit(ValueBit::ConstZero);

        return std::make_pair(Interesting = true, &Bits);
      }
      break;
    case ISD::AND:
      if (isa<ConstantSDNode>(V.getOperand(1))) {
        uint64_t Mask = V.getConstantOperandVal(1);

        const SmallVector<ValueBit, 64> *LHSBits;
        // Mark this as interesting, only if the LHS was also interesting. This
        // prevents the overall procedure from matching a single immediate 'and'
        // (which is non-optimal because such an and might be folded with other
        // things if we don't select it here).
        std::tie(Interesting, LHSBits) = getValueBits(V.getOperand(0), NumBits);

        for (unsigned i = 0; i < NumBits; ++i)
          if (((Mask >> i) & 1) == 1)
            Bits[i] = (*LHSBits)[i];
          else
            Bits[i] = ValueBit(ValueBit::ConstZero);

        return std::make_pair(Interesting, &Bits);
      }
      break;
    case ISD::OR: {
      const auto &LHSBits = *getValueBits(V.getOperand(0), NumBits).second;
      const auto &RHSBits = *getValueBits(V.getOperand(1), NumBits).second;

      bool AllDisjoint = true;
      for (unsigned i = 0; i < NumBits; ++i)
        if (LHSBits[i].isZero())
          Bits[i] = RHSBits[i];
        else if (RHSBits[i].isZero())
          Bits[i] = LHSBits[i];
        else {
          AllDisjoint = false;
          break;
        }

      if (!AllDisjoint)
        break;

      return std::make_pair(Interesting = true, &Bits);
    }
    case ISD::ZERO_EXTEND: {
      // We support only the case with zero extension from i32 to i64 so far.
      if (V.getValueType() != MVT::i64 ||
          V.getOperand(0).getValueType() != MVT::i32)
        break;

      const SmallVector<ValueBit, 64> *LHSBits;
      const unsigned NumOperandBits = 32;
      std::tie(Interesting, LHSBits) = getValueBits(V.getOperand(0),
                                                    NumOperandBits);

      for (unsigned i = 0; i < NumOperandBits; ++i)
        Bits[i] = (*LHSBits)[i];

      for (unsigned i = NumOperandBits; i < NumBits; ++i)
        Bits[i] = ValueBit(ValueBit::ConstZero);

      return std::make_pair(Interesting, &Bits);
    }
    }

    for (unsigned i = 0; i < NumBits; ++i)
      Bits[i] = ValueBit(V, i);

    return std::make_pair(Interesting = false, &Bits);
  }

  // For each value (except the constant ones), compute the left-rotate amount
  // to get it from its original to final position.
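  // For example (an added illustration): with 64-bit values, if final bit 5
  // carries source bit 3 of V, RLAmt = 2; if final bit 1 carries source bit 3,
  // the rotate wraps and RLAmt = 64 - 2 = 62.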
  void computeRotationAmounts() {
    HasZeros = false;
    RLAmt.resize(Bits.size());
    for (unsigned i = 0; i < Bits.size(); ++i)
      if (Bits[i].hasValue()) {
        unsigned VBI = Bits[i].getValueBitIndex();
        if (i >= VBI)
          RLAmt[i] = i - VBI;
        else
          RLAmt[i] = Bits.size() - (VBI - i);
      } else if (Bits[i].isZero()) {
        HasZeros = true;
        RLAmt[i] = UINT32_MAX;
      } else {
        llvm_unreachable("Unknown value bit type");
      }
  }

  // Collect groups of consecutive bits with the same underlying value and
  // rotation factor. If we're doing late masking, we ignore zeros, otherwise
  // they break up groups.
  void collectBitGroups(bool LateMask) {
    BitGroups.clear();

    unsigned LastRLAmt = RLAmt[0];
    SDValue LastValue = Bits[0].hasValue() ? Bits[0].getValue() : SDValue();
    unsigned LastGroupStartIdx = 0;
    for (unsigned i = 1; i < Bits.size(); ++i) {
      unsigned ThisRLAmt = RLAmt[i];
      SDValue ThisValue = Bits[i].hasValue() ? Bits[i].getValue() : SDValue();
      if (LateMask && !ThisValue) {
        ThisValue = LastValue;
        ThisRLAmt = LastRLAmt;
        // If we're doing late masking, then the first bit group always starts
        // at zero (even if the first bits were zero).
        if (BitGroups.empty())
          LastGroupStartIdx = 0;
      }

      // If this bit has the same underlying value and the same rotate factor as
      // the last one, then they're part of the same group.
      if (ThisRLAmt == LastRLAmt && ThisValue == LastValue)
        continue;

      if (LastValue.getNode())
        BitGroups.push_back(BitGroup(LastValue, LastRLAmt, LastGroupStartIdx,
                                     i-1));
      LastRLAmt = ThisRLAmt;
      LastValue = ThisValue;
      LastGroupStartIdx = i;
    }
    if (LastValue.getNode())
      BitGroups.push_back(BitGroup(LastValue, LastRLAmt, LastGroupStartIdx,
                                   Bits.size()-1));

    if (BitGroups.empty())
      return;

    // We might be able to combine the first and last groups.
    if (BitGroups.size() > 1) {
      // If the first and last groups are the same, then remove the first group
      // in favor of the last group, making the ending index of the last group
      // equal to the ending index of the to-be-removed first group.
      if (BitGroups[0].StartIdx == 0 &&
          BitGroups[BitGroups.size()-1].EndIdx == Bits.size()-1 &&
          BitGroups[0].V == BitGroups[BitGroups.size()-1].V &&
          BitGroups[0].RLAmt == BitGroups[BitGroups.size()-1].RLAmt) {
        LLVM_DEBUG(dbgs() << "\tcombining final bit group with initial one\n");
        BitGroups[BitGroups.size()-1].EndIdx = BitGroups[0].EndIdx;
        BitGroups.erase(BitGroups.begin());
      }
    }
  }

  // Take all (SDValue, RLAmt) pairs and sort them by the number of groups
  // associated with each. If the numbers of groups are the same, we prefer a
  // group that does not require a rotate, i.e. one whose RLAmt is 0, to avoid
  // the first rotate instruction. If there is still a tie, pick the one that
  // occurs first (in the final value).
  void collectValueRotInfo() {
    ValueRots.clear();

    for (auto &BG : BitGroups) {
      unsigned RLAmtKey = BG.RLAmt + (BG.Repl32 ? 64 : 0);
      ValueRotInfo &VRI = ValueRots[std::make_pair(BG.V, RLAmtKey)];
      VRI.V = BG.V;
      VRI.RLAmt = BG.RLAmt;
      VRI.Repl32 = BG.Repl32;
      VRI.NumGroups += 1;
      VRI.FirstGroupStartIdx = std::min(VRI.FirstGroupStartIdx, BG.StartIdx);
    }

    // Now that we've collected the various ValueRotInfo instances, we need to
    // sort them.
    ValueRotsVec.clear();
    for (auto &I : ValueRots) {
      ValueRotsVec.push_back(I.second);
    }
    llvm::sort(ValueRotsVec.begin(), ValueRotsVec.end());
  }

  // In 64-bit mode, rlwinm and friends have a rotation operator that
  // replicates the low-order 32 bits into the high-order 32 bits. The mask
  // indices of these instructions can only be in the lower 32 bits, so they
  // can only represent some 64-bit bit groups. However, when they can be used,
  // the 32-bit replication can be used to represent, as a single bit group,
  // otherwise separate bit groups. We'll convert to replicated-32-bit bit
  // groups when possible.
  void assignRepl32BitGroups() {
    // If we have bits like this:
    //
    // Indices:    15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
    // V bits: ... 7  6  5  4  3  2  1 0 31 30 29 28 27 26 25 24
    // Groups:    |      RLAmt = 8      |      RLAmt = 40       |
    //
    // But, making use of a 32-bit operation that replicates the low-order 32
    // bits into the high-order 32 bits, this can be one bit group with a RLAmt
    // of 8.

    auto IsAllLow32 = [this](BitGroup & BG) {
      if (BG.StartIdx <= BG.EndIdx) {
        for (unsigned i = BG.StartIdx; i <= BG.EndIdx; ++i) {
          if (!Bits[i].hasValue())
            continue;
          if (Bits[i].getValueBitIndex() >= 32)
            return false;
        }
      } else {
        for (unsigned i = BG.StartIdx; i < Bits.size(); ++i) {
          if (!Bits[i].hasValue())
            continue;
          if (Bits[i].getValueBitIndex() >= 32)
            return false;
        }
        for (unsigned i = 0; i <= BG.EndIdx; ++i) {
          if (!Bits[i].hasValue())
            continue;
          if (Bits[i].getValueBitIndex() >= 32)
            return false;
        }
      }

      return true;
    };

    for (auto &BG : BitGroups) {
      // If this bit group has an RLAmt of 0 and will not be merged with
      // another bit group, we don't benefit from Repl32. We don't mark
      // such a group, to give more freedom for later instruction selection.
      if (BG.RLAmt == 0) {
        auto PotentiallyMerged = [this](BitGroup & BG) {
          for (auto &BG2 : BitGroups)
            if (&BG != &BG2 && BG.V == BG2.V &&
                (BG2.RLAmt == 0 || BG2.RLAmt == 32))
              return true;
          return false;
        };
        if (!PotentiallyMerged(BG))
          continue;
      }
      if (BG.StartIdx < 32 && BG.EndIdx < 32) {
        if (IsAllLow32(BG)) {
          if (BG.RLAmt >= 32) {
            BG.RLAmt -= 32;
            BG.Repl32CR = true;
          }

          BG.Repl32 = true;

          LLVM_DEBUG(dbgs() << "\t32-bit replicated bit group for "
                            << BG.V.getNode() << " RLAmt = " << BG.RLAmt << " ["
                            << BG.StartIdx << ", " << BG.EndIdx << "]\n");
        }
      }
    }

    // Now walk through the bit groups, consolidating where possible.
    for (auto I = BitGroups.begin(); I != BitGroups.end();) {
      // We might want to remove this bit group by merging it with the previous
      // group (which might be the ending group).
      auto IP = (I == BitGroups.begin()) ?
                std::prev(BitGroups.end()) : std::prev(I);
      if (I->Repl32 && IP->Repl32 && I->V == IP->V && I->RLAmt == IP->RLAmt &&
          I->StartIdx == (IP->EndIdx + 1) % 64 && I != IP) {

        LLVM_DEBUG(dbgs() << "\tcombining 32-bit replicated bit group for "
                          << I->V.getNode() << " RLAmt = " << I->RLAmt << " ["
                          << I->StartIdx << ", " << I->EndIdx
                          << "] with group with range [" << IP->StartIdx << ", "
                          << IP->EndIdx << "]\n");

        IP->EndIdx = I->EndIdx;
        IP->Repl32CR = IP->Repl32CR || I->Repl32CR;
        IP->Repl32Coalesced = true;
        I = BitGroups.erase(I);
        continue;
      } else {
        // There is a special case worth handling: If there is a single group
        // covering the entire upper 32 bits, and it can be merged with both
        // the next and previous groups (which might be the same group), then
        // do so. If it is the same group (so there will be only one group in
        // total), then we need to reverse the order of the range so that it
        // covers the entire 64 bits.
        if (I->StartIdx == 32 && I->EndIdx == 63) {
          assert(std::next(I) == BitGroups.end() &&
                 "bit group ends at index 63 but there is another?");
          auto IN = BitGroups.begin();

          if (IP->Repl32 && IN->Repl32 && I->V == IP->V && I->V == IN->V &&
              (I->RLAmt % 32) == IP->RLAmt && (I->RLAmt % 32) == IN->RLAmt &&
              IP->EndIdx == 31 && IN->StartIdx == 0 && I != IP &&
              IsAllLow32(*I)) {

            LLVM_DEBUG(dbgs() << "\tcombining bit group for " << I->V.getNode()
                              << " RLAmt = " << I->RLAmt << " [" << I->StartIdx
                              << ", " << I->EndIdx
                              << "] with 32-bit replicated groups with ranges ["
                              << IP->StartIdx << ", " << IP->EndIdx << "] and ["
                              << IN->StartIdx << ", " << IN->EndIdx << "]\n");

            if (IP == IN) {
              // There is only one other group; change it to cover the whole
              // range (backward, so that it can still be Repl32 but cover the
              // whole 64-bit range).
              IP->StartIdx = 31;
              IP->EndIdx = 30;
              IP->Repl32CR = IP->Repl32CR || I->RLAmt >= 32;
              IP->Repl32Coalesced = true;
              I = BitGroups.erase(I);
            } else {
              // There are two separate groups, one before this group and one
              // after us (at the beginning). We're going to remove this group,
              // but also the group at the very beginning.
              IP->EndIdx = IN->EndIdx;
              IP->Repl32CR = IP->Repl32CR || IN->Repl32CR || I->RLAmt >= 32;
              IP->Repl32Coalesced = true;
              I = BitGroups.erase(I);
              BitGroups.erase(BitGroups.begin());
            }

            // This must be the last group in the vector (and we might have
            // just invalidated the iterator above), so break here.
            break;
          }
        }
      }

      ++I;
    }
  }

  SDValue getI32Imm(unsigned Imm, const SDLoc &dl) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
  }

  uint64_t getZerosMask() {
    uint64_t Mask = 0;
    for (unsigned i = 0; i < Bits.size(); ++i) {
      if (Bits[i].hasValue())
        continue;
      Mask |= (UINT64_C(1) << i);
    }

    return ~Mask;
  }

  // This method extends an input value to 64 bits if the input is a 32-bit
  // integer. While selecting instructions in BitPermutationSelector in 64-bit
  // mode, an input value can be a 32-bit integer if a ZERO_EXTEND node is
  // included. In such a case, we extend it to 64 bits to be consistent with
  // other values.
| SDValue ExtendToInt64(SDValue V, const SDLoc &dl) { |
| if (V.getValueSizeInBits() == 64) |
| return V; |
| |
| assert(V.getValueSizeInBits() == 32); |
| SDValue SubRegIdx = CurDAG->getTargetConstant(PPC::sub_32, dl, MVT::i32); |
| SDValue ImDef = SDValue(CurDAG->getMachineNode(PPC::IMPLICIT_DEF, dl, |
| MVT::i64), 0); |
| SDValue ExtVal = SDValue(CurDAG->getMachineNode(PPC::INSERT_SUBREG, dl, |
| MVT::i64, ImDef, V, |
| SubRegIdx), 0); |
| return ExtVal; |
| } |
| |
| // Depending on the number of groups for a particular value, it might be |
| // better to rotate, mask explicitly (using andi/andis), and then or the |
| // result. Select this part of the result first. |
| void SelectAndParts32(const SDLoc &dl, SDValue &Res, unsigned *InstCnt) { |
| if (BPermRewriterNoMasking) |
| return; |
| |
| for (ValueRotInfo &VRI : ValueRotsVec) { |
| unsigned Mask = 0; |
| for (unsigned i = 0; i < Bits.size(); ++i) { |
| if (!Bits[i].hasValue() || Bits[i].getValue() != VRI.V) |
| continue; |
| if (RLAmt[i] != VRI.RLAmt) |
| continue; |
| Mask |= (1u << i); |
| } |
| |
| // Compute the masks for andi/andis that would be necessary. |
| unsigned ANDIMask = (Mask & UINT16_MAX), ANDISMask = Mask >> 16; |
| assert((ANDIMask != 0 || ANDISMask != 0) && |
| "No set bits in mask for value bit groups"); |
| bool NeedsRotate = VRI.RLAmt != 0; |
| |
| // We're trying to minimize the number of instructions. If we have one |
| // group, using one of andi/andis can break even. If we have three |
| // groups, we can use both andi and andis and break even (to use both |
| // andi and andis we also need to or the results together). We need four |
| // groups if we also need to rotate. To use andi/andis we need to do more |
| // than break even because rotate-and-mask instructions tend to be easier |
| // to schedule. |
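| // For example, if a rotate is needed and both halves of the mask are
| // nonzero, masking costs 1 (rlwinm) + 1 (andi.) + 1 (andis.) + 1 (or) = 4
| // instructions, so with the >= comparison below it is only chosen when it
| // replaces at least five rotate-and-mask groups.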
| |
| // FIXME: We've biased here against using andi/andis, which is right for |
| // POWER cores, but not optimal everywhere. For example, on the A2, |
| // andi/andis have single-cycle latency whereas the rotate-and-mask |
| // instructions take two cycles, and it would be better to bias toward |
| // andi/andis in break-even cases. |
| |
| unsigned NumAndInsts = (unsigned) NeedsRotate + |
| (unsigned) (ANDIMask != 0) + |
| (unsigned) (ANDISMask != 0) + |
| (unsigned) (ANDIMask != 0 && ANDISMask != 0) + |
| (unsigned) (bool) Res; |
| |
| LLVM_DEBUG(dbgs() << "\t\trotation groups for " << VRI.V.getNode() |
| << " RL: " << VRI.RLAmt << ":" |
| << "\n\t\t\tisel using masking: " << NumAndInsts |
| << " using rotates: " << VRI.NumGroups << "\n"); |
| |
| if (NumAndInsts >= VRI.NumGroups) |
| continue; |
| |
| LLVM_DEBUG(dbgs() << "\t\t\t\tusing masking\n"); |
| |
| if (InstCnt) *InstCnt += NumAndInsts; |
| |
| SDValue VRot; |
| if (VRI.RLAmt) { |
| SDValue Ops[] = |
| { VRI.V, getI32Imm(VRI.RLAmt, dl), getI32Imm(0, dl), |
| getI32Imm(31, dl) }; |
| VRot = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, |
| Ops), 0); |
| } else { |
| VRot = VRI.V; |
| } |
| |
| SDValue ANDIVal, ANDISVal; |
| if (ANDIMask != 0) |
| ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDIo, dl, MVT::i32, |
| VRot, getI32Imm(ANDIMask, dl)), 0); |
| if (ANDISMask != 0) |
| ANDISVal = SDValue(CurDAG->getMachineNode(PPC::ANDISo, dl, MVT::i32, |
| VRot, getI32Imm(ANDISMask, dl)), 0); |
| |
| SDValue TotalVal; |
| if (!ANDIVal) |
| TotalVal = ANDISVal; |
| else if (!ANDISVal) |
| TotalVal = ANDIVal; |
| else |
| TotalVal = SDValue(CurDAG->getMachineNode(PPC::OR, dl, MVT::i32, |
| ANDIVal, ANDISVal), 0); |
| |
| if (!Res) |
| Res = TotalVal; |
| else |
| Res = SDValue(CurDAG->getMachineNode(PPC::OR, dl, MVT::i32, |
| Res, TotalVal), 0); |
| |
| // Now, remove all groups with this underlying value and rotation |
| // factor. |
| eraseMatchingBitGroups([VRI](const BitGroup &BG) { |
| return BG.V == VRI.V && BG.RLAmt == VRI.RLAmt; |
| }); |
| } |
| } |
| |
| // Instruction selection for the 32-bit case. |
| SDNode *Select32(SDNode *N, bool LateMask, unsigned *InstCnt) { |
| SDLoc dl(N); |
| SDValue Res; |
| |
| if (InstCnt) *InstCnt = 0; |
| |
| // Take care of cases that should use andi/andis first. |
| SelectAndParts32(dl, Res, InstCnt); |
| |
| // If we've not yet selected a 'starting' instruction, and we have no zeros |
| // to fill in, select the (Value, RLAmt) with the highest priority (largest |
| // number of groups), and start with this rotated value. |
| if ((!HasZeros || LateMask) && !Res) { |
| ValueRotInfo &VRI = ValueRotsVec[0]; |
| if (VRI.RLAmt) { |
| if (InstCnt) *InstCnt += 1; |
| SDValue Ops[] = |
| { VRI.V, getI32Imm(VRI.RLAmt, dl), getI32Imm(0, dl), |
| getI32Imm(31, dl) }; |
| Res = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), |
| 0); |
| } else { |
| Res = VRI.V; |
| } |
| |
| // Now, remove all groups with this underlying value and rotation factor. |
| eraseMatchingBitGroups([VRI](const BitGroup &BG) { |
| return BG.V == VRI.V && BG.RLAmt == VRI.RLAmt; |
| }); |
| } |
| |
| if (InstCnt) *InstCnt += BitGroups.size(); |
| |
| // Insert the other groups (one at a time). |
| for (auto &BG : BitGroups) { |
| if (!Res) { |
| SDValue Ops[] = |
| { BG.V, getI32Imm(BG.RLAmt, dl), |
| getI32Imm(Bits.size() - BG.EndIdx - 1, dl), |
| getI32Imm(Bits.size() - BG.StartIdx - 1, dl) }; |
| Res = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0); |
| } else { |
| SDValue Ops[] = |
| { Res, BG.V, getI32Imm(BG.RLAmt, dl), |
| getI32Imm(Bits.size() - BG.EndIdx - 1, dl), |
| getI32Imm(Bits.size() - BG.StartIdx - 1, dl) }; |
| Res = SDValue(CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops), 0); |
| } |
| } |
| |
| if (LateMask) { |
| unsigned Mask = (unsigned) getZerosMask(); |
| |
| unsigned ANDIMask = (Mask & UINT16_MAX), ANDISMask = Mask >> 16; |
| assert((ANDIMask != 0 || ANDISMask != 0) && |
| "No set bits in zeros mask?"); |
| |
| if (InstCnt) *InstCnt += (unsigned) (ANDIMask != 0) + |
| (unsigned) (ANDISMask != 0) + |
| (unsigned) (ANDIMask != 0 && ANDISMask != 0); |
| |
| SDValue ANDIVal, ANDISVal; |
| if (ANDIMask != 0) |
| ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDIo, dl, MVT::i32, |
| Res, getI32Imm(ANDIMask, dl)), 0); |
| if (ANDISMask != 0) |
| ANDISVal = SDValue(CurDAG->getMachineNode(PPC::ANDISo, dl, MVT::i32, |
| Res, getI32Imm(ANDISMask, dl)), 0); |
| |
| if (!ANDIVal) |
| Res = ANDISVal; |
| else if (!ANDISVal) |
| Res = ANDIVal; |
| else |
| Res = SDValue(CurDAG->getMachineNode(PPC::OR, dl, MVT::i32, |
| ANDIVal, ANDISVal), 0); |
| } |
| |
| return Res.getNode(); |
| } |
| |
| unsigned SelectRotMask64Count(unsigned RLAmt, bool Repl32, |
| unsigned MaskStart, unsigned MaskEnd, |
| bool IsIns) { |
| // In the notation used by the instructions, 'start' and 'end' are reversed |
| // because bits are counted from high to low order. |
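| // For example, a mask over value bits [MaskStart, MaskEnd] = [0, 15]
| // becomes [InstMaskStart, InstMaskEnd] = [48, 63] in instruction numbering.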
| unsigned InstMaskStart = 64 - MaskEnd - 1, |
| InstMaskEnd = 64 - MaskStart - 1; |
| |
| if (Repl32) |
| return 1; |
| |
| if ((!IsIns && (InstMaskEnd == 63 || InstMaskStart == 0)) || |
| InstMaskEnd == 63 - RLAmt) |
| return 1; |
| |
| return 2; |
| } |
| |
| // For 64-bit values, not all combinations of rotates and masks are |
| // available. Produce one if it is available. |
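| // The single-instruction cases handled below are: rlwinm8 for Repl32
| // masks, rldicl when the mask runs down to bit 63, rldicr when it starts
| // at bit 0, and rldic when InstMaskEnd == 63 - RLAmt. Anything else is
| // synthesized from two rotates.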
| SDValue SelectRotMask64(SDValue V, const SDLoc &dl, unsigned RLAmt, |
| bool Repl32, unsigned MaskStart, unsigned MaskEnd, |
| unsigned *InstCnt = nullptr) { |
| // In the notation used by the instructions, 'start' and 'end' are reversed |
| // because bits are counted from high to low order. |
| unsigned InstMaskStart = 64 - MaskEnd - 1, |
| InstMaskEnd = 64 - MaskStart - 1; |
| |
| if (InstCnt) *InstCnt += 1; |
| |
| if (Repl32) { |
| // This rotation amount assumes that the lower 32 bits of the quantity |
| // are replicated in the high 32 bits by the rotation operator (which is |
| // done by rlwinm and friends). |
| assert(InstMaskStart >= 32 && "Mask cannot start out of range"); |
| assert(InstMaskEnd >= 32 && "Mask cannot end out of range"); |
| SDValue Ops[] = |
| { ExtendToInt64(V, dl), getI32Imm(RLAmt, dl), |
| getI32Imm(InstMaskStart - 32, dl), getI32Imm(InstMaskEnd - 32, dl) }; |
| return SDValue(CurDAG->getMachineNode(PPC::RLWINM8, dl, MVT::i64, |
| Ops), 0); |
| } |
| |
| if (InstMaskEnd == 63) { |
| SDValue Ops[] = |
| { ExtendToInt64(V, dl), getI32Imm(RLAmt, dl), |
| getI32Imm(InstMaskStart, dl) }; |
| return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, Ops), 0); |
| } |
| |
| if (InstMaskStart == 0) { |
| SDValue Ops[] = |
| { ExtendToInt64(V, dl), getI32Imm(RLAmt, dl), |
| getI32Imm(InstMaskEnd, dl) }; |
| return SDValue(CurDAG->getMachineNode(PPC::RLDICR, dl, MVT::i64, Ops), 0); |
| } |
| |
| if (InstMaskEnd == 63 - RLAmt) { |
| SDValue Ops[] = |
| { ExtendToInt64(V, dl), getI32Imm(RLAmt, dl), |
| getI32Imm(InstMaskStart, dl) }; |
| return SDValue(CurDAG->getMachineNode(PPC::RLDIC, dl, MVT::i64, Ops), 0); |
| } |
| |
| // We cannot do this with a single instruction, so we'll use two. The |
| // problem is that we're not free to choose both a rotation amount and mask |
| // start and end independently. We can choose an arbitrary mask start and |
| // end, but then the rotation amount is fixed. Rotation, however, can be |
| // inverted, and so by applying an "inverse" rotation first, we can get the |
| // desired result. |
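| // For example, if the desired overall rotation is RLAmt = 10 with
| // MaskStart = 20, the second instruction must use RLAmt2 = 20, so the
| // first rotate supplies RLAmt1 = (64 + 10 - 20) % 64 = 54; the two
| // rotations compose to 54 + 20 = 74 = 10 (mod 64), as required.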
| if (InstCnt) *InstCnt += 1; |
| |
| // The rotation mask for the second instruction must be MaskStart. |
| unsigned RLAmt2 = MaskStart; |
| // The first instruction must rotate V so that the overall rotation amount |
| // is RLAmt. |
| unsigned RLAmt1 = (64 + RLAmt - RLAmt2) % 64; |
| if (RLAmt1) |
| V = SelectRotMask64(V, dl, RLAmt1, false, 0, 63); |
| return SelectRotMask64(V, dl, RLAmt2, false, MaskStart, MaskEnd); |
| } |
| |
| // For 64-bit values, not all combinations of rotates and masks are |
| // available. Produce a rotate-mask-and-insert if one is available. |
| SDValue SelectRotMaskIns64(SDValue Base, SDValue V, const SDLoc &dl, |
| unsigned RLAmt, bool Repl32, unsigned MaskStart, |
| unsigned MaskEnd, unsigned *InstCnt = nullptr) { |
| // In the notation used by the instructions, 'start' and 'end' are reversed |
| // because bits are counted from high to low order. |
| unsigned InstMaskStart = 64 - MaskEnd - 1, |
| InstMaskEnd = 64 - MaskStart - 1; |
| |
| if (InstCnt) *InstCnt += 1; |
| |
| if (Repl32) { |
| // This rotation amount assumes that the lower 32 bits of the quantity |
| // are replicated in the high 32 bits by the rotation operator (which is |
| // done by rlwinm and friends). |
| assert(InstMaskStart >= 32 && "Mask cannot start out of range"); |
| assert(InstMaskEnd >= 32 && "Mask cannot end out of range"); |
| SDValue Ops[] = |
| { ExtendToInt64(Base, dl), ExtendToInt64(V, dl), getI32Imm(RLAmt, dl), |
| getI32Imm(InstMaskStart - 32, dl), getI32Imm(InstMaskEnd - 32, dl) }; |
| return SDValue(CurDAG->getMachineNode(PPC::RLWIMI8, dl, MVT::i64, |
| Ops), 0); |
| } |
| |
| if (InstMaskEnd == 63 - RLAmt) { |
| SDValue Ops[] = |
| { ExtendToInt64(Base, dl), ExtendToInt64(V, dl), getI32Imm(RLAmt, dl), |
| getI32Imm(InstMaskStart, dl) }; |
| return SDValue(CurDAG->getMachineNode(PPC::RLDIMI, dl, MVT::i64, Ops), 0); |
| } |
| |
| // We cannot do this with a single instruction, so we'll use two. The |
| // problem is that we're not free to choose both a rotation amount and mask |
| // start and end independently. We can choose an arbitrary mask start and |
| // end, but then the rotation amount is fixed. Rotation, however, can be |
| // inverted, and so by applying an "inverse" rotation first, we can get the |
| // desired result. |
| if (InstCnt) *InstCnt += 1; |
| |
| // The rotation mask for the second instruction must be MaskStart. |
| unsigned RLAmt2 = MaskStart; |
| // The first instruction must rotate V so that the overall rotation amount |
| // is RLAmt. |
| unsigned RLAmt1 = (64 + RLAmt - RLAmt2) % 64; |
| if (RLAmt1) |
| V = SelectRotMask64(V, dl, RLAmt1, false, 0, 63); |
| return SelectRotMaskIns64(Base, V, dl, RLAmt2, false, MaskStart, MaskEnd); |
| } |
| |
| void SelectAndParts64(const SDLoc &dl, SDValue &Res, unsigned *InstCnt) { |
| if (BPermRewriterNoMasking) |
| return; |
| |
| // The idea here is the same as in the 32-bit version, but with additional |
| // complications from the fact that Repl32 might be true. Because we |
| // aggressively convert bit groups to Repl32 form (which, for small |
| // rotation factors, involves no other change), and then coalesce, it might |
| // be the case that a single 64-bit masking operation could handle both |
| // some Repl32 groups and some non-Repl32 groups. If converting to Repl32 |
| // form allowed coalescing, then we must use a 32-bit rotation in order to
| // completely capture the new combined bit group. |
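| // For example, MatchingBG below treats a Repl32 group that is confined to
| // the lower 32 bits (and not yet coalesced) as an ordinary group whose
| // effective rotation is RLAmt, or RLAmt + 32 when Repl32CR is set.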
| |
| for (ValueRotInfo &VRI : ValueRotsVec) { |
| uint64_t Mask = 0; |
| |
| // We need to add to the mask all bits from the associated bit groups. |
| // If Repl32 is false, we need to add bits from bit groups that have |
| // Repl32 true, but are trivially convertible to Repl32 false. Such a
| // group is trivially convertible if it overlaps only with the lower 32
| // bits, and the group has not been coalesced. |
| auto MatchingBG = [VRI](const BitGroup &BG) { |
| if (VRI.V != BG.V) |
| return false; |
| |
| unsigned EffRLAmt = BG.RLAmt; |
| if (!VRI.Repl32 && BG.Repl32) { |
| if (BG.StartIdx < 32 && BG.EndIdx < 32 && BG.StartIdx <= BG.EndIdx && |
| !BG.Repl32Coalesced) { |
| if (BG.Repl32CR) |
| EffRLAmt += 32; |
| } else { |
| return false; |
| } |
| } else if (VRI.Repl32 != BG.Repl32) { |
| return false; |
| } |
| |
| return VRI.RLAmt == EffRLAmt; |
| }; |
| |
| for (auto &BG : BitGroups) { |
| if (!MatchingBG(BG)) |
| continue; |
| |
| if (BG.StartIdx <= BG.EndIdx) { |
| for (unsigned i = BG.StartIdx; i <= BG.EndIdx; ++i) |
| Mask |= (UINT64_C(1) << i); |
| } else { |
| for (unsigned i = BG.StartIdx; i < Bits.size(); ++i) |
| Mask |= (UINT64_C(1) << i); |
| for (unsigned i = 0; i <= BG.EndIdx; ++i) |
| Mask |= (UINT64_C(1) << i); |
| } |
| } |
| |
| // We can use the 32-bit andi/andis technique if the mask does not |
| // require any higher-order bits. This can save an instruction compared |
| // to always using the general 64-bit technique. |
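| // For example, Mask = 0x00FF00FF splits into ANDIMask = 0x00FF (applied
| // with andi.) and ANDISMask = 0x00FF (applied with andis. to bits 16-31),
| // with the two results OR'd together.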
| bool Use32BitInsts = isUInt<32>(Mask); |
| // Compute the masks for andi/andis that would be necessary. |
| unsigned ANDIMask = (Mask & UINT16_MAX), |
| ANDISMask = (Mask >> 16) & UINT16_MAX; |
| |
| bool NeedsRotate = VRI.RLAmt || (VRI.Repl32 && !isUInt<32>(Mask)); |
| |
| unsigned NumAndInsts = (unsigned) NeedsRotate + |
| (unsigned) (bool) Res; |
| if (Use32BitInsts) |
| NumAndInsts += (unsigned) (ANDIMask != 0) + (unsigned) (ANDISMask != 0) + |
| (unsigned) (ANDIMask != 0 && ANDISMask != 0); |
| else |
| NumAndInsts += selectI64ImmInstrCount(Mask) + /* and */ 1; |
| |
| unsigned NumRLInsts = 0; |
| bool FirstBG = true; |
| bool MoreBG = false; |
| for (auto &BG : BitGroups) { |
| if (!MatchingBG(BG)) { |
| MoreBG = true; |
| continue; |
| } |
| NumRLInsts += |
| SelectRotMask64Count(BG.RLAmt, BG.Repl32, BG.StartIdx, BG.EndIdx, |
| !FirstBG); |
| FirstBG = false; |
| } |
| |
| LLVM_DEBUG(dbgs() << "\t\trotation groups for " << VRI.V.getNode() |
| << " RL: " << VRI.RLAmt << (VRI.Repl32 ? " (32):" : ":") |
| << "\n\t\t\tisel using masking: " << NumAndInsts |
| << " using rotates: " << NumRLInsts << "\n"); |
| |
| // When we'd use andi/andis, we bias toward using the rotates (andi only |
| // has a record form, and is cracked on POWER cores). However, when using |
| // general 64-bit constant formation, bias toward the constant form, |
| // because that exposes more opportunities for CSE. |
| if (NumAndInsts > NumRLInsts) |
| continue; |
| // When merging multiple bit groups, an OR instruction is needed to combine
| // them. But when rotates are used, rldimi can insert the rotated value
| // into any register, so the OR can be avoided.
| if ((Use32BitInsts || MoreBG) && NumAndInsts == NumRLInsts) |
| continue; |
| |
| LLVM_DEBUG(dbgs() << "\t\t\t\tusing masking\n"); |
| |
| if (InstCnt) *InstCnt += NumAndInsts; |
| |
| SDValue VRot; |
| // We actually need to generate a rotation if we have a non-zero rotation |
| // factor or, in the Repl32 case, if we care about any of the |
| // higher-order replicated bits. In the latter case, we generate a mask |
| // backward so that it actually includes the entire 64 bits. |
| if (VRI.RLAmt || (VRI.Repl32 && !isUInt<32>(Mask))) |
| VRot = SelectRotMask64(VRI.V, dl, VRI.RLAmt, VRI.Repl32, |
| VRI.Repl32 ? 31 : 0, VRI.Repl32 ? 30 : 63); |
| else |
| VRot = VRI.V; |
| |
| SDValue TotalVal; |
| if (Use32BitInsts) { |
| assert((ANDIMask != 0 || ANDISMask != 0) && |
| "No set bits in mask when using 32-bit ands for 64-bit value"); |
| |
| SDValue ANDIVal, ANDISVal; |
| if (ANDIMask != 0) |
| ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDIo8, dl, MVT::i64, |
| ExtendToInt64(VRot, dl), |
| getI32Imm(ANDIMask, dl)), |
| 0); |
| if (ANDISMask != 0) |
| ANDISVal = SDValue(CurDAG->getMachineNode(PPC::ANDISo8, dl, MVT::i64, |
| ExtendToInt64(VRot, dl), |
| getI32Imm(ANDISMask, dl)), |
| 0); |
| |
| if (!ANDIVal) |
| TotalVal = ANDISVal; |
| else if (!ANDISVal) |
| TotalVal = ANDIVal; |
| else |
| TotalVal = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64, |
| ExtendToInt64(ANDIVal, dl), ANDISVal), 0); |
| } else { |
| TotalVal = SDValue(selectI64Imm(CurDAG, dl, Mask), 0); |
| TotalVal = |
| SDValue(CurDAG->getMachineNode(PPC::AND8, dl, MVT::i64, |
| ExtendToInt64(VRot, dl), TotalVal), |
| 0); |
| } |
| |
| if (!Res) |
| Res = TotalVal; |
| else |
| Res = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64, |
| ExtendToInt64(Res, dl), TotalVal), |
| 0); |
| |
| // Now, remove all groups with this underlying value and rotation |
| // factor. |
| eraseMatchingBitGroups(MatchingBG); |
| } |
| } |
| |
| // Instruction selection for the 64-bit case. |
| SDNode *Select64(SDNode *N, bool LateMask, unsigned *InstCnt) { |
| SDLoc dl(N); |
| SDValue Res; |
| |
| if (InstCnt) *InstCnt = 0; |
| |
| // Take care of cases that should use andi/andis first. |
| SelectAndParts64(dl, Res, InstCnt); |
| |
| // If we've not yet selected a 'starting' instruction, and we have no zeros |
| // to fill in, select the (Value, RLAmt) with the highest priority (largest |
| // number of groups), and start with this rotated value. |
| if ((!HasZeros || LateMask) && !Res) { |
| // If we have both Repl32 groups and non-Repl32 groups, the non-Repl32 |
| // groups will come first, and so the VRI representing the largest number |
| // of groups might not be first (it might be the first of the Repl32
| // groups).
| unsigned MaxGroupsIdx = 0; |
| if (!ValueRotsVec[0].Repl32) { |
| for (unsigned i = 0, ie = ValueRotsVec.size(); i < ie; ++i) |
| if (ValueRotsVec[i].Repl32) { |
| if (ValueRotsVec[i].NumGroups > ValueRotsVec[0].NumGroups) |
| MaxGroupsIdx = i; |
| break; |
| } |
| } |
| |
| ValueRotInfo &VRI = ValueRotsVec[MaxGroupsIdx]; |
| bool NeedsRotate = false; |
| if (VRI.RLAmt) { |
| NeedsRotate = true; |
| } else if (VRI.Repl32) { |
| for (auto &BG : BitGroups) { |
| if (BG.V != VRI.V || BG.RLAmt != VRI.RLAmt || |
| BG.Repl32 != VRI.Repl32) |
| continue; |
| |
| // We don't need a rotate if the bit group is confined to the lower |
| // 32 bits. |
| if (BG.StartIdx < 32 && BG.EndIdx < 32 && BG.StartIdx <= BG.EndIdx)
| continue; |
| |
| NeedsRotate = true; |
| break; |
| } |
| } |
| |
| if (NeedsRotate) |
| Res = SelectRotMask64(VRI.V, dl, VRI.RLAmt, VRI.Repl32, |
| VRI.Repl32 ? 31 : 0, VRI.Repl32 ? 30 : 63, |
| InstCnt); |
| else |
| Res = VRI.V; |
| |
| // Now, remove all groups with this underlying value and rotation factor. |
| if (Res) |
| eraseMatchingBitGroups([VRI](const BitGroup &BG) { |
| return BG.V == VRI.V && BG.RLAmt == VRI.RLAmt && |
| BG.Repl32 == VRI.Repl32; |
| }); |
| } |
| |
| // Because 64-bit rotates are more flexible than inserts, we might have a |
| // preference regarding which one we do first (to save one instruction). |
| if (!Res) |
| for (auto I = BitGroups.begin(), IE = BitGroups.end(); I != IE; ++I) { |
| if (SelectRotMask64Count(I->RLAmt, I->Repl32, I->StartIdx, I->EndIdx, |
| false) < |
| SelectRotMask64Count(I->RLAmt, I->Repl32, I->StartIdx, I->EndIdx, |
| true)) { |
| if (I != BitGroups.begin()) { |
| BitGroup BG = *I; |
| BitGroups.erase(I); |
| BitGroups.insert(BitGroups.begin(), BG); |
| } |
| |
| break; |
| } |
| } |
| |
| // Insert the other groups (one at a time). |
| for (auto &BG : BitGroups) { |
| if (!Res) |
| Res = SelectRotMask64(BG.V, dl, BG.RLAmt, BG.Repl32, BG.StartIdx, |
| BG.EndIdx, InstCnt); |
| else |
| Res = SelectRotMaskIns64(Res, BG.V, dl, BG.RLAmt, BG.Repl32, |
| BG.StartIdx, BG.EndIdx, InstCnt); |
| } |
| |
| if (LateMask) { |
| uint64_t Mask = getZerosMask(); |
| |
| // We can use the 32-bit andi/andis technique if the mask does not |
| // require any higher-order bits. This can save an instruction compared |
| // to always using the general 64-bit technique. |
| bool Use32BitInsts = isUInt<32>(Mask); |
| // Compute the masks for andi/andis that would be necessary. |
| unsigned ANDIMask = (Mask & UINT16_MAX), |
| ANDISMask = (Mask >> 16) & UINT16_MAX; |
| |
| if (Use32BitInsts) { |
| assert((ANDIMask != 0 || ANDISMask != 0) && |
| "No set bits in mask when using 32-bit ands for 64-bit value"); |
| |
| if (InstCnt) *InstCnt += (unsigned) (ANDIMask != 0) + |
| (unsigned) (ANDISMask != 0) + |
| (unsigned) (ANDIMask != 0 && ANDISMask != 0); |
| |
| SDValue ANDIVal, ANDISVal; |
| if (ANDIMask != 0) |
| ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDIo8, dl, MVT::i64, |
| ExtendToInt64(Res, dl), getI32Imm(ANDIMask, dl)), 0); |
| if (ANDISMask != 0) |
| ANDISVal = SDValue(CurDAG->getMachineNode(PPC::ANDISo8, dl, MVT::i64, |
| ExtendToInt64(Res, dl), getI32Imm(ANDISMask, dl)), 0); |
| |
| if (!ANDIVal) |
| Res = ANDISVal; |
| else if (!ANDISVal) |
| Res = ANDIVal; |
| else |
| Res = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64, |
| ExtendToInt64(ANDIVal, dl), ANDISVal), 0); |
| } else { |
| if (InstCnt) *InstCnt += selectI64ImmInstrCount(Mask) + /* and */ 1; |
| |
| SDValue MaskVal = SDValue(selectI64Imm(CurDAG, dl, Mask), 0); |
| Res = |
| SDValue(CurDAG->getMachineNode(PPC::AND8, dl, MVT::i64, |
| ExtendToInt64(Res, dl), MaskVal), 0); |
| } |
| } |
| |
| return Res.getNode(); |
| } |
| |
| SDNode *Select(SDNode *N, bool LateMask, unsigned *InstCnt = nullptr) { |
| // Fill in BitGroups. |
| collectBitGroups(LateMask); |
| if (BitGroups.empty()) |
| return nullptr; |
| |
| // For 64-bit values, figure out when we can use 32-bit instructions. |
| if (Bits.size() == 64) |
| assignRepl32BitGroups(); |
| |
| // Fill in ValueRotsVec. |
| collectValueRotInfo(); |
| |
| if (Bits.size() == 32) { |
| return Select32(N, LateMask, InstCnt); |
| } else { |
| assert(Bits.size() == 64 && "Not 64 bits here?"); |
| return Select64(N, LateMask, InstCnt); |
| } |
| } |
| |
| void eraseMatchingBitGroups(function_ref<bool(const BitGroup &)> F) { |
| BitGroups.erase(remove_if(BitGroups, F), BitGroups.end()); |
| } |
| |
| SmallVector<ValueBit, 64> Bits; |
| |
| bool HasZeros; |
| SmallVector<unsigned, 64> RLAmt; |
| |
| SmallVector<BitGroup, 16> BitGroups; |
| |
| DenseMap<std::pair<SDValue, unsigned>, ValueRotInfo> ValueRots; |
| SmallVector<ValueRotInfo, 16> ValueRotsVec; |
| |
| SelectionDAG *CurDAG; |
| |
| public: |
| BitPermutationSelector(SelectionDAG *DAG) |
| : CurDAG(DAG) {} |
| |
| // Here we try to match complex bit permutations into a set of |
| // rotate-and-shift/shift/and/or instructions, using a set of heuristics |
| // known to produce optimal code for common cases (like i32 byte swapping).
| SDNode *Select(SDNode *N) { |
| Memoizer.clear(); |
| auto Result = |
| getValueBits(SDValue(N, 0), N->getValueType(0).getSizeInBits()); |
| if (!Result.first) |
| return nullptr; |
| Bits = std::move(*Result.second); |
| |
| LLVM_DEBUG(dbgs() << "Considering bit-permutation-based instruction" |
| " selection for: "); |
| LLVM_DEBUG(N->dump(CurDAG)); |
| |
| // Fill in RLAmt and set HasZeros.
| computeRotationAmounts(); |
| |
| if (!HasZeros) |
| return Select(N, false); |
| |
| // We currently have two techniques for handling results with zeros: early |
| // masking (the default) and late masking. Late masking is sometimes more |
| // efficient, but because the structure of the bit groups is different, it |
| // is hard to tell without generating both and comparing the results. With |
| // late masking, we ignore zeros in the resulting value when inserting each |
| // set of bit groups, and then mask in the zeros at the end. With early |
| // masking, we only insert the non-zero parts of the result at every step. |
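| // For example, with late masking the zeroing is deferred to a single final
| // AND (andi./andis. or a full 64-bit mask), which can be cheaper than
| // shaping every inserted group around the zero bits.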
| |
| unsigned InstCnt = 0, InstCntLateMask = 0; |
| LLVM_DEBUG(dbgs() << "\tEarly masking:\n"); |
| SDNode *RN = Select(N, false, &InstCnt); |
| LLVM_DEBUG(dbgs() << "\t\tisel would use " << InstCnt << " instructions\n"); |
| |
| LLVM_DEBUG(dbgs() << "\tLate masking:\n"); |
| SDNode *RNLM = Select(N, true, &InstCntLateMask); |
| LLVM_DEBUG(dbgs() << "\t\tisel would use " << InstCntLateMask |
| << " instructions\n"); |
| |
| if (InstCnt <= InstCntLateMask) { |
| LLVM_DEBUG(dbgs() << "\tUsing early-masking for isel\n"); |
| return RN; |
| } |
| |
| LLVM_DEBUG(dbgs() << "\tUsing late-masking for isel\n"); |
| return RNLM; |
| } |
| }; |
| |
| class IntegerCompareEliminator { |
| SelectionDAG *CurDAG; |
| PPCDAGToDAGISel *S; |
| // Conversion type for interpreting results of a 32-bit instruction as |
| // a 64-bit value or vice versa. |
| enum ExtOrTruncConversion { Ext, Trunc }; |
| |
| // Modifiers to guide how an ISD::SETCC node's result is to be computed |
| // in a GPR. |
| // ZExtOrig - use the original condition code, zero-extend value |
| // ZExtInvert - invert the condition code, zero-extend value |
| // SExtOrig - use the original condition code, sign-extend value |
| // SExtInvert - invert the condition code, sign-extend value |
| enum SetccInGPROpts { ZExtOrig, ZExtInvert, SExtOrig, SExtInvert }; |
| |
| // Comparisons against zero to emit GPR code sequences for. Each of these |
| // sequences may need to be emitted for two or more equivalent patterns. |
| // For example (a >= 0) == (a > -1). The direction of the comparison (</>) |
| // matters as well as the extension type: sext (-1/0), zext (1/0). |
| // GEZExt - (zext (LHS >= 0)) |
| // GESExt - (sext (LHS >= 0)) |
| // LEZExt - (zext (LHS <= 0)) |
| // LESExt - (sext (LHS <= 0)) |
| enum ZeroCompare { GEZExt, GESExt, LEZExt, LESExt }; |
| |
| SDNode *tryEXTEND(SDNode *N); |
| SDNode *tryLogicOpOfCompares(SDNode *N); |
| SDValue computeLogicOpInGPR(SDValue LogicOp); |
| SDValue signExtendInputIfNeeded(SDValue Input); |
| SDValue zeroExtendInputIfNeeded(SDValue Input); |
| SDValue addExtOrTrunc(SDValue NatWidthRes, ExtOrTruncConversion Conv); |
| SDValue getCompoundZeroComparisonInGPR(SDValue LHS, SDLoc dl, |
| ZeroCompare CmpTy); |
| SDValue get32BitZExtCompare(SDValue LHS, SDValue RHS, ISD::CondCode CC, |
| int64_t RHSValue, SDLoc dl); |
| SDValue get32BitSExtCompare(SDValue LHS, SDValue RHS, ISD::CondCode CC, |
| int64_t RHSValue, SDLoc dl); |
| SDValue get64BitZExtCompare(SDValue LHS, SDValue RHS, ISD::CondCode CC, |
| int64_t RHSValue, SDLoc dl); |
| SDValue get64BitSExtCompare(SDValue LHS, SDValue RHS, ISD::CondCode CC, |
| int64_t RHSValue, SDLoc dl); |
| SDValue getSETCCInGPR(SDValue Compare, SetccInGPROpts ConvOpts); |
| |
| public: |
| IntegerCompareEliminator(SelectionDAG *DAG, |
| PPCDAGToDAGISel *Sel) : CurDAG(DAG), S(Sel) { |
| assert(CurDAG->getTargetLoweringInfo() |
| .getPointerTy(CurDAG->getDataLayout()).getSizeInBits() == 64 && |
| "Only expecting to use this on 64 bit targets."); |
| } |
| SDNode *Select(SDNode *N) { |
| if (CmpInGPR == ICGPR_None) |
| return nullptr; |
| switch (N->getOpcode()) { |
| default: break; |
| case ISD::ZERO_EXTEND: |
| if (CmpInGPR == ICGPR_Sext || CmpInGPR == ICGPR_SextI32 || |
| CmpInGPR == ICGPR_SextI64) |
| return nullptr; |
| LLVM_FALLTHROUGH; |
| case ISD::SIGN_EXTEND: |
| if (CmpInGPR == ICGPR_Zext || CmpInGPR == ICGPR_ZextI32 || |
| CmpInGPR == ICGPR_ZextI64) |
| return nullptr; |
| return tryEXTEND(N); |
| case ISD::AND: |
| case ISD::OR: |
| case ISD::XOR: |
| return tryLogicOpOfCompares(N); |
| } |
| return nullptr; |
| } |
| }; |
| |
| static bool isLogicOp(unsigned Opc) { |
| return Opc == ISD::AND || Opc == ISD::OR || Opc == ISD::XOR; |
| } |
| // The obvious case for wanting to keep the value in a GPR. Namely, the |
| // result of the comparison is actually needed in a GPR. |
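| // For example, (zext i1 (setcc ...)) to i32/i64 whose result feeds an
| // arithmetic op wants the 0/1 value in a GPR rather than in a CR field.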
| SDNode *IntegerCompareEliminator::tryEXTEND(SDNode *N) { |
| assert((N->getOpcode() == ISD::ZERO_EXTEND || |
| N->getOpcode() == ISD::SIGN_EXTEND) && |
| "Expecting a zero/sign extend node!"); |
| SDValue WideRes; |
| // If we are zero-extending the result of a logical operation on i1 |
| // values, we can keep the values in GPRs. |
| if (isLogicOp(N->getOperand(0).getOpcode()) && |
| N->getOperand(0).getValueType() == MVT::i1 && |
| N->getOpcode() == ISD::ZERO_EXTEND) |
| WideRes = computeLogicOpInGPR(N->getOperand(0)); |
| else if (N->getOperand(0).getOpcode() != ISD::SETCC) |
| return nullptr; |
| else |
| WideRes = |
| getSETCCInGPR(N->getOperand(0), |
| N->getOpcode() == ISD::SIGN_EXTEND ? |
| SetccInGPROpts::SExtOrig : SetccInGPROpts::ZExtOrig); |
| |
| if (!WideRes) |
| return nullptr; |
| |
| SDLoc dl(N); |
| bool Input32Bit = WideRes.getValueType() == MVT::i32; |
| bool Output32Bit = N->getValueType(0) == MVT::i32; |
| |
| NumSextSetcc += N->getOpcode() == ISD::SIGN_EXTEND ? 1 : 0; |
| NumZextSetcc += N->getOpcode() == ISD::SIGN_EXTEND ? 0 : 1; |
| |
| SDValue ConvOp = WideRes; |
| if (Input32Bit != Output32Bit) |
| ConvOp = addExtOrTrunc(WideRes, Input32Bit ? ExtOrTruncConversion::Ext : |
| ExtOrTruncConversion::Trunc); |
| return ConvOp.getNode(); |
| } |
| |
| // Attempt to perform logical operations on the results of comparisons while |
| // keeping the values in GPRs. Without doing so, these would end up being |
| // lowered to CR-logical operations which suffer from significant latency and |
| // low ILP. |
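| // For example, an i1 (or (setcc ...), (setcc ...)) would otherwise be
| // selected to compares feeding a CR-logical cror; here we compute both
| // setcc results in GPRs and use a regular or instead.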
| SDNode *IntegerCompareEliminator::tryLogicOpOfCompares(SDNode *N) { |
| if (N->getValueType(0) != MVT::i1) |
| return nullptr; |
| assert(isLogicOp(N->getOpcode()) && |
| "Expected a logic operation on setcc results."); |
| SDValue LoweredLogical = computeLogicOpInGPR(SDValue(N, 0)); |
| if (!LoweredLogical) |
| return nullptr; |
| |
| SDLoc dl(N); |
| bool IsBitwiseNegate = LoweredLogical.getMachineOpcode() == PPC::XORI8; |
| unsigned SubRegToExtract = IsBitwiseNegate ? PPC::sub_eq : PPC::sub_gt; |
| SDValue CR0Reg = CurDAG->getRegister(PPC::CR0, MVT::i32); |
| SDValue LHS = LoweredLogical.getOperand(0); |
| SDValue RHS = LoweredLogical.getOperand(1); |
| SDValue WideOp; |
| SDValue OpToConvToRecForm; |
| |
| // Look through any 32-bit to 64-bit implicit extend nodes to find the |
| // opcode that is input to the XORI. |
| if (IsBitwiseNegate && |
| LoweredLogical.getOperand(0).getMachineOpcode() == PPC::INSERT_SUBREG) |
| OpToConvToRecForm = LoweredLogical.getOperand(0).getOperand(1); |
| else if (IsBitwiseNegate) |
| // If the input to the XORI isn't an extension, that's what we're after. |
| OpToConvToRecForm = LoweredLogical.getOperand(0); |
| else |
| // If this is not an XORI, it is a reg-reg logical op and we can convert |
| // it to record-form. |
| OpToConvToRecForm = LoweredLogical; |
| |
| // Get the record-form version of the node we're looking to use to get the |
| // CR result from. |
| uint16_t NonRecOpc = OpToConvToRecForm.getMachineOpcode(); |
| int NewOpc = PPCInstrInfo::getRecordFormOpcode(NonRecOpc); |
| |
| // Convert the right node to record-form. This is either the logical we're |
| // looking at or it is the input node to the negation (if we're looking at |
| // a bitwise negation). |
| if (NewOpc != -1 && IsBitwiseNegate) { |
| // The input to the XORI has a record-form. Use it. |
| assert(LoweredLogical.getConstantOperandVal(1) == 1 && |
| "Expected a PPC::XORI8 only for bitwise negation."); |
| // Emit the record-form instruction. |
| std::vector<SDValue> Ops; |
| for (int i = 0, e = OpToConvToRecForm.getNumOperands(); i < e; i++) |
| Ops.push_back(OpToConvToRecForm.getOperand(i)); |
| |
| WideOp = |
| SDValue(CurDAG->getMachineNode(NewOpc, dl, |
| OpToConvToRecForm.getValueType(), |
| MVT::Glue, Ops), 0); |
| } else { |
| assert((NewOpc != -1 || !IsBitwiseNegate) && |
| "No record form available for AND8/OR8/XOR8?"); |
| WideOp = |
| SDValue(CurDAG->getMachineNode(NewOpc == -1 ? PPC::ANDIo8 : NewOpc, dl, |
| MVT::i64, MVT::Glue, LHS, RHS), 0); |
| } |
| |
| // Select this node to a single bit from CR0 set by the record-form node |
| // just created. For bitwise negation, use the EQ bit which is the equivalent |
| // of negating the result (i.e. it is a bit set when the result of the |
| // operation is zero). |
| SDValue SRIdxVal = |
| CurDAG->getTargetConstant(SubRegToExtract, dl, MVT::i32); |
| SDValue CRBit = |
| SDValue(CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, |
| MVT::i1, CR0Reg, SRIdxVal, |
| WideOp.getValue(1)), 0); |
| return CRBit.getNode(); |
| } |
| |
| // Lower a logical operation on i1 values into a GPR sequence if possible. |
| // The result can be kept in a GPR if requested. |
| // Three types of inputs can be handled: |
| // - SETCC |
| // - TRUNCATE |
| // - Logical operation (AND/OR/XOR) |
| // There is also a special case that is handled (namely a complement operation |
| // achieved with xor %a, -1). |
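| // For example, (xor %a, -1) on i1 is recognized via isBitwiseNot and
| // emitted as XORI8 %lhs, 1, which flips the 0/1 value held in the GPR.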
| SDValue IntegerCompareEliminator::computeLogicOpInGPR(SDValue LogicOp) { |
| assert(isLogicOp(LogicOp.getOpcode()) && |
| "Can only handle logic operations here."); |
| assert(LogicOp.getValueType() == MVT::i1 && |
| "Can only handle logic operations on i1 values here."); |
| SDLoc dl(LogicOp); |
| SDValue LHS, RHS; |
| |
| // Special case: xor %a, -1 |
| bool IsBitwiseNegation = isBitwiseNot(LogicOp); |
| |
| // Produces a GPR sequence for each operand of the binary logic operation. |
| // For SETCC, it produces the respective comparison, for TRUNCATE it truncates |
| // the value in a GPR and for logic operations, it will recursively produce |
| // a GPR sequence for the operation. |
| auto getLogicOperand = [&] (SDValue Operand) -> SDValue { |
| unsigned OperandOpcode = Operand.getOpcode(); |
| if (OperandOpcode == ISD::SETCC) |
| return getSETCCInGPR(Operand, SetccInGPROpts::ZExtOrig); |
| else if (OperandOpcode == ISD::TRUNCATE) { |
| SDValue InputOp = Operand.getOperand(0); |
| EVT InVT = InputOp.getValueType(); |
| return SDValue(CurDAG->getMachineNode(InVT == MVT::i32 ? PPC::RLDICL_32 : |
| PPC::RLDICL, dl, InVT, InputOp, |
| S->getI64Imm(0, dl), |
| S->getI64Imm(63, dl)), 0); |
| } else if (isLogicOp(OperandOpcode)) |
| return computeLogicOpInGPR(Operand); |
| return SDValue(); |
| }; |
| LHS = getLogicOperand(LogicOp.getOperand(0)); |
| RHS = getLogicOperand(LogicOp.getOperand(1)); |
| |
| // If a GPR sequence can't be produced for the LHS we can't proceed. |
| // Not producing a GPR sequence for the RHS is only a problem if this isn't |
| // a bitwise negation operation. |
| if (!LHS || (!RHS && !IsBitwiseNegation)) |
| return SDValue(); |
| |
| NumLogicOpsOnComparison++; |
| |
| // We will use the inputs as 64-bit values. |
| if (LHS.getValueType() == MVT::i32) |
| LHS = addExtOrTrunc(LHS, ExtOrTruncConversion::Ext); |
| if (!IsBitwiseNegation && RHS.getValueType() == MVT::i32) |
| RHS = addExtOrTrunc(RHS, ExtOrTruncConversion::Ext); |
| |
| unsigned NewOpc; |
| switch (LogicOp.getOpcode()) { |
| default: llvm_unreachable("Unknown logic operation."); |
| case ISD::AND: NewOpc = PPC::AND8; break; |
| case ISD::OR: NewOpc = PPC::OR8; break; |
| case ISD::XOR: NewOpc = PPC::XOR8; break; |
| } |
| |
| if (IsBitwiseNegation) { |
| RHS = S->getI64Imm(1, dl); |
| NewOpc = PPC::XORI8; |
| } |
| |
| return SDValue(CurDAG->getMachineNode(NewOpc, dl, MVT::i64, LHS, RHS), 0); |
| } |
| |
| /// If the value isn't guaranteed to be sign-extended to 64-bits, extend it. |
| /// Otherwise just reinterpret it as a 64-bit value. |
| /// Useful when emitting comparison code for 32-bit values without using |
| /// the compare instruction (which only considers the lower 32-bits). |
| SDValue IntegerCompareEliminator::signExtendInputIfNeeded(SDValue Input) { |
| assert(Input.getValueType() == MVT::i32 && |
| "Can only sign-extend 32-bit values here."); |
| unsigned |