diff options
Diffstat (limited to 'lib/Target/Hexagon/HexagonInstrInfo.cpp')
-rw-r--r-- | lib/Target/Hexagon/HexagonInstrInfo.cpp | 128 |
1 file changed, 91 insertions(+), 37 deletions(-)
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp index b265a883da5c..852bfb1b4f54 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -869,6 +869,9 @@ void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, MachineFrameInfo &MFI = MF.getFrameInfo(); unsigned Align = MFI.getObjectAlignment(FI); unsigned KillFlag = getKillRegState(isKill); + bool HasAlloca = MFI.hasVarSizedObjects(); + const auto &HST = MF.getSubtarget<HexagonSubtarget>(); + const HexagonFrameLowering &HFI = *HST.getFrameLowering(); MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore, @@ -899,24 +902,36 @@ void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, .addFrameIndex(FI).addImm(0) .addReg(SrcReg, KillFlag).addMemOperand(MMO); } else if (Hexagon::VectorRegs128BRegClass.hasSubClassEq(RC)) { + // If there are variable-sized objects, spills will not be aligned. + if (HasAlloca) + Align = HFI.getStackAlignment(); unsigned Opc = Align < 128 ? Hexagon::V6_vS32Ub_ai_128B : Hexagon::V6_vS32b_ai_128B; BuildMI(MBB, I, DL, get(Opc)) .addFrameIndex(FI).addImm(0) .addReg(SrcReg, KillFlag).addMemOperand(MMO); } else if (Hexagon::VectorRegsRegClass.hasSubClassEq(RC)) { + // If there are variable-sized objects, spills will not be aligned. + if (HasAlloca) + Align = HFI.getStackAlignment(); unsigned Opc = Align < 64 ? Hexagon::V6_vS32Ub_ai : Hexagon::V6_vS32b_ai; BuildMI(MBB, I, DL, get(Opc)) .addFrameIndex(FI).addImm(0) .addReg(SrcReg, KillFlag).addMemOperand(MMO); } else if (Hexagon::VecDblRegsRegClass.hasSubClassEq(RC)) { + // If there are variable-sized objects, spills will not be aligned. + if (HasAlloca) + Align = HFI.getStackAlignment(); unsigned Opc = Align < 64 ? 
Hexagon::PS_vstorerwu_ai : Hexagon::PS_vstorerw_ai; BuildMI(MBB, I, DL, get(Opc)) .addFrameIndex(FI).addImm(0) .addReg(SrcReg, KillFlag).addMemOperand(MMO); } else if (Hexagon::VecDblRegs128BRegClass.hasSubClassEq(RC)) { + // If there are variable-sized objects, spills will not be aligned. + if (HasAlloca) + Align = HFI.getStackAlignment(); unsigned Opc = Align < 128 ? Hexagon::PS_vstorerwu_ai_128B : Hexagon::PS_vstorerw_ai_128B; BuildMI(MBB, I, DL, get(Opc)) @@ -935,6 +950,9 @@ void HexagonInstrInfo::loadRegFromStackSlot( MachineFunction &MF = *MBB.getParent(); MachineFrameInfo &MFI = MF.getFrameInfo(); unsigned Align = MFI.getObjectAlignment(FI); + bool HasAlloca = MFI.hasVarSizedObjects(); + const auto &HST = MF.getSubtarget<HexagonSubtarget>(); + const HexagonFrameLowering &HFI = *HST.getFrameLowering(); MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad, @@ -959,21 +977,33 @@ void HexagonInstrInfo::loadRegFromStackSlot( BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrq_ai), DestReg) .addFrameIndex(FI).addImm(0).addMemOperand(MMO); } else if (Hexagon::VecDblRegs128BRegClass.hasSubClassEq(RC)) { + // If there are variable-sized objects, spills will not be aligned. + if (HasAlloca) + Align = HFI.getStackAlignment(); unsigned Opc = Align < 128 ? Hexagon::PS_vloadrwu_ai_128B : Hexagon::PS_vloadrw_ai_128B; BuildMI(MBB, I, DL, get(Opc), DestReg) .addFrameIndex(FI).addImm(0).addMemOperand(MMO); } else if (Hexagon::VectorRegs128BRegClass.hasSubClassEq(RC)) { + // If there are variable-sized objects, spills will not be aligned. + if (HasAlloca) + Align = HFI.getStackAlignment(); unsigned Opc = Align < 128 ? Hexagon::V6_vL32Ub_ai_128B : Hexagon::V6_vL32b_ai_128B; BuildMI(MBB, I, DL, get(Opc), DestReg) .addFrameIndex(FI).addImm(0).addMemOperand(MMO); } else if (Hexagon::VectorRegsRegClass.hasSubClassEq(RC)) { + // If there are variable-sized objects, spills will not be aligned. 
+ if (HasAlloca) + Align = HFI.getStackAlignment(); unsigned Opc = Align < 64 ? Hexagon::V6_vL32Ub_ai : Hexagon::V6_vL32b_ai; BuildMI(MBB, I, DL, get(Opc), DestReg) .addFrameIndex(FI).addImm(0).addMemOperand(MMO); } else if (Hexagon::VecDblRegsRegClass.hasSubClassEq(RC)) { + // If there are variable-sized objects, spills will not be aligned. + if (HasAlloca) + Align = HFI.getStackAlignment(); unsigned Opc = Align < 64 ? Hexagon::PS_vloadrwu_ai : Hexagon::PS_vloadrw_ai; BuildMI(MBB, I, DL, get(Opc), DestReg) @@ -1110,8 +1140,9 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { unsigned Offset = Is128B ? VecOffset << 7 : VecOffset << 6; MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpc), HRI.getSubReg(DstReg, Hexagon::vsub_lo)) - .add(MI.getOperand(1)) - .addImm(MI.getOperand(2).getImm()); + .add(MI.getOperand(1)) + .addImm(MI.getOperand(2).getImm()) + .setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); MI1New->getOperand(1).setIsKill(false); BuildMI(MBB, MI, DL, get(NewOpc), HRI.getSubReg(DstReg, Hexagon::vsub_hi)) .add(MI.getOperand(1)) @@ -1940,7 +1971,7 @@ bool HexagonInstrInfo::isDeallocRet(const MachineInstr &MI) const { case Hexagon::L4_return_fnew_pnt : case Hexagon::L4_return_tnew_pt : case Hexagon::L4_return_fnew_pt : - return true; + return true; } return false; } @@ -1967,12 +1998,12 @@ bool HexagonInstrInfo::isDependent(const MachineInstr &ProdMI, if (RegA == RegB) return true; - if (Hexagon::DoubleRegsRegClass.contains(RegA)) + if (TargetRegisterInfo::isPhysicalRegister(RegA)) for (MCSubRegIterator SubRegs(RegA, &HRI); SubRegs.isValid(); ++SubRegs) if (RegB == *SubRegs) return true; - if (Hexagon::DoubleRegsRegClass.contains(RegB)) + if (TargetRegisterInfo::isPhysicalRegister(RegB)) for (MCSubRegIterator SubRegs(RegB, &HRI); SubRegs.isValid(); ++SubRegs) if (RegA == *SubRegs) return true; @@ -2139,7 +2170,7 @@ bool HexagonInstrInfo::isJumpR(const MachineInstr &MI) const { bool 
HexagonInstrInfo::isJumpWithinBranchRange(const MachineInstr &MI, unsigned offset) const { // This selection of jump instructions matches to that what - // AnalyzeBranch can parse, plus NVJ. + // analyzeBranch can parse, plus NVJ. if (isNewValueJump(MI)) // r9:2 return isInt<11>(offset); @@ -2666,6 +2697,7 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset, case Hexagon::L2_loadrh_io: case Hexagon::L2_loadruh_io: case Hexagon::S2_storerh_io: + case Hexagon::S2_storerf_io: return (Offset >= Hexagon_MEMH_OFFSET_MIN) && (Offset <= Hexagon_MEMH_OFFSET_MAX); @@ -2876,6 +2908,11 @@ bool HexagonInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, /// \brief Can these instructions execute at the same time in a bundle. bool HexagonInstrInfo::canExecuteInBundle(const MachineInstr &First, const MachineInstr &Second) const { + if (Second.mayStore() && First.getOpcode() == Hexagon::S2_allocframe) { + const MachineOperand &Op = Second.getOperand(0); + if (Op.isReg() && Op.isUse() && Op.getReg() == Hexagon::R29) + return true; + } if (DisableNVSchedule) return false; if (mayBeNewStore(Second)) { @@ -3000,13 +3037,9 @@ bool HexagonInstrInfo::producesStall(const MachineInstr &MI, MachineBasicBlock::const_instr_iterator MII = BII; MachineBasicBlock::const_instr_iterator MIE = MII->getParent()->instr_end(); - if (!MII->isBundle()) { + if (!(*MII).isBundle()) { const MachineInstr &J = *MII; - if (!isV60VectorInstruction(J)) - return false; - else if (isVecUsableNextPacket(J, MI)) - return false; - return true; + return producesStall(J, MI); } for (++MII; MII != MIE && MII->isInsideBundle(); ++MII) { @@ -3034,12 +3067,14 @@ bool HexagonInstrInfo::predCanBeUsedAsDotNew(const MachineInstr &MI, } bool HexagonInstrInfo::PredOpcodeHasJMP_c(unsigned Opcode) const { - return (Opcode == Hexagon::J2_jumpt) || - (Opcode == Hexagon::J2_jumpf) || - (Opcode == Hexagon::J2_jumptnew) || - (Opcode == Hexagon::J2_jumpfnew) || - (Opcode == Hexagon::J2_jumptnewpt) || - (Opcode == 
Hexagon::J2_jumpfnewpt); + return Opcode == Hexagon::J2_jumpt || + Opcode == Hexagon::J2_jumptpt || + Opcode == Hexagon::J2_jumpf || + Opcode == Hexagon::J2_jumpfpt || + Opcode == Hexagon::J2_jumptnew || + Opcode == Hexagon::J2_jumpfnew || + Opcode == Hexagon::J2_jumptnewpt || + Opcode == Hexagon::J2_jumpfnewpt; } bool HexagonInstrInfo::predOpcodeHasNot(ArrayRef<MachineOperand> Cond) const { @@ -3341,9 +3376,30 @@ int HexagonInstrInfo::getDotCurOp(const MachineInstr &MI) const { return 0; } +// Return the regular version of the .cur instruction. +int HexagonInstrInfo::getNonDotCurOp(const MachineInstr &MI) const { + switch (MI.getOpcode()) { + default: llvm_unreachable("Unknown .cur type"); + case Hexagon::V6_vL32b_cur_pi: + return Hexagon::V6_vL32b_pi; + case Hexagon::V6_vL32b_cur_ai: + return Hexagon::V6_vL32b_ai; + //128B + case Hexagon::V6_vL32b_cur_pi_128B: + return Hexagon::V6_vL32b_pi_128B; + case Hexagon::V6_vL32b_cur_ai_128B: + return Hexagon::V6_vL32b_ai_128B; + } + return 0; +} + + // The diagram below shows the steps involved in the conversion of a predicated // store instruction to its .new predicated new-value form. // +// Note: It doesn't include conditional new-value stores as they can't be +// converted to .new predicate. +// // p.new NV store [ if(p0.new)memw(R0+#0)=R2.new ] // ^ ^ // / \ (not OK. 
it will cause new-value store to be @@ -3564,11 +3620,11 @@ int HexagonInstrInfo::getDotNewPredOp(const MachineInstr &MI, } int HexagonInstrInfo::getDotOldOp(const MachineInstr &MI) const { + const MachineFunction &MF = *MI.getParent()->getParent(); + const HexagonSubtarget &HST = MF.getSubtarget<HexagonSubtarget>(); int NewOp = MI.getOpcode(); if (isPredicated(NewOp) && isPredicatedNew(NewOp)) { // Get predicate old form NewOp = Hexagon::getPredOldOpcode(NewOp); - const MachineFunction &MF = *MI.getParent()->getParent(); - const HexagonSubtarget &HST = MF.getSubtarget<HexagonSubtarget>(); // All Hexagon architectures have prediction bits on dot-new branches, // but only Hexagon V60+ has prediction bits on dot-old ones. Make sure // to pick the right opcode when converting back to dot-old. @@ -3596,6 +3652,21 @@ int HexagonInstrInfo::getDotOldOp(const MachineInstr &MI) const { NewOp = Hexagon::getNonNVStore(NewOp); assert(NewOp >= 0 && "Couldn't change new-value store to its old form."); } + + if (HST.hasV60TOps()) + return NewOp; + + // Subtargets prior to V60 didn't support 'taken' forms of predicated jumps. + switch (NewOp) { + case Hexagon::J2_jumpfpt: + return Hexagon::J2_jumpf; + case Hexagon::J2_jumptpt: + return Hexagon::J2_jumpt; + case Hexagon::J2_jumprfpt: + return Hexagon::J2_jumprf; + case Hexagon::J2_jumprtpt: + return Hexagon::J2_jumprt; + } return NewOp; } @@ -3947,18 +4018,6 @@ short HexagonInstrInfo::getEquivalentHWInstr(const MachineInstr &MI) const { return Hexagon::getRealHWInstr(MI.getOpcode(), Hexagon::InstrType_Real); } -// Return first non-debug instruction in the basic block. 
-MachineInstr *HexagonInstrInfo::getFirstNonDbgInst(MachineBasicBlock *BB) - const { - for (auto MII = BB->instr_begin(), End = BB->instr_end(); MII != End; MII++) { - MachineInstr &MI = *MII; - if (MI.isDebugValue()) - continue; - return &MI; - } - return nullptr; -} - unsigned HexagonInstrInfo::getInstrTimingClassLatency( const InstrItineraryData *ItinData, const MachineInstr &MI) const { // Default to one cycle for no itinerary. However, an "empty" itinerary may @@ -4139,11 +4198,6 @@ unsigned HexagonInstrInfo::getUnits(const MachineInstr &MI) const { return IS.getUnits(); } -unsigned HexagonInstrInfo::getValidSubTargets(const unsigned Opcode) const { - const uint64_t F = get(Opcode).TSFlags; - return (F >> HexagonII::validSubTargetPos) & HexagonII::validSubTargetMask; -} - // Calculate size of the basic block without debug instructions. unsigned HexagonInstrInfo::nonDbgBBSize(const MachineBasicBlock *BB) const { return nonDbgMICount(BB->instr_begin(), BB->instr_end()); |