author     Dimitry Andric <dim@FreeBSD.org>    2019-01-19 10:01:25 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2019-01-19 10:01:25 +0000
commit     d8e91e46262bc44006913e6796843909f1ac7bcd (patch)
tree       7d0c143d9b38190e0fa0180805389da22cd834c5 /lib/CodeGen/GlobalISel/CombinerHelper.cpp
parent     b7eb8e35e481a74962664b63dfb09483b200209a (diff)
download   src-d8e91e46262bc44006913e6796843909f1ac7bcd.tar.gz
           src-d8e91e46262bc44006913e6796843909f1ac7bcd.zip
Vendor import of llvm trunk r351319 (just before the release_80 branch)    vendor/llvm/llvm-trunk-r351319
Notes:
    svn path=/vendor/llvm/dist/; revision=343171
    svn path=/vendor/llvm/llvm-trunk-r351319/; revision=343172; tag=vendor/llvm/llvm-trunk-r351319
Diffstat (limited to 'lib/CodeGen/GlobalISel/CombinerHelper.cpp')
-rw-r--r--    lib/CodeGen/GlobalISel/CombinerHelper.cpp    306
1 files changed, 300 insertions, 6 deletions
diff --git a/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 44e904a6391b..b1c5670a6dec 100644
--- a/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -1,4 +1,4 @@
-//== ---lib/CodeGen/GlobalISel/GICombinerHelper.cpp --------------------- == //
+//===-- lib/CodeGen/GlobalISel/GICombinerHelper.cpp -----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,17 +7,44 @@
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
+#include "llvm/CodeGen/GlobalISel/Combiner.h"
+#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
-#define DEBUG_TYPE "gi-combine"
+#define DEBUG_TYPE "gi-combiner"
using namespace llvm;
-CombinerHelper::CombinerHelper(MachineIRBuilder &B) :
- Builder(B), MRI(Builder.getMF().getRegInfo()) {}
+CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
+ MachineIRBuilder &B)
+ : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer) {}
+
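+// Replace all uses of FromReg with ToReg. If ToReg's attributes cannot be
+// constrained to also satisfy FromReg's, fall back to emitting a COPY from
+// FromReg into ToReg. The change observer is notified around the update.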
+void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, unsigned FromReg,
+ unsigned ToReg) const {
+ Observer.changingAllUsesOfReg(MRI, FromReg);
+
+ if (MRI.constrainRegAttrs(ToReg, FromReg))
+ MRI.replaceRegWith(FromReg, ToReg);
+ else
+ Builder.buildCopy(ToReg, FromReg);
+
+ Observer.finishedChangingAllUsesOfReg();
+}
+
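+// Rewrite a single register operand to read ToReg instead, notifying the
+// change observer about the modification to the owning instruction.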
+void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
+ MachineOperand &FromRegOp,
+ unsigned ToReg) const {
+ assert(FromRegOp.getParent() && "Expected an operand in an MI");
+ Observer.changingInstr(*FromRegOp.getParent());
+
+ FromRegOp.setReg(ToReg);
+
+ Observer.changedInstr(*FromRegOp.getParent());
+}
bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
if (MI.getOpcode() != TargetOpcode::COPY)
@@ -30,12 +57,279 @@ bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
// a(sx) = COPY b(sx) -> Replace all uses of a with b.
if (DstTy.isValid() && SrcTy.isValid() && DstTy == SrcTy) {
MI.eraseFromParent();
- MRI.replaceRegWith(DstReg, SrcReg);
+ replaceRegWith(MRI, DstReg, SrcReg);
return true;
}
return false;
}
+namespace {
+struct PreferredTuple {
+ LLT Ty; // The result type of the extend.
+ unsigned ExtendOpcode; // G_ANYEXT/G_SEXT/G_ZEXT
+ MachineInstr *MI;
+};
+
+/// Select a preference between two uses. CurrentUse is the current preference
+/// while the *ForCandidate arguments describe the candidate under
+/// consideration.
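+/// As an illustrative sketch (hypothetical types): with a current preference
+/// of G_ZEXT to s32, a G_SEXT candidate to s32 wins because sign extensions
+/// are preferred at equal widths, while a G_ANYEXT candidate loses because
+/// defined extensions are preferred over undefined ones.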
+PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse,
+ const LLT &TyForCandidate,
+ unsigned OpcodeForCandidate,
+ MachineInstr *MIForCandidate) {
+ if (!CurrentUse.Ty.isValid()) {
+ if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
+ CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
+ return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
+ return CurrentUse;
+ }
+
+ // We permit the extend to hoist through basic blocks but this is only
+ // sensible if the target has extending loads. If you end up lowering back
+ // into a load and extend during the legalizer then the end result is
+ // hoisting the extend up to the load.
+
+ // Prefer defined extensions to undefined extensions as these are more
+ // likely to reduce the number of instructions.
+ if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
+ CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
+ return CurrentUse;
+ else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
+ OpcodeForCandidate != TargetOpcode::G_ANYEXT)
+ return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
+
+ // Prefer sign extensions to zero extensions as sign-extensions tend to be
+ // more expensive, so folding the sign extension into the load is the bigger
+ // win.
+ if (CurrentUse.Ty == TyForCandidate) {
+ if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
+ OpcodeForCandidate == TargetOpcode::G_ZEXT)
+ return CurrentUse;
+ else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
+ OpcodeForCandidate == TargetOpcode::G_SEXT)
+ return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
+ }
+
+ // This is potentially target specific. We've chosen the largest type
+ // because G_TRUNC is usually free. One potential catch with this is that
+ // some targets have fewer of the larger registers than of the smaller ones,
+ // and this choice potentially increases the live-range of the larger value.
+ if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
+ return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
+ }
+ return CurrentUse;
+}
+
+/// Find a suitable place to insert some instructions and insert them. This
+/// function accounts for special cases like inserting before a PHI node.
+/// The current strategy for inserting before PHIs is to duplicate the
+/// instructions for each predecessor. However, while that's ok for G_TRUNC
+/// on most targets since it generally requires no code, other targets/cases may
+/// want to try harder to find a dominating block.
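+/// As an illustrative sketch (hypothetical blocks and registers): for the use
+/// operand %a in "%p:_(s32) = G_PHI %a(s32), %bb.1, %b(s32), %bb.2", the
+/// insertion point is inside %bb.1, the predecessor associated with %a,
+/// rather than inside the block containing the PHI itself.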
+static void InsertInsnsWithoutSideEffectsBeforeUse(
+ MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
+ std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator)>
+ Inserter) {
+ MachineInstr &UseMI = *UseMO.getParent();
+
+ MachineBasicBlock *InsertBB = UseMI.getParent();
+
+ // If the use is a PHI then we want the predecessor block instead.
+ if (UseMI.isPHI()) {
+ MachineOperand *PredBB = std::next(&UseMO);
+ InsertBB = PredBB->getMBB();
+ }
+
+ // If the block is the same block as the def then we want to insert just after
+ // the def instead of at the start of the block.
+ if (InsertBB == DefMI.getParent()) {
+ MachineBasicBlock::iterator InsertPt = &DefMI;
+ Inserter(InsertBB, std::next(InsertPt));
+ return;
+ }
+
+ // Otherwise we want the start of the BB
+ Inserter(InsertBB, InsertBB->getFirstNonPHI());
+}
+} // end anonymous namespace
+
+bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
+ struct InsertionPoint {
+ MachineOperand *UseMO;
+ MachineBasicBlock *InsertIntoBB;
+ MachineBasicBlock::iterator InsertBefore;
+ InsertionPoint(MachineOperand *UseMO, MachineBasicBlock *InsertIntoBB,
+ MachineBasicBlock::iterator InsertBefore)
+ : UseMO(UseMO), InsertIntoBB(InsertIntoBB), InsertBefore(InsertBefore) {
+ }
+ };
+
+ // We match the loads and follow the uses to the extend instead of matching
+ // the extends and following the def to the load. This is because the load
+ // must remain in the same position for correctness (unless we also add code
+ // to find a safe place to sink it) whereas the extend is freely movable.
+ // It also prevents us from duplicating the load for the volatile case or just
+ // for performance.
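+ // As an overall sketch: (G_SEXT (G_LOAD x)) becomes (G_SEXTLOAD x), and the
+ // remaining users of the loaded value are rewritten to reuse, extend, or
+ // truncate the new load's result according to their types.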
+
+ if (MI.getOpcode() != TargetOpcode::G_LOAD &&
+ MI.getOpcode() != TargetOpcode::G_SEXTLOAD &&
+ MI.getOpcode() != TargetOpcode::G_ZEXTLOAD)
+ return false;
+
+ auto &LoadValue = MI.getOperand(0);
+ assert(LoadValue.isReg() && "Result wasn't a register?");
+
+ LLT LoadValueTy = MRI.getType(LoadValue.getReg());
+ if (!LoadValueTy.isScalar())
+ return false;
+
+ // Find the preferred type aside from the any-extends (unless it's the only
+ // one) and non-extending ops. We'll emit an extending load to that type and
+ // emit a variant of (extend (trunc X)) for the others according to the
+ // relative type sizes. At the same time, pick an extend to use based on the
+ // extend involved in the chosen type.
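+ // As an illustrative sketch (hypothetical types): an s8 G_LOAD whose result
+ // feeds both a G_SEXT to s32 and a G_ZEXT to s64 prefers {s64, G_ZEXT},
+ // since the larger type wins when the candidate sizes differ.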
+ unsigned PreferredOpcode = MI.getOpcode() == TargetOpcode::G_LOAD
+ ? TargetOpcode::G_ANYEXT
+ : MI.getOpcode() == TargetOpcode::G_SEXTLOAD
+ ? TargetOpcode::G_SEXT
+ : TargetOpcode::G_ZEXT;
+ PreferredTuple Preferred = {LLT(), PreferredOpcode, nullptr};
+ for (auto &UseMI : MRI.use_instructions(LoadValue.getReg())) {
+ if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
+ UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
+ UseMI.getOpcode() == TargetOpcode::G_ANYEXT) {
+ Preferred = ChoosePreferredUse(Preferred,
+ MRI.getType(UseMI.getOperand(0).getReg()),
+ UseMI.getOpcode(), &UseMI);
+ }
+ }
+
+ // There were no extends
+ if (!Preferred.MI)
+ return false;
+ // It should be impossible to choose an extend without selecting a different
+ // type since by definition the result of an extend is larger.
+ assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
+
+ LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
+
+ // Rewrite the load to the chosen extending load.
+ unsigned ChosenDstReg = Preferred.MI->getOperand(0).getReg();
+ Observer.changingInstr(MI);
+ MI.setDesc(
+ Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT
+ ? TargetOpcode::G_SEXTLOAD
+ : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT
+ ? TargetOpcode::G_ZEXTLOAD
+ : TargetOpcode::G_LOAD));
+
+ // Rewrite all the uses to fix up the types.
+ SmallVector<MachineInstr *, 1> ScheduleForErase;
+ SmallVector<InsertionPoint, 4> ScheduleForInsert;
+ for (auto &UseMO : MRI.use_operands(LoadValue.getReg())) {
+ MachineInstr *UseMI = UseMO.getParent();
+
+ // If the extend is compatible with the preferred extend then we should fix
+ // up the type and extend so that it uses the preferred use.
+ if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
+ UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
+ unsigned UseDstReg = UseMI->getOperand(0).getReg();
+ MachineOperand &UseSrcMO = UseMI->getOperand(1);
+ const LLT &UseDstTy = MRI.getType(UseDstReg);
+ if (UseDstReg != ChosenDstReg) {
+ if (Preferred.Ty == UseDstTy) {
+ // If the use has the same type as the preferred use, then merge
+ // the vregs and erase the extend. For example:
+ // %1:_(s8) = G_LOAD ...
+ // %2:_(s32) = G_SEXT %1(s8)
+ // %3:_(s32) = G_ANYEXT %1(s8)
+ // ... = ... %3(s32)
+ // rewrites to:
+ // %2:_(s32) = G_SEXTLOAD ...
+ // ... = ... %2(s32)
+ replaceRegWith(MRI, UseDstReg, ChosenDstReg);
+ ScheduleForErase.push_back(UseMO.getParent());
+ } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
+ // If the preferred size is smaller, then keep the extend but extend
+ // from the result of the extending load. For example:
+ // %1:_(s8) = G_LOAD ...
+ // %2:_(s32) = G_SEXT %1(s8)
+ // %3:_(s64) = G_ANYEXT %1(s8)
+ // ... = ... %3(s64)
+ // rewrites to:
+ // %2:_(s32) = G_SEXTLOAD ...
+ // %3:_(s64) = G_ANYEXT %2:_(s32)
+ // ... = ... %3(s64)
+ replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
+ } else {
+ // If the preferred size is larger, then insert a truncate. For
+ // example:
+ // %1:_(s8) = G_LOAD ...
+ // %2:_(s64) = G_SEXT %1(s8)
+ // %3:_(s32) = G_ZEXT %1(s8)
+ // ... = ... %3(s32)
+ // rewrites to:
+ // %2:_(s64) = G_SEXTLOAD ...
+ // %4:_(s8) = G_TRUNC %2:_(s64)
+ // %3:_(s32) = G_ZEXT %4:_(s8)
+ // ... = ... %3(s32)
+ InsertInsnsWithoutSideEffectsBeforeUse(
+ Builder, MI, UseMO,
+ [&](MachineBasicBlock *InsertIntoBB,
+ MachineBasicBlock::iterator InsertBefore) {
+ ScheduleForInsert.emplace_back(&UseMO, InsertIntoBB, InsertBefore);
+ });
+ }
+ continue;
+ }
+ // This use is the preferred extend we chose earlier. The load is going to
+ // be updated to define this value directly, so just erase the old extend.
+ ScheduleForErase.push_back(UseMO.getParent());
+ continue;
+ }
+
+ // The use isn't an extend. Truncate back to the type we originally loaded.
+ // This is free on many targets.
+ InsertInsnsWithoutSideEffectsBeforeUse(
+ Builder, MI, UseMO,
+ [&](MachineBasicBlock *InsertIntoBB,
+ MachineBasicBlock::iterator InsertBefore) {
+ ScheduleForInsert.emplace_back(&UseMO, InsertIntoBB, InsertBefore);
+ });
+ }
+
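+ // Emit at most one truncate per insertion block; later uses scheduled into
+ // the same block reuse the previously emitted truncate's result register.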
+ DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
+ for (auto &InsertionInfo : ScheduleForInsert) {
+ MachineOperand *UseMO = InsertionInfo.UseMO;
+ MachineBasicBlock *InsertIntoBB = InsertionInfo.InsertIntoBB;
+ MachineBasicBlock::iterator InsertBefore = InsertionInfo.InsertBefore;
+
+ MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
+ if (PreviouslyEmitted) {
+ Observer.changingInstr(*UseMO->getParent());
+ UseMO->setReg(PreviouslyEmitted->getOperand(0).getReg());
+ Observer.changedInstr(*UseMO->getParent());
+ continue;
+ }
+
+ Builder.setInsertPt(*InsertIntoBB, InsertBefore);
+ unsigned NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
+ MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
+ EmittedInsns[InsertIntoBB] = NewMI;
+ replaceRegOpWith(MRI, *UseMO, NewDstReg);
+ }
+ for (auto &EraseMI : ScheduleForErase) {
+ Observer.erasingInstr(*EraseMI);
+ EraseMI->eraseFromParent();
+ }
+ MI.getOperand(0).setReg(ChosenDstReg);
+ Observer.changedInstr(MI);
+
+ return true;
+}
+
bool CombinerHelper::tryCombine(MachineInstr &MI) {
- return tryCombineCopy(MI);
+ if (tryCombineCopy(MI))
+ return true;
+ return tryCombineExtendingLoads(MI);
}