author     Dimitry Andric <dim@FreeBSD.org>    2015-05-27 18:44:32 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2015-05-27 18:44:32 +0000
commit     5a5ac124e1efaf208671f01c46edb15f29ed2a0b (patch)
tree       a6140557876943cdd800ee997c9317283394b22c /lib/Transforms/Scalar
parent     f03b5bed27d0d2eafd68562ce14f8b5e3f1f0801 (diff)

Vendor import of llvm trunk r238337 (tag: vendor/llvm/llvm-trunk-r238337)

Notes:
    svn path=/vendor/llvm/dist/; revision=283625
    svn path=/vendor/llvm/llvm-trunk-r238337/; revision=283626; tag=vendor/llvm/llvm-trunk-r238337
Diffstat (limited to 'lib/Transforms/Scalar')
-rw-r--r--  lib/Transforms/Scalar/ADCE.cpp | 70
-rw-r--r--  lib/Transforms/Scalar/AlignmentFromAssumptions.cpp | 19
-rw-r--r--  lib/Transforms/Scalar/BDCE.cpp | 410
-rw-r--r--  lib/Transforms/Scalar/CMakeLists.txt | 15
-rw-r--r--  lib/Transforms/Scalar/ConstantHoisting.cpp | 10
-rw-r--r--  lib/Transforms/Scalar/ConstantProp.cpp | 13
-rw-r--r--  lib/Transforms/Scalar/CorrelatedValuePropagation.cpp | 47
-rw-r--r--  lib/Transforms/Scalar/DCE.cpp | 8
-rw-r--r--  lib/Transforms/Scalar/DeadStoreElimination.cpp | 68
-rw-r--r--  lib/Transforms/Scalar/EarlyCSE.cpp | 654
-rw-r--r--  lib/Transforms/Scalar/Float2Int.cpp | 540
-rw-r--r--  lib/Transforms/Scalar/GVN.cpp | 366
-rw-r--r--  lib/Transforms/Scalar/IndVarSimplify.cpp | 176
-rw-r--r--  lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp | 1495
-rw-r--r--  lib/Transforms/Scalar/JumpThreading.cpp | 40
-rw-r--r--  lib/Transforms/Scalar/LICM.cpp | 439
-rw-r--r--  lib/Transforms/Scalar/LLVMBuild.txt | 2
-rw-r--r--  lib/Transforms/Scalar/LoadCombine.cpp | 39
-rw-r--r--  lib/Transforms/Scalar/LoopDeletion.cpp | 8
-rw-r--r--  lib/Transforms/Scalar/LoopDistribute.cpp | 976
-rw-r--r--  lib/Transforms/Scalar/LoopIdiomRecognize.cpp | 146
-rw-r--r--  lib/Transforms/Scalar/LoopInstSimplify.cpp | 18
-rw-r--r--  lib/Transforms/Scalar/LoopInterchange.cpp | 1300
-rw-r--r--  lib/Transforms/Scalar/LoopRerollPass.cpp | 1287
-rw-r--r--  lib/Transforms/Scalar/LoopRotation.cpp | 93
-rw-r--r--  lib/Transforms/Scalar/LoopStrengthReduce.cpp | 381
-rw-r--r--  lib/Transforms/Scalar/LoopUnrollPass.cpp | 597
-rw-r--r--  lib/Transforms/Scalar/LoopUnswitch.cpp | 55
-rw-r--r--  lib/Transforms/Scalar/LowerExpectIntrinsic.cpp | 192
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp | 226
-rw-r--r--  lib/Transforms/Scalar/MergedLoadStoreMotion.cpp | 11
-rw-r--r--  lib/Transforms/Scalar/NaryReassociate.cpp | 481
-rw-r--r--  lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp | 14
-rw-r--r--  lib/Transforms/Scalar/PlaceSafepoints.cpp | 993
-rw-r--r--  lib/Transforms/Scalar/Reassociate.cpp | 114
-rw-r--r--  lib/Transforms/Scalar/RewriteStatepointsForGC.cpp | 2506
-rw-r--r--  lib/Transforms/Scalar/SCCP.cpp | 29
-rw-r--r--  lib/Transforms/Scalar/SROA.cpp | 195
-rw-r--r--  lib/Transforms/Scalar/SampleProfile.cpp | 22
-rw-r--r--  lib/Transforms/Scalar/Scalar.cpp | 21
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp | 211
-rw-r--r--  lib/Transforms/Scalar/Scalarizer.cpp | 28
-rw-r--r--  lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp | 199
-rw-r--r--  lib/Transforms/Scalar/SimplifyCFGPass.cpp | 112
-rw-r--r--  lib/Transforms/Scalar/Sink.cpp | 14
-rw-r--r--  lib/Transforms/Scalar/SpeculativeExecution.cpp | 243
-rw-r--r--  lib/Transforms/Scalar/StraightLineStrengthReduce.cpp | 710
-rw-r--r--  lib/Transforms/Scalar/StructurizeCFG.cpp | 77
-rw-r--r--  lib/Transforms/Scalar/TailRecursionElimination.cpp | 76
49 files changed, 13340 insertions, 2406 deletions
diff --git a/lib/Transforms/Scalar/ADCE.cpp b/lib/Transforms/Scalar/ADCE.cpp
index 3d9198469bc5..d6fc91641588 100644
--- a/lib/Transforms/Scalar/ADCE.cpp
+++ b/lib/Transforms/Scalar/ADCE.cpp
@@ -32,19 +32,18 @@ using namespace llvm;
STATISTIC(NumRemoved, "Number of instructions removed");
namespace {
- struct ADCE : public FunctionPass {
- static char ID; // Pass identification, replacement for typeid
- ADCE() : FunctionPass(ID) {
- initializeADCEPass(*PassRegistry::getPassRegistry());
- }
-
- bool runOnFunction(Function& F) override;
+struct ADCE : public FunctionPass {
+ static char ID; // Pass identification, replacement for typeid
+ ADCE() : FunctionPass(ID) {
+ initializeADCEPass(*PassRegistry::getPassRegistry());
+ }
- void getAnalysisUsage(AnalysisUsage& AU) const override {
- AU.setPreservesCFG();
- }
+ bool runOnFunction(Function& F) override;
- };
+ void getAnalysisUsage(AnalysisUsage& AU) const override {
+ AU.setPreservesCFG();
+ }
+};
}
char ADCE::ID = 0;
@@ -54,46 +53,45 @@ bool ADCE::runOnFunction(Function& F) {
if (skipOptnoneFunction(F))
return false;
- SmallPtrSet<Instruction*, 128> alive;
- SmallVector<Instruction*, 128> worklist;
+ SmallPtrSet<Instruction*, 128> Alive;
+ SmallVector<Instruction*, 128> Worklist;
// Collect the set of "root" instructions that are known live.
- for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
- if (isa<TerminatorInst>(I.getInstructionIterator()) ||
- isa<DbgInfoIntrinsic>(I.getInstructionIterator()) ||
- isa<LandingPadInst>(I.getInstructionIterator()) ||
- I->mayHaveSideEffects()) {
- alive.insert(I.getInstructionIterator());
- worklist.push_back(I.getInstructionIterator());
+ for (Instruction &I : inst_range(F)) {
+ if (isa<TerminatorInst>(I) || isa<DbgInfoIntrinsic>(I) ||
+ isa<LandingPadInst>(I) || I.mayHaveSideEffects()) {
+ Alive.insert(&I);
+ Worklist.push_back(&I);
}
+ }
// Propagate liveness backwards to operands.
- while (!worklist.empty()) {
- Instruction* curr = worklist.pop_back_val();
- for (Instruction::op_iterator OI = curr->op_begin(), OE = curr->op_end();
- OI != OE; ++OI)
- if (Instruction* Inst = dyn_cast<Instruction>(OI))
- if (alive.insert(Inst).second)
- worklist.push_back(Inst);
+ while (!Worklist.empty()) {
+ Instruction *Curr = Worklist.pop_back_val();
+ for (Use &OI : Curr->operands()) {
+ if (Instruction *Inst = dyn_cast<Instruction>(OI))
+ if (Alive.insert(Inst).second)
+ Worklist.push_back(Inst);
+ }
}
// The inverse of the live set is the dead set. These are those instructions
// which have no side effects and do not influence the control flow or return
// value of the function, and may therefore be deleted safely.
- // NOTE: We reuse the worklist vector here for memory efficiency.
- for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
- if (!alive.count(I.getInstructionIterator())) {
- worklist.push_back(I.getInstructionIterator());
- I->dropAllReferences();
+ // NOTE: We reuse the Worklist vector here for memory efficiency.
+ for (Instruction &I : inst_range(F)) {
+ if (!Alive.count(&I)) {
+ Worklist.push_back(&I);
+ I.dropAllReferences();
}
+ }
- for (SmallVectorImpl<Instruction *>::iterator I = worklist.begin(),
- E = worklist.end(); I != E; ++I) {
+ for (Instruction *&I : Worklist) {
++NumRemoved;
- (*I)->eraseFromParent();
+ I->eraseFromParent();
}
- return !worklist.empty();
+ return !Worklist.empty();
}
FunctionPass *llvm::createAggressiveDCEPass() {
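The reformatted loops above are the whole of ADCE's algorithm: mark the root instructions (terminators, debug intrinsics, landing pads, and anything with side effects), propagate liveness backwards through operands with a worklist, then erase whatever was never marked. Below is a minimal standalone sketch of that mark-and-sweep idea; the Inst struct and the deadInstructions helper are invented for illustration and deliberately avoid the LLVM classes touched by this patch.

// Toy mark-and-sweep DCE over a tiny SSA-like graph; illustrative only.
#include <cstdio>
#include <unordered_set>
#include <vector>

struct Inst {
  const char *Name;
  bool HasSideEffects;            // "root" criterion, like terminators/stores
  std::vector<Inst *> Operands;   // instructions this one reads
};

static std::vector<Inst *> deadInstructions(const std::vector<Inst *> &All) {
  std::unordered_set<Inst *> Alive;
  std::vector<Inst *> Worklist;

  // 1. Roots: anything whose effect is observable is live.
  for (Inst *I : All)
    if (I->HasSideEffects && Alive.insert(I).second)
      Worklist.push_back(I);

  // 2. Propagate liveness backwards to operands.
  while (!Worklist.empty()) {
    Inst *Curr = Worklist.back();
    Worklist.pop_back();
    for (Inst *Op : Curr->Operands)
      if (Alive.insert(Op).second)
        Worklist.push_back(Op);
  }

  // 3. Everything not marked alive is dead.
  std::vector<Inst *> Dead;
  for (Inst *I : All)
    if (!Alive.count(I))
      Dead.push_back(I);
  return Dead;
}

int main() {
  Inst A{"a = add", false, {}};
  Inst B{"b = mul (unused)", false, {&A}};
  Inst S{"store a", true, {&A}};
  std::vector<Inst *> All{&A, &B, &S};
  for (Inst *I : deadInstructions(All))
    std::printf("dead: %s\n", I->Name);   // prints: dead: b = mul (unused)
}

The rewritten runOnFunction above performs exactly these three phases, and reuses its Worklist vector for the final sweep to save memory, as its NOTE comment says.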
diff --git a/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
index f48cefaa4fba..8918909f484a 100644
--- a/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
+++ b/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
@@ -23,15 +23,15 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
-#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -50,15 +50,15 @@ struct AlignmentFromAssumptions : public FunctionPass {
initializeAlignmentFromAssumptionsPass(*PassRegistry::getPassRegistry());
}
- bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<AssumptionCacheTracker>();
AU.addRequired<ScalarEvolution>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.setPreservesCFG();
- AU.addPreserved<LoopInfo>();
+ AU.addPreserved<LoopInfoWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
AU.addPreserved<ScalarEvolution>();
}
@@ -71,7 +71,6 @@ struct AlignmentFromAssumptions : public FunctionPass {
ScalarEvolution *SE;
DominatorTree *DT;
- const DataLayout *DL;
bool extractAlignmentInfo(CallInst *I, Value *&AAPtr, const SCEV *&AlignSCEV,
const SCEV *&OffSCEV);
@@ -123,7 +122,7 @@ static unsigned getNewAlignmentDiff(const SCEV *DiffSCEV,
// If the displacement is not an exact multiple, but the remainder is a
// constant, then return this remainder (but only if it is a power of 2).
- uint64_t DiffUnitsAbs = abs64(DiffUnits);
+ uint64_t DiffUnitsAbs = std::abs(DiffUnits);
if (isPowerOf2_64(DiffUnitsAbs))
return (unsigned) DiffUnitsAbs;
}
@@ -316,7 +315,7 @@ bool AlignmentFromAssumptions::processAssumption(CallInst *ACall) {
continue;
if (Instruction *K = dyn_cast<Instruction>(J))
- if (isValidAssumeForContext(ACall, K, DL, DT))
+ if (isValidAssumeForContext(ACall, K, DT))
WorkList.push_back(K);
}
@@ -400,7 +399,7 @@ bool AlignmentFromAssumptions::processAssumption(CallInst *ACall) {
Visited.insert(J);
for (User *UJ : J->users()) {
Instruction *K = cast<Instruction>(UJ);
- if (!Visited.count(K) && isValidAssumeForContext(ACall, K, DL, DT))
+ if (!Visited.count(K) && isValidAssumeForContext(ACall, K, DT))
WorkList.push_back(K);
}
}
@@ -413,8 +412,6 @@ bool AlignmentFromAssumptions::runOnFunction(Function &F) {
auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
SE = &getAnalysis<ScalarEvolution>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
NewDestAlignments.clear();
NewSrcAlignments.clear();
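Context for the abs64 to std::abs change in getNewAlignmentDiff above: the pass turns an @llvm.assume about a pointer's alignment into alignment facts for nearby accesses. If a pointer is known A-byte aligned and an access sits at a constant byte displacement D from it, the access is A-aligned when A divides D, and otherwise |D mod A|-aligned when that remainder is a power of two. A small self-contained sketch of that arithmetic follows; newAlignment is a made-up helper, not an LLVM API.

// Sketch of the alignment arithmetic behind getNewAlignmentDiff.
#include <cstdint>
#include <cstdio>
#include <cstdlib>

static unsigned newAlignment(unsigned KnownAlign, int64_t Displacement) {
  int64_t Rem = Displacement % (int64_t)KnownAlign;
  if (Rem == 0)
    return KnownAlign;                       // exact multiple: full alignment
  uint64_t RemAbs = (uint64_t)std::abs(Rem); // mirrors the std::abs fix above
  if ((RemAbs & (RemAbs - 1)) == 0)          // power of two?
    return (unsigned)RemAbs;
  return 0;                                  // nothing useful can be concluded
}

int main() {
  std::printf("%u\n", newAlignment(32, 64)); // 32: offset is a multiple
  std::printf("%u\n", newAlignment(32, 8));  // 8: remainder is a power of two
  std::printf("%u\n", newAlignment(32, 12)); // 0: remainder 12 is not
}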
diff --git a/lib/Transforms/Scalar/BDCE.cpp b/lib/Transforms/Scalar/BDCE.cpp
new file mode 100644
index 000000000000..09c605e76737
--- /dev/null
+++ b/lib/Transforms/Scalar/BDCE.cpp
@@ -0,0 +1,410 @@
+//===---- BDCE.cpp - Bit-tracking dead code elimination -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Bit-Tracking Dead Code Elimination pass. Some
+// instructions (shifts, some ands, ors, etc.) kill some of their input bits.
+// We track these dead bits and remove instructions that compute only these
+// dead bits.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "bdce"
+
+STATISTIC(NumRemoved, "Number of instructions removed (unused)");
+STATISTIC(NumSimplified, "Number of instructions trivialized (dead bits)");
+
+namespace {
+struct BDCE : public FunctionPass {
+ static char ID; // Pass identification, replacement for typeid
+ BDCE() : FunctionPass(ID) {
+ initializeBDCEPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function& F) override;
+
+ void getAnalysisUsage(AnalysisUsage& AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<AssumptionCacheTracker>();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ }
+
+ void determineLiveOperandBits(const Instruction *UserI,
+ const Instruction *I, unsigned OperandNo,
+ const APInt &AOut, APInt &AB,
+ APInt &KnownZero, APInt &KnownOne,
+ APInt &KnownZero2, APInt &KnownOne2);
+
+ AssumptionCache *AC;
+ DominatorTree *DT;
+};
+}
+
+char BDCE::ID = 0;
+INITIALIZE_PASS_BEGIN(BDCE, "bdce", "Bit-Tracking Dead Code Elimination",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_END(BDCE, "bdce", "Bit-Tracking Dead Code Elimination",
+ false, false)
+
+static bool isAlwaysLive(Instruction *I) {
+ return isa<TerminatorInst>(I) || isa<DbgInfoIntrinsic>(I) ||
+ isa<LandingPadInst>(I) || I->mayHaveSideEffects();
+}
+
+void BDCE::determineLiveOperandBits(const Instruction *UserI,
+ const Instruction *I, unsigned OperandNo,
+ const APInt &AOut, APInt &AB,
+ APInt &KnownZero, APInt &KnownOne,
+ APInt &KnownZero2, APInt &KnownOne2) {
+ unsigned BitWidth = AB.getBitWidth();
+
+ // We're called once per operand, but for some instructions, we need to
+ // compute known bits of both operands in order to determine the live bits of
+ // either (when both operands are instructions themselves). We don't,
+ // however, want to do this twice, so we cache the result in APInts that live
+ // in the caller. For the two-relevant-operands case, both operand values are
+ // provided here.
+ auto ComputeKnownBits =
+ [&](unsigned BitWidth, const Value *V1, const Value *V2) {
+ const DataLayout &DL = I->getModule()->getDataLayout();
+ KnownZero = APInt(BitWidth, 0);
+ KnownOne = APInt(BitWidth, 0);
+ computeKnownBits(const_cast<Value *>(V1), KnownZero, KnownOne, DL, 0,
+ AC, UserI, DT);
+
+ if (V2) {
+ KnownZero2 = APInt(BitWidth, 0);
+ KnownOne2 = APInt(BitWidth, 0);
+ computeKnownBits(const_cast<Value *>(V2), KnownZero2, KnownOne2, DL,
+ 0, AC, UserI, DT);
+ }
+ };
+
+ switch (UserI->getOpcode()) {
+ default: break;
+ case Instruction::Call:
+ case Instruction::Invoke:
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(UserI))
+ switch (II->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::bswap:
+ // The alive bits of the input are the swapped alive bits of
+ // the output.
+ AB = AOut.byteSwap();
+ break;
+ case Intrinsic::ctlz:
+ if (OperandNo == 0) {
+ // We need some output bits, so we need all bits of the
+ // input to the left of, and including, the leftmost bit
+ // known to be one.
+ ComputeKnownBits(BitWidth, I, nullptr);
+ AB = APInt::getHighBitsSet(BitWidth,
+ std::min(BitWidth, KnownOne.countLeadingZeros()+1));
+ }
+ break;
+ case Intrinsic::cttz:
+ if (OperandNo == 0) {
+ // We need some output bits, so we need all bits of the
+ // input to the right of, and including, the rightmost bit
+ // known to be one.
+ ComputeKnownBits(BitWidth, I, nullptr);
+ AB = APInt::getLowBitsSet(BitWidth,
+ std::min(BitWidth, KnownOne.countTrailingZeros()+1));
+ }
+ break;
+ }
+ break;
+ case Instruction::Add:
+ case Instruction::Sub:
+ // Find the highest live output bit. We don't need any more input
+ // bits than that (adds, and thus subtracts, ripple only to the
+ // left).
+ AB = APInt::getLowBitsSet(BitWidth, AOut.getActiveBits());
+ break;
+ case Instruction::Shl:
+ if (OperandNo == 0)
+ if (ConstantInt *CI =
+ dyn_cast<ConstantInt>(UserI->getOperand(1))) {
+ uint64_t ShiftAmt = CI->getLimitedValue(BitWidth-1);
+ AB = AOut.lshr(ShiftAmt);
+
+ // If the shift is nuw/nsw, then the high bits are not dead
+ // (because we've promised that they *must* be zero).
+ const ShlOperator *S = cast<ShlOperator>(UserI);
+ if (S->hasNoSignedWrap())
+ AB |= APInt::getHighBitsSet(BitWidth, ShiftAmt+1);
+ else if (S->hasNoUnsignedWrap())
+ AB |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
+ }
+ break;
+ case Instruction::LShr:
+ if (OperandNo == 0)
+ if (ConstantInt *CI =
+ dyn_cast<ConstantInt>(UserI->getOperand(1))) {
+ uint64_t ShiftAmt = CI->getLimitedValue(BitWidth-1);
+ AB = AOut.shl(ShiftAmt);
+
+ // If the shift is exact, then the low bits are not dead
+ // (they must be zero).
+ if (cast<LShrOperator>(UserI)->isExact())
+ AB |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
+ }
+ break;
+ case Instruction::AShr:
+ if (OperandNo == 0)
+ if (ConstantInt *CI =
+ dyn_cast<ConstantInt>(UserI->getOperand(1))) {
+ uint64_t ShiftAmt = CI->getLimitedValue(BitWidth-1);
+ AB = AOut.shl(ShiftAmt);
+ // Because the high input bit is replicated into the
+ // high-order bits of the result, if we need any of those
+ // bits, then we must keep the highest input bit.
+ if ((AOut & APInt::getHighBitsSet(BitWidth, ShiftAmt))
+ .getBoolValue())
+ AB.setBit(BitWidth-1);
+
+ // If the shift is exact, then the low bits are not dead
+ // (they must be zero).
+ if (cast<AShrOperator>(UserI)->isExact())
+ AB |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
+ }
+ break;
+ case Instruction::And:
+ AB = AOut;
+
+ // For bits that are known zero, the corresponding bits in the
+ // other operand are dead (unless they're both zero, in which
+ // case they can't both be dead, so just mark the LHS bits as
+ // dead).
+ if (OperandNo == 0) {
+ ComputeKnownBits(BitWidth, I, UserI->getOperand(1));
+ AB &= ~KnownZero2;
+ } else {
+ if (!isa<Instruction>(UserI->getOperand(0)))
+ ComputeKnownBits(BitWidth, UserI->getOperand(0), I);
+ AB &= ~(KnownZero & ~KnownZero2);
+ }
+ break;
+ case Instruction::Or:
+ AB = AOut;
+
+ // For bits that are known one, the corresponding bits in the
+ // other operand are dead (unless they're both one, in which
+ // case they can't both be dead, so just mark the LHS bits as
+ // dead).
+ if (OperandNo == 0) {
+ ComputeKnownBits(BitWidth, I, UserI->getOperand(1));
+ AB &= ~KnownOne2;
+ } else {
+ if (!isa<Instruction>(UserI->getOperand(0)))
+ ComputeKnownBits(BitWidth, UserI->getOperand(0), I);
+ AB &= ~(KnownOne & ~KnownOne2);
+ }
+ break;
+ case Instruction::Xor:
+ case Instruction::PHI:
+ AB = AOut;
+ break;
+ case Instruction::Trunc:
+ AB = AOut.zext(BitWidth);
+ break;
+ case Instruction::ZExt:
+ AB = AOut.trunc(BitWidth);
+ break;
+ case Instruction::SExt:
+ AB = AOut.trunc(BitWidth);
+ // Because the high input bit is replicated into the
+ // high-order bits of the result, if we need any of those
+ // bits, then we must keep the highest input bit.
+ if ((AOut & APInt::getHighBitsSet(AOut.getBitWidth(),
+ AOut.getBitWidth() - BitWidth))
+ .getBoolValue())
+ AB.setBit(BitWidth-1);
+ break;
+ case Instruction::Select:
+ if (OperandNo != 0)
+ AB = AOut;
+ break;
+ }
+}
+
+bool BDCE::runOnFunction(Function& F) {
+ if (skipOptnoneFunction(F))
+ return false;
+
+ AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+
+ DenseMap<Instruction *, APInt> AliveBits;
+ SmallVector<Instruction*, 128> Worklist;
+
+ // The set of visited instructions (non-integer-typed only).
+ SmallPtrSet<Instruction*, 128> Visited;
+
+ // Collect the set of "root" instructions that are known live.
+ for (Instruction &I : inst_range(F)) {
+ if (!isAlwaysLive(&I))
+ continue;
+
+ DEBUG(dbgs() << "BDCE: Root: " << I << "\n");
+ // For integer-valued instructions, set up an initial empty set of alive
+ // bits and add the instruction to the work list. For other instructions
+ // add their operands to the work list (for integer values operands, mark
+ // all bits as live).
+ if (IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
+ if (!AliveBits.count(&I)) {
+ AliveBits[&I] = APInt(IT->getBitWidth(), 0);
+ Worklist.push_back(&I);
+ }
+
+ continue;
+ }
+
+ // Non-integer-typed instructions...
+ for (Use &OI : I.operands()) {
+ if (Instruction *J = dyn_cast<Instruction>(OI)) {
+ if (IntegerType *IT = dyn_cast<IntegerType>(J->getType()))
+ AliveBits[J] = APInt::getAllOnesValue(IT->getBitWidth());
+ Worklist.push_back(J);
+ }
+ }
+ // To save memory, we don't add I to the Visited set here. Instead, we
+ // check isAlwaysLive on every instruction when searching for dead
+ // instructions later (we need to check isAlwaysLive for the
+ // integer-typed instructions anyway).
+ }
+
+ // Propagate liveness backwards to operands.
+ while (!Worklist.empty()) {
+ Instruction *UserI = Worklist.pop_back_val();
+
+ DEBUG(dbgs() << "BDCE: Visiting: " << *UserI);
+ APInt AOut;
+ if (UserI->getType()->isIntegerTy()) {
+ AOut = AliveBits[UserI];
+ DEBUG(dbgs() << " Alive Out: " << AOut);
+ }
+ DEBUG(dbgs() << "\n");
+
+ if (!UserI->getType()->isIntegerTy())
+ Visited.insert(UserI);
+
+ APInt KnownZero, KnownOne, KnownZero2, KnownOne2;
+ // Compute the set of alive bits for each operand. These are anded into the
+ // existing set, if any, and if that changes the set of alive bits, the
+ // operand is added to the work-list.
+ for (Use &OI : UserI->operands()) {
+ if (Instruction *I = dyn_cast<Instruction>(OI)) {
+ if (IntegerType *IT = dyn_cast<IntegerType>(I->getType())) {
+ unsigned BitWidth = IT->getBitWidth();
+ APInt AB = APInt::getAllOnesValue(BitWidth);
+ if (UserI->getType()->isIntegerTy() && !AOut &&
+ !isAlwaysLive(UserI)) {
+ AB = APInt(BitWidth, 0);
+ } else {
+ // If all bits of the output are dead, then so are all bits of the input
+ // (handled above). Otherwise, bits of each operand that are used to
+ // compute alive bits of the output are alive, all others are dead.
+ determineLiveOperandBits(UserI, I, OI.getOperandNo(), AOut, AB,
+ KnownZero, KnownOne,
+ KnownZero2, KnownOne2);
+ }
+
+ // If we've added to the set of alive bits (or the operand has not
+ // been previously visited), then re-queue the operand to be visited
+ // again.
+ APInt ABPrev(BitWidth, 0);
+ auto ABI = AliveBits.find(I);
+ if (ABI != AliveBits.end())
+ ABPrev = ABI->second;
+
+ APInt ABNew = AB | ABPrev;
+ if (ABNew != ABPrev || ABI == AliveBits.end()) {
+ AliveBits[I] = std::move(ABNew);
+ Worklist.push_back(I);
+ }
+ } else if (!Visited.count(I)) {
+ Worklist.push_back(I);
+ }
+ }
+ }
+ }
+
+ bool Changed = false;
+ // The inverse of the live set is the dead set. These are those instructions
+ // which have no side effects and do not influence the control flow or return
+ // value of the function, and may therefore be deleted safely.
+ // NOTE: We reuse the Worklist vector here for memory efficiency.
+ for (Instruction &I : inst_range(F)) {
+ // For live instructions that have all dead bits, first make them dead by
+ // replacing all uses with something else. Then, if they don't need to
+ // remain live (because they have side effects, etc.) we can remove them.
+ if (I.getType()->isIntegerTy()) {
+ auto ABI = AliveBits.find(&I);
+ if (ABI != AliveBits.end()) {
+ if (ABI->second.getBoolValue())
+ continue;
+
+ DEBUG(dbgs() << "BDCE: Trivializing: " << I << " (all bits dead)\n");
+ // FIXME: In theory we could substitute undef here instead of zero.
+ // This should be reconsidered once we settle on the semantics of
+ // undef, poison, etc.
+ Value *Zero = ConstantInt::get(I.getType(), 0);
+ ++NumSimplified;
+ I.replaceAllUsesWith(Zero);
+ Changed = true;
+ }
+ } else if (Visited.count(&I)) {
+ continue;
+ }
+
+ if (isAlwaysLive(&I))
+ continue;
+
+ Worklist.push_back(&I);
+ I.dropAllReferences();
+ Changed = true;
+ }
+
+ for (Instruction *&I : Worklist) {
+ ++NumRemoved;
+ I->eraseFromParent();
+ }
+
+ return Changed;
+}
+
+FunctionPass *llvm::createBitTrackingDCEPass() {
+ return new BDCE();
+}
+
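The new BDCE.cpp above generalizes ADCE from whole values to individual bits: every integer-typed instruction gets a mask of demanded output bits, and the switch in determineLiveOperandBits acts as a per-opcode transfer function from demanded output bits to demanded input bits (add/sub only need input bits up to the highest demanded output bit; shl by a constant needs the output mask shifted right, ignoring the nuw/nsw adjustment the real pass makes). Here is a tiny standalone illustration of two such transfer functions, using uint32_t masks in place of APInt; the function names are invented for the example.

// Toy demanded-bits transfer functions mirroring two cases of the
// determineLiveOperandBits switch above; uint32_t masks stand in for APInt.
#include <cstdint>
#include <cstdio>

// Smallest mask of the form 2^k - 1 that covers every set bit of Mask.
static uint32_t lowBitsThroughHighestSet(uint32_t Mask) {
  uint32_t Result = 0;
  while (Result < Mask)
    Result = (Result << 1) | 1u;
  return Result;
}

// add/sub: carries ripple only to the left, so the inputs need bits up to
// and including the highest demanded output bit.
static uint32_t demandedForAdd(uint32_t DemandedOut) {
  return lowBitsThroughHighestSet(DemandedOut);
}

// shl by a constant: output bit i comes from input bit i - Amt, so the
// demanded input mask is the demanded output mask shifted right.
static uint32_t demandedForShlByConst(uint32_t DemandedOut, unsigned Amt) {
  return DemandedOut >> Amt;
}

int main() {
  // Only the low 8 bits of an add's result are used downstream:
  std::printf("add needs  0x%08x\n", (unsigned)demandedForAdd(0x000000ffu));
  // Only bit 12 of (x << 4) is used, so only bit 8 of x is needed:
  std::printf("shl4 needs 0x%08x\n", (unsigned)demandedForShlByConst(1u << 12, 4));
}

The real pass then replaces any integer instruction whose demanded mask ends up empty with zero (the "Trivializing" block above) and removes the rest with the same ADCE-style sweep.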
diff --git a/lib/Transforms/Scalar/CMakeLists.txt b/lib/Transforms/Scalar/CMakeLists.txt
index b3ee11ed67cd..7ee279a56600 100644
--- a/lib/Transforms/Scalar/CMakeLists.txt
+++ b/lib/Transforms/Scalar/CMakeLists.txt
@@ -1,6 +1,7 @@
add_llvm_library(LLVMScalarOpts
ADCE.cpp
AlignmentFromAssumptions.cpp
+ BDCE.cpp
ConstantHoisting.cpp
ConstantProp.cpp
CorrelatedValuePropagation.cpp
@@ -8,25 +9,33 @@ add_llvm_library(LLVMScalarOpts
DeadStoreElimination.cpp
EarlyCSE.cpp
FlattenCFGPass.cpp
+ Float2Int.cpp
GVN.cpp
+ InductiveRangeCheckElimination.cpp
IndVarSimplify.cpp
JumpThreading.cpp
LICM.cpp
LoadCombine.cpp
LoopDeletion.cpp
+ LoopDistribute.cpp
LoopIdiomRecognize.cpp
LoopInstSimplify.cpp
+ LoopInterchange.cpp
LoopRerollPass.cpp
LoopRotation.cpp
LoopStrengthReduce.cpp
LoopUnrollPass.cpp
LoopUnswitch.cpp
LowerAtomic.cpp
+ LowerExpectIntrinsic.cpp
MemCpyOptimizer.cpp
MergedLoadStoreMotion.cpp
+ NaryReassociate.cpp
PartiallyInlineLibCalls.cpp
+ PlaceSafepoints.cpp
Reassociate.cpp
Reg2Mem.cpp
+ RewriteStatepointsForGC.cpp
SCCP.cpp
SROA.cpp
SampleProfile.cpp
@@ -36,8 +45,14 @@ add_llvm_library(LLVMScalarOpts
SeparateConstOffsetFromGEP.cpp
SimplifyCFGPass.cpp
Sink.cpp
+ SpeculativeExecution.cpp
+ StraightLineStrengthReduce.cpp
StructurizeCFG.cpp
TailRecursionElimination.cpp
+
+ ADDITIONAL_HEADER_DIRS
+ ${LLVM_MAIN_INCLUDE_DIR}/llvm/Transforms
+ ${LLVM_MAIN_INCLUDE_DIR}/llvm/Transforms/Scalar
)
add_dependencies(LLVMScalarOpts intrinsics_gen)
diff --git a/lib/Transforms/Scalar/ConstantHoisting.cpp b/lib/Transforms/Scalar/ConstantHoisting.cpp
index 27c177a542e3..4288742dd3eb 100644
--- a/lib/Transforms/Scalar/ConstantHoisting.cpp
+++ b/lib/Transforms/Scalar/ConstantHoisting.cpp
@@ -43,6 +43,7 @@
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include <tuple>
using namespace llvm;
@@ -131,14 +132,14 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<TargetTransformInfo>();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
}
private:
/// \brief Initialize the pass.
void setup(Function &Fn) {
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- TTI = &getAnalysis<TargetTransformInfo>();
+ TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(Fn);
Entry = &Fn.getEntryBlock();
}
@@ -176,7 +177,7 @@ char ConstantHoisting::ID = 0;
INITIALIZE_PASS_BEGIN(ConstantHoisting, "consthoist", "Constant Hoisting",
false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(ConstantHoisting, "consthoist", "Constant Hoisting",
false, false)
@@ -186,6 +187,9 @@ FunctionPass *llvm::createConstantHoistingPass() {
/// \brief Perform the constant hoisting optimization for the given function.
bool ConstantHoisting::runOnFunction(Function &Fn) {
+ if (skipOptnoneFunction(Fn))
+ return false;
+
DEBUG(dbgs() << "********** Begin Constant Hoisting **********\n");
DEBUG(dbgs() << "********** Function: " << Fn.getName() << '\n');
diff --git a/lib/Transforms/Scalar/ConstantProp.cpp b/lib/Transforms/Scalar/ConstantProp.cpp
index dd51ce1bc28c..c974ebb9456f 100644
--- a/lib/Transforms/Scalar/ConstantProp.cpp
+++ b/lib/Transforms/Scalar/ConstantProp.cpp
@@ -22,11 +22,10 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constant.h"
-#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Pass.h"
-#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include <set>
using namespace llvm;
@@ -45,7 +44,7 @@ namespace {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
- AU.addRequired<TargetLibraryInfo>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
}
};
}
@@ -53,7 +52,7 @@ namespace {
char ConstantPropagation::ID = 0;
INITIALIZE_PASS_BEGIN(ConstantPropagation, "constprop",
"Simple constant propagation", false, false)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ConstantPropagation, "constprop",
"Simple constant propagation", false, false)
@@ -68,9 +67,9 @@ bool ConstantPropagation::runOnFunction(Function &F) {
WorkList.insert(&*i);
}
bool Changed = false;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
- TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ TargetLibraryInfo *TLI =
+ &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
while (!WorkList.empty()) {
Instruction *I = *WorkList.begin();
diff --git a/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index 5a3b5cf34cc3..d1302c6e22f4 100644
--- a/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -19,6 +19,7 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -102,32 +103,52 @@ bool CorrelatedValuePropagation::processPHI(PHINode *P) {
Value *V = LVI->getConstantOnEdge(Incoming, P->getIncomingBlock(i), BB, P);
- // Look if the incoming value is a select with a constant but LVI tells us
- // that the incoming value can never be that constant. In that case replace
- // the incoming value with the other value of the select. This often allows
- // us to remove the select later.
+ // Look if the incoming value is a select with a scalar condition for which
+ // LVI can tell us the value. In that case replace the incoming value with
+ // the appropriate value of the select. This often allows us to remove the
+ // select later.
if (!V) {
SelectInst *SI = dyn_cast<SelectInst>(Incoming);
if (!SI) continue;
- Constant *C = dyn_cast<Constant>(SI->getFalseValue());
- if (!C) continue;
+ Value *Condition = SI->getCondition();
+ if (!Condition->getType()->isVectorTy()) {
+ if (Constant *C = LVI->getConstantOnEdge(Condition, P->getIncomingBlock(i), BB, P)) {
+ if (C == ConstantInt::getTrue(Condition->getType())) {
+ V = SI->getTrueValue();
+ } else {
+ V = SI->getFalseValue();
+ }
+ // Once LVI learns to handle vector types, we could also add support
+ // for vector type constants that are not all zeroes or all ones.
+ }
+ }
- if (LVI->getPredicateOnEdge(ICmpInst::ICMP_EQ, SI, C,
- P->getIncomingBlock(i), BB, P) !=
- LazyValueInfo::False)
- continue;
+ // Look if the select has a constant but LVI tells us that the incoming
+ // value can never be that constant. In that case replace the incoming
+ // value with the other value of the select. This often allows us to
+ // remove the select later.
+ if (!V) {
+ Constant *C = dyn_cast<Constant>(SI->getFalseValue());
+ if (!C) continue;
+
+ if (LVI->getPredicateOnEdge(ICmpInst::ICMP_EQ, SI, C,
+ P->getIncomingBlock(i), BB, P) !=
+ LazyValueInfo::False)
+ continue;
+ V = SI->getTrueValue();
+ }
DEBUG(dbgs() << "CVP: Threading PHI over " << *SI << '\n');
- V = SI->getTrueValue();
}
P->setIncomingValue(i, V);
Changed = true;
}
- // FIXME: Provide DL, TLI, DT, AT to SimplifyInstruction.
- if (Value *V = SimplifyInstruction(P)) {
+ // FIXME: Provide TLI, DT, AT to SimplifyInstruction.
+ const DataLayout &DL = BB->getModule()->getDataLayout();
+ if (Value *V = SimplifyInstruction(P, DL)) {
P->replaceAllUsesWith(V);
P->eraseFromParent();
Changed = true;
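The CorrelatedValuePropagation hunk above adds a second way to thread a PHI over a select: when LVI can prove the select's scalar condition on the incoming edge, the PHI takes the matching arm directly, and the older constant-comparison fallback is kept as a nested check. A minimal sketch of that edge-fact lookup, with invented types (EdgeFacts and threadSelectOverEdge stand in for LazyValueInfo and are not LLVM APIs):

// Fold a select over one CFG edge when the condition is known on that edge.
#include <cstdio>
#include <map>
#include <optional>
#include <string>

struct Select {
  std::string Cond, TrueVal, FalseVal;
};

// Facts proven on a particular edge: condition name -> known truth value.
using EdgeFacts = std::map<std::string, bool>;

static std::optional<std::string> threadSelectOverEdge(const Select &S,
                                                       const EdgeFacts &Facts) {
  auto It = Facts.find(S.Cond);
  if (It == Facts.end())
    return std::nullopt;              // condition unknown on this edge
  return It->second ? S.TrueVal : S.FalseVal;
}

int main() {
  Select S{"%cmp", "%a", "%b"};
  EdgeFacts FromThenBlock{{"%cmp", true}};
  if (auto V = threadSelectOverEdge(S, FromThenBlock))
    std::printf("phi incoming becomes %s\n", V->c_str()); // prints %a
}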
diff --git a/lib/Transforms/Scalar/DCE.cpp b/lib/Transforms/Scalar/DCE.cpp
index 99fac751df8e..3b262a23091f 100644
--- a/lib/Transforms/Scalar/DCE.cpp
+++ b/lib/Transforms/Scalar/DCE.cpp
@@ -21,7 +21,7 @@
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Pass.h"
-#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
@@ -42,7 +42,8 @@ namespace {
bool runOnBasicBlock(BasicBlock &BB) override {
if (skipOptnoneFunction(BB))
return false;
- TargetLibraryInfo *TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
+ auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
+ TargetLibraryInfo *TLI = TLIP ? &TLIP->getTLI() : nullptr;
bool Changed = false;
for (BasicBlock::iterator DI = BB.begin(); DI != BB.end(); ) {
Instruction *Inst = DI++;
@@ -95,7 +96,8 @@ bool DCE::runOnFunction(Function &F) {
if (skipOptnoneFunction(F))
return false;
- TargetLibraryInfo *TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
+ auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
+ TargetLibraryInfo *TLI = TLIP ? &TLIP->getTLI() : nullptr;
// Start out with all of the instructions in the worklist...
std::vector<Instruction*> WorkList;
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index a1ddc00da5ba..01952cf6e8b3 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -23,6 +23,7 @@
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
@@ -33,7 +34,7 @@
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
@@ -78,7 +79,8 @@ namespace {
bool HandleFree(CallInst *F);
bool handleEndBlock(BasicBlock &BB);
void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
- SmallSetVector<Value*, 16> &DeadStackObjects);
+ SmallSetVector<Value *, 16> &DeadStackObjects,
+ const DataLayout &DL);
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
@@ -166,7 +168,7 @@ static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo *TLI) {
return true;
}
}
- if (CallSite CS = I) {
+ if (auto CS = CallSite(I)) {
if (Function *F = CS.getCalledFunction()) {
if (TLI && TLI->has(LibFunc::strcpy) &&
F->getName() == TLI->getName(LibFunc::strcpy)) {
@@ -194,18 +196,12 @@ static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo *TLI) {
/// describe the memory operations for this instruction.
static AliasAnalysis::Location
getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
- const DataLayout *DL = AA.getDataLayout();
if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
return AA.getLocation(SI);
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
// memcpy/memmove/memset.
AliasAnalysis::Location Loc = AA.getLocationForDest(MI);
- // If we don't have target data around, an unknown size in Location means
- // that we should use the size of the pointee type. This isn't valid for
- // memset/memcpy, which writes more than an i8.
- if (Loc.Size == AliasAnalysis::UnknownSize && DL == nullptr)
- return AliasAnalysis::Location();
return Loc;
}
@@ -215,11 +211,6 @@ getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
switch (II->getIntrinsicID()) {
default: return AliasAnalysis::Location(); // Unhandled intrinsic.
case Intrinsic::init_trampoline:
- // If we don't have target data around, an unknown size in Location means
- // that we should use the size of the pointee type. This isn't valid for
- // init.trampoline, which writes more than an i8.
- if (!DL) return AliasAnalysis::Location();
-
// FIXME: We don't know the size of the trampoline, so we can't really
// handle it here.
return AliasAnalysis::Location(II->getArgOperand(0));
@@ -271,7 +262,7 @@ static bool isRemovable(Instruction *I) {
}
}
- if (CallSite CS = I)
+ if (auto CS = CallSite(I))
return CS.getInstruction()->use_empty();
return false;
@@ -315,15 +306,16 @@ static Value *getStoredPointerOperand(Instruction *I) {
}
}
- CallSite CS = I;
+ CallSite CS(I);
// All the supported functions so far happen to have dest as their first
// argument.
return CS.getArgument(0);
}
-static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) {
+static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
+ const TargetLibraryInfo *TLI) {
uint64_t Size;
- if (getObjectSize(V, Size, AA.getDataLayout(), AA.getTargetLibraryInfo()))
+ if (getObjectSize(V, Size, DL, TLI))
return Size;
return AliasAnalysis::UnknownSize;
}
@@ -343,10 +335,9 @@ namespace {
/// overwritten by 'Later', or 'OverwriteUnknown' if nothing can be determined
static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
const AliasAnalysis::Location &Earlier,
- AliasAnalysis &AA,
- int64_t &EarlierOff,
- int64_t &LaterOff) {
- const DataLayout *DL = AA.getDataLayout();
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI,
+ int64_t &EarlierOff, int64_t &LaterOff) {
const Value *P1 = Earlier.Ptr->stripPointerCasts();
const Value *P2 = Later.Ptr->stripPointerCasts();
@@ -367,7 +358,7 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
// Otherwise, we have to have size information, and the later store has to be
// larger than the earlier one.
if (Later.Size == AliasAnalysis::UnknownSize ||
- Earlier.Size == AliasAnalysis::UnknownSize || DL == nullptr)
+ Earlier.Size == AliasAnalysis::UnknownSize)
return OverwriteUnknown;
// Check to see if the later store is to the entire object (either a global,
@@ -382,7 +373,7 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
return OverwriteUnknown;
// If the "Later" store is to a recognizable object, get its size.
- uint64_t ObjectSize = getPointerSize(UO2, AA);
+ uint64_t ObjectSize = getPointerSize(UO2, DL, TLI);
if (ObjectSize != AliasAnalysis::UnknownSize)
if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
return OverwriteComplete;
@@ -560,8 +551,10 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
if (isRemovable(DepWrite) &&
!isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
int64_t InstWriteOffset, DepWriteOffset;
- OverwriteResult OR = isOverwrite(Loc, DepLoc, *AA,
- DepWriteOffset, InstWriteOffset);
+ const DataLayout &DL = BB.getModule()->getDataLayout();
+ OverwriteResult OR =
+ isOverwrite(Loc, DepLoc, DL, AA->getTargetLibraryInfo(),
+ DepWriteOffset, InstWriteOffset);
if (OR == OverwriteComplete) {
DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: "
<< *DepWrite << "\n KILLER: " << *Inst << '\n');
@@ -655,6 +648,7 @@ bool DSE::HandleFree(CallInst *F) {
AliasAnalysis::Location Loc = AliasAnalysis::Location(F->getOperand(0));
SmallVector<BasicBlock *, 16> Blocks;
Blocks.push_back(F->getParent());
+ const DataLayout &DL = F->getModule()->getDataLayout();
while (!Blocks.empty()) {
BasicBlock *BB = Blocks.pop_back_val();
@@ -668,7 +662,7 @@ bool DSE::HandleFree(CallInst *F) {
break;
Value *DepPointer =
- GetUnderlyingObject(getStoredPointerOperand(Dependency));
+ GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);
// Check for aliasing.
if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
@@ -728,6 +722,8 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
if (AI->hasByValOrInAllocaAttr())
DeadStackObjects.insert(AI);
+ const DataLayout &DL = BB.getModule()->getDataLayout();
+
// Scan the basic block backwards
for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
--BBI;
@@ -736,7 +732,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
if (hasMemoryWrite(BBI, TLI) && isRemovable(BBI)) {
// See through pointer-to-pointer bitcasts
SmallVector<Value *, 4> Pointers;
- GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers);
+ GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers, DL);
// Stores to stack values are valid candidates for removal.
bool AllDead = true;
@@ -784,7 +780,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
continue;
}
- if (CallSite CS = cast<Value>(BBI)) {
+ if (auto CS = CallSite(BBI)) {
// Remove allocation function calls from the list of dead stack objects;
// there can't be any references before the definition.
if (isAllocLikeFn(BBI, TLI))
@@ -799,8 +795,8 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// the call is live.
DeadStackObjects.remove_if([&](Value *I) {
// See if the call site touches the value.
- AliasAnalysis::ModRefResult A =
- AA->getModRefInfo(CS, I, getPointerSize(I, *AA));
+ AliasAnalysis::ModRefResult A = AA->getModRefInfo(
+ CS, I, getPointerSize(I, DL, AA->getTargetLibraryInfo()));
return A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref;
});
@@ -835,7 +831,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// Remove any allocas from the DeadPointer set that are loaded, as this
// makes any stores above the access live.
- RemoveAccessedObjects(LoadedLoc, DeadStackObjects);
+ RemoveAccessedObjects(LoadedLoc, DeadStackObjects, DL);
// If all of the allocas were clobbered by the access then we're not going
// to find anything else to process.
@@ -850,8 +846,9 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
/// of the stack objects in the DeadStackObjects set. If so, they become live
/// because the location is being loaded.
void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
- SmallSetVector<Value*, 16> &DeadStackObjects) {
- const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr);
+ SmallSetVector<Value *, 16> &DeadStackObjects,
+ const DataLayout &DL) {
+ const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);
// A constant can't be in the dead pointer set.
if (isa<Constant>(UnderlyingPointer))
@@ -867,7 +864,8 @@ void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
// Remove objects that could alias LoadedLoc.
DeadStackObjects.remove_if([&](Value *I) {
// See if the loaded location could alias the stack location.
- AliasAnalysis::Location StackLoc(I, getPointerSize(I, *AA));
+ AliasAnalysis::Location StackLoc(
+ I, getPointerSize(I, DL, AA->getTargetLibraryInfo()));
return !AA->isNoAlias(StackLoc, LoadedLoc);
});
}
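Most of the DeadStoreElimination churn above is plumbing (DataLayout and TargetLibraryInfo are now passed in explicitly instead of being pulled out of AliasAnalysis), but it all feeds the core question asked by isOverwrite: does the later store completely cover the earlier one? A simplified standalone sketch of that containment check, with an invented StoreLoc struct standing in for AliasAnalysis::Location:

// Simplified "complete overwrite" check: a later store kills an earlier one
// when both sizes are known and the later write contains the earlier one.
#include <cstdint>
#include <cstdio>

struct StoreLoc {
  const void *Base;   // underlying object (after stripping casts)
  int64_t Offset;     // byte offset from Base
  uint64_t Size;      // bytes written; 0 means unknown
};

static bool completelyOverwrites(const StoreLoc &Later, const StoreLoc &Earlier) {
  if (Later.Base != Earlier.Base)
    return false;                       // different objects: can't reason here
  if (Later.Size == 0 || Earlier.Size == 0)
    return false;                       // unknown sizes: be conservative
  // [Later.Offset, Later.Offset + Later.Size) must contain the earlier range.
  return Later.Offset <= Earlier.Offset &&
         Later.Offset + (int64_t)Later.Size >=
             Earlier.Offset + (int64_t)Earlier.Size;
}

int main() {
  char Buf[16];
  StoreLoc Earlier{Buf, 4, 4};   // e.g. a 4-byte store at Buf+4
  StoreLoc Later{Buf, 0, 16};    // a memset of the whole buffer afterwards
  std::printf("earlier store is dead: %s\n",
              completelyOverwrites(Later, Earlier) ? "yes" : "no");
}

In the real pass the sizes come from getPointerSize/getObjectSize, which is exactly why the DataLayout and TargetLibraryInfo threading in the hunks above matters.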
diff --git a/lib/Transforms/Scalar/EarlyCSE.cpp b/lib/Transforms/Scalar/EarlyCSE.cpp
index 969b9a8f8df1..d536a937dce1 100644
--- a/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -12,12 +12,14 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Scalar/EarlyCSE.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
@@ -26,7 +28,8 @@
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/RecyclingAllocator.h"
-#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <deque>
using namespace llvm;
@@ -40,49 +43,44 @@ STATISTIC(NumCSELoad, "Number of load instructions CSE'd");
STATISTIC(NumCSECall, "Number of call instructions CSE'd");
STATISTIC(NumDSE, "Number of trivial dead stores removed");
-static unsigned getHash(const void *V) {
- return DenseMapInfo<const void*>::getHashValue(V);
-}
-
//===----------------------------------------------------------------------===//
// SimpleValue
//===----------------------------------------------------------------------===//
namespace {
- /// SimpleValue - Instances of this struct represent available values in the
- /// scoped hash table.
- struct SimpleValue {
- Instruction *Inst;
+/// \brief Struct representing the available values in the scoped hash table.
+struct SimpleValue {
+ Instruction *Inst;
- SimpleValue(Instruction *I) : Inst(I) {
- assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
- }
+ SimpleValue(Instruction *I) : Inst(I) {
+ assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
+ }
- bool isSentinel() const {
- return Inst == DenseMapInfo<Instruction*>::getEmptyKey() ||
- Inst == DenseMapInfo<Instruction*>::getTombstoneKey();
- }
+ bool isSentinel() const {
+ return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
+ Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
+ }
- static bool canHandle(Instruction *Inst) {
- // This can only handle non-void readnone functions.
- if (CallInst *CI = dyn_cast<CallInst>(Inst))
- return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
- return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) ||
- isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) ||
- isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
- isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
- isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
- }
- };
+ static bool canHandle(Instruction *Inst) {
+ // This can only handle non-void readnone functions.
+ if (CallInst *CI = dyn_cast<CallInst>(Inst))
+ return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
+ return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) ||
+ isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) ||
+ isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
+ isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
+ isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
+ }
+};
}
namespace llvm {
-template<> struct DenseMapInfo<SimpleValue> {
+template <> struct DenseMapInfo<SimpleValue> {
static inline SimpleValue getEmptyKey() {
- return DenseMapInfo<Instruction*>::getEmptyKey();
+ return DenseMapInfo<Instruction *>::getEmptyKey();
}
static inline SimpleValue getTombstoneKey() {
- return DenseMapInfo<Instruction*>::getTombstoneKey();
+ return DenseMapInfo<Instruction *>::getTombstoneKey();
}
static unsigned getHashValue(SimpleValue Val);
static bool isEqual(SimpleValue LHS, SimpleValue RHS);
@@ -92,7 +90,7 @@ template<> struct DenseMapInfo<SimpleValue> {
unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
Instruction *Inst = Val.Inst;
// Hash in all of the operands as pointers.
- if (BinaryOperator* BinOp = dyn_cast<BinaryOperator>(Inst)) {
+ if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
Value *LHS = BinOp->getOperand(0);
Value *RHS = BinOp->getOperand(1);
if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
@@ -101,8 +99,9 @@ unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
if (isa<OverflowingBinaryOperator>(BinOp)) {
// Hash the overflow behavior
unsigned Overflow =
- BinOp->hasNoSignedWrap() * OverflowingBinaryOperator::NoSignedWrap |
- BinOp->hasNoUnsignedWrap() * OverflowingBinaryOperator::NoUnsignedWrap;
+ BinOp->hasNoSignedWrap() * OverflowingBinaryOperator::NoSignedWrap |
+ BinOp->hasNoUnsignedWrap() *
+ OverflowingBinaryOperator::NoUnsignedWrap;
return hash_combine(BinOp->getOpcode(), Overflow, LHS, RHS);
}
@@ -135,12 +134,13 @@ unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
assert((isa<CallInst>(Inst) || isa<BinaryOperator>(Inst) ||
isa<GetElementPtrInst>(Inst) || isa<SelectInst>(Inst) ||
isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
- isa<ShuffleVectorInst>(Inst)) && "Invalid/unknown instruction");
+ isa<ShuffleVectorInst>(Inst)) &&
+ "Invalid/unknown instruction");
// Mix in the opcode.
- return hash_combine(Inst->getOpcode(),
- hash_combine_range(Inst->value_op_begin(),
- Inst->value_op_end()));
+ return hash_combine(
+ Inst->getOpcode(),
+ hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}
bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
@@ -149,22 +149,24 @@ bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
if (LHS.isSentinel() || RHS.isSentinel())
return LHSI == RHSI;
- if (LHSI->getOpcode() != RHSI->getOpcode()) return false;
- if (LHSI->isIdenticalTo(RHSI)) return true;
+ if (LHSI->getOpcode() != RHSI->getOpcode())
+ return false;
+ if (LHSI->isIdenticalTo(RHSI))
+ return true;
// If we're not strictly identical, we still might be a commutable instruction
if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
if (!LHSBinOp->isCommutative())
return false;
- assert(isa<BinaryOperator>(RHSI)
- && "same opcode, but different instruction type?");
+ assert(isa<BinaryOperator>(RHSI) &&
+ "same opcode, but different instruction type?");
BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);
// Check overflow attributes
if (isa<OverflowingBinaryOperator>(LHSBinOp)) {
- assert(isa<OverflowingBinaryOperator>(RHSBinOp)
- && "same opcode, but different operator type?");
+ assert(isa<OverflowingBinaryOperator>(RHSBinOp) &&
+ "same opcode, but different operator type?");
if (LHSBinOp->hasNoUnsignedWrap() != RHSBinOp->hasNoUnsignedWrap() ||
LHSBinOp->hasNoSignedWrap() != RHSBinOp->hasNoSignedWrap())
return false;
@@ -172,16 +174,16 @@ bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
// Commuted equality
return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
- LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
+ LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
}
if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
- assert(isa<CmpInst>(RHSI)
- && "same opcode, but different instruction type?");
+ assert(isa<CmpInst>(RHSI) &&
+ "same opcode, but different instruction type?");
CmpInst *RHSCmp = cast<CmpInst>(RHSI);
// Commuted equality
return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
- LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
- LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
+ LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
+ LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
}
return false;
@@ -192,57 +194,52 @@ bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
//===----------------------------------------------------------------------===//
namespace {
- /// CallValue - Instances of this struct represent available call values in
- /// the scoped hash table.
- struct CallValue {
- Instruction *Inst;
+/// \brief Struct representing the available call values in the scoped hash
+/// table.
+struct CallValue {
+ Instruction *Inst;
- CallValue(Instruction *I) : Inst(I) {
- assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
- }
+ CallValue(Instruction *I) : Inst(I) {
+ assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
+ }
- bool isSentinel() const {
- return Inst == DenseMapInfo<Instruction*>::getEmptyKey() ||
- Inst == DenseMapInfo<Instruction*>::getTombstoneKey();
- }
+ bool isSentinel() const {
+ return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
+ Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
+ }
- static bool canHandle(Instruction *Inst) {
- // Don't value number anything that returns void.
- if (Inst->getType()->isVoidTy())
- return false;
+ static bool canHandle(Instruction *Inst) {
+ // Don't value number anything that returns void.
+ if (Inst->getType()->isVoidTy())
+ return false;
- CallInst *CI = dyn_cast<CallInst>(Inst);
- if (!CI || !CI->onlyReadsMemory())
- return false;
- return true;
- }
- };
+ CallInst *CI = dyn_cast<CallInst>(Inst);
+ if (!CI || !CI->onlyReadsMemory())
+ return false;
+ return true;
+ }
+};
}
namespace llvm {
- template<> struct DenseMapInfo<CallValue> {
- static inline CallValue getEmptyKey() {
- return DenseMapInfo<Instruction*>::getEmptyKey();
- }
- static inline CallValue getTombstoneKey() {
- return DenseMapInfo<Instruction*>::getTombstoneKey();
- }
- static unsigned getHashValue(CallValue Val);
- static bool isEqual(CallValue LHS, CallValue RHS);
- };
+template <> struct DenseMapInfo<CallValue> {
+ static inline CallValue getEmptyKey() {
+ return DenseMapInfo<Instruction *>::getEmptyKey();
+ }
+ static inline CallValue getTombstoneKey() {
+ return DenseMapInfo<Instruction *>::getTombstoneKey();
+ }
+ static unsigned getHashValue(CallValue Val);
+ static bool isEqual(CallValue LHS, CallValue RHS);
+};
}
+
unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
Instruction *Inst = Val.Inst;
- // Hash in all of the operands as pointers.
- unsigned Res = 0;
- for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) {
- assert(!Inst->getOperand(i)->getType()->isMetadataTy() &&
- "Cannot value number calls with metadata operands");
- Res ^= getHash(Inst->getOperand(i)) << (i & 0xF);
- }
-
- // Mix in the opcode.
- return (Res << 1) ^ Inst->getOpcode();
+ // Hash all of the operands as pointers and mix in the opcode.
+ return hash_combine(
+ Inst->getOpcode(),
+ hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}
bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
@@ -252,103 +249,104 @@ bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
return LHSI->isIdenticalTo(RHSI);
}
-
//===----------------------------------------------------------------------===//
-// EarlyCSE pass.
+// EarlyCSE implementation
//===----------------------------------------------------------------------===//
namespace {
-
-/// EarlyCSE - This pass does a simple depth-first walk over the dominator
-/// tree, eliminating trivially redundant instructions and using instsimplify
-/// to canonicalize things as it goes. It is intended to be fast and catch
-/// obvious cases so that instcombine and other passes are more effective. It
-/// is expected that a later pass of GVN will catch the interesting/hard
-/// cases.
-class EarlyCSE : public FunctionPass {
+/// \brief A simple and fast domtree-based CSE pass.
+///
+/// This pass does a simple depth-first walk over the dominator tree,
+/// eliminating trivially redundant instructions and using instsimplify to
+/// canonicalize things as it goes. It is intended to be fast and catch obvious
+/// cases so that instcombine and other passes are more effective. It is
+/// expected that a later pass of GVN will catch the interesting/hard cases.
+class EarlyCSE {
public:
- const DataLayout *DL;
- const TargetLibraryInfo *TLI;
- DominatorTree *DT;
- AssumptionCache *AC;
- typedef RecyclingAllocator<BumpPtrAllocator,
- ScopedHashTableVal<SimpleValue, Value*> > AllocatorTy;
- typedef ScopedHashTable<SimpleValue, Value*, DenseMapInfo<SimpleValue>,
+ Function &F;
+ const TargetLibraryInfo &TLI;
+ const TargetTransformInfo &TTI;
+ DominatorTree &DT;
+ AssumptionCache &AC;
+ typedef RecyclingAllocator<
+ BumpPtrAllocator, ScopedHashTableVal<SimpleValue, Value *>> AllocatorTy;
+ typedef ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
AllocatorTy> ScopedHTType;
- /// AvailableValues - This scoped hash table contains the current values of
- /// all of our simple scalar expressions. As we walk down the domtree, we
- /// look to see if instructions are in this: if so, we replace them with what
- /// we find, otherwise we insert them so that dominated values can succeed in
- /// their lookup.
- ScopedHTType *AvailableValues;
-
- /// AvailableLoads - This scoped hash table contains the current values
- /// of loads. This allows us to get efficient access to dominating loads when
- /// we have a fully redundant load. In addition to the most recent load, we
- /// keep track of a generation count of the read, which is compared against
- /// the current generation count. The current generation count is
- /// incremented after every possibly writing memory operation, which ensures
- /// that we only CSE loads with other loads that have no intervening store.
- typedef RecyclingAllocator<BumpPtrAllocator,
- ScopedHashTableVal<Value*, std::pair<Value*, unsigned> > > LoadMapAllocator;
- typedef ScopedHashTable<Value*, std::pair<Value*, unsigned>,
- DenseMapInfo<Value*>, LoadMapAllocator> LoadHTType;
- LoadHTType *AvailableLoads;
-
- /// AvailableCalls - This scoped hash table contains the current values
- /// of read-only call values. It uses the same generation count as loads.
- typedef ScopedHashTable<CallValue, std::pair<Value*, unsigned> > CallHTType;
- CallHTType *AvailableCalls;
-
- /// CurrentGeneration - This is the current generation of the memory value.
+ /// \brief A scoped hash table of the current values of all of our simple
+ /// scalar expressions.
+ ///
+ /// As we walk down the domtree, we look to see if instructions are in this:
+ /// if so, we replace them with what we find, otherwise we insert them so
+ /// that dominated values can succeed in their lookup.
+ ScopedHTType AvailableValues;
+
+ /// \brief A scoped hash table of the current values of loads.
+ ///
+ /// This allows us to get efficient access to dominating loads when we have
+ /// a fully redundant load. In addition to the most recent load, we keep
+ /// track of a generation count of the read, which is compared against the
+ /// current generation count. The current generation count is incremented
+ /// after every possibly writing memory operation, which ensures that we only
+ /// CSE loads with other loads that have no intervening store.
+ typedef RecyclingAllocator<
+ BumpPtrAllocator,
+ ScopedHashTableVal<Value *, std::pair<Value *, unsigned>>>
+ LoadMapAllocator;
+ typedef ScopedHashTable<Value *, std::pair<Value *, unsigned>,
+ DenseMapInfo<Value *>, LoadMapAllocator> LoadHTType;
+ LoadHTType AvailableLoads;
+
+ /// \brief A scoped hash table of the current values of read-only call
+ /// values.
+ ///
+ /// It uses the same generation count as loads.
+ typedef ScopedHashTable<CallValue, std::pair<Value *, unsigned>> CallHTType;
+ CallHTType AvailableCalls;
+
+ /// \brief This is the current generation of the memory value.
unsigned CurrentGeneration;
- static char ID;
- explicit EarlyCSE() : FunctionPass(ID) {
- initializeEarlyCSEPass(*PassRegistry::getPassRegistry());
- }
+ /// \brief Set up the EarlyCSE runner for a particular function.
+ EarlyCSE(Function &F, const TargetLibraryInfo &TLI,
+ const TargetTransformInfo &TTI, DominatorTree &DT,
+ AssumptionCache &AC)
+ : F(F), TLI(TLI), TTI(TTI), DT(DT), AC(AC), CurrentGeneration(0) {}
- bool runOnFunction(Function &F) override;
+ bool run();
private:
-
- // NodeScope - almost a POD, but needs to call the constructors for the
- // scoped hash tables so that a new scope gets pushed on. These are RAII so
- // that the scope gets popped when the NodeScope is destroyed.
+ // Almost a POD, but needs to call the constructors for the scoped hash
+ // tables so that a new scope gets pushed on. These are RAII so that the
+ // scope gets popped when the NodeScope is destroyed.
class NodeScope {
- public:
- NodeScope(ScopedHTType *availableValues,
- LoadHTType *availableLoads,
- CallHTType *availableCalls) :
- Scope(*availableValues),
- LoadScope(*availableLoads),
- CallScope(*availableCalls) {}
-
- private:
- NodeScope(const NodeScope&) LLVM_DELETED_FUNCTION;
- void operator=(const NodeScope&) LLVM_DELETED_FUNCTION;
+ public:
+ NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
+ CallHTType &AvailableCalls)
+ : Scope(AvailableValues), LoadScope(AvailableLoads),
+ CallScope(AvailableCalls) {}
+
+ private:
+ NodeScope(const NodeScope &) = delete;
+ void operator=(const NodeScope &) = delete;
ScopedHTType::ScopeTy Scope;
LoadHTType::ScopeTy LoadScope;
CallHTType::ScopeTy CallScope;
};
- // StackNode - contains all the needed information to create a stack for
- // doing a depth first tranversal of the tree. This includes scopes for
- // values, loads, and calls as well as the generation. There is a child
- // iterator so that the children do not need to be store spearately.
+ // Contains all the needed information to create a stack for doing a depth
+  // first traversal of the tree. This includes scopes for values, loads, and
+ // calls as well as the generation. There is a child iterator so that the
+  // children do not need to be stored separately.
class StackNode {
- public:
- StackNode(ScopedHTType *availableValues,
- LoadHTType *availableLoads,
- CallHTType *availableCalls,
- unsigned cg, DomTreeNode *n,
- DomTreeNode::iterator child, DomTreeNode::iterator end) :
- CurrentGeneration(cg), ChildGeneration(cg), Node(n),
- ChildIter(child), EndIter(end),
- Scopes(availableValues, availableLoads, availableCalls),
- Processed(false) {}
+ public:
+ StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
+ CallHTType &AvailableCalls, unsigned cg, DomTreeNode *n,
+ DomTreeNode::iterator child, DomTreeNode::iterator end)
+ : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
+ EndIter(end), Scopes(AvailableValues, AvailableLoads, AvailableCalls),
+ Processed(false) {}
// Accessors.
unsigned currentGeneration() { return CurrentGeneration; }
@@ -365,9 +363,9 @@ private:
bool isProcessed() { return Processed; }
void process() { Processed = true; }
- private:
- StackNode(const StackNode&) LLVM_DELETED_FUNCTION;
- void operator=(const StackNode&) LLVM_DELETED_FUNCTION;
+ private:
+ StackNode(const StackNode &) = delete;
+ void operator=(const StackNode &) = delete;
// Members.
unsigned CurrentGeneration;
@@ -379,31 +377,78 @@ private:
bool Processed;
};
+ /// \brief Wrapper class to handle memory instructions, including loads,
+ /// stores and intrinsic loads and stores defined by the target.
+ class ParseMemoryInst {
+ public:
+ ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
+ : Load(false), Store(false), Vol(false), MayReadFromMemory(false),
+ MayWriteToMemory(false), MatchingId(-1), Ptr(nullptr) {
+ MayReadFromMemory = Inst->mayReadFromMemory();
+ MayWriteToMemory = Inst->mayWriteToMemory();
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
+ MemIntrinsicInfo Info;
+ if (!TTI.getTgtMemIntrinsic(II, Info))
+ return;
+ if (Info.NumMemRefs == 1) {
+ Store = Info.WriteMem;
+ Load = Info.ReadMem;
+ MatchingId = Info.MatchingId;
+ MayReadFromMemory = Info.ReadMem;
+ MayWriteToMemory = Info.WriteMem;
+ Vol = Info.Vol;
+ Ptr = Info.PtrVal;
+ }
+ } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
+ Load = true;
+ Vol = !LI->isSimple();
+ Ptr = LI->getPointerOperand();
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+ Store = true;
+ Vol = !SI->isSimple();
+ Ptr = SI->getPointerOperand();
+ }
+ }
+ bool isLoad() { return Load; }
+ bool isStore() { return Store; }
+ bool isVolatile() { return Vol; }
+ bool isMatchingMemLoc(const ParseMemoryInst &Inst) {
+ return Ptr == Inst.Ptr && MatchingId == Inst.MatchingId;
+ }
+ bool isValid() { return Ptr != nullptr; }
+ int getMatchingId() { return MatchingId; }
+ Value *getPtr() { return Ptr; }
+ bool mayReadFromMemory() { return MayReadFromMemory; }
+ bool mayWriteToMemory() { return MayWriteToMemory; }
+
+ private:
+ bool Load;
+ bool Store;
+ bool Vol;
+ bool MayReadFromMemory;
+ bool MayWriteToMemory;
+ // For regular (non-intrinsic) loads/stores, this is set to -1. For
+ // intrinsic loads/stores, the id is retrieved from the corresponding
+ // field in the MemIntrinsicInfo structure. That field contains
+ // non-negative values only.
+ int MatchingId;
+ Value *Ptr;
+ };
+
bool processNode(DomTreeNode *Node);
- // This transformation requires dominator postdominator info
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<AssumptionCacheTracker>();
- AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<TargetLibraryInfo>();
- AU.setPreservesCFG();
+ Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
+ if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
+ return LI;
+ else if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
+ return SI->getValueOperand();
+ assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
+ return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
+ ExpectedType);
}
};
}
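
A rough standalone sketch (illustrative C++, not LLVM code) of the generation-count scheme behind AvailableLoads above: a cached load result is reused only if no possibly-writing instruction has been seen since it was recorded.

#include <cstdint>
#include <unordered_map>
#include <utility>

struct LoadCache {
  unsigned CurrentGeneration = 0;
  // pointer -> (cached value, generation at which it was recorded)
  std::unordered_map<const void *, std::pair<int64_t, unsigned>> Available;

  void noteMayWrite() { ++CurrentGeneration; } // any possibly-writing op
  void noteLoad(const void *Ptr, int64_t Val) {
    Available[Ptr] = {Val, CurrentGeneration};
  }
  // Reuse the cached value only if the generation still matches, i.e. no
  // intervening store could have changed the memory.
  bool lookup(const void *Ptr, int64_t &Val) const {
    auto It = Available.find(Ptr);
    if (It == Available.end() || It->second.second != CurrentGeneration)
      return false;
    Val = It->second.first;
    return true;
  }
};
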
-char EarlyCSE::ID = 0;
-
-// createEarlyCSEPass - The public interface to this file.
-FunctionPass *llvm::createEarlyCSEPass() {
- return new EarlyCSE();
-}
-
-INITIALIZE_PASS_BEGIN(EarlyCSE, "early-cse", "Early CSE", false, false)
-INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
-INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
-INITIALIZE_PASS_END(EarlyCSE, "early-cse", "Early CSE", false, false)
-
bool EarlyCSE::processNode(DomTreeNode *Node) {
BasicBlock *BB = Node->getBlock();
@@ -416,21 +461,46 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
if (!BB->getSinglePredecessor())
++CurrentGeneration;
+ // If this node has a single predecessor which ends in a conditional branch,
+ // we can infer the value of the branch condition given that we took this
+  // path. We need the single predecessor to ensure there's not another path
+ // which reaches this block where the condition might hold a different
+ // value. Since we're adding this to the scoped hash table (like any other
+ // def), it will have been popped if we encounter a future merge block.
+ if (BasicBlock *Pred = BB->getSinglePredecessor())
+ if (auto *BI = dyn_cast<BranchInst>(Pred->getTerminator()))
+ if (BI->isConditional())
+ if (auto *CondInst = dyn_cast<Instruction>(BI->getCondition()))
+ if (SimpleValue::canHandle(CondInst)) {
+ assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
+ auto *ConditionalConstant = (BI->getSuccessor(0) == BB) ?
+ ConstantInt::getTrue(BB->getContext()) :
+ ConstantInt::getFalse(BB->getContext());
+ AvailableValues.insert(CondInst, ConditionalConstant);
+ DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
+ << CondInst->getName() << "' as " << *ConditionalConstant
+ << " in " << BB->getName() << "\n");
+ // Replace all dominated uses with the known value
+ replaceDominatedUsesWith(CondInst, ConditionalConstant, DT,
+ BasicBlockEdge(Pred, BB));
+ }
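
A hypothetical source-level illustration of the conditional-value step above; the names are made up and only the shape of the code matters.

void consume(int);

void example(int x) {
  int cond = (x == 42);
  if (cond) {
    // On this path EarlyCSE records cond -> true, so the re-test below
    // (and any other use of cond dominated by this edge) folds away.
    if (cond)
      consume(x);
  }
}
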
+
/// LastStore - Keep track of the last non-volatile store that we saw... for
  /// as long as there is no instruction that reads memory.  If we see a store
/// to the same location, we delete the dead store. This zaps trivial dead
/// stores which can occur in bitfield code among other things.
- StoreInst *LastStore = nullptr;
+ Instruction *LastStore = nullptr;
bool Changed = false;
+ const DataLayout &DL = BB->getModule()->getDataLayout();
// See if any instructions in the block can be eliminated. If so, do it. If
// not, add them to AvailableValues.
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
Instruction *Inst = I++;
// Dead instructions should just be removed.
- if (isInstructionTriviallyDead(Inst, TLI)) {
+ if (isInstructionTriviallyDead(Inst, &TLI)) {
DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
Inst->eraseFromParent();
Changed = true;
@@ -449,7 +519,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// If the instruction can be simplified (e.g. X+0 = X) then replace it with
// its simpler value.
- if (Value *V = SimplifyInstruction(Inst, DL, TLI, DT, AC)) {
+ if (Value *V = SimplifyInstruction(Inst, DL, &TLI, &DT, &AC)) {
DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V << '\n');
Inst->replaceAllUsesWith(V);
Inst->eraseFromParent();
@@ -461,7 +531,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// If this is a simple instruction that we can value number, process it.
if (SimpleValue::canHandle(Inst)) {
// See if the instruction has an available value. If so, use it.
- if (Value *V = AvailableValues->lookup(Inst)) {
+ if (Value *V = AvailableValues.lookup(Inst)) {
DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << " to: " << *V << '\n');
Inst->replaceAllUsesWith(V);
Inst->eraseFromParent();
@@ -471,14 +541,15 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
}
// Otherwise, just remember that this value is available.
- AvailableValues->insert(Inst, Inst);
+ AvailableValues.insert(Inst, Inst);
continue;
}
+ ParseMemoryInst MemInst(Inst, TTI);
// If this is a non-volatile load, process it.
- if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
+ if (MemInst.isValid() && MemInst.isLoad()) {
// Ignore volatile loads.
- if (!LI->isSimple()) {
+ if (MemInst.isVolatile()) {
LastStore = nullptr;
// Don't CSE across synchronization boundaries.
if (Inst->mayWriteToMemory())
@@ -488,38 +559,48 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// If we have an available version of this load, and if it is the right
// generation, replace this instruction.
- std::pair<Value*, unsigned> InVal =
- AvailableLoads->lookup(Inst->getOperand(0));
+ std::pair<Value *, unsigned> InVal =
+ AvailableLoads.lookup(MemInst.getPtr());
if (InVal.first != nullptr && InVal.second == CurrentGeneration) {
- DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst << " to: "
- << *InVal.first << '\n');
- if (!Inst->use_empty()) Inst->replaceAllUsesWith(InVal.first);
- Inst->eraseFromParent();
- Changed = true;
- ++NumCSELoad;
- continue;
+ Value *Op = getOrCreateResult(InVal.first, Inst->getType());
+ if (Op != nullptr) {
+ DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
+ << " to: " << *InVal.first << '\n');
+ if (!Inst->use_empty())
+ Inst->replaceAllUsesWith(Op);
+ Inst->eraseFromParent();
+ Changed = true;
+ ++NumCSELoad;
+ continue;
+ }
}
// Otherwise, remember that we have this instruction.
- AvailableLoads->insert(Inst->getOperand(0),
- std::pair<Value*, unsigned>(Inst, CurrentGeneration));
+ AvailableLoads.insert(MemInst.getPtr(), std::pair<Value *, unsigned>(
+ Inst, CurrentGeneration));
LastStore = nullptr;
continue;
}
// If this instruction may read from memory, forget LastStore.
- if (Inst->mayReadFromMemory())
+ // Load/store intrinsics will indicate both a read and a write to
+ // memory. The target may override this (e.g. so that a store intrinsic
+ // does not read from memory, and thus will be treated the same as a
+ // regular store for commoning purposes).
+ if (Inst->mayReadFromMemory() &&
+ !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
LastStore = nullptr;
// If this is a read-only call, process it.
if (CallValue::canHandle(Inst)) {
// If we have an available version of this call, and if it is the right
// generation, replace this instruction.
- std::pair<Value*, unsigned> InVal = AvailableCalls->lookup(Inst);
+ std::pair<Value *, unsigned> InVal = AvailableCalls.lookup(Inst);
if (InVal.first != nullptr && InVal.second == CurrentGeneration) {
- DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst << " to: "
- << *InVal.first << '\n');
- if (!Inst->use_empty()) Inst->replaceAllUsesWith(InVal.first);
+ DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
+ << " to: " << *InVal.first << '\n');
+ if (!Inst->use_empty())
+ Inst->replaceAllUsesWith(InVal.first);
Inst->eraseFromParent();
Changed = true;
++NumCSECall;
@@ -527,8 +608,8 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
}
// Otherwise, remember that we have this instruction.
- AvailableCalls->insert(Inst,
- std::pair<Value*, unsigned>(Inst, CurrentGeneration));
+ AvailableCalls.insert(
+ Inst, std::pair<Value *, unsigned>(Inst, CurrentGeneration));
continue;
}
@@ -538,17 +619,19 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
if (Inst->mayWriteToMemory()) {
++CurrentGeneration;
- if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+ if (MemInst.isValid() && MemInst.isStore()) {
// We do a trivial form of DSE if there are two stores to the same
// location with no intervening loads. Delete the earlier store.
- if (LastStore &&
- LastStore->getPointerOperand() == SI->getPointerOperand()) {
- DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore << " due to: "
- << *Inst << '\n');
- LastStore->eraseFromParent();
- Changed = true;
- ++NumDSE;
- LastStore = nullptr;
+ if (LastStore) {
+ ParseMemoryInst LastStoreMemInst(LastStore, TTI);
+ if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
+ DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
+ << " due to: " << *Inst << '\n');
+ LastStore->eraseFromParent();
+ Changed = true;
+ ++NumDSE;
+ LastStore = nullptr;
+ }
// fallthrough - we can exploit information about this store
}
@@ -557,12 +640,12 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// version of the pointer. It is safe to forward from volatile stores
// to non-volatile loads, so we don't have to check for volatility of
// the store.
- AvailableLoads->insert(SI->getPointerOperand(),
- std::pair<Value*, unsigned>(SI->getValueOperand(), CurrentGeneration));
+ AvailableLoads.insert(MemInst.getPtr(), std::pair<Value *, unsigned>(
+ Inst, CurrentGeneration));
// Remember that this was the last store we saw for DSE.
- if (SI->isSimple())
- LastStore = SI;
+ if (!MemInst.isVolatile())
+ LastStore = Inst;
}
}
}
@@ -570,40 +653,20 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
return Changed;
}
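
A rough source-level sketch, assumed rather than taken from a test case, of the bitfield pattern the trivial DSE above is aimed at.

struct S { unsigned a : 4, b : 4; };

void init(struct S *s) {
  s->a = 1; // lowered to a read-modify-write store of the whole word
  s->b = 2; // once the reload is forwarded from the first store, the two
            // word-sized stores are back to back and the earlier one dies
}
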
-
-bool EarlyCSE::runOnFunction(Function &F) {
- if (skipOptnoneFunction(F))
- return false;
-
- // Note, deque is being used here because there is significant performance gains
- // over vector when the container becomes very large due to the specific access
- // patterns. For more information see the mailing list discussion on this:
+bool EarlyCSE::run() {
+  // Note: deque is being used here because there are significant performance
+ // gains over vector when the container becomes very large due to the
+ // specific access patterns. For more information see the mailing list
+ // discussion on this:
// http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
std::deque<StackNode *> nodesToProcess;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
- TLI = &getAnalysis<TargetLibraryInfo>();
- DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
-
- // Tables that the pass uses when walking the domtree.
- ScopedHTType AVTable;
- AvailableValues = &AVTable;
- LoadHTType LoadTable;
- AvailableLoads = &LoadTable;
- CallHTType CallTable;
- AvailableCalls = &CallTable;
-
- CurrentGeneration = 0;
bool Changed = false;
// Process the root node.
- nodesToProcess.push_back(
- new StackNode(AvailableValues, AvailableLoads, AvailableCalls,
- CurrentGeneration, DT->getRootNode(),
- DT->getRootNode()->begin(),
- DT->getRootNode()->end()));
+ nodesToProcess.push_back(new StackNode(
+ AvailableValues, AvailableLoads, AvailableCalls, CurrentGeneration,
+ DT.getRootNode(), DT.getRootNode()->begin(), DT.getRootNode()->end()));
// Save the current generation.
unsigned LiveOutGeneration = CurrentGeneration;
@@ -627,11 +690,9 @@ bool EarlyCSE::runOnFunction(Function &F) {
// Push the next child onto the stack.
DomTreeNode *child = NodeToProcess->nextChild();
nodesToProcess.push_back(
- new StackNode(AvailableValues,
- AvailableLoads,
- AvailableCalls,
- NodeToProcess->childGeneration(), child,
- child->begin(), child->end()));
+ new StackNode(AvailableValues, AvailableLoads, AvailableCalls,
+ NodeToProcess->childGeneration(), child, child->begin(),
+ child->end()));
} else {
// It has been processed, and there are no more children to process,
// so delete it and pop it off the stack.
@@ -645,3 +706,74 @@ bool EarlyCSE::runOnFunction(Function &F) {
return Changed;
}
+
+PreservedAnalyses EarlyCSEPass::run(Function &F,
+ AnalysisManager<Function> *AM) {
+ auto &TLI = AM->getResult<TargetLibraryAnalysis>(F);
+ auto &TTI = AM->getResult<TargetIRAnalysis>(F);
+ auto &DT = AM->getResult<DominatorTreeAnalysis>(F);
+ auto &AC = AM->getResult<AssumptionAnalysis>(F);
+
+ EarlyCSE CSE(F, TLI, TTI, DT, AC);
+
+ if (!CSE.run())
+ return PreservedAnalyses::all();
+
+ // CSE preserves the dominator tree because it doesn't mutate the CFG.
+ // FIXME: Bundle this with other CFG-preservation.
+ PreservedAnalyses PA;
+ PA.preserve<DominatorTreeAnalysis>();
+ return PA;
+}
+
+namespace {
+/// \brief A simple and fast domtree-based CSE pass.
+///
+/// This pass does a simple depth-first walk over the dominator tree,
+/// eliminating trivially redundant instructions and using instsimplify to
+/// canonicalize things as it goes. It is intended to be fast and catch obvious
+/// cases so that instcombine and other passes are more effective. It is
+/// expected that a later pass of GVN will catch the interesting/hard cases.
+class EarlyCSELegacyPass : public FunctionPass {
+public:
+ static char ID;
+
+ EarlyCSELegacyPass() : FunctionPass(ID) {
+ initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F) override {
+ if (skipOptnoneFunction(F))
+ return false;
+
+ auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+ auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+ auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
+
+ EarlyCSE CSE(F, TLI, TTI, DT, AC);
+
+ return CSE.run();
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<AssumptionCacheTracker>();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
+ AU.setPreservesCFG();
+ }
+};
+}
+
+char EarlyCSELegacyPass::ID = 0;
+
+FunctionPass *llvm::createEarlyCSEPass() { return new EarlyCSELegacyPass(); }
+
+INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
+ false)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
+INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)
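
A minimal usage sketch, assuming ordinary host code outside this file, showing the legacy wrapper being scheduled through the legacy pass manager.

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Scalar.h"

static bool runEarlyCSEOn(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createEarlyCSEPass()); // factory defined above
  return PM.run(M);
}
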
diff --git a/lib/Transforms/Scalar/Float2Int.cpp b/lib/Transforms/Scalar/Float2Int.cpp
new file mode 100644
index 000000000000..c9314229c38b
--- /dev/null
+++ b/lib/Transforms/Scalar/Float2Int.cpp
@@ -0,0 +1,540 @@
+//===- Float2Int.cpp - Demote floating point ops to work on integers ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Float2Int pass, which aims to demote floating
+// point operations to work on integers, where that is losslessly possible.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "float2int"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/EquivalenceClasses.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Scalar.h"
+#include <deque>
+#include <functional> // For std::function
+using namespace llvm;
+
+// The algorithm is simple. Start at instructions that convert from the
+// float to the int domain: fptoui, fptosi and fcmp. Walk up the def-use
+// graph, using an equivalence data structure to unify graphs that interfere.
+//
+// Mappable instructions are those with an integer corollary that, given
+// integer domain inputs, produce an integer output; fadd, for example.
+//
+// If a non-mappable instruction is seen, this entire def-use graph is marked
+// as non-transformable. If we see an instruction that converts from the
+// integer domain to FP domain (uitofp, sitofp), we terminate our walk.
+
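
A C-level sketch of the kind of code this pass targets, assuming the front end emits the usual sitofp/fadd/fptosi sequence; the example is illustrative, not taken from a test.

// Both operands originate as 16-bit integers and the float sum is
// immediately converted back, so the addition can be done on integers
// without changing the result.
int sum(short a, short b) {
  return (int)((float)a + (float)b);
}
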
+/// The largest integer type worth dealing with.
+static cl::opt<unsigned>
+MaxIntegerBW("float2int-max-integer-bw", cl::init(64), cl::Hidden,
+             cl::desc("Max integer bitwidth to consider in float2int "
+ "(default=64)"));
+
+namespace {
+ struct Float2Int : public FunctionPass {
+ static char ID; // Pass identification, replacement for typeid
+ Float2Int() : FunctionPass(ID) {
+ initializeFloat2IntPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ }
+
+ void findRoots(Function &F, SmallPtrSet<Instruction*,8> &Roots);
+ ConstantRange seen(Instruction *I, ConstantRange R);
+ ConstantRange badRange();
+ ConstantRange unknownRange();
+ ConstantRange validateRange(ConstantRange R);
+ void walkBackwards(const SmallPtrSetImpl<Instruction*> &Roots);
+ void walkForwards();
+ bool validateAndTransform();
+ Value *convert(Instruction *I, Type *ToTy);
+ void cleanup();
+
+ MapVector<Instruction*, ConstantRange > SeenInsts;
+ SmallPtrSet<Instruction*,8> Roots;
+ EquivalenceClasses<Instruction*> ECs;
+ MapVector<Instruction*, Value*> ConvertedInsts;
+ LLVMContext *Ctx;
+ };
+}
+
+char Float2Int::ID = 0;
+INITIALIZE_PASS(Float2Int, "float2int", "Float to int", false, false)
+
+// Given an FCmp predicate, return a matching ICmp predicate if one
+// exists, otherwise return BAD_ICMP_PREDICATE.
+static CmpInst::Predicate mapFCmpPred(CmpInst::Predicate P) {
+ switch (P) {
+ case CmpInst::FCMP_OEQ:
+ case CmpInst::FCMP_UEQ:
+ return CmpInst::ICMP_EQ;
+ case CmpInst::FCMP_OGT:
+ case CmpInst::FCMP_UGT:
+ return CmpInst::ICMP_SGT;
+ case CmpInst::FCMP_OGE:
+ case CmpInst::FCMP_UGE:
+ return CmpInst::ICMP_SGE;
+ case CmpInst::FCMP_OLT:
+ case CmpInst::FCMP_ULT:
+ return CmpInst::ICMP_SLT;
+ case CmpInst::FCMP_OLE:
+ case CmpInst::FCMP_ULE:
+ return CmpInst::ICMP_SLE;
+ case CmpInst::FCMP_ONE:
+ case CmpInst::FCMP_UNE:
+ return CmpInst::ICMP_NE;
+ default:
+ return CmpInst::BAD_ICMP_PREDICATE;
+ }
+}
+
+// Given a floating point binary operator, return the matching
+// integer version.
+static Instruction::BinaryOps mapBinOpcode(unsigned Opcode) {
+ switch (Opcode) {
+ default: llvm_unreachable("Unhandled opcode!");
+ case Instruction::FAdd: return Instruction::Add;
+ case Instruction::FSub: return Instruction::Sub;
+ case Instruction::FMul: return Instruction::Mul;
+ }
+}
+
+// Find the roots - instructions that convert from the FP domain to
+// integer domain.
+void Float2Int::findRoots(Function &F, SmallPtrSet<Instruction*,8> &Roots) {
+ for (auto &I : inst_range(F)) {
+ switch (I.getOpcode()) {
+ default: break;
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ Roots.insert(&I);
+ break;
+ case Instruction::FCmp:
+ if (mapFCmpPred(cast<CmpInst>(&I)->getPredicate()) !=
+ CmpInst::BAD_ICMP_PREDICATE)
+ Roots.insert(&I);
+ break;
+ }
+ }
+}
+
+// Helper - mark I as having been traversed, having range R.
+ConstantRange Float2Int::seen(Instruction *I, ConstantRange R) {
+ DEBUG(dbgs() << "F2I: " << *I << ":" << R << "\n");
+ if (SeenInsts.find(I) != SeenInsts.end())
+ SeenInsts.find(I)->second = R;
+ else
+ SeenInsts.insert(std::make_pair(I, R));
+ return R;
+}
+
+// Helper - get a range representing a poison value.
+ConstantRange Float2Int::badRange() {
+ return ConstantRange(MaxIntegerBW + 1, true);
+}
+ConstantRange Float2Int::unknownRange() {
+ return ConstantRange(MaxIntegerBW + 1, false);
+}
+ConstantRange Float2Int::validateRange(ConstantRange R) {
+ if (R.getBitWidth() > MaxIntegerBW + 1)
+ return badRange();
+ return R;
+}
+
+// The most obvious way to structure the search is a depth-first, eager
+// search from each root. However, that requires direct recursion and so
+// can only handle small instruction sequences. Instead, we split the search
+// up into two phases:
+// - walkBackwards: A breadth-first walk of the use-def graph starting from
+// the roots. Populate "SeenInsts" with interesting
+// instructions and poison values if they're obvious and
+//                   cheap to compute. Calculate the equivalence set structure
+// while we're here too.
+// - walkForwards: Iterate over SeenInsts in reverse order, so we visit
+// defs before their uses. Calculate the real range info.
+
+// Breadth-first walk of the use-def graph; determine the set of nodes
+// we care about and eagerly determine if some of them are poisonous.
+void Float2Int::walkBackwards(const SmallPtrSetImpl<Instruction*> &Roots) {
+ std::deque<Instruction*> Worklist(Roots.begin(), Roots.end());
+ while (!Worklist.empty()) {
+ Instruction *I = Worklist.back();
+ Worklist.pop_back();
+
+ if (SeenInsts.find(I) != SeenInsts.end())
+ // Seen already.
+ continue;
+
+ switch (I->getOpcode()) {
+ // FIXME: Handle select and phi nodes.
+ default:
+ // Path terminated uncleanly.
+ seen(I, badRange());
+ break;
+
+ case Instruction::UIToFP: {
+ // Path terminated cleanly.
+ unsigned BW = I->getOperand(0)->getType()->getPrimitiveSizeInBits();
+ APInt Min = APInt::getMinValue(BW).zextOrSelf(MaxIntegerBW+1);
+ APInt Max = APInt::getMaxValue(BW).zextOrSelf(MaxIntegerBW+1);
+ seen(I, validateRange(ConstantRange(Min, Max)));
+ continue;
+ }
+
+ case Instruction::SIToFP: {
+ // Path terminated cleanly.
+ unsigned BW = I->getOperand(0)->getType()->getPrimitiveSizeInBits();
+ APInt SMin = APInt::getSignedMinValue(BW).sextOrSelf(MaxIntegerBW+1);
+ APInt SMax = APInt::getSignedMaxValue(BW).sextOrSelf(MaxIntegerBW+1);
+ seen(I, validateRange(ConstantRange(SMin, SMax)));
+ continue;
+ }
+
+ case Instruction::FAdd:
+ case Instruction::FSub:
+ case Instruction::FMul:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::FCmp:
+ seen(I, unknownRange());
+ break;
+ }
+
+ for (Value *O : I->operands()) {
+ if (Instruction *OI = dyn_cast<Instruction>(O)) {
+ // Unify def-use chains if they interfere.
+ ECs.unionSets(I, OI);
+ if (SeenInsts.find(I)->second != badRange())
+ Worklist.push_back(OI);
+ } else if (!isa<ConstantFP>(O)) {
+ // Not an instruction or ConstantFP? we can't do anything.
+ seen(I, badRange());
+ }
+ }
+ }
+}
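
A hedged sketch, not part of the pass, of the range seeded for the operand of `sitofp i16 %x to float` under the default MaxIntegerBW of 64 (so ranges are carried at 65 bits).

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
using namespace llvm;

ConstantRange seedForSIToFPOfI16() {
  const unsigned BW = 16;                                   // operand width
  APInt SMin = APInt::getSignedMinValue(BW).sextOrSelf(65); // -32768
  APInt SMax = APInt::getSignedMaxValue(BW).sextOrSelf(65); //  32767
  // Mirrors the SIToFP case above: the value entering the FP domain is
  // known to fit in 16 signed bits, which walkForwards then propagates
  // through fadd/fsub/fmul users via ConstantRange arithmetic.
  return ConstantRange(SMin, SMax);
}
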
+
+// Walk forwards down the list of seen instructions, so we visit defs before
+// uses.
+void Float2Int::walkForwards() {
+ for (auto It = SeenInsts.rbegin(), E = SeenInsts.rend(); It != E; ++It) {
+ if (It->second != unknownRange())
+ continue;
+
+ Instruction *I = It->first;
+ std::function<ConstantRange(ArrayRef<ConstantRange>)> Op;
+ switch (I->getOpcode()) {
+ // FIXME: Handle select and phi nodes.
+ default:
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+      llvm_unreachable("Should have been handled in walkBackwards!");
+
+ case Instruction::FAdd:
+ Op = [](ArrayRef<ConstantRange> Ops) {
+ assert(Ops.size() == 2 && "FAdd is a binary operator!");
+ return Ops[0].add(Ops[1]);
+ };
+ break;
+
+ case Instruction::FSub:
+ Op = [](ArrayRef<ConstantRange> Ops) {
+ assert(Ops.size() == 2 && "FSub is a binary operator!");
+ return Ops[0].sub(Ops[1]);
+ };
+ break;
+
+ case Instruction::FMul:
+ Op = [](ArrayRef<ConstantRange> Ops) {
+ assert(Ops.size() == 2 && "FMul is a binary operator!");
+ return Ops[0].multiply(Ops[1]);
+ };
+ break;
+
+ //
+ // Root-only instructions - we'll only see these if they're the
+ // first node in a walk.
+ //
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ Op = [](ArrayRef<ConstantRange> Ops) {
+ assert(Ops.size() == 1 && "FPTo[US]I is a unary operator!");
+ return Ops[0];
+ };
+ break;
+
+ case Instruction::FCmp:
+ Op = [](ArrayRef<ConstantRange> Ops) {
+ assert(Ops.size() == 2 && "FCmp is a binary operator!");
+ return Ops[0].unionWith(Ops[1]);
+ };
+ break;
+ }
+
+ bool Abort = false;
+ SmallVector<ConstantRange,4> OpRanges;
+ for (Value *O : I->operands()) {
+ if (Instruction *OI = dyn_cast<Instruction>(O)) {
+ assert(SeenInsts.find(OI) != SeenInsts.end() &&
+ "def not seen before use!");
+ OpRanges.push_back(SeenInsts.find(OI)->second);
+ } else if (ConstantFP *CF = dyn_cast<ConstantFP>(O)) {
+ // Work out if the floating point number can be losslessly represented
+ // as an integer.
+ // APFloat::convertToInteger(&Exact) purports to do what we want, but
+ // the exactness can be too precise. For example, negative zero can
+ // never be exactly converted to an integer.
+ //
+ // Instead, we ask APFloat to round itself to an integral value - this
+ // preserves sign-of-zero - then compare the result with the original.
+ //
+ APFloat F = CF->getValueAPF();
+
+ // First, weed out obviously incorrect values. Non-finite numbers
+ // can't be represented and neither can negative zero, unless
+ // we're in fast math mode.
+ if (!F.isFinite() ||
+ (F.isZero() && F.isNegative() && isa<FPMathOperator>(I) &&
+ !I->hasNoSignedZeros())) {
+ seen(I, badRange());
+ Abort = true;
+ break;
+ }
+
+ APFloat NewF = F;
+ auto Res = NewF.roundToIntegral(APFloat::rmNearestTiesToEven);
+ if (Res != APFloat::opOK || NewF.compare(F) != APFloat::cmpEqual) {
+ seen(I, badRange());
+ Abort = true;
+ break;
+ }
+ // OK, it's representable. Now get it.
+ APSInt Int(MaxIntegerBW+1, false);
+ bool Exact;
+ CF->getValueAPF().convertToInteger(Int,
+ APFloat::rmNearestTiesToEven,
+ &Exact);
+ OpRanges.push_back(ConstantRange(Int));
+ } else {
+ llvm_unreachable("Should have already marked this as badRange!");
+ }
+ }
+
+ // Reduce the operands' ranges to a single range and return.
+ if (!Abort)
+ seen(I, Op(OpRanges));
+ }
+}
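
A hedged sketch of the round-and-compare test above: unlike convertToInteger's exactness flag, it tolerates -0.0 (which the pass screens separately) while still rejecting values with a fractional part. The helper name is made up.

#include "llvm/ADT/APFloat.h"
using namespace llvm;

static bool losslesslyIntegral(const APFloat &F) {
  if (!F.isFinite())
    return false;                 // NaN/Inf can never be an integer
  APFloat Rounded = F;
  if (Rounded.roundToIntegral(APFloat::rmNearestTiesToEven) != APFloat::opOK)
    return false;                 // rounding was inexact, e.g. 2.5
  return Rounded.compare(F) == APFloat::cmpEqual; // true for -0.0
}
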
+
+// If there is a valid transform to be done, do it.
+bool Float2Int::validateAndTransform() {
+ bool MadeChange = false;
+
+ // Iterate over every disjoint partition of the def-use graph.
+ for (auto It = ECs.begin(), E = ECs.end(); It != E; ++It) {
+ ConstantRange R(MaxIntegerBW + 1, false);
+ bool Fail = false;
+ Type *ConvertedToTy = nullptr;
+
+ // For every member of the partition, union all the ranges together.
+ for (auto MI = ECs.member_begin(It), ME = ECs.member_end();
+ MI != ME; ++MI) {
+ Instruction *I = *MI;
+ auto SeenI = SeenInsts.find(I);
+ if (SeenI == SeenInsts.end())
+ continue;
+
+ R = R.unionWith(SeenI->second);
+ // We need to ensure I has no users that have not been seen.
+      // If it does, the transformation would be illegal.
+ //
+ // Don't count the roots, as they terminate the graphs.
+ if (Roots.count(I) == 0) {
+ // Set the type of the conversion while we're here.
+ if (!ConvertedToTy)
+ ConvertedToTy = I->getType();
+ for (User *U : I->users()) {
+ Instruction *UI = dyn_cast<Instruction>(U);
+ if (!UI || SeenInsts.find(UI) == SeenInsts.end()) {
+ DEBUG(dbgs() << "F2I: Failing because of " << *U << "\n");
+ Fail = true;
+ break;
+ }
+ }
+ }
+ if (Fail)
+ break;
+ }
+
+ // If the set was empty, or we failed, or the range is poisonous,
+ // bail out.
+ if (ECs.member_begin(It) == ECs.member_end() || Fail ||
+ R.isFullSet() || R.isSignWrappedSet())
+ continue;
+ assert(ConvertedToTy && "Must have set the convertedtoty by this point!");
+
+ // The number of bits required is the maximum of the upper and
+ // lower limits, plus one so it can be signed.
+ unsigned MinBW = std::max(R.getLower().getMinSignedBits(),
+ R.getUpper().getMinSignedBits()) + 1;
+ DEBUG(dbgs() << "F2I: MinBitwidth=" << MinBW << ", R: " << R << "\n");
+
+ // If we've run off the realms of the exactly representable integers,
+ // the floating point result will differ from an integer approximation.
+
+ // Do we need more bits than are in the mantissa of the type we converted
+ // to? semanticsPrecision returns the number of mantissa bits plus one
+ // for the sign bit.
+ unsigned MaxRepresentableBits
+ = APFloat::semanticsPrecision(ConvertedToTy->getFltSemantics()) - 1;
+ if (MinBW > MaxRepresentableBits) {
+ DEBUG(dbgs() << "F2I: Value not guaranteed to be representable!\n");
+ continue;
+ }
+ if (MinBW > 64) {
+ DEBUG(dbgs() << "F2I: Value requires more than 64 bits to represent!\n");
+ continue;
+ }
+
+ // OK, R is known to be representable. Now pick a type for it.
+ // FIXME: Pick the smallest legal type that will fit.
+ Type *Ty = (MinBW > 32) ? Type::getInt64Ty(*Ctx) : Type::getInt32Ty(*Ctx);
+
+ for (auto MI = ECs.member_begin(It), ME = ECs.member_end();
+ MI != ME; ++MI)
+ convert(*MI, Ty);
+ MadeChange = true;
+ }
+
+ return MadeChange;
+}
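
A small numeric sketch, with made-up values, of the representability arithmetic above for an equivalence class converted through IEEE single precision (24 mantissa bits) whose union of ranges is roughly [-1000, 1000].

#include <algorithm>
#include <cstdio>

int main() {
  unsigned LowerBits = 11, UpperBits = 11; // min signed bits of -1000 / 1000
  unsigned MinBW = std::max(LowerBits, UpperBits) + 1; // 12
  unsigned MaxRepresentableBits = 24 - 1;              // float precision - 1
  bool Convert = MinBW <= MaxRepresentableBits && MinBW <= 64;
  std::printf("convert: %d, narrow enough for i32: %d\n", Convert, MinBW <= 32);
  return 0;
}
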
+
+Value *Float2Int::convert(Instruction *I, Type *ToTy) {
+ if (ConvertedInsts.find(I) != ConvertedInsts.end())
+ // Already converted this instruction.
+ return ConvertedInsts[I];
+
+ SmallVector<Value*,4> NewOperands;
+ for (Value *V : I->operands()) {
+ // Don't recurse if we're an instruction that terminates the path.
+ if (I->getOpcode() == Instruction::UIToFP ||
+ I->getOpcode() == Instruction::SIToFP) {
+ NewOperands.push_back(V);
+ } else if (Instruction *VI = dyn_cast<Instruction>(V)) {
+ NewOperands.push_back(convert(VI, ToTy));
+ } else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
+ APSInt Val(ToTy->getPrimitiveSizeInBits(), /*IsUnsigned=*/false);
+ bool Exact;
+ CF->getValueAPF().convertToInteger(Val,
+ APFloat::rmNearestTiesToEven,
+ &Exact);
+ NewOperands.push_back(ConstantInt::get(ToTy, Val));
+ } else {
+ llvm_unreachable("Unhandled operand type?");
+ }
+ }
+
+ // Now create a new instruction.
+ IRBuilder<> IRB(I);
+ Value *NewV = nullptr;
+ switch (I->getOpcode()) {
+ default: llvm_unreachable("Unhandled instruction!");
+
+ case Instruction::FPToUI:
+ NewV = IRB.CreateZExtOrTrunc(NewOperands[0], I->getType());
+ break;
+
+ case Instruction::FPToSI:
+ NewV = IRB.CreateSExtOrTrunc(NewOperands[0], I->getType());
+ break;
+
+ case Instruction::FCmp: {
+ CmpInst::Predicate P = mapFCmpPred(cast<CmpInst>(I)->getPredicate());
+ assert(P != CmpInst::BAD_ICMP_PREDICATE && "Unhandled predicate!");
+ NewV = IRB.CreateICmp(P, NewOperands[0], NewOperands[1], I->getName());
+ break;
+ }
+
+ case Instruction::UIToFP:
+ NewV = IRB.CreateZExtOrTrunc(NewOperands[0], ToTy);
+ break;
+
+ case Instruction::SIToFP:
+ NewV = IRB.CreateSExtOrTrunc(NewOperands[0], ToTy);
+ break;
+
+ case Instruction::FAdd:
+ case Instruction::FSub:
+ case Instruction::FMul:
+ NewV = IRB.CreateBinOp(mapBinOpcode(I->getOpcode()),
+ NewOperands[0], NewOperands[1],
+ I->getName());
+ break;
+ }
+
+ // If we're a root instruction, RAUW.
+ if (Roots.count(I))
+ I->replaceAllUsesWith(NewV);
+
+ ConvertedInsts[I] = NewV;
+ return NewV;
+}
+
+// Perform dead code elimination on the instructions we just modified.
+void Float2Int::cleanup() {
+ for (auto I = ConvertedInsts.rbegin(), E = ConvertedInsts.rend();
+ I != E; ++I)
+ I->first->eraseFromParent();
+}
+
+bool Float2Int::runOnFunction(Function &F) {
+ if (skipOptnoneFunction(F))
+ return false;
+
+ DEBUG(dbgs() << "F2I: Looking at function " << F.getName() << "\n");
+ // Clear out all state.
+ ECs = EquivalenceClasses<Instruction*>();
+ SeenInsts.clear();
+ ConvertedInsts.clear();
+ Roots.clear();
+
+ Ctx = &F.getParent()->getContext();
+
+ findRoots(F, Roots);
+
+ walkBackwards(Roots);
+ walkForwards();
+
+ bool Modified = validateAndTransform();
+ if (Modified)
+ cleanup();
+ return Modified;
+}
+
+FunctionPass *llvm::createFloat2IntPass() {
+ return new Float2Int();
+}
+
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 1ed14d001093..7770ddcb9d7a 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -33,6 +33,7 @@
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
@@ -45,7 +46,7 @@
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
@@ -458,7 +459,7 @@ uint32_t ValueTable::lookup_or_add(Value *V) {
return e;
}
-/// lookup - Returns the value number of the specified value. Fails if
+/// Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
@@ -466,7 +467,7 @@ uint32_t ValueTable::lookup(Value *V) const {
return VI->second;
}
-/// lookup_or_add_cmp - Returns the value number of the given comparison,
+/// Returns the value number of the given comparison,
/// assigning it a new number if it did not have one before. Useful when
/// we deduced the result of a comparison, but don't immediately have an
/// instruction realizing that comparison to hand.
@@ -479,14 +480,14 @@ uint32_t ValueTable::lookup_or_add_cmp(unsigned Opcode,
return e;
}
-/// clear - Remove all entries from the ValueTable.
+/// Remove all entries from the ValueTable.
void ValueTable::clear() {
valueNumbering.clear();
expressionNumbering.clear();
nextValueNumber = 1;
}
-/// erase - Remove a value from the value numbering.
+/// Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
valueNumbering.erase(V);
}
@@ -582,23 +583,22 @@ namespace {
return cast<MemIntrinsic>(Val.getPointer());
}
- /// MaterializeAdjustedValue - Emit code into this block to adjust the value
- /// defined here to the specified type. This handles various coercion cases.
- Value *MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) const;
+ /// Emit code into this block to adjust the value defined here to the
+ /// specified type. This handles various coercion cases.
+ Value *MaterializeAdjustedValue(LoadInst *LI, GVN &gvn) const;
};
class GVN : public FunctionPass {
bool NoLoads;
MemoryDependenceAnalysis *MD;
DominatorTree *DT;
- const DataLayout *DL;
const TargetLibraryInfo *TLI;
AssumptionCache *AC;
SetVector<BasicBlock *> DeadBlocks;
ValueTable VN;
- /// LeaderTable - A mapping from value numbers to lists of Value*'s that
+ /// A mapping from value numbers to lists of Value*'s that
/// have that value number. Use findLeader to query it.
struct LeaderTableEntry {
Value *Val;
@@ -623,20 +623,18 @@ namespace {
bool runOnFunction(Function &F) override;
- /// markInstructionForDeletion - This removes the specified instruction from
+ /// This removes the specified instruction from
/// our various maps and marks it for deletion.
void markInstructionForDeletion(Instruction *I) {
VN.erase(I);
InstrsToErase.push_back(I);
}
- const DataLayout *getDataLayout() const { return DL; }
DominatorTree &getDominatorTree() const { return *DT; }
AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
MemoryDependenceAnalysis &getMemDep() const { return *MD; }
private:
- /// addToLeaderTable - Push a new Value to the LeaderTable onto the list for
- /// its value number.
+    /// Push a new Value onto the LeaderTable list for its value number.
void addToLeaderTable(uint32_t N, Value *V, const BasicBlock *BB) {
LeaderTableEntry &Curr = LeaderTable[N];
if (!Curr.Val) {
@@ -652,7 +650,7 @@ namespace {
Curr.Next = Node;
}
- /// removeFromLeaderTable - Scan the list of values corresponding to a given
+ /// Scan the list of values corresponding to a given
/// value number, and remove the given instruction if encountered.
void removeFromLeaderTable(uint32_t N, Instruction *I, BasicBlock *BB) {
LeaderTableEntry* Prev = nullptr;
@@ -685,7 +683,7 @@ namespace {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<AssumptionCacheTracker>();
AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<TargetLibraryInfo>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
if (!NoLoads)
AU.addRequired<MemoryDependenceAnalysis>();
AU.addRequired<AliasAnalysis>();
@@ -711,13 +709,13 @@ namespace {
bool iterateOnFunction(Function &F);
bool performPRE(Function &F);
bool performScalarPRE(Instruction *I);
+ bool performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
+ unsigned int ValNo);
Value *findLeader(const BasicBlock *BB, uint32_t num);
void cleanupGlobalSets();
void verifyRemoved(const Instruction *I) const;
bool splitCriticalEdges();
BasicBlock *splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ);
- unsigned replaceAllDominatedUsesWith(Value *From, Value *To,
- const BasicBlockEdge &Root);
bool propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root);
bool processFoldableCondBr(BranchInst *BI);
void addDeadBlock(BasicBlock *BB);
@@ -727,7 +725,7 @@ namespace {
char GVN::ID = 0;
}
-// createGVNPass - The public interface to this file...
+// The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoLoads) {
return new GVN(NoLoads);
}
@@ -736,7 +734,7 @@ INITIALIZE_PASS_BEGIN(GVN, "gvn", "Global Value Numbering", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false)
@@ -752,7 +750,7 @@ void GVN::dump(DenseMap<uint32_t, Value*>& d) {
}
#endif
-/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
+/// Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks. This
/// map is actually a tri-state map with the following values:
@@ -798,7 +796,7 @@ static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
return true;
-// SpeculationFailure - If we get here, we found out that this is not, after
+// If we get here, we found out that this is not, after
// all, a fully-available block. We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
@@ -833,8 +831,7 @@ SpeculationFailure:
}
-/// CanCoerceMustAliasedValueToLoad - Return true if
-/// CoerceAvailableValueToLoadType will succeed.
+/// Return true if CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
Type *LoadTy,
const DataLayout &DL) {
@@ -853,7 +850,7 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
return true;
}
-/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
+/// If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value. LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
@@ -938,7 +935,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}
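
A source-level sketch (type-punning caveats aside) of the mismatch handled here: the store and load must-alias but disagree on type, so the stored bits are forwarded through a bitcast instead of being reloaded.

unsigned bits_of(float f) {
  union { float F; unsigned U; } u;
  u.F = f;     // store of a float
  return u.U;  // same-sized load read back as an i32
}
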
-/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
+/// This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove). This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
@@ -956,8 +953,9 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
return -1;
int64_t StoreOffset = 0, LoadOffset = 0;
- Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr,StoreOffset,&DL);
- Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, &DL);
+ Value *StoreBase =
+ GetPointerBaseWithConstantOffset(WritePtr, StoreOffset, DL);
+ Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, DL);
if (StoreBase != LoadBase)
return -1;
@@ -1018,23 +1016,23 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
return LoadOffset-StoreOffset;
}
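
A rough C-level sketch, not from a test case and with strict-aliasing caveats aside, of the clobbering-write case analyzed above: an 8-byte store at p and a 4-byte load at p + 4 share a base, so the analysis reports byte offset 4 and the loaded bits can be extracted from the stored value.

void f(long long *p, int *out) {
  *p = 0x1122334455667788LL; // clobbering 8-byte store
  *out = *((int *)p + 1);    // 4-byte load at offset 4 into *p
}
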
-/// AnalyzeLoadFromClobberingStore - This function is called when we have a
+/// This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
- StoreInst *DepSI,
- const DataLayout &DL) {
+ StoreInst *DepSI) {
// Cannot handle reading from store of first-class aggregate yet.
if (DepSI->getValueOperand()->getType()->isStructTy() ||
DepSI->getValueOperand()->getType()->isArrayTy())
return -1;
+ const DataLayout &DL = DepSI->getModule()->getDataLayout();
Value *StorePtr = DepSI->getPointerOperand();
uint64_t StoreSize =DL.getTypeSizeInBits(DepSI->getValueOperand()->getType());
return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
StorePtr, StoreSize, DL);
}
-/// AnalyzeLoadFromClobberingLoad - This function is called when we have a
+/// This function is called when we have a
/// memdep query of a load that ends up being clobbered by another load. See if
/// the other load can feed into the second load.
static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
@@ -1052,11 +1050,11 @@ static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
// then we should widen it!
int64_t LoadOffs = 0;
const Value *LoadBase =
- GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, &DL);
+ GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, DL);
unsigned LoadSize = DL.getTypeStoreSize(LoadTy);
- unsigned Size = MemoryDependenceAnalysis::
- getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI, DL);
+ unsigned Size = MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
+ LoadBase, LoadOffs, LoadSize, DepLI);
if (Size == 0) return -1;
return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, DL);
@@ -1086,7 +1084,7 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
Constant *Src = dyn_cast<Constant>(MTI->getSource());
if (!Src) return -1;
- GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &DL));
+ GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, DL));
if (!GV || !GV->isConstant()) return -1;
// See if the access is within the bounds of the transfer.
@@ -1102,15 +1100,16 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
Type::getInt8PtrTy(Src->getContext(), AS));
Constant *OffsetCst =
ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
- Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
+ Src = ConstantExpr::getGetElementPtr(Type::getInt8Ty(Src->getContext()), Src,
+ OffsetCst);
Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
- if (ConstantFoldLoadFromConstPtr(Src, &DL))
+ if (ConstantFoldLoadFromConstPtr(Src, DL))
return Offset;
return -1;
}
-/// GetStoreValueForLoad - This function is called when we have a
+/// This function is called when we have a
/// memdep query of a load that ends up being a clobbering store. This means
/// that the store provides bits used by the load but the pointers don't
/// mustalias. Check this case to see if there is anything more we can do
@@ -1149,7 +1148,7 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, DL);
}
-/// GetLoadValueForLoad - This function is called when we have a
+/// This function is called when we have a
/// memdep query of a load that ends up being a clobbering load. This means
/// that the load *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias. Check this case to see if there is
@@ -1157,7 +1156,7 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
Type *LoadTy, Instruction *InsertPt,
GVN &gvn) {
- const DataLayout &DL = *gvn.getDataLayout();
+ const DataLayout &DL = SrcVal->getModule()->getDataLayout();
// If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to
// widen SrcVal out to a larger load.
unsigned SrcValSize = DL.getTypeStoreSize(SrcVal->getType());
@@ -1212,7 +1211,7 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
}
-/// GetMemInstValueForLoad - This function is called when we have a
+/// This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
Type *LoadTy, Instruction *InsertPt,
@@ -1263,13 +1262,14 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
Type::getInt8PtrTy(Src->getContext(), AS));
Constant *OffsetCst =
ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
- Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
+ Src = ConstantExpr::getGetElementPtr(Type::getInt8Ty(Src->getContext()), Src,
+ OffsetCst);
Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
- return ConstantFoldLoadFromConstPtr(Src, &DL);
+ return ConstantFoldLoadFromConstPtr(Src, DL);
}
-/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
+/// Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI. This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
@@ -1281,7 +1281,7 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI,
gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
LI->getParent())) {
assert(!ValuesPerBlock[0].isUndefValue() && "Dead BB dominate this block");
- return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), gvn);
+ return ValuesPerBlock[0].MaterializeAdjustedValue(LI, gvn);
}
// Otherwise, we have to construct SSA form.
@@ -1289,8 +1289,6 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI,
SSAUpdater SSAUpdate(&NewPHIs);
SSAUpdate.Initialize(LI->getType(), LI->getName());
- Type *LoadTy = LI->getType();
-
for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
const AvailableValueInBlock &AV = ValuesPerBlock[i];
BasicBlock *BB = AV.BB;
@@ -1298,7 +1296,7 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI,
if (SSAUpdate.HasValueForBlock(BB))
continue;
- SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, gvn));
+ SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LI, gvn));
}
// Perform PHI construction.
@@ -1326,16 +1324,16 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI,
return V;
}
-Value *AvailableValueInBlock::MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) const {
+Value *AvailableValueInBlock::MaterializeAdjustedValue(LoadInst *LI,
+ GVN &gvn) const {
Value *Res;
+ Type *LoadTy = LI->getType();
+ const DataLayout &DL = LI->getModule()->getDataLayout();
if (isSimpleValue()) {
Res = getSimpleValue();
if (Res->getType() != LoadTy) {
- const DataLayout *DL = gvn.getDataLayout();
- assert(DL && "Need target data to handle type mismatch case");
- Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
- *DL);
-
+ Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(), DL);
+
DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " "
<< *getSimpleValue() << '\n'
<< *Res << '\n' << "\n\n\n");
@@ -1353,10 +1351,8 @@ Value *AvailableValueInBlock::MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) c
<< *Res << '\n' << "\n\n\n");
}
} else if (isMemIntrinValue()) {
- const DataLayout *DL = gvn.getDataLayout();
- assert(DL && "Need target data to handle type mismatch case");
- Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
- LoadTy, BB->getTerminator(), *DL);
+ Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
+ BB->getTerminator(), DL);
DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
<< " " << *getMemIntrinValue() << '\n'
<< *Res << '\n' << "\n\n\n");
@@ -1383,6 +1379,7 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
// dependencies that produce an unknown value for the load (such as a call
// that could potentially clobber the load).
unsigned NumDeps = Deps.size();
+ const DataLayout &DL = LI->getModule()->getDataLayout();
for (unsigned i = 0, e = NumDeps; i != e; ++i) {
BasicBlock *DepBB = Deps[i].getBB();
MemDepResult DepInfo = Deps[i].getResult();
@@ -1409,9 +1406,9 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
// read by the load, we can extract the bits we need for the load from the
// stored value.
if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
- if (DL && Address) {
- int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
- DepSI, *DL);
+ if (Address) {
+ int Offset =
+ AnalyzeLoadFromClobberingStore(LI->getType(), Address, DepSI);
if (Offset != -1) {
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
DepSI->getValueOperand(),
@@ -1428,9 +1425,9 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {
// If this is a clobber and L is the first instruction in its block, then
// we have the first instruction in the entry block.
- if (DepLI != LI && Address && DL) {
- int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(), Address,
- DepLI, *DL);
+ if (DepLI != LI && Address) {
+ int Offset =
+ AnalyzeLoadFromClobberingLoad(LI->getType(), Address, DepLI, DL);
if (Offset != -1) {
ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB,DepLI,
@@ -1443,9 +1440,9 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
// If the clobbering value is a memset/memcpy/memmove, see if we can
// forward a value on from it.
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
- if (DL && Address) {
+ if (Address) {
int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
- DepMI, *DL);
+ DepMI, DL);
if (Offset != -1) {
ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
Offset));
@@ -1484,8 +1481,8 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
if (S->getValueOperand()->getType() != LI->getType()) {
// If the stored value is larger or equal to the loaded value, we can
// reuse it.
- if (!DL || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
- LI->getType(), *DL)) {
+ if (!CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
+ LI->getType(), DL)) {
UnavailableBlocks.push_back(DepBB);
continue;
}
@@ -1501,7 +1498,7 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
if (LD->getType() != LI->getType()) {
// If the stored value is larger or equal to the loaded value, we can
// reuse it.
- if (!DL || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*DL)) {
+ if (!CanCoerceMustAliasedValueToLoad(LD, LI->getType(), DL)) {
UnavailableBlocks.push_back(DepBB);
continue;
}
@@ -1613,6 +1610,7 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
// Check if the load can safely be moved to all the unavailable predecessors.
bool CanDoPRE = true;
+ const DataLayout &DL = LI->getModule()->getDataLayout();
SmallVector<Instruction*, 8> NewInsts;
for (auto &PredLoad : PredLoads) {
BasicBlock *UnavailablePred = PredLoad.first;
@@ -1704,7 +1702,7 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
return true;
}
-/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
+/// Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI) {
// Step 1: Find the non-local dependencies of the load.
@@ -1817,7 +1815,7 @@ static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
I->replaceAllUsesWith(Repl);
}
-/// processLoad - Attempt to eliminate a load, first by eliminating it
+/// Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L) {
if (!MD)
@@ -1833,10 +1831,11 @@ bool GVN::processLoad(LoadInst *L) {
// ... to a pointer that has been loaded from before...
MemDepResult Dep = MD->getDependency(L);
+ const DataLayout &DL = L->getModule()->getDataLayout();
// If we have a clobber and target data is around, see if this is a clobber
// that we can fix up through code synthesis.
- if (Dep.isClobber() && DL) {
+ if (Dep.isClobber()) {
// Check to see if we have something like this:
// store i32 123, i32* %P
// %A = bitcast i32* %P to i8*
@@ -1849,12 +1848,11 @@ bool GVN::processLoad(LoadInst *L) {
// access code.
Value *AvailVal = nullptr;
if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst())) {
- int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
- L->getPointerOperand(),
- DepSI, *DL);
+ int Offset = AnalyzeLoadFromClobberingStore(
+ L->getType(), L->getPointerOperand(), DepSI);
if (Offset != -1)
AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,
- L->getType(), L, *DL);
+ L->getType(), L, DL);
}
// Check to see if we have something like this:
@@ -1867,9 +1865,8 @@ bool GVN::processLoad(LoadInst *L) {
if (DepLI == L)
return false;
- int Offset = AnalyzeLoadFromClobberingLoad(L->getType(),
- L->getPointerOperand(),
- DepLI, *DL);
+ int Offset = AnalyzeLoadFromClobberingLoad(
+ L->getType(), L->getPointerOperand(), DepLI, DL);
if (Offset != -1)
AvailVal = GetLoadValueForLoad(DepLI, Offset, L->getType(), L, *this);
}
@@ -1877,11 +1874,10 @@ bool GVN::processLoad(LoadInst *L) {
// If the clobbering value is a memset/memcpy/memmove, see if we can forward
// a value on from it.
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
- int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
- L->getPointerOperand(),
- DepMI, *DL);
+ int Offset = AnalyzeLoadFromClobberingMemInst(
+ L->getType(), L->getPointerOperand(), DepMI, DL);
if (Offset != -1)
- AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, *DL);
+ AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, DL);
}
if (AvailVal) {
@@ -1932,17 +1928,13 @@ bool GVN::processLoad(LoadInst *L) {
// actually have the same type. See if we know how to reuse the stored
// value (depending on its type).
if (StoredVal->getType() != L->getType()) {
- if (DL) {
- StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
- L, *DL);
- if (!StoredVal)
- return false;
-
- DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
- << '\n' << *L << "\n\n\n");
- }
- else
+ StoredVal =
+ CoerceAvailableValueToLoadType(StoredVal, L->getType(), L, DL);
+ if (!StoredVal)
return false;
+
+ DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
+ << '\n' << *L << "\n\n\n");
}
// Remove it!
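
Editor's note, not part of the patch: a minimal sketch of what the byte offset produced by AnalyzeLoadFromClobberingStore means when a wider store is reused to satisfy a narrower load, assuming a little-endian layout. The function name and values are illustrative only.

#include <cstdint>
#include <cstring>

// Given the 32-bit value a clobbering store wrote, return the byte an i8 load
// at `Offset` bytes into the same location would observe on a little-endian
// target. For StoredVal == 0xAABBCCDD and Offset == 1 this yields 0xCC.
uint8_t forwardedByte(uint32_t StoredVal, unsigned Offset) {
  uint8_t Bytes[4];
  std::memcpy(Bytes, &StoredVal, sizeof(Bytes)); // bytes as laid out in memory
  return Bytes[Offset];
}
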
@@ -1961,17 +1953,12 @@ bool GVN::processLoad(LoadInst *L) {
// the same type. See if we know how to reuse the previously loaded value
// (depending on its type).
if (DepLI->getType() != L->getType()) {
- if (DL) {
- AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
- L, *DL);
- if (!AvailableVal)
- return false;
-
- DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
- << "\n" << *L << "\n\n\n");
- }
- else
+ AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(), L, DL);
+ if (!AvailableVal)
return false;
+
+ DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
+ << "\n" << *L << "\n\n\n");
}
// Remove it!
@@ -2016,7 +2003,7 @@ bool GVN::processLoad(LoadInst *L) {
return false;
}
-// findLeader - In order to find a leader for a given value number at a
+// In order to find a leader for a given value number at a
// specific basic block, we first obtain the list of all Values for that number,
// and then scan the list to find one whose block dominates the block in
// question. This is fast because dominator tree queries consist of only
@@ -2044,25 +2031,7 @@ Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) {
return Val;
}
-/// replaceAllDominatedUsesWith - Replace all uses of 'From' with 'To' if the
-/// use is dominated by the given basic block. Returns the number of uses that
-/// were replaced.
-unsigned GVN::replaceAllDominatedUsesWith(Value *From, Value *To,
- const BasicBlockEdge &Root) {
- unsigned Count = 0;
- for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
- UI != UE; ) {
- Use &U = *UI++;
-
- if (DT->dominates(Root, U)) {
- U.set(To);
- ++Count;
- }
- }
- return Count;
-}
-
-/// isOnlyReachableViaThisEdge - There is an edge from 'Src' to 'Dst'. Return
+/// There is an edge from 'Src' to 'Dst'. Return
/// true if every path from the entry block to 'Dst' passes via this edge. In
/// particular 'Dst' must not be reachable via another edge from 'Src'.
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
@@ -2079,7 +2048,7 @@ static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
return Pred != nullptr;
}
-/// propagateEquality - The given values are known to be equal in every block
+/// The given values are known to be equal in every block
/// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with
/// 'RHS' everywhere in the scope. Returns whether a change was made.
bool GVN::propagateEquality(Value *LHS, Value *RHS,
@@ -2138,7 +2107,7 @@ bool GVN::propagateEquality(Value *LHS, Value *RHS,
// LHS always has at least one use that is not dominated by Root, this will
// never do anything if LHS has only one use.
if (!LHS->hasOneUse()) {
- unsigned NumReplacements = replaceAllDominatedUsesWith(LHS, RHS, Root);
+ unsigned NumReplacements = replaceDominatedUsesWith(LHS, RHS, *DT, Root);
Changed |= NumReplacements > 0;
NumGVNEqProp += NumReplacements;
}
@@ -2210,7 +2179,7 @@ bool GVN::propagateEquality(Value *LHS, Value *RHS,
Value *NotCmp = findLeader(Root.getEnd(), Num);
if (NotCmp && isa<Instruction>(NotCmp)) {
unsigned NumReplacements =
- replaceAllDominatedUsesWith(NotCmp, NotVal, Root);
+ replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root);
Changed |= NumReplacements > 0;
NumGVNEqProp += NumReplacements;
}
@@ -2229,7 +2198,7 @@ bool GVN::propagateEquality(Value *LHS, Value *RHS,
return Changed;
}
-/// processInstruction - When calculating availability, handle an instruction
+/// When calculating availability, handle an instruction
/// by inserting it into the appropriate sets
bool GVN::processInstruction(Instruction *I) {
// Ignore dbg info intrinsics.
@@ -2240,6 +2209,7 @@ bool GVN::processInstruction(Instruction *I) {
// to value numbering it. Value numbering often exposes redundancies, for
// example if it determines that %y is equal to %x then the instruction
// "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
+ const DataLayout &DL = I->getModule()->getDataLayout();
if (Value *V = SimplifyInstruction(I, DL, TLI, DT, AC)) {
I->replaceAllUsesWith(V);
if (MD && V->getType()->getScalarType()->isPointerTy())
@@ -2358,10 +2328,8 @@ bool GVN::runOnFunction(Function& F) {
if (!NoLoads)
MD = &getAnalysis<MemoryDependenceAnalysis>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
- TLI = &getAnalysis<TargetLibraryInfo>();
+ TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
VN.setMemDep(MD);
VN.setDomTree(DT);
@@ -2374,7 +2342,8 @@ bool GVN::runOnFunction(Function& F) {
for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
BasicBlock *BB = FI++;
- bool removedBlock = MergeBlockIntoPredecessor(BB, this);
+ bool removedBlock = MergeBlockIntoPredecessor(
+ BB, DT, /* LoopInfo */ nullptr, VN.getAliasAnalysis(), MD);
if (removedBlock) ++NumGVNBlocks;
Changed |= removedBlock;
@@ -2457,6 +2426,43 @@ bool GVN::processBlock(BasicBlock *BB) {
return ChangedFunction;
}
+// Instantiate an expression in a predecessor that lacked it.
+bool GVN::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
+ unsigned int ValNo) {
+ // Because we are going top-down through the block, all value numbers
+ // will be available in the predecessor by the time we need them. Any
+ // that weren't originally present will have been instantiated earlier
+ // in this loop.
+ bool success = true;
+ for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) {
+ Value *Op = Instr->getOperand(i);
+ if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
+ continue;
+
+ if (Value *V = findLeader(Pred, VN.lookup(Op))) {
+ Instr->setOperand(i, V);
+ } else {
+ success = false;
+ break;
+ }
+ }
+
+ // Fail out if we encounter an operand that is not available in
+ // the PRE predecessor. This is typically because of loads which
+ // are not value numbered precisely.
+ if (!success)
+ return false;
+
+ Instr->insertBefore(Pred->getTerminator());
+ Instr->setName(Instr->getName() + ".pre");
+ Instr->setDebugLoc(Instr->getDebugLoc());
+ VN.add(Instr, ValNo);
+
+ // Update the availability map to include the new instruction.
+ addToLeaderTable(ValNo, Instr, Pred);
+ return true;
+}
+
bool GVN::performScalarPRE(Instruction *CurInst) {
SmallVector<std::pair<Value*, BasicBlock*>, 8> predMap;
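
Editor's illustration, not from the patch: a source-level picture of the diamond-shaped partial redundancy that performScalarPRE and the new performScalarPREInsertion helper target. The names are made up; the real transformation works on LLVM IR and creates a PHI at the join.

// Before PRE: "x + y" is fully available on only one path into the join.
int pre_before(bool p, int x, int y) {
  int t = 0;
  if (p)
    t = x + y;          // available on this path only
  return (x + y) + t;   // partially redundant at the join point
}

// After PRE: the expression is instantiated in the predecessor that lacked it
// (the ".pre" copy), and the join reuses it instead of recomputing.
int pre_after(bool p, int x, int y) {
  int xy, t;
  if (p) {
    xy = x + y;
    t = xy;
  } else {
    xy = x + y;         // newly inserted in the predecessor that lacked it
    t = 0;
  }
  return xy + t;        // xy stands in for the PHI created at the join
}
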
@@ -2523,60 +2529,43 @@ bool GVN::performScalarPRE(Instruction *CurInst) {
// Don't do PRE when it might increase code size, i.e. when
// we would need to insert instructions in more than one pred.
- if (NumWithout != 1 || NumWith == 0)
+ if (NumWithout > 1 || NumWith == 0)
return false;
- // Don't do PRE across indirect branch.
- if (isa<IndirectBrInst>(PREPred->getTerminator()))
- return false;
+ // We may have a case where all predecessors have the instruction,
+ // and we just need to insert a phi node. Otherwise, perform
+ // insertion.
+ Instruction *PREInstr = nullptr;
- // We can't do PRE safely on a critical edge, so instead we schedule
- // the edge to be split and perform the PRE the next time we iterate
- // on the function.
- unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
- if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
- toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
- return false;
- }
-
- // Instantiate the expression in the predecessor that lacked it.
- // Because we are going top-down through the block, all value numbers
- // will be available in the predecessor by the time we need them. Any
- // that weren't originally present will have been instantiated earlier
- // in this loop.
- Instruction *PREInstr = CurInst->clone();
- bool success = true;
- for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
- Value *Op = PREInstr->getOperand(i);
- if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
- continue;
+ if (NumWithout != 0) {
+ // Don't do PRE across indirect branch.
+ if (isa<IndirectBrInst>(PREPred->getTerminator()))
+ return false;
- if (Value *V = findLeader(PREPred, VN.lookup(Op))) {
- PREInstr->setOperand(i, V);
- } else {
- success = false;
- break;
+ // We can't do PRE safely on a critical edge, so instead we schedule
+ // the edge to be split and perform the PRE the next time we iterate
+ // on the function.
+ unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
+ if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
+ toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
+ return false;
+ }
+ // We need to insert somewhere, so let's give it a shot
+ PREInstr = CurInst->clone();
+ if (!performScalarPREInsertion(PREInstr, PREPred, ValNo)) {
+ // If we failed insertion, make sure we remove the instruction.
+ DEBUG(verifyRemoved(PREInstr));
+ delete PREInstr;
+ return false;
}
}
- // Fail out if we encounter an operand that is not available in
- // the PRE predecessor. This is typically because of loads which
- // are not value numbered precisely.
- if (!success) {
- DEBUG(verifyRemoved(PREInstr));
- delete PREInstr;
- return false;
- }
+ // Either we should have filled in the PRE instruction, or we should
+ // not have needed insertions.
+ assert(PREInstr != nullptr || NumWithout == 0);
- PREInstr->insertBefore(PREPred->getTerminator());
- PREInstr->setName(CurInst->getName() + ".pre");
- PREInstr->setDebugLoc(CurInst->getDebugLoc());
- VN.add(PREInstr, ValNo);
++NumGVNPRE;
- // Update the availability map to include the new instruction.
- addToLeaderTable(ValNo, PREInstr, PREPred);
-
// Create a PHI to make the value available in this block.
PHINode *Phi =
PHINode::Create(CurInst->getType(), predMap.size(),
@@ -2612,10 +2601,12 @@ bool GVN::performScalarPRE(Instruction *CurInst) {
MD->removeInstruction(CurInst);
DEBUG(verifyRemoved(CurInst));
CurInst->eraseFromParent();
+ ++NumGVNInstr;
+
return true;
}
-/// performPRE - Perform a purely local form of PRE that looks for diamond
+/// Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
bool Changed = false;
@@ -2645,26 +2636,28 @@ bool GVN::performPRE(Function &F) {
/// Split the critical edge connecting the given two blocks, and return
/// the block inserted to the critical edge.
BasicBlock *GVN::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
- BasicBlock *BB = SplitCriticalEdge(Pred, Succ, this);
+ BasicBlock *BB = SplitCriticalEdge(
+ Pred, Succ, CriticalEdgeSplittingOptions(getAliasAnalysis(), DT));
if (MD)
MD->invalidateCachedPredecessors();
return BB;
}
-/// splitCriticalEdges - Split critical edges found during the previous
+/// Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVN::splitCriticalEdges() {
if (toSplit.empty())
return false;
do {
std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
- SplitCriticalEdge(Edge.first, Edge.second, this);
+ SplitCriticalEdge(Edge.first, Edge.second,
+ CriticalEdgeSplittingOptions(getAliasAnalysis(), DT));
} while (!toSplit.empty());
if (MD) MD->invalidateCachedPredecessors();
return true;
}
-/// iterateOnFunction - Executes one iteration of GVN
+/// Executes one iteration of GVN
bool GVN::iterateOnFunction(Function &F) {
cleanupGlobalSets();
@@ -2695,7 +2688,7 @@ void GVN::cleanupGlobalSets() {
TableAllocator.Reset();
}
-/// verifyRemoved - Verify that the specified instruction does not occur in our
+/// Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
VN.verifyRemoved(Inst);
@@ -2714,11 +2707,10 @@ void GVN::verifyRemoved(const Instruction *Inst) const {
}
}
-// BB is declared dead, which implied other blocks become dead as well. This
-// function is to add all these blocks to "DeadBlocks". For the dead blocks'
-// live successors, update their phi nodes by replacing the operands
-// corresponding to dead blocks with UndefVal.
-//
+/// BB is declared dead, which implies other blocks become dead as well. This
+/// function adds all these blocks to "DeadBlocks". For the dead blocks'
+/// live successors, update their phi nodes by replacing the operands
+/// corresponding to dead blocks with UndefVal.
void GVN::addDeadBlock(BasicBlock *BB) {
SmallVector<BasicBlock *, 4> NewDead;
SmallSetVector<BasicBlock *, 4> DF;
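
Editor's sketch, not from the patch: how the replaceDominatedUsesWith utility that the propagateEquality hunks above switch to is typically driven. It assumes the declaration lives in llvm/Transforms/Utils/Local.h with the signature implied by the calls shown in the diff; the wrapper name is hypothetical.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Value.h"
#include "llvm/Transforms/Utils/Local.h"

// After learning that A == B on the edge Src -> Dst, rewrite every use of A
// dominated by that edge to use B instead; returns the number of uses
// rewritten, mirroring the calls made by propagateEquality above.
unsigned propagateOnEdge(llvm::Value *A, llvm::Value *B,
                         llvm::DominatorTree &DT,
                         llvm::BasicBlock *Src, llvm::BasicBlock *Dst) {
  llvm::BasicBlockEdge Taken(Src, Dst);
  return llvm::replaceDominatedUsesWith(A, B, DT, Taken);
}
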
diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp
index c01f57f26ea9..600589c904c4 100644
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -31,6 +31,7 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
@@ -44,7 +45,6 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
@@ -73,7 +73,6 @@ namespace {
LoopInfo *LI;
ScalarEvolution *SE;
DominatorTree *DT;
- const DataLayout *DL;
TargetLibraryInfo *TLI;
const TargetTransformInfo *TTI;
@@ -82,8 +81,8 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
- IndVarSimplify() : LoopPass(ID), LI(nullptr), SE(nullptr), DT(nullptr),
- DL(nullptr), Changed(false) {
+ IndVarSimplify()
+ : LoopPass(ID), LI(nullptr), SE(nullptr), DT(nullptr), Changed(false) {
initializeIndVarSimplifyPass(*PassRegistry::getPassRegistry());
}
@@ -91,7 +90,7 @@ namespace {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<LoopInfo>();
+ AU.addRequired<LoopInfoWrapperPass>();
AU.addRequired<ScalarEvolution>();
AU.addRequiredID(LoopSimplifyID);
AU.addRequiredID(LCSSAID);
@@ -126,7 +125,7 @@ char IndVarSimplify::ID = 0;
INITIALIZE_PASS_BEGIN(IndVarSimplify, "indvars",
"Induction Variable Simplification", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
@@ -622,17 +621,6 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) {
PN->eraseFromParent();
}
}
-
- // If we were unable to completely replace the PHI node, clone the PHI
- // and delete the original one. This lets IVUsers and any other maps
- // purge the original user from their records.
- if (!LCSSASafePhiForRAUW) {
- PHINode *NewPN = cast<PHINode>(PN->clone());
- NewPN->takeName(PN);
- NewPN->insertBefore(PN);
- PN->replaceAllUsesWith(NewPN);
- PN->eraseFromParent();
- }
}
}
@@ -663,14 +651,14 @@ namespace {
/// extended by this sign or zero extend operation. This is used to determine
/// the final width of the IV before actually widening it.
static void visitIVCast(CastInst *Cast, WideIVInfo &WI, ScalarEvolution *SE,
- const DataLayout *DL, const TargetTransformInfo *TTI) {
+ const TargetTransformInfo *TTI) {
bool IsSigned = Cast->getOpcode() == Instruction::SExt;
if (!IsSigned && Cast->getOpcode() != Instruction::ZExt)
return;
Type *Ty = Cast->getType();
uint64_t Width = SE->getTypeSizeInBits(Ty);
- if (DL && !DL->isLegalInteger(Width))
+ if (!Cast->getModule()->getDataLayout().isLegalInteger(Width))
return;
// Cast is either an sext or zext up to this point.
@@ -916,8 +904,8 @@ const SCEVAddRecExpr* WidenIV::GetExtendedOperandRecurrence(NarrowIVDefUse DU) {
return AddRec;
}
-/// GetWideRecurrence - Is this instruction potentially interesting from
-/// IVUsers' perspective after widening it's type? In other words, can the
+/// GetWideRecurrence - Is this instruction potentially interesting for further
+/// simplification after widening its type? In other words, can the
/// extend be safely hoisted out of the loop with SCEV reducing the value to a
/// recurrence on the same loop. If so, return the sign or zero extended
/// recurrence. Otherwise return NULL.
@@ -1201,7 +1189,6 @@ PHINode *WidenIV::CreateWideIV(SCEVExpander &Rewriter) {
namespace {
class IndVarSimplifyVisitor : public IVVisitor {
ScalarEvolution *SE;
- const DataLayout *DL;
const TargetTransformInfo *TTI;
PHINode *IVPhi;
@@ -1209,9 +1196,9 @@ namespace {
WideIVInfo WI;
IndVarSimplifyVisitor(PHINode *IV, ScalarEvolution *SCEV,
- const DataLayout *DL, const TargetTransformInfo *TTI,
+ const TargetTransformInfo *TTI,
const DominatorTree *DTree)
- : SE(SCEV), DL(DL), TTI(TTI), IVPhi(IV) {
+ : SE(SCEV), TTI(TTI), IVPhi(IV) {
DT = DTree;
WI.NarrowIV = IVPhi;
if (ReduceLiveIVs)
@@ -1219,9 +1206,7 @@ namespace {
}
// Implement the interface used by simplifyUsersOfIV.
- void visitCast(CastInst *Cast) override {
- visitIVCast(Cast, WI, SE, DL, TTI);
- }
+ void visitCast(CastInst *Cast) override { visitIVCast(Cast, WI, SE, TTI); }
};
}
@@ -1255,7 +1240,7 @@ void IndVarSimplify::SimplifyAndExtend(Loop *L,
PHINode *CurrIV = LoopPhis.pop_back_val();
// Information about sign/zero extensions of CurrIV.
- IndVarSimplifyVisitor Visitor(CurrIV, SE, DL, TTI, DT);
+ IndVarSimplifyVisitor Visitor(CurrIV, SE, TTI, DT);
Changed |= simplifyUsersOfIV(CurrIV, SE, &LPM, DeadInsts, &Visitor);
@@ -1278,55 +1263,6 @@ void IndVarSimplify::SimplifyAndExtend(Loop *L,
// LinearFunctionTestReplace and its kin. Rewrite the loop exit condition.
//===----------------------------------------------------------------------===//
-/// Check for expressions that ScalarEvolution generates to compute
-/// BackedgeTakenInfo. If these expressions have not been reduced, then
-/// expanding them may incur additional cost (albeit in the loop preheader).
-static bool isHighCostExpansion(const SCEV *S, BranchInst *BI,
- SmallPtrSetImpl<const SCEV*> &Processed,
- ScalarEvolution *SE) {
- if (!Processed.insert(S).second)
- return false;
-
- // If the backedge-taken count is a UDiv, it's very likely a UDiv that
- // ScalarEvolution's HowFarToZero or HowManyLessThans produced to compute a
- // precise expression, rather than a UDiv from the user's code. If we can't
- // find a UDiv in the code with some simple searching, assume the former and
- // forego rewriting the loop.
- if (isa<SCEVUDivExpr>(S)) {
- ICmpInst *OrigCond = dyn_cast<ICmpInst>(BI->getCondition());
- if (!OrigCond) return true;
- const SCEV *R = SE->getSCEV(OrigCond->getOperand(1));
- R = SE->getMinusSCEV(R, SE->getConstant(R->getType(), 1));
- if (R != S) {
- const SCEV *L = SE->getSCEV(OrigCond->getOperand(0));
- L = SE->getMinusSCEV(L, SE->getConstant(L->getType(), 1));
- if (L != S)
- return true;
- }
- }
-
- // Recurse past add expressions, which commonly occur in the
- // BackedgeTakenCount. They may already exist in program code, and if not,
- // they are not too expensive rematerialize.
- if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
- for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
- I != E; ++I) {
- if (isHighCostExpansion(*I, BI, Processed, SE))
- return true;
- }
- return false;
- }
-
- // HowManyLessThans uses a Max expression whenever the loop is not guarded by
- // the exit condition.
- if (isa<SCEVSMaxExpr>(S) || isa<SCEVUMaxExpr>(S))
- return true;
-
- // If we haven't recognized an expensive SCEV pattern, assume it's an
- // expression produced by program code.
- return false;
-}
-
/// canExpandBackedgeTakenCount - Return true if this loop's backedge taken
/// count expression can be safely and cheaply expanded into an instruction
/// sequence that can be used by LinearFunctionTestReplace.
@@ -1340,7 +1276,8 @@ static bool isHighCostExpansion(const SCEV *S, BranchInst *BI,
/// used by ABI constrained operation, as opposed to inttoptr/ptrtoint).
/// However, we don't yet have a strong motivation for converting loop tests
/// into inequality tests.
-static bool canExpandBackedgeTakenCount(Loop *L, ScalarEvolution *SE) {
+static bool canExpandBackedgeTakenCount(Loop *L, ScalarEvolution *SE,
+ SCEVExpander &Rewriter) {
const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
if (isa<SCEVCouldNotCompute>(BackedgeTakenCount) ||
BackedgeTakenCount->isZero())
@@ -1350,12 +1287,10 @@ static bool canExpandBackedgeTakenCount(Loop *L, ScalarEvolution *SE) {
return false;
// Can't rewrite non-branch yet.
- BranchInst *BI = dyn_cast<BranchInst>(L->getExitingBlock()->getTerminator());
- if (!BI)
+ if (!isa<BranchInst>(L->getExitingBlock()->getTerminator()))
return false;
- SmallPtrSet<const SCEV*, 8> Processed;
- if (isHighCostExpansion(BackedgeTakenCount, BI, Processed, SE))
+ if (Rewriter.isHighCostExpansion(BackedgeTakenCount, L))
return false;
return true;
@@ -1521,9 +1456,8 @@ static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) {
/// FIXME: Accept non-unit stride as long as SCEV can reduce BECount * Stride.
/// This is difficult in general for SCEV because of potential overflow. But we
/// could at least handle constant BECounts.
-static PHINode *
-FindLoopCounter(Loop *L, const SCEV *BECount,
- ScalarEvolution *SE, DominatorTree *DT, const DataLayout *DL) {
+static PHINode *FindLoopCounter(Loop *L, const SCEV *BECount,
+ ScalarEvolution *SE, DominatorTree *DT) {
uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType());
Value *Cond =
@@ -1552,7 +1486,8 @@ FindLoopCounter(Loop *L, const SCEV *BECount,
// AR may be wider than BECount. With eq/ne tests overflow is immaterial.
// AR may not be a narrower type, or we may never exit.
uint64_t PhiWidth = SE->getTypeSizeInBits(AR->getType());
- if (PhiWidth < BCWidth || (DL && !DL->isLegalInteger(PhiWidth)))
+ if (PhiWidth < BCWidth ||
+ !L->getHeader()->getModule()->getDataLayout().isLegalInteger(PhiWidth))
continue;
const SCEV *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE));
@@ -1641,7 +1576,7 @@ static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L,
&& "unit stride pointer IV must be i8*");
IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
- return Builder.CreateGEP(GEPBase, GEPOffset, "lftr.limit");
+ return Builder.CreateGEP(nullptr, GEPBase, GEPOffset, "lftr.limit");
}
else {
// In any other case, convert both IVInit and IVCount to integers before
@@ -1695,7 +1630,7 @@ LinearFunctionTestReplace(Loop *L,
const SCEV *BackedgeTakenCount,
PHINode *IndVar,
SCEVExpander &Rewriter) {
- assert(canExpandBackedgeTakenCount(L, SE) && "precondition");
+ assert(canExpandBackedgeTakenCount(L, SE, Rewriter) && "precondition");
// Initialize CmpIndVar and IVCount to their preincremented values.
Value *CmpIndVar = IndVar;
@@ -1705,51 +1640,15 @@ LinearFunctionTestReplace(Loop *L,
// compare against the post-incremented value, otherwise we must compare
// against the preincremented value.
if (L->getExitingBlock() == L->getLoopLatch()) {
+ // Add one to the "backedge-taken" count to get the trip count.
+ // This addition may overflow, which is valid as long as the comparison is
+ // truncated to BackedgeTakenCount->getType().
+ IVCount = SE->getAddExpr(BackedgeTakenCount,
+ SE->getConstant(BackedgeTakenCount->getType(), 1));
// The BackedgeTaken expression contains the number of times that the
// backedge branches to the loop header. This is one less than the
// number of times the loop executes, so use the incremented indvar.
- llvm::Value *IncrementedIndvar =
- IndVar->getIncomingValueForBlock(L->getExitingBlock());
- const auto *IncrementedIndvarSCEV =
- cast<SCEVAddRecExpr>(SE->getSCEV(IncrementedIndvar));
- // It is unsafe to use the incremented indvar if it has a wrapping flag, we
- // don't want to compare against a poison value. Check the SCEV that
- // corresponds to the incremented indvar, the SCEVExpander will only insert
- // flags in the IR if the SCEV originally had wrapping flags.
- // FIXME: In theory, SCEV could drop flags even though they exist in IR.
- // A more robust solution would involve getting a new expression for
- // CmpIndVar by applying non-NSW/NUW AddExprs.
- auto WrappingFlags =
- ScalarEvolution::setFlags(SCEV::FlagNUW, SCEV::FlagNSW);
- const SCEV *IVInit = IncrementedIndvarSCEV->getStart();
- if (SE->getTypeSizeInBits(IVInit->getType()) >
- SE->getTypeSizeInBits(IVCount->getType()))
- IVInit = SE->getTruncateExpr(IVInit, IVCount->getType());
- unsigned BitWidth = SE->getTypeSizeInBits(IVCount->getType());
- Type *WideTy = IntegerType::get(SE->getContext(), BitWidth + 1);
- // Check if InitIV + BECount+1 requires sign/zero extension.
- // If not, clear the corresponding flag from WrappingFlags because it is not
- // necessary for those flags in the IncrementedIndvarSCEV expression.
- if (SE->getSignExtendExpr(SE->getAddExpr(IVInit, BackedgeTakenCount),
- WideTy) ==
- SE->getAddExpr(SE->getSignExtendExpr(IVInit, WideTy),
- SE->getSignExtendExpr(BackedgeTakenCount, WideTy)))
- WrappingFlags = ScalarEvolution::clearFlags(WrappingFlags, SCEV::FlagNSW);
- if (SE->getZeroExtendExpr(SE->getAddExpr(IVInit, BackedgeTakenCount),
- WideTy) ==
- SE->getAddExpr(SE->getZeroExtendExpr(IVInit, WideTy),
- SE->getZeroExtendExpr(BackedgeTakenCount, WideTy)))
- WrappingFlags = ScalarEvolution::clearFlags(WrappingFlags, SCEV::FlagNUW);
- if (!ScalarEvolution::maskFlags(IncrementedIndvarSCEV->getNoWrapFlags(),
- WrappingFlags)) {
- // Add one to the "backedge-taken" count to get the trip count.
- // This addition may overflow, which is valid as long as the comparison is
- // truncated to BackedgeTakenCount->getType().
- IVCount =
- SE->getAddExpr(BackedgeTakenCount,
- SE->getConstant(BackedgeTakenCount->getType(), 1));
- CmpIndVar = IncrementedIndvar;
- }
+ CmpIndVar = IndVar->getIncomingValueForBlock(L->getExitingBlock());
}
Value *ExitCnt = genLoopLimit(IndVar, IVCount, L, Rewriter, SE);
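
Editor's illustration, not from the patch: the effect of the simplified path above when the exiting block is also the latch. The exit test compares the post-incremented induction variable against BECount + 1, and the possible wraparound of that addition is benign because the comparison is performed at the same width. Names are illustrative.

#include <cstdint>

// Runs a counted loop exactly BECount + 1 times (modulo 2^32), using the kind
// of exit test LinearFunctionTestReplace emits: the post-incremented IV is
// compared against the trip count with an equality-style test.
uint32_t runCountedLoop(uint32_t BECount) {
  uint32_t TripCount = BECount + 1; // IVCount; wraps to 0 when BECount == ~0u
  uint32_t I = 0, Executed = 0;
  do {
    ++Executed;                     // loop body
    ++I;                            // post-incremented IV (CmpIndVar)
  } while (I != TripCount);         // LFTR-style exit test
  return Executed;
}
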
@@ -1929,13 +1828,14 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
if (!L->isLoopSimplifyForm())
return false;
- LI = &getAnalysis<LoopInfo>();
+ LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
SE = &getAnalysis<ScalarEvolution>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
- TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
- TTI = getAnalysisIfAvailable<TargetTransformInfo>();
+ auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
+ TLI = TLIP ? &TLIP->getTLI() : nullptr;
+ auto *TTIP = getAnalysisIfAvailable<TargetTransformInfoWrapperPass>();
+ TTI = TTIP ? &TTIP->getTTI(*L->getHeader()->getParent()) : nullptr;
+ const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
DeadInsts.clear();
Changed = false;
@@ -1947,7 +1847,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
// Create a rewriter object which we'll use to transform the code with.
- SCEVExpander Rewriter(*SE, "indvars");
+ SCEVExpander Rewriter(*SE, DL, "indvars");
#ifndef NDEBUG
Rewriter.setDebugType(DEBUG_TYPE);
#endif
@@ -1975,8 +1875,8 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
// If we have a trip count expression, rewrite the loop's exit condition
// using it. We can currently only handle loops with a single exit.
- if (canExpandBackedgeTakenCount(L, SE) && needsLFTR(L, DT)) {
- PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT, DL);
+ if (canExpandBackedgeTakenCount(L, SE, Rewriter) && needsLFTR(L, DT)) {
+ PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT);
if (IndVar) {
// Check preconditions for proper SCEVExpander operation. SCEV does not
// express SCEVExpander's dependencies, such as LoopSimplify. Instead any
diff --git a/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
new file mode 100644
index 000000000000..cbdacad8f28b
--- /dev/null
+++ b/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -0,0 +1,1495 @@
+//===-- InductiveRangeCheckElimination.cpp - ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// The InductiveRangeCheckElimination pass splits a loop's iteration space into
+// three disjoint ranges. It does this so that the copy of the loop running
+// over the middle range provably does not need range checks. As an example,
+// it will convert
+//
+// len = < known positive >
+// for (i = 0; i < n; i++) {
+// if (0 <= i && i < len) {
+// do_something();
+// } else {
+// throw_out_of_bounds();
+// }
+// }
+//
+// to
+//
+// len = < known positive >
+// limit = smin(n, len)
+// // no first segment
+// for (i = 0; i < limit; i++) {
+// if (0 <= i && i < len) { // this check is fully redundant
+// do_something();
+// } else {
+// throw_out_of_bounds();
+// }
+// }
+// for (i = limit; i < n; i++) {
+// if (0 <= i && i < len) {
+// do_something();
+// } else {
+// throw_out_of_bounds();
+// }
+// }
+//===----------------------------------------------------------------------===//
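
Editor's addition, not part of this file: the header above shows the case with no first segment; in general a pre-loop is also emitted when the provably safe range [lo, hi) does not start at the first iteration. The helper names mirror the pseudocode above and are assumptions, not code from the pass.

extern void do_something();
extern void throw_out_of_bounds();

// Assumes 0 <= lo <= hi <= n and that "0 <= i && i < len" holds for every
// i in [lo, hi); only in the main loop is the check provably redundant.
void irce_general_shape(int n, int len, int lo, int hi) {
  int i = 0;
  for (; i < lo; i++)   // pre-loop: checks kept as they are
    if (0 <= i && i < len) do_something(); else throw_out_of_bounds();
  for (; i < hi; i++)   // main loop: this check is fully redundant here
    if (0 <= i && i < len) do_something(); else throw_out_of_bounds();
  for (; i < n; i++)    // post-loop: checks kept as they are
    if (0 <= i && i < len) do_something(); else throw_out_of_bounds();
}
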
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionExpander.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/SimplifyIndVar.h"
+#include "llvm/Transforms/Utils/UnrollLoop.h"
+#include <array>
+
+using namespace llvm;
+
+static cl::opt<unsigned> LoopSizeCutoff("irce-loop-size-cutoff", cl::Hidden,
+ cl::init(64));
+
+static cl::opt<bool> PrintChangedLoops("irce-print-changed-loops", cl::Hidden,
+ cl::init(false));
+
+static cl::opt<bool> PrintRangeChecks("irce-print-range-checks", cl::Hidden,
+ cl::init(false));
+
+static cl::opt<int> MaxExitProbReciprocal("irce-max-exit-prob-reciprocal",
+ cl::Hidden, cl::init(10));
+
+#define DEBUG_TYPE "irce"
+
+namespace {
+
+/// An inductive range check is a conditional branch in a loop with
+///
+/// 1. a very cold successor (i.e. the branch jumps to that successor very
+/// rarely)
+///
+/// and
+///
+/// 2. a condition that is provably true for some contiguous range of values
+/// taken by the containing loop's induction variable.
+///
+class InductiveRangeCheck {
+ // Classifies a range check
+ enum RangeCheckKind : unsigned {
+ // Range check of the form "0 <= I".
+ RANGE_CHECK_LOWER = 1,
+
+ // Range check of the form "I < L" where L is known positive.
+ RANGE_CHECK_UPPER = 2,
+
+ // The logical and of the RANGE_CHECK_LOWER and RANGE_CHECK_UPPER
+ // conditions.
+ RANGE_CHECK_BOTH = RANGE_CHECK_LOWER | RANGE_CHECK_UPPER,
+
+ // Unrecognized range check condition.
+ RANGE_CHECK_UNKNOWN = (unsigned)-1
+ };
+
+ static const char *rangeCheckKindToStr(RangeCheckKind);
+
+ const SCEV *Offset;
+ const SCEV *Scale;
+ Value *Length;
+ BranchInst *Branch;
+ RangeCheckKind Kind;
+
+ static RangeCheckKind parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
+ ScalarEvolution &SE, Value *&Index,
+ Value *&Length);
+
+ static InductiveRangeCheck::RangeCheckKind
+ parseRangeCheck(Loop *L, ScalarEvolution &SE, Value *Condition,
+ const SCEV *&Index, Value *&UpperLimit);
+
+ InductiveRangeCheck() :
+ Offset(nullptr), Scale(nullptr), Length(nullptr), Branch(nullptr) { }
+
+public:
+ const SCEV *getOffset() const { return Offset; }
+ const SCEV *getScale() const { return Scale; }
+ Value *getLength() const { return Length; }
+
+ void print(raw_ostream &OS) const {
+ OS << "InductiveRangeCheck:\n";
+ OS << " Kind: " << rangeCheckKindToStr(Kind) << "\n";
+ OS << " Offset: ";
+ Offset->print(OS);
+ OS << " Scale: ";
+ Scale->print(OS);
+ OS << " Length: ";
+ if (Length)
+ Length->print(OS);
+ else
+ OS << "(null)";
+ OS << "\n Branch: ";
+ getBranch()->print(OS);
+ OS << "\n";
+ }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void dump() {
+ print(dbgs());
+ }
+#endif
+
+ BranchInst *getBranch() const { return Branch; }
+
+ /// Represents a signed integer range [Range.getBegin(), Range.getEnd()). If
+ /// R.getEnd() sle R.getBegin(), then R denotes the empty range.
+
+ class Range {
+ const SCEV *Begin;
+ const SCEV *End;
+
+ public:
+ Range(const SCEV *Begin, const SCEV *End) : Begin(Begin), End(End) {
+ assert(Begin->getType() == End->getType() && "ill-typed range!");
+ }
+
+ Type *getType() const { return Begin->getType(); }
+ const SCEV *getBegin() const { return Begin; }
+ const SCEV *getEnd() const { return End; }
+ };
+
+ typedef SpecificBumpPtrAllocator<InductiveRangeCheck> AllocatorTy;
+
+ /// This is the value the condition of the branch needs to evaluate to for the
+ /// branch to take the hot successor (see (1) above).
+ bool getPassingDirection() { return true; }
+
+ /// Computes a range for the induction variable (IndVar) in which the range
+ /// check is redundant and can be constant-folded away. The induction
+ /// variable is not required to be the canonical {0,+,1} induction variable.
+ Optional<Range> computeSafeIterationSpace(ScalarEvolution &SE,
+ const SCEVAddRecExpr *IndVar,
+ IRBuilder<> &B) const;
+
+ /// Create an inductive range check out of BI if possible, else return
+ /// nullptr.
+ static InductiveRangeCheck *create(AllocatorTy &Alloc, BranchInst *BI,
+ Loop *L, ScalarEvolution &SE,
+ BranchProbabilityInfo &BPI);
+};
+
+class InductiveRangeCheckElimination : public LoopPass {
+ InductiveRangeCheck::AllocatorTy Allocator;
+
+public:
+ static char ID;
+ InductiveRangeCheckElimination() : LoopPass(ID) {
+ initializeInductiveRangeCheckEliminationPass(
+ *PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<LoopInfoWrapperPass>();
+ AU.addRequiredID(LoopSimplifyID);
+ AU.addRequiredID(LCSSAID);
+ AU.addRequired<ScalarEvolution>();
+ AU.addRequired<BranchProbabilityInfo>();
+ }
+
+ bool runOnLoop(Loop *L, LPPassManager &LPM) override;
+};
+
+char InductiveRangeCheckElimination::ID = 0;
+}
+
+INITIALIZE_PASS(InductiveRangeCheckElimination, "irce",
+ "Inductive range check elimination", false, false)
+
+const char *InductiveRangeCheck::rangeCheckKindToStr(
+ InductiveRangeCheck::RangeCheckKind RCK) {
+ switch (RCK) {
+ case InductiveRangeCheck::RANGE_CHECK_UNKNOWN:
+ return "RANGE_CHECK_UNKNOWN";
+
+ case InductiveRangeCheck::RANGE_CHECK_UPPER:
+ return "RANGE_CHECK_UPPER";
+
+ case InductiveRangeCheck::RANGE_CHECK_LOWER:
+ return "RANGE_CHECK_LOWER";
+
+ case InductiveRangeCheck::RANGE_CHECK_BOTH:
+ return "RANGE_CHECK_BOTH";
+ }
+
+ llvm_unreachable("unknown range check type!");
+}
+
+/// Parse a single ICmp instruction, `ICI`, into a range check. If `ICI` cannot
+/// be interpreted as a range check, return `RANGE_CHECK_UNKNOWN` and set
+/// `Index` and `Length` to `nullptr`. Otherwise set `Index` to the value being
+/// range checked, and set `Length` to the upper limit `Index` is being range
+/// checked against if (and only if) the range check type is stronger than or
+/// equal to RANGE_CHECK_UPPER.
+///
+InductiveRangeCheck::RangeCheckKind
+InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
+ ScalarEvolution &SE, Value *&Index,
+ Value *&Length) {
+
+ auto IsNonNegativeAndNotLoopVarying = [&SE, L](Value *V) {
+ const SCEV *S = SE.getSCEV(V);
+ if (isa<SCEVCouldNotCompute>(S))
+ return false;
+
+ return SE.getLoopDisposition(S, L) == ScalarEvolution::LoopInvariant &&
+ SE.isKnownNonNegative(S);
+ };
+
+ using namespace llvm::PatternMatch;
+
+ ICmpInst::Predicate Pred = ICI->getPredicate();
+ Value *LHS = ICI->getOperand(0);
+ Value *RHS = ICI->getOperand(1);
+
+ switch (Pred) {
+ default:
+ return RANGE_CHECK_UNKNOWN;
+
+ case ICmpInst::ICMP_SLE:
+ std::swap(LHS, RHS);
+ // fallthrough
+ case ICmpInst::ICMP_SGE:
+ if (match(RHS, m_ConstantInt<0>())) {
+ Index = LHS;
+ return RANGE_CHECK_LOWER;
+ }
+ return RANGE_CHECK_UNKNOWN;
+
+ case ICmpInst::ICMP_SLT:
+ std::swap(LHS, RHS);
+ // fallthrough
+ case ICmpInst::ICMP_SGT:
+ if (match(RHS, m_ConstantInt<-1>())) {
+ Index = LHS;
+ return RANGE_CHECK_LOWER;
+ }
+
+ if (IsNonNegativeAndNotLoopVarying(LHS)) {
+ Index = RHS;
+ Length = LHS;
+ return RANGE_CHECK_UPPER;
+ }
+ return RANGE_CHECK_UNKNOWN;
+
+ case ICmpInst::ICMP_ULT:
+ std::swap(LHS, RHS);
+ // fallthrough
+ case ICmpInst::ICMP_UGT:
+ if (IsNonNegativeAndNotLoopVarying(LHS)) {
+ Index = RHS;
+ Length = LHS;
+ return RANGE_CHECK_BOTH;
+ }
+ return RANGE_CHECK_UNKNOWN;
+ }
+
+ llvm_unreachable("default clause returns!");
+}
+
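
Editor's note, not from the patch: why a single unsigned comparison is classified as RANGE_CHECK_BOTH above. For a signed index and a non-negative, loop-invariant length, the unsigned compare is equivalent to the pair of signed checks; the sketch below is illustrative only.

#include <cstdint>

// Assumes Len >= 0. A negative Index reinterprets as a value >= 2^31, which
// is necessarily >= (uint32_t)Len, so the single unsigned compare fails
// exactly when either of the two signed checks would.
bool rangeCheckBoth(int32_t Index, int32_t Len) {
  return (uint32_t)Index < (uint32_t)Len;
}

bool rangeCheckSplit(int32_t Index, int32_t Len) {
  return 0 <= Index && Index < Len;   // same result whenever Len >= 0
}
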
+/// Parses an arbitrary condition into a range check. `Length` is set only if
+/// the range check is recognized to be `RANGE_CHECK_UPPER` or stronger.
+InductiveRangeCheck::RangeCheckKind
+InductiveRangeCheck::parseRangeCheck(Loop *L, ScalarEvolution &SE,
+ Value *Condition, const SCEV *&Index,
+ Value *&Length) {
+ using namespace llvm::PatternMatch;
+
+ Value *A = nullptr;
+ Value *B = nullptr;
+
+ if (match(Condition, m_And(m_Value(A), m_Value(B)))) {
+ Value *IndexA = nullptr, *IndexB = nullptr;
+ Value *LengthA = nullptr, *LengthB = nullptr;
+ ICmpInst *ICmpA = dyn_cast<ICmpInst>(A), *ICmpB = dyn_cast<ICmpInst>(B);
+
+ if (!ICmpA || !ICmpB)
+ return InductiveRangeCheck::RANGE_CHECK_UNKNOWN;
+
+ auto RCKindA = parseRangeCheckICmp(L, ICmpA, SE, IndexA, LengthA);
+ auto RCKindB = parseRangeCheckICmp(L, ICmpB, SE, IndexB, LengthB);
+
+ if (RCKindA == InductiveRangeCheck::RANGE_CHECK_UNKNOWN ||
+ RCKindB == InductiveRangeCheck::RANGE_CHECK_UNKNOWN)
+ return InductiveRangeCheck::RANGE_CHECK_UNKNOWN;
+
+ if (IndexA != IndexB)
+ return InductiveRangeCheck::RANGE_CHECK_UNKNOWN;
+
+ if (LengthA != nullptr && LengthB != nullptr && LengthA != LengthB)
+ return InductiveRangeCheck::RANGE_CHECK_UNKNOWN;
+
+ Index = SE.getSCEV(IndexA);
+ if (isa<SCEVCouldNotCompute>(Index))
+ return InductiveRangeCheck::RANGE_CHECK_UNKNOWN;
+
+ Length = LengthA == nullptr ? LengthB : LengthA;
+
+ return (InductiveRangeCheck::RangeCheckKind)(RCKindA | RCKindB);
+ }
+
+ if (ICmpInst *ICI = dyn_cast<ICmpInst>(Condition)) {
+ Value *IndexVal = nullptr;
+
+ auto RCKind = parseRangeCheckICmp(L, ICI, SE, IndexVal, Length);
+
+ if (RCKind == InductiveRangeCheck::RANGE_CHECK_UNKNOWN)
+ return InductiveRangeCheck::RANGE_CHECK_UNKNOWN;
+
+ Index = SE.getSCEV(IndexVal);
+ if (isa<SCEVCouldNotCompute>(Index))
+ return InductiveRangeCheck::RANGE_CHECK_UNKNOWN;
+
+ return RCKind;
+ }
+
+ return InductiveRangeCheck::RANGE_CHECK_UNKNOWN;
+}
+
+
+InductiveRangeCheck *
+InductiveRangeCheck::create(InductiveRangeCheck::AllocatorTy &A, BranchInst *BI,
+ Loop *L, ScalarEvolution &SE,
+ BranchProbabilityInfo &BPI) {
+
+ if (BI->isUnconditional() || BI->getParent() == L->getLoopLatch())
+ return nullptr;
+
+ BranchProbability LikelyTaken(15, 16);
+
+ if (BPI.getEdgeProbability(BI->getParent(), (unsigned) 0) < LikelyTaken)
+ return nullptr;
+
+ Value *Length = nullptr;
+ const SCEV *IndexSCEV = nullptr;
+
+ auto RCKind = InductiveRangeCheck::parseRangeCheck(L, SE, BI->getCondition(),
+ IndexSCEV, Length);
+
+ if (RCKind == InductiveRangeCheck::RANGE_CHECK_UNKNOWN)
+ return nullptr;
+
+ assert(IndexSCEV && "contract with SplitRangeCheckCondition!");
+ assert((!(RCKind & InductiveRangeCheck::RANGE_CHECK_UPPER) || Length) &&
+ "contract with SplitRangeCheckCondition!");
+
+ const SCEVAddRecExpr *IndexAddRec = dyn_cast<SCEVAddRecExpr>(IndexSCEV);
+ bool IsAffineIndex =
+ IndexAddRec && (IndexAddRec->getLoop() == L) && IndexAddRec->isAffine();
+
+ if (!IsAffineIndex)
+ return nullptr;
+
+ InductiveRangeCheck *IRC = new (A.Allocate()) InductiveRangeCheck;
+ IRC->Length = Length;
+ IRC->Offset = IndexAddRec->getStart();
+ IRC->Scale = IndexAddRec->getStepRecurrence(SE);
+ IRC->Branch = BI;
+ IRC->Kind = RCKind;
+ return IRC;
+}
+
+namespace {
+
+// Keeps track of the structure of a loop. This is similar to llvm::Loop,
+// except that it is more lightweight and can track the state of a loop through
+// changing and potentially invalid IR. This structure also formalizes the
+// kinds of loops we can deal with -- ones that have a single latch that is also
+// an exiting block *and* have a canonical induction variable.
+struct LoopStructure {
+ const char *Tag;
+
+ BasicBlock *Header;
+ BasicBlock *Latch;
+
+ // `Latch's terminator instruction is `LatchBr', and its `LatchBrExitIdx'th
+ // successor is `LatchExit', the exit block of the loop.
+ BranchInst *LatchBr;
+ BasicBlock *LatchExit;
+ unsigned LatchBrExitIdx;
+
+ Value *IndVarNext;
+ Value *IndVarStart;
+ Value *LoopExitAt;
+ bool IndVarIncreasing;
+
+ LoopStructure()
+ : Tag(""), Header(nullptr), Latch(nullptr), LatchBr(nullptr),
+ LatchExit(nullptr), LatchBrExitIdx(-1), IndVarNext(nullptr),
+ IndVarStart(nullptr), LoopExitAt(nullptr), IndVarIncreasing(false) {}
+
+ template <typename M> LoopStructure map(M Map) const {
+ LoopStructure Result;
+ Result.Tag = Tag;
+ Result.Header = cast<BasicBlock>(Map(Header));
+ Result.Latch = cast<BasicBlock>(Map(Latch));
+ Result.LatchBr = cast<BranchInst>(Map(LatchBr));
+ Result.LatchExit = cast<BasicBlock>(Map(LatchExit));
+ Result.LatchBrExitIdx = LatchBrExitIdx;
+ Result.IndVarNext = Map(IndVarNext);
+ Result.IndVarStart = Map(IndVarStart);
+ Result.LoopExitAt = Map(LoopExitAt);
+ Result.IndVarIncreasing = IndVarIncreasing;
+ return Result;
+ }
+
+ static Optional<LoopStructure> parseLoopStructure(ScalarEvolution &,
+ BranchProbabilityInfo &BPI,
+ Loop &,
+ const char *&);
+};
+
+/// This class is used to constrain loops to run within a given iteration space.
+/// The algorithm this class implements is given a Loop and a range [Begin,
+/// End). The algorithm then tries to break out a "main loop" out of the loop
+/// it is given in a way that the "main loop" runs with the induction variable
+/// in a subset of [Begin, End). The algorithm emits appropriate pre and post
+/// loops to run any remaining iterations. The pre loop runs any iterations in
+/// which the induction variable is < Begin, and the post loop runs any
+/// iterations in which the induction variable is >= End.
+///
+class LoopConstrainer {
+ // The representation of a clone of the original loop we started out with.
+ struct ClonedLoop {
+ // The cloned blocks
+ std::vector<BasicBlock *> Blocks;
+
+ // `Map` maps values in the clonee into values in the cloned version
+ ValueToValueMapTy Map;
+
+ // An instance of `LoopStructure` for the cloned loop
+ LoopStructure Structure;
+ };
+
+ // Result of rewriting the range of a loop. See changeIterationSpaceEnd for
+ // more details on what these fields mean.
+ struct RewrittenRangeInfo {
+ BasicBlock *PseudoExit;
+ BasicBlock *ExitSelector;
+ std::vector<PHINode *> PHIValuesAtPseudoExit;
+ PHINode *IndVarEnd;
+
+ RewrittenRangeInfo()
+ : PseudoExit(nullptr), ExitSelector(nullptr), IndVarEnd(nullptr) {}
+ };
+
+ // Calculated subranges we restrict the iteration space of the main loop to.
+ // See the implementation of `calculateSubRanges' for more details on how
+ // these fields are computed. `LowLimit` is None if there is no restriction
+ // on the low end of the restricted iteration space of the main loop. `HighLimit`
+ // is None if there is no restriction on the high end of the restricted
+ // iteration space of the main loop.
+
+ struct SubRanges {
+ Optional<const SCEV *> LowLimit;
+ Optional<const SCEV *> HighLimit;
+ };
+
+ // A utility function that does a `replaceUsesOfWith' on the incoming block
+ // set of a `PHINode' -- replaces instances of `Block' in the `PHINode's
+ // incoming block list with `ReplaceBy'.
+ static void replacePHIBlock(PHINode *PN, BasicBlock *Block,
+ BasicBlock *ReplaceBy);
+
+ // Compute a safe set of limits for the main loop to run in -- effectively the
+ // intersection of `Range' and the iteration space of the original loop.
+ // Return None if unable to compute the set of subranges.
+ //
+ Optional<SubRanges> calculateSubRanges() const;
+
+ // Clone `OriginalLoop' and return the result in CLResult. The IR after
+ // running `cloneLoop' is well formed except for the PHI nodes in CLResult --
+ // the PHI nodes say that there is an incoming edge from `OriginalPreheader`
+ // but there is no such edge.
+ //
+ void cloneLoop(ClonedLoop &CLResult, const char *Tag) const;
+
+ // Rewrite the iteration space of the loop denoted by (LS, Preheader). The
+ // iteration space of the rewritten loop ends at ExitLoopAt. The start of the
+ // iteration space is not changed. `ExitLoopAt' is assumed to be slt
+ // `OriginalHeaderCount'.
+ //
+ // If there are iterations left to execute, control is made to jump to
+ // `ContinuationBlock', otherwise they take the normal loop exit. The
+ // returned `RewrittenRangeInfo' object is populated as follows:
+ //
+ // .PseudoExit is a basic block that unconditionally branches to
+ // `ContinuationBlock'.
+ //
+ // .ExitSelector is a basic block that decides, on exit from the loop,
+ // whether to branch to the "true" exit or to `PseudoExit'.
+ //
+ // .PHIValuesAtPseudoExit are PHINodes in `PseudoExit' that compute the value
+ // for each PHINode in the loop header on taking the pseudo exit.
+ //
+ // After changeIterationSpaceEnd, `Preheader' is no longer a legitimate
+ // preheader because it is made to branch to the loop header only
+ // conditionally.
+ //
+ RewrittenRangeInfo
+ changeIterationSpaceEnd(const LoopStructure &LS, BasicBlock *Preheader,
+ Value *ExitLoopAt,
+ BasicBlock *ContinuationBlock) const;
+
+ // The loop denoted by `LS' has `OldPreheader' as its preheader. This
+ // function creates a new preheader for `LS' and returns it.
+ //
+ BasicBlock *createPreheader(const LoopStructure &LS, BasicBlock *OldPreheader,
+ const char *Tag) const;
+
+ // `ContinuationBlockAndPreheader' was the continuation block for some call to
+ // `changeIterationSpaceEnd' and is the preheader to the loop denoted by `LS'.
+ // This function rewrites the PHI nodes in `LS.Header' to start with the
+ // correct value.
+ void rewriteIncomingValuesForPHIs(
+ LoopStructure &LS, BasicBlock *ContinuationBlockAndPreheader,
+ const LoopConstrainer::RewrittenRangeInfo &RRI) const;
+
+ // Even though we do not preserve any passes at this time, we at least need to
+ // keep the parent loop structure consistent. The `LPPassManager' seems to
+ // verify this after running a loop pass. This function adds the list of
+ // blocks denoted by BBs to this loop's parent loop if required.
+ void addToParentLoopIfNeeded(ArrayRef<BasicBlock *> BBs);
+
+ // Some global state.
+ Function &F;
+ LLVMContext &Ctx;
+ ScalarEvolution &SE;
+
+ // Information about the original loop we started out with.
+ Loop &OriginalLoop;
+ LoopInfo &OriginalLoopInfo;
+ const SCEV *LatchTakenCount;
+ BasicBlock *OriginalPreheader;
+
+ // The preheader of the main loop. This may or may not be different from
+ // `OriginalPreheader'.
+ BasicBlock *MainLoopPreheader;
+
+ // The range we need to run the main loop in.
+ InductiveRangeCheck::Range Range;
+
+ // The structure of the main loop (see comment at the beginning of this class
+ // for a definition)
+ LoopStructure MainLoopStructure;
+
+public:
+ LoopConstrainer(Loop &L, LoopInfo &LI, const LoopStructure &LS,
+ ScalarEvolution &SE, InductiveRangeCheck::Range R)
+ : F(*L.getHeader()->getParent()), Ctx(L.getHeader()->getContext()),
+ SE(SE), OriginalLoop(L), OriginalLoopInfo(LI), LatchTakenCount(nullptr),
+ OriginalPreheader(nullptr), MainLoopPreheader(nullptr), Range(R),
+ MainLoopStructure(LS) {}
+
+ // Entry point for the algorithm. Returns true on success.
+ bool run();
+};
+
+}
+
+void LoopConstrainer::replacePHIBlock(PHINode *PN, BasicBlock *Block,
+ BasicBlock *ReplaceBy) {
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
+ if (PN->getIncomingBlock(i) == Block)
+ PN->setIncomingBlock(i, ReplaceBy);
+}
+
+static bool CanBeSMax(ScalarEvolution &SE, const SCEV *S) {
+ APInt SMax =
+ APInt::getSignedMaxValue(cast<IntegerType>(S->getType())->getBitWidth());
+ return SE.getSignedRange(S).contains(SMax) &&
+ SE.getUnsignedRange(S).contains(SMax);
+}
+
+static bool CanBeSMin(ScalarEvolution &SE, const SCEV *S) {
+ APInt SMin =
+ APInt::getSignedMinValue(cast<IntegerType>(S->getType())->getBitWidth());
+ return SE.getSignedRange(S).contains(SMin) &&
+ SE.getUnsignedRange(S).contains(SMin);
+}
+
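
Editor's note, not from the patch: how CanBeSMax and CanBeSMin are used further down in parseLoopStructure. When the latch exits through its first (true) successor, the loop keeps running while the IV is <= (or >=) the limit; that inclusive bound is coerced to an exclusive one by adjusting the limit by one, which is only sound if the adjustment cannot signed-overflow. A minimal sketch of the increasing case, with illustrative names:

#include <cstdint>

// Sound only when Limit != INT32_MAX, the situation CanBeSMax rules out:
// then "I <= Limit" and "I < Limit + 1" agree for every I.
bool sleAsSlt(int32_t I, int32_t Limit) {
  return I < Limit + 1;
}
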
+Optional<LoopStructure>
+LoopStructure::parseLoopStructure(ScalarEvolution &SE, BranchProbabilityInfo &BPI,
+ Loop &L, const char *&FailureReason) {
+ assert(L.isLoopSimplifyForm() && "should follow from addRequired<>");
+
+ BasicBlock *Latch = L.getLoopLatch();
+ if (!L.isLoopExiting(Latch)) {
+ FailureReason = "no loop latch";
+ return None;
+ }
+
+ BasicBlock *Header = L.getHeader();
+ BasicBlock *Preheader = L.getLoopPreheader();
+ if (!Preheader) {
+ FailureReason = "no preheader";
+ return None;
+ }
+
+ BranchInst *LatchBr = dyn_cast<BranchInst>(&*Latch->rbegin());
+ if (!LatchBr || LatchBr->isUnconditional()) {
+ FailureReason = "latch terminator not conditional branch";
+ return None;
+ }
+
+ unsigned LatchBrExitIdx = LatchBr->getSuccessor(0) == Header ? 1 : 0;
+
+ BranchProbability ExitProbability =
+ BPI.getEdgeProbability(LatchBr->getParent(), LatchBrExitIdx);
+
+ if (ExitProbability > BranchProbability(1, MaxExitProbReciprocal)) {
+ FailureReason = "short running loop, not profitable";
+ return None;
+ }
+
+ ICmpInst *ICI = dyn_cast<ICmpInst>(LatchBr->getCondition());
+ if (!ICI || !isa<IntegerType>(ICI->getOperand(0)->getType())) {
+ FailureReason = "latch terminator branch not conditional on integral icmp";
+ return None;
+ }
+
+ const SCEV *LatchCount = SE.getExitCount(&L, Latch);
+ if (isa<SCEVCouldNotCompute>(LatchCount)) {
+ FailureReason = "could not compute latch count";
+ return None;
+ }
+
+ ICmpInst::Predicate Pred = ICI->getPredicate();
+ Value *LeftValue = ICI->getOperand(0);
+ const SCEV *LeftSCEV = SE.getSCEV(LeftValue);
+ IntegerType *IndVarTy = cast<IntegerType>(LeftValue->getType());
+
+ Value *RightValue = ICI->getOperand(1);
+ const SCEV *RightSCEV = SE.getSCEV(RightValue);
+
+ // We canonicalize `ICI` such that `LeftSCEV` is an add recurrence.
+ if (!isa<SCEVAddRecExpr>(LeftSCEV)) {
+ if (isa<SCEVAddRecExpr>(RightSCEV)) {
+ std::swap(LeftSCEV, RightSCEV);
+ std::swap(LeftValue, RightValue);
+ Pred = ICmpInst::getSwappedPredicate(Pred);
+ } else {
+ FailureReason = "no add recurrences in the icmp";
+ return None;
+ }
+ }
+
+ auto HasNoSignedWrap = [&](const SCEVAddRecExpr *AR) {
+ if (AR->getNoWrapFlags(SCEV::FlagNSW))
+ return true;
+
+ IntegerType *Ty = cast<IntegerType>(AR->getType());
+ IntegerType *WideTy =
+ IntegerType::get(Ty->getContext(), Ty->getBitWidth() * 2);
+
+ const SCEVAddRecExpr *ExtendAfterOp =
+ dyn_cast<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
+ if (ExtendAfterOp) {
+ const SCEV *ExtendedStart = SE.getSignExtendExpr(AR->getStart(), WideTy);
+ const SCEV *ExtendedStep =
+ SE.getSignExtendExpr(AR->getStepRecurrence(SE), WideTy);
+
+ bool NoSignedWrap = ExtendAfterOp->getStart() == ExtendedStart &&
+ ExtendAfterOp->getStepRecurrence(SE) == ExtendedStep;
+
+ if (NoSignedWrap)
+ return true;
+ }
+
+ // We may have proved this when computing the sign extension above.
+ return AR->getNoWrapFlags(SCEV::FlagNSW) != SCEV::FlagAnyWrap;
+ };
+
+ auto IsInductionVar = [&](const SCEVAddRecExpr *AR, bool &IsIncreasing) {
+ if (!AR->isAffine())
+ return false;
+
+ // Currently we only work with induction variables that have been proved to
+ // not wrap. This restriction can potentially be lifted in the future.
+
+ if (!HasNoSignedWrap(AR))
+ return false;
+
+ if (const SCEVConstant *StepExpr =
+ dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) {
+ ConstantInt *StepCI = StepExpr->getValue();
+ if (StepCI->isOne() || StepCI->isMinusOne()) {
+ IsIncreasing = StepCI->isOne();
+ return true;
+ }
+ }
+
+ return false;
+ };
+
+ // `ICI` is interpreted as taking the backedge if the *next* value of the
+ // induction variable satisfies some constraint.
+
+ const SCEVAddRecExpr *IndVarNext = cast<SCEVAddRecExpr>(LeftSCEV);
+ bool IsIncreasing = false;
+ if (!IsInductionVar(IndVarNext, IsIncreasing)) {
+ FailureReason = "LHS in icmp not induction variable";
+ return None;
+ }
+
+ ConstantInt *One = ConstantInt::get(IndVarTy, 1);
+ // TODO: generalize the predicates here to also match their unsigned variants.
+ if (IsIncreasing) {
+ bool FoundExpectedPred =
+ (Pred == ICmpInst::ICMP_SLT && LatchBrExitIdx == 1) ||
+ (Pred == ICmpInst::ICMP_SGT && LatchBrExitIdx == 0);
+
+ if (!FoundExpectedPred) {
+ FailureReason = "expected icmp slt semantically, found something else";
+ return None;
+ }
+
+ if (LatchBrExitIdx == 0) {
+ if (CanBeSMax(SE, RightSCEV)) {
+ // TODO: this restriction is easily removable -- we just have to
+ // remember that the icmp was an slt and not an sle.
+ FailureReason = "limit may overflow when coercing sle to slt";
+ return None;
+ }
+
+ IRBuilder<> B(&*Preheader->rbegin());
+ RightValue = B.CreateAdd(RightValue, One);
+ }
+
+ } else {
+ bool FoundExpectedPred =
+ (Pred == ICmpInst::ICMP_SGT && LatchBrExitIdx == 1) ||
+ (Pred == ICmpInst::ICMP_SLT && LatchBrExitIdx == 0);
+
+ if (!FoundExpectedPred) {
+ FailureReason = "expected icmp sgt semantically, found something else";
+ return None;
+ }
+
+ if (LatchBrExitIdx == 0) {
+ if (CanBeSMin(SE, RightSCEV)) {
+ // TODO: this restriction is easily removable -- we just have to
+ // remember that the icmp was an sgt and not an sge.
+ FailureReason = "limit may overflow when coercing sge to sgt";
+ return None;
+ }
+
+ IRBuilder<> B(&*Preheader->rbegin());
+ RightValue = B.CreateSub(RightValue, One);
+ }
+ }
+
+ const SCEV *StartNext = IndVarNext->getStart();
+ const SCEV *Addend = SE.getNegativeSCEV(IndVarNext->getStepRecurrence(SE));
+ const SCEV *IndVarStart = SE.getAddExpr(StartNext, Addend);
+
+ BasicBlock *LatchExit = LatchBr->getSuccessor(LatchBrExitIdx);
+
+ assert(SE.getLoopDisposition(LatchCount, &L) ==
+ ScalarEvolution::LoopInvariant &&
+ "loop variant exit count doesn't make sense!");
+
+ assert(!L.contains(LatchExit) && "expected an exit block!");
+ const DataLayout &DL = Preheader->getModule()->getDataLayout();
+ Value *IndVarStartV =
+ SCEVExpander(SE, DL, "irce")
+ .expandCodeFor(IndVarStart, IndVarTy, &*Preheader->rbegin());
+ IndVarStartV->setName("indvar.start");
+
+ LoopStructure Result;
+
+ Result.Tag = "main";
+ Result.Header = Header;
+ Result.Latch = Latch;
+ Result.LatchBr = LatchBr;
+ Result.LatchExit = LatchExit;
+ Result.LatchBrExitIdx = LatchBrExitIdx;
+ Result.IndVarStart = IndVarStartV;
+ Result.IndVarNext = LeftValue;
+ Result.IndVarIncreasing = IsIncreasing;
+ Result.LoopExitAt = RightValue;
+
+ FailureReason = nullptr;
+
+ return Result;
+}
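
To make the LatchBrExitIdx == 0 coercion above concrete, here is a minimal standalone sketch in plain C++ (illustrative only, not LLVM code and not part of this patch): a latch that takes the backedge while the incremented induction variable is `<= limit` iterates exactly like one that continues while it is `< limit + 1`, which is why the code bumps RightValue by one and why the CanBeSMax guard must rule out `limit + 1` sign-overflowing.

#include <cassert>

// Latch of an increasing loop with LatchBrExitIdx == 0: the backedge is
// taken while the *next* IV value is <= limit (an sle in disguise).
static int tripCountInclusive(int start, int limit) {
  int n = 0;
  for (int i = start; i + 1 <= limit; ++i)
    ++n;
  return n;
}

// The same loop after the coercion performed above: next < limit + 1 (slt).
static int tripCountExclusive(int start, int limit) {
  int n = 0;
  for (int i = start; i + 1 < limit + 1; ++i)
    ++n;
  return n;
}

int main() {
  // Valid because none of these limits is the signed maximum, so `limit + 1`
  // cannot sign-overflow -- the property the CanBeSMax check establishes.
  for (int limit = -3; limit <= 16; ++limit)
    assert(tripCountInclusive(0, limit) == tripCountExclusive(0, limit));
  return 0;
}
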
+
+Optional<LoopConstrainer::SubRanges>
+LoopConstrainer::calculateSubRanges() const {
+ IntegerType *Ty = cast<IntegerType>(LatchTakenCount->getType());
+
+ if (Range.getType() != Ty)
+ return None;
+
+ LoopConstrainer::SubRanges Result;
+
+ // I think we can be more aggressive here and make this nuw / nsw if the
+ // addition that feeds into the icmp for the latch's terminating branch is nuw
+ // / nsw. In any case, a wrapping 2's complement addition is safe.
+ ConstantInt *One = ConstantInt::get(Ty, 1);
+ const SCEV *Start = SE.getSCEV(MainLoopStructure.IndVarStart);
+ const SCEV *End = SE.getSCEV(MainLoopStructure.LoopExitAt);
+
+ bool Increasing = MainLoopStructure.IndVarIncreasing;
+
+ // We compute `Smallest` and `Greatest` such that [Smallest, Greatest) is the
+ // range of values the induction variable takes.
+
+ const SCEV *Smallest = nullptr, *Greatest = nullptr;
+
+ if (Increasing) {
+ Smallest = Start;
+ Greatest = End;
+ } else {
+ // These two computations may sign-overflow. Here is why that is okay:
+ //
+ // We know that the induction variable does not sign-overflow on any
+ // iteration except the last one, and it starts at `Start` and ends at
+ // `End`, decrementing by one every time.
+ //
+ // * if `Smallest` sign-overflows we know `End` is `INT_SMAX`. Since the
+ // induction variable is decreasing, we know that the smallest value
+ // the loop body is actually executed with is `INT_SMIN` == `Smallest`.
+ //
+ // * if `Greatest` sign-overflows, we know it can only be `INT_SMIN`. In
+ // that case, `Clamp` will always return `Smallest` and
+ // [`Result.LowLimit`, `Result.HighLimit`) = [`Smallest`, `Smallest`)
+ // will be an empty range. Returning an empty range is always safe.
+ //
+
+ Smallest = SE.getAddExpr(End, SE.getSCEV(One));
+ Greatest = SE.getAddExpr(Start, SE.getSCEV(One));
+ }
+
+ auto Clamp = [this, Smallest, Greatest](const SCEV *S) {
+ return SE.getSMaxExpr(Smallest, SE.getSMinExpr(Greatest, S));
+ };
+
+ // In some cases we can prove that we don't need a pre or post loop
+
+ bool ProvablyNoPreloop =
+ SE.isKnownPredicate(ICmpInst::ICMP_SLE, Range.getBegin(), Smallest);
+ if (!ProvablyNoPreloop)
+ Result.LowLimit = Clamp(Range.getBegin());
+
+ bool ProvablyNoPostLoop =
+ SE.isKnownPredicate(ICmpInst::ICMP_SLE, Greatest, Range.getEnd());
+ if (!ProvablyNoPostLoop)
+ Result.HighLimit = Clamp(Range.getEnd());
+
+ return Result;
+}
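
A self-contained numeric sketch of the computation above for an increasing induction variable, with plain 64-bit integers standing in for SCEV expressions (the concrete bounds are assumptions chosen for illustration, not values from the patch):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

int main() {
  // Values taken by the induction variable: [Smallest, Greatest).
  const int64_t Smallest = 0, Greatest = 100;   // increasing IV, 0 .. 99
  // Range in which every eliminated check is known to pass.
  const int64_t RangeBegin = 10, RangeEnd = 150;

  const auto Clamp = [&](int64_t S) {
    return std::max(Smallest, std::min(Greatest, S));
  };

  std::optional<int64_t> LowLimit, HighLimit;
  if (!(RangeBegin <= Smallest))      // cannot prove "no pre-loop"
    LowLimit = Clamp(RangeBegin);
  if (!(Greatest <= RangeEnd))        // cannot prove "no post-loop"
    HighLimit = Clamp(RangeEnd);

  std::cout << "LowLimit  = " << (LowLimit ? std::to_string(*LowLimit) : "none")
            << "\nHighLimit = "
            << (HighLimit ? std::to_string(*HighLimit) : "none") << "\n";
  // Prints LowLimit = 10 and HighLimit = none: only a pre-loop is needed,
  // mirroring the ProvablyNoPostLoop early-out above.
  return 0;
}
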
+
+void LoopConstrainer::cloneLoop(LoopConstrainer::ClonedLoop &Result,
+ const char *Tag) const {
+ for (BasicBlock *BB : OriginalLoop.getBlocks()) {
+ BasicBlock *Clone = CloneBasicBlock(BB, Result.Map, Twine(".") + Tag, &F);
+ Result.Blocks.push_back(Clone);
+ Result.Map[BB] = Clone;
+ }
+
+ auto GetClonedValue = [&Result](Value *V) {
+ assert(V && "null values not in domain!");
+ auto It = Result.Map.find(V);
+ if (It == Result.Map.end())
+ return V;
+ return static_cast<Value *>(It->second);
+ };
+
+ Result.Structure = MainLoopStructure.map(GetClonedValue);
+ Result.Structure.Tag = Tag;
+
+ for (unsigned i = 0, e = Result.Blocks.size(); i != e; ++i) {
+ BasicBlock *ClonedBB = Result.Blocks[i];
+ BasicBlock *OriginalBB = OriginalLoop.getBlocks()[i];
+
+ assert(Result.Map[OriginalBB] == ClonedBB && "invariant!");
+
+ for (Instruction &I : *ClonedBB)
+ RemapInstruction(&I, Result.Map,
+ RF_NoModuleLevelChanges | RF_IgnoreMissingEntries);
+
+ // Exit blocks will now have one more predecessor and their PHI nodes need
+ // to be edited to reflect that. No phi nodes need to be introduced because
+ // the loop is in LCSSA.
+
+ for (auto SBBI = succ_begin(OriginalBB), SBBE = succ_end(OriginalBB);
+ SBBI != SBBE; ++SBBI) {
+
+ if (OriginalLoop.contains(*SBBI))
+ continue; // not an exit block
+
+ for (Instruction &I : **SBBI) {
+ if (!isa<PHINode>(&I))
+ break;
+
+ PHINode *PN = cast<PHINode>(&I);
+ Value *OldIncoming = PN->getIncomingValueForBlock(OriginalBB);
+ PN->addIncoming(GetClonedValue(OldIncoming), ClonedBB);
+ }
+ }
+ }
+}
+
+LoopConstrainer::RewrittenRangeInfo LoopConstrainer::changeIterationSpaceEnd(
+ const LoopStructure &LS, BasicBlock *Preheader, Value *ExitSubloopAt,
+ BasicBlock *ContinuationBlock) const {
+
+ // We start with a loop with a single latch:
+ //
+ // +--------------------+
+ // | |
+ // | preheader |
+ // | |
+ // +--------+-----------+
+ // | ----------------\
+ // | / |
+ // +--------v----v------+ |
+ // | | |
+ // | header | |
+ // | | |
+ // +--------------------+ |
+ // |
+ // ..... |
+ // |
+ // +--------------------+ |
+ // | | |
+ // | latch >----------/
+ // | |
+ // +-------v------------+
+ // |
+ // |
+ // | +--------------------+
+ // | | |
+ // +---> original exit |
+ // | |
+ // +--------------------+
+ //
+ // We change the control flow to look like
+ //
+ //
+ // +--------------------+
+ // | |
+ // | preheader >-------------------------+
+ // | | |
+ // +--------v-----------+ |
+ // | /-------------+ |
+ // | / | |
+ // +--------v--v--------+ | |
+ // | | | |
+ // | header | | +--------+ |
+ // | | | | | |
+ // +--------------------+ | | +-----v-----v-----------+
+ // | | | |
+ // | | | .pseudo.exit |
+ // | | | |
+ // | | +-----------v-----------+
+ // | | |
+ // ..... | | |
+ // | | +--------v-------------+
+ // +--------------------+ | | | |
+ // | | | | | ContinuationBlock |
+ // | latch >------+ | | |
+ // | | | +----------------------+
+ // +---------v----------+ |
+ // | |
+ // | |
+ // | +---------------^-----+
+ // | | |
+ // +-----> .exit.selector |
+ // | |
+ // +----------v----------+
+ // |
+ // +--------------------+ |
+ // | | |
+ // | original exit <----+
+ // | |
+ // +--------------------+
+ //
+
+ RewrittenRangeInfo RRI;
+
+ auto BBInsertLocation = std::next(Function::iterator(LS.Latch));
+ RRI.ExitSelector = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".exit.selector",
+ &F, BBInsertLocation);
+ RRI.PseudoExit = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".pseudo.exit", &F,
+ BBInsertLocation);
+
+ BranchInst *PreheaderJump = cast<BranchInst>(&*Preheader->rbegin());
+ bool Increasing = LS.IndVarIncreasing;
+
+ IRBuilder<> B(PreheaderJump);
+
+ // EnterLoopCond - is it okay to start executing this `LS'?
+ Value *EnterLoopCond = Increasing
+ ? B.CreateICmpSLT(LS.IndVarStart, ExitSubloopAt)
+ : B.CreateICmpSGT(LS.IndVarStart, ExitSubloopAt);
+
+ B.CreateCondBr(EnterLoopCond, LS.Header, RRI.PseudoExit);
+ PreheaderJump->eraseFromParent();
+
+ LS.LatchBr->setSuccessor(LS.LatchBrExitIdx, RRI.ExitSelector);
+ B.SetInsertPoint(LS.LatchBr);
+ Value *TakeBackedgeLoopCond =
+ Increasing ? B.CreateICmpSLT(LS.IndVarNext, ExitSubloopAt)
+ : B.CreateICmpSGT(LS.IndVarNext, ExitSubloopAt);
+ Value *CondForBranch = LS.LatchBrExitIdx == 1
+ ? TakeBackedgeLoopCond
+ : B.CreateNot(TakeBackedgeLoopCond);
+
+ LS.LatchBr->setCondition(CondForBranch);
+
+ B.SetInsertPoint(RRI.ExitSelector);
+
+ // IterationsLeft - are there any more iterations left, given the original
+ // upper bound on the induction variable? If not, we branch to the "real"
+ // exit.
+ Value *IterationsLeft = Increasing
+ ? B.CreateICmpSLT(LS.IndVarNext, LS.LoopExitAt)
+ : B.CreateICmpSGT(LS.IndVarNext, LS.LoopExitAt);
+ B.CreateCondBr(IterationsLeft, RRI.PseudoExit, LS.LatchExit);
+
+ BranchInst *BranchToContinuation =
+ BranchInst::Create(ContinuationBlock, RRI.PseudoExit);
+
+ // We emit PHI nodes into `RRI.PseudoExit' that compute the "latest" value of
+ // each of the PHI nodes in the loop header. This feeds into the initial
+ // value of the same PHI nodes if/when we continue execution.
+ for (Instruction &I : *LS.Header) {
+ if (!isa<PHINode>(&I))
+ break;
+
+ PHINode *PN = cast<PHINode>(&I);
+
+ PHINode *NewPHI = PHINode::Create(PN->getType(), 2, PN->getName() + ".copy",
+ BranchToContinuation);
+
+ NewPHI->addIncoming(PN->getIncomingValueForBlock(Preheader), Preheader);
+ NewPHI->addIncoming(PN->getIncomingValueForBlock(LS.Latch),
+ RRI.ExitSelector);
+ RRI.PHIValuesAtPseudoExit.push_back(NewPHI);
+ }
+
+ RRI.IndVarEnd = PHINode::Create(LS.IndVarNext->getType(), 2, "indvar.end",
+ BranchToContinuation);
+ RRI.IndVarEnd->addIncoming(LS.IndVarStart, Preheader);
+ RRI.IndVarEnd->addIncoming(LS.IndVarNext, RRI.ExitSelector);
+
+ // The latch exit now has a branch from `RRI.ExitSelector' instead of
+ // `LS.Latch'. The PHI nodes need to be updated to reflect that.
+ for (Instruction &I : *LS.LatchExit) {
+ if (PHINode *PN = dyn_cast<PHINode>(&I))
+ replacePHIBlock(PN, LS.Latch, RRI.ExitSelector);
+ else
+ break;
+ }
+
+ return RRI;
+}
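
The rewritten control flow can be modelled with ordinary scalar code. The sketch below (plain C++, not part of the patch, constants chosen arbitrarily) follows the diagram above for an increasing induction variable: the latch now branches to the exit selector once the next IV value reaches ExitSubloopAt, and the selector decides between the pseudo exit (continuing in the next loop) and the original exit.

#include <iostream>

int main() {
  const int IndVarStart = 0;      // value expanded into the preheader above
  const int ExitSubloopAt = 60;   // where this loop hands off to the next one
  const int LoopExitAt = 100;     // the original loop bound (LS.LoopExitAt)

  int iv = IndVarStart;
  if (iv < ExitSubloopAt) {                  // EnterLoopCond in the preheader
    for (;;) {
      int next = iv + 1;                     // LS.IndVarNext
      // ... loop body runs here with `iv` ...
      if (!(next < ExitSubloopAt)) {         // TakeBackedgeLoopCond failed
        // .exit.selector
        if (next < LoopExitAt)               // IterationsLeft
          std::cout << "pseudo exit; continuation resumes at iv = " << next
                    << "\n";                 // fed onward via RRI.IndVarEnd
        else
          std::cout << "original exit\n";
        break;
      }
      iv = next;                             // take the backedge
    }
  } else {
    std::cout << "loop body never entered\n";
  }
  return 0;
}
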
+
+void LoopConstrainer::rewriteIncomingValuesForPHIs(
+ LoopStructure &LS, BasicBlock *ContinuationBlock,
+ const LoopConstrainer::RewrittenRangeInfo &RRI) const {
+
+ unsigned PHIIndex = 0;
+ for (Instruction &I : *LS.Header) {
+ if (!isa<PHINode>(&I))
+ break;
+
+ PHINode *PN = cast<PHINode>(&I);
+
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i)
+ if (PN->getIncomingBlock(i) == ContinuationBlock)
+ PN->setIncomingValue(i, RRI.PHIValuesAtPseudoExit[PHIIndex++]);
+ }
+
+ LS.IndVarStart = RRI.IndVarEnd;
+}
+
+BasicBlock *LoopConstrainer::createPreheader(const LoopStructure &LS,
+ BasicBlock *OldPreheader,
+ const char *Tag) const {
+
+ BasicBlock *Preheader = BasicBlock::Create(Ctx, Tag, &F, LS.Header);
+ BranchInst::Create(LS.Header, Preheader);
+
+ for (Instruction &I : *LS.Header) {
+ if (!isa<PHINode>(&I))
+ break;
+
+ PHINode *PN = cast<PHINode>(&I);
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i)
+ replacePHIBlock(PN, OldPreheader, Preheader);
+ }
+
+ return Preheader;
+}
+
+void LoopConstrainer::addToParentLoopIfNeeded(ArrayRef<BasicBlock *> BBs) {
+ Loop *ParentLoop = OriginalLoop.getParentLoop();
+ if (!ParentLoop)
+ return;
+
+ for (BasicBlock *BB : BBs)
+ ParentLoop->addBasicBlockToLoop(BB, OriginalLoopInfo);
+}
+
+bool LoopConstrainer::run() {
+ BasicBlock *Preheader = nullptr;
+ LatchTakenCount = SE.getExitCount(&OriginalLoop, MainLoopStructure.Latch);
+ Preheader = OriginalLoop.getLoopPreheader();
+ assert(!isa<SCEVCouldNotCompute>(LatchTakenCount) && Preheader != nullptr &&
+ "preconditions!");
+
+ OriginalPreheader = Preheader;
+ MainLoopPreheader = Preheader;
+
+ Optional<SubRanges> MaybeSR = calculateSubRanges();
+ if (!MaybeSR.hasValue()) {
+ DEBUG(dbgs() << "irce: could not compute subranges\n");
+ return false;
+ }
+
+ SubRanges SR = MaybeSR.getValue();
+ bool Increasing = MainLoopStructure.IndVarIncreasing;
+ IntegerType *IVTy =
+ cast<IntegerType>(MainLoopStructure.IndVarNext->getType());
+
+ SCEVExpander Expander(SE, F.getParent()->getDataLayout(), "irce");
+ Instruction *InsertPt = OriginalPreheader->getTerminator();
+
+ // It would have been better to make `PreLoop' and `PostLoop'
+ // `Optional<ClonedLoop>'s, but `ValueToValueMapTy' does not have a copy
+ // constructor.
+ ClonedLoop PreLoop, PostLoop;
+ bool NeedsPreLoop =
+ Increasing ? SR.LowLimit.hasValue() : SR.HighLimit.hasValue();
+ bool NeedsPostLoop =
+ Increasing ? SR.HighLimit.hasValue() : SR.LowLimit.hasValue();
+
+ Value *ExitPreLoopAt = nullptr;
+ Value *ExitMainLoopAt = nullptr;
+ const SCEVConstant *MinusOneS =
+ cast<SCEVConstant>(SE.getConstant(IVTy, -1, true /* isSigned */));
+
+ if (NeedsPreLoop) {
+ const SCEV *ExitPreLoopAtSCEV = nullptr;
+
+ if (Increasing)
+ ExitPreLoopAtSCEV = *SR.LowLimit;
+ else {
+ if (CanBeSMin(SE, *SR.HighLimit)) {
+ DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
+ << "preloop exit limit. HighLimit = " << *(*SR.HighLimit)
+ << "\n");
+ return false;
+ }
+ ExitPreLoopAtSCEV = SE.getAddExpr(*SR.HighLimit, MinusOneS);
+ }
+
+ ExitPreLoopAt = Expander.expandCodeFor(ExitPreLoopAtSCEV, IVTy, InsertPt);
+ ExitPreLoopAt->setName("exit.preloop.at");
+ }
+
+ if (NeedsPostLoop) {
+ const SCEV *ExitMainLoopAtSCEV = nullptr;
+
+ if (Increasing)
+ ExitMainLoopAtSCEV = *SR.HighLimit;
+ else {
+ if (CanBeSMin(SE, *SR.LowLimit)) {
+ DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
+ << "mainloop exit limit. LowLimit = " << *(*SR.LowLimit)
+ << "\n");
+ return false;
+ }
+ ExitMainLoopAtSCEV = SE.getAddExpr(*SR.LowLimit, MinusOneS);
+ }
+
+ ExitMainLoopAt = Expander.expandCodeFor(ExitMainLoopAtSCEV, IVTy, InsertPt);
+ ExitMainLoopAt->setName("exit.mainloop.at");
+ }
+
+ // We clone these ahead of time so that we don't have to deal with changing
+ // and temporarily invalid IR as we transform the loops.
+ if (NeedsPreLoop)
+ cloneLoop(PreLoop, "preloop");
+ if (NeedsPostLoop)
+ cloneLoop(PostLoop, "postloop");
+
+ RewrittenRangeInfo PreLoopRRI;
+
+ if (NeedsPreLoop) {
+ Preheader->getTerminator()->replaceUsesOfWith(MainLoopStructure.Header,
+ PreLoop.Structure.Header);
+
+ MainLoopPreheader =
+ createPreheader(MainLoopStructure, Preheader, "mainloop");
+ PreLoopRRI = changeIterationSpaceEnd(PreLoop.Structure, Preheader,
+ ExitPreLoopAt, MainLoopPreheader);
+ rewriteIncomingValuesForPHIs(MainLoopStructure, MainLoopPreheader,
+ PreLoopRRI);
+ }
+
+ BasicBlock *PostLoopPreheader = nullptr;
+ RewrittenRangeInfo PostLoopRRI;
+
+ if (NeedsPostLoop) {
+ PostLoopPreheader =
+ createPreheader(PostLoop.Structure, Preheader, "postloop");
+ PostLoopRRI = changeIterationSpaceEnd(MainLoopStructure, MainLoopPreheader,
+ ExitMainLoopAt, PostLoopPreheader);
+ rewriteIncomingValuesForPHIs(PostLoop.Structure, PostLoopPreheader,
+ PostLoopRRI);
+ }
+
+ BasicBlock *NewMainLoopPreheader =
+ MainLoopPreheader != Preheader ? MainLoopPreheader : nullptr;
+ BasicBlock *NewBlocks[] = {PostLoopPreheader, PreLoopRRI.PseudoExit,
+ PreLoopRRI.ExitSelector, PostLoopRRI.PseudoExit,
+ PostLoopRRI.ExitSelector, NewMainLoopPreheader};
+
+ // Some of the above may be nullptr, filter them out before passing to
+ // addToParentLoopIfNeeded.
+ auto NewBlocksEnd =
+ std::remove(std::begin(NewBlocks), std::end(NewBlocks), nullptr);
+
+ addToParentLoopIfNeeded(makeArrayRef(std::begin(NewBlocks), NewBlocksEnd));
+ addToParentLoopIfNeeded(PreLoop.Blocks);
+ addToParentLoopIfNeeded(PostLoop.Blocks);
+
+ return true;
+}
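
Putting the pieces together, the net effect of LoopConstrainer::run for an increasing loop can be illustrated as follows (assumed concrete bounds, plain C++ rather than IR): the original iteration space is partitioned into a pre-loop, a check-free main loop, and a post-loop whose trip counts add up to the original.

#include <cassert>

int main() {
  const int Start = 0, End = 100;       // original iteration space [0, 100)
  const int ExitPreLoopAt = 10;         // derived from SR.LowLimit
  const int ExitMainLoopAt = 60;        // derived from SR.HighLimit

  int Visited = 0;
  for (int i = Start; i < ExitPreLoopAt; ++i)          // pre-loop: checks kept
    ++Visited;
  for (int i = ExitPreLoopAt; i < ExitMainLoopAt; ++i) // main loop: check-free
    ++Visited;
  for (int i = ExitMainLoopAt; i < End; ++i)           // post-loop: checks kept
    ++Visited;

  assert(Visited == End - Start);   // same trip count as the original loop
  return 0;
}
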
+
+/// Computes and returns a range of values for the induction variable (IndVar)
+/// in which the range check can be safely elided. If it cannot compute such a
+/// range, returns None.
+Optional<InductiveRangeCheck::Range>
+InductiveRangeCheck::computeSafeIterationSpace(ScalarEvolution &SE,
+ const SCEVAddRecExpr *IndVar,
+ IRBuilder<> &) const {
+ // IndVar is of the form "A + B * I" (where "I" is the canonical induction
+ // variable, that may or may not exist as a real llvm::Value in the loop) and
+ // this inductive range check is a range check on the "C + D * I" ("C" is
+ // getOffset() and "D" is getScale()). We rewrite the value being range
+ // checked to "M + N * IndVar" where "N" = "D * B^(-1)" and "M" = "C - NA".
+ // Currently we support this only for "B" = "D" = { 1 or -1 }, but the code
+ // can be generalized as needed.
+ //
+ // The actual inequalities we solve are of the form
+ //
+ // 0 <= M + 1 * IndVar < L given L >= 0 (i.e. N == 1)
+ //
+ // The inequality is satisfied by -M <= IndVar < (L - M) [^1]. All additions
+ // and subtractions are twos-complement wrapping and comparisons are signed.
+ //
+ // Proof:
+ //
+ // If there exists IndVar such that -M <= IndVar < (L - M) then it follows
+ // that -M <= (-M + L) [== Eq. 1]. Since L >= 0, if (-M + L) sign-overflows
+ // then (-M + L) < (-M). Hence by [Eq. 1], (-M + L) could not have
+ // overflown.
+ //
+ // This means IndVar = t + (-M) for t in [0, L). Hence (IndVar + M) = t.
+ // Hence 0 <= (IndVar + M) < L
+
+ // [^1]: Note that the solution does _not_ apply if L < 0; consider values M =
+ // 127, IndVar = 126 and L = -2 in an i8 world.
+
+ if (!IndVar->isAffine())
+ return None;
+
+ const SCEV *A = IndVar->getStart();
+ const SCEVConstant *B = dyn_cast<SCEVConstant>(IndVar->getStepRecurrence(SE));
+ if (!B)
+ return None;
+
+ const SCEV *C = getOffset();
+ const SCEVConstant *D = dyn_cast<SCEVConstant>(getScale());
+ if (D != B)
+ return None;
+
+ ConstantInt *ConstD = D->getValue();
+ if (!(ConstD->isMinusOne() || ConstD->isOne()))
+ return None;
+
+ const SCEV *M = SE.getMinusSCEV(C, A);
+
+ const SCEV *Begin = SE.getNegativeSCEV(M);
+ const SCEV *UpperLimit = nullptr;
+
+ // We strengthen "0 <= I" to "0 <= I < INT_SMAX" and "I < L" to "0 <= I < L".
+ // We can potentially do much better here.
+ if (Value *V = getLength()) {
+ UpperLimit = SE.getSCEV(V);
+ } else {
+ assert(Kind == InductiveRangeCheck::RANGE_CHECK_LOWER && "invariant!");
+ unsigned BitWidth = cast<IntegerType>(IndVar->getType())->getBitWidth();
+ UpperLimit = SE.getConstant(APInt::getSignedMaxValue(BitWidth));
+ }
+
+ const SCEV *End = SE.getMinusSCEV(UpperLimit, M);
+ return InductiveRangeCheck::Range(Begin, End);
+}
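
A worked instance of the formula above with B == D == 1 and assumed concrete values (A = 0, C = 5, L = 100): M = C - A and the returned range is [-M, L - M). The snippet below (illustrative only) checks the claim exhaustively around the boundaries.

#include <cassert>

int main() {
  const int A = 0, C = 5, L = 100;   // IndVar = A + I, check is on C + I vs. L
  const int M = C - A;
  const int Begin = -M, End = L - M; // the returned Range: [-M, L - M)

  for (int IndVar = Begin - 3; IndVar < End + 3; ++IndVar) {
    const int Checked = IndVar + M;                    // value being checked
    const bool CheckPasses = 0 <= Checked && Checked < L;
    const bool InSafeRange = Begin <= IndVar && IndVar < End;
    assert(CheckPasses == InSafeRange);
  }
  return 0;
}
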
+
+static Optional<InductiveRangeCheck::Range>
+IntersectRange(ScalarEvolution &SE,
+ const Optional<InductiveRangeCheck::Range> &R1,
+ const InductiveRangeCheck::Range &R2, IRBuilder<> &B) {
+ if (!R1.hasValue())
+ return R2;
+ auto &R1Value = R1.getValue();
+
+ // TODO: we could widen the smaller range and have this work; but for now we
+ // bail out to keep things simple.
+ if (R1Value.getType() != R2.getType())
+ return None;
+
+ const SCEV *NewBegin = SE.getSMaxExpr(R1Value.getBegin(), R2.getBegin());
+ const SCEV *NewEnd = SE.getSMinExpr(R1Value.getEnd(), R2.getEnd());
+
+ return InductiveRangeCheck::Range(NewBegin, NewEnd);
+}
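
The intersection is just an smax of the begins and an smin of the ends; on plain half-open integer ranges (illustrative values only, not from the patch) it looks like this:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <utility>

using Range = std::pair<int64_t, int64_t>;   // half-open [begin, end), signed

// Mirrors IntersectRange: smax of the begins, smin of the ends.
static Range intersect(const Range &R1, const Range &R2) {
  return {std::max(R1.first, R2.first), std::min(R1.second, R2.second)};
}

int main() {
  const Range R = intersect({-5, 95}, {0, 60});
  std::cout << "[" << R.first << ", " << R.second << ")\n";   // prints [0, 60)
  return 0;
}
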
+
+bool InductiveRangeCheckElimination::runOnLoop(Loop *L, LPPassManager &LPM) {
+ if (L->getBlocks().size() >= LoopSizeCutoff) {
+ DEBUG(dbgs() << "irce: giving up constraining loop, too large\n";);
+ return false;
+ }
+
+ BasicBlock *Preheader = L->getLoopPreheader();
+ if (!Preheader) {
+ DEBUG(dbgs() << "irce: loop has no preheader, leaving\n");
+ return false;
+ }
+
+ LLVMContext &Context = Preheader->getContext();
+ InductiveRangeCheck::AllocatorTy IRCAlloc;
+ SmallVector<InductiveRangeCheck *, 16> RangeChecks;
+ ScalarEvolution &SE = getAnalysis<ScalarEvolution>();
+ BranchProbabilityInfo &BPI = getAnalysis<BranchProbabilityInfo>();
+
+ for (auto BBI : L->getBlocks())
+ if (BranchInst *TBI = dyn_cast<BranchInst>(BBI->getTerminator()))
+ if (InductiveRangeCheck *IRC =
+ InductiveRangeCheck::create(IRCAlloc, TBI, L, SE, BPI))
+ RangeChecks.push_back(IRC);
+
+ if (RangeChecks.empty())
+ return false;
+
+ auto PrintRecognizedRangeChecks = [&](raw_ostream &OS) {
+ OS << "irce: looking at loop "; L->print(OS);
+ OS << "irce: loop has " << RangeChecks.size()
+ << " inductive range checks: \n";
+ for (InductiveRangeCheck *IRC : RangeChecks)
+ IRC->print(OS);
+ };
+
+ DEBUG(PrintRecognizedRangeChecks(dbgs()));
+
+ if (PrintRangeChecks)
+ PrintRecognizedRangeChecks(errs());
+
+ const char *FailureReason = nullptr;
+ Optional<LoopStructure> MaybeLoopStructure =
+ LoopStructure::parseLoopStructure(SE, BPI, *L, FailureReason);
+ if (!MaybeLoopStructure.hasValue()) {
+ DEBUG(dbgs() << "irce: could not parse loop structure: " << FailureReason
+ << "\n";);
+ return false;
+ }
+ LoopStructure LS = MaybeLoopStructure.getValue();
+ bool Increasing = LS.IndVarIncreasing;
+ const SCEV *MinusOne =
+ SE.getConstant(LS.IndVarNext->getType(), Increasing ? -1 : 1, true);
+ const SCEVAddRecExpr *IndVar =
+ cast<SCEVAddRecExpr>(SE.getAddExpr(SE.getSCEV(LS.IndVarNext), MinusOne));
+
+ Optional<InductiveRangeCheck::Range> SafeIterRange;
+ Instruction *ExprInsertPt = Preheader->getTerminator();
+
+ SmallVector<InductiveRangeCheck *, 4> RangeChecksToEliminate;
+
+ IRBuilder<> B(ExprInsertPt);
+ for (InductiveRangeCheck *IRC : RangeChecks) {
+ auto Result = IRC->computeSafeIterationSpace(SE, IndVar, B);
+ if (Result.hasValue()) {
+ auto MaybeSafeIterRange =
+ IntersectRange(SE, SafeIterRange, Result.getValue(), B);
+ if (MaybeSafeIterRange.hasValue()) {
+ RangeChecksToEliminate.push_back(IRC);
+ SafeIterRange = MaybeSafeIterRange.getValue();
+ }
+ }
+ }
+
+ if (!SafeIterRange.hasValue())
+ return false;
+
+ LoopConstrainer LC(*L, getAnalysis<LoopInfoWrapperPass>().getLoopInfo(), LS,
+ SE, SafeIterRange.getValue());
+ bool Changed = LC.run();
+
+ if (Changed) {
+ auto PrintConstrainedLoopInfo = [L]() {
+ dbgs() << "irce: in function ";
+ dbgs() << L->getHeader()->getParent()->getName() << ": ";
+ dbgs() << "constrained ";
+ L->print(dbgs());
+ };
+
+ DEBUG(PrintConstrainedLoopInfo());
+
+ if (PrintChangedLoops)
+ PrintConstrainedLoopInfo();
+
+ // Optimize away the now-redundant range checks.
+
+ for (InductiveRangeCheck *IRC : RangeChecksToEliminate) {
+ ConstantInt *FoldedRangeCheck = IRC->getPassingDirection()
+ ? ConstantInt::getTrue(Context)
+ : ConstantInt::getFalse(Context);
+ IRC->getBranch()->setCondition(FoldedRangeCheck);
+ }
+ }
+
+ return Changed;
+}
+
+Pass *llvm::createInductiveRangeCheckEliminationPass() {
+ return new InductiveRangeCheckElimination;
+}
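
For orientation, a source-level sketch (not taken from this patch or its tests) of the pattern the new pass is aimed at: a loop whose body re-checks an array index against an independently supplied length on every iteration. The goal is a main loop in which that branch has been folded to a constant, with only the pre- and post-loops retaining the check.

#include <cstddef>
#include <stdexcept>
#include <vector>

// `n` and `a.size()` are unrelated, so the bounds check cannot simply be
// deleted; it is, however, redundant for every iteration with i < a.size(),
// which is exactly the "safe iteration space" the pass computes.
long sumPrefix(const std::vector<long> &a, std::size_t n) {
  long Sum = 0;
  for (std::size_t i = 0; i < n; ++i) {
    if (i >= a.size())                       // inductive range check
      throw std::out_of_range("index");      // rarely-taken exit path
    Sum += a[i];                             // check-free in the main loop
  }
  return Sum;
}

int main() {
  const std::vector<long> V{1, 2, 3, 4};
  return sumPrefix(V, 3) == 6 ? 0 : 1;
}
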
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 78beb3f98dcd..711df417992b 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -23,6 +23,7 @@
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/Loads.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
@@ -32,7 +33,6 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
@@ -78,7 +78,6 @@ namespace {
/// revectored to the false side of the second if.
///
class JumpThreading : public FunctionPass {
- const DataLayout *DL;
TargetLibraryInfo *TLI;
LazyValueInfo *LVI;
#ifdef NDEBUG
@@ -115,7 +114,7 @@ namespace {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<LazyValueInfo>();
AU.addPreserved<LazyValueInfo>();
- AU.addRequired<TargetLibraryInfo>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
}
void FindLoopHeaders(Function &F);
@@ -145,7 +144,7 @@ char JumpThreading::ID = 0;
INITIALIZE_PASS_BEGIN(JumpThreading, "jump-threading",
"Jump Threading", false, false)
INITIALIZE_PASS_DEPENDENCY(LazyValueInfo)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(JumpThreading, "jump-threading",
"Jump Threading", false, false)
@@ -159,9 +158,7 @@ bool JumpThreading::runOnFunction(Function &F) {
return false;
DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
- TLI = &getAnalysis<TargetLibraryInfo>();
+ TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
LVI = &getAnalysis<LazyValueInfo>();
// Remove unreachable blocks from function as they may result in infinite
@@ -505,6 +502,7 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB, PredValueInfo &Result,
assert(Preference == WantInteger && "Compares only produce integers");
PHINode *PN = dyn_cast<PHINode>(Cmp->getOperand(0));
if (PN && PN->getParent() == BB) {
+ const DataLayout &DL = PN->getModule()->getDataLayout();
// We can do this simplification if any comparisons fold to true or false.
// See if any do.
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
@@ -709,7 +707,8 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
// Run constant folding to see if we can reduce the condition to a simple
// constant.
if (Instruction *I = dyn_cast<Instruction>(Condition)) {
- Value *SimpleVal = ConstantFoldInstruction(I, DL, TLI);
+ Value *SimpleVal =
+ ConstantFoldInstruction(I, BB->getModule()->getDataLayout(), TLI);
if (SimpleVal) {
I->replaceAllUsesWith(SimpleVal);
I->eraseFromParent();
@@ -792,6 +791,17 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
CondBr->getSuccessor(ToRemove)->removePredecessor(BB, true);
BranchInst::Create(CondBr->getSuccessor(ToKeep), CondBr);
CondBr->eraseFromParent();
+ if (CondCmp->use_empty())
+ CondCmp->eraseFromParent();
+ else if (CondCmp->getParent() == BB) {
+ // If the fact we just learned is true for all uses of the
+ // condition, replace it with a constant value
+ auto *CI = Baseline == LazyValueInfo::True ?
+ ConstantInt::getTrue(CondCmp->getType()) :
+ ConstantInt::getFalse(CondCmp->getType());
+ CondCmp->replaceAllUsesWith(CI);
+ CondCmp->eraseFromParent();
+ }
return true;
}
}
@@ -993,7 +1003,7 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
// Split them out to their own block.
UnavailablePred =
- SplitBlockPredecessors(LoadBB, PredsToSplit, "thread-pre-split", this);
+ SplitBlockPredecessors(LoadBB, PredsToSplit, "thread-pre-split");
}
// If the value isn't available in all predecessors, then there will be
@@ -1418,7 +1428,7 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
else {
DEBUG(dbgs() << " Factoring out " << PredBBs.size()
<< " common predecessors.\n");
- PredBB = SplitBlockPredecessors(BB, PredBBs, ".thr_comm", this);
+ PredBB = SplitBlockPredecessors(BB, PredBBs, ".thr_comm");
}
// And finally, do it!
@@ -1521,7 +1531,7 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
// At this point, the IR is fully up to date and consistent. Do a quick scan
// over the new instructions and zap any that are constants or dead. This
// frequently happens because of phi translation.
- SimplifyInstructionsInBlock(NewBB, DL, TLI);
+ SimplifyInstructionsInBlock(NewBB, TLI);
// Threaded an edge!
++NumThreads;
@@ -1561,7 +1571,7 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
else {
DEBUG(dbgs() << " Factoring out " << PredBBs.size()
<< " common predecessors.\n");
- PredBB = SplitBlockPredecessors(BB, PredBBs, ".thr_comm", this);
+ PredBB = SplitBlockPredecessors(BB, PredBBs, ".thr_comm");
}
// Okay, we decided to do this! Clone all the instructions in BB onto the end
@@ -1575,7 +1585,7 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
BranchInst *OldPredBranch = dyn_cast<BranchInst>(PredBB->getTerminator());
if (!OldPredBranch || !OldPredBranch->isUnconditional()) {
- PredBB = SplitEdge(PredBB, BB, this);
+ PredBB = SplitEdge(PredBB, BB);
OldPredBranch = cast<BranchInst>(PredBB->getTerminator());
}
@@ -1586,7 +1596,6 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
BasicBlock::iterator BI = BB->begin();
for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);
-
// Clone the non-phi instructions of BB into PredBB, keeping track of the
// mapping and using it to remap operands in the cloned instructions.
for (; BI != BB->end(); ++BI) {
@@ -1603,7 +1612,8 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
// If this instruction can be simplified after the operands are updated,
// just use the simplified value instead. This frequently happens due to
// phi translation.
- if (Value *IV = SimplifyInstruction(New, DL)) {
+ if (Value *IV =
+ SimplifyInstruction(New, BB->getModule()->getDataLayout())) {
delete New;
ValueMapping[BI] = IV;
} else {
diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp
index e145981846d9..f0e6d641b180 100644
--- a/lib/Transforms/Scalar/LICM.cpp
+++ b/lib/Transforms/Scalar/LICM.cpp
@@ -38,6 +38,7 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
@@ -52,7 +53,6 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
@@ -71,6 +71,33 @@ static cl::opt<bool>
DisablePromotion("disable-licm-promotion", cl::Hidden,
cl::desc("Disable memory promotion in LICM pass"));
+static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
+static bool isNotUsedInLoop(const Instruction &I, const Loop *CurLoop);
+static bool hoist(Instruction &I, BasicBlock *Preheader);
+static bool sink(Instruction &I, const LoopInfo *LI, const DominatorTree *DT,
+ const Loop *CurLoop, AliasSetTracker *CurAST);
+static bool isGuaranteedToExecute(const Instruction &Inst,
+ const DominatorTree *DT,
+ const Loop *CurLoop,
+ const LICMSafetyInfo *SafetyInfo);
+static bool isSafeToExecuteUnconditionally(const Instruction &Inst,
+ const DominatorTree *DT,
+ const TargetLibraryInfo *TLI,
+ const Loop *CurLoop,
+ const LICMSafetyInfo *SafetyInfo,
+ const Instruction *CtxI = nullptr);
+static bool pointerInvalidatedByLoop(Value *V, uint64_t Size,
+ const AAMDNodes &AAInfo,
+ AliasSetTracker *CurAST);
+static Instruction *CloneInstructionInExitBlock(const Instruction &I,
+ BasicBlock &ExitBlock,
+ PHINode &PN,
+ const LoopInfo *LI);
+static bool canSinkOrHoistInst(Instruction &I, AliasAnalysis *AA,
+ DominatorTree *DT, TargetLibraryInfo *TLI,
+ Loop *CurLoop, AliasSetTracker *CurAST,
+ LICMSafetyInfo *SafetyInfo);
+
namespace {
struct LICM : public LoopPass {
static char ID; // Pass identification, replacement for typeid
@@ -86,7 +113,7 @@ namespace {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<LoopInfo>();
+ AU.addRequired<LoopInfoWrapperPass>();
AU.addRequiredID(LoopSimplifyID);
AU.addPreservedID(LoopSimplifyID);
AU.addRequiredID(LCSSAID);
@@ -94,7 +121,7 @@ namespace {
AU.addRequired<AliasAnalysis>();
AU.addPreserved<AliasAnalysis>();
AU.addPreserved<ScalarEvolution>();
- AU.addRequired<TargetLibraryInfo>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
}
using llvm::Pass::doFinalization;
@@ -109,7 +136,6 @@ namespace {
LoopInfo *LI; // Current LoopInfo
DominatorTree *DT; // Dominator Tree for the current Loop.
- const DataLayout *DL; // DataLayout for constant folding.
TargetLibraryInfo *TLI; // TargetLibraryInfo for constant folding.
// State that is updated as we process loops.
@@ -117,10 +143,6 @@ namespace {
BasicBlock *Preheader; // The preheader block of the current loop...
Loop *CurLoop; // The current loop we are working on...
AliasSetTracker *CurAST; // AliasSet information for the current loop...
- bool MayThrow; // The current loop contains an instruction which
- // may throw, thus preventing code motion of
- // instructions with side effects.
- bool HeaderMayThrow; // Same as previous, but specific to loop header
DenseMap<Loop*, AliasSetTracker*> LoopToAliasSetMap;
/// cloneBasicBlockAnalysis - Simple Analysis hook. Clone alias set info.
@@ -133,88 +155,17 @@ namespace {
/// Simple Analysis hook. Delete loop L from alias set map.
void deleteAnalysisLoop(Loop *L) override;
-
- /// SinkRegion - Walk the specified region of the CFG (defined by all blocks
- /// dominated by the specified block, and that are in the current loop) in
- /// reverse depth first order w.r.t the DominatorTree. This allows us to
- /// visit uses before definitions, allowing us to sink a loop body in one
- /// pass without iteration.
- ///
- void SinkRegion(DomTreeNode *N);
-
- /// HoistRegion - Walk the specified region of the CFG (defined by all
- /// blocks dominated by the specified block, and that are in the current
- /// loop) in depth first order w.r.t the DominatorTree. This allows us to
- /// visit definitions before uses, allowing us to hoist a loop body in one
- /// pass without iteration.
- ///
- void HoistRegion(DomTreeNode *N);
-
- /// inSubLoop - Little predicate that returns true if the specified basic
- /// block is in a subloop of the current one, not the current one itself.
- ///
- bool inSubLoop(BasicBlock *BB) {
- assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
- return LI->getLoopFor(BB) != CurLoop;
- }
-
- /// sink - When an instruction is found to only be used outside of the loop,
- /// this function moves it to the exit blocks and patches up SSA form as
- /// needed.
- ///
- void sink(Instruction &I);
-
- /// hoist - When an instruction is found to only use loop invariant operands
- /// that is safe to hoist, this instruction is called to do the dirty work.
- ///
- void hoist(Instruction &I);
-
- /// isSafeToExecuteUnconditionally - Only sink or hoist an instruction if it
- /// is not a trapping instruction or if it is a trapping instruction and is
- /// guaranteed to execute.
- ///
- bool isSafeToExecuteUnconditionally(Instruction &I);
-
- /// isGuaranteedToExecute - Check that the instruction is guaranteed to
- /// execute.
- ///
- bool isGuaranteedToExecute(Instruction &I);
-
- /// pointerInvalidatedByLoop - Return true if the body of this loop may
- /// store into the memory location pointed to by V.
- ///
- bool pointerInvalidatedByLoop(Value *V, uint64_t Size,
- const AAMDNodes &AAInfo) {
- // Check to see if any of the basic blocks in CurLoop invalidate *V.
- return CurAST->getAliasSetForPointer(V, Size, AAInfo).isMod();
- }
-
- bool canSinkOrHoistInst(Instruction &I);
- bool isNotUsedInLoop(Instruction &I);
-
- void PromoteAliasSet(AliasSet &AS,
- SmallVectorImpl<BasicBlock*> &ExitBlocks,
- SmallVectorImpl<Instruction*> &InsertPts,
- PredIteratorCache &PIC);
-
- /// \brief Create a copy of the instruction in the exit block and patch up
- /// SSA.
- /// PN is a user of I in ExitBlock that can be used to get the number and
- /// list of predecessors fast.
- Instruction *CloneInstructionInExitBlock(Instruction &I,
- BasicBlock &ExitBlock,
- PHINode &PN);
};
}
char LICM::ID = 0;
INITIALIZE_PASS_BEGIN(LICM, "licm", "Loop Invariant Code Motion", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LICM, "licm", "Loop Invariant Code Motion", false, false)
@@ -231,13 +182,11 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
Changed = false;
// Get our Loop and Alias Analysis information...
- LI = &getAnalysis<LoopInfo>();
+ LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
- TLI = &getAnalysis<TargetLibraryInfo>();
+ TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
@@ -274,19 +223,9 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
CurAST->add(*BB); // Incorporate the specified basic block
}
- HeaderMayThrow = false;
- BasicBlock *Header = L->getHeader();
- for (BasicBlock::iterator I = Header->begin(), E = Header->end();
- (I != E) && !HeaderMayThrow; ++I)
- HeaderMayThrow |= I->mayThrow();
- MayThrow = HeaderMayThrow;
- // TODO: We've already searched for instructions which may throw in subloops.
- // We may want to reuse this information.
- for (Loop::block_iterator BB = L->block_begin(), BBE = L->block_end();
- (BB != BBE) && !MayThrow ; ++BB)
- for (BasicBlock::iterator I = (*BB)->begin(), E = (*BB)->end();
- (I != E) && !MayThrow; ++I)
- MayThrow |= I->mayThrow();
+ // Compute loop safety information.
+ LICMSafetyInfo SafetyInfo;
+ computeLICMSafetyInfo(&SafetyInfo, CurLoop);
// We want to visit all of the instructions in this loop... that are not parts
// of our subloops (they have already had their invariants hoisted out of
@@ -299,9 +238,11 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
// instructions, we perform another pass to hoist them out of the loop.
//
if (L->hasDedicatedExits())
- SinkRegion(DT->getNode(L->getHeader()));
+ Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, CurLoop,
+ CurAST, &SafetyInfo);
if (Preheader)
- HoistRegion(DT->getNode(L->getHeader()));
+ Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI,
+ CurLoop, CurAST, &SafetyInfo);
// Now that all loop invariants have been removed from the loop, promote any
// memory references to scalars that we can.
@@ -313,7 +254,9 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
// Loop over all of the alias sets in the tracker object.
for (AliasSetTracker::iterator I = CurAST->begin(), E = CurAST->end();
I != E; ++I)
- PromoteAliasSet(*I, ExitBlocks, InsertPts, PIC);
+ Changed |= promoteLoopAccessesToScalars(*I, ExitBlocks, InsertPts,
+ PIC, LI, DT, CurLoop,
+ CurAST, &SafetyInfo);
// Once we have promoted values across the loop body we have to recursively
// reform LCSSA as any nested loop may now have values defined within the
@@ -346,27 +289,35 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
return Changed;
}
-/// SinkRegion - Walk the specified region of the CFG (defined by all blocks
-/// dominated by the specified block, and that are in the current loop) in
-/// reverse depth first order w.r.t the DominatorTree. This allows us to visit
-/// uses before definitions, allowing us to sink a loop body in one pass without
-/// iteration.
+/// Walk the specified region of the CFG (defined by all blocks dominated by
+/// the specified block, and that are in the current loop) in reverse depth
+/// first order w.r.t the DominatorTree. This allows us to visit uses before
+/// definitions, allowing us to sink a loop body in one pass without iteration.
///
-void LICM::SinkRegion(DomTreeNode *N) {
- assert(N != nullptr && "Null dominator tree node?");
+bool llvm::sinkRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
+ DominatorTree *DT, TargetLibraryInfo *TLI, Loop *CurLoop,
+ AliasSetTracker *CurAST, LICMSafetyInfo *SafetyInfo) {
+
+ // Verify inputs.
+ assert(N != nullptr && AA != nullptr && LI != nullptr &&
+ DT != nullptr && CurLoop != nullptr && CurAST != nullptr &&
+ SafetyInfo != nullptr && "Unexpected input to sinkRegion");
+
+ // Set Changed to false.
+ bool Changed = false;
+ // Get basic block
BasicBlock *BB = N->getBlock();
-
// If this subregion is not in the top level loop at all, exit.
- if (!CurLoop->contains(BB)) return;
+ if (!CurLoop->contains(BB)) return Changed;
// We are processing blocks in reverse dfo, so process children first.
const std::vector<DomTreeNode*> &Children = N->getChildren();
for (unsigned i = 0, e = Children.size(); i != e; ++i)
- SinkRegion(Children[i]);
-
+ Changed |=
+ sinkRegion(Children[i], AA, LI, DT, TLI, CurLoop, CurAST, SafetyInfo);
// Only need to process the contents of this block if it is not part of a
// subloop (which would already have been processed).
- if (inSubLoop(BB)) return;
+ if (inSubLoop(BB, CurLoop, LI)) return Changed;
for (BasicBlock::iterator II = BB->end(); II != BB->begin(); ) {
Instruction &I = *--II;
@@ -387,35 +338,43 @@ void LICM::SinkRegion(DomTreeNode *N) {
// outside of the loop. In this case, it doesn't even matter if the
// operands of the instruction are loop invariant.
//
- if (isNotUsedInLoop(I) && canSinkOrHoistInst(I)) {
+ if (isNotUsedInLoop(I, CurLoop) &&
+ canSinkOrHoistInst(I, AA, DT, TLI, CurLoop, CurAST, SafetyInfo)) {
++II;
- sink(I);
+ Changed |= sink(I, LI, DT, CurLoop, CurAST);
}
}
+ return Changed;
}
-/// HoistRegion - Walk the specified region of the CFG (defined by all blocks
-/// dominated by the specified block, and that are in the current loop) in depth
-/// first order w.r.t the DominatorTree. This allows us to visit definitions
-/// before uses, allowing us to hoist a loop body in one pass without iteration.
+/// Walk the specified region of the CFG (defined by all blocks dominated by
+/// the specified block, and that are in the current loop) in depth first
+/// order w.r.t the DominatorTree. This allows us to visit definitions before
+/// uses, allowing us to hoist a loop body in one pass without iteration.
///
-void LICM::HoistRegion(DomTreeNode *N) {
- assert(N != nullptr && "Null dominator tree node?");
+bool llvm::hoistRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
+ DominatorTree *DT, TargetLibraryInfo *TLI, Loop *CurLoop,
+ AliasSetTracker *CurAST, LICMSafetyInfo *SafetyInfo) {
+ // Verify inputs.
+ assert(N != nullptr && AA != nullptr && LI != nullptr &&
+ DT != nullptr && CurLoop != nullptr && CurAST != nullptr &&
+ SafetyInfo != nullptr && "Unexpected input to hoistRegion");
+ // Set Changed to false.
+ bool Changed = false;
+ // Get basic block
BasicBlock *BB = N->getBlock();
-
// If this subregion is not in the top level loop at all, exit.
- if (!CurLoop->contains(BB)) return;
-
+ if (!CurLoop->contains(BB)) return Changed;
// Only need to process the contents of this block if it is not part of a
// subloop (which would already have been processed).
- if (!inSubLoop(BB))
+ if (!inSubLoop(BB, CurLoop, LI))
for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ) {
Instruction &I = *II++;
-
// Try constant folding this instruction. If all the operands are
// constants, it is technically hoistable, but it would be better to just
// fold it.
- if (Constant *C = ConstantFoldInstruction(&I, DL, TLI)) {
+ if (Constant *C = ConstantFoldInstruction(
+ &I, I.getModule()->getDataLayout(), TLI)) {
DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C << '\n');
CurAST->copyValue(&I, C);
CurAST->deleteValue(&I);
@@ -428,20 +387,49 @@ void LICM::HoistRegion(DomTreeNode *N) {
// if all of the operands of the instruction are loop invariant and if it
// is safe to hoist the instruction.
//
- if (CurLoop->hasLoopInvariantOperands(&I) && canSinkOrHoistInst(I) &&
- isSafeToExecuteUnconditionally(I))
- hoist(I);
+ if (CurLoop->hasLoopInvariantOperands(&I) &&
+ canSinkOrHoistInst(I, AA, DT, TLI, CurLoop, CurAST, SafetyInfo) &&
+ isSafeToExecuteUnconditionally(I, DT, TLI, CurLoop, SafetyInfo,
+ CurLoop->getLoopPreheader()->getTerminator()))
+ Changed |= hoist(I, CurLoop->getLoopPreheader());
}
const std::vector<DomTreeNode*> &Children = N->getChildren();
for (unsigned i = 0, e = Children.size(); i != e; ++i)
- HoistRegion(Children[i]);
+ Changed |=
+ hoistRegion(Children[i], AA, LI, DT, TLI, CurLoop, CurAST, SafetyInfo);
+ return Changed;
+}
+
+/// Computes loop safety information, checking the loop body and header
+/// for the possibility of an instruction that may throw an exception.
+///
+void llvm::computeLICMSafetyInfo(LICMSafetyInfo * SafetyInfo, Loop * CurLoop) {
+ assert(CurLoop != nullptr && "CurLoop can't be null");
+ BasicBlock *Header = CurLoop->getHeader();
+ // Setting default safety values.
+ SafetyInfo->MayThrow = false;
+ SafetyInfo->HeaderMayThrow = false;
+ // Iterate over header and compute safety info.
+ for (BasicBlock::iterator I = Header->begin(), E = Header->end();
+ (I != E) && !SafetyInfo->HeaderMayThrow; ++I)
+ SafetyInfo->HeaderMayThrow |= I->mayThrow();
+
+ SafetyInfo->MayThrow = SafetyInfo->HeaderMayThrow;
+ // Iterate over loop instructions and compute safety info.
+ for (Loop::block_iterator BB = CurLoop->block_begin(),
+ BBE = CurLoop->block_end(); (BB != BBE) && !SafetyInfo->MayThrow ; ++BB)
+ for (BasicBlock::iterator I = (*BB)->begin(), E = (*BB)->end();
+ (I != E) && !SafetyInfo->MayThrow; ++I)
+ SafetyInfo->MayThrow |= I->mayThrow();
}
/// canSinkOrHoistInst - Return true if the hoister and sinker can handle this
/// instruction.
///
-bool LICM::canSinkOrHoistInst(Instruction &I) {
+bool canSinkOrHoistInst(Instruction &I, AliasAnalysis *AA, DominatorTree *DT,
+ TargetLibraryInfo *TLI, Loop *CurLoop,
+ AliasSetTracker *CurAST, LICMSafetyInfo *SafetyInfo) {
// Loads have extra constraints we have to verify before we can hoist them.
if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
if (!LI->isUnordered())
@@ -462,7 +450,7 @@ bool LICM::canSinkOrHoistInst(Instruction &I) {
AAMDNodes AAInfo;
LI->getAAMetadata(AAInfo);
- return !pointerInvalidatedByLoop(LI->getOperand(0), Size, AAInfo);
+ return !pointerInvalidatedByLoop(LI->getOperand(0), Size, AAInfo, CurAST);
} else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
// Don't sink or hoist dbg info; it's legal, but not useful.
if (isa<DbgInfoIntrinsic>(I))
@@ -501,30 +489,34 @@ bool LICM::canSinkOrHoistInst(Instruction &I) {
!isa<InsertValueInst>(I))
return false;
- return isSafeToExecuteUnconditionally(I);
+ // TODO: Plumb the context instruction through to make hoisting and sinking
+ // more powerful. Hoisting of loads already works due to the special casing
+ // above.
+ return isSafeToExecuteUnconditionally(I, DT, TLI, CurLoop, SafetyInfo,
+ nullptr);
}
-/// \brief Returns true if a PHINode is a trivially replaceable with an
+/// Returns true if a PHINode is trivially replaceable with an
/// Instruction.
+/// This is true when all incoming values are that instruction.
+/// This pattern occurs most often with LCSSA PHI nodes.
///
-/// This is true when all incoming values are that instruction. This pattern
-/// occurs most often with LCSSA PHI nodes.
-static bool isTriviallyReplacablePHI(PHINode &PN, Instruction &I) {
- for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
- if (PN.getIncomingValue(i) != &I)
+static bool isTriviallyReplacablePHI(const PHINode &PN, const Instruction &I) {
+ for (const Value *IncValue : PN.incoming_values())
+ if (IncValue != &I)
return false;
return true;
}
-/// isNotUsedInLoop - Return true if the only users of this instruction are
-/// outside of the loop. If this is true, we can sink the instruction to the
-/// exit blocks of the loop.
+/// Return true if the only users of this instruction are outside of
+/// the loop. If this is true, we can sink the instruction to the exit
+/// blocks of the loop.
///
-bool LICM::isNotUsedInLoop(Instruction &I) {
- for (User *U : I.users()) {
- Instruction *UI = cast<Instruction>(U);
- if (PHINode *PN = dyn_cast<PHINode>(UI)) {
+static bool isNotUsedInLoop(const Instruction &I, const Loop *CurLoop) {
+ for (const User *U : I.users()) {
+ const Instruction *UI = cast<Instruction>(U);
+ if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
// A PHI node where all of the incoming values are this instruction are
// special -- they can just be RAUW'ed with the instruction and thus
// don't require a use in the predecessor. This is a particular important
@@ -552,9 +544,10 @@ bool LICM::isNotUsedInLoop(Instruction &I) {
return true;
}
-Instruction *LICM::CloneInstructionInExitBlock(Instruction &I,
- BasicBlock &ExitBlock,
- PHINode &PN) {
+static Instruction *CloneInstructionInExitBlock(const Instruction &I,
+ BasicBlock &ExitBlock,
+ PHINode &PN,
+ const LoopInfo *LI) {
Instruction *New = I.clone();
ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
if (!I.getName().empty()) New->setName(I.getName() + ".le");
@@ -581,14 +574,15 @@ Instruction *LICM::CloneInstructionInExitBlock(Instruction &I,
return New;
}
-/// sink - When an instruction is found to only be used outside of the loop,
-/// this function moves it to the exit blocks and patches up SSA form as needed.
+/// When an instruction is found to only be used outside of the loop, this
+/// function moves it to the exit blocks and patches up SSA form as needed.
/// This method is guaranteed to remove the original instruction from its
/// position, and may either delete it or move it to outside of the loop.
///
-void LICM::sink(Instruction &I) {
+static bool sink(Instruction &I, const LoopInfo *LI, const DominatorTree *DT,
+ const Loop *CurLoop, AliasSetTracker *CurAST) {
DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
-
+ bool Changed = false;
if (isa<LoadInst>(I)) ++NumMovedLoads;
else if (isa<CallInst>(I)) ++NumMovedCalls;
++NumSunk;
@@ -597,7 +591,8 @@ void LICM::sink(Instruction &I) {
#ifndef NDEBUG
SmallVector<BasicBlock *, 32> ExitBlocks;
CurLoop->getUniqueExitBlocks(ExitBlocks);
- SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(), ExitBlocks.end());
+ SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
+ ExitBlocks.end());
#endif
// Clones of this instruction. Don't create more than one per exit block!
@@ -625,7 +620,7 @@ void LICM::sink(Instruction &I) {
New = It->second;
else
New = SunkCopies[ExitBlock] =
- CloneInstructionInExitBlock(I, *ExitBlock, *PN);
+ CloneInstructionInExitBlock(I, *ExitBlock, *PN, LI);
PN->replaceAllUsesWith(New);
PN->eraseFromParent();
@@ -633,37 +628,43 @@ void LICM::sink(Instruction &I) {
CurAST->deleteValue(&I);
I.eraseFromParent();
+ return Changed;
}
-/// hoist - When an instruction is found to only use loop invariant operands
-/// that is safe to hoist, this instruction is called to do the dirty work.
+/// When an instruction is found to only use loop invariant operands and it
+/// is safe to hoist, this function is called to do the dirty work.
///
-void LICM::hoist(Instruction &I) {
+static bool hoist(Instruction &I, BasicBlock *Preheader) {
DEBUG(dbgs() << "LICM hoisting to " << Preheader->getName() << ": "
<< I << "\n");
-
// Move the new node to the Preheader, before its terminator.
I.moveBefore(Preheader->getTerminator());
if (isa<LoadInst>(I)) ++NumMovedLoads;
else if (isa<CallInst>(I)) ++NumMovedCalls;
++NumHoisted;
- Changed = true;
+ return true;
}
-/// isSafeToExecuteUnconditionally - Only sink or hoist an instruction if it is
-/// not a trapping instruction or if it is a trapping instruction and is
-/// guaranteed to execute.
-///
-bool LICM::isSafeToExecuteUnconditionally(Instruction &Inst) {
- // If it is not a trapping instruction, it is always safe to hoist.
- if (isSafeToSpeculativelyExecute(&Inst, DL))
+/// Only sink or hoist an instruction if it is not a trapping instruction,
+/// or if the instruction is known not to trap when moved to the preheader,
+/// or if it is a trapping instruction and is guaranteed to execute.
+static bool isSafeToExecuteUnconditionally(const Instruction &Inst,
+ const DominatorTree *DT,
+ const TargetLibraryInfo *TLI,
+ const Loop *CurLoop,
+ const LICMSafetyInfo *SafetyInfo,
+ const Instruction *CtxI) {
+ if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT, TLI))
return true;
- return isGuaranteedToExecute(Inst);
+ return isGuaranteedToExecute(Inst, DT, CurLoop, SafetyInfo);
}
-bool LICM::isGuaranteedToExecute(Instruction &Inst) {
+static bool isGuaranteedToExecute(const Instruction &Inst,
+ const DominatorTree *DT,
+ const Loop *CurLoop,
+ const LICMSafetyInfo * SafetyInfo) {
// We have to check to make sure that the instruction dominates all
// of the exit blocks. If it doesn't, then there is a path out of the loop
@@ -675,11 +676,11 @@ bool LICM::isGuaranteedToExecute(Instruction &Inst) {
if (Inst.getParent() == CurLoop->getHeader())
// If there's a throw in the header block, we can't guarantee we'll reach
// Inst.
- return !HeaderMayThrow;
+ return !SafetyInfo->HeaderMayThrow;
// Somewhere in this loop there is an instruction which may throw and make us
// exit the loop.
- if (MayThrow)
+ if (SafetyInfo->MayThrow)
return false;
// Get the exit blocks for the current loop.
@@ -719,17 +720,18 @@ namespace {
// We need to create an LCSSA PHI node for the incoming value and
// store that.
PHINode *PN = PHINode::Create(
- I->getType(), PredCache.GetNumPreds(BB),
+ I->getType(), PredCache.size(BB),
I->getName() + ".lcssa", BB->begin());
- for (BasicBlock **PI = PredCache.GetPreds(BB); *PI; ++PI)
- PN->addIncoming(I, *PI);
+ for (BasicBlock *Pred : PredCache.get(BB))
+ PN->addIncoming(I, Pred);
return PN;
}
return V;
}
public:
- LoopPromoter(Value *SP, const SmallVectorImpl<Instruction *> &Insts,
+ LoopPromoter(Value *SP,
+ ArrayRef<const Instruction *> Insts,
SSAUpdater &S, SmallPtrSetImpl<Value *> &PMA,
SmallVectorImpl<BasicBlock *> &LEB,
SmallVectorImpl<Instruction *> &LIP, PredIteratorCache &PIC,
@@ -777,25 +779,37 @@ namespace {
};
} // end anon namespace
-/// PromoteAliasSet - Try to promote memory values to scalars by sinking
-/// stores out of the loop and moving loads to before the loop. We do this by
-/// looping over the stores in the loop, looking for stores to Must pointers
-/// which are loop invariant.
+/// Try to promote memory values to scalars by sinking stores out of the
+/// loop and moving loads to before the loop. We do this by looping over
+/// the stores in the loop, looking for stores to Must pointers which are
+/// loop invariant.
///
-void LICM::PromoteAliasSet(AliasSet &AS,
- SmallVectorImpl<BasicBlock*> &ExitBlocks,
- SmallVectorImpl<Instruction*> &InsertPts,
- PredIteratorCache &PIC) {
+bool llvm::promoteLoopAccessesToScalars(AliasSet &AS,
+ SmallVectorImpl<BasicBlock*>&ExitBlocks,
+ SmallVectorImpl<Instruction*>&InsertPts,
+ PredIteratorCache &PIC, LoopInfo *LI,
+ DominatorTree *DT, Loop *CurLoop,
+ AliasSetTracker *CurAST,
+ LICMSafetyInfo * SafetyInfo) {
+ // Verify inputs.
+ assert(LI != nullptr && DT != nullptr &&
+ CurLoop != nullptr && CurAST != nullptr &&
+ SafetyInfo != nullptr &&
+ "Unexpected Input to promoteLoopAccessesToScalars");
+ // Initially set Changed status to false.
+ bool Changed = false;
// We can promote this alias set if it has a store, if it is a "Must" alias
// set, if the pointer is loop invariant, and if we are not eliminating any
// volatile loads or stores.
if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
AS.isVolatile() || !CurLoop->isLoopInvariant(AS.begin()->getValue()))
- return;
+ return Changed;
assert(!AS.empty() &&
"Must alias set should have at least one pointer element in it!");
+
Value *SomePtr = AS.begin()->getValue();
+ BasicBlock * Preheader = CurLoop->getLoopPreheader();
// It isn't safe to promote a load/store from the loop if the load/store is
// conditional. For example, turning:
@@ -832,7 +846,7 @@ void LICM::PromoteAliasSet(AliasSet &AS,
// cannot (yet) promote a memory location that is loaded and stored in
// different sizes.
if (SomePtr->getType() != ASIV->getType())
- return;
+ return Changed;
for (User *U : ASIV->users()) {
// Ignore instructions that are outside the loop.
@@ -842,25 +856,25 @@ void LICM::PromoteAliasSet(AliasSet &AS,
// If there is an non-load/store instruction in the loop, we can't promote
// it.
- if (LoadInst *load = dyn_cast<LoadInst>(UI)) {
+ if (const LoadInst *load = dyn_cast<LoadInst>(UI)) {
assert(!load->isVolatile() && "AST broken");
if (!load->isSimple())
- return;
- } else if (StoreInst *store = dyn_cast<StoreInst>(UI)) {
+ return Changed;
+ } else if (const StoreInst *store = dyn_cast<StoreInst>(UI)) {
// Stores *of* the pointer are not interesting, only stores *to* the
// pointer.
if (UI->getOperand(1) != ASIV)
continue;
assert(!store->isVolatile() && "AST broken");
if (!store->isSimple())
- return;
+ return Changed;
// Don't sink stores from loops without dedicated block exits. Exits
// containing indirect branches are not transformed by loop simplify,
// make sure we catch that. An additional load may be generated in the
// preheader for SSA updater, so also avoid sinking when no preheader
// is available.
if (!HasDedicatedExits || !Preheader)
- return;
+ return Changed;
// Note that we only check GuaranteedToExecute inside the store case
// so that we do not introduce stores where they did not exist before
@@ -872,16 +886,17 @@ void LICM::PromoteAliasSet(AliasSet &AS,
// Larger is better, with the exception of 0 being the best alignment.
unsigned InstAlignment = store->getAlignment();
if ((InstAlignment > Alignment || InstAlignment == 0) && Alignment != 0)
- if (isGuaranteedToExecute(*UI)) {
+ if (isGuaranteedToExecute(*UI, DT, CurLoop, SafetyInfo)) {
GuaranteedToExecute = true;
Alignment = InstAlignment;
}
if (!GuaranteedToExecute)
- GuaranteedToExecute = isGuaranteedToExecute(*UI);
+ GuaranteedToExecute = isGuaranteedToExecute(*UI, DT,
+ CurLoop, SafetyInfo);
} else
- return; // Not a load or store.
+ return Changed; // Not a load or store.
// Merge the AA tags.
if (LoopUses.empty()) {
@@ -897,7 +912,7 @@ void LICM::PromoteAliasSet(AliasSet &AS,
// If there isn't a guaranteed-to-execute instruction, we can't promote.
if (!GuaranteedToExecute)
- return;
+ return Changed;
// Otherwise, this is safe to promote, lets do it!
DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " <<*SomePtr<<'\n');
@@ -922,7 +937,8 @@ void LICM::PromoteAliasSet(AliasSet &AS,
// We use the SSAUpdater interface to insert phi nodes as required.
SmallVector<PHINode*, 16> NewPHIs;
SSAUpdater SSA(&NewPHIs);
- LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
+ LoopPromoter Promoter(SomePtr, LoopUses, SSA,
+ PointerMustAliases, ExitBlocks,
InsertPts, PIC, *CurAST, *LI, DL, Alignment, AATags);
// Set up the preheader to have a definition of the value. It is the live-out
@@ -942,10 +958,12 @@ void LICM::PromoteAliasSet(AliasSet &AS,
// If the SSAUpdater didn't use the load in the preheader, just zap it now.
if (PreheaderLoad->use_empty())
PreheaderLoad->eraseFromParent();
-}
+ return Changed;
+}
-/// cloneBasicBlockAnalysis - Simple Analysis hook. Clone alias set info.
+/// Simple Analysis hook. Clone alias set info.
+///
void LICM::cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To, Loop *L) {
AliasSetTracker *AST = LoopToAliasSetMap.lookup(L);
if (!AST)
@@ -954,8 +972,8 @@ void LICM::cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To, Loop *L) {
AST->copyValue(From, To);
}
-/// deleteAnalysisValue - Simple Analysis hook. Delete value V from alias
-/// set.
+/// Simple Analysis hook. Delete value V from alias set
+///
void LICM::deleteAnalysisValue(Value *V, Loop *L) {
AliasSetTracker *AST = LoopToAliasSetMap.lookup(L);
if (!AST)
@@ -965,6 +983,7 @@ void LICM::deleteAnalysisValue(Value *V, Loop *L) {
}
/// Simple Analysis hook. Delete value L from alias set map.
+///
void LICM::deleteAnalysisLoop(Loop *L) {
AliasSetTracker *AST = LoopToAliasSetMap.lookup(L);
if (!AST)
@@ -973,3 +992,23 @@ void LICM::deleteAnalysisLoop(Loop *L) {
delete AST;
LoopToAliasSetMap.erase(L);
}
+
+
+/// Return true if the body of this loop may store into the memory
+/// location pointed to by V.
+///
+static bool pointerInvalidatedByLoop(Value *V, uint64_t Size,
+ const AAMDNodes &AAInfo,
+ AliasSetTracker *CurAST) {
+ // Check to see if any of the basic blocks in CurLoop invalidate *V.
+ return CurAST->getAliasSetForPointer(V, Size, AAInfo).isMod();
+}
+
+/// Little predicate that returns true if the specified basic block is in
+/// a subloop of the current one, not the current one itself.
+///
+static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
+ assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
+ return LI->getLoopFor(BB) != CurLoop;
+}
+
diff --git a/lib/Transforms/Scalar/LLVMBuild.txt b/lib/Transforms/Scalar/LLVMBuild.txt
index 2bb49a3026c9..deea9e2d0102 100644
--- a/lib/Transforms/Scalar/LLVMBuild.txt
+++ b/lib/Transforms/Scalar/LLVMBuild.txt
@@ -20,4 +20,4 @@ type = Library
name = Scalar
parent = Transforms
library_name = ScalarOpts
-required_libraries = Analysis Core InstCombine ProfileData Support Target TransformUtils
+required_libraries = Analysis Core InstCombine ProfileData Support TransformUtils
diff --git a/lib/Transforms/Scalar/LoadCombine.cpp b/lib/Transforms/Scalar/LoadCombine.cpp
index 11e4d7606d96..c19cd19059b2 100644
--- a/lib/Transforms/Scalar/LoadCombine.cpp
+++ b/lib/Transforms/Scalar/LoadCombine.cpp
@@ -12,17 +12,17 @@
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar.h"
-
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/TargetFolder.h"
-#include "llvm/Pass.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
-#include "llvm/IR/Instructions.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
@@ -41,9 +41,9 @@ struct PointerOffsetPair {
};
struct LoadPOPPair {
+ LoadPOPPair() = default;
LoadPOPPair(LoadInst *L, PointerOffsetPair P, unsigned O)
: Load(L), POP(P), InsertOrder(O) {}
- LoadPOPPair() {}
LoadInst *Load;
PointerOffsetPair POP;
/// \brief The new load needs to be created before the first load in IR order.
@@ -52,13 +52,10 @@ struct LoadPOPPair {
class LoadCombine : public BasicBlockPass {
LLVMContext *C;
- const DataLayout *DL;
AliasAnalysis *AA;
public:
- LoadCombine()
- : BasicBlockPass(ID),
- C(nullptr), DL(nullptr), AA(nullptr) {
+ LoadCombine() : BasicBlockPass(ID), C(nullptr), AA(nullptr) {
initializeSROAPass(*PassRegistry::getPassRegistry());
}
@@ -85,12 +82,6 @@ private:
bool LoadCombine::doInitialization(Function &F) {
DEBUG(dbgs() << "LoadCombine function: " << F.getName() << "\n");
C = &F.getContext();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP) {
- DEBUG(dbgs() << " Skipping LoadCombine -- no target data!\n");
- return false;
- }
- DL = &DLP->getDataLayout();
return true;
}
@@ -100,9 +91,10 @@ PointerOffsetPair LoadCombine::getPointerOffsetPair(LoadInst &LI) {
POP.Offset = 0;
while (isa<BitCastInst>(POP.Pointer) || isa<GetElementPtrInst>(POP.Pointer)) {
if (auto *GEP = dyn_cast<GetElementPtrInst>(POP.Pointer)) {
- unsigned BitWidth = DL->getPointerTypeSizeInBits(GEP->getType());
+ auto &DL = LI.getModule()->getDataLayout();
+ unsigned BitWidth = DL.getPointerTypeSizeInBits(GEP->getType());
APInt Offset(BitWidth, 0);
- if (GEP->accumulateConstantOffset(*DL, Offset))
+ if (GEP->accumulateConstantOffset(DL, Offset))
POP.Offset += Offset.getZExtValue();
else
// Can't handle GEPs with variable indices.
@@ -145,7 +137,8 @@ bool LoadCombine::aggregateLoads(SmallVectorImpl<LoadPOPPair> &Loads) {
if (PrevOffset == -1ull) {
BaseLoad = L.Load;
PrevOffset = L.POP.Offset;
- PrevSize = DL->getTypeStoreSize(L.Load->getType());
+ PrevSize = L.Load->getModule()->getDataLayout().getTypeStoreSize(
+ L.Load->getType());
AggregateLoads.push_back(L);
continue;
}
@@ -164,7 +157,8 @@ bool LoadCombine::aggregateLoads(SmallVectorImpl<LoadPOPPair> &Loads) {
// FIXME: We may want to handle this case.
continue;
PrevOffset = L.POP.Offset;
- PrevSize = DL->getTypeStoreSize(L.Load->getType());
+ PrevSize = L.Load->getModule()->getDataLayout().getTypeStoreSize(
+ L.Load->getType());
AggregateLoads.push_back(L);
}
if (combineLoads(AggregateLoads))
@@ -215,7 +209,8 @@ bool LoadCombine::combineLoads(SmallVectorImpl<LoadPOPPair> &Loads) {
for (const auto &L : Loads) {
Builder->SetInsertPoint(L.Load);
Value *V = Builder->CreateExtractInteger(
- *DL, NewLoad, cast<IntegerType>(L.Load->getType()),
+ L.Load->getModule()->getDataLayout(), NewLoad,
+ cast<IntegerType>(L.Load->getType()),
L.POP.Offset - Loads[0].POP.Offset, "combine.extract");
L.Load->replaceAllUsesWith(V);
}
@@ -225,13 +220,13 @@ bool LoadCombine::combineLoads(SmallVectorImpl<LoadPOPPair> &Loads) {
}
bool LoadCombine::runOnBasicBlock(BasicBlock &BB) {
- if (skipOptnoneFunction(BB) || !DL)
+ if (skipOptnoneFunction(BB))
return false;
AA = &getAnalysis<AliasAnalysis>();
- IRBuilder<true, TargetFolder>
- TheBuilder(BB.getContext(), TargetFolder(DL));
+ IRBuilder<true, TargetFolder> TheBuilder(
+ BB.getContext(), TargetFolder(BB.getModule()->getDataLayout()));
Builder = &TheBuilder;
DenseMap<const Value *, SmallVector<LoadPOPPair, 8>> LoadMap;
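// A minimal sketch of the rewrite this pass performs; the struct layout, the
// little-endian extraction, and the memcpy-based widening are illustrative
// assumptions, not code from this patch. Two adjacent 32-bit loads become one
// wide 64-bit load followed by "combine.extract"-style bit extraction, with
// sizes and offsets coming from the module's DataLayout as in the code above.
#include <cstring>
struct PairOfInts { unsigned Lo, Hi; };  // assumes 4-byte int, no padding
static unsigned long long beforeCombine(const PairOfInts *P) {
  return (unsigned long long)P->Lo + P->Hi;  // two separate 32-bit loads
}
static unsigned long long afterCombine(const PairOfInts *P) {
  unsigned long long Wide;
  std::memcpy(&Wide, P, sizeof(Wide));       // one combined 64-bit load
  unsigned Lo = (unsigned)(Wide & 0xffffffffu);  // extract bits 0..31
  unsigned Hi = (unsigned)(Wide >> 32);          // extract bits 32..63
  return (unsigned long long)Lo + Hi;
}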
diff --git a/lib/Transforms/Scalar/LoopDeletion.cpp b/lib/Transforms/Scalar/LoopDeletion.cpp
index 1d1f33ae6183..98b068edf582 100644
--- a/lib/Transforms/Scalar/LoopDeletion.cpp
+++ b/lib/Transforms/Scalar/LoopDeletion.cpp
@@ -39,14 +39,14 @@ namespace {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<LoopInfo>();
+ AU.addRequired<LoopInfoWrapperPass>();
AU.addRequired<ScalarEvolution>();
AU.addRequiredID(LoopSimplifyID);
AU.addRequiredID(LCSSAID);
AU.addPreserved<ScalarEvolution>();
AU.addPreserved<DominatorTreeWrapperPass>();
- AU.addPreserved<LoopInfo>();
+ AU.addPreserved<LoopInfoWrapperPass>();
AU.addPreservedID(LoopSimplifyID);
AU.addPreservedID(LCSSAID);
}
@@ -63,7 +63,7 @@ char LoopDeletion::ID = 0;
INITIALIZE_PASS_BEGIN(LoopDeletion, "loop-deletion",
"Delete dead loops", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
@@ -236,7 +236,7 @@ bool LoopDeletion::runOnLoop(Loop *L, LPPassManager &LPM) {
// Finally, the blocks from loopinfo. This has to happen late because
// otherwise our loop iterators won't work.
- LoopInfo &loopInfo = getAnalysis<LoopInfo>();
+ LoopInfo &loopInfo = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
SmallPtrSet<BasicBlock*, 8> blocks;
blocks.insert(L->block_begin(), L->block_end());
for (BasicBlock *BB : blocks)
diff --git a/lib/Transforms/Scalar/LoopDistribute.cpp b/lib/Transforms/Scalar/LoopDistribute.cpp
new file mode 100644
index 000000000000..a907d596e35b
--- /dev/null
+++ b/lib/Transforms/Scalar/LoopDistribute.cpp
@@ -0,0 +1,976 @@
+//===- LoopDistribute.cpp - Loop Distribution Pass ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Loop Distribution Pass. Its main focus is to
+// distribute loops that cannot be vectorized due to dependence cycles. It
+// tries to isolate the offending dependences into a new loop allowing
+// vectorization of the remaining parts.
+//
+// For dependence analysis, the pass uses the LoopVectorizer's
+// LoopAccessAnalysis. Because this analysis presumes no change in the order of
+// memory operations, special care is taken to preserve the lexical order of
+// these operations.
+//
+// Similarly to the Vectorizer, the pass also supports loop versioning to
+// run-time disambiguate potentially overlapping arrays.
+//
+//===----------------------------------------------------------------------===//
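// A minimal source-level sketch of the transformation; the arrays a, b, c, d
// and the function names are illustrative assumptions, not code from this
// patch, and the rewrite is only valid when the arrays are disjoint (which is
// what the run-time checks emitted by this pass establish). The recurrence on
// 'a' forms a dependence cycle and is isolated so that the independent update
// of 'c' can be vectorized.
static void beforeDistribution(int *a, int *b, int *c, int *d, int n) {
  for (int i = 1; i < n; ++i) {
    a[i] = a[i - 1] + b[i];   // dependence cycle: blocks vectorization
    c[i] = c[i] + d[i];       // independent: vectorizable on its own
  }
}
static void afterDistribution(int *a, int *b, int *c, int *d, int n) {
  for (int i = 1; i < n; ++i)
    a[i] = a[i - 1] + b[i];   // "cyclic" partition, stays scalar
  for (int i = 1; i < n; ++i)
    c[i] = c[i] + d[i];       // "non-cyclic" partition, can be vectorized
}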
+
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/EquivalenceClasses.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/LoopAccessAnalysis.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include <list>
+
+#define LDIST_NAME "loop-distribute"
+#define DEBUG_TYPE LDIST_NAME
+
+using namespace llvm;
+
+static cl::opt<bool>
+ LDistVerify("loop-distribute-verify", cl::Hidden,
+ cl::desc("Turn on DominatorTree and LoopInfo verification "
+ "after Loop Distribution"),
+ cl::init(false));
+
+static cl::opt<bool> DistributeNonIfConvertible(
+ "loop-distribute-non-if-convertible", cl::Hidden,
+ cl::desc("Whether to distribute into a loop that may not be "
+ "if-convertible by the loop vectorizer"),
+ cl::init(false));
+
+STATISTIC(NumLoopsDistributed, "Number of loops distributed");
+
+/// \brief Remaps instructions in a loop including the preheader.
+static void remapInstructionsInLoop(const SmallVectorImpl<BasicBlock *> &Blocks,
+ ValueToValueMapTy &VMap) {
+ // Rewrite the code to refer to itself.
+ for (auto *BB : Blocks)
+ for (auto &Inst : *BB)
+ RemapInstruction(&Inst, VMap,
+ RF_NoModuleLevelChanges | RF_IgnoreMissingEntries);
+}
+
+/// \brief Clones a loop \p OrigLoop. Returns the loop and the blocks in \p
+/// Blocks.
+///
+/// Updates LoopInfo and DominatorTree assuming the loop is dominated by block
+/// \p LoopDomBB. Insert the new blocks before block specified in \p Before.
+static Loop *cloneLoopWithPreheader(BasicBlock *Before, BasicBlock *LoopDomBB,
+ Loop *OrigLoop, ValueToValueMapTy &VMap,
+ const Twine &NameSuffix, LoopInfo *LI,
+ DominatorTree *DT,
+ SmallVectorImpl<BasicBlock *> &Blocks) {
+ Function *F = OrigLoop->getHeader()->getParent();
+ Loop *ParentLoop = OrigLoop->getParentLoop();
+
+ Loop *NewLoop = new Loop();
+ if (ParentLoop)
+ ParentLoop->addChildLoop(NewLoop);
+ else
+ LI->addTopLevelLoop(NewLoop);
+
+ BasicBlock *OrigPH = OrigLoop->getLoopPreheader();
+ BasicBlock *NewPH = CloneBasicBlock(OrigPH, VMap, NameSuffix, F);
+ // To rename the loop PHIs.
+ VMap[OrigPH] = NewPH;
+ Blocks.push_back(NewPH);
+
+ // Update LoopInfo.
+ if (ParentLoop)
+ ParentLoop->addBasicBlockToLoop(NewPH, *LI);
+
+ // Update DominatorTree.
+ DT->addNewBlock(NewPH, LoopDomBB);
+
+ for (BasicBlock *BB : OrigLoop->getBlocks()) {
+ BasicBlock *NewBB = CloneBasicBlock(BB, VMap, NameSuffix, F);
+ VMap[BB] = NewBB;
+
+ // Update LoopInfo.
+ NewLoop->addBasicBlockToLoop(NewBB, *LI);
+
+ // Update DominatorTree.
+ BasicBlock *IDomBB = DT->getNode(BB)->getIDom()->getBlock();
+ DT->addNewBlock(NewBB, cast<BasicBlock>(VMap[IDomBB]));
+
+ Blocks.push_back(NewBB);
+ }
+
+ // Move them physically from the end of the block list.
+ F->getBasicBlockList().splice(Before, F->getBasicBlockList(), NewPH);
+ F->getBasicBlockList().splice(Before, F->getBasicBlockList(),
+ NewLoop->getHeader(), F->end());
+
+ return NewLoop;
+}
+
+namespace {
+/// \brief Maintains the set of instructions of the loop for a partition before
+/// cloning. After cloning, it hosts the new loop.
+class InstPartition {
+ typedef SmallPtrSet<Instruction *, 8> InstructionSet;
+
+public:
+ InstPartition(Instruction *I, Loop *L, bool DepCycle = false)
+ : DepCycle(DepCycle), OrigLoop(L), ClonedLoop(nullptr) {
+ Set.insert(I);
+ }
+
+ /// \brief Returns whether this partition contains a dependence cycle.
+ bool hasDepCycle() const { return DepCycle; }
+
+ /// \brief Adds an instruction to this partition.
+ void add(Instruction *I) { Set.insert(I); }
+
+ /// \brief Collection accessors.
+ InstructionSet::iterator begin() { return Set.begin(); }
+ InstructionSet::iterator end() { return Set.end(); }
+ InstructionSet::const_iterator begin() const { return Set.begin(); }
+ InstructionSet::const_iterator end() const { return Set.end(); }
+ bool empty() const { return Set.empty(); }
+
+ /// \brief Moves this partition into \p Other. This partition becomes empty
+ /// after this.
+ void moveTo(InstPartition &Other) {
+ Other.Set.insert(Set.begin(), Set.end());
+ Set.clear();
+ Other.DepCycle |= DepCycle;
+ }
+
+ /// \brief Populates the partition with a transitive closure of all the
+  /// instructions that the seeded instructions depend on.
+ void populateUsedSet() {
+ // FIXME: We currently don't use control-dependence but simply include all
+ // blocks (possibly empty at the end) and let simplifycfg mostly clean this
+ // up.
+ for (auto *B : OrigLoop->getBlocks())
+ Set.insert(B->getTerminator());
+
+ // Follow the use-def chains to form a transitive closure of all the
+ // instructions that the originally seeded instructions depend on.
+ SmallVector<Instruction *, 8> Worklist(Set.begin(), Set.end());
+ while (!Worklist.empty()) {
+ Instruction *I = Worklist.pop_back_val();
+ // Insert instructions from the loop that we depend on.
+ for (Value *V : I->operand_values()) {
+ auto *I = dyn_cast<Instruction>(V);
+ if (I && OrigLoop->contains(I->getParent()) && Set.insert(I).second)
+ Worklist.push_back(I);
+ }
+ }
+ }
+
+ /// \brief Clones the original loop.
+ ///
+ /// Updates LoopInfo and DominatorTree using the information that block \p
+ /// LoopDomBB dominates the loop.
+ Loop *cloneLoopWithPreheader(BasicBlock *InsertBefore, BasicBlock *LoopDomBB,
+ unsigned Index, LoopInfo *LI,
+ DominatorTree *DT) {
+ ClonedLoop = ::cloneLoopWithPreheader(InsertBefore, LoopDomBB, OrigLoop,
+ VMap, Twine(".ldist") + Twine(Index),
+ LI, DT, ClonedLoopBlocks);
+ return ClonedLoop;
+ }
+
+ /// \brief The cloned loop. If this partition is mapped to the original loop,
+ /// this is null.
+ const Loop *getClonedLoop() const { return ClonedLoop; }
+
+ /// \brief Returns the loop where this partition ends up after distribution.
+  /// If this partition is mapped to the original loop, that loop is returned.
+ const Loop *getDistributedLoop() const {
+ return ClonedLoop ? ClonedLoop : OrigLoop;
+ }
+
+ /// \brief The VMap that is populated by cloning and then used in
+  /// remapInstructions() to remap the cloned instructions.
+ ValueToValueMapTy &getVMap() { return VMap; }
+
+ /// \brief Remaps the cloned instructions using VMap.
+ void remapInstructions() { remapInstructionsInLoop(ClonedLoopBlocks, VMap); }
+
+ /// \brief Based on the set of instructions selected for this partition,
+ /// removes the unnecessary ones.
+ void removeUnusedInsts() {
+ SmallVector<Instruction *, 8> Unused;
+
+ for (auto *Block : OrigLoop->getBlocks())
+ for (auto &Inst : *Block)
+ if (!Set.count(&Inst)) {
+ Instruction *NewInst = &Inst;
+ if (!VMap.empty())
+ NewInst = cast<Instruction>(VMap[NewInst]);
+
+ assert(!isa<BranchInst>(NewInst) &&
+ "Branches are marked used early on");
+ Unused.push_back(NewInst);
+ }
+
+ // Delete the instructions backwards, as it has a reduced likelihood of
+ // having to update as many def-use and use-def chains.
+ for (auto I = Unused.rbegin(), E = Unused.rend(); I != E; ++I) {
+ auto *Inst = *I;
+
+ if (!Inst->use_empty())
+ Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
+ Inst->eraseFromParent();
+ }
+ }
+
+ void print() const {
+ if (DepCycle)
+ dbgs() << " (cycle)\n";
+ for (auto *I : Set)
+ // Prefix with the block name.
+ dbgs() << " " << I->getParent()->getName() << ":" << *I << "\n";
+ }
+
+ void printBlocks() const {
+ for (auto *BB : getDistributedLoop()->getBlocks())
+ dbgs() << *BB;
+ }
+
+private:
+ /// \brief Instructions from OrigLoop selected for this partition.
+ InstructionSet Set;
+
+ /// \brief Whether this partition contains a dependence cycle.
+ bool DepCycle;
+
+ /// \brief The original loop.
+ Loop *OrigLoop;
+
+ /// \brief The cloned loop. If this partition is mapped to the original loop,
+ /// this is null.
+ Loop *ClonedLoop;
+
+ /// \brief The blocks of ClonedLoop including the preheader. If this
+ /// partition is mapped to the original loop, this is empty.
+ SmallVector<BasicBlock *, 8> ClonedLoopBlocks;
+
+  /// \brief These get populated once the set of instructions has been
+ /// finalized. If this partition is mapped to the original loop, these are not
+ /// set.
+ ValueToValueMapTy VMap;
+};
+
+/// \brief Holds the set of Partitions. It populates them, merges them and then
+/// clones the loops.
+class InstPartitionContainer {
+ typedef DenseMap<Instruction *, int> InstToPartitionIdT;
+
+public:
+ InstPartitionContainer(Loop *L, LoopInfo *LI, DominatorTree *DT)
+ : L(L), LI(LI), DT(DT) {}
+
+ /// \brief Returns the number of partitions.
+ unsigned getSize() const { return PartitionContainer.size(); }
+
+ /// \brief Adds \p Inst into the current partition if that is marked to
+ /// contain cycles. Otherwise start a new partition for it.
+ void addToCyclicPartition(Instruction *Inst) {
+    // If the current partition is non-cyclic, start a new one.
+ if (PartitionContainer.empty() || !PartitionContainer.back().hasDepCycle())
+ PartitionContainer.emplace_back(Inst, L, /*DepCycle=*/true);
+ else
+ PartitionContainer.back().add(Inst);
+ }
+
+ /// \brief Adds \p Inst into a partition that is not marked to contain
+ /// dependence cycles.
+ ///
+ // Initially we isolate memory instructions into as many partitions as
+ // possible, then later we may merge them back together.
+ void addToNewNonCyclicPartition(Instruction *Inst) {
+ PartitionContainer.emplace_back(Inst, L);
+ }
+
+ /// \brief Merges adjacent non-cyclic partitions.
+ ///
+ /// The idea is that we currently only want to isolate the non-vectorizable
+  /// partition. We could later allow more distribution among these partitions
+ /// too.
+ void mergeAdjacentNonCyclic() {
+ mergeAdjacentPartitionsIf(
+ [](const InstPartition *P) { return !P->hasDepCycle(); });
+ }
+
+ /// \brief If a partition contains only conditional stores, we won't vectorize
+ /// it. Try to merge it with a previous cyclic partition.
+ void mergeNonIfConvertible() {
+ mergeAdjacentPartitionsIf([&](const InstPartition *Partition) {
+ if (Partition->hasDepCycle())
+ return true;
+
+ // Now, check if all stores are conditional in this partition.
+ bool seenStore = false;
+
+ for (auto *Inst : *Partition)
+ if (isa<StoreInst>(Inst)) {
+ seenStore = true;
+ if (!LoopAccessInfo::blockNeedsPredication(Inst->getParent(), L, DT))
+ return false;
+ }
+ return seenStore;
+ });
+ }
+
+ /// \brief Merges the partitions according to various heuristics.
+ void mergeBeforePopulating() {
+ mergeAdjacentNonCyclic();
+ if (!DistributeNonIfConvertible)
+ mergeNonIfConvertible();
+ }
+
+ /// \brief Merges partitions in order to ensure that no loads are duplicated.
+ ///
+ /// We can't duplicate loads because that could potentially reorder them.
+ /// LoopAccessAnalysis provides dependency information with the context that
+  /// the order of memory operations is preserved.
+ ///
+  /// Returns true if any partitions were merged.
+ bool mergeToAvoidDuplicatedLoads() {
+ typedef DenseMap<Instruction *, InstPartition *> LoadToPartitionT;
+ typedef EquivalenceClasses<InstPartition *> ToBeMergedT;
+
+ LoadToPartitionT LoadToPartition;
+ ToBeMergedT ToBeMerged;
+
+ // Step through the partitions and create equivalence between partitions
+ // that contain the same load. Also put partitions in between them in the
+ // same equivalence class to avoid reordering of memory operations.
+ for (PartitionContainerT::iterator I = PartitionContainer.begin(),
+ E = PartitionContainer.end();
+ I != E; ++I) {
+ auto *PartI = &*I;
+
+ // If a load occurs in two partitions PartI and PartJ, merge all
+ // partitions (PartI, PartJ] into PartI.
+ for (Instruction *Inst : *PartI)
+ if (isa<LoadInst>(Inst)) {
+ bool NewElt;
+ LoadToPartitionT::iterator LoadToPart;
+
+ std::tie(LoadToPart, NewElt) =
+ LoadToPartition.insert(std::make_pair(Inst, PartI));
+ if (!NewElt) {
+ DEBUG(dbgs() << "Merging partitions due to this load in multiple "
+ << "partitions: " << PartI << ", "
+ << LoadToPart->second << "\n" << *Inst << "\n");
+
+ auto PartJ = I;
+ do {
+ --PartJ;
+ ToBeMerged.unionSets(PartI, &*PartJ);
+ } while (&*PartJ != LoadToPart->second);
+ }
+ }
+ }
+ if (ToBeMerged.empty())
+ return false;
+
+ // Merge the member of an equivalence class into its class leader. This
+ // makes the members empty.
+ for (ToBeMergedT::iterator I = ToBeMerged.begin(), E = ToBeMerged.end();
+ I != E; ++I) {
+ if (!I->isLeader())
+ continue;
+
+ auto PartI = I->getData();
+ for (auto PartJ : make_range(std::next(ToBeMerged.member_begin(I)),
+ ToBeMerged.member_end())) {
+ PartJ->moveTo(*PartI);
+ }
+ }
+
+ // Remove the empty partitions.
+ PartitionContainer.remove_if(
+ [](const InstPartition &P) { return P.empty(); });
+
+ return true;
+ }
+
+ /// \brief Sets up the mapping between instructions to partitions. If the
+ /// instruction is duplicated across multiple partitions, set the entry to -1.
+ void setupPartitionIdOnInstructions() {
+ int PartitionID = 0;
+ for (const auto &Partition : PartitionContainer) {
+ for (Instruction *Inst : Partition) {
+ bool NewElt;
+ InstToPartitionIdT::iterator Iter;
+
+ std::tie(Iter, NewElt) =
+ InstToPartitionId.insert(std::make_pair(Inst, PartitionID));
+ if (!NewElt)
+ Iter->second = -1;
+ }
+ ++PartitionID;
+ }
+ }
+
+ /// \brief Populates the partition with everything that the seeding
+ /// instructions require.
+ void populateUsedSet() {
+ for (auto &P : PartitionContainer)
+ P.populateUsedSet();
+ }
+
+ /// \brief This performs the main chunk of the work of cloning the loops for
+ /// the partitions.
+ void cloneLoops(Pass *P) {
+ BasicBlock *OrigPH = L->getLoopPreheader();
+ // At this point the predecessor of the preheader is either the memcheck
+ // block or the top part of the original preheader.
+ BasicBlock *Pred = OrigPH->getSinglePredecessor();
+ assert(Pred && "Preheader does not have a single predecessor");
+ BasicBlock *ExitBlock = L->getExitBlock();
+ assert(ExitBlock && "No single exit block");
+ Loop *NewLoop;
+
+ assert(!PartitionContainer.empty() && "at least two partitions expected");
+ // We're cloning the preheader along with the loop so we already made sure
+ // it was empty.
+ assert(&*OrigPH->begin() == OrigPH->getTerminator() &&
+ "preheader not empty");
+
+ // Create a loop for each partition except the last. Clone the original
+ // loop before PH along with adding a preheader for the cloned loop. Then
+ // update PH to point to the newly added preheader.
+ BasicBlock *TopPH = OrigPH;
+ unsigned Index = getSize() - 1;
+ for (auto I = std::next(PartitionContainer.rbegin()),
+ E = PartitionContainer.rend();
+ I != E; ++I, --Index, TopPH = NewLoop->getLoopPreheader()) {
+ auto *Part = &*I;
+
+ NewLoop = Part->cloneLoopWithPreheader(TopPH, Pred, Index, LI, DT);
+
+ Part->getVMap()[ExitBlock] = TopPH;
+ Part->remapInstructions();
+ }
+ Pred->getTerminator()->replaceUsesOfWith(OrigPH, TopPH);
+
+ // Now go in forward order and update the immediate dominator for the
+ // preheaders with the exiting block of the previous loop. Dominance
+ // within the loop is updated in cloneLoopWithPreheader.
+ for (auto Curr = PartitionContainer.cbegin(),
+ Next = std::next(PartitionContainer.cbegin()),
+ E = PartitionContainer.cend();
+ Next != E; ++Curr, ++Next)
+ DT->changeImmediateDominator(
+ Next->getDistributedLoop()->getLoopPreheader(),
+ Curr->getDistributedLoop()->getExitingBlock());
+ }
+
+ /// \brief Removes the dead instructions from the cloned loops.
+ void removeUnusedInsts() {
+ for (auto &Partition : PartitionContainer)
+ Partition.removeUnusedInsts();
+ }
+
+ /// \brief For each memory pointer, it computes the partitionId the pointer is
+ /// used in.
+ ///
+ /// This returns an array of int where the I-th entry corresponds to I-th
+ /// entry in LAI.getRuntimePointerCheck(). If the pointer is used in multiple
+ /// partitions its entry is set to -1.
+ SmallVector<int, 8>
+ computePartitionSetForPointers(const LoopAccessInfo &LAI) {
+ const LoopAccessInfo::RuntimePointerCheck *RtPtrCheck =
+ LAI.getRuntimePointerCheck();
+
+ unsigned N = RtPtrCheck->Pointers.size();
+ SmallVector<int, 8> PtrToPartitions(N);
+ for (unsigned I = 0; I < N; ++I) {
+ Value *Ptr = RtPtrCheck->Pointers[I];
+ auto Instructions =
+ LAI.getInstructionsForAccess(Ptr, RtPtrCheck->IsWritePtr[I]);
+
+ int &Partition = PtrToPartitions[I];
+ // First set it to uninitialized.
+ Partition = -2;
+ for (Instruction *Inst : Instructions) {
+ // Note that this could be -1 if Inst is duplicated across multiple
+ // partitions.
+ int ThisPartition = this->InstToPartitionId[Inst];
+ if (Partition == -2)
+ Partition = ThisPartition;
+ // -1 means belonging to multiple partitions.
+ else if (Partition == -1)
+ break;
+ else if (Partition != (int)ThisPartition)
+ Partition = -1;
+ }
+ assert(Partition != -2 && "Pointer not belonging to any partition");
+ }
+
+ return PtrToPartitions;
+ }
+
+ void print(raw_ostream &OS) const {
+ unsigned Index = 0;
+ for (const auto &P : PartitionContainer) {
+ OS << "Partition " << Index++ << " (" << &P << "):\n";
+ P.print();
+ }
+ }
+
+ void dump() const { print(dbgs()); }
+
+#ifndef NDEBUG
+ friend raw_ostream &operator<<(raw_ostream &OS,
+ const InstPartitionContainer &Partitions) {
+ Partitions.print(OS);
+ return OS;
+ }
+#endif
+
+ void printBlocks() const {
+ unsigned Index = 0;
+ for (const auto &P : PartitionContainer) {
+ dbgs() << "\nPartition " << Index++ << " (" << &P << "):\n";
+ P.printBlocks();
+ }
+ }
+
+private:
+ typedef std::list<InstPartition> PartitionContainerT;
+
+ /// \brief List of partitions.
+ PartitionContainerT PartitionContainer;
+
+ /// \brief Mapping from Instruction to partition Id. If the instruction
+ /// belongs to multiple partitions the entry contains -1.
+ InstToPartitionIdT InstToPartitionId;
+
+ Loop *L;
+ LoopInfo *LI;
+ DominatorTree *DT;
+
+ /// \brief The control structure to merge adjacent partitions if both satisfy
+ /// the \p Predicate.
+ template <class UnaryPredicate>
+ void mergeAdjacentPartitionsIf(UnaryPredicate Predicate) {
+ InstPartition *PrevMatch = nullptr;
+ for (auto I = PartitionContainer.begin(); I != PartitionContainer.end();) {
+ auto DoesMatch = Predicate(&*I);
+ if (PrevMatch == nullptr && DoesMatch) {
+ PrevMatch = &*I;
+ ++I;
+ } else if (PrevMatch != nullptr && DoesMatch) {
+ I->moveTo(*PrevMatch);
+ I = PartitionContainer.erase(I);
+ } else {
+ PrevMatch = nullptr;
+ ++I;
+ }
+ }
+ }
+};
+
+/// \brief For each memory instruction, this class maintains the difference
+/// between the number of unsafe dependences that start at this instruction
+/// and the number that end here.
+///
+/// By traversing the memory instructions in program order and accumulating this
+/// number, we know whether any unsafe dependence crosses over a program point.
+class MemoryInstructionDependences {
+ typedef MemoryDepChecker::Dependence Dependence;
+
+public:
+ struct Entry {
+ Instruction *Inst;
+ unsigned NumUnsafeDependencesStartOrEnd;
+
+ Entry(Instruction *Inst) : Inst(Inst), NumUnsafeDependencesStartOrEnd(0) {}
+ };
+
+ typedef SmallVector<Entry, 8> AccessesType;
+
+ AccessesType::const_iterator begin() const { return Accesses.begin(); }
+ AccessesType::const_iterator end() const { return Accesses.end(); }
+
+ MemoryInstructionDependences(
+ const SmallVectorImpl<Instruction *> &Instructions,
+ const SmallVectorImpl<Dependence> &InterestingDependences) {
+ Accesses.append(Instructions.begin(), Instructions.end());
+
+ DEBUG(dbgs() << "Backward dependences:\n");
+ for (auto &Dep : InterestingDependences)
+ if (Dep.isPossiblyBackward()) {
+ // Note that the designations source and destination follow the program
+ // order, i.e. source is always first. (The direction is given by the
+ // DepType.)
+ ++Accesses[Dep.Source].NumUnsafeDependencesStartOrEnd;
+ --Accesses[Dep.Destination].NumUnsafeDependencesStartOrEnd;
+
+ DEBUG(Dep.print(dbgs(), 2, Instructions));
+ }
+ }
+
+private:
+ AccessesType Accesses;
+};
+
+/// \brief Handles the loop versioning based on memchecks.
+class RuntimeCheckEmitter {
+public:
+ RuntimeCheckEmitter(const LoopAccessInfo &LAI, Loop *L, LoopInfo *LI,
+ DominatorTree *DT)
+ : OrigLoop(L), NonDistributedLoop(nullptr), LAI(LAI), LI(LI), DT(DT) {}
+
+ /// \brief Given the \p Partitions formed by Loop Distribution, it determines
+ /// in which partition each pointer is used.
+ void partitionPointers(InstPartitionContainer &Partitions) {
+    // Set up partition id in PtrRtChecks. Ptr -> Access -> Instruction ->
+ // Partition.
+ PtrToPartition = Partitions.computePartitionSetForPointers(LAI);
+
+ DEBUG(dbgs() << "\nPointers:\n");
+ DEBUG(LAI.getRuntimePointerCheck()->print(dbgs(), 0, &PtrToPartition));
+ }
+
+ /// \brief Returns true if we need memchecks to distribute the loop.
+ bool needsRuntimeChecks() const {
+ return LAI.getRuntimePointerCheck()->needsAnyChecking(&PtrToPartition);
+ }
+
+ /// \brief Performs the CFG manipulation part of versioning the loop including
+ /// the DominatorTree and LoopInfo updates.
+ void versionLoop(Pass *P) {
+ Instruction *FirstCheckInst;
+ Instruction *MemRuntimeCheck;
+ // Add the memcheck in the original preheader (this is empty initially).
+ BasicBlock *MemCheckBB = OrigLoop->getLoopPreheader();
+ std::tie(FirstCheckInst, MemRuntimeCheck) =
+ LAI.addRuntimeCheck(MemCheckBB->getTerminator(), &PtrToPartition);
+ assert(MemRuntimeCheck && "called even though needsAnyChecking = false");
+
+ // Rename the block to make the IR more readable.
+ MemCheckBB->setName(OrigLoop->getHeader()->getName() + ".ldist.memcheck");
+
+ // Create empty preheader for the loop (and after cloning for the
+ // original/nondist loop).
+ BasicBlock *PH =
+ SplitBlock(MemCheckBB, MemCheckBB->getTerminator(), DT, LI);
+ PH->setName(OrigLoop->getHeader()->getName() + ".ph");
+
+ // Clone the loop including the preheader.
+ //
+ // FIXME: This does not currently preserve SimplifyLoop because the exit
+ // block is a join between the two loops.
+ SmallVector<BasicBlock *, 8> NonDistributedLoopBlocks;
+ NonDistributedLoop =
+ cloneLoopWithPreheader(PH, MemCheckBB, OrigLoop, VMap, ".ldist.nondist",
+ LI, DT, NonDistributedLoopBlocks);
+ remapInstructionsInLoop(NonDistributedLoopBlocks, VMap);
+
+ // Insert the conditional branch based on the result of the memchecks.
+ Instruction *OrigTerm = MemCheckBB->getTerminator();
+ BranchInst::Create(NonDistributedLoop->getLoopPreheader(),
+ OrigLoop->getLoopPreheader(), MemRuntimeCheck, OrigTerm);
+ OrigTerm->eraseFromParent();
+
+ // The loops merge in the original exit block. This is now dominated by the
+ // memchecking block.
+ DT->changeImmediateDominator(OrigLoop->getExitBlock(), MemCheckBB);
+ }
+
+ /// \brief Adds the necessary PHI nodes for the versioned loops based on the
+ /// loop-defined values used outside of the loop.
+ void addPHINodes(const SmallVectorImpl<Instruction *> &DefsUsedOutside) {
+ BasicBlock *PHIBlock = OrigLoop->getExitBlock();
+ assert(PHIBlock && "No single successor to loop exit block");
+
+ for (auto *Inst : DefsUsedOutside) {
+ auto *NonDistInst = cast<Instruction>(VMap[Inst]);
+ PHINode *PN;
+
+ // First see if we have a single-operand PHI with the value defined by the
+ // original loop.
+ for (auto I = PHIBlock->begin(); (PN = dyn_cast<PHINode>(I)); ++I) {
+        assert(PN->getNumOperands() == 1 &&
+               "Exit block should only have one predecessor");
+ if (PN->getIncomingValue(0) == Inst)
+ break;
+ }
+ // If not create it.
+ if (!PN) {
+ PN = PHINode::Create(Inst->getType(), 2, Inst->getName() + ".ldist",
+ PHIBlock->begin());
+ for (auto *User : Inst->users())
+ if (!OrigLoop->contains(cast<Instruction>(User)->getParent()))
+ User->replaceUsesOfWith(Inst, PN);
+ PN->addIncoming(Inst, OrigLoop->getExitingBlock());
+ }
+ // Add the new incoming value from the non-distributed loop.
+ PN->addIncoming(NonDistInst, NonDistributedLoop->getExitingBlock());
+ }
+ }
+
+private:
+ /// \brief The original loop. This becomes the "versioned" one, i.e. control
+  /// goes here if the memchecks all pass.
+ Loop *OrigLoop;
+ /// \brief The fall-back loop, i.e. if any of the memchecks fail.
+ Loop *NonDistributedLoop;
+
+ /// \brief For each memory pointer it contains the partitionId it is used in.
+ ///
+ /// The I-th entry corresponds to I-th entry in LAI.getRuntimePointerCheck().
+ /// If the pointer is used in multiple partitions the entry is set to -1.
+ SmallVector<int, 8> PtrToPartition;
+
+ /// \brief This maps the instructions from OrigLoop to their counterpart in
+ /// NonDistributedLoop.
+ ValueToValueMapTy VMap;
+
+ /// \brief Analyses used.
+ const LoopAccessInfo &LAI;
+ LoopInfo *LI;
+ DominatorTree *DT;
+};
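// A rough source-level picture of the control flow that versionLoop() above
// sets up; the arrays and the simplified overlap test are illustrative
// assumptions, not code from this patch.
static void versionedForm(int *a, int *c, const int *b, int n) {
  // ".ldist.memcheck": do the regions written through 'a' and 'c' stay disjoint?
  bool NoOverlap = (a + n <= c || c + n <= a);
  if (NoOverlap) {
    for (int i = 1; i < n; ++i)   // distributed ".ldist" loops
      a[i] = a[i - 1] + b[i];
    for (int i = 1; i < n; ++i)
      c[i] = 2 * b[i];
  } else {
    for (int i = 1; i < n; ++i) { // ".ldist.nondist" fall-back clone
      a[i] = a[i - 1] + b[i];
      c[i] = 2 * b[i];
    }
  }
}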
+
+/// \brief Returns the instructions that use values defined in the loop.
+static SmallVector<Instruction *, 8> findDefsUsedOutsideOfLoop(Loop *L) {
+ SmallVector<Instruction *, 8> UsedOutside;
+
+ for (auto *Block : L->getBlocks())
+ // FIXME: I believe that this could use copy_if if the Inst reference could
+ // be adapted into a pointer.
+ for (auto &Inst : *Block) {
+ auto Users = Inst.users();
+ if (std::any_of(Users.begin(), Users.end(), [&](User *U) {
+ auto *Use = cast<Instruction>(U);
+ return !L->contains(Use->getParent());
+ }))
+ UsedOutside.push_back(&Inst);
+ }
+
+ return UsedOutside;
+}
+
+/// \brief The pass class.
+class LoopDistribute : public FunctionPass {
+public:
+ LoopDistribute() : FunctionPass(ID) {
+ initializeLoopDistributePass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F) override {
+ LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+ LAA = &getAnalysis<LoopAccessAnalysis>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+
+    // Build up a worklist of inner loops to process. This is necessary as the
+ // act of distributing a loop creates new loops and can invalidate iterators
+ // across the loops.
+ SmallVector<Loop *, 8> Worklist;
+
+ for (Loop *TopLevelLoop : *LI)
+ for (Loop *L : depth_first(TopLevelLoop))
+ // We only handle inner-most loops.
+ if (L->empty())
+ Worklist.push_back(L);
+
+ // Now walk the identified inner loops.
+ bool Changed = false;
+ for (Loop *L : Worklist)
+ Changed |= processLoop(L);
+
+ // Process each loop nest in the function.
+ return Changed;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<LoopInfoWrapperPass>();
+ AU.addPreserved<LoopInfoWrapperPass>();
+ AU.addRequired<LoopAccessAnalysis>();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
+ }
+
+ static char ID;
+
+private:
+ /// \brief Try to distribute an inner-most loop.
+ bool processLoop(Loop *L) {
+ assert(L->empty() && "Only process inner loops.");
+
+ DEBUG(dbgs() << "\nLDist: In \"" << L->getHeader()->getParent()->getName()
+ << "\" checking " << *L << "\n");
+
+ BasicBlock *PH = L->getLoopPreheader();
+ if (!PH) {
+ DEBUG(dbgs() << "Skipping; no preheader");
+ return false;
+ }
+ if (!L->getExitBlock()) {
+ DEBUG(dbgs() << "Skipping; multiple exit blocks");
+ return false;
+ }
+ // LAA will check that we only have a single exiting block.
+
+ const LoopAccessInfo &LAI = LAA->getInfo(L, ValueToValueMap());
+
+ // Currently, we only distribute to isolate the part of the loop with
+ // dependence cycles to enable partial vectorization.
+ if (LAI.canVectorizeMemory()) {
+ DEBUG(dbgs() << "Skipping; memory operations are safe for vectorization");
+ return false;
+ }
+ auto *InterestingDependences =
+ LAI.getDepChecker().getInterestingDependences();
+ if (!InterestingDependences || InterestingDependences->empty()) {
+ DEBUG(dbgs() << "Skipping; No unsafe dependences to isolate");
+ return false;
+ }
+
+ InstPartitionContainer Partitions(L, LI, DT);
+
+    // First, go through the memory operations and assign them to consecutive
+ // partitions (the order of partitions follows program order). Put those
+    // with unsafe dependences into a "cyclic" partition; otherwise put each store
+ // in its own "non-cyclic" partition (we'll merge these later).
+ //
+ // Note that a memory operation (e.g. Load2 below) at a program point that
+ // has an unsafe dependence (Store3->Load1) spanning over it must be
+ // included in the same cyclic partition as the dependent operations. This
+ // is to preserve the original program order after distribution. E.g.:
+ //
+ // NumUnsafeDependencesStartOrEnd NumUnsafeDependencesActive
+ // Load1 -. 1 0->1
+ // Load2 | /Unsafe/ 0 1
+ // Store3 -' -1 1->0
+ // Load4 0 0
+ //
+ // NumUnsafeDependencesActive > 0 indicates this situation and in this case
+ // we just keep assigning to the same cyclic partition until
+ // NumUnsafeDependencesActive reaches 0.
+ const MemoryDepChecker &DepChecker = LAI.getDepChecker();
+ MemoryInstructionDependences MID(DepChecker.getMemoryInstructions(),
+ *InterestingDependences);
+
+ int NumUnsafeDependencesActive = 0;
+ for (auto &InstDep : MID) {
+ Instruction *I = InstDep.Inst;
+      // We update NumUnsafeDependencesActive post-instruction, so catch the
+ // start of a dependence directly via NumUnsafeDependencesStartOrEnd.
+ if (NumUnsafeDependencesActive ||
+ InstDep.NumUnsafeDependencesStartOrEnd > 0)
+ Partitions.addToCyclicPartition(I);
+ else
+ Partitions.addToNewNonCyclicPartition(I);
+ NumUnsafeDependencesActive += InstDep.NumUnsafeDependencesStartOrEnd;
+ assert(NumUnsafeDependencesActive >= 0 &&
+ "Negative number of dependences active");
+ }
+
+ // Add partitions for values used outside. These partitions can be out of
+ // order from the original program order. This is OK because if the
+ // partition uses a load we will merge this partition with the original
+ // partition of the load that we set up in the previous loop (see
+ // mergeToAvoidDuplicatedLoads).
+ auto DefsUsedOutside = findDefsUsedOutsideOfLoop(L);
+ for (auto *Inst : DefsUsedOutside)
+ Partitions.addToNewNonCyclicPartition(Inst);
+
+ DEBUG(dbgs() << "Seeded partitions:\n" << Partitions);
+ if (Partitions.getSize() < 2)
+ return false;
+
+ // Run the merge heuristics: Merge non-cyclic adjacent partitions since we
+ // should be able to vectorize these together.
+ Partitions.mergeBeforePopulating();
+ DEBUG(dbgs() << "\nMerged partitions:\n" << Partitions);
+ if (Partitions.getSize() < 2)
+ return false;
+
+ // Now, populate the partitions with non-memory operations.
+ Partitions.populateUsedSet();
+ DEBUG(dbgs() << "\nPopulated partitions:\n" << Partitions);
+
+ // In order to preserve original lexical order for loads, keep them in the
+ // partition that we set up in the MemoryInstructionDependences loop.
+ if (Partitions.mergeToAvoidDuplicatedLoads()) {
+ DEBUG(dbgs() << "\nPartitions merged to ensure unique loads:\n"
+ << Partitions);
+ if (Partitions.getSize() < 2)
+ return false;
+ }
+
+ DEBUG(dbgs() << "\nDistributing loop: " << *L << "\n");
+    // We're done forming the partitions; set up the reverse mapping from
+ // instructions to partitions.
+ Partitions.setupPartitionIdOnInstructions();
+
+ // To keep things simple have an empty preheader before we version or clone
+ // the loop. (Also split if this has no predecessor, i.e. entry, because we
+ // rely on PH having a predecessor.)
+ if (!PH->getSinglePredecessor() || &*PH->begin() != PH->getTerminator())
+ SplitBlock(PH, PH->getTerminator(), DT, LI);
+
+    // If we need run-time checks to disambiguate the pointers, version the
+    // loop now.
+ RuntimeCheckEmitter RtCheckEmitter(LAI, L, LI, DT);
+ RtCheckEmitter.partitionPointers(Partitions);
+ if (RtCheckEmitter.needsRuntimeChecks()) {
+ RtCheckEmitter.versionLoop(this);
+ RtCheckEmitter.addPHINodes(DefsUsedOutside);
+ }
+
+ // Create identical copies of the original loop for each partition and hook
+ // them up sequentially.
+ Partitions.cloneLoops(this);
+
+    // Now, we remove the instructions from each loop that don't belong to that
+ // partition.
+ Partitions.removeUnusedInsts();
+ DEBUG(dbgs() << "\nAfter removing unused Instrs:\n");
+ DEBUG(Partitions.printBlocks());
+
+ if (LDistVerify) {
+ LI->verify();
+ DT->verifyDomTree();
+ }
+
+ ++NumLoopsDistributed;
+ return true;
+ }
+
+ // Analyses used.
+ LoopInfo *LI;
+ LoopAccessAnalysis *LAA;
+ DominatorTree *DT;
+};
+} // anonymous namespace
+
+char LoopDistribute::ID;
+static const char ldist_name[] = "Loop Distribution";
+
+INITIALIZE_PASS_BEGIN(LoopDistribute, LDIST_NAME, ldist_name, false, false)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(LoopAccessAnalysis)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_END(LoopDistribute, LDIST_NAME, ldist_name, false, false)
+
+namespace llvm {
+FunctionPass *createLoopDistributePass() { return new LoopDistribute(); }
+}
diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index a12f5a7a0334..f92ecd4efdae 100644
--- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -47,6 +47,7 @@
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
@@ -56,7 +57,6 @@
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
@@ -130,7 +130,6 @@ namespace {
class LoopIdiomRecognize : public LoopPass {
Loop *CurLoop;
- const DataLayout *DL;
DominatorTree *DT;
ScalarEvolution *SE;
TargetLibraryInfo *TLI;
@@ -139,7 +138,10 @@ namespace {
static char ID;
explicit LoopIdiomRecognize() : LoopPass(ID) {
initializeLoopIdiomRecognizePass(*PassRegistry::getPassRegistry());
- DL = nullptr; DT = nullptr; SE = nullptr; TLI = nullptr; TTI = nullptr;
+ DT = nullptr;
+ SE = nullptr;
+ TLI = nullptr;
+ TTI = nullptr;
}
bool runOnLoop(Loop *L, LPPassManager &LPM) override;
@@ -163,8 +165,8 @@ namespace {
/// loop preheaders be inserted into the CFG.
///
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<LoopInfo>();
- AU.addPreserved<LoopInfo>();
+ AU.addRequired<LoopInfoWrapperPass>();
+ AU.addPreserved<LoopInfoWrapperPass>();
AU.addRequiredID(LoopSimplifyID);
AU.addPreservedID(LoopSimplifyID);
AU.addRequiredID(LCSSAID);
@@ -175,16 +177,8 @@ namespace {
AU.addPreserved<ScalarEvolution>();
AU.addPreserved<DominatorTreeWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<TargetLibraryInfo>();
- AU.addRequired<TargetTransformInfo>();
- }
-
- const DataLayout *getDataLayout() {
- if (DL)
- return DL;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
- return DL;
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
}
DominatorTree *getDominatorTree() {
@@ -197,11 +191,16 @@ namespace {
}
TargetLibraryInfo *getTargetLibraryInfo() {
- return TLI ? TLI : (TLI = &getAnalysis<TargetLibraryInfo>());
+ if (!TLI)
+ TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+
+ return TLI;
}
const TargetTransformInfo *getTargetTransformInfo() {
- return TTI ? TTI : (TTI = &getAnalysis<TargetTransformInfo>());
+ return TTI ? TTI
+ : (TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
+ *CurLoop->getHeader()->getParent()));
}
Loop *getLoop() const { return CurLoop; }
@@ -215,14 +214,14 @@ namespace {
char LoopIdiomRecognize::ID = 0;
INITIALIZE_PASS_BEGIN(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
false, false)
-INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
-INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
false, false)
@@ -232,44 +231,13 @@ Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognize(); }
/// and zero out all the operands of this instruction. If any of them become
/// dead, delete them and the computation tree that feeds them.
///
-static void deleteDeadInstruction(Instruction *I, ScalarEvolution &SE,
+static void deleteDeadInstruction(Instruction *I,
const TargetLibraryInfo *TLI) {
- SmallVector<Instruction*, 32> NowDeadInsts;
-
- NowDeadInsts.push_back(I);
-
- // Before we touch this instruction, remove it from SE!
- do {
- Instruction *DeadInst = NowDeadInsts.pop_back_val();
-
- // This instruction is dead, zap it, in stages. Start by removing it from
- // SCEV.
- SE.forgetValue(DeadInst);
-
- for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
- Value *Op = DeadInst->getOperand(op);
- DeadInst->setOperand(op, nullptr);
-
- // If this operand just became dead, add it to the NowDeadInsts list.
- if (!Op->use_empty()) continue;
-
- if (Instruction *OpI = dyn_cast<Instruction>(Op))
- if (isInstructionTriviallyDead(OpI, TLI))
- NowDeadInsts.push_back(OpI);
- }
-
- DeadInst->eraseFromParent();
-
- } while (!NowDeadInsts.empty());
-}
-
-/// deleteIfDeadInstruction - If the specified value is a dead instruction,
-/// delete it and any recursively used instructions.
-static void deleteIfDeadInstruction(Value *V, ScalarEvolution &SE,
- const TargetLibraryInfo *TLI) {
- if (Instruction *I = dyn_cast<Instruction>(V))
- if (isInstructionTriviallyDead(I, TLI))
- deleteDeadInstruction(I, SE, TLI);
+ SmallVector<Value *, 16> Operands(I->value_op_begin(), I->value_op_end());
+ I->replaceAllUsesWith(UndefValue::get(I->getType()));
+ I->eraseFromParent();
+ for (Value *Op : Operands)
+ RecursivelyDeleteTriviallyDeadInstructions(Op, TLI);
}
//===----------------------------------------------------------------------===//
@@ -285,7 +253,7 @@ static void deleteIfDeadInstruction(Value *V, ScalarEvolution &SE,
// the concern of breaking data dependence.
bool LIRUtil::isAlmostEmpty(BasicBlock *BB) {
if (BranchInst *Br = getBranch(BB)) {
- return Br->isUnconditional() && BB->size() == 1;
+ return Br->isUnconditional() && Br == BB->begin();
}
return false;
}
@@ -542,7 +510,7 @@ void NclPopcountRecognize::transform(Instruction *CntInst,
cast<ICmpInst>(Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
PreCond->replaceAllUsesWith(NewPreCond);
- deleteDeadInstruction(PreCond, *SE, TLI);
+ RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
}
// Step 3: Note that the population count is exactly the trip count of the
@@ -592,15 +560,7 @@ void NclPopcountRecognize::transform(Instruction *CntInst,
// Step 4: All the references to the original population counter outside
// the loop are replaced with the NewCount -- the value returned from
// __builtin_ctpop().
- {
- SmallVector<Value *, 4> CntUses;
- for (User *U : CntInst->users())
- if (cast<Instruction>(U)->getParent() != Body)
- CntUses.push_back(U);
- for (unsigned Idx = 0; Idx < CntUses.size(); Idx++) {
- (cast<Instruction>(CntUses[Idx]))->replaceUsesOfWith(CntInst, NewCount);
- }
- }
+ CntInst->replaceUsesOutsideBlock(NewCount, Body);
// step 5: Forget the "non-computable" trip-count SCEV associated with the
// loop. The loop would otherwise not be deleted even if it becomes empty.
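// An illustrative source-level form of the idiom NclPopcountRecognize
// handles; the helper names and the use of the GCC/Clang builtin are
// assumptions, not code from this patch.
static int beforePopcountIdiom(unsigned long long X) {
  int Cnt = 0;
  while (X) {
    X &= X - 1;                      // clear the lowest set bit
    ++Cnt;
  }
  return Cnt;
}
static int afterPopcountIdiom(unsigned long long X) {
  return __builtin_popcountll(X);    // lowered to the ctpop intrinsic
}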
@@ -651,7 +611,9 @@ bool NclPopcountRecognize::recognize() {
bool LoopIdiomRecognize::runOnCountableLoop() {
const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
- if (isa<SCEVCouldNotCompute>(BECount)) return false;
+  assert(!isa<SCEVCouldNotCompute>(BECount) &&
+         "runOnCountableLoop() called on a loop without a predictable "
+         "backedge-taken count");
// If this loop executes exactly one time, then it should be peeled, not
// optimized by this pass.
@@ -659,15 +621,11 @@ bool LoopIdiomRecognize::runOnCountableLoop() {
if (BECst->getValue()->getValue() == 0)
return false;
- // We require target data for now.
- if (!getDataLayout())
- return false;
-
// set DT
(void)getDominatorTree();
- LoopInfo &LI = getAnalysis<LoopInfo>();
- TLI = &getAnalysis<TargetLibraryInfo>();
+ LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+ TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
// set TLI
(void)getTargetLibraryInfo();
@@ -681,13 +639,12 @@ bool LoopIdiomRecognize::runOnCountableLoop() {
bool MadeChange = false;
// Scan all the blocks in the loop that are not in subloops.
- for (Loop::block_iterator BI = CurLoop->block_begin(),
- E = CurLoop->block_end(); BI != E; ++BI) {
+ for (auto *BB : CurLoop->getBlocks()) {
// Ignore blocks in subloops.
- if (LI.getLoopFor(*BI) != CurLoop)
+ if (LI.getLoopFor(BB) != CurLoop)
continue;
- MadeChange |= runOnLoopBlock(*BI, BECount, ExitBlocks);
+ MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
}
return MadeChange;
}
@@ -776,7 +733,8 @@ bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
Value *StorePtr = SI->getPointerOperand();
// Reject stores that are so large that they overflow an unsigned.
- uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
+ auto &DL = CurLoop->getHeader()->getModule()->getDataLayout();
+ uint64_t SizeInBits = DL.getTypeSizeInBits(StoredVal->getType());
if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
return false;
@@ -951,7 +909,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
// but it can be turned into memset_pattern if the target supports it.
Value *SplatValue = isBytewiseValue(StoredVal);
Constant *PatternValue = nullptr;
-
+ auto &DL = CurLoop->getHeader()->getModule()->getDataLayout();
unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
// If we're allowed to form a memset, and the stored value would be acceptable
@@ -962,9 +920,8 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
CurLoop->isLoopInvariant(SplatValue)) {
// Keep and use SplatValue.
PatternValue = nullptr;
- } else if (DestAS == 0 &&
- TLI->has(LibFunc::memset_pattern16) &&
- (PatternValue = getMemSetPatternValue(StoredVal, *DL))) {
+ } else if (DestAS == 0 && TLI->has(LibFunc::memset_pattern16) &&
+ (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
// Don't create memset_pattern16s with address spaces.
// It looks like we can use PatternValue!
SplatValue = nullptr;
@@ -979,7 +936,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
// header. This allows us to insert code for it in the preheader.
BasicBlock *Preheader = CurLoop->getLoopPreheader();
IRBuilder<> Builder(Preheader->getTerminator());
- SCEVExpander Expander(*SE, "loop-idiom");
+ SCEVExpander Expander(*SE, DL, "loop-idiom");
Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
@@ -997,7 +954,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
StoreSize, getAnalysis<AliasAnalysis>(), TheStore)) {
Expander.clear();
// If we generated new code for the base pointer, clean up.
- deleteIfDeadInstruction(BasePtr, *SE, TLI);
+ RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI);
return false;
}
@@ -1039,12 +996,12 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
// Otherwise we should form a memset_pattern16. PatternValue is known to be
  // a constant array of 16 bytes. Plop the value into a mergeable global.
GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
- GlobalValue::InternalLinkage,
+ GlobalValue::PrivateLinkage,
PatternValue, ".memset_pattern");
GV->setUnnamedAddr(true); // Ok to merge these.
GV->setAlignment(16);
Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy);
- NewCall = Builder.CreateCall3(MSP, BasePtr, PatternPtr, NumBytes);
+ NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
}
DEBUG(dbgs() << " Formed memset: " << *NewCall << "\n"
@@ -1053,7 +1010,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
// Okay, the memset has been formed. Zap the original store and anything that
// feeds into it.
- deleteDeadInstruction(TheStore, *SE, TLI);
+ deleteDeadInstruction(TheStore, TLI);
++NumMemSet;
return true;
}
@@ -1076,7 +1033,8 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
// header. This allows us to insert code for it in the preheader.
BasicBlock *Preheader = CurLoop->getLoopPreheader();
IRBuilder<> Builder(Preheader->getTerminator());
- SCEVExpander Expander(*SE, "loop-idiom");
+ const DataLayout &DL = Preheader->getModule()->getDataLayout();
+ SCEVExpander Expander(*SE, DL, "loop-idiom");
// Okay, we have a strided store "p[i]" of a loaded value. We can turn
// this into a memcpy in the loop preheader now if we want. However, this
@@ -1094,7 +1052,7 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
getAnalysis<AliasAnalysis>(), SI)) {
Expander.clear();
// If we generated new code for the base pointer, clean up.
- deleteIfDeadInstruction(StoreBasePtr, *SE, TLI);
+ RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
return false;
}
@@ -1109,8 +1067,8 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
StoreSize, getAnalysis<AliasAnalysis>(), SI)) {
Expander.clear();
// If we generated new code for the base pointer, clean up.
- deleteIfDeadInstruction(LoadBasePtr, *SE, TLI);
- deleteIfDeadInstruction(StoreBasePtr, *SE, TLI);
+ RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI);
+ RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
return false;
}
@@ -1143,7 +1101,7 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
// Okay, the memset has been formed. Zap the original store and anything that
// feeds into it.
- deleteDeadInstruction(SI, *SE, TLI);
+ deleteDeadInstruction(SI, TLI);
++NumMemCpy;
return true;
}
diff --git a/lib/Transforms/Scalar/LoopInstSimplify.cpp b/lib/Transforms/Scalar/LoopInstSimplify.cpp
index d664f85c773d..e12502654751 100644
--- a/lib/Transforms/Scalar/LoopInstSimplify.cpp
+++ b/lib/Transforms/Scalar/LoopInstSimplify.cpp
@@ -23,7 +23,7 @@
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
@@ -44,12 +44,12 @@ namespace {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<AssumptionCacheTracker>();
- AU.addRequired<LoopInfo>();
+ AU.addRequired<LoopInfoWrapperPass>();
AU.addRequiredID(LoopSimplifyID);
AU.addPreservedID(LoopSimplifyID);
AU.addPreservedID(LCSSAID);
AU.addPreserved<ScalarEvolution>();
- AU.addRequired<TargetLibraryInfo>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
}
};
}
@@ -58,9 +58,9 @@ char LoopInstSimplify::ID = 0;
INITIALIZE_PASS_BEGIN(LoopInstSimplify, "loop-instsimplify",
"Simplify instructions in loops", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_END(LoopInstSimplify, "loop-instsimplify",
"Simplify instructions in loops", false, false)
@@ -76,10 +76,9 @@ bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
- LoopInfo *LI = &getAnalysis<LoopInfo>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
- const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
+ LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+ const TargetLibraryInfo *TLI =
+ &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
*L->getHeader()->getParent());
@@ -109,6 +108,7 @@ bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
WorklistItem Item = VisitStack.pop_back_val();
BasicBlock *BB = Item.getPointer();
bool IsSubloopHeader = Item.getInt();
+ const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
// Simplify instructions in the current basic block.
for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
diff --git a/lib/Transforms/Scalar/LoopInterchange.cpp b/lib/Transforms/Scalar/LoopInterchange.cpp
new file mode 100644
index 000000000000..f584018299d1
--- /dev/null
+++ b/lib/Transforms/Scalar/LoopInterchange.cpp
@@ -0,0 +1,1300 @@
+//===- LoopInterchange.cpp - Loop interchange pass------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass handles the loop interchange transform.
+// It interchanges loops to provide more cache-friendly memory access
+// patterns.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/AliasSetTracker.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/BlockFrequencyInfo.h"
+#include "llvm/Analysis/CodeMetrics.h"
+#include "llvm/Analysis/DependenceAnalysis.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/LoopIterator.h"
+#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionExpander.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/SSAUpdater.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "loop-interchange"
+
+namespace {
+
+typedef SmallVector<Loop *, 8> LoopVector;
+
+// TODO: Check if we can use a sparse matrix here.
+typedef std::vector<std::vector<char>> CharMatrix;
+
+// Maximum number of dependencies that can be handled in the dependency matrix.
+static const unsigned MaxMemInstrCount = 100;
+
+// Maximum loop depth supported.
+static const unsigned MaxLoopNestDepth = 10;
+
+struct LoopInterchange;
+
+#ifdef DUMP_DEP_MATRICIES
+void printDepMatrix(CharMatrix &DepMatrix) {
+ for (auto I = DepMatrix.begin(), E = DepMatrix.end(); I != E; ++I) {
+ std::vector<char> Vec = *I;
+ for (auto II = Vec.begin(), EE = Vec.end(); II != EE; ++II)
+ DEBUG(dbgs() << *II << " ");
+ DEBUG(dbgs() << "\n");
+ }
+}
+#endif
+
+static bool populateDependencyMatrix(CharMatrix &DepMatrix, unsigned Level,
+ Loop *L, DependenceAnalysis *DA) {
+ typedef SmallVector<Value *, 16> ValueVector;
+ ValueVector MemInstr;
+
+ if (Level > MaxLoopNestDepth) {
+ DEBUG(dbgs() << "Cannot handle loops of depth greater than "
+ << MaxLoopNestDepth << "\n");
+ return false;
+ }
+
+ // For each block.
+ for (Loop::block_iterator BB = L->block_begin(), BE = L->block_end();
+ BB != BE; ++BB) {
+ // Scan the BB and collect legal loads and stores.
+ for (BasicBlock::iterator I = (*BB)->begin(), E = (*BB)->end(); I != E;
+ ++I) {
+ Instruction *Ins = dyn_cast<Instruction>(I);
+ if (!Ins)
+ return false;
+ LoadInst *Ld = dyn_cast<LoadInst>(I);
+ StoreInst *St = dyn_cast<StoreInst>(I);
+ if (!St && !Ld)
+ continue;
+ if (Ld && !Ld->isSimple())
+ return false;
+ if (St && !St->isSimple())
+ return false;
+ MemInstr.push_back(I);
+ }
+ }
+
+ DEBUG(dbgs() << "Found " << MemInstr.size()
+ << " Loads and Stores to analyze\n");
+
+ ValueVector::iterator I, IE, J, JE;
+
+ for (I = MemInstr.begin(), IE = MemInstr.end(); I != IE; ++I) {
+ for (J = I, JE = MemInstr.end(); J != JE; ++J) {
+ std::vector<char> Dep;
+ Instruction *Src = dyn_cast<Instruction>(*I);
+ Instruction *Des = dyn_cast<Instruction>(*J);
+ if (Src == Des)
+ continue;
+ if (isa<LoadInst>(Src) && isa<LoadInst>(Des))
+ continue;
+ if (auto D = DA->depends(Src, Des, true)) {
+ DEBUG(dbgs() << "Found Dependency between Src=" << Src << " Des=" << Des
+ << "\n");
+ if (D->isFlow()) {
+          // TODO: Handle flow dependence. Check if it is sufficient to
+          // populate the dependence matrix with the direction reversed.
+ DEBUG(dbgs() << "Flow dependence not handled");
+ return false;
+ }
+ if (D->isAnti()) {
+ DEBUG(dbgs() << "Found Anti dependence \n");
+ unsigned Levels = D->getLevels();
+ char Direction;
+ for (unsigned II = 1; II <= Levels; ++II) {
+ const SCEV *Distance = D->getDistance(II);
+ const SCEVConstant *SCEVConst =
+ dyn_cast_or_null<SCEVConstant>(Distance);
+ if (SCEVConst) {
+ const ConstantInt *CI = SCEVConst->getValue();
+ if (CI->isNegative())
+ Direction = '<';
+ else if (CI->isZero())
+ Direction = '=';
+ else
+ Direction = '>';
+ Dep.push_back(Direction);
+ } else if (D->isScalar(II)) {
+ Direction = 'S';
+ Dep.push_back(Direction);
+ } else {
+ unsigned Dir = D->getDirection(II);
+ if (Dir == Dependence::DVEntry::LT ||
+ Dir == Dependence::DVEntry::LE)
+ Direction = '<';
+ else if (Dir == Dependence::DVEntry::GT ||
+ Dir == Dependence::DVEntry::GE)
+ Direction = '>';
+ else if (Dir == Dependence::DVEntry::EQ)
+ Direction = '=';
+ else
+ Direction = '*';
+ Dep.push_back(Direction);
+ }
+ }
+ while (Dep.size() != Level) {
+ Dep.push_back('I');
+ }
+
+ DepMatrix.push_back(Dep);
+ if (DepMatrix.size() > MaxMemInstrCount) {
+ DEBUG(dbgs() << "Cannot handle more than " << MaxMemInstrCount
+ << " dependencies inside loop\n");
+ return false;
+ }
+ }
+ }
+ }
+ }
+
+  // If we have no dependences to check legality against, return false.
+ if (DepMatrix.size() == 0)
+ return false;
+ return true;
+}
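+
+// Illustrative note (not part of the algorithm above): each row of the
+// dependence matrix has one direction character per loop level of the nest,
+// using the encoding populated above ('<', '>', '=', 'S', 'I', '*'). For a
+// two-deep nest, a row such as {'=', '<'} would describe a dependence that is
+// carried only by the inner loop.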
+
+// A loop is moved from index 'From' to index 'To'. Update the dependence
+// matrix by exchanging the two columns.
+static void interChangeDepedencies(CharMatrix &DepMatrix, unsigned FromIndx,
+ unsigned ToIndx) {
+ unsigned numRows = DepMatrix.size();
+ for (unsigned i = 0; i < numRows; ++i) {
+ char TmpVal = DepMatrix[i][ToIndx];
+ DepMatrix[i][ToIndx] = DepMatrix[i][FromIndx];
+ DepMatrix[i][FromIndx] = TmpVal;
+ }
+}
+
+// Checks whether the outermost dependence in the given row of the dependence
+// matrix (ignoring '=', 'S', and 'I' entries) up to and including Column is
+// '>'.
+static bool isOuterMostDepPositive(CharMatrix &DepMatrix, unsigned Row,
+ unsigned Column) {
+ for (unsigned i = 0; i <= Column; ++i) {
+ if (DepMatrix[Row][i] == '<')
+ return false;
+ if (DepMatrix[Row][i] == '>')
+ return true;
+ }
+ // All dependencies were '=','S' or 'I'
+ return false;
+}
+
+// Checks that no dependence exists in the given row of the dependence matrix
+// before Column, i.e. every entry before Column is '=', 'S', or 'I'.
+static bool containsNoDependence(CharMatrix &DepMatrix, unsigned Row,
+ unsigned Column) {
+ for (unsigned i = 0; i < Column; ++i) {
+    if (DepMatrix[Row][i] != '=' && DepMatrix[Row][i] != 'S' &&
+        DepMatrix[Row][i] != 'I')
+ return false;
+ }
+ return true;
+}
+
+static bool validDepInterchange(CharMatrix &DepMatrix, unsigned Row,
+ unsigned OuterLoopId, char InnerDep,
+ char OuterDep) {
+
+ if (isOuterMostDepPositive(DepMatrix, Row, OuterLoopId))
+ return false;
+
+ if (InnerDep == OuterDep)
+ return true;
+
+ // It is legal to interchange if and only if after interchange no row has a
+ // '>' direction as the leftmost non-'='.
+
+ if (InnerDep == '=' || InnerDep == 'S' || InnerDep == 'I')
+ return true;
+
+ if (InnerDep == '<')
+ return true;
+
+ if (InnerDep == '>') {
+    // If OuterLoopId is the outermost loop, then interchanging would make '>'
+    // the first (leftmost) dependence direction, which is illegal.
+ if (OuterLoopId == 0)
+ return false;
+
+    // If all dependences before OuterLoopId are '=', 'S', or 'I', then
+    // interchanging would make '>' the outermost non-'=' dependence in this
+    // row.
+ if (!containsNoDependence(DepMatrix, Row, OuterLoopId))
+ return true;
+ }
+
+ return false;
+}
+
+// Checks if it is legal to interchange 2 loops.
+// [Theorem] A permutation of the loops in a perfect nest is legal if and only
+// if the direction matrix, after the same permutation is applied to its
+// columns, has no ">" direction as the leftmost non-"=" direction in any row.
+static bool isLegalToInterChangeLoops(CharMatrix &DepMatrix,
+ unsigned InnerLoopId,
+ unsigned OuterLoopId) {
+
+ unsigned NumRows = DepMatrix.size();
+ // For each row check if it is valid to interchange.
+ for (unsigned Row = 0; Row < NumRows; ++Row) {
+ char InnerDep = DepMatrix[Row][InnerLoopId];
+ char OuterDep = DepMatrix[Row][OuterLoopId];
+ if (InnerDep == '*' || OuterDep == '*')
+ return false;
+ else if (!validDepInterchange(DepMatrix, Row, OuterLoopId, InnerDep,
+ OuterDep))
+ return false;
+ }
+ return true;
+}
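+
+// Worked example (a sketch, not taken from the code above): for a two-deep
+// nest, a row with directions {'<', '>'} is legal before interchange (the
+// leftmost non-'=' is '<'), but swapping the columns gives {'>', '<'}, whose
+// leftmost non-'=' is '>', so the interchange would be rejected. A row such
+// as {'=', '<'} becomes {'<', '='} and remains legal.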
+
+static void populateWorklist(Loop &L, SmallVector<LoopVector, 8> &V) {
+
+  DEBUG(dbgs() << "Calling populateWorklist\n");
+ LoopVector LoopList;
+ Loop *CurrentLoop = &L;
+ std::vector<Loop *> vec = CurrentLoop->getSubLoopsVector();
+ while (vec.size() != 0) {
+    // The current loop has multiple subloops in it, so the nest is not
+    // tightly nested. Discard the loops collected so far and skip this nest.
+ if (vec.size() != 1) {
+ LoopList.clear();
+ return;
+ }
+ LoopList.push_back(CurrentLoop);
+ CurrentLoop = *(vec.begin());
+ vec = CurrentLoop->getSubLoopsVector();
+ }
+ LoopList.push_back(CurrentLoop);
+ V.push_back(LoopList);
+}
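+
+// For example, a nest of the form
+//   for (i) { for (j) { for (k) { ... } } }
+// yields the worklist entry {i-loop, j-loop, k-loop}, whereas a loop that
+// contains two sibling subloops is discarded because such a nest cannot be
+// tightly nested.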
+
+static PHINode *getInductionVariable(Loop *L, ScalarEvolution *SE) {
+ PHINode *InnerIndexVar = L->getCanonicalInductionVariable();
+ if (InnerIndexVar)
+ return InnerIndexVar;
+ if (L->getLoopLatch() == nullptr || L->getLoopPredecessor() == nullptr)
+ return nullptr;
+ for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
+ PHINode *PhiVar = cast<PHINode>(I);
+ Type *PhiTy = PhiVar->getType();
+ if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
+ !PhiTy->isPointerTy())
+ return nullptr;
+ const SCEVAddRecExpr *AddRec =
+ dyn_cast<SCEVAddRecExpr>(SE->getSCEV(PhiVar));
+ if (!AddRec || !AddRec->isAffine())
+ continue;
+ const SCEV *Step = AddRec->getStepRecurrence(*SE);
+ const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
+ if (!C)
+ continue;
+ // Found the induction variable.
+ // FIXME: Handle loops with more than one induction variable. Note that,
+ // currently, legality makes sure we have only one induction variable.
+ return PhiVar;
+ }
+ return nullptr;
+}
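+
+// As an illustration (an assumption about typical SCEV forms, not code from
+// this patch): a header PHI whose SCEV is an affine add-recurrence with a
+// constant step, e.g. {0,+,1}<%for.body>, is accepted as the induction
+// variable, while a PHI whose step is itself loop-variant is skipped.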
+
+/// LoopInterchangeLegality checks if it is legal to interchange the loop.
+class LoopInterchangeLegality {
+public:
+ LoopInterchangeLegality(Loop *Outer, Loop *Inner, ScalarEvolution *SE,
+ LoopInterchange *Pass)
+ : OuterLoop(Outer), InnerLoop(Inner), SE(SE), CurrentPass(Pass),
+ InnerLoopHasReduction(false) {}
+
+ /// Check if the loops can be interchanged.
+ bool canInterchangeLoops(unsigned InnerLoopId, unsigned OuterLoopId,
+ CharMatrix &DepMatrix);
+ /// Check if the loop structure is understood. We do not handle triangular
+ /// loops for now.
+ bool isLoopStructureUnderstood(PHINode *InnerInductionVar);
+
+ bool currentLimitations();
+
+ bool hasInnerLoopReduction() { return InnerLoopHasReduction; }
+
+private:
+ bool tightlyNested(Loop *Outer, Loop *Inner);
+ bool containsUnsafeInstructionsInHeader(BasicBlock *BB);
+ bool areAllUsesReductions(Instruction *Ins, Loop *L);
+ bool containsUnsafeInstructionsInLatch(BasicBlock *BB);
+ bool findInductionAndReductions(Loop *L,
+ SmallVector<PHINode *, 8> &Inductions,
+ SmallVector<PHINode *, 8> &Reductions);
+ Loop *OuterLoop;
+ Loop *InnerLoop;
+
+ /// Scev analysis.
+ ScalarEvolution *SE;
+ LoopInterchange *CurrentPass;
+
+ bool InnerLoopHasReduction;
+};
+
+/// LoopInterchangeProfitability checks if it is profitable to interchange the
+/// loop.
+class LoopInterchangeProfitability {
+public:
+ LoopInterchangeProfitability(Loop *Outer, Loop *Inner, ScalarEvolution *SE)
+ : OuterLoop(Outer), InnerLoop(Inner), SE(SE) {}
+
+ /// Check if the loop interchange is profitable
+ bool isProfitable(unsigned InnerLoopId, unsigned OuterLoopId,
+ CharMatrix &DepMatrix);
+
+private:
+ int getInstrOrderCost();
+
+ Loop *OuterLoop;
+ Loop *InnerLoop;
+
+ /// Scev analysis.
+ ScalarEvolution *SE;
+};
+
+/// LoopInterchangeTransform interchanges the loops.
+class LoopInterchangeTransform {
+public:
+ LoopInterchangeTransform(Loop *Outer, Loop *Inner, ScalarEvolution *SE,
+ LoopInfo *LI, DominatorTree *DT,
+ LoopInterchange *Pass, BasicBlock *LoopNestExit,
+ bool InnerLoopContainsReductions)
+ : OuterLoop(Outer), InnerLoop(Inner), SE(SE), LI(LI), DT(DT),
+ LoopExit(LoopNestExit),
+ InnerLoopHasReduction(InnerLoopContainsReductions) {}
+
+ /// Interchange OuterLoop and InnerLoop.
+ bool transform();
+ void restructureLoops(Loop *InnerLoop, Loop *OuterLoop);
+ void removeChildLoop(Loop *OuterLoop, Loop *InnerLoop);
+
+private:
+ void splitInnerLoopLatch(Instruction *);
+ void splitOuterLoopLatch();
+ void splitInnerLoopHeader();
+ bool adjustLoopLinks();
+ void adjustLoopPreheaders();
+ void adjustOuterLoopPreheader();
+ void adjustInnerLoopPreheader();
+ bool adjustLoopBranches();
+ void updateIncomingBlock(BasicBlock *CurrBlock, BasicBlock *OldPred,
+ BasicBlock *NewPred);
+
+ Loop *OuterLoop;
+ Loop *InnerLoop;
+
+ /// Scev analysis.
+ ScalarEvolution *SE;
+ LoopInfo *LI;
+ DominatorTree *DT;
+ BasicBlock *LoopExit;
+ bool InnerLoopHasReduction;
+};
+
+// Main LoopInterchange Pass
+struct LoopInterchange : public FunctionPass {
+ static char ID;
+ ScalarEvolution *SE;
+ LoopInfo *LI;
+ DependenceAnalysis *DA;
+ DominatorTree *DT;
+ LoopInterchange()
+ : FunctionPass(ID), SE(nullptr), LI(nullptr), DA(nullptr), DT(nullptr) {
+ initializeLoopInterchangePass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<ScalarEvolution>();
+ AU.addRequired<AliasAnalysis>();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addRequired<LoopInfoWrapperPass>();
+ AU.addRequired<DependenceAnalysis>();
+ AU.addRequiredID(LoopSimplifyID);
+ AU.addRequiredID(LCSSAID);
+ }
+
+ bool runOnFunction(Function &F) override {
+ SE = &getAnalysis<ScalarEvolution>();
+ LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+ DA = &getAnalysis<DependenceAnalysis>();
+ auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
+ DT = DTWP ? &DTWP->getDomTree() : nullptr;
+ // Build up a worklist of loop pairs to analyze.
+ SmallVector<LoopVector, 8> Worklist;
+
+ for (Loop *L : *LI)
+ populateWorklist(*L, Worklist);
+
+ DEBUG(dbgs() << "Worklist size = " << Worklist.size() << "\n");
+    bool Changed = false;
+    while (!Worklist.empty()) {
+      LoopVector LoopList = Worklist.pop_back_val();
+      Changed |= processLoopList(LoopList, F);
+ }
+ return Changed;
+ }
+
+ bool isComputableLoopNest(LoopVector LoopList) {
+ for (auto I = LoopList.begin(), E = LoopList.end(); I != E; ++I) {
+ Loop *L = *I;
+ const SCEV *ExitCountOuter = SE->getBackedgeTakenCount(L);
+ if (ExitCountOuter == SE->getCouldNotCompute()) {
+ DEBUG(dbgs() << "Couldn't compute Backedge count\n");
+ return false;
+ }
+ if (L->getNumBackEdges() != 1) {
+ DEBUG(dbgs() << "NumBackEdges is not equal to 1\n");
+ return false;
+ }
+ if (!L->getExitingBlock()) {
+ DEBUG(dbgs() << "Loop Doesn't have unique exit block\n");
+ return false;
+ }
+ }
+ return true;
+ }
+
+ unsigned selectLoopForInterchange(LoopVector LoopList) {
+ // TODO: Add a better heuristic to select the loop to be interchanged based
+    // on the dependence matrix. Currently we select the innermost loop.
+ return LoopList.size() - 1;
+ }
+
+ bool processLoopList(LoopVector LoopList, Function &F) {
+
+ bool Changed = false;
+ CharMatrix DependencyMatrix;
+ if (LoopList.size() < 2) {
+ DEBUG(dbgs() << "Loop doesn't contain minimum nesting level.\n");
+ return false;
+ }
+ if (!isComputableLoopNest(LoopList)) {
+      DEBUG(dbgs() << "Not a valid loop candidate for interchange\n");
+ return false;
+ }
+ Loop *OuterMostLoop = *(LoopList.begin());
+
+ DEBUG(dbgs() << "Processing LoopList of size = " << LoopList.size()
+ << "\n");
+
+ if (!populateDependencyMatrix(DependencyMatrix, LoopList.size(),
+ OuterMostLoop, DA)) {
+ DEBUG(dbgs() << "Populating Dependency matrix failed\n");
+ return false;
+ }
+#ifdef DUMP_DEP_MATRICIES
+    DEBUG(dbgs() << "Dependence before interchange\n");
+ printDepMatrix(DependencyMatrix);
+#endif
+
+ BasicBlock *OuterMostLoopLatch = OuterMostLoop->getLoopLatch();
+ BranchInst *OuterMostLoopLatchBI =
+ dyn_cast<BranchInst>(OuterMostLoopLatch->getTerminator());
+ if (!OuterMostLoopLatchBI)
+ return false;
+
+    // Since we currently do not handle LCSSA PHIs, on any failure the loop
+    // condition now branches to LoopNestExit.
+ // TODO: This should be removed once we handle LCSSA PHI nodes.
+
+ // Get the Outermost loop exit.
+ BasicBlock *LoopNestExit;
+ if (OuterMostLoopLatchBI->getSuccessor(0) == OuterMostLoop->getHeader())
+ LoopNestExit = OuterMostLoopLatchBI->getSuccessor(1);
+ else
+ LoopNestExit = OuterMostLoopLatchBI->getSuccessor(0);
+
+ if (isa<PHINode>(LoopNestExit->begin())) {
+      DEBUG(dbgs() << "PHI nodes in loop nest exit are not handled for now "
+ "since on failure all loops branch to loop nest exit.\n");
+ return false;
+ }
+
+ unsigned SelecLoopId = selectLoopForInterchange(LoopList);
+    // Move the selected loop outwards to the best possible position.
+ for (unsigned i = SelecLoopId; i > 0; i--) {
+ bool Interchanged =
+ processLoop(LoopList, i, i - 1, LoopNestExit, DependencyMatrix);
+ if (!Interchanged)
+ return Changed;
+      // The loops have been interchanged; reflect that in LoopList.
+ std::swap(LoopList[i - 1], LoopList[i]);
+
+ // Update the DependencyMatrix
+ interChangeDepedencies(DependencyMatrix, i, i - 1);
+ DT->recalculate(F);
+#ifdef DUMP_DEP_MATRICIES
+      DEBUG(dbgs() << "Dependence after interchange\n");
+ printDepMatrix(DependencyMatrix);
+#endif
+ Changed |= Interchanged;
+ }
+ return Changed;
+ }
+
+ bool processLoop(LoopVector LoopList, unsigned InnerLoopId,
+ unsigned OuterLoopId, BasicBlock *LoopNestExit,
+ std::vector<std::vector<char>> &DependencyMatrix) {
+
+    DEBUG(dbgs() << "Processing InnerLoopId = " << InnerLoopId
+ << " and OuterLoopId = " << OuterLoopId << "\n");
+ Loop *InnerLoop = LoopList[InnerLoopId];
+ Loop *OuterLoop = LoopList[OuterLoopId];
+
+ LoopInterchangeLegality LIL(OuterLoop, InnerLoop, SE, this);
+ if (!LIL.canInterchangeLoops(InnerLoopId, OuterLoopId, DependencyMatrix)) {
+ DEBUG(dbgs() << "Not interchanging Loops. Cannot prove legality\n");
+ return false;
+ }
+ DEBUG(dbgs() << "Loops are legal to interchange\n");
+ LoopInterchangeProfitability LIP(OuterLoop, InnerLoop, SE);
+ if (!LIP.isProfitable(InnerLoopId, OuterLoopId, DependencyMatrix)) {
+ DEBUG(dbgs() << "Interchanging Loops not profitable\n");
+ return false;
+ }
+
+ LoopInterchangeTransform LIT(OuterLoop, InnerLoop, SE, LI, DT, this,
+ LoopNestExit, LIL.hasInnerLoopReduction());
+ LIT.transform();
+ DEBUG(dbgs() << "Loops interchanged\n");
+ return true;
+ }
+};
+
+} // end of namespace
+bool LoopInterchangeLegality::areAllUsesReductions(Instruction *Ins, Loop *L) {
+ return !std::any_of(Ins->user_begin(), Ins->user_end(), [=](User *U) -> bool {
+ PHINode *UserIns = dyn_cast<PHINode>(U);
+ ReductionDescriptor RD;
+ return !UserIns || !ReductionDescriptor::isReductionPHI(UserIns, L, RD);
+ });
+}
+
+bool LoopInterchangeLegality::containsUnsafeInstructionsInHeader(
+ BasicBlock *BB) {
+ for (auto I = BB->begin(), E = BB->end(); I != E; ++I) {
+    // Loads corresponding to reduction PHIs are safe when concluding whether
+    // the loops are tightly nested.
+ if (LoadInst *L = dyn_cast<LoadInst>(I)) {
+ if (!areAllUsesReductions(L, InnerLoop))
+ return true;
+ } else if (I->mayHaveSideEffects() || I->mayReadFromMemory())
+ return true;
+ }
+ return false;
+}
+
+bool LoopInterchangeLegality::containsUnsafeInstructionsInLatch(
+ BasicBlock *BB) {
+ for (auto I = BB->begin(), E = BB->end(); I != E; ++I) {
+    // Stores corresponding to reductions are safe when concluding whether the
+    // loops are tightly nested.
+ if (StoreInst *L = dyn_cast<StoreInst>(I)) {
+ PHINode *PHI = dyn_cast<PHINode>(L->getOperand(0));
+ if (!PHI)
+ return true;
+ } else if (I->mayHaveSideEffects() || I->mayReadFromMemory())
+ return true;
+ }
+ return false;
+}
+
+bool LoopInterchangeLegality::tightlyNested(Loop *OuterLoop, Loop *InnerLoop) {
+ BasicBlock *OuterLoopHeader = OuterLoop->getHeader();
+ BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader();
+ BasicBlock *OuterLoopLatch = OuterLoop->getLoopLatch();
+
+ DEBUG(dbgs() << "Checking if Loops are Tightly Nested\n");
+
+  // A perfectly nested loop will not have any branch in between the outer and
+  // inner block, i.e. the outer header will branch only to either the inner
+  // preheader or the outer loop latch.
+ BranchInst *outerLoopHeaderBI =
+ dyn_cast<BranchInst>(OuterLoopHeader->getTerminator());
+ if (!outerLoopHeaderBI)
+ return false;
+ unsigned num = outerLoopHeaderBI->getNumSuccessors();
+ for (unsigned i = 0; i < num; i++) {
+ if (outerLoopHeaderBI->getSuccessor(i) != InnerLoopPreHeader &&
+ outerLoopHeaderBI->getSuccessor(i) != OuterLoopLatch)
+ return false;
+ }
+
+ DEBUG(dbgs() << "Checking instructions in Loop header and Loop latch \n");
+  // There is no basic block in between; now make sure the outer header and
+  // the outer loop latch do not contain any unsafe instructions.
+ if (containsUnsafeInstructionsInHeader(OuterLoopHeader) ||
+ containsUnsafeInstructionsInLatch(OuterLoopLatch))
+ return false;
+
+ DEBUG(dbgs() << "Loops are perfectly nested \n");
+ // We have a perfect loop nest.
+ return true;
+}
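+
+// As an illustration: a nest such as
+//   for (i) {
+//     for (j) { ... }
+//   }
+// where the outer header branches only to the inner preheader or the outer
+// latch is tightly nested; any other computation between the two loops
+// (beyond the reduction loads/stores allowed above) disqualifies the nest.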
+
+
+bool LoopInterchangeLegality::isLoopStructureUnderstood(
+ PHINode *InnerInduction) {
+
+ unsigned Num = InnerInduction->getNumOperands();
+ BasicBlock *InnerLoopPreheader = InnerLoop->getLoopPreheader();
+ for (unsigned i = 0; i < Num; ++i) {
+ Value *Val = InnerInduction->getOperand(i);
+ if (isa<Constant>(Val))
+ continue;
+ Instruction *I = dyn_cast<Instruction>(Val);
+ if (!I)
+ return false;
+ // TODO: Handle triangular loops.
+ // e.g. for(int i=0;i<N;i++)
+ // for(int j=i;j<N;j++)
+ unsigned IncomBlockIndx = PHINode::getIncomingValueNumForOperand(i);
+ if (InnerInduction->getIncomingBlock(IncomBlockIndx) ==
+ InnerLoopPreheader &&
+ !OuterLoop->isLoopInvariant(I)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool LoopInterchangeLegality::findInductionAndReductions(
+ Loop *L, SmallVector<PHINode *, 8> &Inductions,
+ SmallVector<PHINode *, 8> &Reductions) {
+ if (!L->getLoopLatch() || !L->getLoopPredecessor())
+ return false;
+ for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
+ ReductionDescriptor RD;
+ PHINode *PHI = cast<PHINode>(I);
+ ConstantInt *StepValue = nullptr;
+ if (isInductionPHI(PHI, SE, StepValue))
+ Inductions.push_back(PHI);
+ else if (ReductionDescriptor::isReductionPHI(PHI, L, RD))
+ Reductions.push_back(PHI);
+ else {
+ DEBUG(
+ dbgs() << "Failed to recognize PHI as an induction or reduction.\n");
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool containsSafePHI(BasicBlock *Block, bool isOuterLoopExitBlock) {
+ for (auto I = Block->begin(); isa<PHINode>(I); ++I) {
+ PHINode *PHI = cast<PHINode>(I);
+    // A reduction LCSSA PHI will have only one incoming block, the loop latch.
+ if (PHI->getNumIncomingValues() > 1)
+ return false;
+ Instruction *Ins = dyn_cast<Instruction>(PHI->getIncomingValue(0));
+ if (!Ins)
+ return false;
+    // The incoming value of an LCSSA PHI in the outer loop exit can only be
+    // the LCSSA PHI of the inner loop exit; otherwise the loops would not be
+    // tightly nested.
+ if (!isa<PHINode>(Ins) && isOuterLoopExitBlock)
+ return false;
+ }
+ return true;
+}
+
+static BasicBlock *getLoopLatchExitBlock(BasicBlock *LatchBlock,
+ BasicBlock *LoopHeader) {
+ if (BranchInst *BI = dyn_cast<BranchInst>(LatchBlock->getTerminator())) {
+ unsigned Num = BI->getNumSuccessors();
+ assert(Num == 2);
+ for (unsigned i = 0; i < Num; ++i) {
+ if (BI->getSuccessor(i) == LoopHeader)
+ continue;
+ return BI->getSuccessor(i);
+ }
+ }
+ return nullptr;
+}
+
+// This function indicates the current limitations in the transform as a result
+// of which we do not proceed.
+bool LoopInterchangeLegality::currentLimitations() {
+
+ BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader();
+ BasicBlock *InnerLoopHeader = InnerLoop->getHeader();
+ BasicBlock *InnerLoopLatch = InnerLoop->getLoopLatch();
+ BasicBlock *OuterLoopLatch = OuterLoop->getLoopLatch();
+ BasicBlock *OuterLoopHeader = OuterLoop->getHeader();
+
+ PHINode *InnerInductionVar;
+ SmallVector<PHINode *, 8> Inductions;
+ SmallVector<PHINode *, 8> Reductions;
+ if (!findInductionAndReductions(InnerLoop, Inductions, Reductions))
+ return true;
+
+ // TODO: Currently we handle only loops with 1 induction variable.
+ if (Inductions.size() != 1) {
+    DEBUG(dbgs() << "We currently only support loops with 1 induction variable."
+                 << " Failed to interchange due to current limitation\n");
+ return true;
+ }
+ if (Reductions.size() > 0)
+ InnerLoopHasReduction = true;
+
+ InnerInductionVar = Inductions.pop_back_val();
+ Reductions.clear();
+ if (!findInductionAndReductions(OuterLoop, Inductions, Reductions))
+ return true;
+
+ // Outer loop cannot have reduction because then loops will not be tightly
+ // nested.
+ if (!Reductions.empty())
+ return true;
+ // TODO: Currently we handle only loops with 1 induction variable.
+ if (Inductions.size() != 1)
+ return true;
+
+ // TODO: Triangular loops are not handled for now.
+ if (!isLoopStructureUnderstood(InnerInductionVar)) {
+ DEBUG(dbgs() << "Loop structure not understood by pass\n");
+ return true;
+ }
+
+ // TODO: We only handle LCSSA PHI's corresponding to reduction for now.
+ BasicBlock *LoopExitBlock =
+ getLoopLatchExitBlock(OuterLoopLatch, OuterLoopHeader);
+ if (!LoopExitBlock || !containsSafePHI(LoopExitBlock, true))
+ return true;
+
+ LoopExitBlock = getLoopLatchExitBlock(InnerLoopLatch, InnerLoopHeader);
+ if (!LoopExitBlock || !containsSafePHI(LoopExitBlock, false))
+ return true;
+
+  // TODO: Current limitation: since we split the inner loop latch at the point
+  // where the induction variable is incremented (induction.next), we cannot
+  // have more than one user of induction.next, since that would result in
+  // broken code after the split.
+ // e.g.
+ // for(i=0;i<N;i++) {
+ // for(j = 0;j<M;j++) {
+ // A[j+1][i+2] = A[j][i]+k;
+ // }
+ // }
+ bool FoundInduction = false;
+ Instruction *InnerIndexVarInc = nullptr;
+ if (InnerInductionVar->getIncomingBlock(0) == InnerLoopPreHeader)
+ InnerIndexVarInc =
+ dyn_cast<Instruction>(InnerInductionVar->getIncomingValue(1));
+ else
+ InnerIndexVarInc =
+ dyn_cast<Instruction>(InnerInductionVar->getIncomingValue(0));
+
+ if (!InnerIndexVarInc)
+ return true;
+
+  // Since we split the inner loop latch on this induction variable, make sure
+  // there is no instruction between the induction variable and the branch
+  // instruction.
+
+ for (auto I = InnerLoopLatch->rbegin(), E = InnerLoopLatch->rend();
+ I != E && !FoundInduction; ++I) {
+ if (isa<BranchInst>(*I) || isa<CmpInst>(*I) || isa<TruncInst>(*I))
+ continue;
+ const Instruction &Ins = *I;
+    // We found an instruction. If it is not the induction variable increment,
+    // it is not safe to split this loop latch.
+ if (!Ins.isIdenticalTo(InnerIndexVarInc))
+ return true;
+ else
+ FoundInduction = true;
+ }
+  // We scanned the whole loop latch without finding the induction variable
+  // increment; treat this as a current limitation.
+ if (!FoundInduction)
+ return true;
+
+ return false;
+}
+
+bool LoopInterchangeLegality::canInterchangeLoops(unsigned InnerLoopId,
+ unsigned OuterLoopId,
+ CharMatrix &DepMatrix) {
+
+ if (!isLegalToInterChangeLoops(DepMatrix, InnerLoopId, OuterLoopId)) {
+ DEBUG(dbgs() << "Failed interchange InnerLoopId = " << InnerLoopId
+                 << " and OuterLoopId = " << OuterLoopId
+                 << " due to dependence\n");
+ return false;
+ }
+
+  // Create unique preheaders if we do not already have them.
+ BasicBlock *OuterLoopPreHeader = OuterLoop->getLoopPreheader();
+ BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader();
+
+ // Create a unique outer preheader -
+ // 1) If OuterLoop preheader is not present.
+ // 2) If OuterLoop Preheader is same as OuterLoop Header
+ // 3) If OuterLoop Preheader is same as Header of the previous loop.
+ // 4) If OuterLoop Preheader is Entry node.
+ if (!OuterLoopPreHeader || OuterLoopPreHeader == OuterLoop->getHeader() ||
+ isa<PHINode>(OuterLoopPreHeader->begin()) ||
+ !OuterLoopPreHeader->getUniquePredecessor()) {
+ OuterLoopPreHeader = InsertPreheaderForLoop(OuterLoop, CurrentPass);
+ }
+
+ if (!InnerLoopPreHeader || InnerLoopPreHeader == InnerLoop->getHeader() ||
+ InnerLoopPreHeader == OuterLoop->getHeader()) {
+ InnerLoopPreHeader = InsertPreheaderForLoop(InnerLoop, CurrentPass);
+ }
+
+  // Bail out if the loops cannot be interchanged due to the current
+  // limitations of the transform.
+ if (currentLimitations()) {
+ DEBUG(dbgs() << "Not legal because of current transform limitation\n");
+ return false;
+ }
+
+ // Check if the loops are tightly nested.
+ if (!tightlyNested(OuterLoop, InnerLoop)) {
+ DEBUG(dbgs() << "Loops not tightly nested\n");
+ return false;
+ }
+
+ return true;
+}
+
+int LoopInterchangeProfitability::getInstrOrderCost() {
+ unsigned GoodOrder, BadOrder;
+ BadOrder = GoodOrder = 0;
+ for (auto BI = InnerLoop->block_begin(), BE = InnerLoop->block_end();
+ BI != BE; ++BI) {
+ for (auto I = (*BI)->begin(), E = (*BI)->end(); I != E; ++I) {
+ const Instruction &Ins = *I;
+ if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&Ins)) {
+ unsigned NumOp = GEP->getNumOperands();
+ bool FoundInnerInduction = false;
+ bool FoundOuterInduction = false;
+ for (unsigned i = 0; i < NumOp; ++i) {
+ const SCEV *OperandVal = SE->getSCEV(GEP->getOperand(i));
+ const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(OperandVal);
+ if (!AR)
+ continue;
+
+ // If we find the inner induction after an outer induction e.g.
+ // for(int i=0;i<N;i++)
+ // for(int j=0;j<N;j++)
+ // A[i][j] = A[i-1][j-1]+k;
+ // then it is a good order.
+ if (AR->getLoop() == InnerLoop) {
+ // We found an InnerLoop induction after OuterLoop induction. It is
+ // a good order.
+ FoundInnerInduction = true;
+ if (FoundOuterInduction) {
+ GoodOrder++;
+ break;
+ }
+ }
+ // If we find the outer induction after an inner induction e.g.
+ // for(int i=0;i<N;i++)
+ // for(int j=0;j<N;j++)
+ // A[j][i] = A[j-1][i-1]+k;
+ // then it is a bad order.
+ if (AR->getLoop() == OuterLoop) {
+ // We found an OuterLoop induction after InnerLoop induction. It is
+ // a bad order.
+ FoundOuterInduction = true;
+ if (FoundInnerInduction) {
+ BadOrder++;
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+ return GoodOrder - BadOrder;
+}
+
+static bool isProfitabileForVectorization(unsigned InnerLoopId,
+ unsigned OuterLoopId,
+ CharMatrix &DepMatrix) {
+ // TODO: Improve this heuristic to catch more cases.
+  // If the inner loop is loop-independent or doesn't carry any dependence, it
+  // is profitable to move it to the outer position.
+ unsigned Row = DepMatrix.size();
+ for (unsigned i = 0; i < Row; ++i) {
+ if (DepMatrix[i][InnerLoopId] != 'S' && DepMatrix[i][InnerLoopId] != 'I')
+ return false;
+ // TODO: We need to improve this heuristic.
+ if (DepMatrix[i][OuterLoopId] != '=')
+ return false;
+ }
+  // If the outer loop carries a dependence and the inner loop is
+  // loop-independent, then it is profitable to interchange to enable
+  // parallelism.
+ return true;
+}
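+
+// Sketch of the heuristic above: with columns ordered outermost-first, a
+// dependence row such as {'=', 'I'} (outer column '=', inner column 'I')
+// passes the check, so moving the inner loop outwards is considered
+// profitable; a row with '<' or '>' in the inner column does not.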
+
+bool LoopInterchangeProfitability::isProfitable(unsigned InnerLoopId,
+ unsigned OuterLoopId,
+ CharMatrix &DepMatrix) {
+
+  // TODO: Add better profitability checks.
+ // e.g
+ // 1) Construct dependency matrix and move the one with no loop carried dep
+ // inside to enable vectorization.
+
+  // This is a rough cost estimation algorithm. It counts the good and bad
+  // orders of induction variables in the instructions and allows interchange
+  // if the number of bad orders is greater than the number of good ones.
+ int Cost = 0;
+ Cost += getInstrOrderCost();
+ DEBUG(dbgs() << "Cost = " << Cost << "\n");
+ if (Cost < 0)
+ return true;
+
+  // It is not profitable as per the current cache profitability model. But
+  // check if we can move this loop outside to improve parallelism.
+ bool ImprovesPar =
+ isProfitabileForVectorization(InnerLoopId, OuterLoopId, DepMatrix);
+ return ImprovesPar;
+}
+
+void LoopInterchangeTransform::removeChildLoop(Loop *OuterLoop,
+ Loop *InnerLoop) {
+ for (Loop::iterator I = OuterLoop->begin(), E = OuterLoop->end(); I != E;
+ ++I) {
+ if (*I == InnerLoop) {
+ OuterLoop->removeChildLoop(I);
+ return;
+ }
+ }
+ assert(false && "Couldn't find loop");
+}
+
+void LoopInterchangeTransform::restructureLoops(Loop *InnerLoop,
+ Loop *OuterLoop) {
+ Loop *OuterLoopParent = OuterLoop->getParentLoop();
+ if (OuterLoopParent) {
+ // Remove the loop from its parent loop.
+ removeChildLoop(OuterLoopParent, OuterLoop);
+ removeChildLoop(OuterLoop, InnerLoop);
+ OuterLoopParent->addChildLoop(InnerLoop);
+ } else {
+ removeChildLoop(OuterLoop, InnerLoop);
+ LI->changeTopLevelLoop(OuterLoop, InnerLoop);
+ }
+
+ while (!InnerLoop->empty())
+ OuterLoop->addChildLoop(InnerLoop->removeChildLoop(InnerLoop->begin()));
+
+ InnerLoop->addChildLoop(OuterLoop);
+}
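+
+// For example, for a nest P { Outer { Inner { C... } } } the code above
+// rewires LoopInfo to P { Inner { Outer { C... } } }: Inner takes Outer's
+// place in its parent (or as a top-level loop), Inner's children are
+// re-parented to Outer, and Outer becomes the only child of Inner.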
+
+bool LoopInterchangeTransform::transform() {
+
+ DEBUG(dbgs() << "transform\n");
+ bool Transformed = false;
+ Instruction *InnerIndexVar;
+
+ if (InnerLoop->getSubLoops().size() == 0) {
+ BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader();
+ DEBUG(dbgs() << "Calling Split Inner Loop\n");
+ PHINode *InductionPHI = getInductionVariable(InnerLoop, SE);
+ if (!InductionPHI) {
+ DEBUG(dbgs() << "Failed to find the point to split loop latch \n");
+ return false;
+ }
+
+ if (InductionPHI->getIncomingBlock(0) == InnerLoopPreHeader)
+ InnerIndexVar = dyn_cast<Instruction>(InductionPHI->getIncomingValue(1));
+ else
+ InnerIndexVar = dyn_cast<Instruction>(InductionPHI->getIncomingValue(0));
+
+    // Split at the place where the induction variable is
+    // incremented/decremented.
+    // TODO: This splitting logic may not always work. Fix this.
+ splitInnerLoopLatch(InnerIndexVar);
+ DEBUG(dbgs() << "splitInnerLoopLatch Done\n");
+
+    // Split the inner loop's PHI nodes out into a separate basic block.
+ splitInnerLoopHeader();
+ DEBUG(dbgs() << "splitInnerLoopHeader Done\n");
+ }
+
+ Transformed |= adjustLoopLinks();
+ if (!Transformed) {
+ DEBUG(dbgs() << "adjustLoopLinks Failed\n");
+ return false;
+ }
+
+ restructureLoops(InnerLoop, OuterLoop);
+ return true;
+}
+
+void LoopInterchangeTransform::splitInnerLoopLatch(Instruction *Inc) {
+ BasicBlock *InnerLoopLatch = InnerLoop->getLoopLatch();
+ BasicBlock *InnerLoopLatchPred = InnerLoopLatch;
+ InnerLoopLatch = SplitBlock(InnerLoopLatchPred, Inc, DT, LI);
+}
+
+void LoopInterchangeTransform::splitOuterLoopLatch() {
+ BasicBlock *OuterLoopLatch = OuterLoop->getLoopLatch();
+ BasicBlock *OuterLatchLcssaPhiBlock = OuterLoopLatch;
+ OuterLoopLatch = SplitBlock(OuterLatchLcssaPhiBlock,
+ OuterLoopLatch->getFirstNonPHI(), DT, LI);
+}
+
+void LoopInterchangeTransform::splitInnerLoopHeader() {
+
+  // Split the inner loop header out, making sure that the reduction PHIs stay
+  // in the inner loop body.
+ BasicBlock *InnerLoopHeader = InnerLoop->getHeader();
+ BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader();
+ if (InnerLoopHasReduction) {
+ // FIXME: Check if the induction PHI will always be the first PHI.
+ BasicBlock *New = InnerLoopHeader->splitBasicBlock(
+ ++(InnerLoopHeader->begin()), InnerLoopHeader->getName() + ".split");
+ if (LI)
+ if (Loop *L = LI->getLoopFor(InnerLoopHeader))
+ L->addBasicBlockToLoop(New, *LI);
+
+ // Adjust Reduction PHI's in the block.
+ SmallVector<PHINode *, 8> PHIVec;
+ for (auto I = New->begin(); isa<PHINode>(I); ++I) {
+ PHINode *PHI = dyn_cast<PHINode>(I);
+ Value *V = PHI->getIncomingValueForBlock(InnerLoopPreHeader);
+ PHI->replaceAllUsesWith(V);
+ PHIVec.push_back((PHI));
+ }
+ for (auto I = PHIVec.begin(), E = PHIVec.end(); I != E; ++I) {
+ PHINode *P = *I;
+ P->eraseFromParent();
+ }
+ } else {
+ SplitBlock(InnerLoopHeader, InnerLoopHeader->getFirstNonPHI(), DT, LI);
+ }
+
+ DEBUG(dbgs() << "Output of splitInnerLoopHeader InnerLoopHeaderSucc & "
+ "InnerLoopHeader \n");
+}
+
+/// \brief Move all instructions except the terminator from FromBB right before
+/// InsertBefore
+static void moveBBContents(BasicBlock *FromBB, Instruction *InsertBefore) {
+ auto &ToList = InsertBefore->getParent()->getInstList();
+ auto &FromList = FromBB->getInstList();
+
+ ToList.splice(InsertBefore, FromList, FromList.begin(),
+ FromBB->getTerminator());
+}
+
+void LoopInterchangeTransform::adjustOuterLoopPreheader() {
+ BasicBlock *OuterLoopPreHeader = OuterLoop->getLoopPreheader();
+ BasicBlock *InnerPreHeader = InnerLoop->getLoopPreheader();
+
+ moveBBContents(OuterLoopPreHeader, InnerPreHeader->getTerminator());
+}
+
+void LoopInterchangeTransform::adjustInnerLoopPreheader() {
+ BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader();
+ BasicBlock *OuterHeader = OuterLoop->getHeader();
+
+ moveBBContents(InnerLoopPreHeader, OuterHeader->getTerminator());
+}
+
+void LoopInterchangeTransform::updateIncomingBlock(BasicBlock *CurrBlock,
+ BasicBlock *OldPred,
+ BasicBlock *NewPred) {
+ for (auto I = CurrBlock->begin(); isa<PHINode>(I); ++I) {
+ PHINode *PHI = cast<PHINode>(I);
+ unsigned Num = PHI->getNumIncomingValues();
+ for (unsigned i = 0; i < Num; ++i) {
+ if (PHI->getIncomingBlock(i) == OldPred)
+ PHI->setIncomingBlock(i, NewPred);
+ }
+ }
+}
+
+bool LoopInterchangeTransform::adjustLoopBranches() {
+
+ DEBUG(dbgs() << "adjustLoopBranches called\n");
+ // Adjust the loop preheader
+ BasicBlock *InnerLoopHeader = InnerLoop->getHeader();
+ BasicBlock *OuterLoopHeader = OuterLoop->getHeader();
+ BasicBlock *InnerLoopLatch = InnerLoop->getLoopLatch();
+ BasicBlock *OuterLoopLatch = OuterLoop->getLoopLatch();
+ BasicBlock *OuterLoopPreHeader = OuterLoop->getLoopPreheader();
+ BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader();
+ BasicBlock *OuterLoopPredecessor = OuterLoopPreHeader->getUniquePredecessor();
+ BasicBlock *InnerLoopLatchPredecessor =
+ InnerLoopLatch->getUniquePredecessor();
+ BasicBlock *InnerLoopLatchSuccessor;
+ BasicBlock *OuterLoopLatchSuccessor;
+
+ BranchInst *OuterLoopLatchBI =
+ dyn_cast<BranchInst>(OuterLoopLatch->getTerminator());
+ BranchInst *InnerLoopLatchBI =
+ dyn_cast<BranchInst>(InnerLoopLatch->getTerminator());
+ BranchInst *OuterLoopHeaderBI =
+ dyn_cast<BranchInst>(OuterLoopHeader->getTerminator());
+ BranchInst *InnerLoopHeaderBI =
+ dyn_cast<BranchInst>(InnerLoopHeader->getTerminator());
+
+ if (!OuterLoopPredecessor || !InnerLoopLatchPredecessor ||
+ !OuterLoopLatchBI || !InnerLoopLatchBI || !OuterLoopHeaderBI ||
+ !InnerLoopHeaderBI)
+ return false;
+
+ BranchInst *InnerLoopLatchPredecessorBI =
+ dyn_cast<BranchInst>(InnerLoopLatchPredecessor->getTerminator());
+ BranchInst *OuterLoopPredecessorBI =
+ dyn_cast<BranchInst>(OuterLoopPredecessor->getTerminator());
+
+ if (!OuterLoopPredecessorBI || !InnerLoopLatchPredecessorBI)
+ return false;
+ BasicBlock *InnerLoopHeaderSucessor = InnerLoopHeader->getUniqueSuccessor();
+ if (!InnerLoopHeaderSucessor)
+ return false;
+
+ // Adjust Loop Preheader and headers
+
+ unsigned NumSucc = OuterLoopPredecessorBI->getNumSuccessors();
+ for (unsigned i = 0; i < NumSucc; ++i) {
+ if (OuterLoopPredecessorBI->getSuccessor(i) == OuterLoopPreHeader)
+ OuterLoopPredecessorBI->setSuccessor(i, InnerLoopPreHeader);
+ }
+
+ NumSucc = OuterLoopHeaderBI->getNumSuccessors();
+ for (unsigned i = 0; i < NumSucc; ++i) {
+ if (OuterLoopHeaderBI->getSuccessor(i) == OuterLoopLatch)
+ OuterLoopHeaderBI->setSuccessor(i, LoopExit);
+ else if (OuterLoopHeaderBI->getSuccessor(i) == InnerLoopPreHeader)
+ OuterLoopHeaderBI->setSuccessor(i, InnerLoopHeaderSucessor);
+ }
+
+ // Adjust reduction PHI's now that the incoming block has changed.
+ updateIncomingBlock(InnerLoopHeaderSucessor, InnerLoopHeader,
+ OuterLoopHeader);
+
+ BranchInst::Create(OuterLoopPreHeader, InnerLoopHeaderBI);
+ InnerLoopHeaderBI->eraseFromParent();
+
+ // -------------Adjust loop latches-----------
+ if (InnerLoopLatchBI->getSuccessor(0) == InnerLoopHeader)
+ InnerLoopLatchSuccessor = InnerLoopLatchBI->getSuccessor(1);
+ else
+ InnerLoopLatchSuccessor = InnerLoopLatchBI->getSuccessor(0);
+
+ NumSucc = InnerLoopLatchPredecessorBI->getNumSuccessors();
+ for (unsigned i = 0; i < NumSucc; ++i) {
+ if (InnerLoopLatchPredecessorBI->getSuccessor(i) == InnerLoopLatch)
+ InnerLoopLatchPredecessorBI->setSuccessor(i, InnerLoopLatchSuccessor);
+ }
+
+ // Adjust PHI nodes in InnerLoopLatchSuccessor. Update all uses of PHI with
+ // the value and remove this PHI node from inner loop.
+ SmallVector<PHINode *, 8> LcssaVec;
+ for (auto I = InnerLoopLatchSuccessor->begin(); isa<PHINode>(I); ++I) {
+ PHINode *LcssaPhi = cast<PHINode>(I);
+ LcssaVec.push_back(LcssaPhi);
+ }
+ for (auto I = LcssaVec.begin(), E = LcssaVec.end(); I != E; ++I) {
+ PHINode *P = *I;
+ Value *Incoming = P->getIncomingValueForBlock(InnerLoopLatch);
+ P->replaceAllUsesWith(Incoming);
+ P->eraseFromParent();
+ }
+
+ if (OuterLoopLatchBI->getSuccessor(0) == OuterLoopHeader)
+ OuterLoopLatchSuccessor = OuterLoopLatchBI->getSuccessor(1);
+ else
+ OuterLoopLatchSuccessor = OuterLoopLatchBI->getSuccessor(0);
+
+ if (InnerLoopLatchBI->getSuccessor(1) == InnerLoopLatchSuccessor)
+ InnerLoopLatchBI->setSuccessor(1, OuterLoopLatchSuccessor);
+ else
+ InnerLoopLatchBI->setSuccessor(0, OuterLoopLatchSuccessor);
+
+ updateIncomingBlock(OuterLoopLatchSuccessor, OuterLoopLatch, InnerLoopLatch);
+
+ if (OuterLoopLatchBI->getSuccessor(0) == OuterLoopLatchSuccessor) {
+ OuterLoopLatchBI->setSuccessor(0, InnerLoopLatch);
+ } else {
+ OuterLoopLatchBI->setSuccessor(1, InnerLoopLatch);
+ }
+
+ return true;
+}
+void LoopInterchangeTransform::adjustLoopPreheaders() {
+
+  // We have interchanged the preheaders, so we need to interchange their
+  // contents as well: the contents of the inner preheader were previously
+  // executed inside the outer loop.
+ BasicBlock *OuterLoopPreHeader = OuterLoop->getLoopPreheader();
+ BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader();
+ BasicBlock *OuterLoopHeader = OuterLoop->getHeader();
+ BranchInst *InnerTermBI =
+ cast<BranchInst>(InnerLoopPreHeader->getTerminator());
+
+  // These instructions should now be executed inside the loop.
+  // Move them right before the outer loop header's terminator.
+ moveBBContents(InnerLoopPreHeader, OuterLoopHeader->getTerminator());
+  // These instructions were not previously executed inside the loop, so move
+  // them to the old inner loop preheader.
+ moveBBContents(OuterLoopPreHeader, InnerTermBI);
+}
+
+bool LoopInterchangeTransform::adjustLoopLinks() {
+
+ // Adjust all branches in the inner and outer loop.
+ bool Changed = adjustLoopBranches();
+ if (Changed)
+ adjustLoopPreheaders();
+ return Changed;
+}
+
+char LoopInterchange::ID = 0;
+INITIALIZE_PASS_BEGIN(LoopInterchange, "loop-interchange",
+ "Interchanges loops for cache reuse", false, false)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_DEPENDENCY(DependenceAnalysis)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
+INITIALIZE_PASS_DEPENDENCY(LCSSA)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
+
+INITIALIZE_PASS_END(LoopInterchange, "loop-interchange",
+ "Interchanges loops for cache reuse", false, false)
+
+Pass *llvm::createLoopInterchangePass() { return new LoopInterchange(); }
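+
+// Usage sketch (an assumption, not part of this patch): once the pass is
+// registered as above, it can be exercised on a module explicitly, e.g.
+//   opt -loop-interchange -S input.ll -o output.ll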
diff --git a/lib/Transforms/Scalar/LoopRerollPass.cpp b/lib/Transforms/Scalar/LoopRerollPass.cpp
index 8f122041c248..ed103e6b8ed6 100644
--- a/lib/Transforms/Scalar/LoopRerollPass.cpp
+++ b/lib/Transforms/Scalar/LoopRerollPass.cpp
@@ -12,7 +12,9 @@
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
@@ -21,6 +23,7 @@
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
@@ -28,7 +31,6 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
@@ -43,6 +45,12 @@ static cl::opt<unsigned>
MaxInc("max-reroll-increment", cl::init(2048), cl::Hidden,
cl::desc("The maximum increment for loop rerolling"));
+static cl::opt<unsigned>
+NumToleratedFailedMatches("reroll-num-tolerated-failed-matches", cl::init(400),
+ cl::Hidden,
+ cl::desc("The maximum number of failures to tolerate"
+ " during fuzzy matching. (default: 400)"));
+
// This loop re-rolling transformation aims to transform loops like this:
//
// int foo(int a);
@@ -119,6 +127,16 @@ MaxInc("max-reroll-increment", cl::init(2048), cl::Hidden,
// br %cmp, header, exit
namespace {
+ enum IterationLimits {
+ /// The maximum number of iterations that we'll try and reroll. This
+ /// has to be less than 25 in order to fit into a SmallBitVector.
+ IL_MaxRerollIterations = 16,
+ /// The bitvector index used by loop induction variables and other
+ /// instructions that belong to all iterations.
+ IL_All,
+ IL_End
+ };
+
class LoopReroll : public LoopPass {
public:
static char ID; // Pass ID, replacement for typeid
@@ -130,19 +148,18 @@ namespace {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<AliasAnalysis>();
- AU.addRequired<LoopInfo>();
- AU.addPreserved<LoopInfo>();
+ AU.addRequired<LoopInfoWrapperPass>();
+ AU.addPreserved<LoopInfoWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
AU.addRequired<ScalarEvolution>();
- AU.addRequired<TargetLibraryInfo>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
}
-protected:
+ protected:
AliasAnalysis *AA;
LoopInfo *LI;
ScalarEvolution *SE;
- const DataLayout *DL;
TargetLibraryInfo *TLI;
DominatorTree *DT;
@@ -311,26 +328,113 @@ protected:
DenseSet<int> Reds;
};
+ // A DAGRootSet models an induction variable being used in a rerollable
+ // loop. For example,
+ //
+ // x[i*3+0] = y1
+ // x[i*3+1] = y2
+ // x[i*3+2] = y3
+ //
+ // Base instruction -> i*3
+ // +---+----+
+ // / | \
+ // ST[y1] +1 +2 <-- Roots
+ // | |
+ // ST[y2] ST[y3]
+ //
+ // There may be multiple DAGRoots, for example:
+ //
+ // x[i*2+0] = ... (1)
+ // x[i*2+1] = ... (1)
+ // x[i*2+4] = ... (2)
+ // x[i*2+5] = ... (2)
+ // x[(i+1234)*2+5678] = ... (3)
+ // x[(i+1234)*2+5679] = ... (3)
+ //
+ // The loop will be rerolled by adding a new loop induction variable,
+ // one for the Base instruction in each DAGRootSet.
+ //
+ struct DAGRootSet {
+ Instruction *BaseInst;
+ SmallInstructionVector Roots;
+ // The instructions between IV and BaseInst (but not including BaseInst).
+ SmallInstructionSet SubsumedInsts;
+ };
+
+ // The set of all DAG roots, and state tracking of all roots
+ // for a particular induction variable.
+ struct DAGRootTracker {
+ DAGRootTracker(LoopReroll *Parent, Loop *L, Instruction *IV,
+ ScalarEvolution *SE, AliasAnalysis *AA,
+ TargetLibraryInfo *TLI)
+ : Parent(Parent), L(L), SE(SE), AA(AA), TLI(TLI), IV(IV) {}
+
+ /// Stage 1: Find all the DAG roots for the induction variable.
+ bool findRoots();
+ /// Stage 2: Validate if the found roots are valid.
+ bool validate(ReductionTracker &Reductions);
+ /// Stage 3: Assuming validate() returned true, perform the
+ /// replacement.
+ /// @param IterCount The maximum iteration count of L.
+ void replace(const SCEV *IterCount);
+
+ protected:
+ typedef MapVector<Instruction*, SmallBitVector> UsesTy;
+
+ bool findRootsRecursive(Instruction *IVU,
+ SmallInstructionSet SubsumedInsts);
+ bool findRootsBase(Instruction *IVU, SmallInstructionSet SubsumedInsts);
+ bool collectPossibleRoots(Instruction *Base,
+ std::map<int64_t,Instruction*> &Roots);
+
+ bool collectUsedInstructions(SmallInstructionSet &PossibleRedSet);
+ void collectInLoopUserSet(const SmallInstructionVector &Roots,
+ const SmallInstructionSet &Exclude,
+ const SmallInstructionSet &Final,
+ DenseSet<Instruction *> &Users);
+ void collectInLoopUserSet(Instruction *Root,
+ const SmallInstructionSet &Exclude,
+ const SmallInstructionSet &Final,
+ DenseSet<Instruction *> &Users);
+
+ UsesTy::iterator nextInstr(int Val, UsesTy &In,
+ const SmallInstructionSet &Exclude,
+ UsesTy::iterator *StartI=nullptr);
+ bool isBaseInst(Instruction *I);
+ bool isRootInst(Instruction *I);
+ bool instrDependsOn(Instruction *I,
+ UsesTy::iterator Start,
+ UsesTy::iterator End);
+
+ LoopReroll *Parent;
+
+ // Members of Parent, replicated here for brevity.
+ Loop *L;
+ ScalarEvolution *SE;
+ AliasAnalysis *AA;
+ TargetLibraryInfo *TLI;
+
+ // The loop induction variable.
+ Instruction *IV;
+ // Loop step amount.
+ uint64_t Inc;
+ // Loop reroll count; if Inc == 1, this records the scaling applied
+ // to the indvar: a[i*2+0] = ...; a[i*2+1] = ... ;
+ // If Inc is not 1, Scale = Inc.
+ uint64_t Scale;
+ // The roots themselves.
+ SmallVector<DAGRootSet,16> RootSets;
+ // All increment instructions for IV.
+ SmallInstructionVector LoopIncs;
+ // Map of all instructions in the loop (in order) to the iterations
+ // they are used in (or specially, IL_All for instructions
+ // used in the loop increment mechanism).
+ UsesTy Uses;
+ };
+
void collectPossibleIVs(Loop *L, SmallInstructionVector &PossibleIVs);
void collectPossibleReductions(Loop *L,
ReductionTracker &Reductions);
- void collectInLoopUserSet(Loop *L,
- const SmallInstructionVector &Roots,
- const SmallInstructionSet &Exclude,
- const SmallInstructionSet &Final,
- DenseSet<Instruction *> &Users);
- void collectInLoopUserSet(Loop *L,
- Instruction * Root,
- const SmallInstructionSet &Exclude,
- const SmallInstructionSet &Final,
- DenseSet<Instruction *> &Users);
- bool findScaleFromMul(Instruction *RealIV, uint64_t &Scale,
- Instruction *&IV,
- SmallInstructionVector &LoopIncs);
- bool collectAllRoots(Loop *L, uint64_t Inc, uint64_t Scale, Instruction *IV,
- SmallVector<SmallInstructionVector, 32> &Roots,
- SmallInstructionSet &AllRoots,
- SmallInstructionVector &LoopIncs);
bool reroll(Instruction *IV, Loop *L, BasicBlock *Header, const SCEV *IterCount,
ReductionTracker &Reductions);
};
@@ -339,10 +443,10 @@ protected:
char LoopReroll::ID = 0;
INITIALIZE_PASS_BEGIN(LoopReroll, "loop-reroll", "Reroll loops", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
-INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(LoopReroll, "loop-reroll", "Reroll loops", false, false)
Pass *llvm::createLoopRerollPass() {
@@ -353,10 +457,10 @@ Pass *llvm::createLoopRerollPass() {
// This operates like Instruction::isUsedOutsideOfBlock, but considers PHIs in
// non-loop blocks to be outside the loop.
static bool hasUsesOutsideLoop(Instruction *I, Loop *L) {
- for (User *U : I->users())
+ for (User *U : I->users()) {
if (!L->contains(cast<Instruction>(U)))
return true;
-
+ }
return false;
}
@@ -403,6 +507,8 @@ void LoopReroll::SimpleLoopReduction::add(Loop *L) {
// (including the PHI), except for the last value (which is used by the PHI
// and also outside the loop).
Instruction *C = Instructions.front();
+ if (C->user_empty())
+ return;
do {
C = cast<Instruction>(*C->user_begin());
@@ -424,11 +530,12 @@ void LoopReroll::SimpleLoopReduction::add(Loop *L) {
return;
// C is now the (potential) last instruction in the reduction chain.
- for (User *U : C->users())
+ for (User *U : C->users()) {
// The only in-loop user can be the initial PHI.
if (L->contains(cast<Instruction>(U)))
if (cast<Instruction>(U) != Instructions.front())
return;
+ }
Instructions.push_back(C);
Valid = true;
@@ -467,7 +574,7 @@ void LoopReroll::collectPossibleReductions(Loop *L,
// if they are users, but their users are not added. This is used, for
// example, to prevent a reduction update from forcing all later reduction
// updates into the use set.
-void LoopReroll::collectInLoopUserSet(Loop *L,
+void LoopReroll::DAGRootTracker::collectInLoopUserSet(
Instruction *Root, const SmallInstructionSet &Exclude,
const SmallInstructionSet &Final,
DenseSet<Instruction *> &Users) {
@@ -504,14 +611,14 @@ void LoopReroll::collectInLoopUserSet(Loop *L,
// Collect all of the users of all of the provided root instructions (combined
// into a single set).
-void LoopReroll::collectInLoopUserSet(Loop *L,
+void LoopReroll::DAGRootTracker::collectInLoopUserSet(
const SmallInstructionVector &Roots,
const SmallInstructionSet &Exclude,
const SmallInstructionSet &Final,
DenseSet<Instruction *> &Users) {
for (SmallInstructionVector::const_iterator I = Roots.begin(),
IE = Roots.end(); I != IE; ++I)
- collectInLoopUserSet(L, *I, Exclude, Final, Users);
+ collectInLoopUserSet(*I, Exclude, Final, Users);
}
static bool isSimpleLoadStore(Instruction *I) {
@@ -524,130 +631,699 @@ static bool isSimpleLoadStore(Instruction *I) {
return false;
}
-// Recognize loops that are setup like this:
-//
-// %iv = phi [ (preheader, ...), (body, %iv.next) ]
-// %scaled.iv = mul %iv, scale
-// f(%scaled.iv)
-// %scaled.iv.1 = add %scaled.iv, 1
-// f(%scaled.iv.1)
-// %scaled.iv.2 = add %scaled.iv, 2
-// f(%scaled.iv.2)
-// %scaled.iv.scale_m_1 = add %scaled.iv, scale-1
-// f(%scaled.iv.scale_m_1)
-// ...
-// %iv.next = add %iv, 1
-// %cmp = icmp(%iv, ...)
-// br %cmp, header, exit
-//
-// and, if found, set IV = %scaled.iv, and add %iv.next to LoopIncs.
-bool LoopReroll::findScaleFromMul(Instruction *RealIV, uint64_t &Scale,
- Instruction *&IV,
- SmallInstructionVector &LoopIncs) {
- // This is a special case: here we're looking for all uses (except for
- // the increment) to be multiplied by a common factor. The increment must
- // be by one. This is to capture loops like:
- // for (int i = 0; i < 500; ++i) {
- // foo(3*i); foo(3*i+1); foo(3*i+2);
- // }
- if (RealIV->getNumUses() != 2)
+/// Return true if IVU is a "simple" arithmetic operation.
+/// This is used for narrowing the search space for DAGRoots; only arithmetic
+/// and GEPs can be part of a DAGRoot.
+static bool isSimpleArithmeticOp(User *IVU) {
+ if (Instruction *I = dyn_cast<Instruction>(IVU)) {
+ switch (I->getOpcode()) {
+ default: return false;
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::Shl:
+ case Instruction::AShr:
+ case Instruction::LShr:
+ case Instruction::GetElementPtr:
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool isLoopIncrement(User *U, Instruction *IV) {
+ BinaryOperator *BO = dyn_cast<BinaryOperator>(U);
+ if (!BO || BO->getOpcode() != Instruction::Add)
+ return false;
+
+ for (auto *UU : BO->users()) {
+ PHINode *PN = dyn_cast<PHINode>(UU);
+ if (PN && PN == IV)
+ return true;
+ }
+ return false;
+}
+
+bool LoopReroll::DAGRootTracker::
+collectPossibleRoots(Instruction *Base, std::map<int64_t,Instruction*> &Roots) {
+ SmallInstructionVector BaseUsers;
+
+ for (auto *I : Base->users()) {
+ ConstantInt *CI = nullptr;
+
+ if (isLoopIncrement(I, IV)) {
+ LoopIncs.push_back(cast<Instruction>(I));
+ continue;
+ }
+
+ // The root nodes must be either GEPs, ORs or ADDs.
+ if (auto *BO = dyn_cast<BinaryOperator>(I)) {
+ if (BO->getOpcode() == Instruction::Add ||
+ BO->getOpcode() == Instruction::Or)
+ CI = dyn_cast<ConstantInt>(BO->getOperand(1));
+ } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
+ Value *LastOperand = GEP->getOperand(GEP->getNumOperands()-1);
+ CI = dyn_cast<ConstantInt>(LastOperand);
+ }
+
+ if (!CI) {
+ if (Instruction *II = dyn_cast<Instruction>(I)) {
+ BaseUsers.push_back(II);
+ continue;
+ } else {
+ DEBUG(dbgs() << "LRR: Aborting due to non-instruction: " << *I << "\n");
+ return false;
+ }
+ }
+
+ int64_t V = CI->getValue().getSExtValue();
+ if (Roots.find(V) != Roots.end())
+ // No duplicates, please.
+ return false;
+
+ // FIXME: Add support for negative values.
+ if (V < 0) {
+ DEBUG(dbgs() << "LRR: Aborting due to negative value: " << V << "\n");
+ return false;
+ }
+
+ Roots[V] = cast<Instruction>(I);
+ }
+
+ if (Roots.empty())
return false;
- const SCEVAddRecExpr *RealIVSCEV = cast<SCEVAddRecExpr>(SE->getSCEV(RealIV));
- Instruction *User1 = cast<Instruction>(*RealIV->user_begin()),
- *User2 = cast<Instruction>(*std::next(RealIV->user_begin()));
- if (!SE->isSCEVable(User1->getType()) || !SE->isSCEVable(User2->getType()))
+
+ // If we found non-loop-inc, non-root users of Base, assume they are
+ // for the zeroth root index. This is because "add %a, 0" gets optimized
+ // away.
+ if (BaseUsers.size()) {
+ if (Roots.find(0) != Roots.end()) {
+ DEBUG(dbgs() << "LRR: Multiple roots found for base - aborting!\n");
+ return false;
+ }
+ Roots[0] = Base;
+ }
+
+ // Calculate the number of users of the base, or lowest indexed, iteration.
+ unsigned NumBaseUses = BaseUsers.size();
+ if (NumBaseUses == 0)
+ NumBaseUses = Roots.begin()->second->getNumUses();
+
+ // Check that every node has the same number of users.
+ for (auto &KV : Roots) {
+ if (KV.first == 0)
+ continue;
+ if (KV.second->getNumUses() != NumBaseUses) {
+ DEBUG(dbgs() << "LRR: Aborting - Root and Base #users not the same: "
+ << "#Base=" << NumBaseUses << ", #Root=" <<
+ KV.second->getNumUses() << "\n");
+ return false;
+ }
+ }
+
+ return true;
+}
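As a rough source-level picture of what collectPossibleRoots gathers (a sketch only; the names foo3 and x are made up and nothing below is taken from the patch): the offset-0 computation is folded away by earlier passes, which is why plain users of Base are assigned root index 0, while the +1 and +2 computations become the roots keyed by their constant operands.

// Hypothetical source loop; foo3 and x are placeholders.
void foo3(float *x, int n) {
  for (int i = 0; i < n; ++i) {
    x[3 * i + 0] = 0.0f;   // offset 0: the "add 0" is folded away, so these
                           // users of the base land in BaseUsers, index 0
    x[3 * i + 1] = 0.0f;   // root keyed by constant 1
    x[3 * i + 2] = 0.0f;   // root keyed by constant 2
  }
}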
+
+bool LoopReroll::DAGRootTracker::
+findRootsRecursive(Instruction *I, SmallInstructionSet SubsumedInsts) {
+ // Does the user look like it could be part of a root set?
+ // All its users must be simple arithmetic ops.
+ if (I->getNumUses() > IL_MaxRerollIterations)
return false;
- const SCEVAddRecExpr *User1SCEV =
- dyn_cast<SCEVAddRecExpr>(SE->getSCEV(User1)),
- *User2SCEV =
- dyn_cast<SCEVAddRecExpr>(SE->getSCEV(User2));
- if (!User1SCEV || !User1SCEV->isAffine() ||
- !User2SCEV || !User2SCEV->isAffine())
+
+ if ((I->getOpcode() == Instruction::Mul ||
+ I->getOpcode() == Instruction::PHI) &&
+ I != IV &&
+ findRootsBase(I, SubsumedInsts))
+ return true;
+
+ SubsumedInsts.insert(I);
+
+ for (User *V : I->users()) {
+ Instruction *I = dyn_cast<Instruction>(V);
+ if (std::find(LoopIncs.begin(), LoopIncs.end(), I) != LoopIncs.end())
+ continue;
+
+ if (!I || !isSimpleArithmeticOp(I) ||
+ !findRootsRecursive(I, SubsumedInsts))
+ return false;
+ }
+ return true;
+}
+
+bool LoopReroll::DAGRootTracker::
+findRootsBase(Instruction *IVU, SmallInstructionSet SubsumedInsts) {
+
+ // The base instruction needs to be a multiply so
+ // that we can erase it.
+ if (IVU->getOpcode() != Instruction::Mul &&
+ IVU->getOpcode() != Instruction::PHI)
+ return false;
+
+ std::map<int64_t, Instruction*> V;
+ if (!collectPossibleRoots(IVU, V))
return false;
- // We assume below that User1 is the scale multiply and User2 is the
- // increment. If this can't be true, then swap them.
- if (User1SCEV == RealIVSCEV->getPostIncExpr(*SE)) {
- std::swap(User1, User2);
- std::swap(User1SCEV, User2SCEV);
+ // If we didn't get a root for index zero, then IVU must be
+ // subsumed.
+ if (V.find(0) == V.end())
+ SubsumedInsts.insert(IVU);
+
+ // Partition the vector into monotonically increasing indexes.
+ DAGRootSet DRS;
+ DRS.BaseInst = nullptr;
+
+ for (auto &KV : V) {
+ if (!DRS.BaseInst) {
+ DRS.BaseInst = KV.second;
+ DRS.SubsumedInsts = SubsumedInsts;
+ } else if (DRS.Roots.empty()) {
+ DRS.Roots.push_back(KV.second);
+ } else if (V.find(KV.first - 1) != V.end()) {
+ DRS.Roots.push_back(KV.second);
+ } else {
+ // Linear sequence terminated.
+ RootSets.push_back(DRS);
+ DRS.BaseInst = KV.second;
+ DRS.SubsumedInsts = SubsumedInsts;
+ DRS.Roots.clear();
+ }
+ }
+ RootSets.push_back(DRS);
+
+ return true;
+}
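A toy restatement of the run-splitting loop above, on plain integers rather than Instruction pointers (illustrative only; the real code also carries SubsumedInsts along with each set): indices {0, 1, 2, 4, 5, 6} split at the gap before 4 into two root sets, one based at 0 with roots {1, 2} and one based at 4 with roots {5, 6}.

#include <cstdint>
#include <cstdio>
#include <map>
#include <vector>

int main() {
  std::map<int64_t, char> V = {{0,'a'},{1,'b'},{2,'c'},{4,'d'},{5,'e'},{6,'f'}};
  std::vector<std::vector<char>> Sets;      // first element of each set = base
  for (const auto &KV : V) {
    // The second element of a set always joins; after that, a missing
    // predecessor index terminates the linear sequence and starts a new set.
    if (Sets.empty() || (Sets.back().size() > 1 && !V.count(KV.first - 1)))
      Sets.push_back({});
    Sets.back().push_back(KV.second);
  }
  for (const auto &S : Sets) {              // prints "a b c" then "d e f"
    for (char C : S) std::printf("%c ", C);
    std::printf("\n");
  }
}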
+
+bool LoopReroll::DAGRootTracker::findRoots() {
+
+ const SCEVAddRecExpr *RealIVSCEV = cast<SCEVAddRecExpr>(SE->getSCEV(IV));
+ Inc = cast<SCEVConstant>(RealIVSCEV->getOperand(1))->
+ getValue()->getZExtValue();
+
+ assert(RootSets.empty() && "Unclean state!");
+ if (Inc == 1) {
+ for (auto *IVU : IV->users()) {
+ if (isLoopIncrement(IVU, IV))
+ LoopIncs.push_back(cast<Instruction>(IVU));
+ }
+ if (!findRootsRecursive(IV, SmallInstructionSet()))
+ return false;
+ LoopIncs.push_back(IV);
+ } else {
+ if (!findRootsBase(IV, SmallInstructionSet()))
+ return false;
}
- if (User2SCEV != RealIVSCEV->getPostIncExpr(*SE))
+ // Ensure all sets have the same size.
+ if (RootSets.empty()) {
+ DEBUG(dbgs() << "LRR: Aborting because no root sets found!\n");
return false;
- assert(User2SCEV->getStepRecurrence(*SE)->isOne() &&
- "Invalid non-unit step for multiplicative scaling");
- LoopIncs.push_back(User2);
-
- if (const SCEVConstant *MulScale =
- dyn_cast<SCEVConstant>(User1SCEV->getStepRecurrence(*SE))) {
- // Make sure that both the start and step have the same multiplier.
- if (RealIVSCEV->getStart()->getType() != MulScale->getType())
+ }
+ for (auto &V : RootSets) {
+ if (V.Roots.empty() || V.Roots.size() != RootSets[0].Roots.size()) {
+ DEBUG(dbgs()
+ << "LRR: Aborting because not all root sets have the same size\n");
return false;
- if (SE->getMulExpr(RealIVSCEV->getStart(), MulScale) !=
- User1SCEV->getStart())
+ }
+ }
+
+ // And ensure all loop iterations are consecutive. We rely on std::map
+ // providing ordered traversal.
+ for (auto &V : RootSets) {
+ const auto *ADR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(V.BaseInst));
+ if (!ADR)
return false;
- ConstantInt *MulScaleCI = MulScale->getValue();
- if (!MulScaleCI->uge(2) || MulScaleCI->uge(MaxInc))
+ // Consider a DAGRootSet with N-1 roots (so N different values including
+ // BaseInst).
+ // Define d = Roots[0] - BaseInst, which should be the same as
+ // Roots[I] - Roots[I-1] for all I in [1..N).
+ // Define D = BaseInst@J - BaseInst@J-1, where "@J" means the value at the
+ // loop iteration J.
+ //
+ // Now, For the loop iterations to be consecutive:
+ // D = d * N
+
+ unsigned N = V.Roots.size() + 1;
+ const SCEV *StepSCEV = SE->getMinusSCEV(SE->getSCEV(V.Roots[0]), ADR);
+ const SCEV *ScaleSCEV = SE->getConstant(StepSCEV->getType(), N);
+ if (ADR->getStepRecurrence(*SE) != SE->getMulExpr(StepSCEV, ScaleSCEV)) {
+ DEBUG(dbgs() << "LRR: Aborting because iterations are not consecutive\n");
return false;
- Scale = MulScaleCI->getZExtValue();
- IV = User1;
- } else
+ }
+ }
+ Scale = RootSets[0].Roots.size() + 1;
+
+ if (Scale > IL_MaxRerollIterations) {
+ DEBUG(dbgs() << "LRR: Aborting - too many iterations found. "
+ << "#Found=" << Scale << ", #Max=" << IL_MaxRerollIterations
+ << "\n");
return false;
+ }
+
+ DEBUG(dbgs() << "LRR: Successfully found roots: Scale=" << Scale << "\n");
- DEBUG(dbgs() << "LRR: Found possible scaling " << *User1 << "\n");
return true;
}
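To put concrete numbers on the D = d * N condition checked above (an illustration, not code from the pass): for a loop that was unrolled by three, the values inside one iteration differ by d = 1, there are N = 3 of them per root set, and the base advances by D = 3 from one iteration to the next, so the unrolled iterations tile the index space with no gaps or overlap.

// Illustrative only; x and use() are placeholders, not names from the pass.
extern void use(int);

void unrolled_by_3(int *x, int n) {
  for (int i = 0; i < n; i += 3) {   // D = 3: the base advances by 3 per iteration
    use(x[i]);                       // BaseInst
    use(x[i + 1]);                   // Roots[0]  -> d = 1
    use(x[i + 2]);                   // Roots[1]
  }                                  // N = 3 values per set, so D == d * N holds
}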
-// Collect all root increments with respect to the provided induction variable
-// (normally the PHI, but sometimes a multiply). A root increment is an
-// instruction, normally an add, with a positive constant less than Scale. In a
-// rerollable loop, each of these increments is the root of an instruction
-// graph isomorphic to the others. Also, we collect the final induction
-// increment (the increment equal to the Scale), and its users in LoopIncs.
-bool LoopReroll::collectAllRoots(Loop *L, uint64_t Inc, uint64_t Scale,
- Instruction *IV,
- SmallVector<SmallInstructionVector, 32> &Roots,
- SmallInstructionSet &AllRoots,
- SmallInstructionVector &LoopIncs) {
- for (User *U : IV->users()) {
- Instruction *UI = cast<Instruction>(U);
- if (!SE->isSCEVable(UI->getType()))
- continue;
- if (UI->getType() != IV->getType())
- continue;
- if (!L->contains(UI))
- continue;
- if (hasUsesOutsideLoop(UI, L))
- continue;
+bool LoopReroll::DAGRootTracker::collectUsedInstructions(SmallInstructionSet &PossibleRedSet) {
+ // Populate the MapVector with all instructions in the block, in order first,
+ // so we can iterate over the contents later in perfect order.
+ for (auto &I : *L->getHeader()) {
+ Uses[&I].resize(IL_End);
+ }
+
+ SmallInstructionSet Exclude;
+ for (auto &DRS : RootSets) {
+ Exclude.insert(DRS.Roots.begin(), DRS.Roots.end());
+ Exclude.insert(DRS.SubsumedInsts.begin(), DRS.SubsumedInsts.end());
+ Exclude.insert(DRS.BaseInst);
+ }
+ Exclude.insert(LoopIncs.begin(), LoopIncs.end());
+
+ for (auto &DRS : RootSets) {
+ DenseSet<Instruction*> VBase;
+ collectInLoopUserSet(DRS.BaseInst, Exclude, PossibleRedSet, VBase);
+ for (auto *I : VBase) {
+ Uses[I].set(0);
+ }
+
+ unsigned Idx = 1;
+ for (auto *Root : DRS.Roots) {
+ DenseSet<Instruction*> V;
+ collectInLoopUserSet(Root, Exclude, PossibleRedSet, V);
- if (const SCEVConstant *Diff = dyn_cast<SCEVConstant>(SE->getMinusSCEV(
- SE->getSCEV(UI), SE->getSCEV(IV)))) {
- uint64_t Idx = Diff->getValue()->getValue().getZExtValue();
- if (Idx > 0 && Idx < Scale) {
- Roots[Idx-1].push_back(UI);
- AllRoots.insert(UI);
- } else if (Idx == Scale && Inc > 1) {
- LoopIncs.push_back(UI);
+ // While we're here, check the use sets are the same size.
+ if (V.size() != VBase.size()) {
+ DEBUG(dbgs() << "LRR: Aborting - use sets are different sizes\n");
+ return false;
+ }
+
+ for (auto *I : V) {
+ Uses[I].set(Idx);
}
+ ++Idx;
}
+
+ // Make sure our subsumed instructions are remembered too.
+ for (auto *I : DRS.SubsumedInsts) {
+ Uses[I].set(IL_All);
+ }
+ }
+
+ // Make sure the loop increments are also accounted for.
+
+ Exclude.clear();
+ for (auto &DRS : RootSets) {
+ Exclude.insert(DRS.Roots.begin(), DRS.Roots.end());
+ Exclude.insert(DRS.SubsumedInsts.begin(), DRS.SubsumedInsts.end());
+ Exclude.insert(DRS.BaseInst);
+ }
+
+ DenseSet<Instruction*> V;
+ collectInLoopUserSet(LoopIncs, Exclude, PossibleRedSet, V);
+ for (auto *I : V) {
+ Uses[I].set(IL_All);
+ }
+
+ return true;
+
+}
+
+/// Get the next instruction in "In" that is a member of set Val.
+/// Start searching from StartI, and do not return anything in Exclude.
+/// If StartI is not given, start from In.begin().
+LoopReroll::DAGRootTracker::UsesTy::iterator
+LoopReroll::DAGRootTracker::nextInstr(int Val, UsesTy &In,
+ const SmallInstructionSet &Exclude,
+ UsesTy::iterator *StartI) {
+ UsesTy::iterator I = StartI ? *StartI : In.begin();
+ while (I != In.end() && (I->second.test(Val) == 0 ||
+ Exclude.count(I->first) != 0))
+ ++I;
+ return I;
+}
+
+bool LoopReroll::DAGRootTracker::isBaseInst(Instruction *I) {
+ for (auto &DRS : RootSets) {
+ if (DRS.BaseInst == I)
+ return true;
}
+ return false;
+}
- if (Roots[0].empty())
+bool LoopReroll::DAGRootTracker::isRootInst(Instruction *I) {
+ for (auto &DRS : RootSets) {
+ if (std::find(DRS.Roots.begin(), DRS.Roots.end(), I) != DRS.Roots.end())
+ return true;
+ }
+ return false;
+}
+
+/// Return true if instruction I depends on any instruction between
+/// Start and End.
+bool LoopReroll::DAGRootTracker::instrDependsOn(Instruction *I,
+ UsesTy::iterator Start,
+ UsesTy::iterator End) {
+ for (auto *U : I->users()) {
+ for (auto It = Start; It != End; ++It)
+ if (U == It->first)
+ return true;
+ }
+ return false;
+}
+
+bool LoopReroll::DAGRootTracker::validate(ReductionTracker &Reductions) {
+ // We now need to check for equivalence of the use graph of each root with
+ // that of the primary induction variable (excluding the roots). Our goal
+ // here is not to solve the full graph isomorphism problem, but rather to
+ // catch common cases without a lot of work. As a result, we will assume
+ // that the relative order of the instructions in each unrolled iteration
+ // is the same (although we will not make an assumption about how the
+ // different iterations are intermixed). Note that while the order must be
+ // the same, the instructions may not be in the same basic block.
+
+ // An array of just the possible reductions for this scale factor. When we
+ // collect the set of all users of some root instructions, these reduction
+ // instructions are treated as 'final' (their uses are not considered).
+ // This is important because we don't want the root use set to search down
+ // the reduction chain.
+ SmallInstructionSet PossibleRedSet;
+ SmallInstructionSet PossibleRedLastSet;
+ SmallInstructionSet PossibleRedPHISet;
+ Reductions.restrictToScale(Scale, PossibleRedSet,
+ PossibleRedPHISet, PossibleRedLastSet);
+
+ // Populate "Uses" with where each instruction is used.
+ if (!collectUsedInstructions(PossibleRedSet))
return false;
- bool AllSame = true;
- for (unsigned i = 1; i < Scale-1; ++i)
- if (Roots[i].size() != Roots[0].size()) {
- AllSame = false;
- break;
+
+ // Make sure we mark the reduction PHIs as used in all iterations.
+ for (auto *I : PossibleRedPHISet) {
+ Uses[I].set(IL_All);
+ }
+
+ // Make sure all instructions in the loop are in one and only one
+ // set.
+ for (auto &KV : Uses) {
+ if (KV.second.count() != 1) {
+ DEBUG(dbgs() << "LRR: Aborting - instruction is not used in 1 iteration: "
+ << *KV.first << " (#uses=" << KV.second.count() << ")\n");
+ return false;
}
+ }
- if (!AllSame)
- return false;
+ DEBUG(
+ for (auto &KV : Uses) {
+ dbgs() << "LRR: " << KV.second.find_first() << "\t" << *KV.first << "\n";
+ }
+ );
+
+ for (unsigned Iter = 1; Iter < Scale; ++Iter) {
+ // In addition to regular aliasing information, we need to look for
+ // instructions from later (future) iterations that have side effects
+ // preventing us from reordering them past other instructions with side
+ // effects.
+ bool FutureSideEffects = false;
+ AliasSetTracker AST(*AA);
+ // The map between instructions in f(%iv.(i+1)) and f(%iv).
+ DenseMap<Value *, Value *> BaseMap;
+
+ // Compare iteration Iter to the base.
+ SmallInstructionSet Visited;
+ auto BaseIt = nextInstr(0, Uses, Visited);
+ auto RootIt = nextInstr(Iter, Uses, Visited);
+ auto LastRootIt = Uses.begin();
+
+ while (BaseIt != Uses.end() && RootIt != Uses.end()) {
+ Instruction *BaseInst = BaseIt->first;
+ Instruction *RootInst = RootIt->first;
+
+ // Skip over the IV or root instructions; only match their users.
+ bool Continue = false;
+ if (isBaseInst(BaseInst)) {
+ Visited.insert(BaseInst);
+ BaseIt = nextInstr(0, Uses, Visited);
+ Continue = true;
+ }
+ if (isRootInst(RootInst)) {
+ LastRootIt = RootIt;
+ Visited.insert(RootInst);
+ RootIt = nextInstr(Iter, Uses, Visited);
+ Continue = true;
+ }
+ if (Continue) continue;
+
+ if (!BaseInst->isSameOperationAs(RootInst)) {
+ // Last chance saloon. We don't try and solve the full isomorphism
+ // problem, but try and at least catch the case where two instructions
+ // *of different types* are round the wrong way. We won't be able to
+ // efficiently tell, given two ADD instructions, which way around we
+ // should match them, but given an ADD and a SUB, we can at least infer
+ // which one is which.
+ //
+ // This should allow us to deal with a greater subset of the isomorphism
+ // problem. It does however change a linear algorithm into a quadratic
+ // one, so limit the number of probes we do.
+ auto TryIt = RootIt;
+ unsigned N = NumToleratedFailedMatches;
+ while (TryIt != Uses.end() &&
+ !BaseInst->isSameOperationAs(TryIt->first) &&
+ N--) {
+ ++TryIt;
+ TryIt = nextInstr(Iter, Uses, Visited, &TryIt);
+ }
+
+ if (TryIt == Uses.end() || TryIt == RootIt ||
+ instrDependsOn(TryIt->first, RootIt, TryIt)) {
+ DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst <<
+ " vs. " << *RootInst << "\n");
+ return false;
+ }
+
+ RootIt = TryIt;
+ RootInst = TryIt->first;
+ }
+
+ // All instructions between the last root and this root
+ // may belong to some other iteration. If they belong to a
+ // future iteration, then they're dangerous to alias with.
+ //
+ // Note that because we allow a limited amount of flexibility in the order
+ // that we visit nodes, LastRootIt might be *before* RootIt, in which
+ // case we've already checked this set of instructions so we shouldn't
+ // do anything.
+ for (; LastRootIt < RootIt; ++LastRootIt) {
+ Instruction *I = LastRootIt->first;
+ if (LastRootIt->second.find_first() < (int)Iter)
+ continue;
+ if (I->mayWriteToMemory())
+ AST.add(I);
+ // Note: This is specifically guarded by a check on isa<PHINode>,
+ // which while a valid (somewhat arbitrary) micro-optimization, is
+ // needed because otherwise isSafeToSpeculativelyExecute returns
+ // false on PHI nodes.
+ if (!isa<PHINode>(I) && !isSimpleLoadStore(I) &&
+ !isSafeToSpeculativelyExecute(I))
+ // Intervening instructions cause side effects.
+ FutureSideEffects = true;
+ }
+
+ // Make sure that this instruction, which is in the use set of this
+ // root instruction, does not also belong to the base set or the set of
+ // some other root instruction.
+ if (RootIt->second.count() > 1) {
+ DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst <<
+ " vs. " << *RootInst << " (prev. case overlap)\n");
+ return false;
+ }
+
+ // Make sure that we don't alias with any instruction in the alias set
+ // tracker. If we do, then we depend on a future iteration, and we
+ // can't reroll.
+ if (RootInst->mayReadFromMemory())
+ for (auto &K : AST) {
+ if (K.aliasesUnknownInst(RootInst, *AA)) {
+ DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst <<
+ " vs. " << *RootInst << " (depends on future store)\n");
+ return false;
+ }
+ }
+
+      // If we've passed an instruction from a future iteration that may have
+ // side effects, and this instruction might also, then we can't reorder
+ // them, and this matching fails. As an exception, we allow the alias
+ // set tracker to handle regular (simple) load/store dependencies.
+ if (FutureSideEffects && ((!isSimpleLoadStore(BaseInst) &&
+ !isSafeToSpeculativelyExecute(BaseInst)) ||
+ (!isSimpleLoadStore(RootInst) &&
+ !isSafeToSpeculativelyExecute(RootInst)))) {
+ DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst <<
+ " vs. " << *RootInst <<
+ " (side effects prevent reordering)\n");
+ return false;
+ }
+
+ // For instructions that are part of a reduction, if the operation is
+ // associative, then don't bother matching the operands (because we
+ // already know that the instructions are isomorphic, and the order
+ // within the iteration does not matter). For non-associative reductions,
+ // we do need to match the operands, because we need to reject
+ // out-of-order instructions within an iteration!
+ // For example (assume floating-point addition), we need to reject this:
+ // x += a[i]; x += b[i];
+ // x += a[i+1]; x += b[i+1];
+ // x += b[i+2]; x += a[i+2];
+ bool InReduction = Reductions.isPairInSame(BaseInst, RootInst);
+
+ if (!(InReduction && BaseInst->isAssociative())) {
+ bool Swapped = false, SomeOpMatched = false;
+ for (unsigned j = 0; j < BaseInst->getNumOperands(); ++j) {
+ Value *Op2 = RootInst->getOperand(j);
+
+ // If this is part of a reduction (and the operation is not
+        // associative), then we match all operands, but not those that are
+ // part of the reduction.
+ if (InReduction)
+ if (Instruction *Op2I = dyn_cast<Instruction>(Op2))
+ if (Reductions.isPairInSame(RootInst, Op2I))
+ continue;
+
+ DenseMap<Value *, Value *>::iterator BMI = BaseMap.find(Op2);
+ if (BMI != BaseMap.end()) {
+ Op2 = BMI->second;
+ } else {
+ for (auto &DRS : RootSets) {
+ if (DRS.Roots[Iter-1] == (Instruction*) Op2) {
+ Op2 = DRS.BaseInst;
+ break;
+ }
+ }
+ }
+
+ if (BaseInst->getOperand(Swapped ? unsigned(!j) : j) != Op2) {
+ // If we've not already decided to swap the matched operands, and
+ // we've not already matched our first operand (note that we could
+ // have skipped matching the first operand because it is part of a
+ // reduction above), and the instruction is commutative, then try
+ // the swapped match.
+ if (!Swapped && BaseInst->isCommutative() && !SomeOpMatched &&
+ BaseInst->getOperand(!j) == Op2) {
+ Swapped = true;
+ } else {
+ DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst
+ << " vs. " << *RootInst << " (operand " << j << ")\n");
+ return false;
+ }
+ }
+
+ SomeOpMatched = true;
+ }
+ }
+
+ if ((!PossibleRedLastSet.count(BaseInst) &&
+ hasUsesOutsideLoop(BaseInst, L)) ||
+ (!PossibleRedLastSet.count(RootInst) &&
+ hasUsesOutsideLoop(RootInst, L))) {
+ DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst <<
+ " vs. " << *RootInst << " (uses outside loop)\n");
+ return false;
+ }
+
+ Reductions.recordPair(BaseInst, RootInst, Iter);
+ BaseMap.insert(std::make_pair(RootInst, BaseInst));
+
+ LastRootIt = RootIt;
+ Visited.insert(BaseInst);
+ Visited.insert(RootInst);
+ BaseIt = nextInstr(0, Uses, Visited);
+ RootIt = nextInstr(Iter, Uses, Visited);
+ }
+ assert (BaseIt == Uses.end() && RootIt == Uses.end() &&
+ "Mismatched set sizes!");
+ }
+
+ DEBUG(dbgs() << "LRR: Matched all iteration increments for " <<
+ *IV << "\n");
return true;
}
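A minimal sketch of the bookkeeping validate() leans on, using standard containers instead of the pass's MapVector and sparse bit vectors (the instruction names and the IL_All slot number are invented for the example): every header instruction must end up claimed by exactly one iteration slot, and anything claimed by zero or several slots aborts the reroll.

#include <bitset>
#include <cstdio>
#include <map>
#include <string>

int main() {
  constexpr unsigned IL_All = 31;              // stand-in for the "all iterations" slot
  std::map<std::string, std::bitset<32>> Uses;
  Uses["t0 = load x[i]"].set(0);               // base iteration
  Uses["t1 = load x[i+1]"].set(1);             // iteration 1
  Uses["t2 = load x[i+2]"].set(2);             // iteration 2
  Uses["i.next = i + 3"].set(IL_All);          // loop increment: shared by all
  Uses["t3 = shared temp"].set(1);
  Uses["t3 = shared temp"].set(2);             // claimed by two iterations

  for (const auto &KV : Uses)
    if (KV.second.count() != 1)                // the check validate() performs
      std::printf("would abort reroll at: %s\n", KV.first.c_str());
}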
+void LoopReroll::DAGRootTracker::replace(const SCEV *IterCount) {
+ BasicBlock *Header = L->getHeader();
+ // Remove instructions associated with non-base iterations.
+ for (BasicBlock::reverse_iterator J = Header->rbegin();
+ J != Header->rend();) {
+ unsigned I = Uses[&*J].find_first();
+ if (I > 0 && I < IL_All) {
+ Instruction *D = &*J;
+ DEBUG(dbgs() << "LRR: removing: " << *D << "\n");
+ D->eraseFromParent();
+ continue;
+ }
+
+ ++J;
+ }
+ const DataLayout &DL = Header->getModule()->getDataLayout();
+
+ // We need to create a new induction variable for each different BaseInst.
+ for (auto &DRS : RootSets) {
+ // Insert the new induction variable.
+ const SCEVAddRecExpr *RealIVSCEV =
+ cast<SCEVAddRecExpr>(SE->getSCEV(DRS.BaseInst));
+ const SCEV *Start = RealIVSCEV->getStart();
+ const SCEVAddRecExpr *H = cast<SCEVAddRecExpr>
+ (SE->getAddRecExpr(Start,
+ SE->getConstant(RealIVSCEV->getType(), 1),
+ L, SCEV::FlagAnyWrap));
+ { // Limit the lifetime of SCEVExpander.
+ SCEVExpander Expander(*SE, DL, "reroll");
+ Value *NewIV = Expander.expandCodeFor(H, IV->getType(), Header->begin());
+
+ for (auto &KV : Uses) {
+ if (KV.second.find_first() == 0)
+ KV.first->replaceUsesOfWith(DRS.BaseInst, NewIV);
+ }
+
+ if (BranchInst *BI = dyn_cast<BranchInst>(Header->getTerminator())) {
+ // FIXME: Why do we need this check?
+ if (Uses[BI].find_first() == IL_All) {
+ const SCEV *ICSCEV = RealIVSCEV->evaluateAtIteration(IterCount, *SE);
+
+ // Iteration count SCEV minus 1
+ const SCEV *ICMinus1SCEV =
+ SE->getMinusSCEV(ICSCEV, SE->getConstant(ICSCEV->getType(), 1));
+
+ Value *ICMinus1; // Iteration count minus 1
+ if (isa<SCEVConstant>(ICMinus1SCEV)) {
+ ICMinus1 = Expander.expandCodeFor(ICMinus1SCEV, NewIV->getType(), BI);
+ } else {
+ BasicBlock *Preheader = L->getLoopPreheader();
+ if (!Preheader)
+ Preheader = InsertPreheaderForLoop(L, Parent);
+
+ ICMinus1 = Expander.expandCodeFor(ICMinus1SCEV, NewIV->getType(),
+ Preheader->getTerminator());
+ }
+
+ Value *Cond =
+ new ICmpInst(BI, CmpInst::ICMP_EQ, NewIV, ICMinus1, "exitcond");
+ BI->setCondition(Cond);
+
+ if (BI->getSuccessor(1) != Header)
+ BI->swapSuccessors();
+ }
+ }
+ }
+ }
+
+ SimplifyInstructionsInBlock(Header, TLI);
+ DeleteDeadPHIs(Header, TLI);
+}
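Putting replace() together with the matching above, the overall effect on a rerollable loop looks roughly like the following source-level before and after (an illustration in the spirit of the examples in the pass's own comments; foo is a placeholder callee):

// Illustrative before/after; foo stands in for whatever the body does.
extern void foo(int);

void before() {              // unrolled by 3, trip count 500
  for (int i = 0; i < 500; ++i) {
    foo(3 * i);
    foo(3 * i + 1);
    foo(3 * i + 2);
  }
}

void after() {               // what rerolling recovers: unit stride, 1500 trips
  for (int i = 0; i < 1500; ++i)
    foo(i);
}

The non-base copies of the body are erased, a fresh unit-stride induction variable is expanded per base instruction, and the exit compare is rewritten against the scaled trip count minus one.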
+
// Validate the selected reductions. All iterations must have an isomorphic
// part of the reduction chain and, for non-associative reductions, the chain
// entries must appear in order.
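The in-order requirement for non-associative reductions matters because reordering a floating-point chain can change the rounded result; a self-contained illustration (values chosen only to make the rounding visible, assuming ordinary IEEE single precision):

#include <cstdio>

int main() {
  float a = 1e20f, b = -1e20f, c = 1.0f;
  std::printf("%g\n", (a + b) + c);   // prints 1: c survives the cancellation
  std::printf("%g\n", a + (b + c));   // prints 0: c is lost when rounded into b
}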
@@ -711,8 +1387,9 @@ void LoopReroll::ReductionTracker::replaceSelected() {
// Replace users with the new end-of-chain value.
SmallInstructionVector Users;
- for (User *U : PossibleReds[i].getReducedValue()->users())
+ for (User *U : PossibleReds[i].getReducedValue()->users()) {
Users.push_back(cast<Instruction>(U));
+ }
for (SmallInstructionVector::iterator J = Users.begin(),
JE = Users.end(); J != JE; ++J)
@@ -767,359 +1444,23 @@ void LoopReroll::ReductionTracker::replaceSelected() {
bool LoopReroll::reroll(Instruction *IV, Loop *L, BasicBlock *Header,
const SCEV *IterCount,
ReductionTracker &Reductions) {
- const SCEVAddRecExpr *RealIVSCEV = cast<SCEVAddRecExpr>(SE->getSCEV(IV));
- uint64_t Inc = cast<SCEVConstant>(RealIVSCEV->getOperand(1))->
- getValue()->getZExtValue();
- // The collection of loop increment instructions.
- SmallInstructionVector LoopIncs;
- uint64_t Scale = Inc;
-
- // The effective induction variable, IV, is normally also the real induction
- // variable. When we're dealing with a loop like:
- // for (int i = 0; i < 500; ++i)
- // x[3*i] = ...;
- // x[3*i+1] = ...;
- // x[3*i+2] = ...;
- // then the real IV is still i, but the effective IV is (3*i).
- Instruction *RealIV = IV;
- if (Inc == 1 && !findScaleFromMul(RealIV, Scale, IV, LoopIncs))
- return false;
-
- assert(Scale <= MaxInc && "Scale is too large");
- assert(Scale > 1 && "Scale must be at least 2");
+ DAGRootTracker DAGRoots(this, L, IV, SE, AA, TLI);
- // The set of increment instructions for each increment value.
- SmallVector<SmallInstructionVector, 32> Roots(Scale-1);
- SmallInstructionSet AllRoots;
- if (!collectAllRoots(L, Inc, Scale, IV, Roots, AllRoots, LoopIncs))
+ if (!DAGRoots.findRoots())
return false;
-
DEBUG(dbgs() << "LRR: Found all root induction increments for: " <<
- *RealIV << "\n");
-
- // An array of just the possible reductions for this scale factor. When we
- // collect the set of all users of some root instructions, these reduction
- // instructions are treated as 'final' (their uses are not considered).
- // This is important because we don't want the root use set to search down
- // the reduction chain.
- SmallInstructionSet PossibleRedSet;
- SmallInstructionSet PossibleRedLastSet, PossibleRedPHISet;
- Reductions.restrictToScale(Scale, PossibleRedSet, PossibleRedPHISet,
- PossibleRedLastSet);
-
- // We now need to check for equivalence of the use graph of each root with
- // that of the primary induction variable (excluding the roots). Our goal
- // here is not to solve the full graph isomorphism problem, but rather to
- // catch common cases without a lot of work. As a result, we will assume
- // that the relative order of the instructions in each unrolled iteration
- // is the same (although we will not make an assumption about how the
- // different iterations are intermixed). Note that while the order must be
- // the same, the instructions may not be in the same basic block.
- SmallInstructionSet Exclude(AllRoots);
- Exclude.insert(LoopIncs.begin(), LoopIncs.end());
-
- DenseSet<Instruction *> BaseUseSet;
- collectInLoopUserSet(L, IV, Exclude, PossibleRedSet, BaseUseSet);
-
- DenseSet<Instruction *> AllRootUses;
- std::vector<DenseSet<Instruction *> > RootUseSets(Scale-1);
-
- bool MatchFailed = false;
- for (unsigned i = 0; i < Scale-1 && !MatchFailed; ++i) {
- DenseSet<Instruction *> &RootUseSet = RootUseSets[i];
- collectInLoopUserSet(L, Roots[i], SmallInstructionSet(),
- PossibleRedSet, RootUseSet);
-
- DEBUG(dbgs() << "LRR: base use set size: " << BaseUseSet.size() <<
- " vs. iteration increment " << (i+1) <<
- " use set size: " << RootUseSet.size() << "\n");
-
- if (BaseUseSet.size() != RootUseSet.size()) {
- MatchFailed = true;
- break;
- }
-
- // In addition to regular aliasing information, we need to look for
- // instructions from later (future) iterations that have side effects
- // preventing us from reordering them past other instructions with side
- // effects.
- bool FutureSideEffects = false;
- AliasSetTracker AST(*AA);
-
- // The map between instructions in f(%iv.(i+1)) and f(%iv).
- DenseMap<Value *, Value *> BaseMap;
-
- assert(L->getNumBlocks() == 1 && "Cannot handle multi-block loops");
- for (BasicBlock::iterator J1 = Header->begin(), J2 = Header->begin(),
- JE = Header->end(); J1 != JE && !MatchFailed; ++J1) {
- if (cast<Instruction>(J1) == RealIV)
- continue;
- if (cast<Instruction>(J1) == IV)
- continue;
- if (!BaseUseSet.count(J1))
- continue;
- if (PossibleRedPHISet.count(J1)) // Skip reduction PHIs.
- continue;
-
- while (J2 != JE && (!RootUseSet.count(J2) ||
- std::find(Roots[i].begin(), Roots[i].end(), J2) !=
- Roots[i].end())) {
- // As we iterate through the instructions, instructions that don't
- // belong to previous iterations (or the base case), must belong to
- // future iterations. We want to track the alias set of writes from
- // previous iterations.
- if (!isa<PHINode>(J2) && !BaseUseSet.count(J2) &&
- !AllRootUses.count(J2)) {
- if (J2->mayWriteToMemory())
- AST.add(J2);
-
- // Note: This is specifically guarded by a check on isa<PHINode>,
- // which while a valid (somewhat arbitrary) micro-optimization, is
- // needed because otherwise isSafeToSpeculativelyExecute returns
- // false on PHI nodes.
- if (!isSimpleLoadStore(J2) && !isSafeToSpeculativelyExecute(J2, DL))
- FutureSideEffects = true;
- }
-
- ++J2;
- }
-
- if (!J1->isSameOperationAs(J2)) {
- DEBUG(dbgs() << "LRR: iteration root match failed at " << *J1 <<
- " vs. " << *J2 << "\n");
- MatchFailed = true;
- break;
- }
-
- // Make sure that this instruction, which is in the use set of this
- // root instruction, does not also belong to the base set or the set of
- // some previous root instruction.
- if (BaseUseSet.count(J2) || AllRootUses.count(J2)) {
- DEBUG(dbgs() << "LRR: iteration root match failed at " << *J1 <<
- " vs. " << *J2 << " (prev. case overlap)\n");
- MatchFailed = true;
- break;
- }
-
- // Make sure that we don't alias with any instruction in the alias set
- // tracker. If we do, then we depend on a future iteration, and we
- // can't reroll.
- if (J2->mayReadFromMemory()) {
- for (AliasSetTracker::iterator K = AST.begin(), KE = AST.end();
- K != KE && !MatchFailed; ++K) {
- if (K->aliasesUnknownInst(J2, *AA)) {
- DEBUG(dbgs() << "LRR: iteration root match failed at " << *J1 <<
- " vs. " << *J2 << " (depends on future store)\n");
- MatchFailed = true;
- break;
- }
- }
- }
-
- // If we've past an instruction from a future iteration that may have
- // side effects, and this instruction might also, then we can't reorder
- // them, and this matching fails. As an exception, we allow the alias
- // set tracker to handle regular (simple) load/store dependencies.
- if (FutureSideEffects &&
- ((!isSimpleLoadStore(J1) &&
- !isSafeToSpeculativelyExecute(J1, DL)) ||
- (!isSimpleLoadStore(J2) &&
- !isSafeToSpeculativelyExecute(J2, DL)))) {
- DEBUG(dbgs() << "LRR: iteration root match failed at " << *J1 <<
- " vs. " << *J2 <<
- " (side effects prevent reordering)\n");
- MatchFailed = true;
- break;
- }
-
- // For instructions that are part of a reduction, if the operation is
- // associative, then don't bother matching the operands (because we
- // already know that the instructions are isomorphic, and the order
- // within the iteration does not matter). For non-associative reductions,
- // we do need to match the operands, because we need to reject
- // out-of-order instructions within an iteration!
- // For example (assume floating-point addition), we need to reject this:
- // x += a[i]; x += b[i];
- // x += a[i+1]; x += b[i+1];
- // x += b[i+2]; x += a[i+2];
- bool InReduction = Reductions.isPairInSame(J1, J2);
-
- if (!(InReduction && J1->isAssociative())) {
- bool Swapped = false, SomeOpMatched = false;
- for (unsigned j = 0; j < J1->getNumOperands() && !MatchFailed; ++j) {
- Value *Op2 = J2->getOperand(j);
-
- // If this is part of a reduction (and the operation is not
- // associatve), then we match all operands, but not those that are
- // part of the reduction.
- if (InReduction)
- if (Instruction *Op2I = dyn_cast<Instruction>(Op2))
- if (Reductions.isPairInSame(J2, Op2I))
- continue;
-
- DenseMap<Value *, Value *>::iterator BMI = BaseMap.find(Op2);
- if (BMI != BaseMap.end())
- Op2 = BMI->second;
- else if (std::find(Roots[i].begin(), Roots[i].end(),
- (Instruction*) Op2) != Roots[i].end())
- Op2 = IV;
-
- if (J1->getOperand(Swapped ? unsigned(!j) : j) != Op2) {
- // If we've not already decided to swap the matched operands, and
- // we've not already matched our first operand (note that we could
- // have skipped matching the first operand because it is part of a
- // reduction above), and the instruction is commutative, then try
- // the swapped match.
- if (!Swapped && J1->isCommutative() && !SomeOpMatched &&
- J1->getOperand(!j) == Op2) {
- Swapped = true;
- } else {
- DEBUG(dbgs() << "LRR: iteration root match failed at " << *J1 <<
- " vs. " << *J2 << " (operand " << j << ")\n");
- MatchFailed = true;
- break;
- }
- }
-
- SomeOpMatched = true;
- }
- }
-
- if ((!PossibleRedLastSet.count(J1) && hasUsesOutsideLoop(J1, L)) ||
- (!PossibleRedLastSet.count(J2) && hasUsesOutsideLoop(J2, L))) {
- DEBUG(dbgs() << "LRR: iteration root match failed at " << *J1 <<
- " vs. " << *J2 << " (uses outside loop)\n");
- MatchFailed = true;
- break;
- }
-
- if (!MatchFailed)
- BaseMap.insert(std::pair<Value *, Value *>(J2, J1));
-
- AllRootUses.insert(J2);
- Reductions.recordPair(J1, J2, i+1);
-
- ++J2;
- }
- }
-
- if (MatchFailed)
- return false;
-
- DEBUG(dbgs() << "LRR: Matched all iteration increments for " <<
- *RealIV << "\n");
-
- DenseSet<Instruction *> LoopIncUseSet;
- collectInLoopUserSet(L, LoopIncs, SmallInstructionSet(),
- SmallInstructionSet(), LoopIncUseSet);
- DEBUG(dbgs() << "LRR: Loop increment set size: " <<
- LoopIncUseSet.size() << "\n");
-
- // Make sure that all instructions in the loop have been included in some
- // use set.
- for (BasicBlock::iterator J = Header->begin(), JE = Header->end();
- J != JE; ++J) {
- if (isa<DbgInfoIntrinsic>(J))
- continue;
- if (cast<Instruction>(J) == RealIV)
- continue;
- if (cast<Instruction>(J) == IV)
- continue;
- if (BaseUseSet.count(J) || AllRootUses.count(J) ||
- (LoopIncUseSet.count(J) && (J->isTerminator() ||
- isSafeToSpeculativelyExecute(J, DL))))
- continue;
-
- if (AllRoots.count(J))
- continue;
-
- if (Reductions.isSelectedPHI(J))
- continue;
-
- DEBUG(dbgs() << "LRR: aborting reroll based on " << *RealIV <<
- " unprocessed instruction found: " << *J << "\n");
- MatchFailed = true;
- break;
- }
-
- if (MatchFailed)
+ *IV << "\n");
+
+ if (!DAGRoots.validate(Reductions))
return false;
-
- DEBUG(dbgs() << "LRR: all instructions processed from " <<
- *RealIV << "\n");
-
if (!Reductions.validateSelected())
return false;
-
// At this point, we've validated the rerolling, and we're committed to
// making changes!
Reductions.replaceSelected();
+ DAGRoots.replace(IterCount);
- // Remove instructions associated with non-base iterations.
- for (BasicBlock::reverse_iterator J = Header->rbegin();
- J != Header->rend();) {
- if (AllRootUses.count(&*J)) {
- Instruction *D = &*J;
- DEBUG(dbgs() << "LRR: removing: " << *D << "\n");
- D->eraseFromParent();
- continue;
- }
-
- ++J;
- }
-
- // Insert the new induction variable.
- const SCEV *Start = RealIVSCEV->getStart();
- if (Inc == 1)
- Start = SE->getMulExpr(Start,
- SE->getConstant(Start->getType(), Scale));
- const SCEVAddRecExpr *H =
- cast<SCEVAddRecExpr>(SE->getAddRecExpr(Start,
- SE->getConstant(RealIVSCEV->getType(), 1),
- L, SCEV::FlagAnyWrap));
- { // Limit the lifetime of SCEVExpander.
- SCEVExpander Expander(*SE, "reroll");
- Value *NewIV = Expander.expandCodeFor(H, IV->getType(), Header->begin());
-
- for (DenseSet<Instruction *>::iterator J = BaseUseSet.begin(),
- JE = BaseUseSet.end(); J != JE; ++J)
- (*J)->replaceUsesOfWith(IV, NewIV);
-
- if (BranchInst *BI = dyn_cast<BranchInst>(Header->getTerminator())) {
- if (LoopIncUseSet.count(BI)) {
- const SCEV *ICSCEV = RealIVSCEV->evaluateAtIteration(IterCount, *SE);
- if (Inc == 1)
- ICSCEV =
- SE->getMulExpr(ICSCEV, SE->getConstant(ICSCEV->getType(), Scale));
- // Iteration count SCEV minus 1
- const SCEV *ICMinus1SCEV =
- SE->getMinusSCEV(ICSCEV, SE->getConstant(ICSCEV->getType(), 1));
-
- Value *ICMinus1; // Iteration count minus 1
- if (isa<SCEVConstant>(ICMinus1SCEV)) {
- ICMinus1 = Expander.expandCodeFor(ICMinus1SCEV, NewIV->getType(), BI);
- } else {
- BasicBlock *Preheader = L->getLoopPreheader();
- if (!Preheader)
- Preheader = InsertPreheaderForLoop(L, this);
-
- ICMinus1 = Expander.expandCodeFor(ICMinus1SCEV, NewIV->getType(),
- Preheader->getTerminator());
- }
-
- Value *Cond =
- new ICmpInst(BI, CmpInst::ICMP_EQ, NewIV, ICMinus1, "exitcond");
- BI->setCondition(Cond);
-
- if (BI->getSuccessor(1) != Header)
- BI->swapSuccessors();
- }
- }
- }
-
- SimplifyInstructionsInBlock(Header, DL, TLI);
- DeleteDeadPHIs(Header, TLI);
++NumRerolledLoops;
return true;
}
@@ -1129,11 +1470,9 @@ bool LoopReroll::runOnLoop(Loop *L, LPPassManager &LPM) {
return false;
AA = &getAnalysis<AliasAnalysis>();
- LI = &getAnalysis<LoopInfo>();
+ LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
SE = &getAnalysis<ScalarEvolution>();
- TLI = &getAnalysis<TargetLibraryInfo>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
BasicBlock *Header = L->getHeader();
diff --git a/lib/Transforms/Scalar/LoopRotation.cpp b/lib/Transforms/Scalar/LoopRotation.cpp
index 267cb999d245..a675e1289baf 100644
--- a/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/lib/Transforms/Scalar/LoopRotation.cpp
@@ -24,8 +24,10 @@
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
@@ -56,14 +58,14 @@ namespace {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<AssumptionCacheTracker>();
AU.addPreserved<DominatorTreeWrapperPass>();
- AU.addRequired<LoopInfo>();
- AU.addPreserved<LoopInfo>();
+ AU.addRequired<LoopInfoWrapperPass>();
+ AU.addPreserved<LoopInfoWrapperPass>();
AU.addRequiredID(LoopSimplifyID);
AU.addPreservedID(LoopSimplifyID);
AU.addRequiredID(LCSSAID);
AU.addPreservedID(LCSSAID);
AU.addPreserved<ScalarEvolution>();
- AU.addRequired<TargetTransformInfo>();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
}
bool runOnLoop(Loop *L, LPPassManager &LPM) override;
@@ -75,14 +77,15 @@ namespace {
LoopInfo *LI;
const TargetTransformInfo *TTI;
AssumptionCache *AC;
+ DominatorTree *DT;
};
}
char LoopRotate::ID = 0;
INITIALIZE_PASS_BEGIN(LoopRotate, "loop-rotate", "Rotate Loops", false, false)
-INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
-INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_END(LoopRotate, "loop-rotate", "Rotate Loops", false, false)
@@ -100,10 +103,13 @@ bool LoopRotate::runOnLoop(Loop *L, LPPassManager &LPM) {
// Save the loop metadata.
MDNode *LoopMD = L->getLoopID();
- LI = &getAnalysis<LoopInfo>();
- TTI = &getAnalysis<TargetTransformInfo>();
- AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
- *L->getHeader()->getParent());
+ Function &F = *L->getHeader()->getParent();
+
+ LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+ TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+ AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
+ auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
+ DT = DTWP ? &DTWP->getDomTree() : nullptr;
// Simplify the loop latch before attempting to rotate the header
// upward. Rotation may not be needed if the loop tail can be folded into the
@@ -226,20 +232,17 @@ static bool shouldSpeculateInstrs(BasicBlock::iterator Begin,
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr: {
- Value *IVOpnd = nullptr;
- if (isa<ConstantInt>(I->getOperand(0)))
- IVOpnd = I->getOperand(1);
-
- if (isa<ConstantInt>(I->getOperand(1))) {
- if (IVOpnd)
- return false;
-
- IVOpnd = I->getOperand(0);
- }
+ Value *IVOpnd = !isa<Constant>(I->getOperand(0))
+ ? I->getOperand(0)
+ : !isa<Constant>(I->getOperand(1))
+ ? I->getOperand(1)
+ : nullptr;
+ if (!IVOpnd)
+ return false;
// If increment operand is used outside of the loop, this speculation
// could cause extra live range interference.
- if (MultiExitLoop && IVOpnd) {
+ if (MultiExitLoop) {
for (User *UseI : IVOpnd->users()) {
auto *UserInst = cast<Instruction>(UseI);
if (!L->contains(UserInst))
@@ -308,9 +311,8 @@ bool LoopRotate::simplifyLoopLatch(Loop *L) {
// Nuke the Latch block.
assert(Latch->empty() && "unable to evacuate Latch");
LI->removeBlock(Latch);
- if (DominatorTreeWrapperPass *DTWP =
- getAnalysisIfAvailable<DominatorTreeWrapperPass>())
- DTWP->getDomTree().eraseNode(Latch);
+ if (DT)
+ DT->eraseNode(Latch);
Latch->eraseFromParent();
return true;
}
@@ -412,6 +414,8 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
for (; PHINode *PN = dyn_cast<PHINode>(I); ++I)
ValueMap[PN] = PN->getIncomingValueForBlock(OrigPreheader);
+ const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
+
// For the rest of the instructions, either hoist to the OrigPreheader if
// possible or create a clone in the OldPreHeader if not.
TerminatorInst *LoopEntryBranch = OrigPreheader->getTerminator();
@@ -442,8 +446,8 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
// With the operands remapped, see if the instruction constant folds or is
// otherwise simplifyable. This commonly occurs because the entry from PHI
// nodes allows icmps and other instructions to fold.
- // FIXME: Provide DL, TLI, DT, AC to SimplifyInstruction.
- Value *V = SimplifyInstruction(C);
+ // FIXME: Provide TLI, DT, AC to SimplifyInstruction.
+ Value *V = SimplifyInstruction(C, DL);
if (V && LI->replacementPreservesLCSSAForm(C, V)) {
// If so, then delete the temporary instruction and stick the folded value
// in the map.
@@ -495,31 +499,31 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
// The conditional branch can't be folded, handle the general case.
// Update DominatorTree to reflect the CFG change we just made. Then split
// edges as necessary to preserve LoopSimplify form.
- if (DominatorTreeWrapperPass *DTWP =
- getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
- DominatorTree &DT = DTWP->getDomTree();
+ if (DT) {
// Everything that was dominated by the old loop header is now dominated
// by the original loop preheader. Conceptually the header was merged
// into the preheader, even though we reuse the actual block as a new
// loop latch.
- DomTreeNode *OrigHeaderNode = DT.getNode(OrigHeader);
+ DomTreeNode *OrigHeaderNode = DT->getNode(OrigHeader);
SmallVector<DomTreeNode *, 8> HeaderChildren(OrigHeaderNode->begin(),
OrigHeaderNode->end());
- DomTreeNode *OrigPreheaderNode = DT.getNode(OrigPreheader);
+ DomTreeNode *OrigPreheaderNode = DT->getNode(OrigPreheader);
for (unsigned I = 0, E = HeaderChildren.size(); I != E; ++I)
- DT.changeImmediateDominator(HeaderChildren[I], OrigPreheaderNode);
+ DT->changeImmediateDominator(HeaderChildren[I], OrigPreheaderNode);
- assert(DT.getNode(Exit)->getIDom() == OrigPreheaderNode);
- assert(DT.getNode(NewHeader)->getIDom() == OrigPreheaderNode);
+ assert(DT->getNode(Exit)->getIDom() == OrigPreheaderNode);
+ assert(DT->getNode(NewHeader)->getIDom() == OrigPreheaderNode);
// Update OrigHeader to be dominated by the new header block.
- DT.changeImmediateDominator(OrigHeader, OrigLatch);
+ DT->changeImmediateDominator(OrigHeader, OrigLatch);
}
// Right now OrigPreHeader has two successors, NewHeader and ExitBlock, and
// thus is not a preheader anymore.
// Split the edge to form a real preheader.
- BasicBlock *NewPH = SplitCriticalEdge(OrigPreheader, NewHeader, this);
+ BasicBlock *NewPH = SplitCriticalEdge(
+ OrigPreheader, NewHeader,
+ CriticalEdgeSplittingOptions(DT, LI).setPreserveLCSSA());
NewPH->setName(NewHeader->getName() + ".lr.ph");
// Preserve canonical loop form, which means that 'Exit' should have only
@@ -538,7 +542,8 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
if (isa<IndirectBrInst>((*PI)->getTerminator()))
continue;
SplitLatchEdge |= L->getLoopLatch() == *PI;
- BasicBlock *ExitSplit = SplitCriticalEdge(*PI, Exit, this);
+ BasicBlock *ExitSplit = SplitCriticalEdge(
+ *PI, Exit, CriticalEdgeSplittingOptions(DT, LI).setPreserveLCSSA());
ExitSplit->moveBefore(Exit);
}
assert(SplitLatchEdge &&
@@ -552,17 +557,15 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
PHBI->eraseFromParent();
// With our CFG finalized, update DomTree if it is available.
- if (DominatorTreeWrapperPass *DTWP =
- getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
- DominatorTree &DT = DTWP->getDomTree();
+ if (DT) {
// Update OrigHeader to be dominated by the new header block.
- DT.changeImmediateDominator(NewHeader, OrigPreheader);
- DT.changeImmediateDominator(OrigHeader, OrigLatch);
+ DT->changeImmediateDominator(NewHeader, OrigPreheader);
+ DT->changeImmediateDominator(OrigHeader, OrigLatch);
// Brute force incremental dominator tree update. Call
// findNearestCommonDominator on all CFG predecessors of each child of the
// original header.
- DomTreeNode *OrigHeaderNode = DT.getNode(OrigHeader);
+ DomTreeNode *OrigHeaderNode = DT->getNode(OrigHeader);
SmallVector<DomTreeNode *, 8> HeaderChildren(OrigHeaderNode->begin(),
OrigHeaderNode->end());
bool Changed;
@@ -575,11 +578,11 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
pred_iterator PI = pred_begin(BB);
BasicBlock *NearestDom = *PI;
for (pred_iterator PE = pred_end(BB); PI != PE; ++PI)
- NearestDom = DT.findNearestCommonDominator(NearestDom, *PI);
+ NearestDom = DT->findNearestCommonDominator(NearestDom, *PI);
// Remember if this changes the DomTree.
if (Node->getIDom()->getBlock() != NearestDom) {
- DT.changeImmediateDominator(BB, NearestDom);
+ DT->changeImmediateDominator(BB, NearestDom);
Changed = true;
}
}
@@ -597,7 +600,7 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
// the OrigHeader block into OrigLatch. This will succeed if they are
// connected by an unconditional branch. This is just a cleanup so the
// emitted code isn't too gross in this common case.
- MergeBlockIntoPredecessor(OrigHeader, this);
+ MergeBlockIntoPredecessor(OrigHeader, DT, LI);
DEBUG(dbgs() << "LoopRotation: into "; L->dump());
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 7b60373dc508..584c7aee7f1d 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -28,7 +28,7 @@
//
// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however
// it's useful to think about these as the same register, with some uses using
-// the value of the register before the add and some using // it after. In this
+// the value of the register before the add and some using it after. In this
// example, the icmp is a post-increment user, since it uses %i.next, which is
// the value of the induction variable after the increment. The other common
// case of post-increment users is users outside the loop.
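A source-level way to picture that distinction (a sketch with made-up names p, n and walk, written in the rotated do-while shape loops usually have by this point, and assuming n > 0): a use of the counter before the add is a pre-increment user, while the exit compare, which sees the value after the add, is a post-increment user of the same conceptual register.

void walk(int *p, int n) {
  int i = 0;
  do {
    p[i] = 0;          // pre-increment user: sees i before the add
    i = i + 1;         // the add that produces the next value of i
  } while (i != n);    // post-increment user: the compare sees the added value
}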
@@ -68,6 +68,7 @@
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -111,8 +112,6 @@ public:
/// a particular register.
SmallBitVector UsedByIndices;
- RegSortData() {}
-
void print(raw_ostream &OS) const;
void dump() const;
};
@@ -186,9 +185,8 @@ RegUseTracker::SwapAndDropUse(size_t LUIdx, size_t LastLUIdx) {
// Update RegUses. The data structure is not optimized for this purpose;
// we must iterate through it and update each of the bit vectors.
- for (RegUsesTy::iterator I = RegUsesMap.begin(), E = RegUsesMap.end();
- I != E; ++I) {
- SmallBitVector &UsedByIndices = I->second.UsedByIndices;
+ for (auto &Pair : RegUsesMap) {
+ SmallBitVector &UsedByIndices = Pair.second.UsedByIndices;
if (LUIdx < UsedByIndices.size())
UsedByIndices[LUIdx] =
LastLUIdx < UsedByIndices.size() ? UsedByIndices[LastLUIdx] : 0;
@@ -298,9 +296,8 @@ static void DoInitialMatch(const SCEV *S, Loop *L,
// Look at add operands.
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
- for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
- I != E; ++I)
- DoInitialMatch(*I, L, Good, Bad, SE);
+ for (const SCEV *S : Add->operands())
+ DoInitialMatch(S, L, Good, Bad, SE);
return;
}
@@ -327,12 +324,10 @@ static void DoInitialMatch(const SCEV *S, Loop *L,
DoInitialMatch(NewMul, L, MyGood, MyBad, SE);
const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
SE.getEffectiveSCEVType(NewMul->getType())));
- for (SmallVectorImpl<const SCEV *>::const_iterator I = MyGood.begin(),
- E = MyGood.end(); I != E; ++I)
- Good.push_back(SE.getMulExpr(NegOne, *I));
- for (SmallVectorImpl<const SCEV *>::const_iterator I = MyBad.begin(),
- E = MyBad.end(); I != E; ++I)
- Bad.push_back(SE.getMulExpr(NegOne, *I));
+ for (const SCEV *S : MyGood)
+ Good.push_back(SE.getMulExpr(NegOne, S));
+ for (const SCEV *S : MyBad)
+ Bad.push_back(SE.getMulExpr(NegOne, S));
return;
}
@@ -444,9 +439,8 @@ bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
if (ScaledReg)
if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
return true;
- for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
- E = BaseRegs.end(); I != E; ++I)
- if (RegUses.isRegUsedByUsesOtherThan(*I, LUIdx))
+ for (const SCEV *BaseReg : BaseRegs)
+ if (RegUses.isRegUsedByUsesOtherThan(BaseReg, LUIdx))
return true;
return false;
}
@@ -461,10 +455,9 @@ void Formula::print(raw_ostream &OS) const {
if (!First) OS << " + "; else First = false;
OS << BaseOffset;
}
- for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
- E = BaseRegs.end(); I != E; ++I) {
+ for (const SCEV *BaseReg : BaseRegs) {
if (!First) OS << " + "; else First = false;
- OS << "reg(" << **I << ')';
+ OS << "reg(" << *BaseReg << ')';
}
if (HasBaseReg && BaseRegs.empty()) {
if (!First) OS << " + "; else First = false;
@@ -577,10 +570,8 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
SmallVector<const SCEV *, 8> Ops;
- for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
- I != E; ++I) {
- const SCEV *Op = getExactSDiv(*I, RHS, SE,
- IgnoreSignificantBits);
+ for (const SCEV *S : Add->operands()) {
+ const SCEV *Op = getExactSDiv(S, RHS, SE, IgnoreSignificantBits);
if (!Op) return nullptr;
Ops.push_back(Op);
}
@@ -594,9 +585,7 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
SmallVector<const SCEV *, 4> Ops;
bool Found = false;
- for (SCEVMulExpr::op_iterator I = Mul->op_begin(), E = Mul->op_end();
- I != E; ++I) {
- const SCEV *S = *I;
+ for (const SCEV *S : Mul->operands()) {
if (!Found)
if (const SCEV *Q = getExactSDiv(S, RHS, SE,
IgnoreSignificantBits)) {
@@ -766,9 +755,8 @@ static bool isHighCostExpansion(const SCEV *S,
return false;
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
- for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
- I != E; ++I) {
- if (isHighCostExpansion(*I, Processed, SE))
+ for (const SCEV *S : Add->operands()) {
+ if (isHighCostExpansion(S, Processed, SE))
return true;
}
return false;
@@ -819,9 +807,9 @@ DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
if (!I || !isInstructionTriviallyDead(I))
continue;
- for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
- if (Instruction *U = dyn_cast<Instruction>(*OI)) {
- *OI = nullptr;
+ for (Use &O : I->operands())
+ if (Instruction *U = dyn_cast<Instruction>(O)) {
+ O = nullptr;
if (U->use_empty())
DeadInsts.push_back(U);
}
@@ -1002,9 +990,7 @@ void Cost::RateFormula(const TargetTransformInfo &TTI,
if (isLoser())
return;
}
- for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
- E = F.BaseRegs.end(); I != E; ++I) {
- const SCEV *BaseReg = *I;
+ for (const SCEV *BaseReg : F.BaseRegs) {
if (VisitedRegs.count(BaseReg)) {
Lose();
return;
@@ -1027,9 +1013,8 @@ void Cost::RateFormula(const TargetTransformInfo &TTI,
ScaleCost += getScalingFactorCost(TTI, LU, F);
// Tally up the non-zero immediates.
- for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
- E = Offsets.end(); I != E; ++I) {
- int64_t Offset = (uint64_t)*I + F.BaseOffset;
+ for (int64_t O : Offsets) {
+ int64_t Offset = (uint64_t)O + F.BaseOffset;
if (F.BaseGV)
ImmCost += 64; // Handle symbolic values conservatively.
// TODO: This should probably be the pointer size.
@@ -1152,10 +1137,9 @@ void LSRFixup::print(raw_ostream &OS) const {
OS << ", OperandValToReplace=";
OperandValToReplace->printAsOperand(OS, /*PrintType=*/false);
- for (PostIncLoopSet::const_iterator I = PostIncLoops.begin(),
- E = PostIncLoops.end(); I != E; ++I) {
+ for (const Loop *PIL : PostIncLoops) {
OS << ", PostIncLoop=";
- (*I)->getHeader()->printAsOperand(OS, /*PrintType=*/false);
+ PIL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
}
if (LUIdx != ~size_t(0))
@@ -1301,9 +1285,8 @@ bool LSRUse::InsertFormula(const Formula &F) {
assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
"Zero allocated in a scaled register!");
#ifndef NDEBUG
- for (SmallVectorImpl<const SCEV *>::const_iterator I =
- F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I)
- assert(!(*I)->isZero() && "Zero allocated in a base register!");
+ for (const SCEV *BaseReg : F.BaseRegs)
+ assert(!BaseReg->isZero() && "Zero allocated in a base register!");
#endif
// Add the formula to the list.
@@ -1327,11 +1310,9 @@ void LSRUse::DeleteFormula(Formula &F) {
/// RecomputeRegs - Recompute the Regs field, and update RegUses.
void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) {
// Now that we've filtered out some formulae, recompute the Regs set.
- SmallPtrSet<const SCEV *, 4> OldRegs = Regs;
+ SmallPtrSet<const SCEV *, 4> OldRegs = std::move(Regs);
Regs.clear();
- for (SmallVectorImpl<Formula>::const_iterator I = Formulae.begin(),
- E = Formulae.end(); I != E; ++I) {
- const Formula &F = *I;
+ for (const Formula &F : Formulae) {
if (F.ScaledReg) Regs.insert(F.ScaledReg);
Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
}
@@ -1357,11 +1338,11 @@ void LSRUse::print(raw_ostream &OS) const {
}
OS << ", Offsets={";
- for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
- E = Offsets.end(); I != E; ++I) {
- OS << *I;
- if (std::next(I) != E)
- OS << ',';
+ bool NeedComma = false;
+ for (int64_t O : Offsets) {
+ if (NeedComma) OS << ',';
+ OS << O;
+ NeedComma = true;
}
OS << '}';
@@ -1386,9 +1367,6 @@ static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
case LSRUse::Address:
return TTI.isLegalAddressingMode(AccessTy, BaseGV, BaseOffset, HasBaseReg, Scale);
- // Otherwise, just guess that reg+reg addressing is legal.
- //return ;
-
case LSRUse::ICmpZero:
// There's not even a target hook for querying whether it would be legal to
// fold a GV into an ICmp.
@@ -1928,12 +1906,12 @@ void LSRInstance::OptimizeShadowIV() {
/// set the IV user and stride information and return true, otherwise return
/// false.
bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) {
- for (IVUsers::iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
- if (UI->getUser() == Cond) {
+ for (IVStrideUse &U : IU)
+ if (U.getUser() == Cond) {
// NOTE: we could handle setcc instructions with multiple uses here, but
// InstCombine does it as well for simple uses, it's not clear that it
// occurs enough in real life to handle.
- CondUse = UI;
+ CondUse = &U;
return true;
}
return false;
@@ -2108,8 +2086,7 @@ LSRInstance::OptimizeLoopTermCond() {
SmallVector<BasicBlock*, 8> ExitingBlocks;
L->getExitingBlocks(ExitingBlocks);
- for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
- BasicBlock *ExitingBlock = ExitingBlocks[i];
+ for (BasicBlock *ExitingBlock : ExitingBlocks) {
// Get the terminating condition for the loop if possible. If we
// can, we want to change it to use a post-incremented version of its
@@ -2352,9 +2329,7 @@ LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
LU.WidestFixupType == OrigLU.WidestFixupType &&
LU.HasFormulaWithSameRegs(OrigF)) {
// Scan through this use's formulae.
- for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
- E = LU.Formulae.end(); I != E; ++I) {
- const Formula &F = *I;
+ for (const Formula &F : LU.Formulae) {
// Check to see if this formula has the same registers and symbols
// as OrigF.
if (F.BaseRegs == OrigF.BaseRegs &&
@@ -2382,8 +2357,8 @@ void LSRInstance::CollectInterestingTypesAndFactors() {
// Collect interesting types and strides.
SmallVector<const SCEV *, 4> Worklist;
- for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
- const SCEV *Expr = IU.getExpr(*UI);
+ for (const IVStrideUse &U : IU) {
+ const SCEV *Expr = IU.getExpr(U);
// Collect interesting types.
Types.insert(SE.getEffectiveSCEVType(Expr->getType()));
@@ -2586,25 +2561,23 @@ isProfitableChain(IVChain &Chain, SmallPtrSetImpl<Instruction*> &Users,
unsigned NumConstIncrements = 0;
unsigned NumVarIncrements = 0;
unsigned NumReusedIncrements = 0;
- for (IVChain::const_iterator I = Chain.begin(), E = Chain.end();
- I != E; ++I) {
-
- if (I->IncExpr->isZero())
+ for (const IVInc &Inc : Chain) {
+ if (Inc.IncExpr->isZero())
continue;
// Incrementing by zero or some constant is neutral. We assume constants can
// be folded into an addressing mode or an add's immediate operand.
- if (isa<SCEVConstant>(I->IncExpr)) {
+ if (isa<SCEVConstant>(Inc.IncExpr)) {
++NumConstIncrements;
continue;
}
- if (I->IncExpr == LastIncExpr)
+ if (Inc.IncExpr == LastIncExpr)
++NumReusedIncrements;
else
++NumVarIncrements;
- LastIncExpr = I->IncExpr;
+ LastIncExpr = Inc.IncExpr;
}
// An IV chain with a single increment is handled by LSR's postinc
// uses. However, a chain with multiple increments requires keeping the IV's
@@ -2839,12 +2812,11 @@ void LSRInstance::FinalizeChain(IVChain &Chain) {
assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n");
- for (IVChain::const_iterator I = Chain.begin(), E = Chain.end();
- I != E; ++I) {
- DEBUG(dbgs() << " Inc: " << *I->UserInst << "\n");
- User::op_iterator UseI =
- std::find(I->UserInst->op_begin(), I->UserInst->op_end(), I->IVOperand);
- assert(UseI != I->UserInst->op_end() && "cannot find IV operand");
+ for (const IVInc &Inc : Chain) {
+    DEBUG(dbgs() << "        Inc: " << *Inc.UserInst << "\n");
+ auto UseI = std::find(Inc.UserInst->op_begin(), Inc.UserInst->op_end(),
+ Inc.IVOperand);
+ assert(UseI != Inc.UserInst->op_end() && "cannot find IV operand");
IVIncSet.insert(UseI);
}
}
@@ -2907,20 +2879,18 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
Type *IVTy = IVSrc->getType();
Type *IntTy = SE.getEffectiveSCEVType(IVTy);
const SCEV *LeftOverExpr = nullptr;
- for (IVChain::const_iterator IncI = Chain.begin(),
- IncE = Chain.end(); IncI != IncE; ++IncI) {
-
- Instruction *InsertPt = IncI->UserInst;
+ for (const IVInc &Inc : Chain) {
+ Instruction *InsertPt = Inc.UserInst;
if (isa<PHINode>(InsertPt))
InsertPt = L->getLoopLatch()->getTerminator();
// IVOper will replace the current IV User's operand. IVSrc is the IV
// value currently held in a register.
Value *IVOper = IVSrc;
- if (!IncI->IncExpr->isZero()) {
+ if (!Inc.IncExpr->isZero()) {
// IncExpr was the result of subtraction of two narrow values, so must
// be signed.
- const SCEV *IncExpr = SE.getNoopOrSignExtend(IncI->IncExpr, IntTy);
+ const SCEV *IncExpr = SE.getNoopOrSignExtend(Inc.IncExpr, IntTy);
LeftOverExpr = LeftOverExpr ?
SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr;
}
@@ -2933,22 +2903,21 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt);
// If an IV increment can't be folded, use it as the next IV value.
- if (!canFoldIVIncExpr(LeftOverExpr, IncI->UserInst, IncI->IVOperand,
- TTI)) {
+ if (!canFoldIVIncExpr(LeftOverExpr, Inc.UserInst, Inc.IVOperand, TTI)) {
assert(IVTy == IVOper->getType() && "inconsistent IV increment type");
IVSrc = IVOper;
LeftOverExpr = nullptr;
}
}
- Type *OperTy = IncI->IVOperand->getType();
+ Type *OperTy = Inc.IVOperand->getType();
if (IVTy != OperTy) {
assert(SE.getTypeSizeInBits(IVTy) >= SE.getTypeSizeInBits(OperTy) &&
"cannot extend a chained IV");
IRBuilder<> Builder(InsertPt);
IVOper = Builder.CreateTruncOrBitCast(IVOper, OperTy, "lsr.chain");
}
- IncI->UserInst->replaceUsesOfWith(IncI->IVOperand, IVOper);
- DeadInsts.push_back(IncI->IVOperand);
+ Inc.UserInst->replaceUsesOfWith(Inc.IVOperand, IVOper);
+ DeadInsts.push_back(Inc.IVOperand);
}
// If LSR created a new, wider phi, we may also replace its postinc. We only
// do this if we also found a wide value for the head of the chain.
@@ -2976,11 +2945,11 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
}
void LSRInstance::CollectFixupsAndInitialFormulae() {
- for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
- Instruction *UserInst = UI->getUser();
+ for (const IVStrideUse &U : IU) {
+ Instruction *UserInst = U.getUser();
// Skip IV users that are part of profitable IV Chains.
User::op_iterator UseI = std::find(UserInst->op_begin(), UserInst->op_end(),
- UI->getOperandValToReplace());
+ U.getOperandValToReplace());
assert(UseI != UserInst->op_end() && "cannot find IV operand");
if (IVIncSet.count(UseI))
continue;
@@ -2988,8 +2957,8 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
// Record the uses.
LSRFixup &LF = getNewFixup();
LF.UserInst = UserInst;
- LF.OperandValToReplace = UI->getOperandValToReplace();
- LF.PostIncLoops = UI->getPostIncLoops();
+ LF.OperandValToReplace = U.getOperandValToReplace();
+ LF.PostIncLoops = U.getPostIncLoops();
LSRUse::KindType Kind = LSRUse::Basic;
Type *AccessTy = nullptr;
@@ -2998,7 +2967,7 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
AccessTy = getAccessType(LF.UserInst);
}
- const SCEV *S = IU.getExpr(*UI);
+ const SCEV *S = IU.getExpr(U);
// Equality (== and !=) ICmps are special. We can rewrite (i == N) as
// (N - i == 0), and this allows (N - i) to be the expression that we work
@@ -3090,9 +3059,8 @@ LSRInstance::InsertSupplementalFormula(const SCEV *S,
void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) {
if (F.ScaledReg)
RegUses.CountRegister(F.ScaledReg, LUIdx);
- for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
- E = F.BaseRegs.end(); I != E; ++I)
- RegUses.CountRegister(*I, LUIdx);
+ for (const SCEV *BaseReg : F.BaseRegs)
+ RegUses.CountRegister(BaseReg, LUIdx);
}
/// InsertFormula - If the given formula has not yet been inserted, add it to
@@ -3213,9 +3181,8 @@ static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C,
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
// Break out add operands.
- for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
- I != E; ++I) {
- const SCEV *Remainder = CollectSubexprs(*I, C, Ops, L, SE, Depth+1);
+ for (const SCEV *S : Add->operands()) {
+ const SCEV *Remainder = CollectSubexprs(S, C, Ops, L, SE, Depth+1);
if (Remainder)
Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
}
@@ -3373,9 +3340,7 @@ void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
Formula F = Base;
F.BaseRegs.clear();
SmallVector<const SCEV *, 4> Ops;
- for (SmallVectorImpl<const SCEV *>::const_iterator
- I = Base.BaseRegs.begin(), E = Base.BaseRegs.end(); I != E; ++I) {
- const SCEV *BaseReg = *I;
+ for (const SCEV *BaseReg : Base.BaseRegs) {
if (SE.properlyDominates(BaseReg, L->getHeader()) &&
!SE.hasComputableLoopEvolution(BaseReg, L))
Ops.push_back(BaseReg);
@@ -3432,15 +3397,13 @@ void LSRInstance::GenerateConstantOffsetsImpl(
LSRUse &LU, unsigned LUIdx, const Formula &Base,
const SmallVectorImpl<int64_t> &Worklist, size_t Idx, bool IsScaledReg) {
const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
- for (SmallVectorImpl<int64_t>::const_iterator I = Worklist.begin(),
- E = Worklist.end();
- I != E; ++I) {
+ for (int64_t Offset : Worklist) {
Formula F = Base;
- F.BaseOffset = (uint64_t)Base.BaseOffset - *I;
- if (isLegalUse(TTI, LU.MinOffset - *I, LU.MaxOffset - *I, LU.Kind,
+ F.BaseOffset = (uint64_t)Base.BaseOffset - Offset;
+ if (isLegalUse(TTI, LU.MinOffset - Offset, LU.MaxOffset - Offset, LU.Kind,
LU.AccessTy, F)) {
// Add the offset to the base register.
- const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), *I), G);
+ const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), Offset), G);
// If it cancelled out, drop the base register, otherwise update it.
if (NewG->isZero()) {
if (IsScaledReg) {
@@ -3506,10 +3469,7 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
assert(!Base.BaseGV && "ICmpZero use is not legal!");
// Check each interesting stride.
- for (SmallSetVector<int64_t, 8>::const_iterator
- I = Factors.begin(), E = Factors.end(); I != E; ++I) {
- int64_t Factor = *I;
-
+ for (int64_t Factor : Factors) {
// Check that the multiplication doesn't overflow.
if (Base.BaseOffset == INT64_MIN && Factor == -1)
continue;
@@ -3593,10 +3553,7 @@ void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
assert(Base.Scale == 0 && "Unscale did not did its job!");
// Check each interesting stride.
- for (SmallSetVector<int64_t, 8>::const_iterator
- I = Factors.begin(), E = Factors.end(); I != E; ++I) {
- int64_t Factor = *I;
-
+ for (int64_t Factor : Factors) {
Base.Scale = Factor;
Base.HasBaseReg = Base.BaseRegs.size() > 1;
// Check whether this scale is going to be legal.
@@ -3652,16 +3609,13 @@ void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
if (!DstTy) return;
DstTy = SE.getEffectiveSCEVType(DstTy);
- for (SmallSetVector<Type *, 4>::const_iterator
- I = Types.begin(), E = Types.end(); I != E; ++I) {
- Type *SrcTy = *I;
+ for (Type *SrcTy : Types) {
if (SrcTy != DstTy && TTI.isTruncateFree(SrcTy, DstTy)) {
Formula F = Base;
- if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I);
- for (SmallVectorImpl<const SCEV *>::iterator J = F.BaseRegs.begin(),
- JE = F.BaseRegs.end(); J != JE; ++J)
- *J = SE.getAnyExtendExpr(*J, SrcTy);
+ if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, SrcTy);
+ for (const SCEV *&BaseReg : F.BaseRegs)
+ BaseReg = SE.getAnyExtendExpr(BaseReg, SrcTy);
// TODO: This assumes we've done basic processing on all uses and
// have an idea what the register usage is.
@@ -3708,20 +3662,17 @@ void WorkItem::dump() const {
void LSRInstance::GenerateCrossUseConstantOffsets() {
// Group the registers by their value without any added constant offset.
typedef std::map<int64_t, const SCEV *> ImmMapTy;
- typedef DenseMap<const SCEV *, ImmMapTy> RegMapTy;
- RegMapTy Map;
+ DenseMap<const SCEV *, ImmMapTy> Map;
DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
SmallVector<const SCEV *, 8> Sequence;
- for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
- I != E; ++I) {
- const SCEV *Reg = *I;
+ for (const SCEV *Use : RegUses) {
+ const SCEV *Reg = Use; // Make a copy for ExtractImmediate to modify.
int64_t Imm = ExtractImmediate(Reg, SE);
- std::pair<RegMapTy::iterator, bool> Pair =
- Map.insert(std::make_pair(Reg, ImmMapTy()));
+ auto Pair = Map.insert(std::make_pair(Reg, ImmMapTy()));
if (Pair.second)
Sequence.push_back(Reg);
- Pair.first->second.insert(std::make_pair(Imm, *I));
- UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(*I);
+ Pair.first->second.insert(std::make_pair(Imm, Use));
+ UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(Use);
}
// Now examine each set of registers with the same base value. Build up
@@ -3729,9 +3680,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
// not adding formulae and register counts while we're searching.
SmallVector<WorkItem, 32> WorkItems;
SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
- for (SmallVectorImpl<const SCEV *>::const_iterator I = Sequence.begin(),
- E = Sequence.end(); I != E; ++I) {
- const SCEV *Reg = *I;
+ for (const SCEV *Reg : Sequence) {
const ImmMapTy &Imms = Map.find(Reg)->second;
// It's not worthwhile looking for reuse if there's only one offset.
@@ -3739,9 +3688,8 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
continue;
DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
- for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
- J != JE; ++J)
- dbgs() << ' ' << J->first;
+ for (const auto &Entry : Imms)
+ dbgs() << ' ' << Entry.first;
dbgs() << '\n');
// Examine each offset.
@@ -3786,9 +3734,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
UniqueItems.clear();
// Now iterate through the worklist and add new formulae.
- for (SmallVectorImpl<WorkItem>::const_iterator I = WorkItems.begin(),
- E = WorkItems.end(); I != E; ++I) {
- const WorkItem &WI = *I;
+ for (const WorkItem &WI : WorkItems) {
size_t LUIdx = WI.LUIdx;
LSRUse &LU = Uses[LUIdx];
int64_t Imm = WI.Imm;
@@ -3827,7 +3773,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
if (C->getValue()->isNegative() !=
(NewF.BaseOffset < 0) &&
(C->getValue()->getValue().abs() * APInt(BitWidth, F.Scale))
- .ule(abs64(NewF.BaseOffset)))
+ .ule(std::abs(NewF.BaseOffset)))
continue;
// OK, looks good.
@@ -3853,12 +3799,10 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
// If the new formula has a constant in a register, and adding the
// constant value to the immediate would produce a value closer to
// zero than the immediate itself, then the formula isn't worthwhile.
- for (SmallVectorImpl<const SCEV *>::const_iterator
- J = NewF.BaseRegs.begin(), JE = NewF.BaseRegs.end();
- J != JE; ++J)
- if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*J))
+ for (const SCEV *NewReg : NewF.BaseRegs)
+ if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewReg))
if ((C->getValue()->getValue() + NewF.BaseOffset).abs().slt(
- abs64(NewF.BaseOffset)) &&
+ std::abs(NewF.BaseOffset)) &&
(C->getValue()->getValue() +
NewF.BaseOffset).countTrailingZeros() >=
countTrailingZeros<uint64_t>(NewF.BaseOffset))
@@ -3959,9 +3903,7 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
}
else {
SmallVector<const SCEV *, 4> Key;
- for (SmallVectorImpl<const SCEV *>::const_iterator J = F.BaseRegs.begin(),
- JE = F.BaseRegs.end(); J != JE; ++J) {
- const SCEV *Reg = *J;
+ for (const SCEV *Reg : F.BaseRegs) {
if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
Key.push_back(Reg);
}
@@ -4023,9 +3965,8 @@ static const size_t ComplexityLimit = UINT16_MAX;
/// isn't always sufficient.
size_t LSRInstance::EstimateSearchSpaceComplexity() const {
size_t Power = 1;
- for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
- E = Uses.end(); I != E; ++I) {
- size_t FSize = I->Formulae.size();
+ for (const LSRUse &LU : Uses) {
+ size_t FSize = LU.Formulae.size();
if (FSize >= ComplexityLimit) {
Power = ComplexityLimit;
break;
@@ -4116,9 +4057,7 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
LSRUse &LU = Uses[LUIdx];
- for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
- E = LU.Formulae.end(); I != E; ++I) {
- const Formula &F = *I;
+ for (const Formula &F : LU.Formulae) {
if (F.BaseOffset == 0 || (F.Scale != 0 && F.Scale != 1))
continue;
@@ -4135,9 +4074,7 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;
// Update the relocs to reference the new use.
- for (SmallVectorImpl<LSRFixup>::iterator I = Fixups.begin(),
- E = Fixups.end(); I != E; ++I) {
- LSRFixup &Fixup = *I;
+ for (LSRFixup &Fixup : Fixups) {
if (Fixup.LUIdx == LUIdx) {
Fixup.LUIdx = LUThatHas - &Uses.front();
Fixup.Offset += F.BaseOffset;
@@ -4218,9 +4155,7 @@ void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
// to be a good reuse register candidate.
const SCEV *Best = nullptr;
unsigned BestNum = 0;
- for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
- I != E; ++I) {
- const SCEV *Reg = *I;
+ for (const SCEV *Reg : RegUses) {
if (Taken.count(Reg))
continue;
if (!Best)
@@ -4308,17 +4243,12 @@ void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
SmallPtrSet<const SCEV *, 16> NewRegs;
Cost NewCost;
- for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
- E = LU.Formulae.end(); I != E; ++I) {
- const Formula &F = *I;
-
+ for (const Formula &F : LU.Formulae) {
// Ignore formulae which may not be ideal in terms of register reuse of
// ReqRegs. The formula should use all required registers before
// introducing new ones.
int NumReqRegsToFind = std::min(F.getNumRegs(), ReqRegs.size());
- for (SmallSetVector<const SCEV *, 4>::const_iterator J = ReqRegs.begin(),
- JE = ReqRegs.end(); J != JE; ++J) {
- const SCEV *Reg = *J;
+ for (const SCEV *Reg : ReqRegs) {
if ((F.ScaledReg && F.ScaledReg == Reg) ||
std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) !=
F.BaseRegs.end()) {
@@ -4426,9 +4356,7 @@ LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
bool AllDominate = true;
Instruction *BetterPos = nullptr;
Instruction *Tentative = IDom->getTerminator();
- for (SmallVectorImpl<Instruction *>::const_iterator I = Inputs.begin(),
- E = Inputs.end(); I != E; ++I) {
- Instruction *Inst = *I;
+ for (Instruction *Inst : Inputs) {
if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
AllDominate = false;
break;
@@ -4475,9 +4403,7 @@ LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP,
}
// The expansion must also be dominated by the increment positions of any
// loops it for which it is using post-inc mode.
- for (PostIncLoopSet::const_iterator I = LF.PostIncLoops.begin(),
- E = LF.PostIncLoops.end(); I != E; ++I) {
- const Loop *PIL = *I;
+ for (const Loop *PIL : LF.PostIncLoops) {
if (PIL == L) continue;
// Be dominated by the loop exit.
@@ -4552,9 +4478,7 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
SmallVector<const SCEV *, 8> Ops;
// Expand the BaseRegs portion.
- for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
- E = F.BaseRegs.end(); I != E; ++I) {
- const SCEV *Reg = *I;
+ for (const SCEV *Reg : F.BaseRegs) {
assert(!Reg->isZero() && "Zero allocated in a base register!");
// If we're expanding for a post-inc user, make the post-inc adjustment.
@@ -4728,12 +4652,14 @@ void LSRInstance::RewriteForPHI(PHINode *PN,
// Split the critical edge.
BasicBlock *NewBB = nullptr;
if (!Parent->isLandingPad()) {
- NewBB = SplitCriticalEdge(BB, Parent, P,
- /*MergeIdenticalEdges=*/true,
- /*DontDeleteUselessPhis=*/true);
+ NewBB = SplitCriticalEdge(BB, Parent,
+ CriticalEdgeSplittingOptions(&DT, &LI)
+ .setMergeIdenticalEdges()
+ .setDontDeleteUselessPHIs());
} else {
SmallVector<BasicBlock*, 2> NewBBs;
- SplitLandingPadPredecessors(Parent, BB, "", "", P, NewBBs);
+ SplitLandingPadPredecessors(Parent, BB, "", "", NewBBs,
+ /*AliasAnalysis*/ nullptr, &DT, &LI);
NewBB = NewBBs[0];
}
// If NewBB==NULL, then SplitCriticalEdge refused to split because all
@@ -4823,7 +4749,8 @@ LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
// we can remove them after we are done working.
SmallVector<WeakVH, 16> DeadInsts;
- SCEVExpander Rewriter(SE, "lsr");
+ SCEVExpander Rewriter(SE, L->getHeader()->getModule()->getDataLayout(),
+ "lsr");
#ifndef NDEBUG
Rewriter.setDebugType(DEBUG_TYPE);
#endif
@@ -4832,25 +4759,20 @@ LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
Rewriter.setIVIncInsertPos(L, IVIncInsertPos);
// Mark phi nodes that terminate chains so the expander tries to reuse them.
- for (SmallVectorImpl<IVChain>::const_iterator ChainI = IVChainVec.begin(),
- ChainE = IVChainVec.end(); ChainI != ChainE; ++ChainI) {
- if (PHINode *PN = dyn_cast<PHINode>(ChainI->tailUserInst()))
+ for (const IVChain &Chain : IVChainVec) {
+ if (PHINode *PN = dyn_cast<PHINode>(Chain.tailUserInst()))
Rewriter.setChainedPhi(PN);
}
// Expand the new value definitions and update the users.
- for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
- E = Fixups.end(); I != E; ++I) {
- const LSRFixup &Fixup = *I;
-
+ for (const LSRFixup &Fixup : Fixups) {
Rewrite(Fixup, *Solution[Fixup.LUIdx], Rewriter, DeadInsts, P);
Changed = true;
}
- for (SmallVectorImpl<IVChain>::const_iterator ChainI = IVChainVec.begin(),
- ChainE = IVChainVec.end(); ChainI != ChainE; ++ChainI) {
- GenerateIVChain(*ChainI, Rewriter, DeadInsts);
+ for (const IVChain &Chain : IVChainVec) {
+ GenerateIVChain(Chain, Rewriter, DeadInsts);
Changed = true;
}
// Clean up after ourselves. This must be done before deleting any
@@ -4863,9 +4785,10 @@ LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
LSRInstance::LSRInstance(Loop *L, Pass *P)
: IU(P->getAnalysis<IVUsers>()), SE(P->getAnalysis<ScalarEvolution>()),
DT(P->getAnalysis<DominatorTreeWrapperPass>().getDomTree()),
- LI(P->getAnalysis<LoopInfo>()),
- TTI(P->getAnalysis<TargetTransformInfo>()), L(L), Changed(false),
- IVIncInsertPos(nullptr) {
+ LI(P->getAnalysis<LoopInfoWrapperPass>().getLoopInfo()),
+ TTI(P->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
+ *L->getHeader()->getParent())),
+ L(L), Changed(false), IVIncInsertPos(nullptr) {
// If LoopSimplify form is not available, stay out of trouble.
if (!L->isLoopSimplifyForm())
return;
@@ -4876,10 +4799,10 @@ LSRInstance::LSRInstance(Loop *L, Pass *P)
// If there's too much analysis to be done, bail early. We won't be able to
// model the problem anyway.
unsigned NumUsers = 0;
- for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
+ for (const IVStrideUse &U : IU) {
if (++NumUsers > MaxIVUsers) {
- DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << *L
- << "\n");
+ (void)U;
+ DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << U << "\n");
return;
}
}
@@ -4948,14 +4871,10 @@ LSRInstance::LSRInstance(Loop *L, Pass *P)
#ifndef NDEBUG
// Formulae should be legal.
- for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(), E = Uses.end();
- I != E; ++I) {
- const LSRUse &LU = *I;
- for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
- JE = LU.Formulae.end();
- J != JE; ++J)
+ for (const LSRUse &LU : Uses) {
+ for (const Formula &F : LU.Formulae)
assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
- *J) && "Illegal formula generated!");
+ F) && "Illegal formula generated!");
};
#endif
@@ -4969,44 +4888,38 @@ void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
OS << "LSR has identified the following interesting factors and types: ";
bool First = true;
- for (SmallSetVector<int64_t, 8>::const_iterator
- I = Factors.begin(), E = Factors.end(); I != E; ++I) {
+ for (int64_t Factor : Factors) {
if (!First) OS << ", ";
First = false;
- OS << '*' << *I;
+ OS << '*' << Factor;
}
- for (SmallSetVector<Type *, 4>::const_iterator
- I = Types.begin(), E = Types.end(); I != E; ++I) {
+ for (Type *Ty : Types) {
if (!First) OS << ", ";
First = false;
- OS << '(' << **I << ')';
+ OS << '(' << *Ty << ')';
}
OS << '\n';
}
void LSRInstance::print_fixups(raw_ostream &OS) const {
OS << "LSR is examining the following fixup sites:\n";
- for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
- E = Fixups.end(); I != E; ++I) {
+ for (const LSRFixup &LF : Fixups) {
dbgs() << " ";
- I->print(OS);
+ LF.print(OS);
OS << '\n';
}
}
void LSRInstance::print_uses(raw_ostream &OS) const {
OS << "LSR is examining the following uses:\n";
- for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
- E = Uses.end(); I != E; ++I) {
- const LSRUse &LU = *I;
+ for (const LSRUse &LU : Uses) {
dbgs() << " ";
LU.print(OS);
OS << '\n';
- for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
- JE = LU.Formulae.end(); J != JE; ++J) {
+ for (const Formula &F : LU.Formulae) {
OS << " ";
- J->print(OS);
+ F.print(OS);
OS << '\n';
}
}
@@ -5041,11 +4954,11 @@ private:
char LoopStrengthReduce::ID = 0;
INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce",
"Loop Strength Reduction", false, false)
-INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(IVUsers)
-INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce",
"Loop Strength Reduction", false, false)
@@ -5064,8 +4977,8 @@ void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
// many analyses if they are around.
AU.addPreservedID(LoopSimplifyID);
- AU.addRequired<LoopInfo>();
- AU.addPreserved<LoopInfo>();
+ AU.addRequired<LoopInfoWrapperPass>();
+ AU.addPreserved<LoopInfoWrapperPass>();
AU.addRequiredID(LoopSimplifyID);
AU.addRequired<DominatorTreeWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
@@ -5076,7 +4989,7 @@ void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequiredID(LoopSimplifyID);
AU.addRequired<IVUsers>();
AU.addPreserved<IVUsers>();
- AU.addRequired<TargetTransformInfo>();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
}
bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
@@ -5092,13 +5005,15 @@ bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
Changed |= DeleteDeadPHIs(L->getHeader());
if (EnablePhiElim && L->isLoopSimplifyForm()) {
SmallVector<WeakVH, 16> DeadInsts;
- SCEVExpander Rewriter(getAnalysis<ScalarEvolution>(), "lsr");
+ const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
+ SCEVExpander Rewriter(getAnalysis<ScalarEvolution>(), DL, "lsr");
#ifndef NDEBUG
Rewriter.setDebugType(DEBUG_TYPE);
#endif
unsigned numFolded = Rewriter.replaceCongruentIVs(
L, &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), DeadInsts,
- &getAnalysis<TargetTransformInfo>());
+ &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
+ *L->getHeader()->getParent()));
if (numFolded) {
Changed = true;
DeleteTriviallyDeadInstructions(DeadInsts);
diff --git a/lib/Transforms/Scalar/LoopUnrollPass.cpp b/lib/Transforms/Scalar/LoopUnrollPass.cpp
index fef52107f623..ccafd100ef0f 100644
--- a/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -13,15 +13,18 @@
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
-#include "llvm/Analysis/FunctionTargetTransformInfo.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
+#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/CommandLine.h"
@@ -38,6 +41,22 @@ static cl::opt<unsigned>
UnrollThreshold("unroll-threshold", cl::init(150), cl::Hidden,
cl::desc("The cut-off point for automatic loop unrolling"));
+static cl::opt<unsigned> UnrollMaxIterationsCountToAnalyze(
+ "unroll-max-iteration-count-to-analyze", cl::init(0), cl::Hidden,
+ cl::desc("Don't allow loop unrolling to simulate more than this number of"
+             " iterations when checking full unroll profitability"));
+
+static cl::opt<unsigned> UnrollMinPercentOfOptimized(
+ "unroll-percent-of-optimized-for-complete-unroll", cl::init(20), cl::Hidden,
+ cl::desc("If complete unrolling could trigger further optimizations, and, "
+ "by that, remove the given percent of instructions, perform the "
+ "complete unroll even if it's beyond the threshold"));
+
+static cl::opt<unsigned> UnrollAbsoluteThreshold(
+ "unroll-absolute-threshold", cl::init(2000), cl::Hidden,
+ cl::desc("Don't unroll if the unrolled size is bigger than this threshold,"
+             " even if we can remove a big portion of instructions later."));
+
static cl::opt<unsigned>
UnrollCount("unroll-count", cl::init(0), cl::Hidden,
cl::desc("Use this unroll count for all loops including those with "
@@ -63,11 +82,16 @@ namespace {
static char ID; // Pass ID, replacement for typeid
LoopUnroll(int T = -1, int C = -1, int P = -1, int R = -1) : LoopPass(ID) {
CurrentThreshold = (T == -1) ? UnrollThreshold : unsigned(T);
+ CurrentAbsoluteThreshold = UnrollAbsoluteThreshold;
+ CurrentMinPercentOfOptimized = UnrollMinPercentOfOptimized;
CurrentCount = (C == -1) ? UnrollCount : unsigned(C);
CurrentAllowPartial = (P == -1) ? UnrollAllowPartial : (bool)P;
CurrentRuntime = (R == -1) ? UnrollRuntime : (bool)R;
UserThreshold = (T != -1) || (UnrollThreshold.getNumOccurrences() > 0);
+ UserAbsoluteThreshold = (UnrollAbsoluteThreshold.getNumOccurrences() > 0);
+ UserPercentOfOptimized =
+ (UnrollMinPercentOfOptimized.getNumOccurrences() > 0);
UserAllowPartial = (P != -1) ||
(UnrollAllowPartial.getNumOccurrences() > 0);
UserRuntime = (R != -1) || (UnrollRuntime.getNumOccurrences() > 0);
@@ -91,10 +115,16 @@ namespace {
unsigned CurrentCount;
unsigned CurrentThreshold;
+ unsigned CurrentAbsoluteThreshold;
+ unsigned CurrentMinPercentOfOptimized;
bool CurrentAllowPartial;
bool CurrentRuntime;
bool UserCount; // CurrentCount is user-specified.
bool UserThreshold; // CurrentThreshold is user-specified.
+ bool UserAbsoluteThreshold; // CurrentAbsoluteThreshold is
+ // user-specified.
+ bool UserPercentOfOptimized; // CurrentMinPercentOfOptimized is
+ // user-specified.
bool UserAllowPartial; // CurrentAllowPartial is user-specified.
bool UserRuntime; // CurrentRuntime is user-specified.
@@ -105,16 +135,15 @@ namespace {
///
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<AssumptionCacheTracker>();
- AU.addRequired<LoopInfo>();
- AU.addPreserved<LoopInfo>();
+ AU.addRequired<LoopInfoWrapperPass>();
+ AU.addPreserved<LoopInfoWrapperPass>();
AU.addRequiredID(LoopSimplifyID);
AU.addPreservedID(LoopSimplifyID);
AU.addRequiredID(LCSSAID);
AU.addPreservedID(LCSSAID);
AU.addRequired<ScalarEvolution>();
AU.addPreserved<ScalarEvolution>();
- AU.addRequired<TargetTransformInfo>();
- AU.addRequired<FunctionTargetTransformInfo>();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
// FIXME: Loop unroll requires LCSSA. And LCSSA requires dom info.
// If loop unroll does not preserve dom info then LCSSA pass on next
// loop will receive invalid dom info.
@@ -124,9 +153,11 @@ namespace {
// Fill in the UnrollingPreferences parameter with values from the
// TargetTransformationInfo.
- void getUnrollingPreferences(Loop *L, const FunctionTargetTransformInfo &FTTI,
+ void getUnrollingPreferences(Loop *L, const TargetTransformInfo &TTI,
TargetTransformInfo::UnrollingPreferences &UP) {
UP.Threshold = CurrentThreshold;
+ UP.AbsoluteThreshold = CurrentAbsoluteThreshold;
+ UP.MinPercentOfOptimized = CurrentMinPercentOfOptimized;
UP.OptSizeThreshold = OptSizeUnrollThreshold;
UP.PartialThreshold = CurrentThreshold;
UP.PartialOptSizeThreshold = OptSizeUnrollThreshold;
@@ -134,7 +165,8 @@ namespace {
UP.MaxCount = UINT_MAX;
UP.Partial = CurrentAllowPartial;
UP.Runtime = CurrentRuntime;
- FTTI.getUnrollingPreferences(L, UP);
+ UP.AllowExpensiveTripCount = false;
+ TTI.getUnrollingPreferences(L, UP);
}
// Select and return an unroll count based on parameters from
@@ -153,7 +185,9 @@ namespace {
// unrolled loops respectively.
void selectThresholds(const Loop *L, bool HasPragma,
const TargetTransformInfo::UnrollingPreferences &UP,
- unsigned &Threshold, unsigned &PartialThreshold) {
+ unsigned &Threshold, unsigned &PartialThreshold,
+ unsigned &AbsoluteThreshold,
+ unsigned &PercentOfOptimizedForCompleteUnroll) {
// Determine the current unrolling threshold. While this is
// normally set from UnrollThreshold, it is overridden to a
// smaller value if the current function is marked as
@@ -161,10 +195,15 @@ namespace {
// specified.
Threshold = UserThreshold ? CurrentThreshold : UP.Threshold;
PartialThreshold = UserThreshold ? CurrentThreshold : UP.PartialThreshold;
+ AbsoluteThreshold = UserAbsoluteThreshold ? CurrentAbsoluteThreshold
+ : UP.AbsoluteThreshold;
+ PercentOfOptimizedForCompleteUnroll = UserPercentOfOptimized
+ ? CurrentMinPercentOfOptimized
+ : UP.MinPercentOfOptimized;
+
if (!UserThreshold &&
- L->getHeader()->getParent()->getAttributes().
- hasAttribute(AttributeSet::FunctionIndex,
- Attribute::OptimizeForSize)) {
+ L->getHeader()->getParent()->hasFnAttribute(
+ Attribute::OptimizeForSize)) {
Threshold = UP.OptSizeThreshold;
PartialThreshold = UP.PartialOptSizeThreshold;
}
@@ -180,15 +219,18 @@ namespace {
std::max<unsigned>(PartialThreshold, PragmaUnrollThreshold);
}
}
+ bool canUnrollCompletely(Loop *L, unsigned Threshold,
+ unsigned AbsoluteThreshold, uint64_t UnrolledSize,
+ unsigned NumberOfOptimizedInstructions,
+ unsigned PercentOfOptimizedForCompleteUnroll);
};
}
char LoopUnroll::ID = 0;
INITIALIZE_PASS_BEGIN(LoopUnroll, "loop-unroll", "Unroll loops", false, false)
-INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
-INITIALIZE_PASS_DEPENDENCY(FunctionTargetTransformInfo)
-INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
@@ -203,6 +245,407 @@ Pass *llvm::createSimpleLoopUnrollPass() {
return llvm::createLoopUnrollPass(-1, -1, 0, 0);
}
+namespace {
+/// \brief SCEV expressions visitor used for finding expressions that would
+/// become constants if the loop L is unrolled.
+struct FindConstantPointers {
+ /// \brief Shows whether the expression is ConstAddress+Constant or not.
+ bool IndexIsConstant;
+
+ /// \brief Used for filtering out SCEV expressions with two or more AddRec
+ /// subexpressions.
+ ///
+ /// Used to filter out complicated SCEV expressions, having several AddRec
+ /// sub-expressions. We don't handle them, because unrolling one loop
+ /// would help to replace only one of these inductions with a constant, and
+ /// consequently, the expression would remain non-constant.
+ bool HaveSeenAR;
+
+ /// \brief If the SCEV expression becomes ConstAddress+Constant, this value
+ /// holds ConstAddress. Otherwise, it's nullptr.
+ Value *BaseAddress;
+
+ /// \brief The loop, which we try to completely unroll.
+ const Loop *L;
+
+ ScalarEvolution &SE;
+
+ FindConstantPointers(const Loop *L, ScalarEvolution &SE)
+ : IndexIsConstant(true), HaveSeenAR(false), BaseAddress(nullptr),
+ L(L), SE(SE) {}
+
+  /// Examine the given expression S and figure out whether it could become
+  /// part of an expression that turns into a constant after the loop is
+  /// unrolled.
+ /// The routine sets IndexIsConstant and HaveSeenAR according to the analysis
+ /// results.
+ /// \returns true if we need to examine subexpressions, and false otherwise.
+ bool follow(const SCEV *S) {
+ if (const SCEVUnknown *SC = dyn_cast<SCEVUnknown>(S)) {
+ // We've reached the leaf node of SCEV, it's most probably just a
+ // variable.
+ // If it's the only one SCEV-subexpression, then it might be a base
+ // address of an index expression.
+ // If we've already recorded base address, then just give up on this SCEV
+ // - it's too complicated.
+ if (BaseAddress) {
+ IndexIsConstant = false;
+ return false;
+ }
+ BaseAddress = SC->getValue();
+ return false;
+ }
+ if (isa<SCEVConstant>(S))
+ return false;
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
+ // If the current SCEV expression is AddRec, and its loop isn't the loop
+ // we are about to unroll, then we won't get a constant address after
+ // unrolling, and thus, won't be able to eliminate the load.
+ if (AR->getLoop() != L) {
+ IndexIsConstant = false;
+ return false;
+ }
+ // We don't handle multiple AddRecs here, so give up in this case.
+ if (HaveSeenAR) {
+ IndexIsConstant = false;
+ return false;
+ }
+ HaveSeenAR = true;
+ }
+
+ // Continue traversal.
+ return true;
+ }
+ bool isDone() const { return !IndexIsConstant; }
+};
+} // End anonymous namespace.
+
+namespace {
+/// \brief A cache of SCEV results used to optimize repeated queries to SCEV on
+/// the same set of instructions.
+///
+/// The primary cost this saves is the cost of checking the validity of a SCEV
+/// every time it is looked up. However, in some cases we can provide a reduced
+/// and especially useful model for an instruction based upon SCEV that is
+/// non-trivial to compute but more useful to clients.
+class SCEVCache {
+public:
+ /// \brief Struct to represent a GEP whose start and step are known fixed
+ /// offsets from a base address due to SCEV's analysis.
+ struct GEPDescriptor {
+ Value *BaseAddr = nullptr;
+ unsigned Start = 0;
+ unsigned Step = 0;
+ };
+
+ Optional<GEPDescriptor> getGEPDescriptor(GetElementPtrInst *GEP);
+
+ SCEVCache(const Loop &L, ScalarEvolution &SE) : L(L), SE(SE) {}
+
+private:
+ const Loop &L;
+ ScalarEvolution &SE;
+
+ SmallDenseMap<GetElementPtrInst *, GEPDescriptor> GEPDescriptors;
+};
+} // End anonymous namespace.
+
+/// \brief Get a simplified descriptor for a GEP instruction.
+///
+/// Where possible, this produces a simplified descriptor for a GEP instruction
+/// using SCEV analysis of the containing loop. If this isn't possible, it
+/// returns an empty optional.
+///
+/// The model is a base address, an initial offset, and a per-iteration step.
+/// This fits very common patterns of GEPs inside loops and is something we can
+/// use to simulate the behavior of a particular iteration of a loop.
+///
+/// This is a cached interface. The first call may do non-trivial work to
+/// compute the result, but all subsequent calls will return a fast answer
+/// based on a cached result. This includes caching negative results.
+Optional<SCEVCache::GEPDescriptor>
+SCEVCache::getGEPDescriptor(GetElementPtrInst *GEP) {
+ decltype(GEPDescriptors)::iterator It;
+ bool Inserted;
+
+ std::tie(It, Inserted) = GEPDescriptors.insert({GEP, {}});
+
+ if (!Inserted) {
+ if (!It->second.BaseAddr)
+ return None;
+
+ return It->second;
+ }
+
+ // We've inserted a new record into the cache, so compute the GEP descriptor
+ // if possible.
+ Value *V = cast<Value>(GEP);
+ if (!SE.isSCEVable(V->getType()))
+ return None;
+ const SCEV *S = SE.getSCEV(V);
+
+ // FIXME: It'd be nice if the worklist and set used by the
+ // SCEVTraversal could be re-used between loop iterations, but the
+ // interface doesn't support that. There is no way to clear the visited
+ // sets between uses.
+ FindConstantPointers Visitor(&L, SE);
+ SCEVTraversal<FindConstantPointers> T(Visitor);
+
+ // Try to find (BaseAddress+Step+Offset) tuple.
+ // If succeeded, save it to the cache - it might help in folding
+ // loads.
+ T.visitAll(S);
+ if (!Visitor.IndexIsConstant || !Visitor.BaseAddress)
+ return None;
+
+ const SCEV *BaseAddrSE = SE.getSCEV(Visitor.BaseAddress);
+ if (BaseAddrSE->getType() != S->getType())
+ return None;
+ const SCEV *OffSE = SE.getMinusSCEV(S, BaseAddrSE);
+ const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(OffSE);
+
+ if (!AR)
+ return None;
+
+ const SCEVConstant *StepSE =
+ dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE));
+ const SCEVConstant *StartSE = dyn_cast<SCEVConstant>(AR->getStart());
+ if (!StepSE || !StartSE)
+ return None;
+
+ // Check and skip caching if doing so would require lots of bits to
+ // avoid overflow.
+ APInt Start = StartSE->getValue()->getValue();
+ APInt Step = StepSE->getValue()->getValue();
+ if (Start.getActiveBits() > 32 || Step.getActiveBits() > 32)
+ return None;
+
+ // We found a cacheable SCEV model for the GEP.
+ It->second.BaseAddr = Visitor.BaseAddress;
+ It->second.Start = Start.getLimitedValue();
+ It->second.Step = Step.getLimitedValue();
+ return It->second;
+}
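A minimal standalone sketch, separate from the patch above, of the address model this cache returns: once a GEP's SCEV is recognized as a base address plus a constant Start and Step over the loop being unrolled, the byte offset of every simulated iteration is a plain constant. The struct and values below are hypothetical stand-ins, not LLVM API.

#include <cstdint>
#include <iostream>

// Hypothetical mirror of SCEVCache::GEPDescriptor: a base pointer name plus a
// fixed start offset and a per-iteration step, both in bytes.
struct ToyGEPDescriptor {
  const char *BaseName; // stands in for the IR base address Value*
  uint64_t Start;       // byte offset on iteration 0
  uint64_t Step;        // byte increment per iteration
};

int main() {
  // E.g. a GEP whose offset SCEV has Start = 8 and Step = 4 over the loop.
  ToyGEPDescriptor D{"%a", 8, 4};
  for (unsigned Iteration = 0; Iteration != 3; ++Iteration) {
    uint64_t Offset = D.Start + D.Step * Iteration; // 8, 12, 16: all constant
    std::cout << D.BaseName << " + " << Offset << '\n';
  }
  return 0;
}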
+
+namespace {
+// This class is used to get an estimate of the optimization effects that we
+// could get from complete loop unrolling. It comes from the fact that some
+// loads might be replaced with concrete constant values and that could trigger
+// a chain of instruction simplifications.
+//
+// E.g. we might have:
+// int a[] = {0, 1, 0};
+// v = 0;
+// for (i = 0; i < 3; i ++)
+// v += b[i]*a[i];
+// If we completely unroll the loop, we would get:
+// v = b[0]*a[0] + b[1]*a[1] + b[2]*a[2]
+// Which then will be simplified to:
+// v = b[0]* 0 + b[1]* 1 + b[2]* 0
+// And finally:
+// v = b[1]
+class UnrolledInstAnalyzer : private InstVisitor<UnrolledInstAnalyzer, bool> {
+ typedef InstVisitor<UnrolledInstAnalyzer, bool> Base;
+ friend class InstVisitor<UnrolledInstAnalyzer, bool>;
+
+public:
+ UnrolledInstAnalyzer(unsigned Iteration,
+ DenseMap<Value *, Constant *> &SimplifiedValues,
+ SCEVCache &SC)
+ : Iteration(Iteration), SimplifiedValues(SimplifiedValues), SC(SC) {}
+
+ // Allow access to the initial visit method.
+ using Base::visit;
+
+private:
+ /// \brief Number of currently simulated iteration.
+ ///
+ /// If an expression is ConstAddress+Constant, then the Constant is
+ /// Start + Iteration*Step, where Start and Step could be obtained from
+ /// SCEVGEPCache.
+ unsigned Iteration;
+
+  // While we walk the loop instructions, we build up and maintain a mapping
+ // of simplified values specific to this iteration. The idea is to propagate
+ // any special information we have about loads that can be replaced with
+ // constants after complete unrolling, and account for likely simplifications
+ // post-unrolling.
+ DenseMap<Value *, Constant *> &SimplifiedValues;
+
+ // We use a cache to wrap all our SCEV queries.
+ SCEVCache &SC;
+
+ /// Base case for the instruction visitor.
+ bool visitInstruction(Instruction &I) { return false; };
+
+ /// TODO: Add visitors for other instruction types, e.g. ZExt, SExt.
+
+ /// Try to simplify binary operator I.
+ ///
+  /// TODO: It is probably worth hoisting the code for estimating the
+  /// simplification effects into a separate class, since we already have very
+  /// similar code in InlineCost.
+ bool visitBinaryOperator(BinaryOperator &I) {
+ Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
+ if (!isa<Constant>(LHS))
+ if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
+ LHS = SimpleLHS;
+ if (!isa<Constant>(RHS))
+ if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
+ RHS = SimpleRHS;
+ Value *SimpleV = nullptr;
+ const DataLayout &DL = I.getModule()->getDataLayout();
+ if (auto FI = dyn_cast<FPMathOperator>(&I))
+ SimpleV =
+ SimplifyFPBinOp(I.getOpcode(), LHS, RHS, FI->getFastMathFlags(), DL);
+ else
+ SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, DL);
+
+ if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
+ SimplifiedValues[&I] = C;
+
+ return SimpleV;
+ }
+
+ /// Try to fold load I.
+ bool visitLoad(LoadInst &I) {
+ Value *AddrOp = I.getPointerOperand();
+ if (!isa<Constant>(AddrOp))
+ if (Constant *SimplifiedAddrOp = SimplifiedValues.lookup(AddrOp))
+ AddrOp = SimplifiedAddrOp;
+
+ auto *GEP = dyn_cast<GetElementPtrInst>(AddrOp);
+ if (!GEP)
+ return false;
+ auto OptionalGEPDesc = SC.getGEPDescriptor(GEP);
+ if (!OptionalGEPDesc)
+ return false;
+
+ auto GV = dyn_cast<GlobalVariable>(OptionalGEPDesc->BaseAddr);
+ // We're only interested in loads that can be completely folded to a
+ // constant.
+ if (!GV || !GV->hasInitializer())
+ return false;
+
+ ConstantDataSequential *CDS =
+ dyn_cast<ConstantDataSequential>(GV->getInitializer());
+ if (!CDS)
+ return false;
+
+ // This calculation should never overflow because we bound Iteration quite
+ // low and both the start and step are 32-bit integers. We use signed
+ // integers so that UBSan will catch if a bug sneaks into the code.
+ int ElemSize = CDS->getElementType()->getPrimitiveSizeInBits() / 8U;
+ int64_t Index = ((int64_t)OptionalGEPDesc->Start +
+ (int64_t)OptionalGEPDesc->Step * (int64_t)Iteration) /
+ ElemSize;
+ if (Index >= CDS->getNumElements()) {
+ // FIXME: For now we conservatively ignore out of bound accesses, but
+ // we're allowed to perform the optimization in this case.
+ return false;
+ }
+
+ Constant *CV = CDS->getElementAsConstant(Index);
+ assert(CV && "Constant expected.");
+ SimplifiedValues[&I] = CV;
+
+ return true;
+ }
+};
+} // namespace
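A rough sketch, independent of the code above and using invented names, of why the per-iteration SimplifiedValues map matters: once visitLoad maps a load to a constant, a later binary operator whose other operand is also known folds as well, and that whole chain is what gets counted as removable.

#include <iostream>
#include <map>
#include <string>

// Toy stand-ins: "instructions" are just names, and the map records which of
// them are known to fold to a constant on the current simulated iteration.
typedef std::map<std::string, int> SimplifiedValuesMap;

// Mimics the shape of visitBinaryOperator: fold only when both operands are
// already known constants via earlier entries in the map.
static bool simulateAdd(const std::string &Dst, const std::string &LHS,
                        const std::string &RHS, SimplifiedValuesMap &SV) {
  SimplifiedValuesMap::const_iterator L = SV.find(LHS), R = SV.find(RHS);
  if (L == SV.end() || R == SV.end())
    return false;  // an operand is still unknown, so the add survives
  SV[Dst] = L->second + R->second;
  return true;     // the add would disappear after complete unrolling
}

int main() {
  SimplifiedValuesMap SV;
  SV["%ld"] = 0; // a load that the visitLoad-style logic folded to 0
  SV["%c"] = 5;  // an operand that was already a constant
  unsigned Optimized = 0;
  if (simulateAdd("%sum", "%ld", "%c", SV))
    ++Optimized; // the dependent add folds too
  std::cout << "foldable instructions: " << Optimized << '\n';
  return 0;
}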
+
+
+namespace {
+struct EstimatedUnrollCost {
+ /// \brief Count the number of optimized instructions.
+ unsigned NumberOfOptimizedInstructions;
+
+ /// \brief Count the total number of instructions.
+ unsigned UnrolledLoopSize;
+};
+}
+
+/// \brief Figure out if the loop is worth full unrolling.
+///
+/// Complete loop unrolling can make some loads constant, and we need to know
+/// if that would expose any further optimization opportunities. This routine
+/// estimates this optimization. It assigns computed number of instructions,
+/// that potentially might be optimized away, to
+/// NumberOfOptimizedInstructions, and total number of instructions to
+/// UnrolledLoopSize (not counting blocks that won't be reached, if we were
+/// able to compute the condition).
+/// \returns false if we can't analyze the loop, or if we discovered that
+/// unrolling won't give anything. Otherwise, returns true.
+Optional<EstimatedUnrollCost>
+analyzeLoopUnrollCost(const Loop *L, unsigned TripCount, ScalarEvolution &SE,
+ const TargetTransformInfo &TTI,
+ unsigned MaxUnrolledLoopSize) {
+ // We want to be able to scale offsets by the trip count and add more offsets
+ // to them without checking for overflows, and we already don't want to
+ // analyze *massive* trip counts, so we force the max to be reasonably small.
+ assert(UnrollMaxIterationsCountToAnalyze < (INT_MAX / 2) &&
+ "The unroll iterations max is too large!");
+
+ // Don't simulate loops with a big or unknown tripcount
+ if (!UnrollMaxIterationsCountToAnalyze || !TripCount ||
+ TripCount > UnrollMaxIterationsCountToAnalyze)
+ return None;
+
+ SmallSetVector<BasicBlock *, 16> BBWorklist;
+ DenseMap<Value *, Constant *> SimplifiedValues;
+
+ // Use a cache to access SCEV expressions so that we don't pay the cost on
+ // each iteration. This cache is lazily self-populating.
+ SCEVCache SC(*L, SE);
+
+ unsigned NumberOfOptimizedInstructions = 0;
+ unsigned UnrolledLoopSize = 0;
+
+ // Simulate execution of each iteration of the loop counting instructions,
+ // which would be simplified.
+ // Since the same load will take different values on different iterations,
+ // we literally have to go through all loop's iterations.
+ for (unsigned Iteration = 0; Iteration < TripCount; ++Iteration) {
+ SimplifiedValues.clear();
+ UnrolledInstAnalyzer Analyzer(Iteration, SimplifiedValues, SC);
+
+ BBWorklist.clear();
+ BBWorklist.insert(L->getHeader());
+ // Note that we *must not* cache the size, this loop grows the worklist.
+ for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
+ BasicBlock *BB = BBWorklist[Idx];
+
+ // Visit all instructions in the given basic block and try to simplify
+ // it. We don't change the actual IR, just count optimization
+ // opportunities.
+ for (Instruction &I : *BB) {
+ UnrolledLoopSize += TTI.getUserCost(&I);
+
+ // Visit the instruction to analyze its loop cost after unrolling,
+ // and if the visitor returns true, then we can optimize this
+ // instruction away.
+ if (Analyzer.visit(I))
+ NumberOfOptimizedInstructions += TTI.getUserCost(&I);
+
+ // If unrolled body turns out to be too big, bail out.
+ if (UnrolledLoopSize - NumberOfOptimizedInstructions >
+ MaxUnrolledLoopSize)
+ return None;
+ }
+
+ // Add BB's successors to the worklist.
+ for (BasicBlock *Succ : successors(BB))
+ if (L->contains(Succ))
+ BBWorklist.insert(Succ);
+ }
+
+ // If we found no optimization opportunities on the first iteration, we
+ // won't find them on later ones too.
+ if (!NumberOfOptimizedInstructions)
+ return None;
+ }
+ return {{NumberOfOptimizedInstructions, UnrolledLoopSize}};
+}
+
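A back-of-the-envelope illustration of the two totals this routine reports and of its early bail-out; all numbers are invented and the threshold is only an example.

#include <iostream>

int main() {
  // Suppose each of 4 simulated iterations costs 50 TTI units, of which 30
  // units' worth of instructions fold to constants on every iteration.
  const unsigned TripCount = 4, CostPerIter = 50, FoldedPerIter = 30;
  const unsigned MaxUnrolledLoopSize = 2000; // e.g. the absolute threshold

  unsigned UnrolledLoopSize = 0, NumberOfOptimizedInstructions = 0;
  for (unsigned It = 0; It != TripCount; ++It) {
    UnrolledLoopSize += CostPerIter;
    NumberOfOptimizedInstructions += FoldedPerIter;
    // Same shape as the in-loop check above: only the surviving size counts.
    if (UnrolledLoopSize - NumberOfOptimizedInstructions > MaxUnrolledLoopSize)
      return 1; // the real routine would return None here
  }
  std::cout << UnrolledLoopSize << " total, "
            << NumberOfOptimizedInstructions << " removable\n"; // 200, 120
  return 0;
}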
/// ApproximateLoopSize - Approximate the size of the loop.
static unsigned ApproximateLoopSize(const Loop *L, unsigned &NumCalls,
bool &NotDuplicatable,
@@ -234,44 +677,31 @@ static unsigned ApproximateLoopSize(const Loop *L, unsigned &NumCalls,
// Returns the loop hint metadata node with the given name (for example,
// "llvm.loop.unroll.count"). If no such metadata node exists, then nullptr is
// returned.
-static const MDNode *GetUnrollMetadata(const Loop *L, StringRef Name) {
- MDNode *LoopID = L->getLoopID();
- if (!LoopID)
- return nullptr;
-
- // First operand should refer to the loop id itself.
- assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
- assert(LoopID->getOperand(0) == LoopID && "invalid loop id");
-
- for (unsigned i = 1, e = LoopID->getNumOperands(); i < e; ++i) {
- const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
- if (!MD)
- continue;
-
- const MDString *S = dyn_cast<MDString>(MD->getOperand(0));
- if (!S)
- continue;
-
- if (Name.equals(S->getString()))
- return MD;
- }
+static MDNode *GetUnrollMetadataForLoop(const Loop *L, StringRef Name) {
+ if (MDNode *LoopID = L->getLoopID())
+ return GetUnrollMetadata(LoopID, Name);
return nullptr;
}
// Returns true if the loop has an unroll(full) pragma.
static bool HasUnrollFullPragma(const Loop *L) {
- return GetUnrollMetadata(L, "llvm.loop.unroll.full");
+ return GetUnrollMetadataForLoop(L, "llvm.loop.unroll.full");
}
// Returns true if the loop has an unroll(disable) pragma.
static bool HasUnrollDisablePragma(const Loop *L) {
- return GetUnrollMetadata(L, "llvm.loop.unroll.disable");
+ return GetUnrollMetadataForLoop(L, "llvm.loop.unroll.disable");
+}
+
+// Returns true if the loop has a runtime unroll(disable) pragma.
+static bool HasRuntimeUnrollDisablePragma(const Loop *L) {
+ return GetUnrollMetadataForLoop(L, "llvm.loop.unroll.runtime.disable");
}
// If loop has an unroll_count pragma return the (necessarily
// positive) value from the pragma. Otherwise return 0.
static unsigned UnrollCountPragmaValue(const Loop *L) {
- const MDNode *MD = GetUnrollMetadata(L, "llvm.loop.unroll.count");
+ MDNode *MD = GetUnrollMetadataForLoop(L, "llvm.loop.unroll.count");
if (MD) {
assert(MD->getNumOperands() == 2 &&
"Unroll count hint metadata should have two operands.");
@@ -319,6 +749,49 @@ static void SetLoopAlreadyUnrolled(Loop *L) {
L->setLoopID(NewLoopID);
}
+bool LoopUnroll::canUnrollCompletely(
+ Loop *L, unsigned Threshold, unsigned AbsoluteThreshold,
+ uint64_t UnrolledSize, unsigned NumberOfOptimizedInstructions,
+ unsigned PercentOfOptimizedForCompleteUnroll) {
+
+ if (Threshold == NoThreshold) {
+ DEBUG(dbgs() << " Can fully unroll, because no threshold is set.\n");
+ return true;
+ }
+
+ if (UnrolledSize <= Threshold) {
+ DEBUG(dbgs() << " Can fully unroll, because unrolled size: "
+ << UnrolledSize << "<" << Threshold << "\n");
+ return true;
+ }
+
+ assert(UnrolledSize && "UnrolledSize can't be 0 at this point.");
+ unsigned PercentOfOptimizedInstructions =
+ (uint64_t)NumberOfOptimizedInstructions * 100ull / UnrolledSize;
+
+ if (UnrolledSize <= AbsoluteThreshold &&
+ PercentOfOptimizedInstructions >= PercentOfOptimizedForCompleteUnroll) {
+ DEBUG(dbgs() << " Can fully unroll, because unrolling will help removing "
+ << PercentOfOptimizedInstructions
+ << "% instructions (threshold: "
+ << PercentOfOptimizedForCompleteUnroll << "%)\n");
+ DEBUG(dbgs() << " Unrolled size (" << UnrolledSize
+ << ") is less than the threshold (" << AbsoluteThreshold
+ << ").\n");
+ return true;
+ }
+
+ DEBUG(dbgs() << " Too large to fully unroll:\n");
+ DEBUG(dbgs() << " Unrolled size: " << UnrolledSize << "\n");
+ DEBUG(dbgs() << " Estimated number of optimized instructions: "
+ << NumberOfOptimizedInstructions << "\n");
+ DEBUG(dbgs() << " Absolute threshold: " << AbsoluteThreshold << "\n");
+ DEBUG(dbgs() << " Minimum percent of removed instructions: "
+ << PercentOfOptimizedForCompleteUnroll << "\n");
+ DEBUG(dbgs() << " Threshold for small loops: " << Threshold << "\n");
+ return false;
+}
+
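To make the new heuristic concrete, a small hypothetical calculation in the same shape as canUnrollCompletely; the numbers are invented, and 150, 2000 and 20 are simply the current defaults of the options added above.

#include <cstdint>
#include <iostream>

int main() {
  const unsigned Threshold = 150;          // -unroll-threshold default
  const unsigned AbsoluteThreshold = 2000; // -unroll-absolute-threshold default
  const unsigned MinPercent = 20;          // minimum percent of removed instrs
  const uint64_t UnrolledSize = 1200;      // estimated size after full unroll
  const unsigned NumberOfOptimizedInstructions = 300;

  const unsigned Percent =
      (uint64_t)NumberOfOptimizedInstructions * 100ull / UnrolledSize; // 25

  // Over the small-loop threshold, but under the absolute one and with enough
  // removable work, so the loop would still be fully unrolled.
  const bool FullUnroll =
      UnrolledSize <= Threshold ||
      (UnrolledSize <= AbsoluteThreshold && Percent >= MinPercent);
  std::cout << Percent << "% removable -> "
            << (FullUnroll ? "full unroll" : "keep the loop") << '\n';
  return 0;
}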
unsigned LoopUnroll::selectUnrollCount(
const Loop *L, unsigned TripCount, bool PragmaFullUnroll,
unsigned PragmaCount, const TargetTransformInfo::UnrollingPreferences &UP,
@@ -363,13 +836,13 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
if (skipOptnoneFunction(L))
return false;
- LoopInfo *LI = &getAnalysis<LoopInfo>();
+ Function &F = *L->getHeader()->getParent();
+
+ LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
ScalarEvolution *SE = &getAnalysis<ScalarEvolution>();
- const TargetTransformInfo &TTI = getAnalysis<TargetTransformInfo>();
- const FunctionTargetTransformInfo &FTTI =
- getAnalysis<FunctionTargetTransformInfo>();
- auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
- *L->getHeader()->getParent());
+ const TargetTransformInfo &TTI =
+ getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+ auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
BasicBlock *Header = L->getHeader();
DEBUG(dbgs() << "Loop Unroll: F[" << Header->getParent()->getName()
@@ -383,7 +856,7 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
bool HasPragma = PragmaFullUnroll || PragmaCount > 0;
TargetTransformInfo::UnrollingPreferences UP;
- getUnrollingPreferences(L, FTTI, UP);
+ getUnrollingPreferences(L, TTI, UP);
// Find trip count and trip multiple if count is not available
unsigned TripCount = 0;
@@ -426,20 +899,33 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
}
unsigned Threshold, PartialThreshold;
- selectThresholds(L, HasPragma, UP, Threshold, PartialThreshold);
+ unsigned AbsoluteThreshold, PercentOfOptimizedForCompleteUnroll;
+ selectThresholds(L, HasPragma, UP, Threshold, PartialThreshold,
+ AbsoluteThreshold, PercentOfOptimizedForCompleteUnroll);
// Given Count, TripCount and thresholds determine the type of
// unrolling which is to be performed.
enum { Full = 0, Partial = 1, Runtime = 2 };
int Unrolling;
if (TripCount && Count == TripCount) {
- if (Threshold != NoThreshold && UnrolledSize > Threshold) {
- DEBUG(dbgs() << " Too large to fully unroll with count: " << Count
- << " because size: " << UnrolledSize << ">" << Threshold
- << "\n");
- Unrolling = Partial;
- } else {
+ Unrolling = Partial;
+ // If the loop is really small, we don't need to run an expensive analysis.
+ if (canUnrollCompletely(
+ L, Threshold, AbsoluteThreshold,
+ UnrolledSize, 0, 100)) {
Unrolling = Full;
+ } else {
+ // The loop isn't that small, but we still can fully unroll it if that
+ // helps to remove a significant number of instructions.
+ // To check that, run additional analysis on the loop.
+ if (Optional<EstimatedUnrollCost> Cost =
+ analyzeLoopUnrollCost(L, TripCount, *SE, TTI, AbsoluteThreshold))
+ if (canUnrollCompletely(L, Threshold, AbsoluteThreshold,
+ Cost->UnrolledLoopSize,
+ Cost->NumberOfOptimizedInstructions,
+ PercentOfOptimizedForCompleteUnroll)) {
+ Unrolling = Full;
+ }
}
} else if (TripCount && Count < TripCount) {
Unrolling = Partial;
@@ -450,6 +936,9 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
// Reduce count based on the type of unrolling and the threshold values.
unsigned OriginalCount = Count;
bool AllowRuntime = UserRuntime ? CurrentRuntime : UP.Runtime;
+ if (HasRuntimeUnrollDisablePragma(L)) {
+ AllowRuntime = false;
+ }
if (Unrolling == Partial) {
bool AllowPartial = UserAllowPartial ? CurrentAllowPartial : UP.Partial;
if (!AllowPartial && !CountSetExplicitly) {
@@ -518,8 +1007,8 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
}
// Unroll the loop.
- if (!UnrollLoop(L, Count, TripCount, AllowRuntime, TripMultiple, LI, this,
- &LPM, &AC))
+ if (!UnrollLoop(L, Count, TripCount, AllowRuntime, UP.AllowExpensiveTripCount,
+ TripMultiple, LI, this, &LPM, &AC))
return false;
return true;
diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp
index 9f4c12270d76..988d2af3ea90 100644
--- a/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -42,6 +42,7 @@
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -170,13 +171,13 @@ namespace {
AU.addRequired<AssumptionCacheTracker>();
AU.addRequiredID(LoopSimplifyID);
AU.addPreservedID(LoopSimplifyID);
- AU.addRequired<LoopInfo>();
- AU.addPreserved<LoopInfo>();
+ AU.addRequired<LoopInfoWrapperPass>();
+ AU.addPreserved<LoopInfoWrapperPass>();
AU.addRequiredID(LCSSAID);
AU.addPreservedID(LCSSAID);
AU.addPreserved<DominatorTreeWrapperPass>();
AU.addPreserved<ScalarEvolution>();
- AU.addRequired<TargetTransformInfo>();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
}
private:
@@ -333,10 +334,10 @@ void LUAnalysisCache::cloneData(const Loop *NewLoop, const Loop *OldLoop,
char LoopUnswitch::ID = 0;
INITIALIZE_PASS_BEGIN(LoopUnswitch, "loop-unswitch", "Unswitch loops",
false, false)
-INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
-INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_END(LoopUnswitch, "loop-unswitch", "Unswitch loops",
false, false)
@@ -387,7 +388,7 @@ bool LoopUnswitch::runOnLoop(Loop *L, LPPassManager &LPM_Ref) {
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
*L->getHeader()->getParent());
- LI = &getAnalysis<LoopInfo>();
+ LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
LPM = &LPM_Ref;
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
@@ -432,8 +433,10 @@ bool LoopUnswitch::processCurrentLoop() {
// Probably we reach the quota of branches for this loop. If so
// stop unswitching.
- if (!BranchesInfo.countLoop(currentLoop, getAnalysis<TargetTransformInfo>(),
- AC))
+ if (!BranchesInfo.countLoop(
+ currentLoop, getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
+ *currentLoop->getHeader()->getParent()),
+ AC))
return false;
// Loop over all of the basic blocks in the loop. If we find an interior
@@ -655,9 +658,7 @@ bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) {
// Check to see if it would be profitable to unswitch current loop.
// Do not do non-trivial unswitch while optimizing for size.
- if (OptimizeForSize ||
- F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
- Attribute::OptimizeForSize))
+ if (OptimizeForSize || F->hasFnAttribute(Attribute::OptimizeForSize))
return false;
UnswitchNontrivialCondition(LoopCond, Val, currentLoop);
@@ -675,7 +676,7 @@ static Loop *CloneLoop(Loop *L, Loop *PL, ValueToValueMapTy &VM,
for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
I != E; ++I)
if (LI->getLoopFor(*I) == L)
- New->addBasicBlockToLoop(cast<BasicBlock>(VM[*I]), LI->getBase());
+ New->addBasicBlockToLoop(cast<BasicBlock>(VM[*I]), *LI);
// Add all of the subloops to the new loop.
for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
@@ -706,8 +707,9 @@ void LoopUnswitch::EmitPreheaderBranchOnCondition(Value *LIC, Constant *Val,
// If either edge is critical, split it. This helps preserve LoopSimplify
// form for enclosing loops.
- SplitCriticalEdge(BI, 0, this, false, false, true);
- SplitCriticalEdge(BI, 1, this, false, false, true);
+ auto Options = CriticalEdgeSplittingOptions(DT, LI).setPreserveLCSSA();
+ SplitCriticalEdge(BI, 0, Options);
+ SplitCriticalEdge(BI, 1, Options);
}
/// UnswitchTrivialCondition - Given a loop that has a trivial unswitchable
@@ -726,7 +728,7 @@ void LoopUnswitch::UnswitchTrivialCondition(Loop *L, Value *Cond,
// First step, split the preheader, so that we know that there is a safe place
// to insert the conditional branch. We will change loopPreheader to have a
// conditional branch on Cond.
- BasicBlock *NewPH = SplitEdge(loopPreheader, loopHeader, this);
+ BasicBlock *NewPH = SplitEdge(loopPreheader, loopHeader, DT, LI);
// Now that we have a place to insert the conditional branch, create a place
// to branch to: this is the exit block out of the loop that we should
@@ -737,7 +739,7 @@ void LoopUnswitch::UnswitchTrivialCondition(Loop *L, Value *Cond,
// without actually branching to it (the exit block should be dominated by the
// loop header, not the preheader).
assert(!L->contains(ExitBlock) && "Exit block is in the loop?");
- BasicBlock *NewExit = SplitBlock(ExitBlock, ExitBlock->begin(), this);
+ BasicBlock *NewExit = SplitBlock(ExitBlock, ExitBlock->begin(), DT, LI);
// Okay, now we have a position to branch from and a position to branch to,
// insert the new conditional branch.
@@ -768,13 +770,9 @@ void LoopUnswitch::SplitExitEdges(Loop *L,
// Although SplitBlockPredecessors doesn't preserve loop-simplify in
// general, if we call it on all predecessors of all exits then it does.
- if (!ExitBlock->isLandingPad()) {
- SplitBlockPredecessors(ExitBlock, Preds, ".us-lcssa", this);
- } else {
- SmallVector<BasicBlock*, 2> NewBBs;
- SplitLandingPadPredecessors(ExitBlock, Preds, ".us-lcssa", ".us-lcssa",
- this, NewBBs);
- }
+ SplitBlockPredecessors(ExitBlock, Preds, ".us-lcssa",
+ /*AliasAnalysis*/ nullptr, DT, LI,
+ /*PreserveLCSSA*/ true);
}
}
@@ -797,7 +795,7 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
// First step, split the preheader and exit blocks, and add these blocks to
// the LoopBlocks list.
- BasicBlock *NewPreheader = SplitEdge(loopPreheader, loopHeader, this);
+ BasicBlock *NewPreheader = SplitEdge(loopPreheader, loopHeader, DT, LI);
LoopBlocks.push_back(NewPreheader);
// We want the loop to come after the preheader, but before the exit blocks.
@@ -850,14 +848,14 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
if (ParentLoop) {
// Make sure to add the cloned preheader and exit blocks to the parent loop
// as well.
- ParentLoop->addBasicBlockToLoop(NewBlocks[0], LI->getBase());
+ ParentLoop->addBasicBlockToLoop(NewBlocks[0], *LI);
}
for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
BasicBlock *NewExit = cast<BasicBlock>(VMap[ExitBlocks[i]]);
// The new exit block should be in the same loop as the old one.
if (Loop *ExitBBLoop = LI->getLoopFor(ExitBlocks[i]))
- ExitBBLoop->addBasicBlockToLoop(NewExit, LI->getBase());
+ ExitBBLoop->addBasicBlockToLoop(NewExit, *LI);
assert(NewExit->getTerminator()->getNumSuccessors() == 1 &&
"Exit block should have been split to have one successor!");
@@ -1043,7 +1041,7 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
// and hooked up so as to preserve the loop structure, because
// trying to update it is complicated. So instead we preserve the
// loop structure and put the block on a dead code path.
- SplitEdge(Switch, SISucc, this);
+ SplitEdge(Switch, SISucc, DT, LI);
// Compute the successors instead of relying on the return value
// of SplitEdge, since it may have split the switch successor
// after PHI nodes.
@@ -1085,6 +1083,7 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
/// pass.
///
void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
+ const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
while (!Worklist.empty()) {
Instruction *I = Worklist.back();
Worklist.pop_back();
@@ -1107,7 +1106,7 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
// See if instruction simplification can hack this up. This is common for
// things like "select false, X, Y" after unswitching made the condition be
// 'false'. TODO: update the domtree properly so we can pass it here.
- if (Value *V = SimplifyInstruction(I))
+ if (Value *V = SimplifyInstruction(I, DL))
if (LI->replacementPreservesLCSSAForm(I, V)) {
ReplaceUsesOfWith(I, V, Worklist, L, LPM);
continue;
diff --git a/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp b/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp
new file mode 100644
index 000000000000..0c47cbd5bfda
--- /dev/null
+++ b/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp
@@ -0,0 +1,192 @@
+//===- LowerExpectIntrinsic.cpp - Lower expect intrinsic ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass lowers the 'expect' intrinsic to LLVM metadata.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Transforms/Scalar.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "lower-expect-intrinsic"
+
+STATISTIC(ExpectIntrinsicsHandled,
+ "Number of 'expect' intrinsic instructions handled");
+
+static cl::opt<uint32_t>
+LikelyBranchWeight("likely-branch-weight", cl::Hidden, cl::init(64),
+ cl::desc("Weight of the branch likely to be taken (default = 64)"));
+static cl::opt<uint32_t>
+UnlikelyBranchWeight("unlikely-branch-weight", cl::Hidden, cl::init(4),
+ cl::desc("Weight of the branch unlikely to be taken (default = 4)"));
+
+static bool handleSwitchExpect(SwitchInst &SI) {
+ CallInst *CI = dyn_cast<CallInst>(SI.getCondition());
+ if (!CI)
+ return false;
+
+ Function *Fn = CI->getCalledFunction();
+ if (!Fn || Fn->getIntrinsicID() != Intrinsic::expect)
+ return false;
+
+ Value *ArgValue = CI->getArgOperand(0);
+ ConstantInt *ExpectedValue = dyn_cast<ConstantInt>(CI->getArgOperand(1));
+ if (!ExpectedValue)
+ return false;
+
+ SwitchInst::CaseIt Case = SI.findCaseValue(ExpectedValue);
+ unsigned n = SI.getNumCases(); // +1 for default case.
+ SmallVector<uint32_t, 16> Weights(n + 1, UnlikelyBranchWeight);
+
+ if (Case == SI.case_default())
+ Weights[0] = LikelyBranchWeight;
+ else
+ Weights[Case.getCaseIndex() + 1] = LikelyBranchWeight;
+
+ SI.setMetadata(LLVMContext::MD_prof,
+ MDBuilder(CI->getContext()).createBranchWeights(Weights));
+
+ SI.setCondition(ArgValue);
+ return true;
+}
+
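+// Illustrative only, with hypothetical input: for a switch with three cases
+// where the expected value matches the case at index 1, the Weights vector
+// built above would be {4, 4, 64, 4} -- slot 0 is the default destination and
+// slot 2 is the expected case (using the default likely/unlikely weights of
+// 64 and 4).
+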
+static bool handleBranchExpect(BranchInst &BI) {
+ if (BI.isUnconditional())
+ return false;
+
+ // Handle non-optimized IR code like:
+ // %expval = call i64 @llvm.expect.i64(i64 %conv1, i64 1)
+ // %tobool = icmp ne i64 %expval, 0
+ // br i1 %tobool, label %if.then, label %if.end
+ //
+ // Or the following simpler case:
+ // %expval = call i1 @llvm.expect.i1(i1 %cmp, i1 1)
+ // br i1 %expval, label %if.then, label %if.end
+
+ CallInst *CI;
+
+ ICmpInst *CmpI = dyn_cast<ICmpInst>(BI.getCondition());
+ if (!CmpI) {
+ CI = dyn_cast<CallInst>(BI.getCondition());
+ } else {
+ if (CmpI->getPredicate() != CmpInst::ICMP_NE)
+ return false;
+ CI = dyn_cast<CallInst>(CmpI->getOperand(0));
+ }
+
+ if (!CI)
+ return false;
+
+ Function *Fn = CI->getCalledFunction();
+ if (!Fn || Fn->getIntrinsicID() != Intrinsic::expect)
+ return false;
+
+ Value *ArgValue = CI->getArgOperand(0);
+ ConstantInt *ExpectedValue = dyn_cast<ConstantInt>(CI->getArgOperand(1));
+ if (!ExpectedValue)
+ return false;
+
+ MDBuilder MDB(CI->getContext());
+ MDNode *Node;
+
+ // If the expected value is 1, branch 0 is more likely to be taken;
+ // otherwise branch 1 is more likely.
+ if (ExpectedValue->isOne())
+ Node = MDB.createBranchWeights(LikelyBranchWeight, UnlikelyBranchWeight);
+ else
+ Node = MDB.createBranchWeights(UnlikelyBranchWeight, LikelyBranchWeight);
+
+ BI.setMetadata(LLVMContext::MD_prof, Node);
+
+ if (CmpI)
+ CmpI->setOperand(0, ArgValue);
+ else
+ BI.setCondition(ArgValue);
+ return true;
+}
+
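+// For context (illustrative, not part of this pass's code): frontends emit
+// the llvm.expect intrinsic for source-level hints such as
+//
+//   if (__builtin_expect(x == 0, 1)) { ... }
+//
+// and the handlers above turn that hint into "branch_weights" profile
+// metadata (64 vs. 4 with the default weights) before the intrinsic itself
+// is stripped below.
+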
+static bool lowerExpectIntrinsic(Function &F) {
+ bool Changed = false;
+
+ for (BasicBlock &BB : F) {
+ // Create "block_weights" metadata.
+ if (BranchInst *BI = dyn_cast<BranchInst>(BB.getTerminator())) {
+ if (handleBranchExpect(*BI))
+ ExpectIntrinsicsHandled++;
+ } else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB.getTerminator())) {
+ if (handleSwitchExpect(*SI))
+ ExpectIntrinsicsHandled++;
+ }
+
+ // remove llvm.expect intrinsics.
+ for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
+ CallInst *CI = dyn_cast<CallInst>(BI++);
+ if (!CI)
+ continue;
+
+ Function *Fn = CI->getCalledFunction();
+ if (Fn && Fn->getIntrinsicID() == Intrinsic::expect) {
+ Value *Exp = CI->getArgOperand(0);
+ CI->replaceAllUsesWith(Exp);
+ CI->eraseFromParent();
+ Changed = true;
+ }
+ }
+ }
+
+ return Changed;
+}
+
+PreservedAnalyses LowerExpectIntrinsicPass::run(Function &F) {
+ if (lowerExpectIntrinsic(F))
+ return PreservedAnalyses::none();
+
+ return PreservedAnalyses::all();
+}
+
+namespace {
+/// \brief Legacy pass for lowering expect intrinsics out of the IR.
+///
+/// When this pass is run over a function it uses expect intrinsics which feed
+/// branches and switches to provide branch weight metadata for those
+/// terminators. It then removes the expect intrinsics from the IR so the rest
+/// of the optimizer can ignore them.
+class LowerExpectIntrinsic : public FunctionPass {
+public:
+ static char ID;
+ LowerExpectIntrinsic() : FunctionPass(ID) {
+ initializeLowerExpectIntrinsicPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F) override { return lowerExpectIntrinsic(F); }
+};
+}
+
+char LowerExpectIntrinsic::ID = 0;
+INITIALIZE_PASS(LowerExpectIntrinsic, "lower-expect",
+ "Lower 'expect' Intrinsics", false, false)
+
+FunctionPass *llvm::createLowerExpectIntrinsicPass() {
+ return new LowerExpectIntrinsic();
+}
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 3eea3d4e27ae..66d6ac6f3a09 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -18,6 +18,7 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
@@ -28,7 +29,6 @@
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include <list>
using namespace llvm;
@@ -41,7 +41,8 @@ STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");
static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
- bool &VariableIdxFound, const DataLayout &TD){
+ bool &VariableIdxFound,
+ const DataLayout &DL) {
// Skip over the first indices.
gep_type_iterator GTI = gep_type_begin(GEP);
for (unsigned i = 1; i != Idx; ++i, ++GTI)
@@ -57,13 +58,13 @@ static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
// Handle struct indices, which add their field offset to the pointer.
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
- Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
+ Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
continue;
}
// Otherwise, we have a sequential type like an array or vector. Multiply
// the index by the ElementSize.
- uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
+ uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
Offset += Size*OpC->getSExtValue();
}
@@ -74,7 +75,7 @@ static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
/// constant offset, and return that constant offset. For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40]. In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
- const DataLayout &TD) {
+ const DataLayout &DL) {
Ptr1 = Ptr1->stripPointerCasts();
Ptr2 = Ptr2->stripPointerCasts();
@@ -92,12 +93,12 @@ static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
// If one pointer is a GEP and the other isn't, then see if the GEP is a
// constant offset from the base, as in "P" and "gep P, 1".
if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
- Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, TD);
+ Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, DL);
return !VariableIdxFound;
}
if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
- Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, TD);
+ Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, DL);
return !VariableIdxFound;
}
@@ -115,8 +116,8 @@ static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
break;
- int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
- int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
+ int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL);
+ int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, DL);
if (VariableIdxFound) return false;
Offset = Offset2-Offset1;
@@ -150,12 +151,11 @@ struct MemsetRange {
/// TheStores - The actual stores that make up this range.
SmallVector<Instruction*, 16> TheStores;
- bool isProfitableToUseMemset(const DataLayout &TD) const;
-
+ bool isProfitableToUseMemset(const DataLayout &DL) const;
};
} // end anon namespace
-bool MemsetRange::isProfitableToUseMemset(const DataLayout &TD) const {
+bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
// If we found more than 4 stores to merge or 16 bytes, use memset.
if (TheStores.size() >= 4 || End-Start >= 16) return true;
@@ -183,7 +183,7 @@ bool MemsetRange::isProfitableToUseMemset(const DataLayout &TD) const {
// size. If so, check to see whether we will end up actually reducing the
// number of stores used.
unsigned Bytes = unsigned(End-Start);
- unsigned MaxIntSize = TD.getLargestLegalIntTypeSize();
+ unsigned MaxIntSize = DL.getLargestLegalIntTypeSize();
if (MaxIntSize == 0)
MaxIntSize = 1;
unsigned NumPointerStores = Bytes / MaxIntSize;
@@ -314,14 +314,12 @@ namespace {
class MemCpyOpt : public FunctionPass {
MemoryDependenceAnalysis *MD;
TargetLibraryInfo *TLI;
- const DataLayout *DL;
public:
static char ID; // Pass identification, replacement for typeid
MemCpyOpt() : FunctionPass(ID) {
initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
MD = nullptr;
TLI = nullptr;
- DL = nullptr;
}
bool runOnFunction(Function &F) override;
@@ -334,7 +332,7 @@ namespace {
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<MemoryDependenceAnalysis>();
AU.addRequired<AliasAnalysis>();
- AU.addRequired<TargetLibraryInfo>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
AU.addPreserved<AliasAnalysis>();
AU.addPreserved<MemoryDependenceAnalysis>();
}
@@ -346,8 +344,9 @@ namespace {
bool processMemMove(MemMoveInst *M);
bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
uint64_t cpyLen, unsigned cpyAlign, CallInst *C);
- bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
- uint64_t MSize);
+ bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep);
+ bool processMemSetMemCpyDependence(MemCpyInst *M, MemSetInst *MDep);
+ bool performMemCpyToMemSetOptzn(MemCpyInst *M, MemSetInst *MDep);
bool processByValArgument(CallSite CS, unsigned ArgNo);
Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
Value *ByteVal);
@@ -366,7 +365,7 @@ INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
false, false)
@@ -377,13 +376,13 @@ INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
/// attempts to merge them together into a memcpy/memset.
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
Value *StartPtr, Value *ByteVal) {
- if (!DL) return nullptr;
+ const DataLayout &DL = StartInst->getModule()->getDataLayout();
// Okay, so we now have a single store that can be splatable. Scan to find
// all subsequent stores of the same value to offset from the same pointer.
// Join these together into ranges, so we can decide whether contiguous blocks
// are stored.
- MemsetRanges Ranges(*DL);
+ MemsetRanges Ranges(DL);
BasicBlock::iterator BI = StartInst;
for (++BI; !isa<TerminatorInst>(BI); ++BI) {
@@ -406,8 +405,8 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
// Check to see if this store is to a constant offset from the start ptr.
int64_t Offset;
- if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(),
- Offset, *DL))
+ if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
+ DL))
break;
Ranges.addStore(Offset, NextStore);
@@ -420,7 +419,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
// Check to see if this store is to a constant offset from the start ptr.
int64_t Offset;
- if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *DL))
+ if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, DL))
break;
Ranges.addMemSet(Offset, MSI);
@@ -452,7 +451,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
if (Range.TheStores.size() == 1) continue;
// If it is profitable to lower this range to memset, do so now.
- if (!Range.isProfitableToUseMemset(*DL))
+ if (!Range.isProfitableToUseMemset(DL))
continue;
// Otherwise, we do want to transform this! Create a new memset.
@@ -464,7 +463,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
if (Alignment == 0) {
Type *EltType =
cast<PointerType>(StartPtr->getType())->getElementType();
- Alignment = DL->getABITypeAlignment(EltType);
+ Alignment = DL.getABITypeAlignment(EltType);
}
AMemSet =
@@ -494,8 +493,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
if (!SI->isSimple()) return false;
-
- if (!DL) return false;
+ const DataLayout &DL = SI->getModule()->getDataLayout();
// Detect cases where we're performing call slot forwarding, but
// happen to be using a load-store pair to implement it, rather than
@@ -525,16 +523,16 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
if (C) {
unsigned storeAlign = SI->getAlignment();
if (!storeAlign)
- storeAlign = DL->getABITypeAlignment(SI->getOperand(0)->getType());
+ storeAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
unsigned loadAlign = LI->getAlignment();
if (!loadAlign)
- loadAlign = DL->getABITypeAlignment(LI->getType());
+ loadAlign = DL.getABITypeAlignment(LI->getType());
- bool changed = performCallSlotOptzn(LI,
- SI->getPointerOperand()->stripPointerCasts(),
- LI->getPointerOperand()->stripPointerCasts(),
- DL->getTypeStoreSize(SI->getOperand(0)->getType()),
- std::min(storeAlign, loadAlign), C);
+ bool changed = performCallSlotOptzn(
+ LI, SI->getPointerOperand()->stripPointerCasts(),
+ LI->getPointerOperand()->stripPointerCasts(),
+ DL.getTypeStoreSize(SI->getOperand(0)->getType()),
+ std::min(storeAlign, loadAlign), C);
if (changed) {
MD->removeInstruction(SI);
SI->eraseFromParent();
@@ -606,15 +604,13 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
if (!srcAlloca)
return false;
- // Check that all of src is copied to dest.
- if (!DL) return false;
-
ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
if (!srcArraySize)
return false;
- uint64_t srcSize = DL->getTypeAllocSize(srcAlloca->getAllocatedType()) *
- srcArraySize->getZExtValue();
+ const DataLayout &DL = cpy->getModule()->getDataLayout();
+ uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
+ srcArraySize->getZExtValue();
if (cpyLen < srcSize)
return false;
@@ -628,8 +624,8 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
if (!destArraySize)
return false;
- uint64_t destSize = DL->getTypeAllocSize(A->getAllocatedType()) *
- destArraySize->getZExtValue();
+ uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
+ destArraySize->getZExtValue();
if (destSize < srcSize)
return false;
@@ -648,7 +644,7 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
return false;
}
- uint64_t destSize = DL->getTypeAllocSize(StructTy);
+ uint64_t destSize = DL.getTypeAllocSize(StructTy);
if (destSize < srcSize)
return false;
}
@@ -659,7 +655,7 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
// Check that dest points to memory that is at least as aligned as src.
unsigned srcAlign = srcAlloca->getAlignment();
if (!srcAlign)
- srcAlign = DL->getABITypeAlignment(srcAlloca->getAllocatedType());
+ srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
// If dest is not aligned enough and we can't increase its alignment then
// bail out.
@@ -769,10 +765,9 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
/// memory dependence of memcpy 'M' is the memcpy 'MDep'. Try to simplify M to
-/// copy from MDep's input if we can. MSize is the size of M's copy.
+/// copy from MDep's input if we can.
///
-bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
- uint64_t MSize) {
+bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep) {
// We can only transforms memcpy's where the dest of one is the source of the
// other.
if (M->getSource() != MDep->getDest() || MDep->isVolatile())
@@ -844,6 +839,103 @@ bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
return true;
}
+/// We've found that the (upward scanning) memory dependence of \p MemCpy is
+/// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
+/// weren't copied over by \p MemCpy.
+///
+/// In other words, transform:
+/// \code
+/// memset(dst, c, dst_size);
+/// memcpy(dst, src, src_size);
+/// \endcode
+/// into:
+/// \code
+/// memcpy(dst, src, src_size);
+/// memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
+/// \endcode
+bool MemCpyOpt::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
+ MemSetInst *MemSet) {
+ // We can only transform memset/memcpy with the same destination.
+ if (MemSet->getDest() != MemCpy->getDest())
+ return false;
+
+ // Check that there are no other dependencies on the memset destination.
+ MemDepResult DstDepInfo =
+ MD->getPointerDependencyFrom(AliasAnalysis::getLocationForDest(MemSet),
+ false, MemCpy, MemCpy->getParent());
+ if (DstDepInfo.getInst() != MemSet)
+ return false;
+
+ // Use the same i8* dest as the memcpy, killing the memset dest if different.
+ Value *Dest = MemCpy->getRawDest();
+ Value *DestSize = MemSet->getLength();
+ Value *SrcSize = MemCpy->getLength();
+
+ // By default, create an unaligned memset.
+ unsigned Align = 1;
+ // If Dest is aligned, and SrcSize is constant, use the minimum alignment
+ // of the sum Dest + SrcSize as the alignment of the new memset.
+ const unsigned DestAlign =
+ std::max(MemSet->getAlignment(), MemCpy->getAlignment());
+ if (DestAlign > 1)
+ if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
+ Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);
+
+ IRBuilder<> Builder(MemCpy);
+
+ // If the sizes have different types, zext the smaller one.
+ if (DestSize->getType() != SrcSize->getType()) {
+ if (DestSize->getType()->getIntegerBitWidth() >
+ SrcSize->getType()->getIntegerBitWidth())
+ SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
+ else
+ DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
+ }
+
+ Value *MemsetLen =
+ Builder.CreateSelect(Builder.CreateICmpULE(DestSize, SrcSize),
+ ConstantInt::getNullValue(DestSize->getType()),
+ Builder.CreateSub(DestSize, SrcSize));
+ Builder.CreateMemSet(Builder.CreateGEP(Dest, SrcSize), MemSet->getOperand(1),
+ MemsetLen, Align);
+
+ MD->removeInstruction(MemSet);
+ MemSet->eraseFromParent();
+ return true;
+}
+
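+// Illustrative only, with hypothetical sizes: for
+//
+//   memset(dst, c, 100);
+//   memcpy(dst, src, 60);
+//
+// the rewrite above produces
+//
+//   memcpy(dst, src, 60);
+//   memset(dst + 60, c, 40);
+//
+// and the emitted select makes the trailing memset length 0 whenever
+// dst_size <= src_size.
+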
+/// Transform memcpy to memset when its source was just memset.
+/// In other words, turn:
+/// \code
+/// memset(dst1, c, dst1_size);
+/// memcpy(dst2, dst1, dst2_size);
+/// \endcode
+/// into:
+/// \code
+/// memset(dst1, c, dst1_size);
+/// memset(dst2, c, dst2_size);
+/// \endcode
+/// When dst2_size <= dst1_size.
+///
+/// The \p MemCpy must have a Constant length.
+bool MemCpyOpt::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
+ MemSetInst *MemSet) {
+ // This only makes sense on memcpy(..., memset(...), ...).
+ if (MemSet->getRawDest() != MemCpy->getRawSource())
+ return false;
+
+ ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
+ ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
+ // Make sure the memcpy doesn't read any more than what the memset wrote.
+ // Don't worry about sizes larger than i64.
+ if (!MemSetSize || CopySize->getZExtValue() > MemSetSize->getZExtValue())
+ return false;
+
+ IRBuilder<> Builder(MemCpy);
+ Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
+ CopySize, MemCpy->getAlignment());
+ return true;
+}
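+
+// Illustrative only, with hypothetical sizes: for
+//
+//   memset(dst1, c, 100);
+//   memcpy(dst2, dst1, 60);
+//
+// the memcpy reads only bytes the memset wrote (60 <= 100), so it is replaced
+// by memset(dst2, c, 60) and the caller then erases the original memcpy.
+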
/// processMemCpy - perform simplification of memcpy's. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
@@ -874,17 +966,26 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
return true;
}
+ MemDepResult DepInfo = MD->getDependency(M);
+
+ // Try to turn a partially redundant memset + memcpy into
+ // memcpy + smaller memset. We don't need the memcpy size for this.
+ if (DepInfo.isClobber())
+ if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
+ if (processMemSetMemCpyDependence(M, MDep))
+ return true;
+
// The optimizations after this point require the memcpy size.
ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
if (!CopySize) return false;
- // The are three possible optimizations we can do for memcpy:
+ // There are four possible optimizations we can do for memcpy:
// a) memcpy-memcpy xform which exposes redundance for DSE.
// b) call-memcpy xform for return slot optimization.
// c) memcpy from freshly alloca'd space or space that has just started its
// lifetime copies undefined data, and we can therefore eliminate the
// memcpy in favor of the data that was already at the destination.
- MemDepResult DepInfo = MD->getDependency(M);
+ // d) memcpy from a just-memset'd source can be turned into memset.
if (DepInfo.isClobber()) {
if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
@@ -900,9 +1001,10 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
AliasAnalysis::Location SrcLoc = AliasAnalysis::getLocationForSource(M);
MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(SrcLoc, true,
M, M->getParent());
+
if (SrcDepInfo.isClobber()) {
if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
- return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());
+ return processMemCpyMemCpyDependence(M, MDep);
} else if (SrcDepInfo.isDef()) {
Instruction *I = SrcDepInfo.getInst();
bool hasUndefContents = false;
@@ -924,6 +1026,15 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
}
}
+ if (SrcDepInfo.isClobber())
+ if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
+ if (performMemCpyToMemSetOptzn(M, MDep)) {
+ MD->removeInstruction(M);
+ M->eraseFromParent();
+ ++NumCpyToSet;
+ return true;
+ }
+
return false;
}
@@ -959,12 +1070,11 @@ bool MemCpyOpt::processMemMove(MemMoveInst *M) {
/// processByValArgument - This is called on every byval argument in call sites.
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
- if (!DL) return false;
-
+ const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
// Find out what feeds this byval argument.
Value *ByValArg = CS.getArgument(ArgNo);
Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
- uint64_t ByValSize = DL->getTypeAllocSize(ByValTy);
+ uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
MemDepResult DepInfo =
MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
true, CS.getInstruction(),
@@ -997,8 +1107,8 @@ bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
*CS->getParent()->getParent());
DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
if (MDep->getAlignment() < ByValAlign &&
- getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &AC,
- CS.getInstruction(), &DT) < ByValAlign)
+ getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
+ CS.getInstruction(), &AC, &DT) < ByValAlign)
return false;
// Verify that the copied-from memory doesn't change in between the memcpy and
@@ -1051,7 +1161,7 @@ bool MemCpyOpt::iterateOnFunction(Function &F) {
RepeatInstruction = processMemCpy(M);
else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
RepeatInstruction = processMemMove(M);
- else if (CallSite CS = (Value*)I) {
+ else if (auto CS = CallSite(I)) {
for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
if (CS.isByValArgument(i))
MadeChange |= processByValArgument(CS, i);
@@ -1077,9 +1187,7 @@ bool MemCpyOpt::runOnFunction(Function &F) {
bool MadeChange = false;
MD = &getAnalysis<MemoryDependenceAnalysis>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
- TLI = &getAnalysis<TargetLibraryInfo>();
+ TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
// If we don't have at least memset and memcpy, there is little point of doing
// anything here. These are required by a freestanding implementation, so if
diff --git a/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp b/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
index 1f73cbc4ac30..611a941b0b21 100644
--- a/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
+++ b/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
@@ -81,12 +81,13 @@
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <vector>
@@ -115,9 +116,9 @@ public:
private:
// This transformation requires dominator postdominator info
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<TargetLibraryInfo>();
- AU.addRequired<MemoryDependenceAnalysis>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
AU.addRequired<AliasAnalysis>();
+ AU.addPreserved<MemoryDependenceAnalysis>();
AU.addPreserved<AliasAnalysis>();
}
@@ -168,7 +169,7 @@ FunctionPass *llvm::createMergedLoadStoreMotionPass() {
INITIALIZE_PASS_BEGIN(MergedLoadStoreMotion, "mldst-motion",
"MergedLoadStoreMotion", false, false)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MergedLoadStoreMotion, "mldst-motion",
"MergedLoadStoreMotion", false, false)
@@ -579,7 +580,7 @@ bool MergedLoadStoreMotion::mergeStores(BasicBlock *T) {
/// \brief Run the transformation for each function
///
bool MergedLoadStoreMotion::runOnFunction(Function &F) {
- MD = &getAnalysis<MemoryDependenceAnalysis>();
+ MD = getAnalysisIfAvailable<MemoryDependenceAnalysis>();
AA = &getAnalysis<AliasAnalysis>();
bool Changed = false;
diff --git a/lib/Transforms/Scalar/NaryReassociate.cpp b/lib/Transforms/Scalar/NaryReassociate.cpp
new file mode 100644
index 000000000000..5b370e04088f
--- /dev/null
+++ b/lib/Transforms/Scalar/NaryReassociate.cpp
@@ -0,0 +1,481 @@
+//===- NaryReassociate.cpp - Reassociate n-ary expressions ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass reassociates n-ary add expressions and eliminates the redundancy
+// exposed by the reassociation.
+//
+// A motivating example:
+//
+// void foo(int a, int b) {
+// bar(a + b);
+// bar((a + 2) + b);
+// }
+//
+// An ideal compiler should reassociate (a + 2) + b to (a + b) + 2 and simplify
+// the above code to
+//
+// int t = a + b;
+// bar(t);
+// bar(t + 2);
+//
+// However, the Reassociate pass is unable to do that because it processes each
+// instruction individually and believes (a + 2) + b is the best form according
+// to its rank system.
+//
+// To address this limitation, NaryReassociate reassociates an expression in a
+// form that reuses existing instructions. As a result, NaryReassociate can
+// reassociate (a + 2) + b in the example to (a + b) + 2 because it detects that
+// (a + b) is computed before.
+//
+// NaryReassociate works as follows. For every instruction in the form of (a +
+// b) + c, it checks whether a + c or b + c is already computed by a dominating
+// instruction. If so, it then reassociates (a + b) + c into (a + c) + b or (b +
+// c) + a and removes the redundancy accordingly. To efficiently look up whether
+// an expression is computed before, we store each instruction seen and its SCEV
+// into an SCEV-to-instruction map.
+//
+// Although the algorithm pattern-matches only ternary additions, it
+// automatically handles many >3-ary expressions by walking through the function
+// in the depth-first order. For example, given
+//
+// (a + c) + d
+// ((a + b) + c) + d
+//
+// NaryReassociate first rewrites (a + b) + c to (a + c) + b, and then rewrites
+// ((a + c) + b) + d into ((a + c) + d) + b.
+//
+// Finally, the above dominator-based algorithm may need to be run multiple
+// iterations before emitting optimal code. One source of this need is that we
+// only split an operand when it is used only once. The above algorithm can
+// eliminate an instruction and decrease the usage count of its operands. As a
+// result, an instruction that previously had multiple uses may become a
+// single-use instruction and thus eligible for split consideration. For
+// example,
+//
+// ac = a + c
+// ab = a + b
+// abc = ab + c
+// ab2 = ab + b
+// ab2c = ab2 + c
+//
+// In the first iteration, we cannot reassociate abc to ac+b because ab is used
+// twice. However, we can reassociate ab2c to abc+b in the first iteration. As a
+// result, ab2 becomes dead and ab will be used only once in the second
+// iteration.
+//
+// Limitations and TODO items:
+//
+// 1) We only consider n-ary adds for now. This should be extended and
+// generalized.
+//
+// 2) Besides arithmetic operations, similar reassociation can be applied to
+// GEPs. For example, if
+// X = &arr[a]
+// dominates
+// Y = &arr[a + b]
+// we may rewrite Y into X + b.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/Local.h"
+using namespace llvm;
+using namespace PatternMatch;
+
+#define DEBUG_TYPE "nary-reassociate"
+
+namespace {
+class NaryReassociate : public FunctionPass {
+public:
+ static char ID;
+
+ NaryReassociate(): FunctionPass(ID) {
+ initializeNaryReassociatePass(*PassRegistry::getPassRegistry());
+ }
+
+ bool doInitialization(Module &M) override {
+ DL = &M.getDataLayout();
+ return false;
+ }
+ bool runOnFunction(Function &F) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addPreserved<DominatorTreeWrapperPass>();
+ AU.addPreserved<ScalarEvolution>();
+ AU.addPreserved<TargetLibraryInfoWrapperPass>();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addRequired<ScalarEvolution>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
+ AU.setPreservesCFG();
+ }
+
+private:
+ // Runs only one iteration of the dominator-based algorithm. See the header
+ // comments for why we need multiple iterations.
+ bool doOneIteration(Function &F);
+
+ // Reassociates I for better CSE.
+ Instruction *tryReassociate(Instruction *I);
+
+ // Reassociate GEP for better CSE.
+ Instruction *tryReassociateGEP(GetElementPtrInst *GEP);
+ // Try splitting GEP at the I-th index and see whether either part can be
+ // CSE'ed. This is a helper function for tryReassociateGEP.
+ //
+ // \p IndexedType The element type indexed by GEP's I-th index. This is
+ // equivalent to
+ // GEP->getIndexedType(GEP->getPointerOperand(), 0-th index,
+ // ..., i-th index).
+ GetElementPtrInst *tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
+ unsigned I, Type *IndexedType);
+ // Given GEP's I-th index = LHS + RHS, see whether &Base[..][LHS][..] or
+ // &Base[..][RHS][..] can be CSE'ed and rewrite GEP accordingly.
+ GetElementPtrInst *tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
+ unsigned I, Value *LHS,
+ Value *RHS, Type *IndexedType);
+
+ // Reassociate Add for better CSE.
+ Instruction *tryReassociateAdd(BinaryOperator *I);
+ // A helper function for tryReassociateAdd. LHS and RHS are explicitly passed.
+ Instruction *tryReassociateAdd(Value *LHS, Value *RHS, Instruction *I);
+ // Rewrites I to LHS + RHS if LHS is computed already.
+ Instruction *tryReassociatedAdd(const SCEV *LHS, Value *RHS, Instruction *I);
+
+ // Returns the closest dominator of \c Dominatee that computes
+ // \c CandidateExpr. Returns null if not found.
+ Instruction *findClosestMatchingDominator(const SCEV *CandidateExpr,
+ Instruction *Dominatee);
+ // GetElementPtrInst implicitly sign-extends an index if the index is shorter
+ // than the pointer size. This function returns whether Index is shorter than
+ // GEP's pointer size, i.e., whether Index needs to be sign-extended in order
+ // to be an index of GEP.
+ bool requiresSignExtension(Value *Index, GetElementPtrInst *GEP);
+
+ DominatorTree *DT;
+ ScalarEvolution *SE;
+ TargetLibraryInfo *TLI;
+ TargetTransformInfo *TTI;
+ const DataLayout *DL;
+ // A lookup table quickly telling which instructions compute the given SCEV.
+ // Note that there can be multiple instructions at different locations
+ // computing to the same SCEV, so we map a SCEV to an instruction list. For
+ // example,
+ //
+ // if (p1)
+ // foo(a + b);
+ // if (p2)
+ // bar(a + b);
+ DenseMap<const SCEV *, SmallVector<Instruction *, 2>> SeenExprs;
+};
+} // anonymous namespace
+
+char NaryReassociate::ID = 0;
+INITIALIZE_PASS_BEGIN(NaryReassociate, "nary-reassociate", "Nary reassociation",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
+INITIALIZE_PASS_END(NaryReassociate, "nary-reassociate", "Nary reassociation",
+ false, false)
+
+FunctionPass *llvm::createNaryReassociatePass() {
+ return new NaryReassociate();
+}
+
+bool NaryReassociate::runOnFunction(Function &F) {
+ if (skipOptnoneFunction(F))
+ return false;
+
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ SE = &getAnalysis<ScalarEvolution>();
+ TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+ TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+
+ bool Changed = false, ChangedInThisIteration;
+ do {
+ ChangedInThisIteration = doOneIteration(F);
+ Changed |= ChangedInThisIteration;
+ } while (ChangedInThisIteration);
+ return Changed;
+}
+
+// Whitelist the instruction types NaryReassociate handles for now.
+static bool isPotentiallyNaryReassociable(Instruction *I) {
+ switch (I->getOpcode()) {
+ case Instruction::Add:
+ case Instruction::GetElementPtr:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool NaryReassociate::doOneIteration(Function &F) {
+ bool Changed = false;
+ SeenExprs.clear();
+ // Process the basic blocks in pre-order of the dominator tree. This order
+ // ensures that a dominating expression has already been recorded in
+ // SeenExprs when we process an instruction that could reuse it.
+ for (auto Node = GraphTraits<DominatorTree *>::nodes_begin(DT);
+ Node != GraphTraits<DominatorTree *>::nodes_end(DT); ++Node) {
+ BasicBlock *BB = Node->getBlock();
+ for (auto I = BB->begin(); I != BB->end(); ++I) {
+ if (SE->isSCEVable(I->getType()) && isPotentiallyNaryReassociable(I)) {
+ if (Instruction *NewI = tryReassociate(I)) {
+ Changed = true;
+ SE->forgetValue(I);
+ I->replaceAllUsesWith(NewI);
+ RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
+ I = NewI;
+ }
+ // Add the rewritten instruction to SeenExprs; the original instruction
+ // is deleted.
+ SeenExprs[SE->getSCEV(I)].push_back(I);
+ }
+ }
+ }
+ return Changed;
+}
+
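+// Illustrative walk-through of one iteration, using the motivating example
+// from the file header: visiting the add that computes a + b records
+// SCEV(a + b) -> t in SeenExprs; when the add computing (a + 2) + b is
+// visited later, tryReassociate finds SCEV(a + b) already available and
+// rewrites the expression to t + 2.
+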
+Instruction *NaryReassociate::tryReassociate(Instruction *I) {
+ switch (I->getOpcode()) {
+ case Instruction::Add:
+ return tryReassociateAdd(cast<BinaryOperator>(I));
+ case Instruction::GetElementPtr:
+ return tryReassociateGEP(cast<GetElementPtrInst>(I));
+ default:
+ llvm_unreachable("should be filtered out by isPotentiallyNaryReassociable");
+ }
+}
+
+// FIXME: extract this method into TTI->getGEPCost.
+static bool isGEPFoldable(GetElementPtrInst *GEP,
+ const TargetTransformInfo *TTI,
+ const DataLayout *DL) {
+ GlobalVariable *BaseGV = nullptr;
+ int64_t BaseOffset = 0;
+ bool HasBaseReg = false;
+ int64_t Scale = 0;
+
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getPointerOperand()))
+ BaseGV = GV;
+ else
+ HasBaseReg = true;
+
+ gep_type_iterator GTI = gep_type_begin(GEP);
+ for (auto I = GEP->idx_begin(); I != GEP->idx_end(); ++I, ++GTI) {
+ if (isa<SequentialType>(*GTI)) {
+ int64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
+ if (ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I)) {
+ BaseOffset += ConstIdx->getSExtValue() * ElementSize;
+ } else {
+ // Needs scale register.
+ if (Scale != 0) {
+ // No addressing mode takes two scale registers.
+ return false;
+ }
+ Scale = ElementSize;
+ }
+ } else {
+ StructType *STy = cast<StructType>(*GTI);
+ uint64_t Field = cast<ConstantInt>(*I)->getZExtValue();
+ BaseOffset += DL->getStructLayout(STy)->getElementOffset(Field);
+ }
+ }
+ return TTI->isLegalAddressingMode(GEP->getType()->getElementType(), BaseGV,
+ BaseOffset, HasBaseReg, Scale);
+}
+
+Instruction *NaryReassociate::tryReassociateGEP(GetElementPtrInst *GEP) {
+ // Not worth reassociating GEP if it is foldable.
+ if (isGEPFoldable(GEP, TTI, DL))
+ return nullptr;
+
+ gep_type_iterator GTI = gep_type_begin(*GEP);
+ for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
+ if (isa<SequentialType>(*GTI++)) {
+ if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1, *GTI)) {
+ return NewGEP;
+ }
+ }
+ }
+ return nullptr;
+}
+
+bool NaryReassociate::requiresSignExtension(Value *Index,
+ GetElementPtrInst *GEP) {
+ unsigned PointerSizeInBits =
+ DL->getPointerSizeInBits(GEP->getType()->getPointerAddressSpace());
+ return cast<IntegerType>(Index->getType())->getBitWidth() < PointerSizeInBits;
+}
+
+GetElementPtrInst *
+NaryReassociate::tryReassociateGEPAtIndex(GetElementPtrInst *GEP, unsigned I,
+ Type *IndexedType) {
+ Value *IndexToSplit = GEP->getOperand(I + 1);
+ if (SExtInst *SExt = dyn_cast<SExtInst>(IndexToSplit))
+ IndexToSplit = SExt->getOperand(0);
+
+ if (AddOperator *AO = dyn_cast<AddOperator>(IndexToSplit)) {
+ // If the I-th index needs sext and the underlying add is not equipped with
+ // nsw, we cannot split the add because
+ // sext(LHS + RHS) != sext(LHS) + sext(RHS).
+ if (requiresSignExtension(IndexToSplit, GEP) && !AO->hasNoSignedWrap())
+ return nullptr;
+ Value *LHS = AO->getOperand(0), *RHS = AO->getOperand(1);
+ // IndexToSplit = LHS + RHS.
+ if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I, LHS, RHS, IndexedType))
+ return NewGEP;
+ // Symmetrically, try IndexToSplit = RHS + LHS.
+ if (LHS != RHS) {
+ if (auto *NewGEP =
+ tryReassociateGEPAtIndex(GEP, I, RHS, LHS, IndexedType))
+ return NewGEP;
+ }
+ }
+ return nullptr;
+}
+
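+// A small illustration of the nsw requirement above (hypothetical 8-bit
+// values): with LHS = RHS = 100, LHS + RHS wraps to -56, so
+// sext(LHS + RHS) = -56 while sext(LHS) + sext(RHS) = 200; splitting the add
+// under a sign extension is therefore only safe when the add has nsw.
+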
+GetElementPtrInst *
+NaryReassociate::tryReassociateGEPAtIndex(GetElementPtrInst *GEP, unsigned I,
+ Value *LHS, Value *RHS,
+ Type *IndexedType) {
+ // Look for GEP's closest dominator that has the same SCEV as GEP except that
+ // the I-th index is replaced with LHS.
+ SmallVector<const SCEV *, 4> IndexExprs;
+ for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
+ IndexExprs.push_back(SE->getSCEV(*Index));
+ // Replace the I-th index with LHS.
+ IndexExprs[I] = SE->getSCEV(LHS);
+ const SCEV *CandidateExpr = SE->getGEPExpr(
+ GEP->getSourceElementType(), SE->getSCEV(GEP->getPointerOperand()),
+ IndexExprs, GEP->isInBounds());
+
+ auto *Candidate = findClosestMatchingDominator(CandidateExpr, GEP);
+ if (Candidate == nullptr)
+ return nullptr;
+
+ PointerType *TypeOfCandidate = dyn_cast<PointerType>(Candidate->getType());
+ // Pretty rare but theoretically possible when a numeric value happens to
+ // share CandidateExpr.
+ if (TypeOfCandidate == nullptr)
+ return nullptr;
+
+ // NewGEP = (char *)Candidate + RHS * sizeof(IndexedType)
+ uint64_t IndexedSize = DL->getTypeAllocSize(IndexedType);
+ Type *ElementType = TypeOfCandidate->getElementType();
+ uint64_t ElementSize = DL->getTypeAllocSize(ElementType);
+ // Another less rare case: because I is not necessarily the last index of the
+ // GEP, the size of the type at the I-th index (IndexedSize) is not
+ // necessarily divisible by ElementSize. For example,
+ //
+ // #pragma pack(1)
+ // struct S {
+ // int a[3];
+ // int64 b[8];
+ // };
+ // #pragma pack()
+ //
+ // sizeof(S) = 100 is indivisible by sizeof(int64) = 8.
+ //
+ // TODO: bail out on this case for now. We could emit uglygep.
+ if (IndexedSize % ElementSize != 0)
+ return nullptr;
+
+ // NewGEP = &Candidate[RHS * (sizeof(IndexedType) / sizeof(Candidate[0]))]
+ IRBuilder<> Builder(GEP);
+ Type *IntPtrTy = DL->getIntPtrType(TypeOfCandidate);
+ if (RHS->getType() != IntPtrTy)
+ RHS = Builder.CreateSExtOrTrunc(RHS, IntPtrTy);
+ if (IndexedSize != ElementSize) {
+ RHS = Builder.CreateMul(
+ RHS, ConstantInt::get(IntPtrTy, IndexedSize / ElementSize));
+ }
+ GetElementPtrInst *NewGEP =
+ cast<GetElementPtrInst>(Builder.CreateGEP(Candidate, RHS));
+ NewGEP->setIsInBounds(GEP->isInBounds());
+ NewGEP->takeName(GEP);
+ return NewGEP;
+}
+
+Instruction *NaryReassociate::tryReassociateAdd(BinaryOperator *I) {
+ Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
+ if (auto *NewI = tryReassociateAdd(LHS, RHS, I))
+ return NewI;
+ if (auto *NewI = tryReassociateAdd(RHS, LHS, I))
+ return NewI;
+ return nullptr;
+}
+
+Instruction *NaryReassociate::tryReassociateAdd(Value *LHS, Value *RHS,
+ Instruction *I) {
+ Value *A = nullptr, *B = nullptr;
+ // To be conservative, we reassociate I only when it is the only user of A+B.
+ if (LHS->hasOneUse() && match(LHS, m_Add(m_Value(A), m_Value(B)))) {
+ // I = (A + B) + RHS
+ // = (A + RHS) + B or (B + RHS) + A
+ const SCEV *AExpr = SE->getSCEV(A), *BExpr = SE->getSCEV(B);
+ const SCEV *RHSExpr = SE->getSCEV(RHS);
+ if (BExpr != RHSExpr) {
+ if (auto *NewI = tryReassociatedAdd(SE->getAddExpr(AExpr, RHSExpr), B, I))
+ return NewI;
+ }
+ if (AExpr != RHSExpr) {
+ if (auto *NewI = tryReassociatedAdd(SE->getAddExpr(BExpr, RHSExpr), A, I))
+ return NewI;
+ }
+ }
+ return nullptr;
+}
+
+Instruction *NaryReassociate::tryReassociatedAdd(const SCEV *LHSExpr,
+ Value *RHS, Instruction *I) {
+ auto Pos = SeenExprs.find(LHSExpr);
+ // Bail out if LHSExpr is not previously seen.
+ if (Pos == SeenExprs.end())
+ return nullptr;
+
+ // Look for the closest dominator LHS of I that computes LHSExpr, and replace
+ // I with LHS + RHS.
+ auto *LHS = findClosestMatchingDominator(LHSExpr, I);
+ if (LHS == nullptr)
+ return nullptr;
+
+ Instruction *NewI = BinaryOperator::CreateAdd(LHS, RHS, "", I);
+ NewI->takeName(I);
+ return NewI;
+}
+
+Instruction *
+NaryReassociate::findClosestMatchingDominator(const SCEV *CandidateExpr,
+ Instruction *Dominatee) {
+ auto Pos = SeenExprs.find(CandidateExpr);
+ if (Pos == SeenExprs.end())
+ return nullptr;
+
+ auto &Candidates = Pos->second;
+ // Because we process the basic blocks in pre-order of the dominator tree, a
+ // candidate that doesn't dominate the current instruction won't dominate any
+ // future instruction either. Therefore, we pop it out of the stack. This
+ // optimization makes the algorithm O(n).
+ while (!Candidates.empty()) {
+ Instruction *Candidate = Candidates.back();
+ if (DT->dominates(Candidate, Dominatee))
+ return Candidate;
+ Candidates.pop_back();
+ }
+ return nullptr;
+}
diff --git a/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp b/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
index 5c8bed585b64..31d7df39c781 100644
--- a/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
+++ b/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
@@ -18,7 +18,7 @@
#include "llvm/IR/Intrinsics.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
@@ -52,16 +52,18 @@ INITIALIZE_PASS(PartiallyInlineLibCalls, "partially-inline-libcalls",
"Partially inline calls to library functions", false, false)
void PartiallyInlineLibCalls::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<TargetLibraryInfo>();
- AU.addRequired<TargetTransformInfo>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
FunctionPass::getAnalysisUsage(AU);
}
bool PartiallyInlineLibCalls::runOnFunction(Function &F) {
bool Changed = false;
Function::iterator CurrBB;
- TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
- const TargetTransformInfo *TTI = &getAnalysis<TargetTransformInfo>();
+ TargetLibraryInfo *TLI =
+ &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+ const TargetTransformInfo *TTI =
+ &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
for (Function::iterator BB = F.begin(), BE = F.end(); BB != BE;) {
CurrBB = BB++;
@@ -126,7 +128,7 @@ bool PartiallyInlineLibCalls::optimizeSQRT(CallInst *Call,
// Move all instructions following Call to newly created block JoinBB.
// Create phi and replace all uses.
- BasicBlock *JoinBB = llvm::SplitBlock(&CurrBB, Call->getNextNode(), this);
+ BasicBlock *JoinBB = llvm::SplitBlock(&CurrBB, Call->getNextNode());
IRBuilder<> Builder(JoinBB, JoinBB->begin());
PHINode *Phi = Builder.CreatePHI(Call->getType(), 2);
Call->replaceAllUsesWith(Phi);
diff --git a/lib/Transforms/Scalar/PlaceSafepoints.cpp b/lib/Transforms/Scalar/PlaceSafepoints.cpp
new file mode 100644
index 000000000000..3e7deeba9f21
--- /dev/null
+++ b/lib/Transforms/Scalar/PlaceSafepoints.cpp
@@ -0,0 +1,993 @@
+//===- PlaceSafepoints.cpp - Place GC Safepoints --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Place garbage collection safepoints at appropriate locations in the IR. This
+// does not make relocation semantics or variable liveness explicit. That's
+// done by RewriteStatepointsForGC.
+//
+// Terminology:
+// - A call is said to be "parseable" if there is a stack map generated for the
+// return PC of the call. A runtime can determine where values listed in the
+// deopt arguments and (after RewriteStatepointsForGC) gc arguments are located
+// on the stack when the code is suspended inside such a call. Every parse
+// point is represented by a call wrapped in a gc.statepoint intrinsic.
+// - A "poll" is an explicit check in the generated code to determine if the
+// runtime needs the generated code to cooperate by calling a helper routine
+// and thus suspending its execution at a known state. The call to the helper
+// routine will be parseable. The (gc & runtime specific) logic of a poll is
+// assumed to be provided in a function of the name "gc.safepoint_poll".
+//
+// We aim to insert polls such that running code can quickly be brought to a
+// well defined state for inspection by the collector. In the current
+// implementation, this is done via the insertion of poll sites at method entry
+// and the backedge of most loops. We try to avoid inserting more polls than
+// are necessary to ensure a finite period between poll sites. This is not
+// because the poll itself is expensive in the generated code; it's not. Polls
+// do tend to impact the optimizer itself in negative ways; we'd like to avoid
+// perturbing the optimization of the method as much as we can.
+//
+// We also need to make most call sites parseable. The callee might execute a
+// poll (or otherwise be inspected by the GC). If so, the entire stack
+// (including the suspended frame of the current method) must be parseable.
+//
+// This pass will insert:
+// - Call parse points ("call safepoints") for any call which may need to
+// reach a safepoint during the execution of the callee function.
+// - Backedge safepoint polls and entry safepoint polls to ensure that
+// executing code reaches a safepoint poll in a finite amount of time.
+//
+// We do not currently support return statepoints, but adding them would not
+// be hard. They are not required for correctness - entry safepoints are an
+// alternative - but some GCs may prefer them. Patches welcome.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Pass.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/CFG.h"
+#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Statepoint.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include "llvm/Transforms/Utils/Local.h"
+
+#define DEBUG_TYPE "safepoint-placement"
+STATISTIC(NumEntrySafepoints, "Number of entry safepoints inserted");
+STATISTIC(NumCallSafepoints, "Number of call safepoints inserted");
+STATISTIC(NumBackedgeSafepoints, "Number of backedge safepoints inserted");
+
+STATISTIC(CallInLoop, "Number of loops w/o safepoints due to calls in loop");
+STATISTIC(FiniteExecution, "Number of loops w/o safepoints finite execution");
+
+using namespace llvm;
+
+// Ignore opportunities to avoid placing safepoints on backedges, useful for
+// validation
+static cl::opt<bool> AllBackedges("spp-all-backedges", cl::Hidden,
+ cl::init(false));
+
+/// If true, do not place backedge safepoints in counted loops.
+static cl::opt<bool> SkipCounted("spp-counted", cl::Hidden, cl::init(true));
+
+// If true, split the backedge of a loop when placing the safepoint, otherwise
+// split the latch block itself.  Both are useful to support experimentation,
+// but in practice, it looks like splitting the backedge
+// optimizes better.
+static cl::opt<bool> SplitBackedge("spp-split-backedge", cl::Hidden,
+ cl::init(false));
+
+// Print tracing output
+static cl::opt<bool> TraceLSP("spp-trace", cl::Hidden, cl::init(false));
+
+namespace {
+
+/// An analysis pass whose purpose is to identify each of the backedges in
+/// the function which require a safepoint poll to be inserted.
+struct PlaceBackedgeSafepointsImpl : public FunctionPass {
+ static char ID;
+
+  /// The output of the pass - a list of the backedges (described by pointing
+  /// at the branch) which need a poll inserted.
+ std::vector<TerminatorInst *> PollLocations;
+
+ /// True unless we're running spp-no-calls in which case we need to disable
+  /// the call dependent placement opts.
+ bool CallSafepointsEnabled;
+
+ ScalarEvolution *SE = nullptr;
+ DominatorTree *DT = nullptr;
+ LoopInfo *LI = nullptr;
+
+ PlaceBackedgeSafepointsImpl(bool CallSafepoints = false)
+ : FunctionPass(ID), CallSafepointsEnabled(CallSafepoints) {
+ initializePlaceBackedgeSafepointsImplPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnLoop(Loop *);
+ void runOnLoopAndSubLoops(Loop *L) {
+ // Visit all the subloops
+ for (auto I = L->begin(), E = L->end(); I != E; I++)
+ runOnLoopAndSubLoops(*I);
+ runOnLoop(L);
+ }
+
+ bool runOnFunction(Function &F) override {
+ SE = &getAnalysis<ScalarEvolution>();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+ for (auto I = LI->begin(), E = LI->end(); I != E; I++) {
+ runOnLoopAndSubLoops(*I);
+ }
+ return false;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addRequired<ScalarEvolution>();
+ AU.addRequired<LoopInfoWrapperPass>();
+ // We no longer modify the IR at all in this pass. Thus all
+    // analyses are preserved.
+ AU.setPreservesAll();
+ }
+};
+}
+
+static cl::opt<bool> NoEntry("spp-no-entry", cl::Hidden, cl::init(false));
+static cl::opt<bool> NoCall("spp-no-call", cl::Hidden, cl::init(false));
+static cl::opt<bool> NoBackedge("spp-no-backedge", cl::Hidden, cl::init(false));
+
+namespace {
+struct PlaceSafepoints : public FunctionPass {
+ static char ID; // Pass identification, replacement for typeid
+
+ PlaceSafepoints() : FunctionPass(ID) {
+ initializePlaceSafepointsPass(*PassRegistry::getPassRegistry());
+ }
+ bool runOnFunction(Function &F) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ // We modify the graph wholesale (inlining, block insertion, etc). We
+ // preserve nothing at the moment. We could potentially preserve dom tree
+ // if that was worth doing
+ }
+};
+}
+
+// Insert a safepoint poll immediately before the given instruction.  Does
+// not handle the parsability of state at the runtime call; that's the
+// caller's job.
+static void
+InsertSafepointPoll(Instruction *InsertBefore,
+ std::vector<CallSite> &ParsePointsNeeded /*rval*/);
+
+static bool isGCLeafFunction(const CallSite &CS);
+
+static bool needsStatepoint(const CallSite &CS) {
+ if (isGCLeafFunction(CS))
+ return false;
+ if (CS.isCall()) {
+ CallInst *call = cast<CallInst>(CS.getInstruction());
+ if (call->isInlineAsm())
+ return false;
+ }
+ if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS)) {
+ return false;
+ }
+ return true;
+}
+
+static Value *ReplaceWithStatepoint(const CallSite &CS, Pass *P);
+
+/// Returns true if this loop is known to contain a call safepoint which
+/// must unconditionally execute on any iteration of the loop which returns
+/// to the loop header via an edge from Pred.  Returns a conservatively correct
+/// answer; i.e. false is always valid.
+static bool containsUnconditionalCallSafepoint(Loop *L, BasicBlock *Header,
+ BasicBlock *Pred,
+ DominatorTree &DT) {
+ // In general, we're looking for any cut of the graph which ensures
+ // there's a call safepoint along every edge between Header and Pred.
+ // For the moment, we look only for the 'cuts' that consist of a single call
+ // instruction in a block which is dominated by the Header and dominates the
+ // loop latch (Pred) block. Somewhat surprisingly, walking the entire chain
+  // of such dominating blocks catches substantially more occurrences than just
+ // checking the Pred and Header blocks themselves. This may be due to the
+ // density of loop exit conditions caused by range and null checks.
+ // TODO: structure this as an analysis pass, cache the result for subloops,
+ // avoid dom tree recalculations
+ assert(DT.dominates(Header, Pred) && "loop latch not dominated by header?");
+
+ BasicBlock *Current = Pred;
+ while (true) {
+ for (Instruction &I : *Current) {
+ if (auto CS = CallSite(&I))
+        // Note: Technically, needing a safepoint isn't quite the right
+        // condition here. We should instead be checking if the target method
+        // has an unconditional poll. In practice, this is only a theoretical
+        // concern since we don't have any methods with conditional-only
+        // safepoint polls.
+ if (needsStatepoint(CS))
+ return true;
+ }
+
+ if (Current == Header)
+ break;
+ Current = DT.getNode(Current)->getIDom()->getBlock();
+ }
+
+ return false;
+}
+
+/// Returns true if this loop is known to terminate in a finite number of
+/// iterations. Note that this function may return false for a loop which
+/// does actually terminate in a finite constant number of iterations, due to
+/// conservatism in the analysis.
+static bool mustBeFiniteCountedLoop(Loop *L, ScalarEvolution *SE,
+ BasicBlock *Pred) {
+ // Only used when SkipCounted is off
+ const unsigned upperTripBound = 8192;
+
+ // A conservative bound on the loop as a whole.
+ const SCEV *MaxTrips = SE->getMaxBackedgeTakenCount(L);
+ if (MaxTrips != SE->getCouldNotCompute()) {
+ if (SE->getUnsignedRange(MaxTrips).getUnsignedMax().ult(upperTripBound))
+ return true;
+ if (SkipCounted &&
+ SE->getUnsignedRange(MaxTrips).getUnsignedMax().isIntN(32))
+ return true;
+ }
+
+ // If this is a conditional branch to the header with the alternate path
+ // being outside the loop, we can ask questions about the execution frequency
+ // of the exit block.
+ if (L->isLoopExiting(Pred)) {
+ // This returns an exact expression only. TODO: We really only need an
+ // upper bound here, but SE doesn't expose that.
+ const SCEV *MaxExec = SE->getExitCount(L, Pred);
+ if (MaxExec != SE->getCouldNotCompute()) {
+ if (SE->getUnsignedRange(MaxExec).getUnsignedMax().ult(upperTripBound))
+ return true;
+ if (SkipCounted &&
+ SE->getUnsignedRange(MaxExec).getUnsignedMax().isIntN(32))
+ return true;
+ }
+ }
+
+ return /* not finite */ false;
+}
+
+static void scanOneBB(Instruction *start, Instruction *end,
+ std::vector<CallInst *> &calls,
+ std::set<BasicBlock *> &seen,
+ std::vector<BasicBlock *> &worklist) {
+ for (BasicBlock::iterator itr(start);
+ itr != start->getParent()->end() && itr != BasicBlock::iterator(end);
+ itr++) {
+ if (CallInst *CI = dyn_cast<CallInst>(&*itr)) {
+ calls.push_back(CI);
+ }
+ // FIXME: This code does not handle invokes
+ assert(!dyn_cast<InvokeInst>(&*itr) &&
+ "support for invokes in poll code needed");
+ // Only add the successor blocks if we reach the terminator instruction
+ // without encountering end first
+ if (itr->isTerminator()) {
+ BasicBlock *BB = itr->getParent();
+ for (BasicBlock *Succ : successors(BB)) {
+ if (seen.count(Succ) == 0) {
+ worklist.push_back(Succ);
+ seen.insert(Succ);
+ }
+ }
+ }
+ }
+}
+static void scanInlinedCode(Instruction *start, Instruction *end,
+ std::vector<CallInst *> &calls,
+ std::set<BasicBlock *> &seen) {
+ calls.clear();
+ std::vector<BasicBlock *> worklist;
+ seen.insert(start->getParent());
+ scanOneBB(start, end, calls, seen, worklist);
+ while (!worklist.empty()) {
+ BasicBlock *BB = worklist.back();
+ worklist.pop_back();
+ scanOneBB(&*BB->begin(), end, calls, seen, worklist);
+ }
+}
+
+bool PlaceBackedgeSafepointsImpl::runOnLoop(Loop *L) {
+ // Loop through all loop latches (branches controlling backedges). We need
+ // to place a safepoint on every backedge (potentially).
+ // Note: In common usage, there will be only one edge due to LoopSimplify
+ // having run sometime earlier in the pipeline, but this code must be correct
+ // w.r.t. loops with multiple backedges.
+ BasicBlock *header = L->getHeader();
+ SmallVector<BasicBlock*, 16> LoopLatches;
+ L->getLoopLatches(LoopLatches);
+ for (BasicBlock *pred : LoopLatches) {
+ assert(L->contains(pred));
+
+ // Make a policy decision about whether this loop needs a safepoint or
+ // not. Note that this is about unburdening the optimizer in loops, not
+ // avoiding the runtime cost of the actual safepoint.
+ if (!AllBackedges) {
+ if (mustBeFiniteCountedLoop(L, SE, pred)) {
+ if (TraceLSP)
+ errs() << "skipping safepoint placement in finite loop\n";
+ FiniteExecution++;
+ continue;
+ }
+ if (CallSafepointsEnabled &&
+ containsUnconditionalCallSafepoint(L, header, pred, *DT)) {
+        // Note: This is only semantically legal since we won't do any further
+        // IPO or inlining before the actual call insertion.  If we hadn't
+        // made that guarantee, we might later lose this call safepoint.
+ if (TraceLSP)
+ errs() << "skipping safepoint placement due to unconditional call\n";
+ CallInLoop++;
+ continue;
+ }
+ }
+
+ // TODO: We can create an inner loop which runs a finite number of
+ // iterations with an outer loop which contains a safepoint. This would
+ // not help runtime performance that much, but it might help our ability to
+ // optimize the inner loop.
+
+ // Safepoint insertion would involve creating a new basic block (as the
+ // target of the current backedge) which does the safepoint (of all live
+ // variables) and branches to the true header
+ TerminatorInst *term = pred->getTerminator();
+
+ if (TraceLSP) {
+ errs() << "[LSP] terminator instruction: ";
+ term->dump();
+ }
+
+ PollLocations.push_back(term);
+ }
+
+ return false;
+}
+
+/// Returns true if an entry safepoint is not required before this callsite in
+/// the caller function.
+static bool doesNotRequireEntrySafepointBefore(const CallSite &CS) {
+ Instruction *Inst = CS.getInstruction();
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
+ switch (II->getIntrinsicID()) {
+ case Intrinsic::experimental_gc_statepoint:
+ case Intrinsic::experimental_patchpoint_void:
+ case Intrinsic::experimental_patchpoint_i64:
+      // These can wrap an actual call which may grow the stack by an unbounded
+ // amount or run forever.
+ return false;
+ default:
+ // Most LLVM intrinsics are things which do not expand to actual calls, or
+ // at least if they do, are leaf functions that cause only finite stack
+ // growth. In particular, the optimizer likes to form things like memsets
+ // out of stores in the original IR. Another important example is
+ // llvm.frameescape which must occur in the entry block. Inserting a
+ // safepoint before it is not legal since it could push the frameescape
+ // out of the entry block.
+ return true;
+ }
+ }
+ return false;
+}
+
+static Instruction *findLocationForEntrySafepoint(Function &F,
+ DominatorTree &DT) {
+
+ // Conceptually, this poll needs to be on method entry, but in
+ // practice, we place it as late in the entry block as possible. We
+ // can place it as late as we want as long as it dominates all calls
+ // that can grow the stack. This, combined with backedge polls,
+  // gives us all the progress guarantees we need.
+
+ // hasNextInstruction and nextInstruction are used to iterate
+ // through a "straight line" execution sequence.
+
+ auto hasNextInstruction = [](Instruction *I) {
+ if (!I->isTerminator()) {
+ return true;
+ }
+ BasicBlock *nextBB = I->getParent()->getUniqueSuccessor();
+ return nextBB && (nextBB->getUniquePredecessor() != nullptr);
+ };
+
+ auto nextInstruction = [&hasNextInstruction](Instruction *I) {
+ assert(hasNextInstruction(I) &&
+ "first check if there is a next instruction!");
+ if (I->isTerminator()) {
+ return I->getParent()->getUniqueSuccessor()->begin();
+ } else {
+ return std::next(BasicBlock::iterator(I));
+ }
+ };
+
+ Instruction *cursor = nullptr;
+ for (cursor = F.getEntryBlock().begin(); hasNextInstruction(cursor);
+ cursor = nextInstruction(cursor)) {
+
+ // We need to ensure a safepoint poll occurs before any 'real' call. The
+ // easiest way to ensure finite execution between safepoints in the face of
+ // recursive and mutually recursive functions is to enforce that each take
+ // a safepoint. Additionally, we need to ensure a poll before any call
+ // which can grow the stack by an unbounded amount. This isn't required
+ // for GC semantics per se, but is a common requirement for languages
+ // which detect stack overflow via guard pages and then throw exceptions.
+ if (auto CS = CallSite(cursor)) {
+ if (doesNotRequireEntrySafepointBefore(CS))
+ continue;
+ break;
+ }
+ }
+
+ assert((hasNextInstruction(cursor) || cursor->isTerminator()) &&
+ "either we stopped because of a call, or because of terminator");
+
+ return cursor;
+}
+
+/// Identify the list of call sites which need to have parseable state
+static void findCallSafepoints(Function &F,
+ std::vector<CallSite> &Found /*rval*/) {
+ assert(Found.empty() && "must be empty!");
+ for (Instruction &I : inst_range(F)) {
+ Instruction *inst = &I;
+ if (isa<CallInst>(inst) || isa<InvokeInst>(inst)) {
+ CallSite CS(inst);
+
+ // No safepoint needed or wanted
+ if (!needsStatepoint(CS)) {
+ continue;
+ }
+
+ Found.push_back(CS);
+ }
+ }
+}
+
+/// Implement a unique function which doesn't require us to sort the input
+/// vector.  Sorting it would change the output of a couple of tests in ways
+/// which make them less useful for testing fused safepoints.
+template <typename T> static void unique_unsorted(std::vector<T> &vec) {
+ std::set<T> seen;
+ std::vector<T> tmp;
+ vec.reserve(vec.size());
+ std::swap(tmp, vec);
+ for (auto V : tmp) {
+ if (seen.insert(V).second) {
+ vec.push_back(V);
+ }
+ }
+}
+
+static std::string GCSafepointPollName("gc.safepoint_poll");
+
+static bool isGCSafepointPoll(Function &F) {
+ return F.getName().equals(GCSafepointPollName);
+}
+
+/// Returns true if this function should be rewritten to include safepoint
+/// polls and parseable call sites. The main point of this function is to be
+/// an extension point for custom logic.
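+///
+/// For illustration (a hypothetical example, not taken from this pass), a
+/// function opts in by naming a GC strategy on its definition, e.g.:
+///   define void @f() gc "statepoint-example" { ... }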
+static bool shouldRewriteFunction(Function &F) {
+ // TODO: This should check the GCStrategy
+ if (F.hasGC()) {
+ const char *FunctionGCName = F.getGC();
+ const StringRef StatepointExampleName("statepoint-example");
+ const StringRef CoreCLRName("coreclr");
+ return (StatepointExampleName == FunctionGCName) ||
+ (CoreCLRName == FunctionGCName);
+ } else
+ return false;
+}
+
+// TODO: These should become properties of the GCStrategy, possibly with
+// command line overrides.
+static bool enableEntrySafepoints(Function &F) { return !NoEntry; }
+static bool enableBackedgeSafepoints(Function &F) { return !NoBackedge; }
+static bool enableCallSafepoints(Function &F) { return !NoCall; }
+
+// Normalize a basic block to make it ready to be the target of an invoke
+// statepoint.  Ensure that 'BB' does not have phi nodes; this may require
+// splitting it.
+static BasicBlock *normalizeForInvokeSafepoint(BasicBlock *BB,
+ BasicBlock *InvokeParent) {
+ BasicBlock *ret = BB;
+
+ if (!BB->getUniquePredecessor()) {
+ ret = SplitBlockPredecessors(BB, InvokeParent, "");
+ }
+
+  // Now that 'ret' has a unique predecessor, we can safely remove all phi nodes
+ // from it
+ FoldSingleEntryPHINodes(ret);
+ assert(!isa<PHINode>(ret->begin()));
+
+ return ret;
+}
+
+bool PlaceSafepoints::runOnFunction(Function &F) {
+ if (F.isDeclaration() || F.empty()) {
+ // This is a declaration, nothing to do. Must exit early to avoid crash in
+ // dom tree calculation
+ return false;
+ }
+
+ if (isGCSafepointPoll(F)) {
+ // Given we're inlining this inside of safepoint poll insertion, this
+ // doesn't make any sense. Note that we do make any contained calls
+ // parseable after we inline a poll.
+ return false;
+ }
+
+ if (!shouldRewriteFunction(F))
+ return false;
+
+ bool modified = false;
+
+ // In various bits below, we rely on the fact that uses are reachable from
+ // defs. When there are basic blocks unreachable from the entry, dominance
+  // and reachability queries return nonsensical results.  Thus, we preprocess
+ // the function to ensure these properties hold.
+ modified |= removeUnreachableBlocks(F);
+
+ // STEP 1 - Insert the safepoint polling locations. We do not need to
+ // actually insert parse points yet. That will be done for all polls and
+ // calls in a single pass.
+
+ DominatorTree DT;
+ DT.recalculate(F);
+
+ SmallVector<Instruction *, 16> PollsNeeded;
+ std::vector<CallSite> ParsePointNeeded;
+
+ if (enableBackedgeSafepoints(F)) {
+ // Construct a pass manager to run the LoopPass backedge logic. We
+ // need the pass manager to handle scheduling all the loop passes
+ // appropriately. Doing this by hand is painful and just not worth messing
+ // with for the moment.
+ legacy::FunctionPassManager FPM(F.getParent());
+ bool CanAssumeCallSafepoints = enableCallSafepoints(F);
+ PlaceBackedgeSafepointsImpl *PBS =
+ new PlaceBackedgeSafepointsImpl(CanAssumeCallSafepoints);
+ FPM.add(PBS);
+ FPM.run(F);
+
+ // We preserve dominance information when inserting the poll, otherwise
+ // we'd have to recalculate this on every insert
+ DT.recalculate(F);
+
+ auto &PollLocations = PBS->PollLocations;
+
+ auto OrderByBBName = [](Instruction *a, Instruction *b) {
+ return a->getParent()->getName() < b->getParent()->getName();
+ };
+    // We need the order of the list to be stable so that naming ends up stable
+ // when we split edges. This makes test cases much easier to write.
+ std::sort(PollLocations.begin(), PollLocations.end(), OrderByBBName);
+
+ // We can sometimes end up with duplicate poll locations. This happens if
+ // a single loop is visited more than once. The fact this happens seems
+ // wrong, but it does happen for the split-backedge.ll test case.
+ PollLocations.erase(std::unique(PollLocations.begin(),
+ PollLocations.end()),
+ PollLocations.end());
+
+ // Insert a poll at each point the analysis pass identified
+ // The poll location must be the terminator of a loop latch block.
+ for (TerminatorInst *Term : PollLocations) {
+ // We are inserting a poll, the function is modified
+ modified = true;
+
+ if (SplitBackedge) {
+ // Split the backedge of the loop and insert the poll within that new
+ // basic block. This creates a loop with two latches per original
+ // latch (which is non-ideal), but this appears to be easier to
+ // optimize in practice than inserting the poll immediately before the
+ // latch test.
+
+ // Since this is a latch, at least one of the successors must dominate
+          // it.  It's possible that we have a) duplicate edges to the same
+          // header and b) edges to distinct loop headers.  We need to insert
+          // polls on each.
+ SetVector<BasicBlock *> Headers;
+ for (unsigned i = 0; i < Term->getNumSuccessors(); i++) {
+ BasicBlock *Succ = Term->getSuccessor(i);
+ if (DT.dominates(Succ, Term->getParent())) {
+ Headers.insert(Succ);
+ }
+ }
+ assert(!Headers.empty() && "poll location is not a loop latch?");
+
+ // The split loop structure here is so that we only need to recalculate
+ // the dominator tree once. Alternatively, we could just keep it up to
+ // date and use a more natural merged loop.
+ SetVector<BasicBlock *> SplitBackedges;
+ for (BasicBlock *Header : Headers) {
+ BasicBlock *NewBB = SplitEdge(Term->getParent(), Header, &DT);
+ PollsNeeded.push_back(NewBB->getTerminator());
+ NumBackedgeSafepoints++;
+ }
+ } else {
+ // Split the latch block itself, right before the terminator.
+ PollsNeeded.push_back(Term);
+ NumBackedgeSafepoints++;
+ }
+ }
+ }
+
+ if (enableEntrySafepoints(F)) {
+ Instruction *Location = findLocationForEntrySafepoint(F, DT);
+ if (!Location) {
+ // policy choice not to insert?
+ } else {
+ PollsNeeded.push_back(Location);
+ modified = true;
+ NumEntrySafepoints++;
+ }
+ }
+
+ // Now that we've identified all the needed safepoint poll locations, insert
+ // safepoint polls themselves.
+ for (Instruction *PollLocation : PollsNeeded) {
+ std::vector<CallSite> RuntimeCalls;
+ InsertSafepointPoll(PollLocation, RuntimeCalls);
+ ParsePointNeeded.insert(ParsePointNeeded.end(), RuntimeCalls.begin(),
+ RuntimeCalls.end());
+ }
+  PollsNeeded.clear(); // make sure we don't accidentally reuse this list
+ // The dominator tree has been invalidated by the inlining performed in the
+ // above loop. TODO: Teach the inliner how to update the dom tree?
+ DT.recalculate(F);
+
+ if (enableCallSafepoints(F)) {
+ std::vector<CallSite> Calls;
+ findCallSafepoints(F, Calls);
+ NumCallSafepoints += Calls.size();
+ ParsePointNeeded.insert(ParsePointNeeded.end(), Calls.begin(), Calls.end());
+ }
+
+ // Unique the vectors since we can end up with duplicates if we scan the call
+ // site for call safepoints after we add it for entry or backedge. The
+ // only reason we need tracking at all is that some functions might have
+ // polls but not call safepoints and thus we might miss marking the runtime
+ // calls for the polls. (This is useful in test cases!)
+ unique_unsorted(ParsePointNeeded);
+
+ // Any parse point (no matter what source) will be handled here
+
+ // We're about to start modifying the function
+ if (!ParsePointNeeded.empty())
+ modified = true;
+
+ // Now run through and insert the safepoints, but do _NOT_ update or remove
+ // any existing uses. We have references to live variables that need to
+ // survive to the last iteration of this loop.
+ std::vector<Value *> Results;
+ Results.reserve(ParsePointNeeded.size());
+ for (size_t i = 0; i < ParsePointNeeded.size(); i++) {
+ CallSite &CS = ParsePointNeeded[i];
+
+ // For invoke statepoints we need to remove all phi nodes at the normal
+ // destination block.
+    // The reason is that we can place the gc_result only after the last phi
+    // node in the basic block.  We would get malformed code after RAUW of the
+    // gc_result if one of these phi nodes used the result from the invoke.
+ if (InvokeInst *Invoke = dyn_cast<InvokeInst>(CS.getInstruction())) {
+ normalizeForInvokeSafepoint(Invoke->getNormalDest(),
+ Invoke->getParent());
+ }
+
+ Value *GCResult = ReplaceWithStatepoint(CS, nullptr);
+ Results.push_back(GCResult);
+ }
+ assert(Results.size() == ParsePointNeeded.size());
+
+ // Adjust all users of the old call sites to use the new ones instead
+ for (size_t i = 0; i < ParsePointNeeded.size(); i++) {
+ CallSite &CS = ParsePointNeeded[i];
+ Value *GCResult = Results[i];
+ if (GCResult) {
+      // Cannot RAUW the invoke gc result when phi nodes are present.
+ assert(CS.isCall() || !isa<PHINode>(cast<Instruction>(GCResult)->getParent()->begin()));
+
+ // Replace all uses with the new call
+ CS.getInstruction()->replaceAllUsesWith(GCResult);
+ }
+
+ // Now that we've handled all uses, remove the original call itself
+ // Note: The insert point can't be the deleted instruction!
+ CS.getInstruction()->eraseFromParent();
+ }
+ return modified;
+}
+
+char PlaceBackedgeSafepointsImpl::ID = 0;
+char PlaceSafepoints::ID = 0;
+
+FunctionPass *llvm::createPlaceSafepointsPass() {
+ return new PlaceSafepoints();
+}
+
+INITIALIZE_PASS_BEGIN(PlaceBackedgeSafepointsImpl,
+ "place-backedge-safepoints-impl",
+ "Place Backedge Safepoints", false, false)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
+INITIALIZE_PASS_END(PlaceBackedgeSafepointsImpl,
+ "place-backedge-safepoints-impl",
+ "Place Backedge Safepoints", false, false)
+
+INITIALIZE_PASS_BEGIN(PlaceSafepoints, "place-safepoints", "Place Safepoints",
+ false, false)
+INITIALIZE_PASS_END(PlaceSafepoints, "place-safepoints", "Place Safepoints",
+ false, false)
+
+static bool isGCLeafFunction(const CallSite &CS) {
+ Instruction *inst = CS.getInstruction();
+ if (isa<IntrinsicInst>(inst)) {
+ // Most LLVM intrinsics are things which can never take a safepoint.
+ // As a result, we don't need to have the stack parsable at the
+ // callsite. This is a highly useful optimization since intrinsic
+    // calls are fairly prevalent, particularly in debug builds.
+ return true;
+ }
+
+ // If this function is marked explicitly as a leaf call, we don't need to
+  // place a safepoint for it.  In fact, for correctness we *can't* in many
+  // cases.  Note: Indirect calls return null for the called function;
+  // these obviously aren't runtime functions with attributes.
+ // TODO: Support attributes on the call site as well.
+ const Function *F = CS.getCalledFunction();
+ bool isLeaf =
+ F &&
+ F->getFnAttribute("gc-leaf-function").getValueAsString().equals("true");
+ if (isLeaf) {
+ return true;
+ }
+ return false;
+}
+
+static void
+InsertSafepointPoll(Instruction *InsertBefore,
+ std::vector<CallSite> &ParsePointsNeeded /*rval*/) {
+ BasicBlock *OrigBB = InsertBefore->getParent();
+ Module *M = InsertBefore->getModule();
+ assert(M && "must be part of a module");
+
+  // Inline the safepoint poll implementation - this will get all the branches,
+  // control flow, etc.  Most importantly, it will introduce the actual slow
+ // path call - where we need to insert a safepoint (parsepoint).
+
+ auto *F = M->getFunction(GCSafepointPollName);
+ assert(F->getType()->getElementType() ==
+ FunctionType::get(Type::getVoidTy(M->getContext()), false) &&
+ "gc.safepoint_poll declared with wrong type");
+ assert(!F->empty() && "gc.safepoint_poll must be a non-empty function");
+ CallInst *PollCall = CallInst::Create(F, "", InsertBefore);
+
+ // Record some information about the call site we're replacing
+ BasicBlock::iterator before(PollCall), after(PollCall);
+ bool isBegin(false);
+ if (before == OrigBB->begin()) {
+ isBegin = true;
+ } else {
+ before--;
+ }
+ after++;
+ assert(after != OrigBB->end() && "must have successor");
+
+ // do the actual inlining
+ InlineFunctionInfo IFI;
+ bool InlineStatus = InlineFunction(PollCall, IFI);
+ assert(InlineStatus && "inline must succeed");
+ (void)InlineStatus; // suppress warning in release-asserts
+
+ // Check post conditions
+ assert(IFI.StaticAllocas.empty() && "can't have allocs");
+
+ std::vector<CallInst *> calls; // new calls
+ std::set<BasicBlock *> BBs; // new BBs + insertee
+  // Include only the newly inserted instructions.  Note: begin may not be
+  // valid if we inserted at the beginning of the basic block.
+ BasicBlock::iterator start;
+ if (isBegin) {
+ start = OrigBB->begin();
+ } else {
+ start = before;
+ start++;
+ }
+
+ // If your poll function includes an unreachable at the end, that's not
+ // valid. Bugpoint likes to create this, so check for it.
+ assert(isPotentiallyReachable(&*start, &*after, nullptr, nullptr) &&
+ "malformed poll function");
+
+ scanInlinedCode(&*(start), &*(after), calls, BBs);
+ assert(!calls.empty() && "slow path not found for safepoint poll");
+
+ // Record the fact we need a parsable state at the runtime call contained in
+ // the poll function. This is required so that the runtime knows how to
+ // parse the last frame when we actually take the safepoint (i.e. execute
+ // the slow path)
+ assert(ParsePointsNeeded.empty());
+ for (size_t i = 0; i < calls.size(); i++) {
+
+ // No safepoint needed or wanted
+ if (!needsStatepoint(calls[i])) {
+ continue;
+ }
+
+ // These are likely runtime calls. Should we assert that via calling
+ // convention or something?
+ ParsePointsNeeded.push_back(CallSite(calls[i]));
+ }
+ assert(ParsePointsNeeded.size() <= calls.size());
+}
+
+/// Replaces the given call site (Call or Invoke) with a gc.statepoint
+/// intrinsic with an empty deoptimization arguments list. This does
+/// NOT do explicit relocation for GC support.
+static Value *ReplaceWithStatepoint(const CallSite &CS, /* to replace */
+ Pass *P) {
+ assert(CS.getInstruction()->getParent()->getParent()->getParent() &&
+ "must be set");
+
+ // TODO: technically, a pass is not allowed to get functions from within a
+ // function pass since it might trigger a new function addition. Refactor
+ // this logic out to the initialization of the pass. Doesn't appear to
+ // matter in practice.
+
+  // Then go ahead and use the builder to actually do the inserts.  We insert
+ // immediately before the previous instruction under the assumption that all
+ // arguments will be available here. We can't insert afterwards since we may
+ // be replacing a terminator.
+ IRBuilder<> Builder(CS.getInstruction());
+
+ // Note: The gc args are not filled in at this time, that's handled by
+ // RewriteStatepointsForGC (which is currently under review).
+
+ // Create the statepoint given all the arguments
+ Instruction *Token = nullptr;
+
+ uint64_t ID;
+ uint32_t NumPatchBytes;
+
+ AttributeSet OriginalAttrs = CS.getAttributes();
+ Attribute AttrID =
+ OriginalAttrs.getAttribute(AttributeSet::FunctionIndex, "statepoint-id");
+ Attribute AttrNumPatchBytes = OriginalAttrs.getAttribute(
+ AttributeSet::FunctionIndex, "statepoint-num-patch-bytes");
+
+ AttrBuilder AttrsToRemove;
+ bool HasID = AttrID.isStringAttribute() &&
+ !AttrID.getValueAsString().getAsInteger(10, ID);
+
+ if (HasID)
+ AttrsToRemove.addAttribute("statepoint-id");
+ else
+ ID = 0xABCDEF00;
+
+ bool HasNumPatchBytes =
+ AttrNumPatchBytes.isStringAttribute() &&
+ !AttrNumPatchBytes.getValueAsString().getAsInteger(10, NumPatchBytes);
+
+ if (HasNumPatchBytes)
+ AttrsToRemove.addAttribute("statepoint-num-patch-bytes");
+ else
+ NumPatchBytes = 0;
+
+ OriginalAttrs = OriginalAttrs.removeAttributes(
+ CS.getInstruction()->getContext(), AttributeSet::FunctionIndex,
+ AttrsToRemove);
+
+ Value *StatepointTarget = NumPatchBytes == 0
+ ? CS.getCalledValue()
+ : ConstantPointerNull::get(cast<PointerType>(
+ CS.getCalledValue()->getType()));
+
+ if (CS.isCall()) {
+ CallInst *ToReplace = cast<CallInst>(CS.getInstruction());
+ CallInst *Call = Builder.CreateGCStatepointCall(
+ ID, NumPatchBytes, StatepointTarget,
+ makeArrayRef(CS.arg_begin(), CS.arg_end()), None, None,
+ "safepoint_token");
+ Call->setTailCall(ToReplace->isTailCall());
+ Call->setCallingConv(ToReplace->getCallingConv());
+
+    // If we can handle this set of attributes, set up the function attributes
+    // directly on the statepoint and set the return attributes later on the
+    // gc_result intrinsic.
+ Call->setAttributes(OriginalAttrs.getFnAttributes());
+
+ Token = Call;
+
+    // Put the following gc_result and gc_relocate calls immediately after
+    // the old call (which we're about to delete).
+ assert(ToReplace->getNextNode() && "not a terminator, must have next");
+ Builder.SetInsertPoint(ToReplace->getNextNode());
+ Builder.SetCurrentDebugLocation(ToReplace->getNextNode()->getDebugLoc());
+ } else if (CS.isInvoke()) {
+ InvokeInst *ToReplace = cast<InvokeInst>(CS.getInstruction());
+
+ // Insert the new invoke into the old block. We'll remove the old one in a
+ // moment at which point this will become the new terminator for the
+ // original block.
+ Builder.SetInsertPoint(ToReplace->getParent());
+ InvokeInst *Invoke = Builder.CreateGCStatepointInvoke(
+ ID, NumPatchBytes, StatepointTarget, ToReplace->getNormalDest(),
+ ToReplace->getUnwindDest(), makeArrayRef(CS.arg_begin(), CS.arg_end()),
+ None, None, "safepoint_token");
+
+ Invoke->setCallingConv(ToReplace->getCallingConv());
+
+    // If we can handle this set of attributes, set up the function attributes
+    // directly on the statepoint and set the return attributes later on the
+    // gc_result intrinsic.
+ Invoke->setAttributes(OriginalAttrs.getFnAttributes());
+
+ Token = Invoke;
+
+ // We'll insert the gc.result into the normal block
+ BasicBlock *NormalDest = ToReplace->getNormalDest();
+    // Cannot insert gc.result when phi nodes are present; those cases should
+    // have been removed prior to running this function.
+ assert(!isa<PHINode>(NormalDest->begin()));
+ Instruction *IP = &*(NormalDest->getFirstInsertionPt());
+ Builder.SetInsertPoint(IP);
+ } else {
+    llvm_unreachable("unexpected type of CallSite");
+ }
+ assert(Token);
+
+ // Handle the return value of the original call - update all uses to use a
+ // gc_result hanging off the statepoint node we just inserted
+
+ // Only add the gc_result iff there is actually a used result
+ if (!CS.getType()->isVoidTy() && !CS.getInstruction()->use_empty()) {
+ std::string TakenName =
+ CS.getInstruction()->hasName() ? CS.getInstruction()->getName() : "";
+ CallInst *GCResult = Builder.CreateGCResult(Token, CS.getType(), TakenName);
+ GCResult->setAttributes(OriginalAttrs.getRetAttributes());
+ return GCResult;
+ } else {
+ // No return value for the call.
+ return nullptr;
+ }
+}
diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp
index 4e022556f9cc..b677523d7032 100644
--- a/lib/Transforms/Scalar/Reassociate.cpp
+++ b/lib/Transforms/Scalar/Reassociate.cpp
@@ -59,7 +59,7 @@ namespace {
}
#ifndef NDEBUG
-/// PrintOps - Print out the expression identified in the Ops list.
+/// Print out the expression identified in the Ops list.
///
static void PrintOps(Instruction *I, const SmallVectorImpl<ValueEntry> &Ops) {
Module *M = I->getParent()->getParent()->getParent();
@@ -233,8 +233,8 @@ INITIALIZE_PASS(Reassociate, "reassociate",
// Public interface to the Reassociate pass
FunctionPass *llvm::createReassociatePass() { return new Reassociate(); }
-/// isReassociableOp - Return true if V is an instruction of the specified
-/// opcode and if it only has one use.
+/// Return true if V is an instruction of the specified opcode and if it
+/// only has one use.
static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode) {
if (V->hasOneUse() && isa<Instruction>(V) &&
cast<Instruction>(V)->getOpcode() == Opcode &&
@@ -321,10 +321,8 @@ unsigned Reassociate::getRank(Value *V) {
// If this is a not or neg instruction, do not count it for rank. This
// assures us that X and ~X will have the same rank.
- Type *Ty = V->getType();
- if ((!Ty->isIntegerTy() && !Ty->isFloatingPointTy()) ||
- (!BinaryOperator::isNot(I) && !BinaryOperator::isNeg(I) &&
- !BinaryOperator::isFNeg(I)))
+ if (!BinaryOperator::isNot(I) && !BinaryOperator::isNeg(I) &&
+ !BinaryOperator::isFNeg(I))
++Rank;
DEBUG(dbgs() << "Calculated Rank[" << V->getName() << "] = " << Rank << "\n");
@@ -351,7 +349,7 @@ void Reassociate::canonicalizeOperands(Instruction *I) {
static BinaryOperator *CreateAdd(Value *S1, Value *S2, const Twine &Name,
Instruction *InsertBefore, Value *FlagsOp) {
- if (S1->getType()->isIntegerTy())
+ if (S1->getType()->isIntOrIntVectorTy())
return BinaryOperator::CreateAdd(S1, S2, Name, InsertBefore);
else {
BinaryOperator *Res =
@@ -363,7 +361,7 @@ static BinaryOperator *CreateAdd(Value *S1, Value *S2, const Twine &Name,
static BinaryOperator *CreateMul(Value *S1, Value *S2, const Twine &Name,
Instruction *InsertBefore, Value *FlagsOp) {
- if (S1->getType()->isIntegerTy())
+ if (S1->getType()->isIntOrIntVectorTy())
return BinaryOperator::CreateMul(S1, S2, Name, InsertBefore);
else {
BinaryOperator *Res =
@@ -375,7 +373,7 @@ static BinaryOperator *CreateMul(Value *S1, Value *S2, const Twine &Name,
static BinaryOperator *CreateNeg(Value *S1, const Twine &Name,
Instruction *InsertBefore, Value *FlagsOp) {
- if (S1->getType()->isIntegerTy())
+ if (S1->getType()->isIntOrIntVectorTy())
return BinaryOperator::CreateNeg(S1, Name, InsertBefore);
else {
BinaryOperator *Res = BinaryOperator::CreateFNeg(S1, Name, InsertBefore);
@@ -384,12 +382,11 @@ static BinaryOperator *CreateNeg(Value *S1, const Twine &Name,
}
}
-/// LowerNegateToMultiply - Replace 0-X with X*-1.
-///
+/// Replace 0-X with X*-1.
static BinaryOperator *LowerNegateToMultiply(Instruction *Neg) {
Type *Ty = Neg->getType();
- Constant *NegOne = Ty->isIntegerTy() ? ConstantInt::getAllOnesValue(Ty)
- : ConstantFP::get(Ty, -1.0);
+ Constant *NegOne = Ty->isIntOrIntVectorTy() ?
+ ConstantInt::getAllOnesValue(Ty) : ConstantFP::get(Ty, -1.0);
BinaryOperator *Res = CreateMul(Neg->getOperand(1), NegOne, "", Neg, Neg);
Neg->setOperand(1, Constant::getNullValue(Ty)); // Drop use of op.
@@ -399,8 +396,8 @@ static BinaryOperator *LowerNegateToMultiply(Instruction *Neg) {
return Res;
}
-/// CarmichaelShift - Returns k such that lambda(2^Bitwidth) = 2^k, where lambda
-/// is the Carmichael function. This means that x^(2^k) === 1 mod 2^Bitwidth for
+/// Returns k such that lambda(2^Bitwidth) = 2^k, where lambda is the Carmichael
+/// function. This means that x^(2^k) === 1 mod 2^Bitwidth for
/// every odd x, i.e. x^(2^k) = 1 for every odd x in Bitwidth-bit arithmetic.
/// Note that 0 <= k < Bitwidth, and if Bitwidth > 3 then x^(2^k) = 0 for every
/// even x in Bitwidth-bit arithmetic.
@@ -410,7 +407,7 @@ static unsigned CarmichaelShift(unsigned Bitwidth) {
return Bitwidth - 2;
}
-/// IncorporateWeight - Add the extra weight 'RHS' to the existing weight 'LHS',
+/// Add the extra weight 'RHS' to the existing weight 'LHS',
/// reducing the combined weight using any special properties of the operation.
/// The existing weight LHS represents the computation X op X op ... op X where
/// X occurs LHS times. The combined weight represents X op X op ... op X with
@@ -492,7 +489,7 @@ static void IncorporateWeight(APInt &LHS, const APInt &RHS, unsigned Opcode) {
typedef std::pair<Value*, APInt> RepeatedValue;
-/// LinearizeExprTree - Given an associative binary expression, return the leaf
+/// Given an associative binary expression, return the leaf
/// nodes in Ops along with their weights (how many times the leaf occurs). The
/// original expression is the same as
/// (Ops[0].first op Ops[0].first op ... Ops[0].first) <- Ops[0].second times
@@ -742,8 +739,8 @@ static bool LinearizeExprTree(BinaryOperator *I,
return Changed;
}
-// RewriteExprTree - Now that the operands for this expression tree are
-// linearized and optimized, emit them in-order.
+/// Now that the operands for this expression tree are
+/// linearized and optimized, emit them in-order.
void Reassociate::RewriteExprTree(BinaryOperator *I,
SmallVectorImpl<ValueEntry> &Ops) {
assert(Ops.size() > 1 && "Single values should be used directly!");
@@ -872,7 +869,7 @@ void Reassociate::RewriteExprTree(BinaryOperator *I,
Constant *Undef = UndefValue::get(I->getType());
NewOp = BinaryOperator::Create(Instruction::BinaryOps(Opcode),
Undef, Undef, "", I);
- if (NewOp->getType()->isFloatingPointTy())
+ if (NewOp->getType()->isFPOrFPVectorTy())
NewOp->setFastMathFlags(I->getFastMathFlags());
} else {
NewOp = NodesToRewrite.pop_back_val();
@@ -912,15 +909,18 @@ void Reassociate::RewriteExprTree(BinaryOperator *I,
RedoInsts.insert(NodesToRewrite[i]);
}
-/// NegateValue - Insert instructions before the instruction pointed to by BI,
+/// Insert instructions before the instruction pointed to by BI,
/// that computes the negative version of the value specified. The negative
/// version of the value is returned, and BI is left pointing at the instruction
/// that should be processed next by the reassociation pass.
static Value *NegateValue(Value *V, Instruction *BI) {
- if (ConstantFP *C = dyn_cast<ConstantFP>(V))
- return ConstantExpr::getFNeg(C);
- if (Constant *C = dyn_cast<Constant>(V))
+ if (Constant *C = dyn_cast<Constant>(V)) {
+ if (C->getType()->isFPOrFPVectorTy()) {
+ return ConstantExpr::getFNeg(C);
+ }
return ConstantExpr::getNeg(C);
+ }
+
// We are trying to expose opportunity for reassociation. One of the things
// that we want to do to achieve this is to push a negation as deep into an
@@ -984,8 +984,7 @@ static Value *NegateValue(Value *V, Instruction *BI) {
return CreateNeg(V, V->getName() + ".neg", BI, BI);
}
-/// ShouldBreakUpSubtract - Return true if we should break up this subtract of
-/// X-Y into (X + -Y).
+/// Return true if we should break up this subtract of X-Y into (X + -Y).
static bool ShouldBreakUpSubtract(Instruction *Sub) {
// If this is a negation, we can't split it up!
if (BinaryOperator::isNeg(Sub) || BinaryOperator::isFNeg(Sub))
@@ -1014,9 +1013,8 @@ static bool ShouldBreakUpSubtract(Instruction *Sub) {
return false;
}
-/// BreakUpSubtract - If we have (X-Y), and if either X is an add, or if this is
-/// only used by an add, transform this into (X+(0-Y)) to promote better
-/// reassociation.
+/// If we have (X-Y), and if either X is an add, or if this is only used by an
+/// add, transform this into (X+(0-Y)) to promote better reassociation.
static BinaryOperator *BreakUpSubtract(Instruction *Sub) {
// Convert a subtract into an add and a neg instruction. This allows sub
// instructions to be commuted with other add instructions.
@@ -1038,9 +1036,8 @@ static BinaryOperator *BreakUpSubtract(Instruction *Sub) {
return New;
}
-/// ConvertShiftToMul - If this is a shift of a reassociable multiply or is used
-/// by one, change this into a multiply by a constant to assist with further
-/// reassociation.
+/// If this is a shift of a reassociable multiply or is used by one, change
+/// this into a multiply by a constant to assist with further reassociation.
static BinaryOperator *ConvertShiftToMul(Instruction *Shl) {
Constant *MulCst = ConstantInt::get(Shl->getType(), 1);
MulCst = ConstantExpr::getShl(MulCst, cast<Constant>(Shl->getOperand(1)));
@@ -1065,10 +1062,9 @@ static BinaryOperator *ConvertShiftToMul(Instruction *Shl) {
return Mul;
}
-/// FindInOperandList - Scan backwards and forwards among values with the same
-/// rank as element i to see if X exists. If X does not exist, return i. This
-/// is useful when scanning for 'x' when we see '-x' because they both get the
-/// same rank.
+/// Scan backwards and forwards among values with the same rank as element i
+/// to see if X exists. If X does not exist, return i. This is useful when
+/// scanning for 'x' when we see '-x' because they both get the same rank.
static unsigned FindInOperandList(SmallVectorImpl<ValueEntry> &Ops, unsigned i,
Value *X) {
unsigned XRank = Ops[i].Rank;
@@ -1093,7 +1089,7 @@ static unsigned FindInOperandList(SmallVectorImpl<ValueEntry> &Ops, unsigned i,
return i;
}
-/// EmitAddTreeOfValues - Emit a tree of add instructions, summing Ops together
+/// Emit a tree of add instructions, summing Ops together
/// and returning the result. Insert the tree before I.
static Value *EmitAddTreeOfValues(Instruction *I,
SmallVectorImpl<WeakVH> &Ops){
@@ -1105,8 +1101,8 @@ static Value *EmitAddTreeOfValues(Instruction *I,
return CreateAdd(V2, V1, "tmp", I, I);
}
-/// RemoveFactorFromExpression - If V is an expression tree that is a
-/// multiplication sequence, and if this sequence contains a multiply by Factor,
+/// If V is an expression tree that is a multiplication sequence,
+/// and if this sequence contains a multiply by Factor,
/// remove Factor from the tree and return the new tree.
Value *Reassociate::RemoveFactorFromExpression(Value *V, Value *Factor) {
BinaryOperator *BO = isReassociableOp(V, Instruction::Mul, Instruction::FMul);
@@ -1178,8 +1174,8 @@ Value *Reassociate::RemoveFactorFromExpression(Value *V, Value *Factor) {
return V;
}
-/// FindSingleUseMultiplyFactors - If V is a single-use multiply, recursively
-/// add its operands as factors, otherwise add V to the list of factors.
+/// If V is a single-use multiply, recursively add its operands as factors,
+/// otherwise add V to the list of factors.
///
/// Ops is the top-level list of add operands we're trying to factor.
static void FindSingleUseMultiplyFactors(Value *V,
@@ -1196,10 +1192,9 @@ static void FindSingleUseMultiplyFactors(Value *V,
FindSingleUseMultiplyFactors(BO->getOperand(0), Factors, Ops);
}
-/// OptimizeAndOrXor - Optimize a series of operands to an 'and', 'or', or 'xor'
-/// instruction. This optimizes based on identities. If it can be reduced to
-/// a single Value, it is returned, otherwise the Ops list is mutated as
-/// necessary.
+/// Optimize a series of operands to an 'and', 'or', or 'xor' instruction.
+/// This optimizes based on identities. If it can be reduced to a single Value,
+/// it is returned, otherwise the Ops list is mutated as necessary.
static Value *OptimizeAndOrXor(unsigned Opcode,
SmallVectorImpl<ValueEntry> &Ops) {
// Scan the operand lists looking for X and ~X pairs, along with X,X pairs.
@@ -1489,7 +1484,7 @@ Value *Reassociate::OptimizeXor(Instruction *I,
return nullptr;
}
-/// OptimizeAdd - Optimize a series of operands to an 'add' instruction. This
+/// Optimize a series of operands to an 'add' instruction. This
/// optimizes based on identities. If it can be reduced to a single Value, it
/// is returned, otherwise the Ops list is mutated as necessary.
Value *Reassociate::OptimizeAdd(Instruction *I,
@@ -1517,8 +1512,8 @@ Value *Reassociate::OptimizeAdd(Instruction *I,
// Insert a new multiply.
Type *Ty = TheOp->getType();
- Constant *C = Ty->isIntegerTy() ? ConstantInt::get(Ty, NumFound)
- : ConstantFP::get(Ty, NumFound);
+ Constant *C = Ty->isIntOrIntVectorTy() ?
+ ConstantInt::get(Ty, NumFound) : ConstantFP::get(Ty, NumFound);
Instruction *Mul = CreateMul(TheOp, C, "factor", I, I);
// Now that we have inserted a multiply, optimize it. This allows us to
@@ -1658,7 +1653,7 @@ Value *Reassociate::OptimizeAdd(Instruction *I,
// from an expression will drop a use of maxocc, and this can cause
// RemoveFactorFromExpression on successive values to behave differently.
Instruction *DummyInst =
- I->getType()->isIntegerTy()
+ I->getType()->isIntOrIntVectorTy()
? BinaryOperator::CreateAdd(MaxOccVal, MaxOccVal)
: BinaryOperator::CreateFAdd(MaxOccVal, MaxOccVal);
@@ -1789,7 +1784,7 @@ static Value *buildMultiplyTree(IRBuilder<> &Builder,
Value *LHS = Ops.pop_back_val();
do {
- if (LHS->getType()->isIntegerTy())
+ if (LHS->getType()->isIntOrIntVectorTy())
LHS = Builder.CreateMul(LHS, Ops.pop_back_val());
else
LHS = Builder.CreateFMul(LHS, Ops.pop_back_val());
@@ -1942,8 +1937,7 @@ Value *Reassociate::OptimizeExpression(BinaryOperator *I,
return nullptr;
}
-/// EraseInst - Zap the given instruction, adding interesting operands to the
-/// work list.
+/// Zap the given instruction, adding interesting operands to the work list.
void Reassociate::EraseInst(Instruction *I) {
assert(isInstructionTriviallyDead(I) && "Trivially dead instructions only!");
SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end());
@@ -1988,7 +1982,7 @@ Instruction *Reassociate::canonicalizeNegConstExpr(Instruction *I) {
Constant *C = C0 ? C0 : C1;
unsigned ConstIdx = C0 ? 0 : 1;
if (auto *CI = dyn_cast<ConstantInt>(C)) {
- if (!CI->isNegative())
+ if (!CI->isNegative() || CI->isMinValue(true))
return nullptr;
} else if (auto *CF = dyn_cast<ConstantFP>(C)) {
if (!CF->isNegative())
@@ -2057,7 +2051,7 @@ Instruction *Reassociate::canonicalizeNegConstExpr(Instruction *I) {
return NI;
}
-/// OptimizeInst - Inspect and optimize the given instruction. Note that erasing
+/// Inspect and optimize the given instruction. Note that erasing
/// instructions is not allowed.
void Reassociate::OptimizeInst(Instruction *I) {
// Only consider operations that we understand.
@@ -2087,8 +2081,9 @@ void Reassociate::OptimizeInst(Instruction *I) {
if (I->isCommutative())
canonicalizeOperands(I);
- // Don't optimize vector instructions.
- if (I->getType()->isVectorTy())
+ // TODO: We should optimize vector Xor instructions, but they are
+ // currently unsupported.
+ if (I->getType()->isVectorTy() && I->getOpcode() == Instruction::Xor)
return;
// Don't optimize floating point instructions that don't have unsafe algebra.
@@ -2167,9 +2162,6 @@ void Reassociate::OptimizeInst(Instruction *I) {
}
void Reassociate::ReassociateExpression(BinaryOperator *I) {
- assert(!I->getType()->isVectorTy() &&
- "Reassociation of vector instructions is not supported.");
-
// First, walk the expression tree, linearizing the tree, collecting the
// operand information.
SmallVector<RepeatedValue, 8> Tree;
@@ -2192,7 +2184,7 @@ void Reassociate::ReassociateExpression(BinaryOperator *I) {
// the vector.
std::stable_sort(Ops.begin(), Ops.end());
- // OptimizeExpression - Now that we have the expression tree in a convenient
+ // Now that we have the expression tree in a convenient
// sorted form, optimize it globally if possible.
if (Value *V = OptimizeExpression(I, Ops)) {
if (V == I)
diff --git a/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
new file mode 100644
index 000000000000..6cf765a8438c
--- /dev/null
+++ b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -0,0 +1,2506 @@
+//===- RewriteStatepointsForGC.cpp - Make GC relocations explicit ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Rewrite an existing set of gc.statepoints such that they make potential
+// relocations performed by the garbage collector explicit in the IR.
+//
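+// Schematically (argument lists elided; this is an illustrative sketch, not
+// the exact intrinsic signatures), a call such as
+//
+//   %ret = call i8 addrspace(1)* @foo(i8 addrspace(1)* %obj)
+//
+// becomes a statepoint whose token ties the explicit relocation and the
+// original result together:
+//
+//   %token = call @llvm.experimental.gc.statepoint(... @foo, ... %obj ...)
+//   %obj.relocated = call @llvm.experimental.gc.relocate(%token, ...)
+//   %ret = call @llvm.experimental.gc.result(%token)
+//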
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Pass.h"
+#include "llvm/Analysis/CFG.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Statepoint.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/PromoteMemToReg.h"
+
+#define DEBUG_TYPE "rewrite-statepoints-for-gc"
+
+using namespace llvm;
+
+// Print tracing output
+static cl::opt<bool> TraceLSP("trace-rewrite-statepoints", cl::Hidden,
+ cl::init(false));
+
+// Print the liveset found at the insert location
+static cl::opt<bool> PrintLiveSet("spp-print-liveset", cl::Hidden,
+ cl::init(false));
+static cl::opt<bool> PrintLiveSetSize("spp-print-liveset-size", cl::Hidden,
+ cl::init(false));
+// Print out the base pointers for debugging
+static cl::opt<bool> PrintBasePointers("spp-print-base-pointers", cl::Hidden,
+ cl::init(false));
+
+// Cost threshold measuring when it is profitable to rematerialize a value
+// instead of relocating it
+static cl::opt<unsigned>
+RematerializationThreshold("spp-rematerialization-threshold", cl::Hidden,
+ cl::init(6));
+
+#ifdef XDEBUG
+static bool ClobberNonLive = true;
+#else
+static bool ClobberNonLive = false;
+#endif
+static cl::opt<bool, true> ClobberNonLiveOverride("rs4gc-clobber-non-live",
+ cl::location(ClobberNonLive),
+ cl::Hidden);
+
+namespace {
+struct RewriteStatepointsForGC : public FunctionPass {
+ static char ID; // Pass identification, replacement for typeid
+
+ RewriteStatepointsForGC() : FunctionPass(ID) {
+ initializeRewriteStatepointsForGCPass(*PassRegistry::getPassRegistry());
+ }
+ bool runOnFunction(Function &F) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ // We add and rewrite a bunch of instructions, but don't really do much
+ // else. We could in theory preserve a lot more analyses here.
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
+ }
+};
+} // namespace
+
+char RewriteStatepointsForGC::ID = 0;
+
+FunctionPass *llvm::createRewriteStatepointsForGCPass() {
+ return new RewriteStatepointsForGC();
+}
+
+INITIALIZE_PASS_BEGIN(RewriteStatepointsForGC, "rewrite-statepoints-for-gc",
+ "Make relocations explicit at statepoints", false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_END(RewriteStatepointsForGC, "rewrite-statepoints-for-gc",
+ "Make relocations explicit at statepoints", false, false)
+
+namespace {
+struct GCPtrLivenessData {
+ /// Values defined in this block.
+ DenseMap<BasicBlock *, DenseSet<Value *>> KillSet;
+  /// Values used in this block (and thus live); does not include values
+ /// killed within this block.
+ DenseMap<BasicBlock *, DenseSet<Value *>> LiveSet;
+
+ /// Values live into this basic block (i.e. used by any
+ /// instruction in this basic block or ones reachable from here)
+ DenseMap<BasicBlock *, DenseSet<Value *>> LiveIn;
+
+ /// Values live out of this basic block (i.e. live into
+ /// any successor block)
+ DenseMap<BasicBlock *, DenseSet<Value *>> LiveOut;
+};
+
+// The type of the internal cache used inside the findBasePointers family
+// of functions. From the caller's perspective, this is an opaque type and
+// should not be inspected.
+//
+// In the actual implementation this caches two relations:
+// - The base relation itself (i.e. this pointer is based on that one)
+// - The base defining value relation (i.e. before base_phi insertion)
+// Generally, after the execution of a full findBasePointer call, only the
+// base relation will remain. Internally, we add a mixture of the two
+// types, then update all entries of the second type to the first type.
+typedef DenseMap<Value *, Value *> DefiningValueMapTy;
+typedef DenseSet<llvm::Value *> StatepointLiveSetTy;
+typedef DenseMap<Instruction *, Value *> RematerializedValueMapTy;
+
+struct PartiallyConstructedSafepointRecord {
+  /// The set of values known to be live across this safepoint
+ StatepointLiveSetTy liveset;
+
+ /// Mapping from live pointers to a base-defining-value
+ DenseMap<llvm::Value *, llvm::Value *> PointerToBase;
+
+ /// The *new* gc.statepoint instruction itself. This produces the token
+ /// that normal path gc.relocates and the gc.result are tied to.
+ Instruction *StatepointToken;
+
+  /// Instruction to which exceptional gc relocates are attached.
+ /// Makes it easier to iterate through them during relocationViaAlloca.
+ Instruction *UnwindToken;
+
+  /// Record live values that we rematerialized instead of relocating.
+  /// They are not included in the 'liveset' field.
+  /// Maps each rematerialized copy to its original value.
+ RematerializedValueMapTy RematerializedValues;
+};
+}
+
+/// Compute the live-in set for every basic block in the function
+static void computeLiveInValues(DominatorTree &DT, Function &F,
+ GCPtrLivenessData &Data);
+
+/// Given results from the dataflow liveness computation, find the set of live
+/// Values at a particular instruction.
+static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data,
+ StatepointLiveSetTy &out);
+
+// TODO: Once we can get to the GCStrategy, this becomes
+// Optional<bool> isGCManagedPointer(const Value *V) const override {
+
+static bool isGCPointerType(const Type *T) {
+ if (const PointerType *PT = dyn_cast<PointerType>(T))
+ // For the sake of this example GC, we arbitrarily pick addrspace(1) as our
+ // GC managed heap. We know that a pointer into this heap needs to be
+ // updated and that no other pointer does.
+ return (1 == PT->getAddressSpace());
+ return false;
+}
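+
+// As a purely illustrative sketch (hypothetical IR, not taken from any test),
+// under the addrspace(1) convention above:
+//   %obj = load i8 addrspace(1)*, i8 addrspace(1)** %slot  ; GC managed
+//   %raw = load i8*, i8** %other                           ; not GC managed
+// isGCPointerType returns true only for the type of %obj.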
+
+// Return true if this type is one which a) is a gc pointer or contains a GC
+// pointer and b) is of a type this code expects to encounter as a live value.
+// (The insertion code will assert that a type which matches (a) and not (b)
+// is not encountered.)
+static bool isHandledGCPointerType(Type *T) {
+ // We fully support gc pointers
+ if (isGCPointerType(T))
+ return true;
+ // We partially support vectors of gc pointers. The code will assert if it
+ // can't handle something.
+ if (auto VT = dyn_cast<VectorType>(T))
+ if (isGCPointerType(VT->getElementType()))
+ return true;
+ return false;
+}
+
+#ifndef NDEBUG
+/// Returns true if this type contains a gc pointer whether we know how to
+/// handle that type or not.
+static bool containsGCPtrType(Type *Ty) {
+ if (isGCPointerType(Ty))
+ return true;
+ if (VectorType *VT = dyn_cast<VectorType>(Ty))
+ return isGCPointerType(VT->getScalarType());
+ if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
+ return containsGCPtrType(AT->getElementType());
+ if (StructType *ST = dyn_cast<StructType>(Ty))
+ return std::any_of(
+ ST->subtypes().begin(), ST->subtypes().end(),
+ [](Type *SubType) { return containsGCPtrType(SubType); });
+ return false;
+}
+
+// Returns true if this is a type which a) is a gc pointer or contains a GC
+// pointer and b) is of a type which the code doesn't expect (i.e. first class
+// aggregates). Used to trip assertions.
+static bool isUnhandledGCPointerType(Type *Ty) {
+ return containsGCPtrType(Ty) && !isHandledGCPointerType(Ty);
+}
+#endif
+
+static bool order_by_name(llvm::Value *a, llvm::Value *b) {
+ if (a->hasName() && b->hasName()) {
+ return -1 == a->getName().compare(b->getName());
+ } else if (a->hasName() && !b->hasName()) {
+ return true;
+ } else if (!a->hasName() && b->hasName()) {
+ return false;
+ } else {
+ // Better than nothing, but not stable
+ return a < b;
+ }
+}
+
+// Conservatively identifies any definitions which might be live at the
+// given instruction. The analysis is performed immediately before the
+// given instruction. Values defined by that instruction are not considered
+// live. Values used by that instruction are considered live.
+static void analyzeParsePointLiveness(
+ DominatorTree &DT, GCPtrLivenessData &OriginalLivenessData,
+ const CallSite &CS, PartiallyConstructedSafepointRecord &result) {
+ Instruction *inst = CS.getInstruction();
+
+ StatepointLiveSetTy liveset;
+ findLiveSetAtInst(inst, OriginalLivenessData, liveset);
+
+ if (PrintLiveSet) {
+ // Note: This output is used by several of the test cases
+    // The order of elements in a set is not stable; put them in a vector and
+    // sort by name.
+ SmallVector<Value *, 64> temp;
+ temp.insert(temp.end(), liveset.begin(), liveset.end());
+ std::sort(temp.begin(), temp.end(), order_by_name);
+ errs() << "Live Variables:\n";
+ for (Value *V : temp) {
+ errs() << " " << V->getName(); // no newline
+ V->dump();
+ }
+ }
+ if (PrintLiveSetSize) {
+ errs() << "Safepoint For: " << CS.getCalledValue()->getName() << "\n";
+ errs() << "Number live values: " << liveset.size() << "\n";
+ }
+ result.liveset = liveset;
+}
+
+static Value *findBaseDefiningValue(Value *I);
+
+/// If we can trivially determine that the element at the specified index of
+/// the given vector is a base pointer, return it. In cases where the entire
+/// vector is known to consist of base pointers, the entire vector will be
+/// returned. This indicates that the relevant extractelement is a valid base
+/// pointer and should be used directly.
+static Value *findBaseOfVector(Value *I, Value *Index) {
+ assert(I->getType()->isVectorTy() &&
+ cast<VectorType>(I->getType())->getElementType()->isPointerTy() &&
+ "Illegal to ask for the base pointer of a non-pointer type");
+
+ // Each case parallels findBaseDefiningValue below, see that code for
+ // detailed motivation.
+
+ if (isa<Argument>(I))
+ // An incoming argument to the function is a base pointer
+ return I;
+
+  // We shouldn't see the address of a global as a vector value.
+ assert(!isa<GlobalVariable>(I) &&
+ "unexpected global variable found in base of vector");
+
+  // Inlining could possibly introduce a phi node that contains
+  // undef if the callee has multiple returns.
+ if (isa<UndefValue>(I))
+ // utterly meaningless, but useful for dealing with partially optimized
+ // code.
+ return I;
+
+ // Due to inheritance, this must be _after_ the global variable and undef
+ // checks
+ if (Constant *Con = dyn_cast<Constant>(I)) {
+ assert(!isa<GlobalVariable>(I) && !isa<UndefValue>(I) &&
+ "order of checks wrong!");
+ assert(Con->isNullValue() && "null is the only case which makes sense");
+ return Con;
+ }
+
+ if (isa<LoadInst>(I))
+ return I;
+
+ // For an insert element, we might be able to look through it if we know
+ // something about the indexes, but if the indices are arbitrary values, we
+ // can't without much more extensive scalarization.
+ if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(I)) {
+ Value *InsertIndex = IEI->getOperand(2);
+    // This index is inserting the value, look for its base
+ if (InsertIndex == Index)
+ return findBaseDefiningValue(IEI->getOperand(1));
+    // Both are constants and can't be equal per the check above. This insert
+    // is definitely not relevant; look back at the rest of the vector and
+    // keep trying.
+ if (isa<ConstantInt>(Index) && isa<ConstantInt>(InsertIndex))
+ return findBaseOfVector(IEI->getOperand(0), Index);
+ }
+
+ // Note: This code is currently rather incomplete. We are essentially only
+ // handling cases where the vector element is trivially a base pointer. We
+ // need to update the entire base pointer construction algorithm to know how
+ // to track vector elements and potentially scalarize, but the case which
+ // would motivate the work hasn't shown up in real workloads yet.
+ llvm_unreachable("no base found for vector element");
+}
+
+/// Helper function for findBasePointer - Will return a value which either a)
+/// defines the base pointer for the input or b) blocks the simple search
+/// (i.e. a PHI or Select of two derived pointers)
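+///
+/// A rough illustration (hypothetical names): for %d = gep %p, i32 8 this
+/// walks to %p; if %p is itself an argument, load, or call result, that is
+/// the answer, while a phi or select of two derived pointers is returned
+/// as-is and left for findBasePointer to resolve.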
+static Value *findBaseDefiningValue(Value *I) {
+ assert(I->getType()->isPointerTy() &&
+ "Illegal to ask for the base pointer of a non-pointer type");
+
+ // This case is a bit of a hack - it only handles extracts from vectors which
+ // trivially contain only base pointers or cases where we can directly match
+ // the index of the original extract element to an insertion into the vector.
+ // See note inside the function for how to improve this.
+ if (auto *EEI = dyn_cast<ExtractElementInst>(I)) {
+ Value *VectorOperand = EEI->getVectorOperand();
+ Value *Index = EEI->getIndexOperand();
+ Value *VectorBase = findBaseOfVector(VectorOperand, Index);
+ // If the result returned is a vector, we know the entire vector must
+ // contain base pointers. In that case, the extractelement is a valid base
+ // for this value.
+ if (VectorBase->getType()->isVectorTy())
+ return EEI;
+ // Otherwise, we needed to look through the vector to find the base for
+ // this particular element.
+ assert(VectorBase->getType()->isPointerTy());
+ return VectorBase;
+ }
+
+ if (isa<Argument>(I))
+ // An incoming argument to the function is a base pointer
+    // We should never have reached here if this argument isn't a gc value.
+ return I;
+
+ if (isa<GlobalVariable>(I))
+ // base case
+ return I;
+
+  // Inlining could possibly introduce a phi node that contains
+  // undef if the callee has multiple returns.
+ if (isa<UndefValue>(I))
+ // utterly meaningless, but useful for dealing with
+ // partially optimized code.
+ return I;
+
+ // Due to inheritance, this must be _after_ the global variable and undef
+ // checks
+ if (Constant *Con = dyn_cast<Constant>(I)) {
+ assert(!isa<GlobalVariable>(I) && !isa<UndefValue>(I) &&
+ "order of checks wrong!");
+ // Note: Finding a constant base for something marked for relocation
+    // doesn't really make sense.  The most likely case is either a) someone
+    // screwed up the address space usage or b) you're validating against
+    // compiled C++ code w/o the proper separation.  The only real exception
+    // is a null pointer.  You could have generic code written to index off
+    // of a potentially null value and have proven it null.  We also use
+ // null pointers in dead paths of relocation phis (which we might later
+ // want to find a base pointer for).
+ assert(isa<ConstantPointerNull>(Con) &&
+ "null is the only case which makes sense");
+ return Con;
+ }
+
+ if (CastInst *CI = dyn_cast<CastInst>(I)) {
+ Value *Def = CI->stripPointerCasts();
+ // If we find a cast instruction here, it means we've found a cast which is
+ // not simply a pointer cast (i.e. an inttoptr). We don't know how to
+ // handle int->ptr conversion.
+ assert(!isa<CastInst>(Def) && "shouldn't find another cast here");
+ return findBaseDefiningValue(Def);
+ }
+
+ if (isa<LoadInst>(I))
+    return I; // The value loaded is a gc base itself
+
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
+ // The base of this GEP is the base
+ return findBaseDefiningValue(GEP->getPointerOperand());
+
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ switch (II->getIntrinsicID()) {
+ case Intrinsic::experimental_gc_result_ptr:
+ default:
+ // fall through to general call handling
+ break;
+ case Intrinsic::experimental_gc_statepoint:
+ case Intrinsic::experimental_gc_result_float:
+ case Intrinsic::experimental_gc_result_int:
+ llvm_unreachable("these don't produce pointers");
+ case Intrinsic::experimental_gc_relocate: {
+ // Rerunning safepoint insertion after safepoints are already
+ // inserted is not supported. It could probably be made to work,
+ // but why are you doing this? There's no good reason.
+ llvm_unreachable("repeat safepoint insertion is not supported");
+ }
+ case Intrinsic::gcroot:
+ // Currently, this mechanism hasn't been extended to work with gcroot.
+ // There's no reason it couldn't be, but I haven't thought about the
+ // implications much.
+ llvm_unreachable(
+ "interaction with the gcroot mechanism is not supported");
+ }
+ }
+ // We assume that functions in the source language only return base
+ // pointers. This should probably be generalized via attributes to support
+ // both source language and internal functions.
+ if (isa<CallInst>(I) || isa<InvokeInst>(I))
+ return I;
+
+ // I have absolutely no idea how to implement this part yet. It's not
+  // necessarily hard, I just haven't really looked at it yet.
+ assert(!isa<LandingPadInst>(I) && "Landing Pad is unimplemented");
+
+ if (isa<AtomicCmpXchgInst>(I))
+    // A CAS is effectively an atomic store and load combined under a
+ // predicate. From the perspective of base pointers, we just treat it
+ // like a load.
+ return I;
+
+ assert(!isa<AtomicRMWInst>(I) && "Xchg handled above, all others are "
+ "binary ops which don't apply to pointers");
+
+ // The aggregate ops. Aggregates can either be in the heap or on the
+ // stack, but in either case, this is simply a field load. As a result,
+ // this is a defining definition of the base just like a load is.
+ if (isa<ExtractValueInst>(I))
+ return I;
+
+ // We should never see an insert vector since that would require we be
+ // tracing back a struct value not a pointer value.
+ assert(!isa<InsertValueInst>(I) &&
+ "Base pointer for a struct is meaningless");
+
+ // The last two cases here don't return a base pointer. Instead, they
+  // return a value which dynamically selects from among several base
+  // derived pointers (each with its own base potentially).  It's the job of
+ // the caller to resolve these.
+ assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
+ "missing instruction case in findBaseDefiningValing");
+ return I;
+}
+
+/// Returns the base defining value for this value.
+static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache) {
+ Value *&Cached = Cache[I];
+ if (!Cached) {
+ Cached = findBaseDefiningValue(I);
+ }
+ assert(Cache[I] != nullptr);
+
+ if (TraceLSP) {
+ dbgs() << "fBDV-cached: " << I->getName() << " -> " << Cached->getName()
+ << "\n";
+ }
+ return Cached;
+}
+
+/// Return a base pointer for this value if known.  Otherwise, return its
+/// base defining value.
+static Value *findBaseOrBDV(Value *I, DefiningValueMapTy &Cache) {
+ Value *Def = findBaseDefiningValueCached(I, Cache);
+ auto Found = Cache.find(Def);
+ if (Found != Cache.end()) {
+ // Either a base-of relation, or a self reference. Caller must check.
+ return Found->second;
+ }
+ // Only a BDV available
+ return Def;
+}
+
+/// Given the result of a call to findBaseDefiningValue, or findBaseOrBDV,
+/// is it known to be a base pointer? Or do we need to continue searching.
+static bool isKnownBaseResult(Value *V) {
+ if (!isa<PHINode>(V) && !isa<SelectInst>(V)) {
+ // no recursion possible
+ return true;
+ }
+ if (isa<Instruction>(V) &&
+ cast<Instruction>(V)->getMetadata("is_base_value")) {
+ // This is a previously inserted base phi or select. We know
+ // that this is a base value.
+ return true;
+ }
+
+ // We need to keep searching
+ return false;
+}
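+
+// For instance, an Argument, a LoadInst, or a previously inserted base phi
+// (tagged with the "is_base_value" metadata checked above) are all known base
+// results, while an ordinary phi or select of derived pointers is not and is
+// resolved by the lattice walk in findBasePointer below.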
+
+// TODO: find a better name for this
+namespace {
+class PhiState {
+public:
+ enum Status { Unknown, Base, Conflict };
+
+ PhiState(Status s, Value *b = nullptr) : status(s), base(b) {
+ assert(status != Base || b);
+ }
+ PhiState(Value *b) : status(Base), base(b) {}
+ PhiState() : status(Unknown), base(nullptr) {}
+
+ Status getStatus() const { return status; }
+ Value *getBase() const { return base; }
+
+ bool isBase() const { return getStatus() == Base; }
+ bool isUnknown() const { return getStatus() == Unknown; }
+ bool isConflict() const { return getStatus() == Conflict; }
+
+ bool operator==(const PhiState &other) const {
+ return base == other.base && status == other.status;
+ }
+
+ bool operator!=(const PhiState &other) const { return !(*this == other); }
+
+ void dump() {
+ errs() << status << " (" << base << " - "
+ << (base ? base->getName() : "nullptr") << "): ";
+ }
+
+private:
+ Status status;
+ Value *base; // non null only if status == base
+};
+
+typedef DenseMap<Value *, PhiState> ConflictStateMapTy;
+// Values of type PhiState form a lattice, and this is a helper
+// class that implements the meet operation. The meat of the meet
+// operation is implemented in MeetPhiStates::pureMeet.
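+//
+// For reference, pureMeet below behaves as follows (B(x) denotes a Base state
+// with base value x, S any state):
+//   meet(Unknown,  S)    == S
+//   meet(B(x),     B(x)) == B(x)
+//   meet(B(x),     B(y)) == Conflict   (x != y)
+//   meet(Conflict, S)    == Conflict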
+class MeetPhiStates {
+public:
+ // phiStates is a mapping from PHINodes and SelectInst's to PhiStates.
+ explicit MeetPhiStates(const ConflictStateMapTy &phiStates)
+ : phiStates(phiStates) {}
+
+ // Destructively meet the current result with the base V. V can
+ // either be a merge instruction (SelectInst / PHINode), in which
+ // case its status is looked up in the phiStates map; or a regular
+ // SSA value, in which case it is assumed to be a base.
+ void meetWith(Value *V) {
+ PhiState otherState = getStateForBDV(V);
+ assert((MeetPhiStates::pureMeet(otherState, currentResult) ==
+ MeetPhiStates::pureMeet(currentResult, otherState)) &&
+ "math is wrong: meet does not commute!");
+ currentResult = MeetPhiStates::pureMeet(otherState, currentResult);
+ }
+
+ PhiState getResult() const { return currentResult; }
+
+private:
+ const ConflictStateMapTy &phiStates;
+ PhiState currentResult;
+
+ /// Return a phi state for a base defining value. We'll generate a new
+ /// base state for known bases and expect to find a cached state otherwise
+ PhiState getStateForBDV(Value *baseValue) {
+ if (isKnownBaseResult(baseValue)) {
+ return PhiState(baseValue);
+ } else {
+ return lookupFromMap(baseValue);
+ }
+ }
+
+ PhiState lookupFromMap(Value *V) {
+ auto I = phiStates.find(V);
+ assert(I != phiStates.end() && "lookup failed!");
+ return I->second;
+ }
+
+ static PhiState pureMeet(const PhiState &stateA, const PhiState &stateB) {
+ switch (stateA.getStatus()) {
+ case PhiState::Unknown:
+ return stateB;
+
+ case PhiState::Base:
+ assert(stateA.getBase() && "can't be null");
+ if (stateB.isUnknown())
+ return stateA;
+
+ if (stateB.isBase()) {
+ if (stateA.getBase() == stateB.getBase()) {
+ assert(stateA == stateB && "equality broken!");
+ return stateA;
+ }
+ return PhiState(PhiState::Conflict);
+ }
+ assert(stateB.isConflict() && "only three states!");
+ return PhiState(PhiState::Conflict);
+
+ case PhiState::Conflict:
+ return stateA;
+ }
+ llvm_unreachable("only three states!");
+ }
+};
+}
+/// For a given value or instruction, figure out what base ptr it's derived
+/// from. For gc objects, this is simply itself. On success, returns a value
+/// which is the base pointer. (This is reliable and can be used for
+/// relocation.) On failure, returns nullptr.
+static Value *findBasePointer(Value *I, DefiningValueMapTy &cache) {
+ Value *def = findBaseOrBDV(I, cache);
+
+ if (isKnownBaseResult(def)) {
+ return def;
+ }
+
+ // Here's the rough algorithm:
+ // - For every SSA value, construct a mapping to either an actual base
+ // pointer or a PHI which obscures the base pointer.
+ // - Construct a mapping from PHI to unknown TOP state. Use an
+ // optimistic algorithm to propagate base pointer information. Lattice
+ // looks like:
+ // UNKNOWN
+ // b1 b2 b3 b4
+ // CONFLICT
+ // When algorithm terminates, all PHIs will either have a single concrete
+ // base or be in a conflict state.
+ // - For every conflict, insert a dummy PHI node without arguments. Add
+ // these to the base[Instruction] = BasePtr mapping. For every
+ // non-conflict, add the actual base.
+ // - For every conflict, add arguments for the base[a] of each input
+ // arguments.
+ //
+ // Note: A simpler form of this would be to add the conflict form of all
+ // PHIs without running the optimistic algorithm. This would be
+  // analogous to pessimistic data flow and would likely lead to an
+ // overall worse solution.
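+  //
+  // A small worked sketch (hypothetical IR, names invented here): for
+  //   %d = phi i8 addrspace(1)* [ %base.a, %left ], [ %gep.b, %right ]
+  // where %gep.b is a gep off of %base.b, the fixed point computes
+  //   state[%d] = meet(Base(%base.a), Base(%base.b)) = Conflict,
+  // so a new "base_phi" choosing between %base.a and %base.b is inserted and
+  // recorded as the base of %d.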
+
+ ConflictStateMapTy states;
+ states[def] = PhiState();
+ // Recursively fill in all phis & selects reachable from the initial one
+ // for which we don't already know a definite base value for
+ // TODO: This should be rewritten with a worklist
+ bool done = false;
+ while (!done) {
+ done = true;
+ // Since we're adding elements to 'states' as we run, we can't keep
+ // iterators into the set.
+ SmallVector<Value *, 16> Keys;
+ Keys.reserve(states.size());
+ for (auto Pair : states) {
+ Value *V = Pair.first;
+ Keys.push_back(V);
+ }
+ for (Value *v : Keys) {
+ assert(!isKnownBaseResult(v) && "why did it get added?");
+ if (PHINode *phi = dyn_cast<PHINode>(v)) {
+ assert(phi->getNumIncomingValues() > 0 &&
+ "zero input phis are illegal");
+ for (Value *InVal : phi->incoming_values()) {
+ Value *local = findBaseOrBDV(InVal, cache);
+ if (!isKnownBaseResult(local) && states.find(local) == states.end()) {
+ states[local] = PhiState();
+ done = false;
+ }
+ }
+ } else if (SelectInst *sel = dyn_cast<SelectInst>(v)) {
+ Value *local = findBaseOrBDV(sel->getTrueValue(), cache);
+ if (!isKnownBaseResult(local) && states.find(local) == states.end()) {
+ states[local] = PhiState();
+ done = false;
+ }
+ local = findBaseOrBDV(sel->getFalseValue(), cache);
+ if (!isKnownBaseResult(local) && states.find(local) == states.end()) {
+ states[local] = PhiState();
+ done = false;
+ }
+ }
+ }
+ }
+
+ if (TraceLSP) {
+ errs() << "States after initialization:\n";
+ for (auto Pair : states) {
+ Instruction *v = cast<Instruction>(Pair.first);
+ PhiState state = Pair.second;
+ state.dump();
+ v->dump();
+ }
+ }
+
+ // TODO: come back and revisit the state transitions around inputs which
+ // have reached conflict state. The current version seems too conservative.
+
+ bool progress = true;
+ while (progress) {
+#ifndef NDEBUG
+ size_t oldSize = states.size();
+#endif
+ progress = false;
+ // We're only changing keys in this loop, thus safe to keep iterators
+ for (auto Pair : states) {
+ MeetPhiStates calculateMeet(states);
+ Value *v = Pair.first;
+ assert(!isKnownBaseResult(v) && "why did it get added?");
+ if (SelectInst *select = dyn_cast<SelectInst>(v)) {
+ calculateMeet.meetWith(findBaseOrBDV(select->getTrueValue(), cache));
+ calculateMeet.meetWith(findBaseOrBDV(select->getFalseValue(), cache));
+ } else
+ for (Value *Val : cast<PHINode>(v)->incoming_values())
+ calculateMeet.meetWith(findBaseOrBDV(Val, cache));
+
+ PhiState oldState = states[v];
+ PhiState newState = calculateMeet.getResult();
+ if (oldState != newState) {
+ progress = true;
+ states[v] = newState;
+ }
+ }
+
+ assert(oldSize <= states.size());
+ assert(oldSize == states.size() || progress);
+ }
+
+ if (TraceLSP) {
+ errs() << "States after meet iteration:\n";
+ for (auto Pair : states) {
+ Instruction *v = cast<Instruction>(Pair.first);
+ PhiState state = Pair.second;
+ state.dump();
+ v->dump();
+ }
+ }
+
+ // Insert Phis for all conflicts
+ // We want to keep naming deterministic in the loop that follows, so
+ // sort the keys before iteration. This is useful in allowing us to
+ // write stable tests. Note that there is no invalidation issue here.
+ SmallVector<Value *, 16> Keys;
+ Keys.reserve(states.size());
+ for (auto Pair : states) {
+ Value *V = Pair.first;
+ Keys.push_back(V);
+ }
+ std::sort(Keys.begin(), Keys.end(), order_by_name);
+ // TODO: adjust naming patterns to avoid this order of iteration dependency
+ for (Value *V : Keys) {
+ Instruction *v = cast<Instruction>(V);
+ PhiState state = states[V];
+ assert(!isKnownBaseResult(v) && "why did it get added?");
+ assert(!state.isUnknown() && "Optimistic algorithm didn't complete!");
+ if (!state.isConflict())
+ continue;
+
+ if (isa<PHINode>(v)) {
+ int num_preds =
+ std::distance(pred_begin(v->getParent()), pred_end(v->getParent()));
+ assert(num_preds > 0 && "how did we reach here");
+ PHINode *phi = PHINode::Create(v->getType(), num_preds, "base_phi", v);
+ // Add metadata marking this as a base value
+ auto *const_1 = ConstantInt::get(
+ Type::getInt32Ty(
+ v->getParent()->getParent()->getParent()->getContext()),
+ 1);
+ auto MDConst = ConstantAsMetadata::get(const_1);
+ MDNode *md = MDNode::get(
+ v->getParent()->getParent()->getParent()->getContext(), MDConst);
+ phi->setMetadata("is_base_value", md);
+ states[v] = PhiState(PhiState::Conflict, phi);
+ } else {
+ SelectInst *sel = cast<SelectInst>(v);
+ // The undef will be replaced later
+ UndefValue *undef = UndefValue::get(sel->getType());
+ SelectInst *basesel = SelectInst::Create(sel->getCondition(), undef,
+ undef, "base_select", sel);
+ // Add metadata marking this as a base value
+ auto *const_1 = ConstantInt::get(
+ Type::getInt32Ty(
+ v->getParent()->getParent()->getParent()->getContext()),
+ 1);
+ auto MDConst = ConstantAsMetadata::get(const_1);
+ MDNode *md = MDNode::get(
+ v->getParent()->getParent()->getParent()->getContext(), MDConst);
+ basesel->setMetadata("is_base_value", md);
+ states[v] = PhiState(PhiState::Conflict, basesel);
+ }
+ }
+
+ // Fixup all the inputs of the new PHIs
+ for (auto Pair : states) {
+ Instruction *v = cast<Instruction>(Pair.first);
+ PhiState state = Pair.second;
+
+ assert(!isKnownBaseResult(v) && "why did it get added?");
+ assert(!state.isUnknown() && "Optimistic algorithm didn't complete!");
+ if (!state.isConflict())
+ continue;
+
+ if (PHINode *basephi = dyn_cast<PHINode>(state.getBase())) {
+ PHINode *phi = cast<PHINode>(v);
+ unsigned NumPHIValues = phi->getNumIncomingValues();
+ for (unsigned i = 0; i < NumPHIValues; i++) {
+ Value *InVal = phi->getIncomingValue(i);
+ BasicBlock *InBB = phi->getIncomingBlock(i);
+
+ // If we've already seen InBB, add the same incoming value
+ // we added for it earlier. The IR verifier requires phi
+ // nodes with multiple entries from the same basic block
+ // to have the same incoming value for each of those
+ // entries. If we don't do this check here and basephi
+ // has a different type than base, we'll end up adding two
+ // bitcasts (and hence two distinct values) as incoming
+ // values for the same basic block.
+
+ int blockIndex = basephi->getBasicBlockIndex(InBB);
+ if (blockIndex != -1) {
+ Value *oldBase = basephi->getIncomingValue(blockIndex);
+ basephi->addIncoming(oldBase, InBB);
+#ifndef NDEBUG
+ Value *base = findBaseOrBDV(InVal, cache);
+ if (!isKnownBaseResult(base)) {
+ // Either conflict or base.
+ assert(states.count(base));
+ base = states[base].getBase();
+ assert(base != nullptr && "unknown PhiState!");
+ }
+
+          // In essence, this assert states: the only way two
+ // values incoming from the same basic block may be
+ // different is by being different bitcasts of the same
+ // value. A cleanup that remains TODO is changing
+ // findBaseOrBDV to return an llvm::Value of the correct
+ // type (and still remain pure). This will remove the
+ // need to add bitcasts.
+ assert(base->stripPointerCasts() == oldBase->stripPointerCasts() &&
+ "sanity -- findBaseOrBDV should be pure!");
+#endif
+ continue;
+ }
+
+ // Find either the defining value for the PHI or the normal base for
+ // a non-phi node
+ Value *base = findBaseOrBDV(InVal, cache);
+ if (!isKnownBaseResult(base)) {
+ // Either conflict or base.
+ assert(states.count(base));
+ base = states[base].getBase();
+ assert(base != nullptr && "unknown PhiState!");
+ }
+ assert(base && "can't be null");
+ // Must use original input BB since base may not be Instruction
+ // The cast is needed since base traversal may strip away bitcasts
+ if (base->getType() != basephi->getType()) {
+ base = new BitCastInst(base, basephi->getType(), "cast",
+ InBB->getTerminator());
+ }
+ basephi->addIncoming(base, InBB);
+ }
+ assert(basephi->getNumIncomingValues() == NumPHIValues);
+ } else {
+ SelectInst *basesel = cast<SelectInst>(state.getBase());
+ SelectInst *sel = cast<SelectInst>(v);
+ // Operand 1 & 2 are true, false path respectively. TODO: refactor to
+ // something more safe and less hacky.
+ for (int i = 1; i <= 2; i++) {
+ Value *InVal = sel->getOperand(i);
+ // Find either the defining value for the PHI or the normal base for
+ // a non-phi node
+ Value *base = findBaseOrBDV(InVal, cache);
+ if (!isKnownBaseResult(base)) {
+ // Either conflict or base.
+ assert(states.count(base));
+ base = states[base].getBase();
+ assert(base != nullptr && "unknown PhiState!");
+ }
+ assert(base && "can't be null");
+ // Must use original input BB since base may not be Instruction
+ // The cast is needed since base traversal may strip away bitcasts
+ if (base->getType() != basesel->getType()) {
+ base = new BitCastInst(base, basesel->getType(), "cast", basesel);
+ }
+ basesel->setOperand(i, base);
+ }
+ }
+ }
+
+ // Cache all of our results so we can cheaply reuse them
+ // NOTE: This is actually two caches: one of the base defining value
+ // relation and one of the base pointer relation! FIXME
+ for (auto item : states) {
+ Value *v = item.first;
+ Value *base = item.second.getBase();
+ assert(v && base);
+ assert(!isKnownBaseResult(v) && "why did it get added?");
+
+ if (TraceLSP) {
+ std::string fromstr =
+ cache.count(v) ? (cache[v]->hasName() ? cache[v]->getName() : "")
+ : "none";
+ errs() << "Updating base value cache"
+ << " for: " << (v->hasName() ? v->getName() : "")
+ << " from: " << fromstr
+ << " to: " << (base->hasName() ? base->getName() : "") << "\n";
+ }
+
+ assert(isKnownBaseResult(base) &&
+ "must be something we 'know' is a base pointer");
+ if (cache.count(v)) {
+      // Once we transition from the BDV relation being stored in the cache to
+      // the base relation being stored, it must be stable.
+ assert((!isKnownBaseResult(cache[v]) || cache[v] == base) &&
+ "base relation should be stable");
+ }
+ cache[v] = base;
+ }
+ assert(cache.find(def) != cache.end());
+ return cache[def];
+}
+
+// For a set of live pointers (base and/or derived), identify the base
+// pointer of the object which they are derived from. This routine will
+// mutate the IR graph as needed to make the 'base' pointer live at the
+// definition site of 'derived'. This ensures that any use of 'derived' can
+// also use 'base'. This may involve the insertion of a number of
+// additional PHI nodes.
+//
+// preconditions: live is a set of pointer type Values
+//
+// side effects: may insert PHI nodes into the existing CFG, will preserve
+// CFG, will not remove or mutate any existing nodes
+//
+// post condition: PointerToBase contains one (derived, base) pair for every
+// pointer in live. Note that derived can be equal to base if the original
+// pointer was a base pointer.
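+//
+// For example (hypothetical names): if live = { %o, %o.gep } and %o.gep is a
+// gep derived from %o, the resulting map is
+//   %o     -> %o
+//   %o.gep -> %o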
+static void
+findBasePointers(const StatepointLiveSetTy &live,
+ DenseMap<llvm::Value *, llvm::Value *> &PointerToBase,
+ DominatorTree *DT, DefiningValueMapTy &DVCache) {
+ // For the naming of values inserted to be deterministic - which makes for
+ // much cleaner and more stable tests - we need to assign an order to the
+ // live values. DenseSets do not provide a deterministic order across runs.
+ SmallVector<Value *, 64> Temp;
+ Temp.insert(Temp.end(), live.begin(), live.end());
+ std::sort(Temp.begin(), Temp.end(), order_by_name);
+ for (Value *ptr : Temp) {
+ Value *base = findBasePointer(ptr, DVCache);
+ assert(base && "failed to find base pointer");
+ PointerToBase[ptr] = base;
+ assert((!isa<Instruction>(base) || !isa<Instruction>(ptr) ||
+ DT->dominates(cast<Instruction>(base)->getParent(),
+ cast<Instruction>(ptr)->getParent())) &&
+ "The base we found better dominate the derived pointer");
+
+    // If you see this trip and like to live really dangerously, the code
+    // should be correct, just with idioms the verifier can't handle.  You can
+    // try disabling the verifier at your own substantial risk.
+ assert(!isa<ConstantPointerNull>(base) &&
+ "the relocation code needs adjustment to handle the relocation of "
+ "a null pointer constant without causing false positives in the "
+ "safepoint ir verifier.");
+ }
+}
+
+/// Find the required base pointers (and adjust the live set) for the given
+/// parse point.
+static void findBasePointers(DominatorTree &DT, DefiningValueMapTy &DVCache,
+ const CallSite &CS,
+ PartiallyConstructedSafepointRecord &result) {
+ DenseMap<llvm::Value *, llvm::Value *> PointerToBase;
+ findBasePointers(result.liveset, PointerToBase, &DT, DVCache);
+
+ if (PrintBasePointers) {
+ // Note: Need to print these in a stable order since this is checked in
+ // some tests.
+ errs() << "Base Pairs (w/o Relocation):\n";
+ SmallVector<Value *, 64> Temp;
+ Temp.reserve(PointerToBase.size());
+ for (auto Pair : PointerToBase) {
+ Temp.push_back(Pair.first);
+ }
+ std::sort(Temp.begin(), Temp.end(), order_by_name);
+ for (Value *Ptr : Temp) {
+ Value *Base = PointerToBase[Ptr];
+ errs() << " derived %" << Ptr->getName() << " base %" << Base->getName()
+ << "\n";
+ }
+ }
+
+ result.PointerToBase = PointerToBase;
+}
+
+/// Given an updated version of the dataflow liveness results, update the
+/// liveset and base pointer maps for the call site CS.
+static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
+ const CallSite &CS,
+ PartiallyConstructedSafepointRecord &result);
+
+static void recomputeLiveInValues(
+ Function &F, DominatorTree &DT, Pass *P, ArrayRef<CallSite> toUpdate,
+ MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) {
+ // TODO-PERF: reuse the original liveness, then simply run the dataflow
+  // again.  The old values are still live and will help it stabilize quickly.
+ GCPtrLivenessData RevisedLivenessData;
+ computeLiveInValues(DT, F, RevisedLivenessData);
+ for (size_t i = 0; i < records.size(); i++) {
+ struct PartiallyConstructedSafepointRecord &info = records[i];
+ const CallSite &CS = toUpdate[i];
+ recomputeLiveInValues(RevisedLivenessData, CS, info);
+ }
+}
+
+// When inserting gc.relocate calls, we need to ensure there are no uses
+// of the original value between the gc.statepoint and the gc.relocate call.
+// One case which can arise is a phi node at the start of one of the successor
+// blocks. We also need to be able to insert the gc.relocates only on the path
+// which goes through the statepoint. We might need to split an edge to make
+// this possible.
+static BasicBlock *
+normalizeForInvokeSafepoint(BasicBlock *BB, BasicBlock *InvokeParent, Pass *P) {
+ DominatorTree *DT = nullptr;
+ if (auto *DTP = P->getAnalysisIfAvailable<DominatorTreeWrapperPass>())
+ DT = &DTP->getDomTree();
+
+ BasicBlock *Ret = BB;
+ if (!BB->getUniquePredecessor()) {
+ Ret = SplitBlockPredecessors(BB, InvokeParent, "", nullptr, DT);
+ }
+
+ // Now that 'ret' has unique predecessor we can safely remove all phi nodes
+ // from it
+ FoldSingleEntryPHINodes(Ret);
+ assert(!isa<PHINode>(Ret->begin()));
+
+ // At this point, we can safely insert a gc.relocate as the first instruction
+ // in Ret if needed.
+ return Ret;
+}
+
+static int find_index(ArrayRef<Value *> livevec, Value *val) {
+ auto itr = std::find(livevec.begin(), livevec.end(), val);
+ assert(livevec.end() != itr);
+ size_t index = std::distance(livevec.begin(), itr);
+ assert(index < livevec.size());
+ return index;
+}
+
+// Create a new attribute set containing only attributes which can be
+// transferred from the original call to the safepoint.
+static AttributeSet legalizeCallAttributes(AttributeSet AS) {
+ AttributeSet ret;
+
+ for (unsigned Slot = 0; Slot < AS.getNumSlots(); Slot++) {
+ unsigned index = AS.getSlotIndex(Slot);
+
+ if (index == AttributeSet::ReturnIndex ||
+ index == AttributeSet::FunctionIndex) {
+
+ for (auto it = AS.begin(Slot), it_end = AS.end(Slot); it != it_end;
+ ++it) {
+ Attribute attr = *it;
+
+ // Do not allow certain attributes - just skip them
+        // A safepoint cannot be readonly or readnone.
+ if (attr.hasAttribute(Attribute::ReadNone) ||
+ attr.hasAttribute(Attribute::ReadOnly))
+ continue;
+
+ ret = ret.addAttributes(
+ AS.getContext(), index,
+ AttributeSet::get(AS.getContext(), index, AttrBuilder(attr)));
+ }
+ }
+
+ // Just skip parameter attributes for now
+ }
+
+ return ret;
+}
+
+/// Helper function to place all gc relocates necessary for the given
+/// statepoint.
+/// Inputs:
+/// liveVariables - list of variables to be relocated.
+/// liveStart - index of the first live variable.
+/// basePtrs - base pointers.
+/// statepointToken - statepoint instruction to which relocates should be
+/// bound.
+/// Builder - LLVM IR builder to be used to construct new calls.
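+///
+/// For illustration only (hypothetical value names; the exact intrinsic
+/// suffix comes from its type mangling), an emitted relocate looks roughly
+/// like:
+///   %p.relocated = call coldcc i8 addrspace(1)*
+///       @llvm.experimental.gc.relocate.p1i8(<token>, i32 BaseIdx,
+///                                           i32 LiveIdx)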
+static void CreateGCRelocates(ArrayRef<llvm::Value *> LiveVariables,
+ const int LiveStart,
+ ArrayRef<llvm::Value *> BasePtrs,
+ Instruction *StatepointToken,
+ IRBuilder<> Builder) {
+ SmallVector<Instruction *, 64> NewDefs;
+ NewDefs.reserve(LiveVariables.size());
+
+ Module *M = StatepointToken->getParent()->getParent()->getParent();
+
+ for (unsigned i = 0; i < LiveVariables.size(); i++) {
+ // We generate a (potentially) unique declaration for every pointer type
+    // combination. This results in some blowup of the function declarations in
+ // the IR, but removes the need for argument bitcasts which shrinks the IR
+ // greatly and makes it much more readable.
+ SmallVector<Type *, 1> Types; // one per 'any' type
+ // All gc_relocate are set to i8 addrspace(1)* type. This could help avoid
+ // cases where the actual value's type mangling is not supported by llvm. A
+ // bitcast is added later to convert gc_relocate to the actual value's type.
+ Types.push_back(Type::getInt8PtrTy(M->getContext(), 1));
+ Value *GCRelocateDecl = Intrinsic::getDeclaration(
+ M, Intrinsic::experimental_gc_relocate, Types);
+
+ // Generate the gc.relocate call and save the result
+ Value *BaseIdx =
+ ConstantInt::get(Type::getInt32Ty(M->getContext()),
+ LiveStart + find_index(LiveVariables, BasePtrs[i]));
+ Value *LiveIdx = ConstantInt::get(
+ Type::getInt32Ty(M->getContext()),
+ LiveStart + find_index(LiveVariables, LiveVariables[i]));
+
+ // only specify a debug name if we can give a useful one
+ Value *Reloc = Builder.CreateCall(
+ GCRelocateDecl, {StatepointToken, BaseIdx, LiveIdx},
+ LiveVariables[i]->hasName() ? LiveVariables[i]->getName() + ".relocated"
+ : "");
+ // Trick CodeGen into thinking there are lots of free registers at this
+ // fake call.
+ cast<CallInst>(Reloc)->setCallingConv(CallingConv::Cold);
+
+ NewDefs.push_back(cast<Instruction>(Reloc));
+ }
+ assert(NewDefs.size() == LiveVariables.size() &&
+ "missing or extra redefinition at safepoint");
+}
+
+static void
+makeStatepointExplicitImpl(const CallSite &CS, /* to replace */
+ const SmallVectorImpl<llvm::Value *> &basePtrs,
+ const SmallVectorImpl<llvm::Value *> &liveVariables,
+ Pass *P,
+ PartiallyConstructedSafepointRecord &result) {
+ assert(basePtrs.size() == liveVariables.size());
+ assert(isStatepoint(CS) &&
+ "This method expects to be rewriting a statepoint");
+
+ BasicBlock *BB = CS.getInstruction()->getParent();
+ assert(BB);
+ Function *F = BB->getParent();
+ assert(F && "must be set");
+ Module *M = F->getParent();
+ (void)M;
+ assert(M && "must be set");
+
+ // We're not changing the function signature of the statepoint since the gc
+ // arguments go into the var args section.
+ Function *gc_statepoint_decl = CS.getCalledFunction();
+
+  // Then go ahead and use the builder to actually do the inserts. We insert
+ // immediately before the previous instruction under the assumption that all
+ // arguments will be available here. We can't insert afterwards since we may
+ // be replacing a terminator.
+ Instruction *insertBefore = CS.getInstruction();
+ IRBuilder<> Builder(insertBefore);
+ // Copy all of the arguments from the original statepoint - this includes the
+ // target, call args, and deopt args
+ SmallVector<llvm::Value *, 64> args;
+ args.insert(args.end(), CS.arg_begin(), CS.arg_end());
+ // TODO: Clear the 'needs rewrite' flag
+
+ // add all the pointers to be relocated (gc arguments)
+ // Capture the start of the live variable list for use in the gc_relocates
+ const int live_start = args.size();
+ args.insert(args.end(), liveVariables.begin(), liveVariables.end());
+
+ // Create the statepoint given all the arguments
+ Instruction *token = nullptr;
+ AttributeSet return_attributes;
+ if (CS.isCall()) {
+ CallInst *toReplace = cast<CallInst>(CS.getInstruction());
+ CallInst *call =
+ Builder.CreateCall(gc_statepoint_decl, args, "safepoint_token");
+ call->setTailCall(toReplace->isTailCall());
+ call->setCallingConv(toReplace->getCallingConv());
+
+ // Currently we will fail on parameter attributes and on certain
+ // function attributes.
+ AttributeSet new_attrs = legalizeCallAttributes(toReplace->getAttributes());
+    // If we can handle this set of attributes, set up the function attrs
+    // directly on statepoint and return attrs later for gc_result intrinsic.
+ call->setAttributes(new_attrs.getFnAttributes());
+ return_attributes = new_attrs.getRetAttributes();
+
+ token = call;
+
+    // Put the following gc_result and gc_relocate calls immediately after
+    // the old call (which we're about to delete).
+ BasicBlock::iterator next(toReplace);
+ assert(BB->end() != next && "not a terminator, must have next");
+ next++;
+ Instruction *IP = &*(next);
+ Builder.SetInsertPoint(IP);
+ Builder.SetCurrentDebugLocation(IP->getDebugLoc());
+
+ } else {
+ InvokeInst *toReplace = cast<InvokeInst>(CS.getInstruction());
+
+ // Insert the new invoke into the old block. We'll remove the old one in a
+ // moment at which point this will become the new terminator for the
+ // original block.
+ InvokeInst *invoke = InvokeInst::Create(
+ gc_statepoint_decl, toReplace->getNormalDest(),
+ toReplace->getUnwindDest(), args, "", toReplace->getParent());
+ invoke->setCallingConv(toReplace->getCallingConv());
+
+ // Currently we will fail on parameter attributes and on certain
+ // function attributes.
+ AttributeSet new_attrs = legalizeCallAttributes(toReplace->getAttributes());
+    // If we can handle this set of attributes, set up the function attrs
+    // directly on statepoint and return attrs later for gc_result intrinsic.
+ invoke->setAttributes(new_attrs.getFnAttributes());
+ return_attributes = new_attrs.getRetAttributes();
+
+ token = invoke;
+
+ // Generate gc relocates in exceptional path
+ BasicBlock *unwindBlock = toReplace->getUnwindDest();
+ assert(!isa<PHINode>(unwindBlock->begin()) &&
+ unwindBlock->getUniquePredecessor() &&
+ "can't safely insert in this block!");
+
+ Instruction *IP = &*(unwindBlock->getFirstInsertionPt());
+ Builder.SetInsertPoint(IP);
+ Builder.SetCurrentDebugLocation(toReplace->getDebugLoc());
+
+ // Extract second element from landingpad return value. We will attach
+ // exceptional gc relocates to it.
+ const unsigned idx = 1;
+ Instruction *exceptional_token =
+ cast<Instruction>(Builder.CreateExtractValue(
+ unwindBlock->getLandingPadInst(), idx, "relocate_token"));
+ result.UnwindToken = exceptional_token;
+
+ // Just throw away return value. We will use the one we got for normal
+ // block.
+ (void)CreateGCRelocates(liveVariables, live_start, basePtrs,
+ exceptional_token, Builder);
+
+ // Generate gc relocates and returns for normal block
+ BasicBlock *normalDest = toReplace->getNormalDest();
+ assert(!isa<PHINode>(normalDest->begin()) &&
+ normalDest->getUniquePredecessor() &&
+ "can't safely insert in this block!");
+
+ IP = &*(normalDest->getFirstInsertionPt());
+ Builder.SetInsertPoint(IP);
+
+    // gc relocates will be generated later as if this were a regular call
+    // statepoint.
+ }
+ assert(token);
+
+ // Take the name of the original value call if it had one.
+ token->takeName(CS.getInstruction());
+
+// The GCResult is already inserted, we just need to find it
+#ifndef NDEBUG
+ Instruction *toReplace = CS.getInstruction();
+ assert((toReplace->hasNUses(0) || toReplace->hasNUses(1)) &&
+ "only valid use before rewrite is gc.result");
+ assert(!toReplace->hasOneUse() ||
+ isGCResult(cast<Instruction>(*toReplace->user_begin())));
+#endif
+
+ // Update the gc.result of the original statepoint (if any) to use the newly
+ // inserted statepoint. This is safe to do here since the token can't be
+ // considered a live reference.
+ CS.getInstruction()->replaceAllUsesWith(token);
+
+ result.StatepointToken = token;
+
+ // Second, create a gc.relocate for every live variable
+ CreateGCRelocates(liveVariables, live_start, basePtrs, token, Builder);
+}
+
+namespace {
+struct name_ordering {
+ Value *base;
+ Value *derived;
+ bool operator()(name_ordering const &a, name_ordering const &b) {
+ return -1 == a.derived->getName().compare(b.derived->getName());
+ }
+};
+}
+static void stablize_order(SmallVectorImpl<Value *> &basevec,
+ SmallVectorImpl<Value *> &livevec) {
+ assert(basevec.size() == livevec.size());
+
+ SmallVector<name_ordering, 64> temp;
+ for (size_t i = 0; i < basevec.size(); i++) {
+ name_ordering v;
+ v.base = basevec[i];
+ v.derived = livevec[i];
+ temp.push_back(v);
+ }
+ std::sort(temp.begin(), temp.end(), name_ordering());
+ for (size_t i = 0; i < basevec.size(); i++) {
+ basevec[i] = temp[i].base;
+ livevec[i] = temp[i].derived;
+ }
+}
+
+// Replace an existing gc.statepoint with a new one and a set of gc.relocates
+// which make the relocations happening at this safepoint explicit.
+//
+// WARNING: Does not do any fixup to adjust users of the original live
+// values. That's the caller's responsibility.
+static void
+makeStatepointExplicit(DominatorTree &DT, const CallSite &CS, Pass *P,
+ PartiallyConstructedSafepointRecord &result) {
+ auto liveset = result.liveset;
+ auto PointerToBase = result.PointerToBase;
+
+ // Convert to vector for efficient cross referencing.
+ SmallVector<Value *, 64> basevec, livevec;
+ livevec.reserve(liveset.size());
+ basevec.reserve(liveset.size());
+ for (Value *L : liveset) {
+ livevec.push_back(L);
+
+ assert(PointerToBase.find(L) != PointerToBase.end());
+ Value *base = PointerToBase[L];
+ basevec.push_back(base);
+ }
+ assert(livevec.size() == basevec.size());
+
+ // To make the output IR slightly more stable (for use in diffs), ensure a
+ // fixed order of the values in the safepoint (by sorting the value name).
+ // The order is otherwise meaningless.
+ stablize_order(basevec, livevec);
+
+ // Do the actual rewriting and delete the old statepoint
+ makeStatepointExplicitImpl(CS, basevec, livevec, P, result);
+ CS.getInstruction()->eraseFromParent();
+}
+
+// Helper function for relocationViaAlloca.
+// It receives an iterator range over the statepoint's gc relocates and emits
+// a store to the assigned location (via AllocaMap) for each one of them.
+// Visited values are added to the VisitedLiveValues set, which we will later
+// use for a sanity check.
+static void
+insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs,
+ DenseMap<Value *, Value *> &AllocaMap,
+ DenseSet<Value *> &VisitedLiveValues) {
+
+ for (User *U : GCRelocs) {
+ if (!isa<IntrinsicInst>(U))
+ continue;
+
+ IntrinsicInst *RelocatedValue = cast<IntrinsicInst>(U);
+
+ // We only care about relocates
+ if (RelocatedValue->getIntrinsicID() !=
+ Intrinsic::experimental_gc_relocate) {
+ continue;
+ }
+
+ GCRelocateOperands RelocateOperands(RelocatedValue);
+ Value *OriginalValue =
+ const_cast<Value *>(RelocateOperands.getDerivedPtr());
+ assert(AllocaMap.count(OriginalValue));
+ Value *Alloca = AllocaMap[OriginalValue];
+
+ // Emit store into the related alloca
+ // All gc_relocate are i8 addrspace(1)* typed, and it must be bitcasted to
+ // the correct type according to alloca.
+    assert(RelocatedValue->getNextNode() &&
+           "Should always have one since it's not a terminator");
+    IRBuilder<> Builder(RelocatedValue->getNextNode());
+    Value *CastedRelocatedValue = Builder.CreateBitCast(
+        RelocatedValue, cast<AllocaInst>(Alloca)->getAllocatedType(),
+        RelocatedValue->hasName() ? RelocatedValue->getName() + ".casted"
+                                  : "");
+
+ StoreInst *Store = new StoreInst(CastedRelocatedValue, Alloca);
+ Store->insertAfter(cast<Instruction>(CastedRelocatedValue));
+
+#ifndef NDEBUG
+ VisitedLiveValues.insert(OriginalValue);
+#endif
+ }
+}
+
+// Helper function for the "relocationViaAlloca". Similar to the
+// "insertRelocationStores" but works for rematerialized values.
+static void
+insertRematerializationStores(
+ RematerializedValueMapTy RematerializedValues,
+ DenseMap<Value *, Value *> &AllocaMap,
+ DenseSet<Value *> &VisitedLiveValues) {
+
+ for (auto RematerializedValuePair: RematerializedValues) {
+ Instruction *RematerializedValue = RematerializedValuePair.first;
+ Value *OriginalValue = RematerializedValuePair.second;
+
+ assert(AllocaMap.count(OriginalValue) &&
+ "Can not find alloca for rematerialized value");
+ Value *Alloca = AllocaMap[OriginalValue];
+
+ StoreInst *Store = new StoreInst(RematerializedValue, Alloca);
+ Store->insertAfter(RematerializedValue);
+
+#ifndef NDEBUG
+ VisitedLiveValues.insert(OriginalValue);
+#endif
+ }
+}
+
+/// do all the relocation update via allocas and mem2reg
+static void relocationViaAlloca(
+ Function &F, DominatorTree &DT, ArrayRef<Value *> Live,
+ ArrayRef<struct PartiallyConstructedSafepointRecord> Records) {
+#ifndef NDEBUG
+ // record initial number of (static) allocas; we'll check we have the same
+ // number when we get done.
+ int InitialAllocaNum = 0;
+ for (auto I = F.getEntryBlock().begin(), E = F.getEntryBlock().end(); I != E;
+ I++)
+ if (isa<AllocaInst>(*I))
+ InitialAllocaNum++;
+#endif
+
+ // TODO-PERF: change data structures, reserve
+ DenseMap<Value *, Value *> AllocaMap;
+ SmallVector<AllocaInst *, 200> PromotableAllocas;
+  // Used later to check that we have enough allocas to store all values
+ std::size_t NumRematerializedValues = 0;
+ PromotableAllocas.reserve(Live.size());
+
+ // Emit alloca for "LiveValue" and record it in "allocaMap" and
+ // "PromotableAllocas"
+ auto emitAllocaFor = [&](Value *LiveValue) {
+ AllocaInst *Alloca = new AllocaInst(LiveValue->getType(), "",
+ F.getEntryBlock().getFirstNonPHI());
+ AllocaMap[LiveValue] = Alloca;
+ PromotableAllocas.push_back(Alloca);
+ };
+
+ // emit alloca for each live gc pointer
+ for (unsigned i = 0; i < Live.size(); i++) {
+ emitAllocaFor(Live[i]);
+ }
+
+ // emit allocas for rematerialized values
+ for (size_t i = 0; i < Records.size(); i++) {
+ const struct PartiallyConstructedSafepointRecord &Info = Records[i];
+
+ for (auto RematerializedValuePair : Info.RematerializedValues) {
+ Value *OriginalValue = RematerializedValuePair.second;
+ if (AllocaMap.count(OriginalValue) != 0)
+ continue;
+
+ emitAllocaFor(OriginalValue);
+ ++NumRematerializedValues;
+ }
+ }
+
+ // The next two loops are part of the same conceptual operation. We need to
+ // insert a store to the alloca after the original def and at each
+ // redefinition. We need to insert a load before each use. These are split
+ // into distinct loops for performance reasons.
+
+  // Update the gc pointer after each statepoint: either store a relocated
+  // value or null (if no relocated value was found for this gc pointer and it
+  // is not a gc_result). This must happen before we update the statepoint
+  // with a load of the alloca; otherwise we lose the link between the
+  // statepoint and the old def.
+ for (size_t i = 0; i < Records.size(); i++) {
+ const struct PartiallyConstructedSafepointRecord &Info = Records[i];
+ Value *Statepoint = Info.StatepointToken;
+
+ // This will be used for consistency check
+ DenseSet<Value *> VisitedLiveValues;
+
+ // Insert stores for normal statepoint gc relocates
+ insertRelocationStores(Statepoint->users(), AllocaMap, VisitedLiveValues);
+
+ // In case if it was invoke statepoint
+ // we will insert stores for exceptional path gc relocates.
+ if (isa<InvokeInst>(Statepoint)) {
+ insertRelocationStores(Info.UnwindToken->users(), AllocaMap,
+ VisitedLiveValues);
+ }
+
+ // Do similar thing with rematerialized values
+ insertRematerializationStores(Info.RematerializedValues, AllocaMap,
+ VisitedLiveValues);
+
+ if (ClobberNonLive) {
+      // As a debugging aid, pretend that an unrelocated pointer becomes null at
+ // the gc.statepoint. This will turn some subtle GC problems into
+ // slightly easier to debug SEGVs. Note that on large IR files with
+ // lots of gc.statepoints this is extremely costly both memory and time
+ // wise.
+ SmallVector<AllocaInst *, 64> ToClobber;
+ for (auto Pair : AllocaMap) {
+ Value *Def = Pair.first;
+ AllocaInst *Alloca = cast<AllocaInst>(Pair.second);
+
+ // This value was relocated
+ if (VisitedLiveValues.count(Def)) {
+ continue;
+ }
+ ToClobber.push_back(Alloca);
+ }
+
+ auto InsertClobbersAt = [&](Instruction *IP) {
+ for (auto *AI : ToClobber) {
+ auto AIType = cast<PointerType>(AI->getType());
+ auto PT = cast<PointerType>(AIType->getElementType());
+ Constant *CPN = ConstantPointerNull::get(PT);
+ StoreInst *Store = new StoreInst(CPN, AI);
+ Store->insertBefore(IP);
+ }
+ };
+
+ // Insert the clobbering stores. These may get intermixed with the
+ // gc.results and gc.relocates, but that's fine.
+ if (auto II = dyn_cast<InvokeInst>(Statepoint)) {
+ InsertClobbersAt(II->getNormalDest()->getFirstInsertionPt());
+ InsertClobbersAt(II->getUnwindDest()->getFirstInsertionPt());
+ } else {
+ BasicBlock::iterator Next(cast<CallInst>(Statepoint));
+ Next++;
+ InsertClobbersAt(Next);
+ }
+ }
+ }
+  // Update the uses with loads from the allocas and add a store of each
+  // original def into its alloca.
+ for (auto Pair : AllocaMap) {
+ Value *Def = Pair.first;
+ Value *Alloca = Pair.second;
+
+    // We pre-record the uses of allocas so that we don't have to worry about
+    // later updates that change the user information.
+ SmallVector<Instruction *, 20> Uses;
+ // PERF: trade a linear scan for repeated reallocation
+ Uses.reserve(std::distance(Def->user_begin(), Def->user_end()));
+ for (User *U : Def->users()) {
+ if (!isa<ConstantExpr>(U)) {
+ // If the def has a ConstantExpr use, then the def is either a
+ // ConstantExpr use itself or null. In either case
+ // (recursively in the first, directly in the second), the oop
+ // it is ultimately dependent on is null and this particular
+ // use does not need to be fixed up.
+ Uses.push_back(cast<Instruction>(U));
+ }
+ }
+
+ std::sort(Uses.begin(), Uses.end());
+ auto Last = std::unique(Uses.begin(), Uses.end());
+ Uses.erase(Last, Uses.end());
+
+ for (Instruction *Use : Uses) {
+ if (isa<PHINode>(Use)) {
+ PHINode *Phi = cast<PHINode>(Use);
+ for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) {
+ if (Def == Phi->getIncomingValue(i)) {
+ LoadInst *Load = new LoadInst(
+ Alloca, "", Phi->getIncomingBlock(i)->getTerminator());
+ Phi->setIncomingValue(i, Load);
+ }
+ }
+ } else {
+ LoadInst *Load = new LoadInst(Alloca, "", Use);
+ Use->replaceUsesOfWith(Def, Load);
+ }
+ }
+
+ // emit store for the initial gc value
+ // store must be inserted after load, otherwise store will be in alloca's
+ // use list and an extra load will be inserted before it
+ StoreInst *Store = new StoreInst(Def, Alloca);
+ if (Instruction *Inst = dyn_cast<Instruction>(Def)) {
+ if (InvokeInst *Invoke = dyn_cast<InvokeInst>(Inst)) {
+        // InvokeInst is a TerminatorInst so the store needs to be inserted
+ // into its normal destination block.
+ BasicBlock *NormalDest = Invoke->getNormalDest();
+ Store->insertBefore(NormalDest->getFirstNonPHI());
+ } else {
+ assert(!Inst->isTerminator() &&
+ "The only TerminatorInst that can produce a value is "
+ "InvokeInst which is handled above.");
+ Store->insertAfter(Inst);
+ }
+ } else {
+ assert(isa<Argument>(Def));
+ Store->insertAfter(cast<Instruction>(Alloca));
+ }
+ }
+
+ assert(PromotableAllocas.size() == Live.size() + NumRematerializedValues &&
+ "we must have the same allocas with lives");
+ if (!PromotableAllocas.empty()) {
+ // apply mem2reg to promote alloca to SSA
+ PromoteMemToReg(PromotableAllocas, DT);
+ }
+
+#ifndef NDEBUG
+ for (auto I = F.getEntryBlock().begin(), E = F.getEntryBlock().end(); I != E;
+ I++)
+ if (isa<AllocaInst>(*I))
+ InitialAllocaNum--;
+ assert(InitialAllocaNum == 0 && "We must not introduce any extra allocas");
+#endif
+}
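+
+// A minimal sketch of the rewrite performed above, over hypothetical IR
+// (%obj, %slot and the other names are made up for the example):
+//
+//   %slot = alloca i8 addrspace(1)*                         ; one per live value
+//   store i8 addrspace(1)* %obj, i8 addrspace(1)** %slot    ; initial value
+//   ... statepoint ...
+//   %obj.relocated = ... result of the matching gc.relocate ...
+//   store i8 addrspace(1)* %obj.relocated, i8 addrspace(1)** %slot
+//   %obj.reload = load i8 addrspace(1)*, i8 addrspace(1)** %slot
+//   ... every original use of %obj is rewritten to use %obj.reload ...
+//
+// PromoteMemToReg (mem2reg) then folds the slot back into SSA form, so uses
+// dominated by the statepoint end up referring to the relocated value.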
+
+/// Implement a unique function which doesn't require us to sort the input
+/// vector. Doing so has the effect of changing the output of a couple of
+/// tests in ways which make them less useful in testing fused safepoints.
+template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) {
+ DenseSet<T> Seen;
+ SmallVector<T, 128> TempVec;
+ TempVec.reserve(Vec.size());
+ for (auto Element : Vec)
+ TempVec.push_back(Element);
+ Vec.clear();
+ for (auto V : TempVec) {
+ if (Seen.insert(V).second) {
+ Vec.push_back(V);
+ }
+ }
+}
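+
+// A small usage sketch, assuming A, B and C are distinct Value pointers
+// already in scope (illustrative only):
+//
+//   SmallVector<Value *, 8> Vec;
+//   Vec.push_back(A); Vec.push_back(B); Vec.push_back(A); Vec.push_back(C);
+//   unique_unsorted(Vec);
+//   // Vec now holds A, B, C: the first occurrence of each value is kept and
+//   // the original order is preserved, unlike std::unique, which needs a
+//   // sorted range to remove all duplicates.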
+
+/// Insert holders so that each Value is obviously live through the entire
+/// lifetime of the call.
+static void insertUseHolderAfter(CallSite &CS, const ArrayRef<Value *> Values,
+ SmallVectorImpl<CallInst *> &Holders) {
+ if (Values.empty())
+ // No values to hold live, might as well not insert the empty holder
+ return;
+
+ Module *M = CS.getInstruction()->getParent()->getParent()->getParent();
+ // Use a dummy vararg function to actually hold the values live
+ Function *Func = cast<Function>(M->getOrInsertFunction(
+ "__tmp_use", FunctionType::get(Type::getVoidTy(M->getContext()), true)));
+ if (CS.isCall()) {
+ // For call safepoints insert dummy calls right after safepoint
+ BasicBlock::iterator Next(CS.getInstruction());
+ Next++;
+ Holders.push_back(CallInst::Create(Func, Values, "", Next));
+ return;
+ }
+  // For invoke safepoints insert dummy calls in both the normal and
+  // exceptional destination blocks
+ auto *II = cast<InvokeInst>(CS.getInstruction());
+ Holders.push_back(CallInst::Create(
+ Func, Values, "", II->getNormalDest()->getFirstInsertionPt()));
+ Holders.push_back(CallInst::Create(
+ Func, Values, "", II->getUnwindDest()->getFirstInsertionPt()));
+}
+
+static void findLiveReferences(
+ Function &F, DominatorTree &DT, Pass *P, ArrayRef<CallSite> toUpdate,
+ MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) {
+ GCPtrLivenessData OriginalLivenessData;
+ computeLiveInValues(DT, F, OriginalLivenessData);
+ for (size_t i = 0; i < records.size(); i++) {
+ struct PartiallyConstructedSafepointRecord &info = records[i];
+ const CallSite &CS = toUpdate[i];
+ analyzeParsePointLiveness(DT, OriginalLivenessData, CS, info);
+ }
+}
+
+/// Remove any vectors of pointers from the liveset by scalarizing them over
+/// the statepoint instruction. Adds the scalarized pieces to the liveset. It
+/// would be preferable to include the vector in the statepoint itself, but
+/// the lowering code currently does not handle that. Extending it would be
+/// slightly non-trivial since it requires a format change. Given how rare
+/// such cases are (for the moment?), scalarizing is an acceptable compromise.
+static void splitVectorValues(Instruction *StatepointInst,
+ StatepointLiveSetTy &LiveSet, DominatorTree &DT) {
+ SmallVector<Value *, 16> ToSplit;
+ for (Value *V : LiveSet)
+ if (isa<VectorType>(V->getType()))
+ ToSplit.push_back(V);
+
+ if (ToSplit.empty())
+ return;
+
+ Function &F = *(StatepointInst->getParent()->getParent());
+
+ DenseMap<Value *, AllocaInst *> AllocaMap;
+ // First is normal return, second is exceptional return (invoke only)
+ DenseMap<Value *, std::pair<Value *, Value *>> Replacements;
+ for (Value *V : ToSplit) {
+ LiveSet.erase(V);
+
+ AllocaInst *Alloca =
+ new AllocaInst(V->getType(), "", F.getEntryBlock().getFirstNonPHI());
+ AllocaMap[V] = Alloca;
+
+ VectorType *VT = cast<VectorType>(V->getType());
+ IRBuilder<> Builder(StatepointInst);
+ SmallVector<Value *, 16> Elements;
+ for (unsigned i = 0; i < VT->getNumElements(); i++)
+ Elements.push_back(Builder.CreateExtractElement(V, Builder.getInt32(i)));
+ LiveSet.insert(Elements.begin(), Elements.end());
+
+ auto InsertVectorReform = [&](Instruction *IP) {
+ Builder.SetInsertPoint(IP);
+ Builder.SetCurrentDebugLocation(IP->getDebugLoc());
+ Value *ResultVec = UndefValue::get(VT);
+ for (unsigned i = 0; i < VT->getNumElements(); i++)
+ ResultVec = Builder.CreateInsertElement(ResultVec, Elements[i],
+ Builder.getInt32(i));
+ return ResultVec;
+ };
+
+ if (isa<CallInst>(StatepointInst)) {
+ BasicBlock::iterator Next(StatepointInst);
+ Next++;
+ Instruction *IP = &*(Next);
+ Replacements[V].first = InsertVectorReform(IP);
+ Replacements[V].second = nullptr;
+ } else {
+ InvokeInst *Invoke = cast<InvokeInst>(StatepointInst);
+ // We've already normalized - check that we don't have shared destination
+ // blocks
+ BasicBlock *NormalDest = Invoke->getNormalDest();
+ assert(!isa<PHINode>(NormalDest->begin()));
+ BasicBlock *UnwindDest = Invoke->getUnwindDest();
+ assert(!isa<PHINode>(UnwindDest->begin()));
+      // Insert insertelement sequences in both successors
+ Instruction *IP = &*(NormalDest->getFirstInsertionPt());
+ Replacements[V].first = InsertVectorReform(IP);
+ IP = &*(UnwindDest->getFirstInsertionPt());
+ Replacements[V].second = InsertVectorReform(IP);
+ }
+ }
+ for (Value *V : ToSplit) {
+ AllocaInst *Alloca = AllocaMap[V];
+
+ // Capture all users before we start mutating use lists
+ SmallVector<Instruction *, 16> Users;
+ for (User *U : V->users())
+ Users.push_back(cast<Instruction>(U));
+
+ for (Instruction *I : Users) {
+ if (auto Phi = dyn_cast<PHINode>(I)) {
+ for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++)
+ if (V == Phi->getIncomingValue(i)) {
+ LoadInst *Load = new LoadInst(
+ Alloca, "", Phi->getIncomingBlock(i)->getTerminator());
+ Phi->setIncomingValue(i, Load);
+ }
+ } else {
+ LoadInst *Load = new LoadInst(Alloca, "", I);
+ I->replaceUsesOfWith(V, Load);
+ }
+ }
+
+ // Store the original value and the replacement value into the alloca
+ StoreInst *Store = new StoreInst(V, Alloca);
+ if (auto I = dyn_cast<Instruction>(V))
+ Store->insertAfter(I);
+ else
+ Store->insertAfter(Alloca);
+
+ // Normal return for invoke, or call return
+ Instruction *Replacement = cast<Instruction>(Replacements[V].first);
+ (new StoreInst(Replacement, Alloca))->insertAfter(Replacement);
+ // Unwind return for invoke only
+ Replacement = cast_or_null<Instruction>(Replacements[V].second);
+ if (Replacement)
+ (new StoreInst(Replacement, Alloca))->insertAfter(Replacement);
+ }
+
+ // apply mem2reg to promote alloca to SSA
+ SmallVector<AllocaInst *, 16> Allocas;
+ for (Value *V : ToSplit)
+ Allocas.push_back(AllocaMap[V]);
+ PromoteMemToReg(Allocas, DT);
+}
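+
+// For illustration, given a hypothetical live value %vec of type
+// <2 x i8 addrspace(1)*>, the scalarization above conceptually produces:
+//
+//   ; before the statepoint
+//   %lane0 = extractelement <2 x i8 addrspace(1)*> %vec, i32 0
+//   %lane1 = extractelement <2 x i8 addrspace(1)*> %vec, i32 1
+//   ; ... statepoint: %lane0 and %lane1 are live, %vec is not ...
+//   ; at the first insertion point after the statepoint
+//   %v0 = insertelement <2 x i8 addrspace(1)*> undef, i8 addrspace(1)* %lane0, i32 0
+//   %v1 = insertelement <2 x i8 addrspace(1)*> %v0, i8 addrspace(1)* %lane1, i32 1
+//
+// Uses of %vec are then routed through the per-value alloca so that mem2reg
+// can stitch the rebuilt vector back into SSA form.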
+
+// Helper function for the "rematerializeLiveValues". It walks use chain
+// starting from the "CurrentValue" until it meets "BaseValue". Only "simple"
+// values are visited (currently it is GEP's and casts). Returns true if it
+// sucessfully reached "BaseValue" and false otherwise.
+// Fills "ChainToBase" array with all visited values. "BaseValue" is not
+// recorded.
+static bool findRematerializableChainToBasePointer(
+ SmallVectorImpl<Instruction*> &ChainToBase,
+ Value *CurrentValue, Value *BaseValue) {
+
+ // We have found a base value
+ if (CurrentValue == BaseValue) {
+ return true;
+ }
+
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurrentValue)) {
+ ChainToBase.push_back(GEP);
+ return findRematerializableChainToBasePointer(ChainToBase,
+ GEP->getPointerOperand(),
+ BaseValue);
+ }
+
+ if (CastInst *CI = dyn_cast<CastInst>(CurrentValue)) {
+ Value *Def = CI->stripPointerCasts();
+
+    // These two checks are basically similar. The first one is here for
+    // consistency with the findBasePointers logic.
+ assert(!isa<CastInst>(Def) && "not a pointer cast found");
+ if (!CI->isNoopCast(CI->getModule()->getDataLayout()))
+ return false;
+
+ ChainToBase.push_back(CI);
+ return findRematerializableChainToBasePointer(ChainToBase, Def, BaseValue);
+ }
+
+  // Unsupported instruction in the chain
+ return false;
+}
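+
+// An illustrative walk over hypothetical IR (%base, %cast, %gep and %objty
+// are made up for the example):
+//
+//   %cast = bitcast %objty addrspace(1)* %base to i8 addrspace(1)*
+//   %gep  = getelementptr i8, i8 addrspace(1)* %cast, i64 16
+//
+// Calling the helper with CurrentValue = %gep and BaseValue = %base returns
+// true and fills ChainToBase with [%gep, %cast]; %base itself is not recorded.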
+
+// Helper function for the "rematerializeLiveValues". Compute cost of the use
+// chain we are going to rematerialize.
+static unsigned
+chainToBasePointerCost(SmallVectorImpl<Instruction*> &Chain,
+ TargetTransformInfo &TTI) {
+ unsigned Cost = 0;
+
+ for (Instruction *Instr : Chain) {
+ if (CastInst *CI = dyn_cast<CastInst>(Instr)) {
+      assert(CI->isNoopCast(CI->getModule()->getDataLayout()) &&
+             "non-noop cast found during rematerialization");
+
+ Type *SrcTy = CI->getOperand(0)->getType();
+ Cost += TTI.getCastInstrCost(CI->getOpcode(), CI->getType(), SrcTy);
+
+ } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Instr)) {
+ // Cost of the address calculation
+ Type *ValTy = GEP->getPointerOperandType()->getPointerElementType();
+ Cost += TTI.getAddressComputationCost(ValTy);
+
+ // And cost of the GEP itself
+      // TODO: Use TTI->getGEPCost here (it exists, but does not appear to be
+      //       allowed for external use)
+ if (!GEP->hasAllConstantIndices())
+ Cost += 2;
+
+ } else {
+ llvm_unreachable("unsupported instruciton type during rematerialization");
+ }
+ }
+
+ return Cost;
+}
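+
+// A worked example under assumed TTI numbers (the real values are target
+// dependent): for a chain of one no-op bitcast and one GEP with a
+// non-constant index, if the cast cost is 0 and the address computation cost
+// is 1, the total is 0 + 1 + 2 = 3: the cast, the address computation, and
+// the fixed charge of 2 for a GEP whose indices are not all constant.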
+
+// From the statepoint live set, pick values that are cheaper to recompute than
+// to relocate. Remove these values from the live set, rematerialize them after
+// the statepoint and record them in the "Info" structure. Note that, similar
+// to relocated values, we don't do any user adjustments here.
+static void rematerializeLiveValues(CallSite CS,
+ PartiallyConstructedSafepointRecord &Info,
+ TargetTransformInfo &TTI) {
+ const unsigned int ChainLengthThreshold = 10;
+
+ // Record values we are going to delete from this statepoint live set.
+  // We cannot do this in the following loop due to iterator invalidation.
+ SmallVector<Value *, 32> LiveValuesToBeDeleted;
+
+ for (Value *LiveValue: Info.liveset) {
+    // For each live pointer find its defining chain
+ SmallVector<Instruction *, 3> ChainToBase;
+ assert(Info.PointerToBase.find(LiveValue) != Info.PointerToBase.end());
+ bool FoundChain =
+ findRematerializableChainToBasePointer(ChainToBase,
+ LiveValue,
+ Info.PointerToBase[LiveValue]);
+ // Nothing to do, or chain is too long
+ if (!FoundChain ||
+ ChainToBase.size() == 0 ||
+ ChainToBase.size() > ChainLengthThreshold)
+ continue;
+
+ // Compute cost of this chain
+ unsigned Cost = chainToBasePointerCost(ChainToBase, TTI);
+    // TODO: We can also account for cases when we will be able to remove some
+    //       of the rematerialized values by later optimization passes, e.g. if
+    //       we rematerialized several intersecting chains, or if the original
+    //       values don't have any uses besides this statepoint.
+
+ // For invokes we need to rematerialize each chain twice - for normal and
+ // for unwind basic blocks. Model this by multiplying cost by two.
+ if (CS.isInvoke()) {
+ Cost *= 2;
+ }
+ // If it's too expensive - skip it
+ if (Cost >= RematerializationThreshold)
+ continue;
+
+ // Remove value from the live set
+ LiveValuesToBeDeleted.push_back(LiveValue);
+
+ // Clone instructions and record them inside "Info" structure
+
+ // Walk backwards to visit top-most instructions first
+ std::reverse(ChainToBase.begin(), ChainToBase.end());
+
+ // Utility function which clones all instructions from "ChainToBase"
+ // and inserts them before "InsertBefore". Returns rematerialized value
+ // which should be used after statepoint.
+ auto rematerializeChain = [&ChainToBase](Instruction *InsertBefore) {
+ Instruction *LastClonedValue = nullptr;
+ Instruction *LastValue = nullptr;
+ for (Instruction *Instr: ChainToBase) {
+        // Only GEPs and casts are supported as we need to be careful not to
+ // introduce any new uses of pointers not in the liveset.
+ // Note that it's fine to introduce new uses of pointers which were
+ // otherwise not used after this statepoint.
+ assert(isa<GetElementPtrInst>(Instr) || isa<CastInst>(Instr));
+
+ Instruction *ClonedValue = Instr->clone();
+ ClonedValue->insertBefore(InsertBefore);
+ ClonedValue->setName(Instr->getName() + ".remat");
+
+        // If it is not the first instruction in the chain then it uses the
+        // previously cloned value and should be updated to use it.
+ if (LastClonedValue) {
+ assert(LastValue);
+ ClonedValue->replaceUsesOfWith(LastValue, LastClonedValue);
+#ifndef NDEBUG
+ // Assert that cloned instruction does not use any instructions from
+ // this chain other than LastClonedValue
+ for (auto OpValue : ClonedValue->operand_values()) {
+ assert(std::find(ChainToBase.begin(), ChainToBase.end(), OpValue) ==
+ ChainToBase.end() &&
+ "incorrect use in rematerialization chain");
+ }
+#endif
+ }
+
+ LastClonedValue = ClonedValue;
+ LastValue = Instr;
+ }
+ assert(LastClonedValue);
+ return LastClonedValue;
+ };
+
+ // Different cases for calls and invokes. For invokes we need to clone
+    // instructions on both the normal and unwind paths.
+ if (CS.isCall()) {
+ Instruction *InsertBefore = CS.getInstruction()->getNextNode();
+ assert(InsertBefore);
+ Instruction *RematerializedValue = rematerializeChain(InsertBefore);
+ Info.RematerializedValues[RematerializedValue] = LiveValue;
+ } else {
+ InvokeInst *Invoke = cast<InvokeInst>(CS.getInstruction());
+
+ Instruction *NormalInsertBefore =
+ Invoke->getNormalDest()->getFirstInsertionPt();
+ Instruction *UnwindInsertBefore =
+ Invoke->getUnwindDest()->getFirstInsertionPt();
+
+ Instruction *NormalRematerializedValue =
+ rematerializeChain(NormalInsertBefore);
+ Instruction *UnwindRematerializedValue =
+ rematerializeChain(UnwindInsertBefore);
+
+ Info.RematerializedValues[NormalRematerializedValue] = LiveValue;
+ Info.RematerializedValues[UnwindRematerializedValue] = LiveValue;
+ }
+ }
+
+  // Remove rematerialized values from the live set
+ for (auto LiveValue: LiveValuesToBeDeleted) {
+ Info.liveset.erase(LiveValue);
+ }
+}
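+
+// A worked example of the decision above (the numbers are illustrative): if a
+// derived pointer's chain costs 2 under chainToBasePointerCost and the
+// statepoint is an invoke, the modeled cost is 2 * 2 = 4, since the chain has
+// to be cloned on both the normal and unwind paths. The value is
+// rematerialized only if that total is still below RematerializationThreshold;
+// otherwise it stays in the live set and is relocated as usual.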
+
+static bool insertParsePoints(Function &F, DominatorTree &DT, Pass *P,
+ SmallVectorImpl<CallSite> &toUpdate) {
+#ifndef NDEBUG
+ // sanity check the input
+ std::set<CallSite> uniqued;
+ uniqued.insert(toUpdate.begin(), toUpdate.end());
+ assert(uniqued.size() == toUpdate.size() && "no duplicates please!");
+
+ for (size_t i = 0; i < toUpdate.size(); i++) {
+ CallSite &CS = toUpdate[i];
+ assert(CS.getInstruction()->getParent()->getParent() == &F);
+ assert(isStatepoint(CS) && "expected to already be a deopt statepoint");
+ }
+#endif
+
+ // When inserting gc.relocates for invokes, we need to be able to insert at
+ // the top of the successor blocks. See the comment on
+  // normalizeForInvokeSafepoint on exactly what is needed. Note that this step
+ // may restructure the CFG.
+ for (CallSite CS : toUpdate) {
+ if (!CS.isInvoke())
+ continue;
+ InvokeInst *invoke = cast<InvokeInst>(CS.getInstruction());
+ normalizeForInvokeSafepoint(invoke->getNormalDest(), invoke->getParent(),
+ P);
+ normalizeForInvokeSafepoint(invoke->getUnwindDest(), invoke->getParent(),
+ P);
+ }
+
+ // A list of dummy calls added to the IR to keep various values obviously
+ // live in the IR. We'll remove all of these when done.
+ SmallVector<CallInst *, 64> holders;
+
+ // Insert a dummy call with all of the arguments to the vm_state we'll need
+ // for the actual safepoint insertion. This ensures reference arguments in
+ // the deopt argument list are considered live through the safepoint (and
+ // thus makes sure they get relocated.)
+ for (size_t i = 0; i < toUpdate.size(); i++) {
+ CallSite &CS = toUpdate[i];
+ Statepoint StatepointCS(CS);
+
+ SmallVector<Value *, 64> DeoptValues;
+ for (Use &U : StatepointCS.vm_state_args()) {
+ Value *Arg = cast<Value>(&U);
+ assert(!isUnhandledGCPointerType(Arg->getType()) &&
+ "support for FCA unimplemented");
+ if (isHandledGCPointerType(Arg->getType()))
+ DeoptValues.push_back(Arg);
+ }
+ insertUseHolderAfter(CS, DeoptValues, holders);
+ }
+
+ SmallVector<struct PartiallyConstructedSafepointRecord, 64> records;
+ records.reserve(toUpdate.size());
+ for (size_t i = 0; i < toUpdate.size(); i++) {
+ struct PartiallyConstructedSafepointRecord info;
+ records.push_back(info);
+ }
+ assert(records.size() == toUpdate.size());
+
+  // A) Identify all gc pointers which are statically live at the given call
+ // site.
+ findLiveReferences(F, DT, P, toUpdate, records);
+
+ // Do a limited scalarization of any live at safepoint vector values which
+ // contain pointers. This enables this pass to run after vectorization at
+ // the cost of some possible performance loss. TODO: it would be nice to
+ // natively support vectors all the way through the backend so we don't need
+ // to scalarize here.
+ for (size_t i = 0; i < records.size(); i++) {
+ struct PartiallyConstructedSafepointRecord &info = records[i];
+ Instruction *statepoint = toUpdate[i].getInstruction();
+ splitVectorValues(cast<Instruction>(statepoint), info.liveset, DT);
+ }
+
+ // B) Find the base pointers for each live pointer
+ /* scope for caching */ {
+ // Cache the 'defining value' relation used in the computation and
+ // insertion of base phis and selects. This ensures that we don't insert
+ // large numbers of duplicate base_phis.
+ DefiningValueMapTy DVCache;
+
+ for (size_t i = 0; i < records.size(); i++) {
+ struct PartiallyConstructedSafepointRecord &info = records[i];
+ CallSite &CS = toUpdate[i];
+ findBasePointers(DT, DVCache, CS, info);
+ }
+ } // end of cache scope
+
+ // The base phi insertion logic (for any safepoint) may have inserted new
+ // instructions which are now live at some safepoint. The simplest such
+ // example is:
+ // loop:
+ // phi a <-- will be a new base_phi here
+ // safepoint 1 <-- that needs to be live here
+ // gep a + 1
+ // safepoint 2
+ // br loop
+ // We insert some dummy calls after each safepoint to definitely hold live
+ // the base pointers which were identified for that safepoint. We'll then
+ // ask liveness for _every_ base inserted to see what is now live. Then we
+ // remove the dummy calls.
+ holders.reserve(holders.size() + records.size());
+ for (size_t i = 0; i < records.size(); i++) {
+ struct PartiallyConstructedSafepointRecord &info = records[i];
+ CallSite &CS = toUpdate[i];
+
+ SmallVector<Value *, 128> Bases;
+ for (auto Pair : info.PointerToBase) {
+ Bases.push_back(Pair.second);
+ }
+ insertUseHolderAfter(CS, Bases, holders);
+ }
+
+ // By selecting base pointers, we've effectively inserted new uses. Thus, we
+ // need to rerun liveness. We may *also* have inserted new defs, but that's
+ // not the key issue.
+ recomputeLiveInValues(F, DT, P, toUpdate, records);
+
+ if (PrintBasePointers) {
+ for (size_t i = 0; i < records.size(); i++) {
+ struct PartiallyConstructedSafepointRecord &info = records[i];
+ errs() << "Base Pairs: (w/Relocation)\n";
+ for (auto Pair : info.PointerToBase) {
+ errs() << " derived %" << Pair.first->getName() << " base %"
+ << Pair.second->getName() << "\n";
+ }
+ }
+ }
+ for (size_t i = 0; i < holders.size(); i++) {
+ holders[i]->eraseFromParent();
+ holders[i] = nullptr;
+ }
+ holders.clear();
+
+  // In order to reduce the live set of a statepoint we might choose to
+  // rematerialize some values instead of relocating them. This is purely an
+  // optimization and does not influence correctness.
+ TargetTransformInfo &TTI =
+ P->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+
+ for (size_t i = 0; i < records.size(); i++) {
+ struct PartiallyConstructedSafepointRecord &info = records[i];
+ CallSite &CS = toUpdate[i];
+
+ rematerializeLiveValues(CS, info, TTI);
+ }
+
+ // Now run through and replace the existing statepoints with new ones with
+ // the live variables listed. We do not yet update uses of the values being
+ // relocated. We have references to live variables that need to
+ // survive to the last iteration of this loop. (By construction, the
+  // previous statepoint cannot be a live variable, thus we can and do remove
+ // the old statepoint calls as we go.)
+ for (size_t i = 0; i < records.size(); i++) {
+ struct PartiallyConstructedSafepointRecord &info = records[i];
+ CallSite &CS = toUpdate[i];
+ makeStatepointExplicit(DT, CS, P, info);
+ }
+ toUpdate.clear(); // prevent accident use of invalid CallSites
+
+ // Do all the fixups of the original live variables to their relocated selves
+ SmallVector<Value *, 128> live;
+ for (size_t i = 0; i < records.size(); i++) {
+ struct PartiallyConstructedSafepointRecord &info = records[i];
+ // We can't simply save the live set from the original insertion. One of
+ // the live values might be the result of a call which needs a safepoint.
+ // That Value* no longer exists and we need to use the new gc_result.
+ // Thankfully, the liveset is embedded in the statepoint (and updated), so
+ // we just grab that.
+ Statepoint statepoint(info.StatepointToken);
+ live.insert(live.end(), statepoint.gc_args_begin(),
+ statepoint.gc_args_end());
+#ifndef NDEBUG
+ // Do some basic sanity checks on our liveness results before performing
+ // relocation. Relocation can and will turn mistakes in liveness results
+    // into nonsensical code which is much harder to debug.
+ // TODO: It would be nice to test consistency as well
+ assert(DT.isReachableFromEntry(info.StatepointToken->getParent()) &&
+ "statepoint must be reachable or liveness is meaningless");
+ for (Value *V : statepoint.gc_args()) {
+ if (!isa<Instruction>(V))
+        // Non-instruction values trivially dominate all possible uses
+ continue;
+ auto LiveInst = cast<Instruction>(V);
+ assert(DT.isReachableFromEntry(LiveInst->getParent()) &&
+ "unreachable values should never be live");
+ assert(DT.dominates(LiveInst, info.StatepointToken) &&
+ "basic SSA liveness expectation violated by liveness analysis");
+ }
+#endif
+ }
+ unique_unsorted(live);
+
+#ifndef NDEBUG
+ // sanity check
+ for (auto ptr : live) {
+ assert(isGCPointerType(ptr->getType()) && "must be a gc pointer type");
+ }
+#endif
+
+ relocationViaAlloca(F, DT, live, records);
+ return !records.empty();
+}
+
+/// Returns true if this function should be rewritten by this pass. The main
+/// point of this function is as an extension point for custom logic.
+static bool shouldRewriteStatepointsIn(Function &F) {
+ // TODO: This should check the GCStrategy
+ if (F.hasGC()) {
+ const char *FunctionGCName = F.getGC();
+ const StringRef StatepointExampleName("statepoint-example");
+ const StringRef CoreCLRName("coreclr");
+ return (StatepointExampleName == FunctionGCName) ||
+ (CoreCLRName == FunctionGCName);
+ } else
+ return false;
+}
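+
+// For illustration, a function is picked up by this pass only when it names
+// one of the recognized strategies (hypothetical IR):
+//
+//   define i8 addrspace(1)* @f(i8 addrspace(1)* %obj) gc "statepoint-example" {
+//     ...
+//   }
+//
+// Functions without a "gc" attribute, or with any other strategy name, are
+// left untouched.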
+
+bool RewriteStatepointsForGC::runOnFunction(Function &F) {
+ // Nothing to do for declarations.
+ if (F.isDeclaration() || F.empty())
+ return false;
+
+ // Policy choice says not to rewrite - the most common reason is that we're
+ // compiling code without a GCStrategy.
+ if (!shouldRewriteStatepointsIn(F))
+ return false;
+
+ DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+
+  // Gather all the statepoints which need to be rewritten. Be careful to only
+ // consider those in reachable code since we need to ask dominance queries
+ // when rewriting. We'll delete the unreachable ones in a moment.
+ SmallVector<CallSite, 64> ParsePointNeeded;
+ bool HasUnreachableStatepoint = false;
+ for (Instruction &I : inst_range(F)) {
+ // TODO: only the ones with the flag set!
+ if (isStatepoint(I)) {
+ if (DT.isReachableFromEntry(I.getParent()))
+ ParsePointNeeded.push_back(CallSite(&I));
+ else
+ HasUnreachableStatepoint = true;
+ }
+ }
+
+ bool MadeChange = false;
+
+ // Delete any unreachable statepoints so that we don't have unrewritten
+ // statepoints surviving this pass. This makes testing easier and the
+ // resulting IR less confusing to human readers. Rather than be fancy, we
+ // just reuse a utility function which removes the unreachable blocks.
+ if (HasUnreachableStatepoint)
+ MadeChange |= removeUnreachableBlocks(F);
+
+ // Return early if no work to do.
+ if (ParsePointNeeded.empty())
+ return MadeChange;
+
+ // As a prepass, go ahead and aggressively destroy single entry phi nodes.
+ // These are created by LCSSA. They have the effect of increasing the size
+ // of liveness sets for no good reason. It may be harder to do this post
+ // insertion since relocations and base phis can confuse things.
+ for (BasicBlock &BB : F)
+ if (BB.getUniquePredecessor()) {
+ MadeChange = true;
+ FoldSingleEntryPHINodes(&BB);
+ }
+
+ MadeChange |= insertParsePoints(F, DT, this, ParsePointNeeded);
+ return MadeChange;
+}
+
+// liveness computation via standard dataflow
+// -------------------------------------------------------------------
+
+// TODO: Consider using bitvectors for liveness; the set of potentially
+// interesting values should be small and easy to pre-compute.
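+
+// The computation below follows the usual backward liveness equations, where
+// LiveSet(BB) is the block-local upward-exposed use set and KillSet(BB) the
+// set of GC pointer definitions in BB:
+//
+//   LiveOut(BB) = union over successors S of LiveIn(S)
+//                 (seeded with the PHI incoming values BB contributes to S)
+//   LiveIn(BB)  = (LiveSet(BB) union LiveOut(BB)) \ KillSet(BB)
+//
+// iterated over a worklist until a fixed point is reached.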
+
+/// Compute the live-in set for the location rbegin starting from
+/// the live-out set of the basic block
+static void computeLiveInValues(BasicBlock::reverse_iterator rbegin,
+ BasicBlock::reverse_iterator rend,
+ DenseSet<Value *> &LiveTmp) {
+
+ for (BasicBlock::reverse_iterator ritr = rbegin; ritr != rend; ritr++) {
+ Instruction *I = &*ritr;
+
+ // KILL/Def - Remove this definition from LiveIn
+ LiveTmp.erase(I);
+
+    // Don't consider *uses* in PHI nodes; we handle their contribution to
+ // predecessor blocks when we seed the LiveOut sets
+ if (isa<PHINode>(I))
+ continue;
+
+ // USE - Add to the LiveIn set for this instruction
+ for (Value *V : I->operands()) {
+ assert(!isUnhandledGCPointerType(V->getType()) &&
+ "support for FCA unimplemented");
+ if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V)) {
+ // The choice to exclude all things constant here is slightly subtle.
+        // There are two independent reasons:
+ // - We assume that things which are constant (from LLVM's definition)
+ // do not move at runtime. For example, the address of a global
+        // variable is fixed, even though its contents may not be.
+ // - Second, we can't disallow arbitrary inttoptr constants even
+ // if the language frontend does. Optimization passes are free to
+ // locally exploit facts without respect to global reachability. This
+ // can create sections of code which are dynamically unreachable and
+ // contain just about anything. (see constants.ll in tests)
+ LiveTmp.insert(V);
+ }
+ }
+ }
+}
+
+static void computeLiveOutSeed(BasicBlock *BB, DenseSet<Value *> &LiveTmp) {
+
+ for (BasicBlock *Succ : successors(BB)) {
+ const BasicBlock::iterator E(Succ->getFirstNonPHI());
+ for (BasicBlock::iterator I = Succ->begin(); I != E; I++) {
+ PHINode *Phi = cast<PHINode>(&*I);
+ Value *V = Phi->getIncomingValueForBlock(BB);
+ assert(!isUnhandledGCPointerType(V->getType()) &&
+ "support for FCA unimplemented");
+ if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V)) {
+ LiveTmp.insert(V);
+ }
+ }
+ }
+}
+
+static DenseSet<Value *> computeKillSet(BasicBlock *BB) {
+ DenseSet<Value *> KillSet;
+ for (Instruction &I : *BB)
+ if (isHandledGCPointerType(I.getType()))
+ KillSet.insert(&I);
+ return KillSet;
+}
+
+#ifndef NDEBUG
+/// Check that the items in 'Live' dominate 'TI'. This is used as a basic
+/// sanity check for the liveness computation.
+static void checkBasicSSA(DominatorTree &DT, DenseSet<Value *> &Live,
+ TerminatorInst *TI, bool TermOkay = false) {
+ for (Value *V : Live) {
+ if (auto *I = dyn_cast<Instruction>(V)) {
+ // The terminator can be a member of the LiveOut set. LLVM's definition
+ // of instruction dominance states that V does not dominate itself. As
+ // such, we need to special case this to allow it.
+ if (TermOkay && TI == I)
+ continue;
+ assert(DT.dominates(I, TI) &&
+ "basic SSA liveness expectation violated by liveness analysis");
+ }
+ }
+}
+
+/// Check that all the liveness sets used during the computation of liveness
+/// obey basic SSA properties. This is useful for finding cases where we miss
+/// a def.
+static void checkBasicSSA(DominatorTree &DT, GCPtrLivenessData &Data,
+ BasicBlock &BB) {
+ checkBasicSSA(DT, Data.LiveSet[&BB], BB.getTerminator());
+ checkBasicSSA(DT, Data.LiveOut[&BB], BB.getTerminator(), true);
+ checkBasicSSA(DT, Data.LiveIn[&BB], BB.getTerminator());
+}
+#endif
+
+static void computeLiveInValues(DominatorTree &DT, Function &F,
+ GCPtrLivenessData &Data) {
+
+ SmallSetVector<BasicBlock *, 200> Worklist;
+ auto AddPredsToWorklist = [&](BasicBlock *BB) {
+ // We use a SetVector so that we don't have duplicates in the worklist.
+ Worklist.insert(pred_begin(BB), pred_end(BB));
+ };
+ auto NextItem = [&]() {
+ BasicBlock *BB = Worklist.back();
+ Worklist.pop_back();
+ return BB;
+ };
+
+ // Seed the liveness for each individual block
+ for (BasicBlock &BB : F) {
+ Data.KillSet[&BB] = computeKillSet(&BB);
+ Data.LiveSet[&BB].clear();
+ computeLiveInValues(BB.rbegin(), BB.rend(), Data.LiveSet[&BB]);
+
+#ifndef NDEBUG
+ for (Value *Kill : Data.KillSet[&BB])
+ assert(!Data.LiveSet[&BB].count(Kill) && "live set contains kill");
+#endif
+
+ Data.LiveOut[&BB] = DenseSet<Value *>();
+ computeLiveOutSeed(&BB, Data.LiveOut[&BB]);
+ Data.LiveIn[&BB] = Data.LiveSet[&BB];
+ set_union(Data.LiveIn[&BB], Data.LiveOut[&BB]);
+ set_subtract(Data.LiveIn[&BB], Data.KillSet[&BB]);
+ if (!Data.LiveIn[&BB].empty())
+ AddPredsToWorklist(&BB);
+ }
+
+ // Propagate that liveness until stable
+ while (!Worklist.empty()) {
+ BasicBlock *BB = NextItem();
+
+ // Compute our new liveout set, then exit early if it hasn't changed
+ // despite the contribution of our successor.
+ DenseSet<Value *> LiveOut = Data.LiveOut[BB];
+ const auto OldLiveOutSize = LiveOut.size();
+ for (BasicBlock *Succ : successors(BB)) {
+ assert(Data.LiveIn.count(Succ));
+ set_union(LiveOut, Data.LiveIn[Succ]);
+ }
+    // assert: OldLiveOut is a subset of LiveOut
+ if (OldLiveOutSize == LiveOut.size()) {
+ // If the sets are the same size, then we didn't actually add anything
+      // when unioning our successors' LiveIn. Thus, the LiveIn of this block
+ // hasn't changed.
+ continue;
+ }
+ Data.LiveOut[BB] = LiveOut;
+
+ // Apply the effects of this basic block
+ DenseSet<Value *> LiveTmp = LiveOut;
+ set_union(LiveTmp, Data.LiveSet[BB]);
+ set_subtract(LiveTmp, Data.KillSet[BB]);
+
+ assert(Data.LiveIn.count(BB));
+ const DenseSet<Value *> &OldLiveIn = Data.LiveIn[BB];
+ // assert: OldLiveIn is a subset of LiveTmp
+ if (OldLiveIn.size() != LiveTmp.size()) {
+ Data.LiveIn[BB] = LiveTmp;
+ AddPredsToWorklist(BB);
+ }
+ } // while( !worklist.empty() )
+
+#ifndef NDEBUG
+  // Sanity check our output against SSA properties. This helps catch any
+ // missing kills during the above iteration.
+ for (BasicBlock &BB : F) {
+ checkBasicSSA(DT, Data, BB);
+ }
+#endif
+}
+
+static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data,
+ StatepointLiveSetTy &Out) {
+
+ BasicBlock *BB = Inst->getParent();
+
+ // Note: The copy is intentional and required
+ assert(Data.LiveOut.count(BB));
+ DenseSet<Value *> LiveOut = Data.LiveOut[BB];
+
+  // We want to handle the statepoint itself oddly. Its call result is not
+  // live (normal), nor are its arguments (unless they're used again later).
+  // This adjustment is specifically what we need to relocate.
+ BasicBlock::reverse_iterator rend(Inst);
+ computeLiveInValues(BB->rbegin(), rend, LiveOut);
+ LiveOut.erase(Inst);
+ Out.insert(LiveOut.begin(), LiveOut.end());
+}
+
+static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
+ const CallSite &CS,
+ PartiallyConstructedSafepointRecord &Info) {
+ Instruction *Inst = CS.getInstruction();
+ StatepointLiveSetTy Updated;
+ findLiveSetAtInst(Inst, RevisedLivenessData, Updated);
+
+#ifndef NDEBUG
+ DenseSet<Value *> Bases;
+ for (auto KVPair : Info.PointerToBase) {
+ Bases.insert(KVPair.second);
+ }
+#endif
+ // We may have base pointers which are now live that weren't before. We need
+ // to update the PointerToBase structure to reflect this.
+ for (auto V : Updated)
+ if (!Info.PointerToBase.count(V)) {
+ assert(Bases.count(V) && "can't find base for unexpected live value");
+ Info.PointerToBase[V] = V;
+ continue;
+ }
+
+#ifndef NDEBUG
+ for (auto V : Updated) {
+ assert(Info.PointerToBase.count(V) &&
+ "must be able to find base for live value");
+ }
+#endif
+
+ // Remove any stale base mappings - this can happen since our liveness is
+  // more precise than the one inherent in the base pointer analysis
+ DenseSet<Value *> ToErase;
+ for (auto KVPair : Info.PointerToBase)
+ if (!Updated.count(KVPair.first))
+ ToErase.insert(KVPair.first);
+ for (auto V : ToErase)
+ Info.PointerToBase.erase(V);
+
+#ifndef NDEBUG
+ for (auto KVPair : Info.PointerToBase)
+ assert(Updated.count(KVPair.first) && "record for non-live value");
+#endif
+
+ Info.liveset = Updated;
+}
diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp
index cfc9a8e89fa0..bc068f78c576 100644
--- a/lib/Transforms/Scalar/SCCP.cpp
+++ b/lib/Transforms/Scalar/SCCP.cpp
@@ -25,6 +25,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
@@ -35,7 +36,6 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
@@ -154,7 +154,7 @@ namespace {
/// Constant Propagation.
///
class SCCPSolver : public InstVisitor<SCCPSolver> {
- const DataLayout *DL;
+ const DataLayout &DL;
const TargetLibraryInfo *TLI;
SmallPtrSet<BasicBlock*, 8> BBExecutable; // The BBs that are executable.
DenseMap<Value*, LatticeVal> ValueState; // The state each value is in.
@@ -206,8 +206,8 @@ class SCCPSolver : public InstVisitor<SCCPSolver> {
typedef std::pair<BasicBlock*, BasicBlock*> Edge;
DenseSet<Edge> KnownFeasibleEdges;
public:
- SCCPSolver(const DataLayout *DL, const TargetLibraryInfo *tli)
- : DL(DL), TLI(tli) {}
+ SCCPSolver(const DataLayout &DL, const TargetLibraryInfo *tli)
+ : DL(DL), TLI(tli) {}
/// MarkBlockExecutable - This method can be used by clients to mark all of
/// the blocks that are known to be intrinsically live in the processed unit.
@@ -1012,7 +1012,8 @@ void SCCPSolver::visitGetElementPtrInst(GetElementPtrInst &I) {
Constant *Ptr = Operands[0];
auto Indices = makeArrayRef(Operands.begin() + 1, Operands.end());
- markConstant(&I, ConstantExpr::getGetElementPtr(Ptr, Indices));
+ markConstant(&I, ConstantExpr::getGetElementPtr(I.getSourceElementType(), Ptr,
+ Indices));
}
void SCCPSolver::visitStoreInst(StoreInst &SI) {
@@ -1504,7 +1505,7 @@ namespace {
///
struct SCCP : public FunctionPass {
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<TargetLibraryInfo>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
}
static char ID; // Pass identification, replacement for typeid
SCCP() : FunctionPass(ID) {
@@ -1561,9 +1562,9 @@ bool SCCP::runOnFunction(Function &F) {
return false;
DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n");
- const DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
- const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ const TargetLibraryInfo *TLI =
+ &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
SCCPSolver Solver(DL, TLI);
// Mark the first block of the function as being executable.
@@ -1637,7 +1638,7 @@ namespace {
///
struct IPSCCP : public ModulePass {
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<TargetLibraryInfo>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
}
static char ID;
IPSCCP() : ModulePass(ID) {
@@ -1651,7 +1652,7 @@ char IPSCCP::ID = 0;
INITIALIZE_PASS_BEGIN(IPSCCP, "ipsccp",
"Interprocedural Sparse Conditional Constant Propagation",
false, false)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(IPSCCP, "ipsccp",
"Interprocedural Sparse Conditional Constant Propagation",
false, false)
@@ -1690,9 +1691,9 @@ static bool AddressIsTaken(const GlobalValue *GV) {
}
bool IPSCCP::runOnModule(Module &M) {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
- const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
+ const DataLayout &DL = M.getDataLayout();
+ const TargetLibraryInfo *TLI =
+ &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
SCCPSolver Solver(DL, TLI);
// AddressTakenFunctions - This set keeps track of the address-taken functions
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index ed161fd4af3e..056dd11b5ab3 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -247,7 +247,7 @@ public:
/// hold.
void insert(ArrayRef<Slice> NewSlices) {
int OldSize = Slices.size();
- std::move(NewSlices.begin(), NewSlices.end(), std::back_inserter(Slices));
+ Slices.append(NewSlices.begin(), NewSlices.end());
auto SliceI = Slices.begin() + OldSize;
std::sort(SliceI, Slices.end());
std::inplace_merge(Slices.begin(), SliceI, Slices.end());
@@ -701,6 +701,7 @@ private:
// by writing out the code here where we have the underlying allocation
// size readily available.
APInt GEPOffset = Offset;
+ const DataLayout &DL = GEPI.getModule()->getDataLayout();
for (gep_type_iterator GTI = gep_type_begin(GEPI),
GTE = gep_type_end(GEPI);
GTI != GTE; ++GTI) {
@@ -750,6 +751,7 @@ private:
if (!IsOffsetKnown)
return PI.setAborted(&LI);
+ const DataLayout &DL = LI.getModule()->getDataLayout();
uint64_t Size = DL.getTypeStoreSize(LI.getType());
return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
}
@@ -761,6 +763,7 @@ private:
if (!IsOffsetKnown)
return PI.setAborted(&SI);
+ const DataLayout &DL = SI.getModule()->getDataLayout();
uint64_t Size = DL.getTypeStoreSize(ValOp->getType());
// If this memory access can be shown to *statically* extend outside the
@@ -898,6 +901,7 @@ private:
SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
Visited.insert(Root);
Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
+ const DataLayout &DL = Root->getModule()->getDataLayout();
// If there are no loads or stores, the access is dead. We mark that as
// a size zero access.
Size = 0;
@@ -1084,7 +1088,8 @@ class AllocaPromoter : public LoadAndStorePromoter {
SmallVector<DbgValueInst *, 4> DVIs;
public:
- AllocaPromoter(const SmallVectorImpl<Instruction *> &Insts, SSAUpdater &S,
+ AllocaPromoter(ArrayRef<const Instruction *> Insts,
+ SSAUpdater &S,
AllocaInst &AI, DIBuilder &DIB)
: LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {}
@@ -1092,8 +1097,8 @@ public:
// Retain the debug information attached to the alloca for use when
// rewriting loads and stores.
if (auto *L = LocalAsMetadata::getIfExists(&AI)) {
- if (auto *DebugNode = MetadataAsValue::getIfExists(AI.getContext(), L)) {
- for (User *U : DebugNode->users())
+ if (auto *DINode = MetadataAsValue::getIfExists(AI.getContext(), L)) {
+ for (User *U : DINode->users())
if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
DDIs.push_back(DDI);
else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
@@ -1162,10 +1167,9 @@ public:
} else {
continue;
}
- Instruction *DbgVal =
- DIB.insertDbgValueIntrinsic(Arg, 0, DIVariable(DVI->getVariable()),
- DIExpression(DVI->getExpression()), Inst);
- DbgVal->setDebugLoc(DVI->getDebugLoc());
+ DIB.insertDbgValueIntrinsic(Arg, 0, DVI->getVariable(),
+ DVI->getExpression(), DVI->getDebugLoc(),
+ Inst);
}
}
};
@@ -1194,7 +1198,6 @@ class SROA : public FunctionPass {
const bool RequiresDomTree;
LLVMContext *C;
- const DataLayout *DL;
DominatorTree *DT;
AssumptionCache *AC;
@@ -1243,7 +1246,7 @@ class SROA : public FunctionPass {
public:
SROA(bool RequiresDomTree = true)
: FunctionPass(ID), RequiresDomTree(RequiresDomTree), C(nullptr),
- DL(nullptr), DT(nullptr) {
+ DT(nullptr) {
initializeSROAPass(*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F) override;
@@ -1257,8 +1260,8 @@ private:
friend class AllocaSliceRewriter;
bool presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS);
- bool rewritePartition(AllocaInst &AI, AllocaSlices &AS,
- AllocaSlices::Partition &P);
+ AllocaInst *rewritePartition(AllocaInst &AI, AllocaSlices &AS,
+ AllocaSlices::Partition &P);
bool splitAlloca(AllocaInst &AI, AllocaSlices &AS);
bool runOnAlloca(AllocaInst &AI);
void clobberUse(Use &U);
@@ -1349,7 +1352,7 @@ static Type *findCommonType(AllocaSlices::const_iterator B,
///
/// FIXME: This should be hoisted into a generic utility, likely in
/// Transforms/Util/Local.h
-static bool isSafePHIToSpeculate(PHINode &PN, const DataLayout *DL = nullptr) {
+static bool isSafePHIToSpeculate(PHINode &PN) {
// For now, we can only do this promotion if the load is in the same block
// as the PHI, and if there are no stores between the phi and load.
// TODO: Allow recursive phi users.
@@ -1381,6 +1384,8 @@ static bool isSafePHIToSpeculate(PHINode &PN, const DataLayout *DL = nullptr) {
if (!HaveLoad)
return false;
+ const DataLayout &DL = PN.getModule()->getDataLayout();
+
// We can only transform this if it is safe to push the loads into the
// predecessor blocks. The only thing to watch out for is that we can't put
// a possibly trapping load in the predecessor if it is a critical edge.
@@ -1402,8 +1407,8 @@ static bool isSafePHIToSpeculate(PHINode &PN, const DataLayout *DL = nullptr) {
// If this pointer is always safe to load, or if we can prove that there
// is already a load in the block, then we can move the load to the pred
// block.
- if (InVal->isDereferenceablePointer(DL) ||
- isSafeToLoadUnconditionally(InVal, TI, MaxAlign, DL))
+ if (isDereferenceablePointer(InVal, DL) ||
+ isSafeToLoadUnconditionally(InVal, TI, MaxAlign))
continue;
return false;
@@ -1468,12 +1473,12 @@ static void speculatePHINodeLoads(PHINode &PN) {
///
/// We can do this to a select if its only uses are loads and if the operand
/// to the select can be loaded unconditionally.
-static bool isSafeSelectToSpeculate(SelectInst &SI,
- const DataLayout *DL = nullptr) {
+static bool isSafeSelectToSpeculate(SelectInst &SI) {
Value *TValue = SI.getTrueValue();
Value *FValue = SI.getFalseValue();
- bool TDerefable = TValue->isDereferenceablePointer(DL);
- bool FDerefable = FValue->isDereferenceablePointer(DL);
+ const DataLayout &DL = SI.getModule()->getDataLayout();
+ bool TDerefable = isDereferenceablePointer(TValue, DL);
+ bool FDerefable = isDereferenceablePointer(FValue, DL);
for (User *U : SI.users()) {
LoadInst *LI = dyn_cast<LoadInst>(U);
@@ -1484,10 +1489,10 @@ static bool isSafeSelectToSpeculate(SelectInst &SI,
// absolutely (e.g. allocas) or at this point because we can see other
// accesses to it.
if (!TDerefable &&
- !isSafeToLoadUnconditionally(TValue, LI, LI->getAlignment(), DL))
+ !isSafeToLoadUnconditionally(TValue, LI, LI->getAlignment()))
return false;
if (!FDerefable &&
- !isSafeToLoadUnconditionally(FValue, LI, LI->getAlignment(), DL))
+ !isSafeToLoadUnconditionally(FValue, LI, LI->getAlignment()))
return false;
}
@@ -1547,7 +1552,8 @@ static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
return BasePtr;
- return IRB.CreateInBoundsGEP(BasePtr, Indices, NamePrefix + "sroa_idx");
+ return IRB.CreateInBoundsGEP(nullptr, BasePtr, Indices,
+ NamePrefix + "sroa_idx");
}
/// \brief Get a natural GEP off of the BasePtr walking through Ty toward
@@ -1798,7 +1804,8 @@ static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
OffsetPtr = Int8PtrOffset == 0
? Int8Ptr
- : IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset),
+ : IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Int8Ptr,
+ IRB.getInt(Int8PtrOffset),
NamePrefix + "sroa_raw_idx");
}
Ptr = OffsetPtr;
@@ -3245,7 +3252,8 @@ private:
void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
assert(Ty->isSingleValueType());
// Load the single value and insert it using the indices.
- Value *GEP = IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep");
+ Value *GEP =
+ IRB.CreateInBoundsGEP(nullptr, Ptr, GEPIndices, Name + ".gep");
Value *Load = IRB.CreateLoad(GEP, Name + ".load");
Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
DEBUG(dbgs() << " to: " << *Load << "\n");
@@ -3278,7 +3286,7 @@ private:
// Extract the single value and store it using the indices.
Value *Store = IRB.CreateStore(
IRB.CreateExtractValue(Agg, Indices, Name + ".extract"),
- IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep"));
+ IRB.CreateInBoundsGEP(nullptr, Ptr, GEPIndices, Name + ".gep"));
(void)Store;
DEBUG(dbgs() << " to: " << *Store << "\n");
}
@@ -3699,6 +3707,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
// them to the alloca slices.
SmallDenseMap<LoadInst *, std::vector<LoadInst *>, 1> SplitLoadsMap;
std::vector<LoadInst *> SplitLoads;
+ const DataLayout &DL = AI.getModule()->getDataLayout();
for (LoadInst *LI : Loads) {
SplitLoads.clear();
@@ -3724,10 +3733,10 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8);
auto *PartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace());
LoadInst *PLoad = IRB.CreateAlignedLoad(
- getAdjustedPtr(IRB, *DL, BasePtr,
- APInt(DL->getPointerSizeInBits(), PartOffset),
+ getAdjustedPtr(IRB, DL, BasePtr,
+ APInt(DL.getPointerSizeInBits(), PartOffset),
PartPtrTy, BasePtr->getName() + "."),
- getAdjustedAlignment(LI, PartOffset, *DL), /*IsVolatile*/ false,
+ getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false,
LI->getName());
// Append this load onto the list of split loads so we can find it later
@@ -3777,10 +3786,10 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
PLoad->getType()->getPointerTo(SI->getPointerAddressSpace());
StoreInst *PStore = IRB.CreateAlignedStore(
- PLoad, getAdjustedPtr(IRB, *DL, StoreBasePtr,
- APInt(DL->getPointerSizeInBits(), PartOffset),
+ PLoad, getAdjustedPtr(IRB, DL, StoreBasePtr,
+ APInt(DL.getPointerSizeInBits(), PartOffset),
PartPtrTy, StoreBasePtr->getName() + "."),
- getAdjustedAlignment(SI, PartOffset, *DL), /*IsVolatile*/ false);
+ getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);
(void)PStore;
DEBUG(dbgs() << " +" << PartOffset << ":" << *PStore << "\n");
}
@@ -3857,20 +3866,20 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
} else {
IRB.SetInsertPoint(BasicBlock::iterator(LI));
PLoad = IRB.CreateAlignedLoad(
- getAdjustedPtr(IRB, *DL, LoadBasePtr,
- APInt(DL->getPointerSizeInBits(), PartOffset),
+ getAdjustedPtr(IRB, DL, LoadBasePtr,
+ APInt(DL.getPointerSizeInBits(), PartOffset),
PartPtrTy, LoadBasePtr->getName() + "."),
- getAdjustedAlignment(LI, PartOffset, *DL), /*IsVolatile*/ false,
+ getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false,
LI->getName());
}
// And store this partition.
IRB.SetInsertPoint(BasicBlock::iterator(SI));
StoreInst *PStore = IRB.CreateAlignedStore(
- PLoad, getAdjustedPtr(IRB, *DL, StoreBasePtr,
- APInt(DL->getPointerSizeInBits(), PartOffset),
+ PLoad, getAdjustedPtr(IRB, DL, StoreBasePtr,
+ APInt(DL.getPointerSizeInBits(), PartOffset),
PartPtrTy, StoreBasePtr->getName() + "."),
- getAdjustedAlignment(SI, PartOffset, *DL), /*IsVolatile*/ false);
+ getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);
// Now build a new slice for the alloca.
NewSlices.push_back(
@@ -3964,31 +3973,32 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
/// appropriate new offsets. It also evaluates how successful the rewrite was
/// at enabling promotion and if it was successful queues the alloca to be
/// promoted.
-bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
- AllocaSlices::Partition &P) {
+AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
+ AllocaSlices::Partition &P) {
// Try to compute a friendly type for this partition of the alloca. This
// won't always succeed, in which case we fall back to a legal integer type
// or an i8 array of an appropriate size.
Type *SliceTy = nullptr;
+ const DataLayout &DL = AI.getModule()->getDataLayout();
if (Type *CommonUseTy = findCommonType(P.begin(), P.end(), P.endOffset()))
- if (DL->getTypeAllocSize(CommonUseTy) >= P.size())
+ if (DL.getTypeAllocSize(CommonUseTy) >= P.size())
SliceTy = CommonUseTy;
if (!SliceTy)
- if (Type *TypePartitionTy = getTypePartition(*DL, AI.getAllocatedType(),
+ if (Type *TypePartitionTy = getTypePartition(DL, AI.getAllocatedType(),
P.beginOffset(), P.size()))
SliceTy = TypePartitionTy;
if ((!SliceTy || (SliceTy->isArrayTy() &&
SliceTy->getArrayElementType()->isIntegerTy())) &&
- DL->isLegalInteger(P.size() * 8))
+ DL.isLegalInteger(P.size() * 8))
SliceTy = Type::getIntNTy(*C, P.size() * 8);
if (!SliceTy)
SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size());
- assert(DL->getTypeAllocSize(SliceTy) >= P.size());
+ assert(DL.getTypeAllocSize(SliceTy) >= P.size());
- bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, *DL);
+ bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL);
VectorType *VecTy =
- IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, *DL);
+ IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, DL);
if (VecTy)
SliceTy = VecTy;
@@ -4003,18 +4013,19 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
NewAI = &AI;
// FIXME: We should be able to bail at this point with "nothing changed".
// FIXME: We might want to defer PHI speculation until after here.
+ // FIXME: return nullptr;
} else {
unsigned Alignment = AI.getAlignment();
if (!Alignment) {
// The minimum alignment which users can rely on when the explicit
// alignment is omitted or zero is that required by the ABI for this
// type.
- Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
+ Alignment = DL.getABITypeAlignment(AI.getAllocatedType());
}
Alignment = MinAlign(Alignment, P.beginOffset());
// If we will get at least this much alignment from the type alone, leave
// the alloca's alignment unconstrained.
- if (Alignment <= DL->getABITypeAlignment(SliceTy))
+ if (Alignment <= DL.getABITypeAlignment(SliceTy))
Alignment = 0;
NewAI = new AllocaInst(
SliceTy, nullptr, Alignment,
@@ -4034,7 +4045,7 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
SmallPtrSet<PHINode *, 8> PHIUsers;
SmallPtrSet<SelectInst *, 8> SelectUsers;
- AllocaSliceRewriter Rewriter(*DL, AS, *this, AI, *NewAI, P.beginOffset(),
+ AllocaSliceRewriter Rewriter(DL, AS, *this, AI, *NewAI, P.beginOffset(),
P.endOffset(), IsIntegerPromotable, VecTy,
PHIUsers, SelectUsers);
bool Promotable = true;
@@ -4056,7 +4067,7 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
for (SmallPtrSetImpl<PHINode *>::iterator I = PHIUsers.begin(),
E = PHIUsers.end();
I != E; ++I)
- if (!isSafePHIToSpeculate(**I, DL)) {
+ if (!isSafePHIToSpeculate(**I)) {
Promotable = false;
PHIUsers.clear();
SelectUsers.clear();
@@ -4065,7 +4076,7 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
for (SmallPtrSetImpl<SelectInst *>::iterator I = SelectUsers.begin(),
E = SelectUsers.end();
I != E; ++I)
- if (!isSafeSelectToSpeculate(**I, DL)) {
+ if (!isSafeSelectToSpeculate(**I)) {
Promotable = false;
PHIUsers.clear();
SelectUsers.clear();
@@ -4098,7 +4109,7 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
PostPromotionWorklist.pop_back();
}
- return true;
+ return NewAI;
}
/// \brief Walks the slices of an alloca and form partitions based on them,
@@ -4109,6 +4120,7 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
unsigned NumPartitions = 0;
bool Changed = false;
+ const DataLayout &DL = AI.getModule()->getDataLayout();
// First try to pre-split loads and stores.
Changed |= presplitLoadsAndStores(AI, AS);
@@ -4126,7 +4138,7 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
// confident that the above handling of splittable loads and stores is
// completely sufficient before we forcibly disable the remaining handling.
if (S.beginOffset() == 0 &&
- S.endOffset() >= DL->getTypeAllocSize(AI.getAllocatedType()))
+ S.endOffset() >= DL.getTypeAllocSize(AI.getAllocatedType()))
continue;
if (isa<LoadInst>(S.getUse()->getUser()) ||
isa<StoreInst>(S.getUse()->getUser())) {
@@ -4137,9 +4149,29 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
if (!IsSorted)
std::sort(AS.begin(), AS.end());
+ /// \brief Describes the allocas introduced by rewritePartition
+ /// in order to migrate the debug info.
+ struct Piece {
+ AllocaInst *Alloca;
+ uint64_t Offset;
+ uint64_t Size;
+ Piece(AllocaInst *AI, uint64_t O, uint64_t S)
+ : Alloca(AI), Offset(O), Size(S) {}
+ };
+ SmallVector<Piece, 4> Pieces;
+
// Rewrite each partition.
for (auto &P : AS.partitions()) {
- Changed |= rewritePartition(AI, AS, P);
+ if (AllocaInst *NewAI = rewritePartition(AI, AS, P)) {
+ Changed = true;
+ if (NewAI != &AI) {
+ uint64_t SizeOfByte = 8;
+ uint64_t AllocaSize = DL.getTypeSizeInBits(NewAI->getAllocatedType());
+ // Don't include any padding.
+ uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte);
+ Pieces.push_back(Piece(NewAI, P.beginOffset() * SizeOfByte, Size));
+ }
+ }
++NumPartitions;
}
@@ -4147,6 +4179,42 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
MaxPartitionsPerAlloca =
std::max<unsigned>(NumPartitions, MaxPartitionsPerAlloca);
+ // Migrate debug information from the old alloca to the new alloca(s)
+  // and the individual partitions.
+ if (DbgDeclareInst *DbgDecl = FindAllocaDbgDeclare(&AI)) {
+ auto *Var = DbgDecl->getVariable();
+ auto *Expr = DbgDecl->getExpression();
+ DIBuilder DIB(*AI.getParent()->getParent()->getParent(),
+ /*AllowUnresolved*/ false);
+ bool IsSplit = Pieces.size() > 1;
+ for (auto Piece : Pieces) {
+ // Create a piece expression describing the new partition or reuse AI's
+ // expression if there is only one partition.
+ auto *PieceExpr = Expr;
+ if (IsSplit || Expr->isBitPiece()) {
+ // If this alloca is already a scalar replacement of a larger aggregate,
+ // Piece.Offset describes the offset inside the scalar.
+ uint64_t Offset = Expr->isBitPiece() ? Expr->getBitPieceOffset() : 0;
+ uint64_t Start = Offset + Piece.Offset;
+ uint64_t Size = Piece.Size;
+ if (Expr->isBitPiece()) {
+ uint64_t AbsEnd = Expr->getBitPieceOffset() + Expr->getBitPieceSize();
+ if (Start >= AbsEnd)
+ // No need to describe a SROAed padding.
+ continue;
+ Size = std::min(Size, AbsEnd - Start);
+ }
+ PieceExpr = DIB.createBitPieceExpression(Start, Size);
+ }
+
+ // Remove any existing dbg.declare intrinsic describing the same alloca.
+ if (DbgDeclareInst *OldDDI = FindAllocaDbgDeclare(Piece.Alloca))
+ OldDDI->eraseFromParent();
+
+ DIB.insertDeclare(Piece.Alloca, Var, PieceExpr, DbgDecl->getDebugLoc(),
+ &AI);
+ }
+ }
return Changed;
}
@@ -4179,21 +4247,22 @@ bool SROA::runOnAlloca(AllocaInst &AI) {
AI.eraseFromParent();
return true;
}
+ const DataLayout &DL = AI.getModule()->getDataLayout();
// Skip alloca forms that this analysis can't handle.
if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() ||
- DL->getTypeAllocSize(AI.getAllocatedType()) == 0)
+ DL.getTypeAllocSize(AI.getAllocatedType()) == 0)
return false;
bool Changed = false;
// First, split any FCA loads and stores touching this alloca to promote
// better splitting and promotion opportunities.
- AggLoadStoreRewriter AggRewriter(*DL);
+ AggLoadStoreRewriter AggRewriter(DL);
Changed |= AggRewriter.rewrite(AI);
// Build the slices using a recursive instruction-visiting builder.
- AllocaSlices AS(*DL, AI);
+ AllocaSlices AS(DL, AI);
DEBUG(AS.print(dbgs()));
if (AS.isEscaped())
return Changed;
@@ -4258,8 +4327,11 @@ void SROA::deleteDeadInstructions(
DeadInsts.insert(U);
}
- if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
DeletedAllocas.insert(AI);
+ if (DbgDeclareInst *DbgDecl = FindAllocaDbgDeclare(AI))
+ DbgDecl->eraseFromParent();
+ }
++NumDeleted;
I->eraseFromParent();
@@ -4363,12 +4435,6 @@ bool SROA::runOnFunction(Function &F) {
DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
C = &F.getContext();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP) {
- DEBUG(dbgs() << " Skipping SROA -- no target data!\n");
- return false;
- }
- DL = &DLP->getDataLayout();
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : nullptr;
@@ -4376,9 +4442,10 @@ bool SROA::runOnFunction(Function &F) {
BasicBlock &EntryBB = F.getEntryBlock();
for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
- I != E; ++I)
+ I != E; ++I) {
if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
Worklist.insert(AI);
+ }
bool Changed = false;
// A set of deleted alloca instruction pointers which should be removed from
diff --git a/lib/Transforms/Scalar/SampleProfile.cpp b/lib/Transforms/Scalar/SampleProfile.cpp
index 179bbf78366d..3480cd499127 100644
--- a/lib/Transforms/Scalar/SampleProfile.cpp
+++ b/lib/Transforms/Scalar/SampleProfile.cpp
@@ -95,7 +95,7 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
- AU.addRequired<LoopInfo>();
+ AU.addRequired<LoopInfoWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<PostDominatorTree>();
}
@@ -217,13 +217,16 @@ void SampleProfileLoader::printBlockWeight(raw_ostream &OS, BasicBlock *BB) {
/// \returns The profiled weight of I.
unsigned SampleProfileLoader::getInstWeight(Instruction &Inst) {
DebugLoc DLoc = Inst.getDebugLoc();
+ if (!DLoc)
+ return 0;
+
unsigned Lineno = DLoc.getLine();
if (Lineno < HeaderLineno)
return 0;
- DILocation DIL(DLoc.getAsMDNode(*Ctx));
+ const DILocation *DIL = DLoc;
int LOffset = Lineno - HeaderLineno;
- unsigned Discriminator = DIL.getDiscriminator();
+ unsigned Discriminator = DIL->getDiscriminator();
unsigned Weight = Samples->samplesAt(LOffset, Discriminator);
DEBUG(dbgs() << " " << Lineno << "." << Discriminator << ":" << Inst
<< " (line offset: " << LOffset << "." << Discriminator
@@ -577,6 +580,10 @@ void SampleProfileLoader::propagateWeights(Function &F) {
bool Changed = true;
unsigned i = 0;
+ // Add an entry count to the function using the samples gathered
+ // at the function entry.
+ F.setEntryCount(Samples->getHeadSamples());
+
// Before propagation starts, build, for each block, a list of
// unique predecessors and successors. This is necessary to handle
// identical edges in multiway branches. Since we visit all blocks and all
@@ -639,9 +646,8 @@ void SampleProfileLoader::propagateWeights(Function &F) {
/// \returns the line number where \p F is defined. If it returns 0,
/// it means that there is no debug information available for \p F.
unsigned SampleProfileLoader::getFunctionLoc(Function &F) {
- DISubprogram S = getDISubprogram(&F);
- if (S.isSubprogram())
- return S.getLineNumber();
+ if (DISubprogram *S = getDISubprogram(&F))
+ return S->getLine();
// If could not find the start of \p F, emit a diagnostic to inform the user
// about the missed opportunity.
@@ -731,7 +737,7 @@ INITIALIZE_PASS_BEGIN(SampleProfileLoader, "sample-profile",
"Sample Profile loader", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTree)
-INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AddDiscriminators)
INITIALIZE_PASS_END(SampleProfileLoader, "sample-profile",
"Sample Profile loader", false, false)
@@ -762,7 +768,7 @@ bool SampleProfileLoader::runOnFunction(Function &F) {
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
PDT = &getAnalysis<PostDominatorTree>();
- LI = &getAnalysis<LoopInfo>();
+ LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
Ctx = &F.getParent()->getContext();
Samples = Reader->getSamplesFor(F);
if (!Samples->empty())
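The SampleProfile changes migrate to the new debug-info metadata classes: the loader now guards against instructions that carry no location and then uses the DebugLoc directly as a DILocation pointer. A small sketch of that pattern, assuming only the calls shown in the hunks above (the helper name is invented):

#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

static unsigned discriminatorOf(const Instruction &Inst) {
  DebugLoc DLoc = Inst.getDebugLoc();
  if (!DLoc)                    // instructions may carry no location at all
    return 0;
  const DILocation *DIL = DLoc; // DebugLoc converts to DILocation* directly
  return DIL->getDiscriminator();
}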
diff --git a/lib/Transforms/Scalar/Scalar.cpp b/lib/Transforms/Scalar/Scalar.cpp
index a16e9e29a1f1..d5d360571f88 100644
--- a/lib/Transforms/Scalar/Scalar.cpp
+++ b/lib/Transforms/Scalar/Scalar.cpp
@@ -20,7 +20,7 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
-#include "llvm/PassManager.h"
+#include "llvm/IR/LegacyPassManager.h"
using namespace llvm;
@@ -28,6 +28,7 @@ using namespace llvm;
/// ScalarOpts library.
void llvm::initializeScalarOpts(PassRegistry &Registry) {
initializeADCEPass(Registry);
+ initializeBDCEPass(Registry);
initializeAlignmentFromAssumptionsPass(Registry);
initializeSampleProfileLoaderPass(Registry);
initializeConstantHoistingPass(Registry);
@@ -38,13 +39,16 @@ void llvm::initializeScalarOpts(PassRegistry &Registry) {
initializeScalarizerPass(Registry);
initializeDSEPass(Registry);
initializeGVNPass(Registry);
- initializeEarlyCSEPass(Registry);
+ initializeEarlyCSELegacyPassPass(Registry);
initializeFlattenCFGPassPass(Registry);
+ initializeInductiveRangeCheckEliminationPass(Registry);
initializeIndVarSimplifyPass(Registry);
initializeJumpThreadingPass(Registry);
initializeLICMPass(Registry);
initializeLoopDeletionPass(Registry);
+ initializeLoopAccessAnalysisPass(Registry);
initializeLoopInstSimplifyPass(Registry);
+ initializeLoopInterchangePass(Registry);
initializeLoopRotatePass(Registry);
initializeLoopStrengthReducePass(Registry);
initializeLoopRerollPass(Registry);
@@ -55,9 +59,11 @@ void llvm::initializeScalarOpts(PassRegistry &Registry) {
initializeLowerExpectIntrinsicPass(Registry);
initializeMemCpyOptPass(Registry);
initializeMergedLoadStoreMotionPass(Registry);
+ initializeNaryReassociatePass(Registry);
initializePartiallyInlineLibCallsPass(Registry);
initializeReassociatePass(Registry);
initializeRegToMemPass(Registry);
+ initializeRewriteStatepointsForGCPass(Registry);
initializeSCCPPass(Registry);
initializeIPSCCPPass(Registry);
initializeSROAPass(Registry);
@@ -68,7 +74,13 @@ void llvm::initializeScalarOpts(PassRegistry &Registry) {
initializeSinkingPass(Registry);
initializeTailCallElimPass(Registry);
initializeSeparateConstOffsetFromGEPPass(Registry);
+ initializeSpeculativeExecutionPass(Registry);
+ initializeStraightLineStrengthReducePass(Registry);
initializeLoadCombinePass(Registry);
+ initializePlaceBackedgeSafepointsImplPass(Registry);
+ initializePlaceSafepointsPass(Registry);
+ initializeFloat2IntPass(Registry);
+ initializeLoopDistributePass(Registry);
}
void LLVMInitializeScalarOpts(LLVMPassRegistryRef R) {
@@ -79,6 +91,10 @@ void LLVMAddAggressiveDCEPass(LLVMPassManagerRef PM) {
unwrap(PM)->add(createAggressiveDCEPass());
}
+void LLVMAddBitTrackingDCEPass(LLVMPassManagerRef PM) {
+ unwrap(PM)->add(createBitTrackingDCEPass());
+}
+
void LLVMAddAlignmentFromAssumptionsPass(LLVMPassManagerRef PM) {
unwrap(PM)->add(createAlignmentFromAssumptionsPass());
}
@@ -198,7 +214,6 @@ void LLVMAddDemoteMemoryToRegisterPass(LLVMPassManagerRef PM) {
void LLVMAddVerifierPass(LLVMPassManagerRef PM) {
unwrap(PM)->add(createVerifierPass());
- // FIXME: should this also add createDebugInfoVerifierPass()?
}
void LLVMAddCorrelatedValuePropagationPass(LLVMPassManagerRef PM) {
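Scalar.cpp gains registrations for the new passes plus a C-API wrapper for BDCE. Assuming the matching declaration was added to llvm-c/Transforms/Scalar.h in the same import (that header is not part of these hunks), a C-API client would schedule it like any other scalar pass:

#include "llvm-c/Transforms/Scalar.h"

void addDCEPasses(LLVMPassManagerRef PM) {
  LLVMAddAggressiveDCEPass(PM);
  LLVMAddBitTrackingDCEPass(PM); // wrapper defined in the hunk above
}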
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 5c49a5504b47..d955da7ce75d 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -89,7 +89,6 @@ namespace {
private:
bool HasDomTree;
- const DataLayout *DL;
/// DeadInsts - Keep track of instructions we have made dead, so that
/// we can remove them after we are done working.
@@ -159,9 +158,10 @@ namespace {
void isSafeMemAccess(uint64_t Offset, uint64_t MemSize,
Type *MemOpType, bool isStore, AllocaInfo &Info,
Instruction *TheAccess, bool AllowWholeAccess);
- bool TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size);
- uint64_t FindElementAndOffset(Type *&T, uint64_t &Offset,
- Type *&IdxTy);
+ bool TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size,
+ const DataLayout &DL);
+ uint64_t FindElementAndOffset(Type *&T, uint64_t &Offset, Type *&IdxTy,
+ const DataLayout &DL);
void DoScalarReplacement(AllocaInst *AI,
std::vector<AllocaInst*> &WorkList);
@@ -699,9 +699,9 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
// If the source and destination are both to the same alloca, then this is
// a noop copy-to-self, just delete it. Otherwise, emit a load and store
// as appropriate.
- AllocaInst *OrigAI = cast<AllocaInst>(GetUnderlyingObject(Ptr, &DL, 0));
+ AllocaInst *OrigAI = cast<AllocaInst>(GetUnderlyingObject(Ptr, DL, 0));
- if (GetUnderlyingObject(MTI->getSource(), &DL, 0) != OrigAI) {
+ if (GetUnderlyingObject(MTI->getSource(), DL, 0) != OrigAI) {
// Dest must be OrigAI, change this to be a load from the original
// pointer (bitcasted), then a store to our new alloca.
assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
@@ -717,7 +717,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
SrcVal->setAlignment(MTI->getAlignment());
Builder.CreateStore(SrcVal, NewAI);
- } else if (GetUnderlyingObject(MTI->getDest(), &DL, 0) != OrigAI) {
+ } else if (GetUnderlyingObject(MTI->getDest(), DL, 0) != OrigAI) {
// Src must be OrigAI, change this to be a load from NewAI then a store
// through the original dest pointer (bitcasted).
assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
@@ -1032,17 +1032,8 @@ bool SROA::runOnFunction(Function &F) {
if (skipOptnoneFunction(F))
return false;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
-
bool Changed = performPromotion(F);
- // FIXME: ScalarRepl currently depends on DataLayout more than it
- // theoretically needs to. It should be refactored in order to support
- // target-independent IR. Until this is done, just skip the actual
- // scalar-replacement portion of this pass.
- if (!DL) return Changed;
-
while (1) {
bool LocalChange = performScalarRepl(F);
if (!LocalChange) break; // No need to repromote if no scalarrepl
@@ -1061,7 +1052,7 @@ class AllocaPromoter : public LoadAndStorePromoter {
SmallVector<DbgDeclareInst *, 4> DDIs;
SmallVector<DbgValueInst *, 4> DVIs;
public:
- AllocaPromoter(const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S,
+ AllocaPromoter(ArrayRef<Instruction*> Insts, SSAUpdater &S,
DIBuilder *DB)
: LoadAndStorePromoter(Insts, S), AI(nullptr), DIB(DB) {}
@@ -1069,8 +1060,8 @@ public:
// Remember which alloca we're promoting (for isInstInList).
this->AI = AI;
if (auto *L = LocalAsMetadata::getIfExists(AI)) {
- if (auto *DebugNode = MetadataAsValue::getIfExists(AI->getContext(), L)) {
- for (User *U : DebugNode->users())
+ if (auto *DINode = MetadataAsValue::getIfExists(AI->getContext(), L)) {
+ for (User *U : DINode->users())
if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
DDIs.push_back(DDI);
else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
@@ -1126,10 +1117,9 @@ public:
} else {
continue;
}
- Instruction *DbgVal = DIB->insertDbgValueIntrinsic(
- Arg, 0, DIVariable(DVI->getVariable()),
- DIExpression(DVI->getExpression()), Inst);
- DbgVal->setDebugLoc(DVI->getDebugLoc());
+ DIB->insertDbgValueIntrinsic(Arg, 0, DVI->getVariable(),
+ DVI->getExpression(), DVI->getDebugLoc(),
+ Inst);
}
}
};
@@ -1148,9 +1138,10 @@ public:
///
/// We can do this to a select if its only uses are loads and if the operand to
/// the select can be loaded unconditionally.
-static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *DL) {
- bool TDerefable = SI->getTrueValue()->isDereferenceablePointer(DL);
- bool FDerefable = SI->getFalseValue()->isDereferenceablePointer(DL);
+static bool isSafeSelectToSpeculate(SelectInst *SI) {
+ const DataLayout &DL = SI->getModule()->getDataLayout();
+ bool TDerefable = isDereferenceablePointer(SI->getTrueValue(), DL);
+ bool FDerefable = isDereferenceablePointer(SI->getFalseValue(), DL);
for (User *U : SI->users()) {
LoadInst *LI = dyn_cast<LoadInst>(U);
@@ -1158,11 +1149,13 @@ static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *DL) {
    // Both operands to the select need to be dereferenceable, either absolutely
// (e.g. allocas) or at this point because we can see other accesses to it.
- if (!TDerefable && !isSafeToLoadUnconditionally(SI->getTrueValue(), LI,
- LI->getAlignment(), DL))
+ if (!TDerefable &&
+ !isSafeToLoadUnconditionally(SI->getTrueValue(), LI,
+ LI->getAlignment()))
return false;
- if (!FDerefable && !isSafeToLoadUnconditionally(SI->getFalseValue(), LI,
- LI->getAlignment(), DL))
+ if (!FDerefable &&
+ !isSafeToLoadUnconditionally(SI->getFalseValue(), LI,
+ LI->getAlignment()))
return false;
}
@@ -1185,7 +1178,7 @@ static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *DL) {
///
/// We can do this to a select if its only uses are loads and if the operand to
/// the select can be loaded unconditionally.
-static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *DL) {
+static bool isSafePHIToSpeculate(PHINode *PN) {
// For now, we can only do this promotion if the load is in the same block as
// the PHI, and if there are no stores between the phi and load.
// TODO: Allow recursive phi users.
@@ -1209,6 +1202,8 @@ static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *DL) {
MaxAlign = std::max(MaxAlign, LI->getAlignment());
}
+ const DataLayout &DL = PN->getModule()->getDataLayout();
+
// Okay, we know that we have one or more loads in the same block as the PHI.
// We can transform this if it is safe to push the loads into the predecessor
// blocks. The only thing to watch out for is that we can't put a possibly
@@ -1233,8 +1228,8 @@ static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *DL) {
// If this pointer is always safe to load, or if we can prove that there is
// already a load in the block, then we can move the load to the pred block.
- if (InVal->isDereferenceablePointer(DL) ||
- isSafeToLoadUnconditionally(InVal, Pred->getTerminator(), MaxAlign, DL))
+ if (isDereferenceablePointer(InVal, DL) ||
+ isSafeToLoadUnconditionally(InVal, Pred->getTerminator(), MaxAlign))
continue;
return false;
@@ -1248,7 +1243,7 @@ static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *DL) {
/// direct (non-volatile) loads and stores to it. If the alloca is close but
/// not quite there, this will transform the code to allow promotion. As such,
/// it is a non-pure predicate.
-static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *DL) {
+static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout &DL) {
SetVector<Instruction*, SmallVector<Instruction*, 4>,
SmallPtrSet<Instruction*, 4> > InstsToRewrite;
for (User *U : AI->users()) {
@@ -1279,7 +1274,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *DL) {
// If it is safe to turn "load (select c, AI, ptr)" into a select of two
// loads, then we can transform this by rewriting the select.
- if (!isSafeSelectToSpeculate(SI, DL))
+ if (!isSafeSelectToSpeculate(SI))
return false;
InstsToRewrite.insert(SI);
@@ -1294,7 +1289,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *DL) {
// If it is safe to turn "load (phi [AI, ptr, ...])" into a PHI of loads
// in the pred blocks, then we can transform this by rewriting the PHI.
- if (!isSafePHIToSpeculate(PN, DL))
+ if (!isSafePHIToSpeculate(PN))
return false;
InstsToRewrite.insert(PN);
@@ -1416,6 +1411,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *DL) {
bool SROA::performPromotion(Function &F) {
std::vector<AllocaInst*> Allocas;
+ const DataLayout &DL = F.getParent()->getDataLayout();
DominatorTree *DT = nullptr;
if (HasDomTree)
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
@@ -1479,6 +1475,7 @@ bool SROA::ShouldAttemptScalarRepl(AllocaInst *AI) {
//
bool SROA::performScalarRepl(Function &F) {
std::vector<AllocaInst*> WorkList;
+ const DataLayout &DL = F.getParent()->getDataLayout();
// Scan the entry basic block, adding allocas to the worklist.
BasicBlock &BB = F.getEntryBlock();
@@ -1508,7 +1505,7 @@ bool SROA::performScalarRepl(Function &F) {
// transform the allocation instruction if it is an array allocation
// (allocations OF arrays are ok though), and an allocation of a scalar
// value cannot be decomposed at all.
- uint64_t AllocaSize = DL->getTypeAllocSize(AI->getAllocatedType());
+ uint64_t AllocaSize = DL.getTypeAllocSize(AI->getAllocatedType());
// Do not promote [0 x %struct].
if (AllocaSize == 0) continue;
@@ -1531,8 +1528,9 @@ bool SROA::performScalarRepl(Function &F) {
// promoted itself. If so, we don't want to transform it needlessly. Note
// that we can't just check based on the type: the alloca may be of an i32
// but that has pointer arithmetic to set byte 3 of it or something.
- if (AllocaInst *NewAI = ConvertToScalarInfo(
- (unsigned)AllocaSize, *DL, ScalarLoadThreshold).TryConvert(AI)) {
+ if (AllocaInst *NewAI =
+ ConvertToScalarInfo((unsigned)AllocaSize, DL, ScalarLoadThreshold)
+ .TryConvert(AI)) {
NewAI->takeName(AI);
AI->eraseFromParent();
++NumConverted;
@@ -1610,6 +1608,7 @@ void SROA::DeleteDeadInstructions() {
/// referenced by this instruction.
void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
AllocaInfo &Info) {
+ const DataLayout &DL = I->getModule()->getDataLayout();
for (Use &U : I->uses()) {
Instruction *User = cast<Instruction>(U.getUser());
@@ -1632,8 +1631,8 @@ void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
if (!LI->isSimple())
return MarkUnsafe(Info, User);
Type *LIType = LI->getType();
- isSafeMemAccess(Offset, DL->getTypeAllocSize(LIType),
- LIType, false, Info, LI, true /*AllowWholeAccess*/);
+ isSafeMemAccess(Offset, DL.getTypeAllocSize(LIType), LIType, false, Info,
+ LI, true /*AllowWholeAccess*/);
Info.hasALoadOrStore = true;
} else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
@@ -1642,8 +1641,8 @@ void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
return MarkUnsafe(Info, User);
Type *SIType = SI->getOperand(0)->getType();
- isSafeMemAccess(Offset, DL->getTypeAllocSize(SIType),
- SIType, true, Info, SI, true /*AllowWholeAccess*/);
+ isSafeMemAccess(Offset, DL.getTypeAllocSize(SIType), SIType, true, Info,
+ SI, true /*AllowWholeAccess*/);
Info.hasALoadOrStore = true;
} else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) {
if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
@@ -1675,6 +1674,7 @@ void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
if (!Info.CheckedPHIs.insert(PN).second)
return;
+ const DataLayout &DL = I->getModule()->getDataLayout();
for (User *U : I->users()) {
Instruction *UI = cast<Instruction>(U);
@@ -1691,8 +1691,8 @@ void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
if (!LI->isSimple())
return MarkUnsafe(Info, UI);
Type *LIType = LI->getType();
- isSafeMemAccess(Offset, DL->getTypeAllocSize(LIType),
- LIType, false, Info, LI, false /*AllowWholeAccess*/);
+ isSafeMemAccess(Offset, DL.getTypeAllocSize(LIType), LIType, false, Info,
+ LI, false /*AllowWholeAccess*/);
Info.hasALoadOrStore = true;
} else if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
@@ -1701,8 +1701,8 @@ void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
return MarkUnsafe(Info, UI);
Type *SIType = SI->getOperand(0)->getType();
- isSafeMemAccess(Offset, DL->getTypeAllocSize(SIType),
- SIType, true, Info, SI, false /*AllowWholeAccess*/);
+ isSafeMemAccess(Offset, DL.getTypeAllocSize(SIType), SIType, true, Info,
+ SI, false /*AllowWholeAccess*/);
Info.hasALoadOrStore = true;
} else if (isa<PHINode>(UI) || isa<SelectInst>(UI)) {
isSafePHISelectUseForScalarRepl(UI, Offset, Info);
@@ -1746,9 +1746,11 @@ void SROA::isSafeGEP(GetElementPtrInst *GEPI,
// constant part of the offset.
if (NonConstant)
Indices.pop_back();
- Offset += DL->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
- if (!TypeHasComponent(Info.AI->getAllocatedType(), Offset,
- NonConstantIdxSize))
+
+ const DataLayout &DL = GEPI->getModule()->getDataLayout();
+ Offset += DL.getIndexedOffset(GEPI->getPointerOperandType(), Indices);
+ if (!TypeHasComponent(Info.AI->getAllocatedType(), Offset, NonConstantIdxSize,
+ DL))
MarkUnsafe(Info, GEPI);
}
@@ -1803,9 +1805,10 @@ void SROA::isSafeMemAccess(uint64_t Offset, uint64_t MemSize,
Type *MemOpType, bool isStore,
AllocaInfo &Info, Instruction *TheAccess,
bool AllowWholeAccess) {
+ const DataLayout &DL = TheAccess->getModule()->getDataLayout();
// Check if this is a load/store of the entire alloca.
if (Offset == 0 && AllowWholeAccess &&
- MemSize == DL->getTypeAllocSize(Info.AI->getAllocatedType())) {
+ MemSize == DL.getTypeAllocSize(Info.AI->getAllocatedType())) {
// This can be safe for MemIntrinsics (where MemOpType is 0) and integer
// loads/stores (which are essentially the same as the MemIntrinsics with
// regard to copying padding between elements). But, if an alloca is
@@ -1828,7 +1831,7 @@ void SROA::isSafeMemAccess(uint64_t Offset, uint64_t MemSize,
}
// Check if the offset/size correspond to a component within the alloca type.
Type *T = Info.AI->getAllocatedType();
- if (TypeHasComponent(T, Offset, MemSize)) {
+ if (TypeHasComponent(T, Offset, MemSize, DL)) {
Info.hasSubelementAccess = true;
return;
}
@@ -1838,24 +1841,25 @@ void SROA::isSafeMemAccess(uint64_t Offset, uint64_t MemSize,
/// TypeHasComponent - Return true if T has a component type with the
/// specified offset and size. If Size is zero, do not check the size.
-bool SROA::TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size) {
+bool SROA::TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size,
+ const DataLayout &DL) {
Type *EltTy;
uint64_t EltSize;
if (StructType *ST = dyn_cast<StructType>(T)) {
- const StructLayout *Layout = DL->getStructLayout(ST);
+ const StructLayout *Layout = DL.getStructLayout(ST);
unsigned EltIdx = Layout->getElementContainingOffset(Offset);
EltTy = ST->getContainedType(EltIdx);
- EltSize = DL->getTypeAllocSize(EltTy);
+ EltSize = DL.getTypeAllocSize(EltTy);
Offset -= Layout->getElementOffset(EltIdx);
} else if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
EltTy = AT->getElementType();
- EltSize = DL->getTypeAllocSize(EltTy);
+ EltSize = DL.getTypeAllocSize(EltTy);
if (Offset >= AT->getNumElements() * EltSize)
return false;
Offset %= EltSize;
} else if (VectorType *VT = dyn_cast<VectorType>(T)) {
EltTy = VT->getElementType();
- EltSize = DL->getTypeAllocSize(EltTy);
+ EltSize = DL.getTypeAllocSize(EltTy);
if (Offset >= VT->getNumElements() * EltSize)
return false;
Offset %= EltSize;
@@ -1867,7 +1871,7 @@ bool SROA::TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size) {
// Check if the component spans multiple elements.
if (Offset + Size > EltSize)
return false;
- return TypeHasComponent(EltTy, Offset, Size);
+ return TypeHasComponent(EltTy, Offset, Size, DL);
}
/// RewriteForScalarRepl - Alloca AI is being split into NewElts, so rewrite
@@ -1876,6 +1880,7 @@ bool SROA::TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size) {
/// instruction.
void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
SmallVectorImpl<AllocaInst *> &NewElts) {
+ const DataLayout &DL = I->getModule()->getDataLayout();
for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E;) {
Use &TheUse = *UI++;
Instruction *User = cast<Instruction>(TheUse.getUser());
@@ -1893,8 +1898,7 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
uint64_t MemSize = Length->getZExtValue();
- if (Offset == 0 &&
- MemSize == DL->getTypeAllocSize(AI->getAllocatedType()))
+ if (Offset == 0 && MemSize == DL.getTypeAllocSize(AI->getAllocatedType()))
RewriteMemIntrinUserOfAlloca(MI, I, AI, NewElts);
// Otherwise the intrinsic can only touch a single element and the
// address operand will be updated, so nothing else needs to be done.
@@ -1930,8 +1934,8 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
LI->replaceAllUsesWith(Insert);
DeadInsts.push_back(LI);
} else if (LIType->isIntegerTy() &&
- DL->getTypeAllocSize(LIType) ==
- DL->getTypeAllocSize(AI->getAllocatedType())) {
+ DL.getTypeAllocSize(LIType) ==
+ DL.getTypeAllocSize(AI->getAllocatedType())) {
// If this is a load of the entire alloca to an integer, rewrite it.
RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
}
@@ -1957,8 +1961,8 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
}
DeadInsts.push_back(SI);
} else if (SIType->isIntegerTy() &&
- DL->getTypeAllocSize(SIType) ==
- DL->getTypeAllocSize(AI->getAllocatedType())) {
+ DL.getTypeAllocSize(SIType) ==
+ DL.getTypeAllocSize(AI->getAllocatedType())) {
// If this is a store of the entire alloca from an integer, rewrite it.
RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
}
@@ -2001,7 +2005,8 @@ void SROA::RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
Type *T = AI->getAllocatedType();
uint64_t EltOffset = 0;
Type *IdxTy;
- uint64_t Idx = FindElementAndOffset(T, EltOffset, IdxTy);
+ uint64_t Idx = FindElementAndOffset(T, EltOffset, IdxTy,
+ BC->getModule()->getDataLayout());
Instruction *Val = NewElts[Idx];
if (Val->getType() != BC->getDestTy()) {
Val = new BitCastInst(Val, BC->getDestTy(), "", BC);
@@ -2016,11 +2021,12 @@ void SROA::RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
/// Sets T to the type of the element and Offset to the offset within that
/// element. IdxTy is set to the type of the index result to be used in a
/// GEP instruction.
-uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset,
- Type *&IdxTy) {
+uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset, Type *&IdxTy,
+ const DataLayout &DL) {
uint64_t Idx = 0;
+
if (StructType *ST = dyn_cast<StructType>(T)) {
- const StructLayout *Layout = DL->getStructLayout(ST);
+ const StructLayout *Layout = DL.getStructLayout(ST);
Idx = Layout->getElementContainingOffset(Offset);
T = ST->getContainedType(Idx);
Offset -= Layout->getElementOffset(Idx);
@@ -2028,7 +2034,7 @@ uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset,
return Idx;
} else if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
T = AT->getElementType();
- uint64_t EltSize = DL->getTypeAllocSize(T);
+ uint64_t EltSize = DL.getTypeAllocSize(T);
Idx = Offset / EltSize;
Offset -= Idx * EltSize;
IdxTy = Type::getInt64Ty(T->getContext());
@@ -2036,7 +2042,7 @@ uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset,
}
VectorType *VT = cast<VectorType>(T);
T = VT->getElementType();
- uint64_t EltSize = DL->getTypeAllocSize(T);
+ uint64_t EltSize = DL.getTypeAllocSize(T);
Idx = Offset / EltSize;
Offset -= Idx * EltSize;
IdxTy = Type::getInt64Ty(T->getContext());
@@ -2049,6 +2055,7 @@ uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset,
void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
SmallVectorImpl<AllocaInst *> &NewElts) {
uint64_t OldOffset = Offset;
+ const DataLayout &DL = GEPI->getModule()->getDataLayout();
SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end());
// If the GEP was dynamic then it must have been a dynamic vector lookup.
// In this case, it must be the last GEP operand which is dynamic so keep that
@@ -2057,19 +2064,19 @@ void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
Value* NonConstantIdx = nullptr;
if (!GEPI->hasAllConstantIndices())
NonConstantIdx = Indices.pop_back_val();
- Offset += DL->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
+ Offset += DL.getIndexedOffset(GEPI->getPointerOperandType(), Indices);
RewriteForScalarRepl(GEPI, AI, Offset, NewElts);
Type *T = AI->getAllocatedType();
Type *IdxTy;
- uint64_t OldIdx = FindElementAndOffset(T, OldOffset, IdxTy);
+ uint64_t OldIdx = FindElementAndOffset(T, OldOffset, IdxTy, DL);
if (GEPI->getOperand(0) == AI)
OldIdx = ~0ULL; // Force the GEP to be rewritten.
T = AI->getAllocatedType();
uint64_t EltOffset = Offset;
- uint64_t Idx = FindElementAndOffset(T, EltOffset, IdxTy);
+ uint64_t Idx = FindElementAndOffset(T, EltOffset, IdxTy, DL);
// If this GEP does not move the pointer across elements of the alloca
    // being split, then it does not need to be rewritten.
@@ -2080,7 +2087,7 @@ void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
SmallVector<Value*, 8> NewArgs;
NewArgs.push_back(Constant::getNullValue(i32Ty));
while (EltOffset != 0) {
- uint64_t EltIdx = FindElementAndOffset(T, EltOffset, IdxTy);
+ uint64_t EltIdx = FindElementAndOffset(T, EltOffset, IdxTy, DL);
NewArgs.push_back(ConstantInt::get(IdxTy, EltIdx));
}
if (NonConstantIdx) {
@@ -2114,9 +2121,10 @@ void SROA::RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
// Put matching lifetime markers on everything from Offset up to
// Offset+OldSize.
Type *AIType = AI->getAllocatedType();
+ const DataLayout &DL = II->getModule()->getDataLayout();
uint64_t NewOffset = Offset;
Type *IdxTy;
- uint64_t Idx = FindElementAndOffset(AIType, NewOffset, IdxTy);
+ uint64_t Idx = FindElementAndOffset(AIType, NewOffset, IdxTy, DL);
IRBuilder<> Builder(II);
uint64_t Size = OldSize->getLimitedValue();
@@ -2126,10 +2134,10 @@ void SROA::RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
// split the alloca again later.
unsigned AS = AI->getType()->getAddressSpace();
Value *V = Builder.CreateBitCast(NewElts[Idx], Builder.getInt8PtrTy(AS));
- V = Builder.CreateGEP(V, Builder.getInt64(NewOffset));
+ V = Builder.CreateGEP(Builder.getInt8Ty(), V, Builder.getInt64(NewOffset));
IdxTy = NewElts[Idx]->getAllocatedType();
- uint64_t EltSize = DL->getTypeAllocSize(IdxTy) - NewOffset;
+ uint64_t EltSize = DL.getTypeAllocSize(IdxTy) - NewOffset;
if (EltSize > Size) {
EltSize = Size;
Size = 0;
@@ -2145,7 +2153,7 @@ void SROA::RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
for (; Idx != NewElts.size() && Size; ++Idx) {
IdxTy = NewElts[Idx]->getAllocatedType();
- uint64_t EltSize = DL->getTypeAllocSize(IdxTy);
+ uint64_t EltSize = DL.getTypeAllocSize(IdxTy);
if (EltSize > Size) {
EltSize = Size;
Size = 0;
@@ -2221,6 +2229,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
bool SROADest = MI->getRawDest() == Inst;
Constant *Zero = Constant::getNullValue(Type::getInt32Ty(MI->getContext()));
+ const DataLayout &DL = MI->getModule()->getDataLayout();
for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
// If this is a memcpy/memmove, emit a GEP of the other element address.
@@ -2237,10 +2246,10 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
Type *OtherTy = OtherPtrTy->getElementType();
if (StructType *ST = dyn_cast<StructType>(OtherTy)) {
- EltOffset = DL->getStructLayout(ST)->getElementOffset(i);
+ EltOffset = DL.getStructLayout(ST)->getElementOffset(i);
} else {
Type *EltTy = cast<SequentialType>(OtherTy)->getElementType();
- EltOffset = DL->getTypeAllocSize(EltTy)*i;
+ EltOffset = DL.getTypeAllocSize(EltTy) * i;
}
// The alignment of the other pointer is the guaranteed alignment of the
@@ -2281,7 +2290,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
Type *ValTy = EltTy->getScalarType();
// Construct an integer with the right value.
- unsigned EltSize = DL->getTypeSizeInBits(ValTy);
+ unsigned EltSize = DL.getTypeSizeInBits(ValTy);
APInt OneVal(EltSize, CI->getZExtValue());
APInt TotalVal(OneVal);
// Set each byte.
@@ -2311,7 +2320,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// this element.
}
- unsigned EltSize = DL->getTypeAllocSize(EltTy);
+ unsigned EltSize = DL.getTypeAllocSize(EltTy);
if (!EltSize)
continue;
@@ -2345,12 +2354,13 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
// and store the element value to the individual alloca.
Value *SrcVal = SI->getOperand(0);
Type *AllocaEltTy = AI->getAllocatedType();
- uint64_t AllocaSizeBits = DL->getTypeAllocSizeInBits(AllocaEltTy);
+ const DataLayout &DL = SI->getModule()->getDataLayout();
+ uint64_t AllocaSizeBits = DL.getTypeAllocSizeInBits(AllocaEltTy);
IRBuilder<> Builder(SI);
// Handle tail padding by extending the operand
- if (DL->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
+ if (DL.getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
SrcVal = Builder.CreateZExt(SrcVal,
IntegerType::get(SI->getContext(), AllocaSizeBits));
@@ -2360,15 +2370,15 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
// There are two forms here: AI could be an array or struct. Both cases
// have different ways to compute the element offset.
if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
- const StructLayout *Layout = DL->getStructLayout(EltSTy);
+ const StructLayout *Layout = DL.getStructLayout(EltSTy);
for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
// Get the number of bits to shift SrcVal to get the value.
Type *FieldTy = EltSTy->getElementType(i);
uint64_t Shift = Layout->getElementOffsetInBits(i);
- if (DL->isBigEndian())
- Shift = AllocaSizeBits-Shift-DL->getTypeAllocSizeInBits(FieldTy);
+ if (DL.isBigEndian())
+ Shift = AllocaSizeBits - Shift - DL.getTypeAllocSizeInBits(FieldTy);
Value *EltVal = SrcVal;
if (Shift) {
@@ -2377,7 +2387,7 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
}
// Truncate down to an integer of the right size.
- uint64_t FieldSizeBits = DL->getTypeSizeInBits(FieldTy);
+ uint64_t FieldSizeBits = DL.getTypeSizeInBits(FieldTy);
// Ignore zero sized fields like {}, they obviously contain no data.
if (FieldSizeBits == 0) continue;
@@ -2402,12 +2412,12 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
} else {
ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
Type *ArrayEltTy = ATy->getElementType();
- uint64_t ElementOffset = DL->getTypeAllocSizeInBits(ArrayEltTy);
- uint64_t ElementSizeBits = DL->getTypeSizeInBits(ArrayEltTy);
+ uint64_t ElementOffset = DL.getTypeAllocSizeInBits(ArrayEltTy);
+ uint64_t ElementSizeBits = DL.getTypeSizeInBits(ArrayEltTy);
uint64_t Shift;
- if (DL->isBigEndian())
+ if (DL.isBigEndian())
Shift = AllocaSizeBits-ElementOffset;
else
Shift = 0;
@@ -2441,7 +2451,7 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
}
new StoreInst(EltVal, DestField, SI);
- if (DL->isBigEndian())
+ if (DL.isBigEndian())
Shift -= ElementOffset;
else
Shift += ElementOffset;
@@ -2459,7 +2469,8 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
// Extract each element out of the NewElts according to its structure offset
// and form the result value.
Type *AllocaEltTy = AI->getAllocatedType();
- uint64_t AllocaSizeBits = DL->getTypeAllocSizeInBits(AllocaEltTy);
+ const DataLayout &DL = LI->getModule()->getDataLayout();
+ uint64_t AllocaSizeBits = DL.getTypeAllocSizeInBits(AllocaEltTy);
DEBUG(dbgs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI
<< '\n');
@@ -2469,10 +2480,10 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
const StructLayout *Layout = nullptr;
uint64_t ArrayEltBitOffset = 0;
if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
- Layout = DL->getStructLayout(EltSTy);
+ Layout = DL.getStructLayout(EltSTy);
} else {
Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
- ArrayEltBitOffset = DL->getTypeAllocSizeInBits(ArrayEltTy);
+ ArrayEltBitOffset = DL.getTypeAllocSizeInBits(ArrayEltTy);
}
Value *ResultVal =
@@ -2484,7 +2495,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
Value *SrcField = NewElts[i];
Type *FieldTy =
cast<PointerType>(SrcField->getType())->getElementType();
- uint64_t FieldSizeBits = DL->getTypeSizeInBits(FieldTy);
+ uint64_t FieldSizeBits = DL.getTypeSizeInBits(FieldTy);
// Ignore zero sized fields like {}, they obviously contain no data.
if (FieldSizeBits == 0) continue;
@@ -2515,7 +2526,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
else // Array case.
Shift = i*ArrayEltBitOffset;
- if (DL->isBigEndian())
+ if (DL.isBigEndian())
Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();
if (Shift) {
@@ -2532,7 +2543,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
}
// Handle tail padding by truncating the result
- if (DL->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
+ if (DL.getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI);
LI->replaceAllUsesWith(ResultVal);
@@ -2589,13 +2600,15 @@ bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
return false;
}
+ const DataLayout &DL = AI->getModule()->getDataLayout();
+
// Okay, we know all the users are promotable. If the aggregate is a memcpy
// source and destination, we have to be careful. In particular, the memcpy
// could be moving around elements that live in structure padding of the LLVM
// types, but may actually be used. In these cases, we refuse to promote the
// struct.
if (Info.isMemCpySrc && Info.isMemCpyDst &&
- HasPadding(AI->getAllocatedType(), *DL))
+ HasPadding(AI->getAllocatedType(), DL))
return false;
// If the alloca never has an access to just *part* of it, but is accessed
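Across ScalarReplAggregates the cached const DataLayout *DL member is gone; helpers now either take the layout as a parameter or fetch it from the enclosing module on demand. A tiny sketch of the parameter-passing form, using only calls that already appear in the hunks (the function name is ours):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

static uint64_t fieldOffsetInBytes(StructType *ST, unsigned FieldIdx,
                                   const DataLayout &DL) {
  // The layout is threaded down explicitly instead of read from a member.
  const StructLayout *Layout = DL.getStructLayout(ST);
  return Layout->getElementOffset(FieldIdx);
}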
diff --git a/lib/Transforms/Scalar/Scalarizer.cpp b/lib/Transforms/Scalar/Scalarizer.cpp
index 6036c099be0e..d55dc6a20a06 100644
--- a/lib/Transforms/Scalar/Scalarizer.cpp
+++ b/lib/Transforms/Scalar/Scalarizer.cpp
@@ -165,7 +165,7 @@ private:
void gather(Instruction *, const ValueVector &);
bool canTransferMetadata(unsigned Kind);
void transferMetadata(Instruction *, const ValueVector &);
- bool getVectorLayout(Type *, unsigned, VectorLayout &);
+ bool getVectorLayout(Type *, unsigned, VectorLayout &, const DataLayout &);
bool finish();
template<typename T> bool splitBinary(Instruction &, const T &);
@@ -173,7 +173,6 @@ private:
ScatterMap Scattered;
GatherList Gathered;
unsigned ParallelLoopAccessMDKind;
- const DataLayout *DL;
bool ScalarizeLoadStore;
};
@@ -214,7 +213,7 @@ Value *Scatterer::operator[](unsigned I) {
CV[0] = Builder.CreateBitCast(V, Ty, V->getName() + ".i0");
}
if (I != 0)
- CV[I] = Builder.CreateConstGEP1_32(CV[0], I,
+ CV[I] = Builder.CreateConstGEP1_32(nullptr, CV[0], I,
V->getName() + ".i" + Twine(I));
} else {
// Search through a chain of InsertElementInsts looking for element I.
@@ -248,8 +247,6 @@ bool Scalarizer::doInitialization(Module &M) {
}
bool Scalarizer::runOnFunction(Function &F) {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
for (Function::iterator BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI) {
BasicBlock *BB = BBI;
for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
@@ -345,10 +342,7 @@ void Scalarizer::transferMetadata(Instruction *Op, const ValueVector &CV) {
// Try to fill in Layout from Ty, returning true on success. Alignment is
// the alignment of the vector, or 0 if the ABI default should be used.
bool Scalarizer::getVectorLayout(Type *Ty, unsigned Alignment,
- VectorLayout &Layout) {
- if (!DL)
- return false;
-
+ VectorLayout &Layout, const DataLayout &DL) {
// Make sure we're dealing with a vector.
Layout.VecTy = dyn_cast<VectorType>(Ty);
if (!Layout.VecTy)
@@ -356,15 +350,15 @@ bool Scalarizer::getVectorLayout(Type *Ty, unsigned Alignment,
// Check that we're dealing with full-byte elements.
Layout.ElemTy = Layout.VecTy->getElementType();
- if (DL->getTypeSizeInBits(Layout.ElemTy) !=
- DL->getTypeStoreSizeInBits(Layout.ElemTy))
+ if (DL.getTypeSizeInBits(Layout.ElemTy) !=
+ DL.getTypeStoreSizeInBits(Layout.ElemTy))
return false;
if (Alignment)
Layout.VecAlign = Alignment;
else
- Layout.VecAlign = DL->getABITypeAlignment(Layout.VecTy);
- Layout.ElemSize = DL->getTypeStoreSize(Layout.ElemTy);
+ Layout.VecAlign = DL.getABITypeAlignment(Layout.VecTy);
+ Layout.ElemSize = DL.getTypeStoreSize(Layout.ElemTy);
return true;
}
@@ -456,7 +450,7 @@ bool Scalarizer::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
Indices.resize(NumIndices);
for (unsigned J = 0; J < NumIndices; ++J)
Indices[J] = Ops[J][I];
- Res[I] = Builder.CreateGEP(Base[I], Indices,
+ Res[I] = Builder.CreateGEP(GEPI.getSourceElementType(), Base[I], Indices,
GEPI.getName() + ".i" + Twine(I));
if (GEPI.isInBounds())
if (GetElementPtrInst *NewGEPI = dyn_cast<GetElementPtrInst>(Res[I]))
@@ -595,7 +589,8 @@ bool Scalarizer::visitLoadInst(LoadInst &LI) {
return false;
VectorLayout Layout;
- if (!getVectorLayout(LI.getType(), LI.getAlignment(), Layout))
+ if (!getVectorLayout(LI.getType(), LI.getAlignment(), Layout,
+ LI.getModule()->getDataLayout()))
return false;
unsigned NumElems = Layout.VecTy->getNumElements();
@@ -619,7 +614,8 @@ bool Scalarizer::visitStoreInst(StoreInst &SI) {
VectorLayout Layout;
Value *FullValue = SI.getValueOperand();
- if (!getVectorLayout(FullValue->getType(), SI.getAlignment(), Layout))
+ if (!getVectorLayout(FullValue->getType(), SI.getAlignment(), Layout,
+ SI.getModule()->getDataLayout()))
return false;
unsigned NumElems = Layout.VecTy->getNumElements();
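The Scalarizer hunks also show the updated IRBuilder GEP interface: CreateGEP and CreateConstGEP1_32 now take the source element type explicitly instead of deriving it from the pointer operand. A sketch of the new call shape, assuming nothing beyond the overload used above (helper name invented):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

static Value *gepByteOffset(IRBuilder<> &B, Value *BasePtr, uint64_t Off) {
  // The i8 element type is spelled out rather than taken from the pointee type.
  return B.CreateGEP(B.getInt8Ty(), BasePtr, B.getInt64(Off), "byteoff");
}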
diff --git a/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
index 6157746af48c..3a782d159dab 100644
--- a/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
+++ b/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -160,6 +160,7 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
@@ -167,6 +168,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/IR/IRBuilder.h"
@@ -177,6 +179,13 @@ static cl::opt<bool> DisableSeparateConstOffsetFromGEP(
"disable-separate-const-offset-from-gep", cl::init(false),
cl::desc("Do not separate the constant offset from a GEP instruction"),
cl::Hidden);
+// Setting this flag may emit false positives when the input module already
+// contains dead instructions. Therefore, we set it only in unit tests that are
+// free of dead code.
+static cl::opt<bool>
+ VerifyNoDeadCode("reassociate-geps-verify-no-dead-code", cl::init(false),
+ cl::desc("Verify this pass produces no dead code"),
+ cl::Hidden);
namespace {
@@ -194,23 +203,26 @@ namespace {
/// 5); nor can we transform (3 * (a + 5)) to (3 * a + 5), however in this case,
/// -instcombine probably already optimized (3 * (a + 5)) to (3 * a + 15).
class ConstantOffsetExtractor {
- public:
+public:
/// Extracts a constant offset from the given GEP index. It returns the
/// new index representing the remainder (equal to the original index minus
/// the constant offset), or nullptr if we cannot extract a constant offset.
- /// \p Idx The given GEP index
- /// \p DL The datalayout of the module
- /// \p GEP The given GEP
- static Value *Extract(Value *Idx, const DataLayout *DL,
- GetElementPtrInst *GEP);
+ /// \p Idx The given GEP index
+ /// \p GEP The given GEP
+ /// \p UserChainTail Outputs the tail of UserChain so that we can
+ /// garbage-collect unused instructions in UserChain.
+ static Value *Extract(Value *Idx, GetElementPtrInst *GEP,
+ User *&UserChainTail, const DominatorTree *DT);
/// Looks for a constant offset from the given GEP index without extracting
/// it. It returns the numeric value of the extracted constant offset (0 if
/// failed). The meaning of the arguments are the same as Extract.
- static int64_t Find(Value *Idx, const DataLayout *DL, GetElementPtrInst *GEP);
+ static int64_t Find(Value *Idx, GetElementPtrInst *GEP,
+ const DominatorTree *DT);
- private:
- ConstantOffsetExtractor(const DataLayout *Layout, Instruction *InsertionPt)
- : DL(Layout), IP(InsertionPt) {}
+private:
+ ConstantOffsetExtractor(Instruction *InsertionPt, const DominatorTree *DT)
+ : IP(InsertionPt), DL(InsertionPt->getModule()->getDataLayout()), DT(DT) {
+ }
/// Searches the expression that computes V for a non-zero constant C s.t.
/// V can be reassociated into the form V' + C. If the searching is
/// successful, returns C and update UserChain as a def-use chain from C to V;
@@ -268,12 +280,6 @@ class ConstantOffsetExtractor {
/// returns "sext i32 (zext i16 V to i32) to i64".
Value *applyExts(Value *V);
- /// Returns true if LHS and RHS have no bits in common, i.e., LHS | RHS == 0.
- bool NoCommonBits(Value *LHS, Value *RHS) const;
- /// Computes which bits are known to be one or zero.
- /// \p KnownOne Mask of all bits that are known to be one.
- /// \p KnownZero Mask of all bits that are known to be zero.
- void ComputeKnownBits(Value *V, APInt &KnownOne, APInt &KnownZero) const;
/// A helper function that returns whether we can trace into the operands
/// of binary operator BO for a constant offset.
///
@@ -294,39 +300,36 @@ class ConstantOffsetExtractor {
/// A data structure used in rebuildWithoutConstOffset. Contains all
/// sext/zext instructions along UserChain.
SmallVector<CastInst *, 16> ExtInsts;
- /// The data layout of the module. Used in ComputeKnownBits.
- const DataLayout *DL;
Instruction *IP; /// Insertion position of cloned instructions.
+ const DataLayout &DL;
+ const DominatorTree *DT;
};
/// \brief A pass that tries to split every GEP in the function into a variadic
/// base and a constant offset. It is a FunctionPass because searching for the
/// constant offset may inspect other basic blocks.
class SeparateConstOffsetFromGEP : public FunctionPass {
- public:
+public:
static char ID;
SeparateConstOffsetFromGEP(const TargetMachine *TM = nullptr,
bool LowerGEP = false)
- : FunctionPass(ID), TM(TM), LowerGEP(LowerGEP) {
+ : FunctionPass(ID), DL(nullptr), DT(nullptr), TM(TM), LowerGEP(LowerGEP) {
initializeSeparateConstOffsetFromGEPPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<DataLayoutPass>();
- AU.addRequired<TargetTransformInfo>();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
+ AU.setPreservesCFG();
}
bool doInitialization(Module &M) override {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (DLP == nullptr)
- report_fatal_error("data layout missing");
- DL = &DLP->getDataLayout();
+ DL = &M.getDataLayout();
return false;
}
-
bool runOnFunction(Function &F) override;
- private:
+private:
/// Tries to split the given GEP into a variadic base and a constant offset,
/// and returns true if the splitting succeeds.
bool splitGEP(GetElementPtrInst *GEP);
@@ -370,8 +373,11 @@ class SeparateConstOffsetFromGEP : public FunctionPass {
///
/// Verified in @i32_add in split-gep.ll
bool canonicalizeArrayIndicesToPointerSize(GetElementPtrInst *GEP);
+ /// Verify F is free of dead code.
+ void verifyNoDeadCode(Function &F);
const DataLayout *DL;
+ const DominatorTree *DT;
const TargetMachine *TM;
/// Whether to lower a GEP with multiple indices into arithmetic operations or
/// multiple GEPs with a single index.
@@ -384,8 +390,8 @@ INITIALIZE_PASS_BEGIN(
SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
"Split GEPs to a variadic base and a constant offset for better CSE", false,
false)
-INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
-INITIALIZE_PASS_DEPENDENCY(DataLayoutPass)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(
SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
"Split GEPs to a variadic base and a constant offset for better CSE", false,
@@ -413,7 +419,8 @@ bool ConstantOffsetExtractor::CanTraceInto(bool SignExtended,
Value *LHS = BO->getOperand(0), *RHS = BO->getOperand(1);
// Do not trace into "or" unless it is equivalent to "add". If LHS and RHS
// don't have common bits, (LHS | RHS) is equivalent to (LHS + RHS).
- if (BO->getOpcode() == Instruction::Or && !NoCommonBits(LHS, RHS))
+ if (BO->getOpcode() == Instruction::Or &&
+ !haveNoCommonBitsSet(LHS, RHS, DL, nullptr, BO, DT))
return false;
// In addition, tracing into BO requires that its surrounding s/zext (if
@@ -498,9 +505,8 @@ APInt ConstantOffsetExtractor::find(Value *V, bool SignExtended,
ConstantOffset = CI->getValue();
} else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V)) {
// Trace into subexpressions for more hoisting opportunities.
- if (CanTraceInto(SignExtended, ZeroExtended, BO, NonNegative)) {
+ if (CanTraceInto(SignExtended, ZeroExtended, BO, NonNegative))
ConstantOffset = findInEitherOperand(BO, SignExtended, ZeroExtended);
- }
} else if (isa<SExtInst>(V)) {
ConstantOffset = find(U->getOperand(0), /* SignExtended */ true,
ZeroExtended, NonNegative).sext(BitWidth);
@@ -597,6 +603,11 @@ Value *ConstantOffsetExtractor::removeConstOffset(unsigned ChainIndex) {
}
BinaryOperator *BO = cast<BinaryOperator>(UserChain[ChainIndex]);
+ assert(BO->getNumUses() <= 1 &&
+ "distributeExtsAndCloneChain clones each BinaryOperator in "
+ "UserChain, so no one should be used more than "
+ "once");
+
unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
assert(BO->getOperand(OpNo) == UserChain[ChainIndex - 1]);
Value *NextInChain = removeConstOffset(ChainIndex - 1);
@@ -609,6 +620,7 @@ Value *ConstantOffsetExtractor::removeConstOffset(unsigned ChainIndex) {
return TheOther;
}
+ BinaryOperator::BinaryOps NewOp = BO->getOpcode();
if (BO->getOpcode() == Instruction::Or) {
// Rebuild "or" as "add", because "or" may be invalid for the new
// epxression.
@@ -623,68 +635,46 @@ Value *ConstantOffsetExtractor::removeConstOffset(unsigned ChainIndex) {
//
// Replacing the "or" with "add" is fine, because
// a | (b + 5) = a + (b + 5) = (a + b) + 5
- if (OpNo == 0) {
- return BinaryOperator::CreateAdd(NextInChain, TheOther, BO->getName(),
- IP);
- } else {
- return BinaryOperator::CreateAdd(TheOther, NextInChain, BO->getName(),
- IP);
- }
+ NewOp = Instruction::Add;
}
- // We can reuse BO in this case, because the new expression shares the same
- // instruction type and BO is used at most once.
- assert(BO->getNumUses() <= 1 &&
- "distributeExtsAndCloneChain clones each BinaryOperator in "
- "UserChain, so no one should be used more than "
- "once");
- BO->setOperand(OpNo, NextInChain);
- BO->setHasNoSignedWrap(false);
- BO->setHasNoUnsignedWrap(false);
- // Make sure it appears after all instructions we've inserted so far.
- BO->moveBefore(IP);
- return BO;
+ BinaryOperator *NewBO;
+ if (OpNo == 0) {
+ NewBO = BinaryOperator::Create(NewOp, NextInChain, TheOther, "", IP);
+ } else {
+ NewBO = BinaryOperator::Create(NewOp, TheOther, NextInChain, "", IP);
+ }
+ NewBO->takeName(BO);
+ return NewBO;
}
-Value *ConstantOffsetExtractor::Extract(Value *Idx, const DataLayout *DL,
- GetElementPtrInst *GEP) {
- ConstantOffsetExtractor Extractor(DL, GEP);
+Value *ConstantOffsetExtractor::Extract(Value *Idx, GetElementPtrInst *GEP,
+ User *&UserChainTail,
+ const DominatorTree *DT) {
+ ConstantOffsetExtractor Extractor(GEP, DT);
// Find a non-zero constant offset first.
APInt ConstantOffset =
Extractor.find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
GEP->isInBounds());
- if (ConstantOffset == 0)
+ if (ConstantOffset == 0) {
+ UserChainTail = nullptr;
return nullptr;
+ }
// Separates the constant offset from the GEP index.
- return Extractor.rebuildWithoutConstOffset();
+ Value *IdxWithoutConstOffset = Extractor.rebuildWithoutConstOffset();
+ UserChainTail = Extractor.UserChain.back();
+ return IdxWithoutConstOffset;
}
-int64_t ConstantOffsetExtractor::Find(Value *Idx, const DataLayout *DL,
- GetElementPtrInst *GEP) {
+int64_t ConstantOffsetExtractor::Find(Value *Idx, GetElementPtrInst *GEP,
+ const DominatorTree *DT) {
// If Idx is an index of an inbound GEP, Idx is guaranteed to be non-negative.
- return ConstantOffsetExtractor(DL, GEP)
+ return ConstantOffsetExtractor(GEP, DT)
.find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
GEP->isInBounds())
.getSExtValue();
}
-void ConstantOffsetExtractor::ComputeKnownBits(Value *V, APInt &KnownOne,
- APInt &KnownZero) const {
- IntegerType *IT = cast<IntegerType>(V->getType());
- KnownOne = APInt(IT->getBitWidth(), 0);
- KnownZero = APInt(IT->getBitWidth(), 0);
- llvm::computeKnownBits(V, KnownZero, KnownOne, DL, 0);
-}
-
-bool ConstantOffsetExtractor::NoCommonBits(Value *LHS, Value *RHS) const {
- assert(LHS->getType() == RHS->getType() &&
- "LHS and RHS should have the same type");
- APInt LHSKnownOne, LHSKnownZero, RHSKnownOne, RHSKnownZero;
- ComputeKnownBits(LHS, LHSKnownOne, LHSKnownZero);
- ComputeKnownBits(RHS, RHSKnownOne, RHSKnownZero);
- return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
-}
-
bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToPointerSize(
GetElementPtrInst *GEP) {
bool Changed = false;
@@ -713,7 +703,7 @@ SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
if (isa<SequentialType>(*GTI)) {
// Tries to extract a constant offset from this GEP index.
int64_t ConstantOffset =
- ConstantOffsetExtractor::Find(GEP->getOperand(I), DL, GEP);
+ ConstantOffsetExtractor::Find(GEP->getOperand(I), GEP, DT);
if (ConstantOffset != 0) {
NeedsExtraction = true;
// A GEP may have multiple indices. We accumulate the extracted
@@ -770,14 +760,16 @@ void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs(
}
}
// Create an ugly GEP with a single index for each index.
- ResultPtr = Builder.CreateGEP(ResultPtr, Idx, "uglygep");
+ ResultPtr =
+ Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Idx, "uglygep");
}
}
// Create a GEP with the constant offset index.
if (AccumulativeByteOffset != 0) {
Value *Offset = ConstantInt::get(IntPtrTy, AccumulativeByteOffset);
- ResultPtr = Builder.CreateGEP(ResultPtr, Offset, "uglygep");
+ ResultPtr =
+ Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Offset, "uglygep");
}
if (ResultPtr->getType() != Variadic->getType())
ResultPtr = Builder.CreateBitCast(ResultPtr, Variadic->getType());
@@ -857,7 +849,9 @@ bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
// of variable indices. Therefore, we don't check for addressing modes in that
// case.
if (!LowerGEP) {
- TargetTransformInfo &TTI = getAnalysis<TargetTransformInfo>();
+ TargetTransformInfo &TTI =
+ getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
+ *GEP->getParent()->getParent());
if (!TTI.isLegalAddressingMode(GEP->getType()->getElementType(),
/*BaseGV=*/nullptr, AccumulativeByteOffset,
/*HasBaseReg=*/true, /*Scale=*/0)) {
@@ -877,10 +871,17 @@ bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
if (isa<SequentialType>(*GTI)) {
// Splits this GEP index into a variadic part and a constant offset, and
// uses the variadic part as the new index.
+ Value *OldIdx = GEP->getOperand(I);
+ User *UserChainTail;
Value *NewIdx =
- ConstantOffsetExtractor::Extract(GEP->getOperand(I), DL, GEP);
+ ConstantOffsetExtractor::Extract(OldIdx, GEP, UserChainTail, DT);
if (NewIdx != nullptr) {
+ // Switches to the index with the constant offset removed.
GEP->setOperand(I, NewIdx);
+ // After switching to the new index, we can garbage-collect UserChain
+ // and the old index if they are not used.
+ RecursivelyDeleteTriviallyDeadInstructions(UserChainTail);
+ RecursivelyDeleteTriviallyDeadInstructions(OldIdx);
}
}
}
@@ -910,7 +911,7 @@ bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
if (LowerGEP) {
// As currently BasicAA does not analyze ptrtoint/inttoptr, do not lower to
// arithmetic operations if the target uses alias analysis in codegen.
- if (TM && TM->getSubtarget<TargetSubtargetInfo>().useAA())
+ if (TM && TM->getSubtargetImpl(*GEP->getParent()->getParent())->useAA())
lowerToSingleIndexGEPs(GEP, AccumulativeByteOffset);
else
lowerToArithmetics(GEP, AccumulativeByteOffset);
@@ -962,8 +963,9 @@ bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
    // Very likely. As long as %gep is naturally aligned, the byte offset we
// extracted should be a multiple of sizeof(*%gep).
int64_t Index = AccumulativeByteOffset / ElementTypeSizeOfGEP;
- NewGEP = GetElementPtrInst::Create(
- NewGEP, ConstantInt::get(IntPtrTy, Index, true), GEP->getName(), GEP);
+ NewGEP = GetElementPtrInst::Create(GEP->getResultElementType(), NewGEP,
+ ConstantInt::get(IntPtrTy, Index, true),
+ GEP->getName(), GEP);
} else {
// Unlikely but possible. For example,
// #pragma pack(1)
@@ -983,8 +985,9 @@ bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
GEP->getPointerAddressSpace());
NewGEP = new BitCastInst(NewGEP, I8PtrTy, "", GEP);
NewGEP = GetElementPtrInst::Create(
- NewGEP, ConstantInt::get(IntPtrTy, AccumulativeByteOffset, true),
- "uglygep", GEP);
+ Type::getInt8Ty(GEP->getContext()), NewGEP,
+ ConstantInt::get(IntPtrTy, AccumulativeByteOffset, true), "uglygep",
+ GEP);
if (GEP->getType() != I8PtrTy)
NewGEP = new BitCastInst(NewGEP, GEP->getType(), GEP->getName(), GEP);
}
@@ -996,9 +999,14 @@ bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
}
bool SeparateConstOffsetFromGEP::runOnFunction(Function &F) {
+ if (skipOptnoneFunction(F))
+ return false;
+
if (DisableSeparateConstOffsetFromGEP)
return false;
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+
bool Changed = false;
for (Function::iterator B = F.begin(), BE = F.end(); B != BE; ++B) {
for (BasicBlock::iterator I = B->begin(), IE = B->end(); I != IE; ) {
@@ -1009,5 +1017,22 @@ bool SeparateConstOffsetFromGEP::runOnFunction(Function &F) {
// already.
}
}
+
+ if (VerifyNoDeadCode)
+ verifyNoDeadCode(F);
+
return Changed;
}
+
+void SeparateConstOffsetFromGEP::verifyNoDeadCode(Function &F) {
+ for (auto &B : F) {
+ for (auto &I : B) {
+ if (isInstructionTriviallyDead(&I)) {
+ std::string ErrMessage;
+ raw_string_ostream RSO(ErrMessage);
+ RSO << "Dead instruction detected!\n" << I << "\n";
+ llvm_unreachable(RSO.str().c_str());
+ }
+ }
+ }
+}
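
A minimal standalone C++ sketch of the split performed above (the array a, the index i, and the constant 5 are hypothetical, chosen only to illustrate the arithmetic; this is ordinary C++, not LLVM IR): peeling the constant part off a GEP index leaves a variadic part, &a[i], that can be reused, while the constant byte offset can be absorbed by an addressing mode such as [reg + imm].

#include <cassert>
#include <cstdint>

int main() {
  int a[64] = {};
  int64_t i = 10;
  // Original form: one GEP with a mixed index, &a[i + 5].
  int *Direct = &a[i + 5];
  // After the split: the variadic part &a[i] plus a constant byte offset.
  int *Split = reinterpret_cast<int *>(
      reinterpret_cast<char *>(&a[i]) + 5 * sizeof(int));
  assert(Direct == Split);
  return 0;
}
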
diff --git a/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index 2e317f9d0999..8566cd9736d3 100644
--- a/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -21,7 +21,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Scalar/SimplifyCFG.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
@@ -37,6 +37,7 @@
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Scalar.h"
using namespace llvm;
#define DEBUG_TYPE "simplifycfg"
@@ -47,36 +48,6 @@ UserBonusInstThreshold("bonus-inst-threshold", cl::Hidden, cl::init(1),
STATISTIC(NumSimpl, "Number of blocks simplified");
-namespace {
-struct CFGSimplifyPass : public FunctionPass {
- static char ID; // Pass identification, replacement for typeid
- unsigned BonusInstThreshold;
- CFGSimplifyPass(int T = -1) : FunctionPass(ID) {
- BonusInstThreshold = (T == -1) ? UserBonusInstThreshold : unsigned(T);
- initializeCFGSimplifyPassPass(*PassRegistry::getPassRegistry());
- }
- bool runOnFunction(Function &F) override;
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<AssumptionCacheTracker>();
- AU.addRequired<TargetTransformInfo>();
- }
-};
-}
-
-char CFGSimplifyPass::ID = 0;
-INITIALIZE_PASS_BEGIN(CFGSimplifyPass, "simplifycfg", "Simplify the CFG", false,
- false)
-INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
-INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
-INITIALIZE_PASS_END(CFGSimplifyPass, "simplifycfg", "Simplify the CFG", false,
- false)
-
-// Public interface to the CFGSimplification pass
-FunctionPass *llvm::createCFGSimplificationPass(int Threshold) {
- return new CFGSimplifyPass(Threshold);
-}
-
/// mergeEmptyReturnBlocks - If we have more than one empty (other than phi
/// node) return blocks, merge them together to promote recursive block merging.
static bool mergeEmptyReturnBlocks(Function &F) {
@@ -156,7 +127,7 @@ static bool mergeEmptyReturnBlocks(Function &F) {
/// iterativelySimplifyCFG - Call SimplifyCFG on all the blocks in the function,
/// iterating until no more changes are made.
static bool iterativelySimplifyCFG(Function &F, const TargetTransformInfo &TTI,
- const DataLayout *DL, AssumptionCache *AC,
+ AssumptionCache *AC,
unsigned BonusInstThreshold) {
bool Changed = false;
bool LocalChange = true;
@@ -166,7 +137,7 @@ static bool iterativelySimplifyCFG(Function &F, const TargetTransformInfo &TTI,
// Loop over all of the basic blocks and remove them if they are unneeded...
//
for (Function::iterator BBIt = F.begin(); BBIt != F.end(); ) {
- if (SimplifyCFG(BBIt++, TTI, BonusInstThreshold, DL, AC)) {
+ if (SimplifyCFG(BBIt++, TTI, BonusInstThreshold, AC)) {
LocalChange = true;
++NumSimpl;
}
@@ -176,21 +147,11 @@ static bool iterativelySimplifyCFG(Function &F, const TargetTransformInfo &TTI,
return Changed;
}
-// It is possible that we may require multiple passes over the code to fully
-// simplify the CFG.
-//
-bool CFGSimplifyPass::runOnFunction(Function &F) {
- if (skipOptnoneFunction(F))
- return false;
-
- AssumptionCache *AC =
- &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
- const TargetTransformInfo &TTI = getAnalysis<TargetTransformInfo>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
+static bool simplifyFunctionCFG(Function &F, const TargetTransformInfo &TTI,
+ AssumptionCache *AC, int BonusInstThreshold) {
bool EverChanged = removeUnreachableBlocks(F);
EverChanged |= mergeEmptyReturnBlocks(F);
- EverChanged |= iterativelySimplifyCFG(F, TTI, DL, AC, BonusInstThreshold);
+ EverChanged |= iterativelySimplifyCFG(F, TTI, AC, BonusInstThreshold);
// If neither pass changed anything, we're done.
if (!EverChanged) return false;
@@ -204,9 +165,66 @@ bool CFGSimplifyPass::runOnFunction(Function &F) {
return true;
do {
- EverChanged = iterativelySimplifyCFG(F, TTI, DL, AC, BonusInstThreshold);
+ EverChanged = iterativelySimplifyCFG(F, TTI, AC, BonusInstThreshold);
EverChanged |= removeUnreachableBlocks(F);
} while (EverChanged);
return true;
}
+
+SimplifyCFGPass::SimplifyCFGPass()
+ : BonusInstThreshold(UserBonusInstThreshold) {}
+
+SimplifyCFGPass::SimplifyCFGPass(int BonusInstThreshold)
+ : BonusInstThreshold(BonusInstThreshold) {}
+
+PreservedAnalyses SimplifyCFGPass::run(Function &F,
+ AnalysisManager<Function> *AM) {
+ auto &TTI = AM->getResult<TargetIRAnalysis>(F);
+ auto &AC = AM->getResult<AssumptionAnalysis>(F);
+
+ if (!simplifyFunctionCFG(F, TTI, &AC, BonusInstThreshold))
+ return PreservedAnalyses::none();
+
+ return PreservedAnalyses::all();
+}
+
+namespace {
+struct CFGSimplifyPass : public FunctionPass {
+ static char ID; // Pass identification, replacement for typeid
+ unsigned BonusInstThreshold;
+ CFGSimplifyPass(int T = -1) : FunctionPass(ID) {
+ BonusInstThreshold = (T == -1) ? UserBonusInstThreshold : unsigned(T);
+ initializeCFGSimplifyPassPass(*PassRegistry::getPassRegistry());
+ }
+ bool runOnFunction(Function &F) override {
+ if (skipOptnoneFunction(F))
+ return false;
+
+ AssumptionCache *AC =
+ &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
+ const TargetTransformInfo &TTI =
+ getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+ return simplifyFunctionCFG(F, TTI, AC, BonusInstThreshold);
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<AssumptionCacheTracker>();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
+ }
+};
+}
+
+char CFGSimplifyPass::ID = 0;
+INITIALIZE_PASS_BEGIN(CFGSimplifyPass, "simplifycfg", "Simplify the CFG", false,
+ false)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
+INITIALIZE_PASS_END(CFGSimplifyPass, "simplifycfg", "Simplify the CFG", false,
+ false)
+
+// Public interface to the CFGSimplification pass
+FunctionPass *llvm::createCFGSimplificationPass(int Threshold) {
+ return new CFGSimplifyPass(Threshold);
+}
+
diff --git a/lib/Transforms/Scalar/Sink.cpp b/lib/Transforms/Scalar/Sink.cpp
index 903b675fdd56..b169d5612f00 100644
--- a/lib/Transforms/Scalar/Sink.cpp
+++ b/lib/Transforms/Scalar/Sink.cpp
@@ -21,6 +21,7 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -35,7 +36,6 @@ namespace {
DominatorTree *DT;
LoopInfo *LI;
AliasAnalysis *AA;
- const DataLayout *DL;
public:
static char ID; // Pass identification
@@ -50,9 +50,9 @@ namespace {
FunctionPass::getAnalysisUsage(AU);
AU.addRequired<AliasAnalysis>();
AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<LoopInfo>();
+ AU.addRequired<LoopInfoWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
- AU.addPreserved<LoopInfo>();
+ AU.addPreserved<LoopInfoWrapperPass>();
}
private:
bool ProcessBlock(BasicBlock &BB);
@@ -64,7 +64,7 @@ namespace {
char Sinking::ID = 0;
INITIALIZE_PASS_BEGIN(Sinking, "sink", "Code sinking", false, false)
-INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(Sinking, "sink", "Code sinking", false, false)
@@ -98,10 +98,8 @@ bool Sinking::AllUsesDominatedByBlock(Instruction *Inst,
bool Sinking::runOnFunction(Function &F) {
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- LI = &getAnalysis<LoopInfo>();
+ LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
AA = &getAnalysis<AliasAnalysis>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
bool MadeChange, EverMadeChange = false;
@@ -196,7 +194,7 @@ bool Sinking::IsAcceptableTarget(Instruction *Inst,
if (SuccToSinkTo->getUniquePredecessor() != Inst->getParent()) {
// We cannot sink a load across a critical edge - there may be stores in
// other code paths.
- if (!isSafeToSpeculativelyExecute(Inst, DL))
+ if (!isSafeToSpeculativelyExecute(Inst))
return false;
// We don't want to sink across a critical edge if we don't dominate the
diff --git a/lib/Transforms/Scalar/SpeculativeExecution.cpp b/lib/Transforms/Scalar/SpeculativeExecution.cpp
new file mode 100644
index 000000000000..ff3f00a2e2f8
--- /dev/null
+++ b/lib/Transforms/Scalar/SpeculativeExecution.cpp
@@ -0,0 +1,243 @@
+//===- SpeculativeExecution.cpp ---------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass hoists instructions to enable speculative execution on
+// targets where branches are expensive. This is aimed at GPUs. It
+// currently works on simple if-then and if-then-else
+// patterns.
+//
+// Removing branches is not the only motivation for this
+// pass. E.g. consider this code and assume that there is no
+// addressing mode for multiplying by sizeof(*a):
+//
+// if (b > 0)
+// c = a[i + 1]
+// if (d > 0)
+// e = a[i + 2]
+//
+// turns into
+//
+// p = &a[i + 1];
+// if (b > 0)
+// c = *p;
+// q = &a[i + 2];
+// if (d > 0)
+// e = *q;
+//
+// which could later be optimized to
+//
+// r = &a[i];
+// if (b > 0)
+// c = r[1];
+// if (d > 0)
+// e = r[2];
+//
+// Later passes sink back much of the speculated code that did not enable
+// further optimization.
+//
+// This pass is more aggressive than the function SpeculativelyExecuteBB in
+// SimplifyCFG. SimplifyCFG will not speculate if no selects are introduced and
+// it will speculate at most one instruction. It also will not speculate if
+// there is a value defined in the if-block that is only used in the then-block.
+// These restrictions make sense since the speculation in SimplifyCFG seems
+// aimed at introducing cheap selects, while this pass is intended to do more
+// aggressive speculation while counting on later passes to either capitalize on
+// that or clean it up.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "speculative-execution"
+
+// The risk that speculation will not pay off increases with the
+// number of instructions speculated, so we put a limit on that.
+static cl::opt<unsigned> SpecExecMaxSpeculationCost(
+ "spec-exec-max-speculation-cost", cl::init(7), cl::Hidden,
+ cl::desc("Speculative execution is not applied to basic blocks where "
+ "the cost of the instructions to speculatively execute "
+ "exceeds this limit."));
+
+// Speculating just a few instructions from a larger block tends not
+// to be profitable and this limit prevents that. A reason for that is
+// that small basic blocks are more likely to be candidates for
+// further optimization.
+static cl::opt<unsigned> SpecExecMaxNotHoisted(
+ "spec-exec-max-not-hoisted", cl::init(5), cl::Hidden,
+ cl::desc("Speculative execution is not applied to basic blocks where the "
+ "number of instructions that would not be speculatively executed "
+ "exceeds this limit."));
+
+namespace {
+class SpeculativeExecution : public FunctionPass {
+ public:
+ static char ID;
+ SpeculativeExecution(): FunctionPass(ID) {}
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool runOnFunction(Function &F) override;
+
+ private:
+ bool runOnBasicBlock(BasicBlock &B);
+ bool considerHoistingFromTo(BasicBlock &FromBlock, BasicBlock &ToBlock);
+
+ const TargetTransformInfo *TTI = nullptr;
+};
+} // namespace
+
+char SpeculativeExecution::ID = 0;
+INITIALIZE_PASS_BEGIN(SpeculativeExecution, "speculative-execution",
+ "Speculatively execute instructions", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
+INITIALIZE_PASS_END(SpeculativeExecution, "speculative-execution",
+ "Speculatively execute instructions", false, false)
+
+void SpeculativeExecution::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<TargetTransformInfoWrapperPass>();
+}
+
+bool SpeculativeExecution::runOnFunction(Function &F) {
+ if (skipOptnoneFunction(F))
+ return false;
+
+ TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+
+ bool Changed = false;
+ for (auto& B : F) {
+ Changed |= runOnBasicBlock(B);
+ }
+ return Changed;
+}
+
+bool SpeculativeExecution::runOnBasicBlock(BasicBlock &B) {
+ BranchInst *BI = dyn_cast<BranchInst>(B.getTerminator());
+ if (BI == nullptr)
+ return false;
+
+ if (BI->getNumSuccessors() != 2)
+ return false;
+ BasicBlock &Succ0 = *BI->getSuccessor(0);
+ BasicBlock &Succ1 = *BI->getSuccessor(1);
+
+ if (&B == &Succ0 || &B == &Succ1 || &Succ0 == &Succ1) {
+ return false;
+ }
+
+ // Hoist from if-then (triangle).
+ if (Succ0.getSinglePredecessor() != nullptr &&
+ Succ0.getSingleSuccessor() == &Succ1) {
+ return considerHoistingFromTo(Succ0, B);
+ }
+
+ // Hoist from if-else (triangle).
+ if (Succ1.getSinglePredecessor() != nullptr &&
+ Succ1.getSingleSuccessor() == &Succ0) {
+ return considerHoistingFromTo(Succ1, B);
+ }
+
+ // Hoist from if-then-else (diamond), but only if it is equivalent to
+ // an if-else or if-then due to one of the branches doing nothing.
+ if (Succ0.getSinglePredecessor() != nullptr &&
+ Succ1.getSinglePredecessor() != nullptr &&
+ Succ1.getSingleSuccessor() != nullptr &&
+ Succ1.getSingleSuccessor() != &B &&
+ Succ1.getSingleSuccessor() == Succ0.getSingleSuccessor()) {
+    // If a block has only one instruction, then that is a terminator
+    // instruction, which means the block does nothing. This does happen.
+ if (Succ1.size() == 1) // equivalent to if-then
+ return considerHoistingFromTo(Succ0, B);
+ if (Succ0.size() == 1) // equivalent to if-else
+ return considerHoistingFromTo(Succ1, B);
+ }
+
+ return false;
+}
+
+static unsigned ComputeSpeculationCost(const Instruction *I,
+ const TargetTransformInfo &TTI) {
+ switch (Operator::getOpcode(I)) {
+ case Instruction::GetElementPtr:
+ case Instruction::Add:
+ case Instruction::Mul:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Select:
+ case Instruction::Shl:
+ case Instruction::Sub:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::Xor:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ return TTI.getUserCost(I);
+
+ default:
+ return UINT_MAX; // Disallow anything not whitelisted.
+ }
+}
+
+bool SpeculativeExecution::considerHoistingFromTo(BasicBlock &FromBlock,
+ BasicBlock &ToBlock) {
+ SmallSet<const Instruction *, 8> NotHoisted;
+ const auto AllPrecedingUsesFromBlockHoisted = [&NotHoisted](User *U) {
+ for (Value* V : U->operand_values()) {
+ if (Instruction *I = dyn_cast<Instruction>(V)) {
+ if (NotHoisted.count(I) > 0)
+ return false;
+ }
+ }
+ return true;
+ };
+
+ unsigned TotalSpeculationCost = 0;
+ for (auto& I : FromBlock) {
+ const unsigned Cost = ComputeSpeculationCost(&I, *TTI);
+ if (Cost != UINT_MAX && isSafeToSpeculativelyExecute(&I) &&
+ AllPrecedingUsesFromBlockHoisted(&I)) {
+ TotalSpeculationCost += Cost;
+ if (TotalSpeculationCost > SpecExecMaxSpeculationCost)
+ return false; // too much to hoist
+ } else {
+ NotHoisted.insert(&I);
+ if (NotHoisted.size() > SpecExecMaxNotHoisted)
+ return false; // too much left behind
+ }
+ }
+
+ if (TotalSpeculationCost == 0)
+ return false; // nothing to hoist
+
+ for (auto I = FromBlock.begin(); I != FromBlock.end();) {
+ // We have to increment I before moving Current as moving Current
+ // changes the list that I is iterating through.
+ auto Current = I;
+ ++I;
+ if (!NotHoisted.count(Current)) {
+ Current->moveBefore(ToBlock.getTerminator());
+ }
+ }
+ return true;
+}
+
+namespace llvm {
+
+FunctionPass *createSpeculativeExecutionPass() {
+ return new SpeculativeExecution();
+}
+
+} // namespace llvm
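
A rough standalone sketch of the gating logic in considerHoistingFromTo above (plain C++, not LLVM code; Inst, worthHoisting, and the Cost/Hoistable fields are hypothetical stand-ins for ComputeSpeculationCost and isSafeToSpeculativelyExecute): speculation is abandoned either when the accumulated cost of hoistable instructions exceeds spec-exec-max-speculation-cost, or when too many instructions would be left behind, mirroring spec-exec-max-not-hoisted.

#include <iostream>
#include <vector>

struct Inst {
  unsigned Cost;   // stand-in for ComputeSpeculationCost(&I, TTI)
  bool Hoistable;  // stand-in for isSafeToSpeculativelyExecute(&I) and the
                   // check that all preceding uses were hoisted
};

bool worthHoisting(const std::vector<Inst> &Block,
                   unsigned MaxSpeculationCost = 7,  // spec-exec-max-speculation-cost
                   unsigned MaxNotHoisted = 5) {     // spec-exec-max-not-hoisted
  unsigned TotalCost = 0, NotHoisted = 0;
  for (const Inst &I : Block) {
    if (I.Hoistable) {
      TotalCost += I.Cost;
      if (TotalCost > MaxSpeculationCost)
        return false;  // too much to hoist
    } else if (++NotHoisted > MaxNotHoisted) {
      return false;    // too much left behind
    }
  }
  return TotalCost != 0;  // hoisting nothing is not worth reporting a change
}

int main() {
  // Three cheap hoistable instructions and one that must stay behind.
  std::vector<Inst> Block = {{1, true}, {2, true}, {1, true}, {0, false}};
  std::cout << std::boolalpha << worthHoisting(Block) << "\n";  // prints: true
  return 0;
}
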
diff --git a/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp b/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
new file mode 100644
index 000000000000..453503ab61da
--- /dev/null
+++ b/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
@@ -0,0 +1,710 @@
+//===-- StraightLineStrengthReduce.cpp - ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements straight-line strength reduction (SLSR). Unlike loop
+// strength reduction, this algorithm is designed to reduce arithmetic
+// redundancy in straight-line code instead of loops. It has proven to be
+// effective in simplifying arithmetic statements derived from an unrolled loop.
+// It can also simplify the logic of SeparateConstOffsetFromGEP.
+//
+// There are many optimizations we can perform in the domain of SLSR. This file
+// for now contains only an initial step. Specifically, we look for strength
+// reduction candidates in the following forms:
+//
+// Form 1: B + i * S
+// Form 2: (B + i) * S
+// Form 3: &B[i * S]
+//
+// where S is an integer variable, and i is a constant integer. If we found two
+// candidates S1 and S2 in the same form and S1 dominates S2, we may rewrite S2
+// in a simpler way with respect to S1. For example,
+//
+// S1: X = B + i * S
+// S2: Y = B + i' * S => X + (i' - i) * S
+//
+// S1: X = (B + i) * S
+// S2: Y = (B + i') * S => X + (i' - i) * S
+//
+// S1: X = &B[i * S]
+// S2: Y = &B[i' * S] => &X[(i' - i) * S]
+//
+// Note: (i' - i) * S is folded to the extent possible.
+//
+// This rewriting is in general a good idea. The code patterns we focus on
+// usually come from loop unrolling, so (i' - i) * S is likely the same
+// across iterations and can be reused. When that happens, the optimized form
+// takes only one add starting from the second iteration.
+//
+// When such rewriting is possible, we call S1 a "basis" of S2. When S2 has
+// multiple bases, we choose to rewrite S2 with respect to its "immediate"
+// basis, the basis that is the closest ancestor in the dominator tree.
+//
+// TODO:
+//
+// - Floating point arithmetics when fast math is enabled.
+//
+// - SLSR may decrease ILP at the architecture level. Targets that are very
+// sensitive to ILP may want to disable it. Having SLSR to consider ILP is
+// left as future work.
+//
+// - When (i' - i) is constant but i and i' are not, we could still perform
+// SLSR.
+#include <vector>
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/Local.h"
+
+using namespace llvm;
+using namespace PatternMatch;
+
+namespace {
+
+class StraightLineStrengthReduce : public FunctionPass {
+public:
+ // SLSR candidate. Such a candidate must be in one of the forms described in
+ // the header comments.
+ struct Candidate : public ilist_node<Candidate> {
+ enum Kind {
+ Invalid, // reserved for the default constructor
+ Add, // B + i * S
+ Mul, // (B + i) * S
+ GEP, // &B[..][i * S][..]
+ };
+
+ Candidate()
+ : CandidateKind(Invalid), Base(nullptr), Index(nullptr),
+ Stride(nullptr), Ins(nullptr), Basis(nullptr) {}
+ Candidate(Kind CT, const SCEV *B, ConstantInt *Idx, Value *S,
+ Instruction *I)
+ : CandidateKind(CT), Base(B), Index(Idx), Stride(S), Ins(I),
+ Basis(nullptr) {}
+ Kind CandidateKind;
+ const SCEV *Base;
+ // Note that Index and Stride of a GEP candidate do not necessarily have the
+ // same integer type. In that case, during rewriting, Stride will be
+ // sign-extended or truncated to Index's type.
+ ConstantInt *Index;
+ Value *Stride;
+ // The instruction this candidate corresponds to. It helps us to rewrite a
+ // candidate with respect to its immediate basis. Note that one instruction
+ // can correspond to multiple candidates depending on how you associate the
+ // expression. For instance,
+ //
+ // (a + 1) * (b + 2)
+ //
+ // can be treated as
+ //
+ // <Base: a, Index: 1, Stride: b + 2>
+ //
+ // or
+ //
+ // <Base: b, Index: 2, Stride: a + 1>
+ Instruction *Ins;
+ // Points to the immediate basis of this candidate, or nullptr if we cannot
+ // find any basis for this candidate.
+ Candidate *Basis;
+ };
+
+ static char ID;
+
+ StraightLineStrengthReduce()
+ : FunctionPass(ID), DL(nullptr), DT(nullptr), TTI(nullptr) {
+ initializeStraightLineStrengthReducePass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addRequired<ScalarEvolution>();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
+ // We do not modify the shape of the CFG.
+ AU.setPreservesCFG();
+ }
+
+ bool doInitialization(Module &M) override {
+ DL = &M.getDataLayout();
+ return false;
+ }
+
+ bool runOnFunction(Function &F) override;
+
+private:
+ // Returns true if Basis is a basis for C, i.e., Basis dominates C and they
+ // share the same base and stride.
+ bool isBasisFor(const Candidate &Basis, const Candidate &C);
+ // Returns whether the candidate can be folded into an addressing mode.
+ bool isFoldable(const Candidate &C, TargetTransformInfo *TTI,
+ const DataLayout *DL);
+  // Returns true if C is already in its simplest form and not worth being
+ // rewritten.
+ bool isSimplestForm(const Candidate &C);
+ // Checks whether I is in a candidate form. If so, adds all the matching forms
+ // to Candidates, and tries to find the immediate basis for each of them.
+ void allocateCandidatesAndFindBasis(Instruction *I);
+ // Allocate candidates and find bases for Add instructions.
+ void allocateCandidatesAndFindBasisForAdd(Instruction *I);
+ // Given I = LHS + RHS, factors RHS into i * S and makes (LHS + i * S) a
+ // candidate.
+ void allocateCandidatesAndFindBasisForAdd(Value *LHS, Value *RHS,
+ Instruction *I);
+ // Allocate candidates and find bases for Mul instructions.
+ void allocateCandidatesAndFindBasisForMul(Instruction *I);
+  // Splits LHS into Base + Index and, if that succeeds, calls
+ // allocateCandidatesAndFindBasis.
+ void allocateCandidatesAndFindBasisForMul(Value *LHS, Value *RHS,
+ Instruction *I);
+ // Allocate candidates and find bases for GetElementPtr instructions.
+ void allocateCandidatesAndFindBasisForGEP(GetElementPtrInst *GEP);
+ // A helper function that scales Idx with ElementSize before invoking
+ // allocateCandidatesAndFindBasis.
+ void allocateCandidatesAndFindBasisForGEP(const SCEV *B, ConstantInt *Idx,
+ Value *S, uint64_t ElementSize,
+ Instruction *I);
+ // Adds the given form <CT, B, Idx, S> to Candidates, and finds its immediate
+ // basis.
+ void allocateCandidatesAndFindBasis(Candidate::Kind CT, const SCEV *B,
+ ConstantInt *Idx, Value *S,
+ Instruction *I);
+ // Rewrites candidate C with respect to Basis.
+ void rewriteCandidateWithBasis(const Candidate &C, const Candidate &Basis);
+ // A helper function that factors ArrayIdx to a product of a stride and a
+ // constant index, and invokes allocateCandidatesAndFindBasis with the
+ // factorings.
+ void factorArrayIndex(Value *ArrayIdx, const SCEV *Base, uint64_t ElementSize,
+ GetElementPtrInst *GEP);
+ // Emit code that computes the "bump" from Basis to C. If the candidate is a
+ // GEP and the bump is not divisible by the element size of the GEP, this
+ // function sets the BumpWithUglyGEP flag to notify its caller to bump the
+ // basis using an ugly GEP.
+ static Value *emitBump(const Candidate &Basis, const Candidate &C,
+ IRBuilder<> &Builder, const DataLayout *DL,
+ bool &BumpWithUglyGEP);
+
+ const DataLayout *DL;
+ DominatorTree *DT;
+ ScalarEvolution *SE;
+ TargetTransformInfo *TTI;
+ ilist<Candidate> Candidates;
+ // Temporarily holds all instructions that are unlinked (but not deleted) by
+ // rewriteCandidateWithBasis. These instructions will be actually removed
+ // after all rewriting finishes.
+ std::vector<Instruction *> UnlinkedInstructions;
+};
+} // anonymous namespace
+
+char StraightLineStrengthReduce::ID = 0;
+INITIALIZE_PASS_BEGIN(StraightLineStrengthReduce, "slsr",
+ "Straight line strength reduction", false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
+INITIALIZE_PASS_END(StraightLineStrengthReduce, "slsr",
+ "Straight line strength reduction", false, false)
+
+FunctionPass *llvm::createStraightLineStrengthReducePass() {
+ return new StraightLineStrengthReduce();
+}
+
+bool StraightLineStrengthReduce::isBasisFor(const Candidate &Basis,
+ const Candidate &C) {
+ return (Basis.Ins != C.Ins && // skip the same instruction
+ // Basis must dominate C in order to rewrite C with respect to Basis.
+ DT->dominates(Basis.Ins->getParent(), C.Ins->getParent()) &&
+ // They share the same base, stride, and candidate kind.
+ Basis.Base == C.Base &&
+ Basis.Stride == C.Stride &&
+ Basis.CandidateKind == C.CandidateKind);
+}
+
+static bool isGEPFoldable(GetElementPtrInst *GEP,
+ const TargetTransformInfo *TTI,
+ const DataLayout *DL) {
+ GlobalVariable *BaseGV = nullptr;
+ int64_t BaseOffset = 0;
+ bool HasBaseReg = false;
+ int64_t Scale = 0;
+
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getPointerOperand()))
+ BaseGV = GV;
+ else
+ HasBaseReg = true;
+
+ gep_type_iterator GTI = gep_type_begin(GEP);
+ for (auto I = GEP->idx_begin(); I != GEP->idx_end(); ++I, ++GTI) {
+ if (isa<SequentialType>(*GTI)) {
+ int64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
+ if (ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I)) {
+ BaseOffset += ConstIdx->getSExtValue() * ElementSize;
+ } else {
+ // Needs scale register.
+ if (Scale != 0) {
+ // No addressing mode takes two scale registers.
+ return false;
+ }
+ Scale = ElementSize;
+ }
+ } else {
+ StructType *STy = cast<StructType>(*GTI);
+ uint64_t Field = cast<ConstantInt>(*I)->getZExtValue();
+ BaseOffset += DL->getStructLayout(STy)->getElementOffset(Field);
+ }
+ }
+ return TTI->isLegalAddressingMode(GEP->getType()->getElementType(), BaseGV,
+ BaseOffset, HasBaseReg, Scale);
+}
+
+// Returns whether (Base + Index * Stride) can be folded to an addressing mode.
+static bool isAddFoldable(const SCEV *Base, ConstantInt *Index, Value *Stride,
+ TargetTransformInfo *TTI) {
+ return TTI->isLegalAddressingMode(Base->getType(), nullptr, 0, true,
+ Index->getSExtValue());
+}
+
+bool StraightLineStrengthReduce::isFoldable(const Candidate &C,
+ TargetTransformInfo *TTI,
+ const DataLayout *DL) {
+ if (C.CandidateKind == Candidate::Add)
+ return isAddFoldable(C.Base, C.Index, C.Stride, TTI);
+ if (C.CandidateKind == Candidate::GEP)
+ return isGEPFoldable(cast<GetElementPtrInst>(C.Ins), TTI, DL);
+ return false;
+}
+
+// Returns true if GEP has zero or one non-zero index.
+static bool hasOnlyOneNonZeroIndex(GetElementPtrInst *GEP) {
+ unsigned NumNonZeroIndices = 0;
+ for (auto I = GEP->idx_begin(); I != GEP->idx_end(); ++I) {
+ ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I);
+ if (ConstIdx == nullptr || !ConstIdx->isZero())
+ ++NumNonZeroIndices;
+ }
+ return NumNonZeroIndices <= 1;
+}
+
+bool StraightLineStrengthReduce::isSimplestForm(const Candidate &C) {
+ if (C.CandidateKind == Candidate::Add) {
+ // B + 1 * S or B + (-1) * S
+ return C.Index->isOne() || C.Index->isMinusOne();
+ }
+ if (C.CandidateKind == Candidate::Mul) {
+ // (B + 0) * S
+ return C.Index->isZero();
+ }
+ if (C.CandidateKind == Candidate::GEP) {
+ // (char*)B + S or (char*)B - S
+ return ((C.Index->isOne() || C.Index->isMinusOne()) &&
+ hasOnlyOneNonZeroIndex(cast<GetElementPtrInst>(C.Ins)));
+ }
+ return false;
+}
+
+// TODO: We currently implement an algorithm whose time complexity is linear in
+// the number of existing candidates. However, we could do better by using
+// ScopedHashTable. Specifically, while traversing the dominator tree, we could
+// maintain all the candidates that dominate the basic block being traversed in
+// a ScopedHashTable. This hash table is indexed by the base and the stride of
+// a candidate. Therefore, finding the immediate basis of a candidate boils down
+// to one hash-table look up.
+void StraightLineStrengthReduce::allocateCandidatesAndFindBasis(
+ Candidate::Kind CT, const SCEV *B, ConstantInt *Idx, Value *S,
+ Instruction *I) {
+ Candidate C(CT, B, Idx, S, I);
+ // SLSR can complicate an instruction in two cases:
+ //
+ // 1. If we can fold I into an addressing mode, computing I is likely free or
+ // takes only one instruction.
+ //
+  //  2. I is already in its simplest form. For example, when
+ // X = B + 8 * S
+ // Y = B + S,
+ // rewriting Y to X - 7 * S is probably a bad idea.
+ //
+ // In the above cases, we still add I to the candidate list so that I can be
+ // the basis of other candidates, but we leave I's basis blank so that I
+ // won't be rewritten.
+ if (!isFoldable(C, TTI, DL) && !isSimplestForm(C)) {
+ // Try to compute the immediate basis of C.
+ unsigned NumIterations = 0;
+    // Limit the scan radius to avoid running in quadratic time.
+ static const unsigned MaxNumIterations = 50;
+ for (auto Basis = Candidates.rbegin();
+ Basis != Candidates.rend() && NumIterations < MaxNumIterations;
+ ++Basis, ++NumIterations) {
+ if (isBasisFor(*Basis, C)) {
+ C.Basis = &(*Basis);
+ break;
+ }
+ }
+ }
+ // Regardless of whether we find a basis for C, we need to push C to the
+ // candidate list so that it can be the basis of other candidates.
+ Candidates.push_back(C);
+}
+
+void StraightLineStrengthReduce::allocateCandidatesAndFindBasis(
+ Instruction *I) {
+ switch (I->getOpcode()) {
+ case Instruction::Add:
+ allocateCandidatesAndFindBasisForAdd(I);
+ break;
+ case Instruction::Mul:
+ allocateCandidatesAndFindBasisForMul(I);
+ break;
+ case Instruction::GetElementPtr:
+ allocateCandidatesAndFindBasisForGEP(cast<GetElementPtrInst>(I));
+ break;
+ }
+}
+
+void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForAdd(
+ Instruction *I) {
+ // Try matching B + i * S.
+ if (!isa<IntegerType>(I->getType()))
+ return;
+
+ assert(I->getNumOperands() == 2 && "isn't I an add?");
+ Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
+ allocateCandidatesAndFindBasisForAdd(LHS, RHS, I);
+ if (LHS != RHS)
+ allocateCandidatesAndFindBasisForAdd(RHS, LHS, I);
+}
+
+void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForAdd(
+ Value *LHS, Value *RHS, Instruction *I) {
+ Value *S = nullptr;
+ ConstantInt *Idx = nullptr;
+ if (match(RHS, m_Mul(m_Value(S), m_ConstantInt(Idx)))) {
+ // I = LHS + RHS = LHS + Idx * S
+ allocateCandidatesAndFindBasis(Candidate::Add, SE->getSCEV(LHS), Idx, S, I);
+ } else if (match(RHS, m_Shl(m_Value(S), m_ConstantInt(Idx)))) {
+ // I = LHS + RHS = LHS + (S << Idx) = LHS + S * (1 << Idx)
+ APInt One(Idx->getBitWidth(), 1);
+ Idx = ConstantInt::get(Idx->getContext(), One << Idx->getValue());
+ allocateCandidatesAndFindBasis(Candidate::Add, SE->getSCEV(LHS), Idx, S, I);
+ } else {
+ // At least, I = LHS + 1 * RHS
+ ConstantInt *One = ConstantInt::get(cast<IntegerType>(I->getType()), 1);
+ allocateCandidatesAndFindBasis(Candidate::Add, SE->getSCEV(LHS), One, RHS,
+ I);
+ }
+}
+
+// Returns true if A matches B + C where C is constant.
+static bool matchesAdd(Value *A, Value *&B, ConstantInt *&C) {
+ return (match(A, m_Add(m_Value(B), m_ConstantInt(C))) ||
+ match(A, m_Add(m_ConstantInt(C), m_Value(B))));
+}
+
+// Returns true if A matches B | C where C is constant.
+static bool matchesOr(Value *A, Value *&B, ConstantInt *&C) {
+ return (match(A, m_Or(m_Value(B), m_ConstantInt(C))) ||
+ match(A, m_Or(m_ConstantInt(C), m_Value(B))));
+}
+
+void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForMul(
+ Value *LHS, Value *RHS, Instruction *I) {
+ Value *B = nullptr;
+ ConstantInt *Idx = nullptr;
+ if (matchesAdd(LHS, B, Idx)) {
+ // If LHS is in the form of "Base + Index", then I is in the form of
+ // "(Base + Index) * RHS".
+ allocateCandidatesAndFindBasis(Candidate::Mul, SE->getSCEV(B), Idx, RHS, I);
+ } else if (matchesOr(LHS, B, Idx) && haveNoCommonBitsSet(B, Idx, *DL)) {
+ // If LHS is in the form of "Base | Index" and Base and Index have no common
+ // bits set, then
+ // Base | Index = Base + Index
+ // and I is thus in the form of "(Base + Index) * RHS".
+ allocateCandidatesAndFindBasis(Candidate::Mul, SE->getSCEV(B), Idx, RHS, I);
+ } else {
+ // Otherwise, at least try the form (LHS + 0) * RHS.
+ ConstantInt *Zero = ConstantInt::get(cast<IntegerType>(I->getType()), 0);
+ allocateCandidatesAndFindBasis(Candidate::Mul, SE->getSCEV(LHS), Zero, RHS,
+ I);
+ }
+}
+
+void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForMul(
+ Instruction *I) {
+ // Try matching (B + i) * S.
+ // TODO: we could extend SLSR to float and vector types.
+ if (!isa<IntegerType>(I->getType()))
+ return;
+
+ assert(I->getNumOperands() == 2 && "isn't I a mul?");
+ Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
+ allocateCandidatesAndFindBasisForMul(LHS, RHS, I);
+ if (LHS != RHS) {
+ // Symmetrically, try to split RHS to Base + Index.
+ allocateCandidatesAndFindBasisForMul(RHS, LHS, I);
+ }
+}
+
+void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
+ const SCEV *B, ConstantInt *Idx, Value *S, uint64_t ElementSize,
+ Instruction *I) {
+ // I = B + sext(Idx *nsw S) * ElementSize
+ // = B + (sext(Idx) * sext(S)) * ElementSize
+ // = B + (sext(Idx) * ElementSize) * sext(S)
+ // Casting to IntegerType is safe because we skipped vector GEPs.
+ IntegerType *IntPtrTy = cast<IntegerType>(DL->getIntPtrType(I->getType()));
+ ConstantInt *ScaledIdx = ConstantInt::get(
+ IntPtrTy, Idx->getSExtValue() * (int64_t)ElementSize, true);
+ allocateCandidatesAndFindBasis(Candidate::GEP, B, ScaledIdx, S, I);
+}
+
+void StraightLineStrengthReduce::factorArrayIndex(Value *ArrayIdx,
+ const SCEV *Base,
+ uint64_t ElementSize,
+ GetElementPtrInst *GEP) {
+ // At least, ArrayIdx = ArrayIdx *nsw 1.
+ allocateCandidatesAndFindBasisForGEP(
+ Base, ConstantInt::get(cast<IntegerType>(ArrayIdx->getType()), 1),
+ ArrayIdx, ElementSize, GEP);
+ Value *LHS = nullptr;
+ ConstantInt *RHS = nullptr;
+ // One alternative is matching the SCEV of ArrayIdx instead of ArrayIdx
+ // itself. This would allow us to handle the shl case for free. However,
+ // matching SCEVs has two issues:
+ //
+ // 1. this would complicate rewriting because the rewriting procedure
+ // would have to translate SCEVs back to IR instructions. This translation
+ // is difficult when LHS is further evaluated to a composite SCEV.
+ //
+ // 2. ScalarEvolution is designed to be control-flow oblivious. It tends
+ // to strip nsw/nuw flags which are critical for SLSR to trace into
+ // sext'ed multiplication.
+ if (match(ArrayIdx, m_NSWMul(m_Value(LHS), m_ConstantInt(RHS)))) {
+ // SLSR is currently unsafe if i * S may overflow.
+ // GEP = Base + sext(LHS *nsw RHS) * ElementSize
+ allocateCandidatesAndFindBasisForGEP(Base, RHS, LHS, ElementSize, GEP);
+ } else if (match(ArrayIdx, m_NSWShl(m_Value(LHS), m_ConstantInt(RHS)))) {
+ // GEP = Base + sext(LHS <<nsw RHS) * ElementSize
+ // = Base + sext(LHS *nsw (1 << RHS)) * ElementSize
+ APInt One(RHS->getBitWidth(), 1);
+ ConstantInt *PowerOf2 =
+ ConstantInt::get(RHS->getContext(), One << RHS->getValue());
+ allocateCandidatesAndFindBasisForGEP(Base, PowerOf2, LHS, ElementSize, GEP);
+ }
+}
+
+void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
+ GetElementPtrInst *GEP) {
+ // TODO: handle vector GEPs
+ if (GEP->getType()->isVectorTy())
+ return;
+
+ SmallVector<const SCEV *, 4> IndexExprs;
+ for (auto I = GEP->idx_begin(); I != GEP->idx_end(); ++I)
+ IndexExprs.push_back(SE->getSCEV(*I));
+
+ gep_type_iterator GTI = gep_type_begin(GEP);
+ for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
+ if (!isa<SequentialType>(*GTI++))
+ continue;
+
+ const SCEV *OrigIndexExpr = IndexExprs[I - 1];
+ IndexExprs[I - 1] = SE->getConstant(OrigIndexExpr->getType(), 0);
+
+ // The base of this candidate is GEP's base plus the offsets of all
+ // indices except this current one.
+ const SCEV *BaseExpr = SE->getGEPExpr(GEP->getSourceElementType(),
+ SE->getSCEV(GEP->getPointerOperand()),
+ IndexExprs, GEP->isInBounds());
+ Value *ArrayIdx = GEP->getOperand(I);
+ uint64_t ElementSize = DL->getTypeAllocSize(*GTI);
+ factorArrayIndex(ArrayIdx, BaseExpr, ElementSize, GEP);
+ // When ArrayIdx is the sext of a value, we try to factor that value as
+ // well. Handling this case is important because array indices are
+ // typically sign-extended to the pointer size.
+ Value *TruncatedArrayIdx = nullptr;
+ if (match(ArrayIdx, m_SExt(m_Value(TruncatedArrayIdx))))
+ factorArrayIndex(TruncatedArrayIdx, BaseExpr, ElementSize, GEP);
+
+ IndexExprs[I - 1] = OrigIndexExpr;
+ }
+}
+
+// A helper function that unifies the bitwidth of A and B.
+static void unifyBitWidth(APInt &A, APInt &B) {
+ if (A.getBitWidth() < B.getBitWidth())
+ A = A.sext(B.getBitWidth());
+ else if (A.getBitWidth() > B.getBitWidth())
+ B = B.sext(A.getBitWidth());
+}
+
+Value *StraightLineStrengthReduce::emitBump(const Candidate &Basis,
+ const Candidate &C,
+ IRBuilder<> &Builder,
+ const DataLayout *DL,
+ bool &BumpWithUglyGEP) {
+ APInt Idx = C.Index->getValue(), BasisIdx = Basis.Index->getValue();
+ unifyBitWidth(Idx, BasisIdx);
+ APInt IndexOffset = Idx - BasisIdx;
+
+ BumpWithUglyGEP = false;
+ if (Basis.CandidateKind == Candidate::GEP) {
+ APInt ElementSize(
+ IndexOffset.getBitWidth(),
+ DL->getTypeAllocSize(
+ cast<GetElementPtrInst>(Basis.Ins)->getType()->getElementType()));
+ APInt Q, R;
+ APInt::sdivrem(IndexOffset, ElementSize, Q, R);
+ if (R.getSExtValue() == 0)
+ IndexOffset = Q;
+ else
+ BumpWithUglyGEP = true;
+ }
+
+ // Compute Bump = C - Basis = (i' - i) * S.
+ // Common case 1: if (i' - i) is 1, Bump = S.
+ if (IndexOffset.getSExtValue() == 1)
+ return C.Stride;
+ // Common case 2: if (i' - i) is -1, Bump = -S.
+ if (IndexOffset.getSExtValue() == -1)
+ return Builder.CreateNeg(C.Stride);
+
+ // Otherwise, Bump = (i' - i) * sext/trunc(S). Note that (i' - i) and S may
+ // have different bit widths.
+ IntegerType *DeltaType =
+ IntegerType::get(Basis.Ins->getContext(), IndexOffset.getBitWidth());
+ Value *ExtendedStride = Builder.CreateSExtOrTrunc(C.Stride, DeltaType);
+ if (IndexOffset.isPowerOf2()) {
+ // If (i' - i) is a power of 2, Bump = sext/trunc(S) << log(i' - i).
+ ConstantInt *Exponent = ConstantInt::get(DeltaType, IndexOffset.logBase2());
+ return Builder.CreateShl(ExtendedStride, Exponent);
+ }
+ if ((-IndexOffset).isPowerOf2()) {
+ // If (i - i') is a power of 2, Bump = -sext/trunc(S) << log(i' - i).
+ ConstantInt *Exponent =
+ ConstantInt::get(DeltaType, (-IndexOffset).logBase2());
+ return Builder.CreateNeg(Builder.CreateShl(ExtendedStride, Exponent));
+ }
+ Constant *Delta = ConstantInt::get(DeltaType, IndexOffset);
+ return Builder.CreateMul(ExtendedStride, Delta);
+}
+
+void StraightLineStrengthReduce::rewriteCandidateWithBasis(
+ const Candidate &C, const Candidate &Basis) {
+ assert(C.CandidateKind == Basis.CandidateKind && C.Base == Basis.Base &&
+ C.Stride == Basis.Stride);
+ // We run rewriteCandidateWithBasis on all candidates in a post-order, so the
+ // basis of a candidate cannot be unlinked before the candidate.
+ assert(Basis.Ins->getParent() != nullptr && "the basis is unlinked");
+
+ // An instruction can correspond to multiple candidates. Therefore, instead of
+ // simply deleting an instruction when we rewrite it, we mark its parent as
+ // nullptr (i.e. unlink it) so that we can skip the candidates whose
+ // instruction is already rewritten.
+ if (!C.Ins->getParent())
+ return;
+
+ IRBuilder<> Builder(C.Ins);
+ bool BumpWithUglyGEP;
+ Value *Bump = emitBump(Basis, C, Builder, DL, BumpWithUglyGEP);
+ Value *Reduced = nullptr; // equivalent to but weaker than C.Ins
+ switch (C.CandidateKind) {
+ case Candidate::Add:
+ case Candidate::Mul:
+ // C = Basis + Bump
+ if (BinaryOperator::isNeg(Bump)) {
+ // If Bump is a neg instruction, emit C = Basis - (-Bump).
+ Reduced =
+ Builder.CreateSub(Basis.Ins, BinaryOperator::getNegArgument(Bump));
+ // We only use the negative argument of Bump, and Bump itself may be
+ // trivially dead.
+ RecursivelyDeleteTriviallyDeadInstructions(Bump);
+ } else {
+ Reduced = Builder.CreateAdd(Basis.Ins, Bump);
+ }
+ break;
+ case Candidate::GEP:
+ {
+ Type *IntPtrTy = DL->getIntPtrType(C.Ins->getType());
+ bool InBounds = cast<GetElementPtrInst>(C.Ins)->isInBounds();
+ if (BumpWithUglyGEP) {
+ // C = (char *)Basis + Bump
+ unsigned AS = Basis.Ins->getType()->getPointerAddressSpace();
+ Type *CharTy = Type::getInt8PtrTy(Basis.Ins->getContext(), AS);
+ Reduced = Builder.CreateBitCast(Basis.Ins, CharTy);
+ if (InBounds)
+ Reduced =
+ Builder.CreateInBoundsGEP(Builder.getInt8Ty(), Reduced, Bump);
+ else
+ Reduced = Builder.CreateGEP(Builder.getInt8Ty(), Reduced, Bump);
+ Reduced = Builder.CreateBitCast(Reduced, C.Ins->getType());
+ } else {
+ // C = gep Basis, Bump
+ // Canonicalize bump to pointer size.
+ Bump = Builder.CreateSExtOrTrunc(Bump, IntPtrTy);
+ if (InBounds)
+ Reduced = Builder.CreateInBoundsGEP(nullptr, Basis.Ins, Bump);
+ else
+ Reduced = Builder.CreateGEP(nullptr, Basis.Ins, Bump);
+ }
+ }
+ break;
+ default:
+ llvm_unreachable("C.CandidateKind is invalid");
+ };
+ Reduced->takeName(C.Ins);
+ C.Ins->replaceAllUsesWith(Reduced);
+ // Unlink C.Ins so that we can skip other candidates also corresponding to
+ // C.Ins. The actual deletion is postponed to the end of runOnFunction.
+ C.Ins->removeFromParent();
+ UnlinkedInstructions.push_back(C.Ins);
+}
+
+bool StraightLineStrengthReduce::runOnFunction(Function &F) {
+ if (skipOptnoneFunction(F))
+ return false;
+
+ TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ SE = &getAnalysis<ScalarEvolution>();
+ // Traverse the dominator tree in the depth-first order. This order makes sure
+ // all bases of a candidate are in Candidates when we process it.
+ for (auto node = GraphTraits<DominatorTree *>::nodes_begin(DT);
+ node != GraphTraits<DominatorTree *>::nodes_end(DT); ++node) {
+ for (auto &I : *node->getBlock())
+ allocateCandidatesAndFindBasis(&I);
+ }
+
+ // Rewrite candidates in the reverse depth-first order. This order makes sure
+ // a candidate being rewritten is not a basis for any other candidate.
+ while (!Candidates.empty()) {
+ const Candidate &C = Candidates.back();
+ if (C.Basis != nullptr) {
+ rewriteCandidateWithBasis(C, *C.Basis);
+ }
+ Candidates.pop_back();
+ }
+
+  // Delete all unlinked instructions.
+ for (auto *UnlinkedInst : UnlinkedInstructions) {
+ for (unsigned I = 0, E = UnlinkedInst->getNumOperands(); I != E; ++I) {
+ Value *Op = UnlinkedInst->getOperand(I);
+ UnlinkedInst->setOperand(I, nullptr);
+ RecursivelyDeleteTriviallyDeadInstructions(Op);
+ }
+ delete UnlinkedInst;
+ }
+ bool Ret = !UnlinkedInstructions.empty();
+ UnlinkedInstructions.clear();
+ return Ret;
+}
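
A minimal standalone C++ sketch (the values B, S, and the indices 2 and 6 are hypothetical) of the Form 1 rewrite described in the header comment above: once X = B + i * S is available as a basis, a dominated candidate Y = B + i' * S can be recomputed as X + (i' - i) * S, and when i' - i is a power of two the bump degenerates to a shift, which is the case emitBump special-cases.

#include <cassert>
#include <cstdint>

int main() {
  int64_t B = 1000, S = 7;
  int64_t X = B + 2 * S;  // S1 (the basis): X = B + i * S with i = 2
  int64_t Y = B + 6 * S;  // S2:             Y = B + i' * S with i' = 6
  // Rewrite of S2 with respect to its basis S1: Y = X + (i' - i) * S.
  int64_t Bump = (6 - 2) * S;
  assert(Y == X + Bump);
  // i' - i == 4 is a power of two, so the bump can be emitted as a shift.
  assert(Bump == (S << 2));
  return 0;
}
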
diff --git a/lib/Transforms/Scalar/StructurizeCFG.cpp b/lib/Transforms/Scalar/StructurizeCFG.cpp
index 7fe87f9319b6..4f23e20d251d 100644
--- a/lib/Transforms/Scalar/StructurizeCFG.cpp
+++ b/lib/Transforms/Scalar/StructurizeCFG.cpp
@@ -9,6 +9,7 @@
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionInfo.h"
@@ -16,6 +17,8 @@
#include "llvm/Analysis/RegionPass.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;
@@ -249,7 +252,7 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequiredID(LowerSwitchID);
AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<LoopInfo>();
+ AU.addRequired<LoopInfoWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
RegionPass::getAnalysisUsage(AU);
}
@@ -281,11 +284,65 @@ bool StructurizeCFG::doInitialization(Region *R, RGPassManager &RGM) {
/// \brief Build up the general order of nodes
void StructurizeCFG::orderNodes() {
- scc_iterator<Region *> I = scc_begin(ParentRegion);
- for (Order.clear(); !I.isAtEnd(); ++I) {
- const std::vector<RegionNode *> &Nodes = *I;
- Order.append(Nodes.begin(), Nodes.end());
+ RNVector TempOrder;
+ ReversePostOrderTraversal<Region*> RPOT(ParentRegion);
+ TempOrder.append(RPOT.begin(), RPOT.end());
+
+ std::map<Loop*, unsigned> LoopBlocks;
+
+
+ // The reverse post-order traversal of the list gives us an ordering close
+ // to what we want. The only problem with it is that sometimes backedges
+ // for outer loops will be visited before backedges for inner loops.
+ for (RegionNode *RN : TempOrder) {
+ BasicBlock *BB = RN->getEntry();
+ Loop *Loop = LI->getLoopFor(BB);
+ if (!LoopBlocks.count(Loop)) {
+ LoopBlocks[Loop] = 1;
+ continue;
+ }
+ LoopBlocks[Loop]++;
+ }
+
+ unsigned CurrentLoopDepth = 0;
+ Loop *CurrentLoop = nullptr;
+ BBSet TempVisited;
+ for (RNVector::iterator I = TempOrder.begin(), E = TempOrder.end(); I != E; ++I) {
+ BasicBlock *BB = (*I)->getEntry();
+ unsigned LoopDepth = LI->getLoopDepth(BB);
+
+ if (std::find(Order.begin(), Order.end(), *I) != Order.end())
+ continue;
+
+ if (LoopDepth < CurrentLoopDepth) {
+ // Make sure we have visited all blocks in this loop before moving back to
+ // the outer loop.
+
+ RNVector::iterator LoopI = I;
+ while(LoopBlocks[CurrentLoop]) {
+ LoopI++;
+ BasicBlock *LoopBB = (*LoopI)->getEntry();
+ if (LI->getLoopFor(LoopBB) == CurrentLoop) {
+ LoopBlocks[CurrentLoop]--;
+ Order.push_back(*LoopI);
+ }
+ }
+ }
+
+ CurrentLoop = LI->getLoopFor(BB);
+ if (CurrentLoop) {
+ LoopBlocks[CurrentLoop]--;
+ }
+
+ CurrentLoopDepth = LoopDepth;
+ Order.push_back(*I);
}
+
+  // This pass originally used a post-order traversal and then operated on
+  // the list in reverse. Now that we use a reverse post-order traversal,
+  // rather than reworking the whole pass to operate on the list in order,
+  // we simply reverse the list and continue to operate on it in reverse.
+ std::reverse(Order.begin(), Order.end());
}
/// \brief Determine the end of the loops
@@ -304,7 +361,7 @@ void StructurizeCFG::analyzeLoops(RegionNode *N) {
for (unsigned i = 0, e = Term->getNumSuccessors(); i != e; ++i) {
BasicBlock *Succ = Term->getSuccessor(i);
- if (Visited.count(Succ) && LI->isLoopHeader(Succ) ) {
+ if (Visited.count(Succ)) {
Loops[Succ] = BB;
}
}
@@ -441,6 +498,10 @@ void StructurizeCFG::collectInfos() {
for (RNVector::reverse_iterator OI = Order.rbegin(), OE = Order.rend();
OI != OE; ++OI) {
+ DEBUG(dbgs() << "Visiting: " <<
+ ((*OI)->isSubRegion() ? "SubRegion with entry: " : "") <<
+ (*OI)->getEntry()->getName() << " Loop Depth: " << LI->getLoopDepth((*OI)->getEntry()) << "\n");
+
// Analyze all the conditions leading to a node
gatherPredicates(*OI);
@@ -826,7 +887,7 @@ void StructurizeCFG::createFlow() {
/// no longer dominate all their uses. Not sure if this is really necessary
void StructurizeCFG::rebuildSSA() {
SSAUpdater Updater;
- for (const auto &BB : ParentRegion->blocks())
+ for (auto *BB : ParentRegion->blocks())
for (BasicBlock::iterator II = BB->begin(), IE = BB->end();
II != IE; ++II) {
@@ -866,7 +927,7 @@ bool StructurizeCFG::runOnRegion(Region *R, RGPassManager &RGM) {
ParentRegion = R;
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- LI = &getAnalysis<LoopInfo>();
+ LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
orderNodes();
collectInfos();
diff --git a/lib/Transforms/Scalar/TailRecursionElimination.cpp b/lib/Transforms/Scalar/TailRecursionElimination.cpp
index f3c3e3054b60..9eef1327c3f6 100644
--- a/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -54,8 +54,8 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CFG.h"
+#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
@@ -87,7 +87,6 @@ STATISTIC(NumAccumAdded, "Number of accumulators introduced");
namespace {
struct TailCallElim : public FunctionPass {
const TargetTransformInfo *TTI;
- const DataLayout *DL;
static char ID; // Pass identification, replacement for typeid
TailCallElim() : FunctionPass(ID) {
@@ -126,7 +125,7 @@ namespace {
char TailCallElim::ID = 0;
INITIALIZE_PASS_BEGIN(TailCallElim, "tailcallelim",
"Tail Call Elimination", false, false)
-INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(TailCallElim, "tailcallelim",
"Tail Call Elimination", false, false)
@@ -136,7 +135,7 @@ FunctionPass *llvm::createTailCallEliminationPass() {
}
void TailCallElim::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<TargetTransformInfo>();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
}
/// \brief Scan the specified function for alloca instructions.
@@ -159,8 +158,6 @@ bool TailCallElim::runOnFunction(Function &F) {
if (skipOptnoneFunction(F))
return false;
- DL = F.getParent()->getDataLayout();
-
bool AllCallsAreTailCalls = false;
bool Modified = markTails(F, AllCallsAreTailCalls);
if (AllCallsAreTailCalls)
@@ -386,16 +383,15 @@ bool TailCallElim::runTRE(Function &F) {
// right, so don't even try to convert it...
if (F.getFunctionType()->isVarArg()) return false;
- TTI = &getAnalysis<TargetTransformInfo>();
+ TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
BasicBlock *OldEntry = nullptr;
bool TailCallsAreMarkedTail = false;
SmallVector<PHINode*, 8> ArgumentPHIs;
bool MadeChange = false;
- // CanTRETailMarkedCall - If false, we cannot perform TRE on tail calls
- // marked with the 'tail' attribute, because doing so would cause the stack
- // size to increase (real TRE would deallocate variable sized allocas, TRE
- // doesn't).
+ // If false, we cannot perform TRE on tail calls marked with the 'tail'
+ // attribute, because doing so would cause the stack size to increase (real
+ // TRE would deallocate variable sized allocas, TRE doesn't).
bool CanTRETailMarkedCall = CanTRE(F);
// Change any tail recursive calls to loops.
@@ -404,28 +400,19 @@ bool TailCallElim::runTRE(Function &F) {
// alloca' is changed from being a static alloca to being a dynamic alloca.
// Until this is resolved, disable this transformation if that would ever
// happen. This bug is PR962.
- SmallVector<BasicBlock*, 8> BBToErase;
- for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
+ for (Function::iterator BBI = F.begin(), E = F.end(); BBI != E; /*in loop*/) {
+ BasicBlock *BB = BBI++; // FoldReturnAndProcessPred may delete BB.
if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB->getTerminator())) {
bool Change = ProcessReturningBlock(Ret, OldEntry, TailCallsAreMarkedTail,
ArgumentPHIs, !CanTRETailMarkedCall);
- if (!Change && BB->getFirstNonPHIOrDbg() == Ret) {
+ if (!Change && BB->getFirstNonPHIOrDbg() == Ret)
Change = FoldReturnAndProcessPred(BB, Ret, OldEntry,
TailCallsAreMarkedTail, ArgumentPHIs,
!CanTRETailMarkedCall);
- // FoldReturnAndProcessPred may have emptied some BB. Remember to
- // erase them.
- if (Change && BB->empty())
- BBToErase.push_back(BB);
-
- }
MadeChange |= Change;
}
}
- for (auto BB: BBToErase)
- BB->eraseFromParent();
-
// If we eliminated any tail recursions, it's possible that we inserted some
// silly PHI nodes which just merge an initial value (the incoming operand)
// with themselves. Check to see if we did and clean up our mess if so. This
@@ -435,7 +422,7 @@ bool TailCallElim::runTRE(Function &F) {
PHINode *PN = ArgumentPHIs[i];
// If the PHI Node is a dynamic constant, replace it with the value it is.
- if (Value *PNV = SimplifyInstruction(PN)) {
+ if (Value *PNV = SimplifyInstruction(PN, F.getParent()->getDataLayout())) {
PN->replaceAllUsesWith(PNV);
PN->eraseFromParent();
}
@@ -445,7 +432,7 @@ bool TailCallElim::runTRE(Function &F) {
}
-/// CanMoveAboveCall - Return true if it is safe to move the specified
+/// Return true if it is safe to move the specified
/// instruction from after the call to before the call, assuming that all
/// instructions between the call and this instruction are movable.
///
@@ -464,7 +451,7 @@ bool TailCallElim::CanMoveAboveCall(Instruction *I, CallInst *CI) {
// being loaded from.
if (CI->mayWriteToMemory() ||
!isSafeToLoadUnconditionally(L->getPointerOperand(), L,
- L->getAlignment(), DL))
+ L->getAlignment()))
return false;
}
}
@@ -480,13 +467,11 @@ bool TailCallElim::CanMoveAboveCall(Instruction *I, CallInst *CI) {
return true;
}
-// isDynamicConstant - Return true if the specified value is the same when the
-// return would exit as it was when the initial iteration of the recursive
-// function was executed.
-//
-// We currently handle static constants and arguments that are not modified as
-// part of the recursion.
-//
+/// Return true if the specified value is the same when the return would exit
+/// as it was when the initial iteration of the recursive function was executed.
+///
+/// We currently handle static constants and arguments that are not modified as
+/// part of the recursion.
static bool isDynamicConstant(Value *V, CallInst *CI, ReturnInst *RI) {
if (isa<Constant>(V)) return true; // Static constants are always dyn consts
@@ -518,10 +503,9 @@ static bool isDynamicConstant(Value *V, CallInst *CI, ReturnInst *RI) {
return false;
}
-// getCommonReturnValue - Check to see if the function containing the specified
-// tail call consistently returns the same runtime-constant value at all exit
-// points except for IgnoreRI. If so, return the returned value.
-//
+/// Check to see if the function containing the specified tail call consistently
+/// returns the same runtime-constant value at all exit points except for
+/// IgnoreRI. If so, return the returned value.
static Value *getCommonReturnValue(ReturnInst *IgnoreRI, CallInst *CI) {
Function *F = CI->getParent()->getParent();
Value *ReturnedValue = nullptr;
@@ -545,10 +529,9 @@ static Value *getCommonReturnValue(ReturnInst *IgnoreRI, CallInst *CI) {
return ReturnedValue;
}
-/// CanTransformAccumulatorRecursion - If the specified instruction can be
-/// transformed using accumulator recursion elimination, return the constant
-/// which is the start of the accumulator value. Otherwise return null.
-///
+/// If the specified instruction can be transformed using accumulator recursion
+/// elimination, return the constant which is the start of the accumulator
+/// value. Otherwise return null.
Value *TailCallElim::CanTransformAccumulatorRecursion(Instruction *I,
CallInst *CI) {
if (!I->isAssociative() || !I->isCommutative()) return nullptr;
@@ -836,14 +819,11 @@ bool TailCallElim::FoldReturnAndProcessPred(BasicBlock *BB,
ReturnInst *RI = FoldReturnIntoUncondBranch(Ret, BB, Pred);
// Cleanup: if all predecessors of BB have been eliminated by
- // FoldReturnIntoUncondBranch, we would like to delete it, but we
- // can not just nuke it as it is being used as an iterator by our caller.
- // Just empty it, and the caller will erase it when it is safe to do so.
- // It is important to empty it, because the ret instruction in there is
- // still using a value which EliminateRecursiveTailCall will attempt
- // to remove.
+ // FoldReturnIntoUncondBranch, delete it. It is important to empty it,
+ // because the ret instruction in there is still using a value which
+ // EliminateRecursiveTailCall will attempt to remove.
if (!BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
- BB->getInstList().clear();
+ BB->eraseFromParent();
EliminateRecursiveTailCall(CI, RI, OldEntry, TailCallsAreMarkedTail,
ArgumentPHIs,