aboutsummaryrefslogtreecommitdiff
path: root/unittests/Analysis
diff options
context:
space:
mode:
authorDimitry Andric <dim@FreeBSD.org>2016-07-23 20:41:05 +0000
committerDimitry Andric <dim@FreeBSD.org>2016-07-23 20:41:05 +0000
commit01095a5d43bbfde13731688ddcf6048ebb8b7721 (patch)
tree4def12e759965de927d963ac65840d663ef9d1ea /unittests/Analysis
parentf0f4822ed4b66e3579e92a89f368f8fb860e218e (diff)
downloadsrc-01095a5d43bbfde13731688ddcf6048ebb8b7721.tar.gz
src-01095a5d43bbfde13731688ddcf6048ebb8b7721.zip
Vendor import of llvm release_39 branch r276489:vendor/llvm/llvm-release_39-r276489
Notes
Notes: svn path=/vendor/llvm/dist/; revision=303231 svn path=/vendor/llvm/llvm-release_39-r276489/; revision=303232; tag=vendor/llvm/llvm-release_39-r276489
Diffstat (limited to 'unittests/Analysis')
-rw-r--r--unittests/Analysis/AliasAnalysisTest.cpp26
-rw-r--r--unittests/Analysis/BlockFrequencyInfoTest.cpp86
-rw-r--r--unittests/Analysis/CFGTest.cpp3
-rw-r--r--unittests/Analysis/CGSCCPassManagerTest.cpp311
-rw-r--r--unittests/Analysis/CMakeLists.txt4
-rw-r--r--unittests/Analysis/CallGraphTest.cpp6
-rw-r--r--unittests/Analysis/LazyCallGraphTest.cpp1237
-rw-r--r--unittests/Analysis/LoopPassManagerTest.cpp205
-rw-r--r--unittests/Analysis/Makefile15
-rw-r--r--unittests/Analysis/MixedTBAATest.cpp1
-rw-r--r--unittests/Analysis/ScalarEvolutionTest.cpp27
-rw-r--r--unittests/Analysis/UnrollAnalyzer.cpp331
-rw-r--r--unittests/Analysis/ValueTrackingTest.cpp3
13 files changed, 1895 insertions, 360 deletions
diff --git a/unittests/Analysis/AliasAnalysisTest.cpp b/unittests/Analysis/AliasAnalysisTest.cpp
index ee116992fe76..84a04257bc27 100644
--- a/unittests/Analysis/AliasAnalysisTest.cpp
+++ b/unittests/Analysis/AliasAnalysisTest.cpp
@@ -14,12 +14,11 @@
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Constants.h"
-#include "llvm/IR/Instructions.h"
#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/SourceMgr.h"
#include "gtest/gtest.h"
@@ -80,9 +79,8 @@ struct TestCustomAAResult : AAResultBase<TestCustomAAResult> {
std::function<void()> CB;
- explicit TestCustomAAResult(const TargetLibraryInfo &TLI,
- std::function<void()> CB)
- : AAResultBase(TLI), CB(std::move(CB)) {}
+ explicit TestCustomAAResult(std::function<void()> CB)
+ : AAResultBase(), CB(std::move(CB)) {}
TestCustomAAResult(TestCustomAAResult &&Arg)
: AAResultBase(std::move(Arg)), CB(std::move(Arg.CB)) {}
@@ -117,8 +115,7 @@ public:
}
bool doInitialization(Module &M) override {
- Result.reset(new TestCustomAAResult(
- getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(), std::move(CB)));
+ Result.reset(new TestCustomAAResult(std::move(CB)));
return true;
}
@@ -155,7 +152,7 @@ protected:
AAResults &getAAResults(Function &F) {
// Reset the Function AA results first to clear out any references.
- AAR.reset(new AAResults());
+ AAR.reset(new AAResults(TLI));
// Build the various AA results and register them.
AC.reset(new AssumptionCache(F));
@@ -181,12 +178,12 @@ TEST_F(AliasAnalysisTest, getModRefInfo) {
auto *Load1 = new LoadInst(Addr, "load", BB);
auto *Add1 = BinaryOperator::CreateAdd(Value, Value, "add", BB);
auto *VAArg1 = new VAArgInst(Addr, PtrType, "vaarg", BB);
- auto *CmpXChg1 = new AtomicCmpXchgInst(Addr, ConstantInt::get(IntType, 0),
- ConstantInt::get(IntType, 1),
- Monotonic, Monotonic, CrossThread, BB);
+ auto *CmpXChg1 = new AtomicCmpXchgInst(
+ Addr, ConstantInt::get(IntType, 0), ConstantInt::get(IntType, 1),
+ AtomicOrdering::Monotonic, AtomicOrdering::Monotonic, CrossThread, BB);
auto *AtomicRMW =
new AtomicRMWInst(AtomicRMWInst::Xchg, Addr, ConstantInt::get(IntType, 1),
- Monotonic, CrossThread, BB);
+ AtomicOrdering::Monotonic, CrossThread, BB);
ReturnInst::Create(C, nullptr, BB);
@@ -209,14 +206,13 @@ TEST_F(AliasAnalysisTest, getModRefInfo) {
class AAPassInfraTest : public testing::Test {
protected:
- LLVMContext &C;
+ LLVMContext C;
SMDiagnostic Err;
std::unique_ptr<Module> M;
public:
AAPassInfraTest()
- : C(getGlobalContext()),
- M(parseAssemblyString("define i32 @f(i32* %x, i32* %y) {\n"
+ : M(parseAssemblyString("define i32 @f(i32* %x, i32* %y) {\n"
"entry:\n"
" %lx = load i32, i32* %x\n"
" %ly = load i32, i32* %y\n"
diff --git a/unittests/Analysis/BlockFrequencyInfoTest.cpp b/unittests/Analysis/BlockFrequencyInfoTest.cpp
new file mode 100644
index 000000000000..b3b0fcfb049b
--- /dev/null
+++ b/unittests/Analysis/BlockFrequencyInfoTest.cpp
@@ -0,0 +1,86 @@
+//===- BlockFrequencyInfoTest.cpp - BlockFrequencyInfo unit tests ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/BlockFrequencyInfo.h"
+#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/raw_ostream.h"
+#include "gtest/gtest.h"
+
+namespace llvm {
+namespace {
+
+class BlockFrequencyInfoTest : public testing::Test {
+protected:
+ std::unique_ptr<BranchProbabilityInfo> BPI;
+ std::unique_ptr<DominatorTree> DT;
+ std::unique_ptr<LoopInfo> LI;
+ LLVMContext C;
+
+ BlockFrequencyInfo buildBFI(Function &F) {
+ DT.reset(new DominatorTree(F));
+ LI.reset(new LoopInfo(*DT));
+ BPI.reset(new BranchProbabilityInfo(F, *LI));
+ return BlockFrequencyInfo(F, *BPI, *LI);
+ }
+ std::unique_ptr<Module> makeLLVMModule() {
+ const char *ModuleStrig = "define i32 @f(i32 %x) {\n"
+ "bb0:\n"
+ " %y1 = icmp eq i32 %x, 0 \n"
+ " br i1 %y1, label %bb1, label %bb2 \n"
+ "bb1:\n"
+ " br label %bb3\n"
+ "bb2:\n"
+ " br label %bb3\n"
+ "bb3:\n"
+ " %y2 = phi i32 [0, %bb1], [1, %bb2] \n"
+ " ret i32 %y2\n"
+ "}\n";
+ SMDiagnostic Err;
+ return parseAssemblyString(ModuleStrig, Err, C);
+ }
+};
+
+TEST_F(BlockFrequencyInfoTest, Basic) {
+ auto M = makeLLVMModule();
+ Function *F = M->getFunction("f");
+ F->setEntryCount(100);
+
+ BlockFrequencyInfo BFI = buildBFI(*F);
+ BasicBlock &BB0 = F->getEntryBlock();
+ BasicBlock *BB1 = BB0.getTerminator()->getSuccessor(0);
+ BasicBlock *BB2 = BB0.getTerminator()->getSuccessor(1);
+ BasicBlock *BB3 = BB1->getSingleSuccessor();
+
+ uint64_t BB0Freq = BFI.getBlockFreq(&BB0).getFrequency();
+ uint64_t BB1Freq = BFI.getBlockFreq(BB1).getFrequency();
+ uint64_t BB2Freq = BFI.getBlockFreq(BB2).getFrequency();
+ uint64_t BB3Freq = BFI.getBlockFreq(BB3).getFrequency();
+
+ EXPECT_EQ(BB0Freq, BB3Freq);
+ EXPECT_EQ(BB0Freq, BB1Freq + BB2Freq);
+ EXPECT_EQ(BB0Freq, BB3Freq);
+
+ EXPECT_EQ(BFI.getBlockProfileCount(&BB0).getValue(), UINT64_C(100));
+ EXPECT_EQ(BFI.getBlockProfileCount(BB3).getValue(), UINT64_C(100));
+ EXPECT_EQ(BFI.getBlockProfileCount(BB1).getValue(), 100 * BB1Freq / BB0Freq);
+ EXPECT_EQ(BFI.getBlockProfileCount(BB2).getValue(), 100 * BB2Freq / BB0Freq);
+}
+
+} // end anonymous namespace
+} // end namespace llvm
diff --git a/unittests/Analysis/CFGTest.cpp b/unittests/Analysis/CFGTest.cpp
index 44f0fe681dff..c60044fa52df 100644
--- a/unittests/Analysis/CFGTest.cpp
+++ b/unittests/Analysis/CFGTest.cpp
@@ -31,7 +31,7 @@ class IsPotentiallyReachableTest : public testing::Test {
protected:
void ParseAssembly(const char *Assembly) {
SMDiagnostic Error;
- M = parseAssemblyString(Assembly, Error, getGlobalContext());
+ M = parseAssemblyString(Assembly, Error, Context);
std::string errMsg;
raw_string_ostream os(errMsg);
@@ -112,6 +112,7 @@ protected:
PM.run(*M);
}
+ LLVMContext Context;
std::unique_ptr<Module> M;
Instruction *A, *B;
};
diff --git a/unittests/Analysis/CGSCCPassManagerTest.cpp b/unittests/Analysis/CGSCCPassManagerTest.cpp
new file mode 100644
index 000000000000..224f2df13181
--- /dev/null
+++ b/unittests/Analysis/CGSCCPassManagerTest.cpp
@@ -0,0 +1,311 @@
+//===- CGSCCPassManagerTest.cpp -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/SourceMgr.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+namespace {
+
+class TestModuleAnalysis {
+public:
+ struct Result {
+ Result(int Count) : FunctionCount(Count) {}
+ int FunctionCount;
+ };
+
+ static void *ID() { return (void *)&PassID; }
+ static StringRef name() { return "TestModuleAnalysis"; }
+
+ TestModuleAnalysis(int &Runs) : Runs(Runs) {}
+
+ Result run(Module &M, ModuleAnalysisManager &AM) {
+ ++Runs;
+ return Result(M.size());
+ }
+
+private:
+ static char PassID;
+
+ int &Runs;
+};
+
+char TestModuleAnalysis::PassID;
+
+class TestSCCAnalysis {
+public:
+ struct Result {
+ Result(int Count) : FunctionCount(Count) {}
+ int FunctionCount;
+ };
+
+ static void *ID() { return (void *)&PassID; }
+ static StringRef name() { return "TestSCCAnalysis"; }
+
+ TestSCCAnalysis(int &Runs) : Runs(Runs) {}
+
+ Result run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM) {
+ ++Runs;
+ return Result(C.size());
+ }
+
+private:
+ static char PassID;
+
+ int &Runs;
+};
+
+char TestSCCAnalysis::PassID;
+
+class TestFunctionAnalysis {
+public:
+ struct Result {
+ Result(int Count) : InstructionCount(Count) {}
+ int InstructionCount;
+ };
+
+ static void *ID() { return (void *)&PassID; }
+ static StringRef name() { return "TestFunctionAnalysis"; }
+
+ TestFunctionAnalysis(int &Runs) : Runs(Runs) {}
+
+ Result run(Function &F, FunctionAnalysisManager &AM) {
+ ++Runs;
+ int Count = 0;
+ for (Instruction &I : instructions(F)) {
+ (void)I;
+ ++Count;
+ }
+ return Result(Count);
+ }
+
+private:
+ static char PassID;
+
+ int &Runs;
+};
+
+char TestFunctionAnalysis::PassID;
+
+class TestImmutableFunctionAnalysis {
+public:
+ struct Result {
+ bool invalidate(Function &, const PreservedAnalyses &) { return false; }
+ };
+
+ static void *ID() { return (void *)&PassID; }
+ static StringRef name() { return "TestImmutableFunctionAnalysis"; }
+
+ TestImmutableFunctionAnalysis(int &Runs) : Runs(Runs) {}
+
+ Result run(Function &F, FunctionAnalysisManager &AM) {
+ ++Runs;
+ return Result();
+ }
+
+private:
+ static char PassID;
+
+ int &Runs;
+};
+
+char TestImmutableFunctionAnalysis::PassID;
+
+struct TestModulePass {
+ TestModulePass(int &RunCount) : RunCount(RunCount) {}
+
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM) {
+ ++RunCount;
+ (void)AM.getResult<TestModuleAnalysis>(M);
+ return PreservedAnalyses::all();
+ }
+
+ static StringRef name() { return "TestModulePass"; }
+
+ int &RunCount;
+};
+
+struct TestSCCPass {
+ TestSCCPass(int &RunCount, int &AnalyzedInstrCount,
+ int &AnalyzedSCCFunctionCount, int &AnalyzedModuleFunctionCount,
+ bool OnlyUseCachedResults = false)
+ : RunCount(RunCount), AnalyzedInstrCount(AnalyzedInstrCount),
+ AnalyzedSCCFunctionCount(AnalyzedSCCFunctionCount),
+ AnalyzedModuleFunctionCount(AnalyzedModuleFunctionCount),
+ OnlyUseCachedResults(OnlyUseCachedResults) {}
+
+ PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM) {
+ ++RunCount;
+
+ const ModuleAnalysisManager &MAM =
+ AM.getResult<ModuleAnalysisManagerCGSCCProxy>(C).getManager();
+ FunctionAnalysisManager &FAM =
+ AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C).getManager();
+ if (TestModuleAnalysis::Result *TMA =
+ MAM.getCachedResult<TestModuleAnalysis>(
+ *C.begin()->getFunction().getParent()))
+ AnalyzedModuleFunctionCount += TMA->FunctionCount;
+
+ if (OnlyUseCachedResults) {
+ // Hack to force the use of the cached interface.
+ if (TestSCCAnalysis::Result *AR = AM.getCachedResult<TestSCCAnalysis>(C))
+ AnalyzedSCCFunctionCount += AR->FunctionCount;
+ for (LazyCallGraph::Node &N : C)
+ if (TestFunctionAnalysis::Result *FAR =
+ FAM.getCachedResult<TestFunctionAnalysis>(N.getFunction()))
+ AnalyzedInstrCount += FAR->InstructionCount;
+ } else {
+ // Typical path just runs the analysis as needed.
+ TestSCCAnalysis::Result &AR = AM.getResult<TestSCCAnalysis>(C);
+ AnalyzedSCCFunctionCount += AR.FunctionCount;
+ for (LazyCallGraph::Node &N : C) {
+ TestFunctionAnalysis::Result &FAR =
+ FAM.getResult<TestFunctionAnalysis>(N.getFunction());
+ AnalyzedInstrCount += FAR.InstructionCount;
+
+ // Just ensure we get the immutable results.
+ (void)FAM.getResult<TestImmutableFunctionAnalysis>(N.getFunction());
+ }
+ }
+
+ return PreservedAnalyses::all();
+ }
+
+ static StringRef name() { return "TestSCCPass"; }
+
+ int &RunCount;
+ int &AnalyzedInstrCount;
+ int &AnalyzedSCCFunctionCount;
+ int &AnalyzedModuleFunctionCount;
+ bool OnlyUseCachedResults;
+};
+
+struct TestFunctionPass {
+ TestFunctionPass(int &RunCount) : RunCount(RunCount) {}
+
+ PreservedAnalyses run(Function &F, AnalysisManager<Function> &) {
+ ++RunCount;
+ return PreservedAnalyses::none();
+ }
+
+ static StringRef name() { return "TestFunctionPass"; }
+
+ int &RunCount;
+};
+
+std::unique_ptr<Module> parseIR(const char *IR) {
+ // We just use a static context here. This is never called from multiple
+ // threads so it is harmless no matter how it is implemented. We just need
+ // the context to outlive the module which it does.
+ static LLVMContext C;
+ SMDiagnostic Err;
+ return parseAssemblyString(IR, Err, C);
+}
+
+TEST(CGSCCPassManagerTest, Basic) {
+ auto M = parseIR("define void @f() {\n"
+ "entry:\n"
+ " call void @g()\n"
+ " call void @h1()\n"
+ " ret void\n"
+ "}\n"
+ "define void @g() {\n"
+ "entry:\n"
+ " call void @g()\n"
+ " call void @x()\n"
+ " ret void\n"
+ "}\n"
+ "define void @h1() {\n"
+ "entry:\n"
+ " call void @h2()\n"
+ " ret void\n"
+ "}\n"
+ "define void @h2() {\n"
+ "entry:\n"
+ " call void @h3()\n"
+ " call void @x()\n"
+ " ret void\n"
+ "}\n"
+ "define void @h3() {\n"
+ "entry:\n"
+ " call void @h1()\n"
+ " ret void\n"
+ "}\n"
+ "define void @x() {\n"
+ "entry:\n"
+ " ret void\n"
+ "}\n");
+ FunctionAnalysisManager FAM(/*DebugLogging*/ true);
+ int FunctionAnalysisRuns = 0;
+ FAM.registerPass([&] { return TestFunctionAnalysis(FunctionAnalysisRuns); });
+ int ImmutableFunctionAnalysisRuns = 0;
+ FAM.registerPass([&] {
+ return TestImmutableFunctionAnalysis(ImmutableFunctionAnalysisRuns);
+ });
+
+ CGSCCAnalysisManager CGAM(/*DebugLogging*/ true);
+ int SCCAnalysisRuns = 0;
+ CGAM.registerPass([&] { return TestSCCAnalysis(SCCAnalysisRuns); });
+
+ ModuleAnalysisManager MAM(/*DebugLogging*/ true);
+ int ModuleAnalysisRuns = 0;
+ MAM.registerPass([&] { return LazyCallGraphAnalysis(); });
+ MAM.registerPass([&] { return TestModuleAnalysis(ModuleAnalysisRuns); });
+
+ MAM.registerPass([&] { return FunctionAnalysisManagerModuleProxy(FAM); });
+ MAM.registerPass([&] { return CGSCCAnalysisManagerModuleProxy(CGAM); });
+ CGAM.registerPass([&] { return FunctionAnalysisManagerCGSCCProxy(FAM); });
+ CGAM.registerPass([&] { return ModuleAnalysisManagerCGSCCProxy(MAM); });
+ FAM.registerPass([&] { return CGSCCAnalysisManagerFunctionProxy(CGAM); });
+ FAM.registerPass([&] { return ModuleAnalysisManagerFunctionProxy(MAM); });
+
+ ModulePassManager MPM(/*DebugLogging*/ true);
+ int ModulePassRunCount1 = 0;
+ MPM.addPass(TestModulePass(ModulePassRunCount1));
+
+ CGSCCPassManager CGPM1(/*DebugLogging*/ true);
+ int SCCPassRunCount1 = 0;
+ int AnalyzedInstrCount1 = 0;
+ int AnalyzedSCCFunctionCount1 = 0;
+ int AnalyzedModuleFunctionCount1 = 0;
+ CGPM1.addPass(TestSCCPass(SCCPassRunCount1, AnalyzedInstrCount1,
+ AnalyzedSCCFunctionCount1,
+ AnalyzedModuleFunctionCount1));
+
+ FunctionPassManager FPM1(/*DebugLogging*/ true);
+ int FunctionPassRunCount1 = 0;
+ FPM1.addPass(TestFunctionPass(FunctionPassRunCount1));
+ CGPM1.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM1)));
+ MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM1)));
+
+ MPM.run(*M, MAM);
+
+ EXPECT_EQ(1, ModulePassRunCount1);
+
+ EXPECT_EQ(1, ModuleAnalysisRuns);
+ EXPECT_EQ(4, SCCAnalysisRuns);
+ EXPECT_EQ(6, FunctionAnalysisRuns);
+ EXPECT_EQ(6, ImmutableFunctionAnalysisRuns);
+
+ EXPECT_EQ(4, SCCPassRunCount1);
+ EXPECT_EQ(14, AnalyzedInstrCount1);
+ EXPECT_EQ(6, AnalyzedSCCFunctionCount1);
+ EXPECT_EQ(4 * 6, AnalyzedModuleFunctionCount1);
+}
+
+}
diff --git a/unittests/Analysis/CMakeLists.txt b/unittests/Analysis/CMakeLists.txt
index 06560cf14d4a..2292a454c27a 100644
--- a/unittests/Analysis/CMakeLists.txt
+++ b/unittests/Analysis/CMakeLists.txt
@@ -7,10 +7,14 @@ set(LLVM_LINK_COMPONENTS
add_llvm_unittest(AnalysisTests
AliasAnalysisTest.cpp
+ BlockFrequencyInfoTest.cpp
CallGraphTest.cpp
CFGTest.cpp
+ CGSCCPassManagerTest.cpp
LazyCallGraphTest.cpp
+ LoopPassManagerTest.cpp
ScalarEvolutionTest.cpp
MixedTBAATest.cpp
ValueTrackingTest.cpp
+ UnrollAnalyzer.cpp
)
diff --git a/unittests/Analysis/CallGraphTest.cpp b/unittests/Analysis/CallGraphTest.cpp
index 777907a55b11..af46291074c2 100644
--- a/unittests/Analysis/CallGraphTest.cpp
+++ b/unittests/Analysis/CallGraphTest.cpp
@@ -44,14 +44,16 @@ template <typename Ty> void canSpecializeGraphTraitsIterators(Ty *G) {
}
TEST(CallGraphTest, GraphTraitsSpecialization) {
- Module M("", getGlobalContext());
+ LLVMContext Context;
+ Module M("", Context);
CallGraph CG(M);
canSpecializeGraphTraitsIterators(&CG);
}
TEST(CallGraphTest, GraphTraitsConstSpecialization) {
- Module M("", getGlobalContext());
+ LLVMContext Context;
+ Module M("", Context);
CallGraph CG(M);
canSpecializeGraphTraitsIterators(const_cast<const CallGraph *>(&CG));
diff --git a/unittests/Analysis/LazyCallGraphTest.cpp b/unittests/Analysis/LazyCallGraphTest.cpp
index 6caccb892395..224a9458cc88 100644
--- a/unittests/Analysis/LazyCallGraphTest.cpp
+++ b/unittests/Analysis/LazyCallGraphTest.cpp
@@ -21,10 +21,10 @@ using namespace llvm;
namespace {
-std::unique_ptr<Module> parseAssembly(const char *Assembly) {
+std::unique_ptr<Module> parseAssembly(LLVMContext &Context,
+ const char *Assembly) {
SMDiagnostic Error;
- std::unique_ptr<Module> M =
- parseAssemblyString(Assembly, Error, getGlobalContext());
+ std::unique_ptr<Module> M = parseAssemblyString(Assembly, Error, Context);
std::string ErrMsg;
raw_string_ostream OS(ErrMsg);
@@ -121,36 +121,37 @@ static const char DiamondOfTriangles[] =
"}\n";
TEST(LazyCallGraphTest, BasicGraphFormation) {
- std::unique_ptr<Module> M = parseAssembly(DiamondOfTriangles);
+ LLVMContext Context;
+ std::unique_ptr<Module> M = parseAssembly(Context, DiamondOfTriangles);
LazyCallGraph CG(*M);
// The order of the entry nodes should be stable w.r.t. the source order of
// the IR, and everything in our module is an entry node, so just directly
// build variables for each node.
auto I = CG.begin();
- LazyCallGraph::Node &A1 = *I++;
+ LazyCallGraph::Node &A1 = (I++)->getNode(CG);
EXPECT_EQ("a1", A1.getFunction().getName());
- LazyCallGraph::Node &A2 = *I++;
+ LazyCallGraph::Node &A2 = (I++)->getNode(CG);
EXPECT_EQ("a2", A2.getFunction().getName());
- LazyCallGraph::Node &A3 = *I++;
+ LazyCallGraph::Node &A3 = (I++)->getNode(CG);
EXPECT_EQ("a3", A3.getFunction().getName());
- LazyCallGraph::Node &B1 = *I++;
+ LazyCallGraph::Node &B1 = (I++)->getNode(CG);
EXPECT_EQ("b1", B1.getFunction().getName());
- LazyCallGraph::Node &B2 = *I++;
+ LazyCallGraph::Node &B2 = (I++)->getNode(CG);
EXPECT_EQ("b2", B2.getFunction().getName());
- LazyCallGraph::Node &B3 = *I++;
+ LazyCallGraph::Node &B3 = (I++)->getNode(CG);
EXPECT_EQ("b3", B3.getFunction().getName());
- LazyCallGraph::Node &C1 = *I++;
+ LazyCallGraph::Node &C1 = (I++)->getNode(CG);
EXPECT_EQ("c1", C1.getFunction().getName());
- LazyCallGraph::Node &C2 = *I++;
+ LazyCallGraph::Node &C2 = (I++)->getNode(CG);
EXPECT_EQ("c2", C2.getFunction().getName());
- LazyCallGraph::Node &C3 = *I++;
+ LazyCallGraph::Node &C3 = (I++)->getNode(CG);
EXPECT_EQ("c3", C3.getFunction().getName());
- LazyCallGraph::Node &D1 = *I++;
+ LazyCallGraph::Node &D1 = (I++)->getNode(CG);
EXPECT_EQ("d1", D1.getFunction().getName());
- LazyCallGraph::Node &D2 = *I++;
+ LazyCallGraph::Node &D2 = (I++)->getNode(CG);
EXPECT_EQ("d2", D2.getFunction().getName());
- LazyCallGraph::Node &D3 = *I++;
+ LazyCallGraph::Node &D3 = (I++)->getNode(CG);
EXPECT_EQ("d3", D3.getFunction().getName());
EXPECT_EQ(CG.end(), I);
@@ -158,8 +159,8 @@ TEST(LazyCallGraphTest, BasicGraphFormation) {
// independent of order.
std::vector<std::string> Nodes;
- for (LazyCallGraph::Node &N : A1)
- Nodes.push_back(N.getFunction().getName());
+ for (LazyCallGraph::Edge &E : A1)
+ Nodes.push_back(E.getFunction().getName());
std::sort(Nodes.begin(), Nodes.end());
EXPECT_EQ("a2", Nodes[0]);
EXPECT_EQ("b2", Nodes[1]);
@@ -171,8 +172,8 @@ TEST(LazyCallGraphTest, BasicGraphFormation) {
EXPECT_EQ(A3.end(), std::next(A3.begin()));
EXPECT_EQ("a1", A3.begin()->getFunction().getName());
- for (LazyCallGraph::Node &N : B1)
- Nodes.push_back(N.getFunction().getName());
+ for (LazyCallGraph::Edge &E : B1)
+ Nodes.push_back(E.getFunction().getName());
std::sort(Nodes.begin(), Nodes.end());
EXPECT_EQ("b2", Nodes[0]);
EXPECT_EQ("d3", Nodes[1]);
@@ -183,8 +184,8 @@ TEST(LazyCallGraphTest, BasicGraphFormation) {
EXPECT_EQ(B3.end(), std::next(B3.begin()));
EXPECT_EQ("b1", B3.begin()->getFunction().getName());
- for (LazyCallGraph::Node &N : C1)
- Nodes.push_back(N.getFunction().getName());
+ for (LazyCallGraph::Edge &E : C1)
+ Nodes.push_back(E.getFunction().getName());
std::sort(Nodes.begin(), Nodes.end());
EXPECT_EQ("c2", Nodes[0]);
EXPECT_EQ("d2", Nodes[1]);
@@ -202,12 +203,13 @@ TEST(LazyCallGraphTest, BasicGraphFormation) {
EXPECT_EQ(D3.end(), std::next(D3.begin()));
EXPECT_EQ("d1", D3.begin()->getFunction().getName());
- // Now lets look at the SCCs.
- auto SCCI = CG.postorder_scc_begin();
+ // Now lets look at the RefSCCs and SCCs.
+ auto J = CG.postorder_ref_scc_begin();
- LazyCallGraph::SCC &D = *SCCI++;
- for (LazyCallGraph::Node *N : D)
- Nodes.push_back(N->getFunction().getName());
+ LazyCallGraph::RefSCC &D = *J++;
+ ASSERT_EQ(1, D.size());
+ for (LazyCallGraph::Node &N : *D.begin())
+ Nodes.push_back(N.getFunction().getName());
std::sort(Nodes.begin(), Nodes.end());
EXPECT_EQ(3u, Nodes.size());
EXPECT_EQ("d1", Nodes[0]);
@@ -219,9 +221,10 @@ TEST(LazyCallGraphTest, BasicGraphFormation) {
EXPECT_FALSE(D.isAncestorOf(D));
EXPECT_FALSE(D.isDescendantOf(D));
- LazyCallGraph::SCC &C = *SCCI++;
- for (LazyCallGraph::Node *N : C)
- Nodes.push_back(N->getFunction().getName());
+ LazyCallGraph::RefSCC &C = *J++;
+ ASSERT_EQ(1, C.size());
+ for (LazyCallGraph::Node &N : *C.begin())
+ Nodes.push_back(N.getFunction().getName());
std::sort(Nodes.begin(), Nodes.end());
EXPECT_EQ(3u, Nodes.size());
EXPECT_EQ("c1", Nodes[0]);
@@ -233,9 +236,10 @@ TEST(LazyCallGraphTest, BasicGraphFormation) {
EXPECT_TRUE(C.isAncestorOf(D));
EXPECT_FALSE(C.isDescendantOf(D));
- LazyCallGraph::SCC &B = *SCCI++;
- for (LazyCallGraph::Node *N : B)
- Nodes.push_back(N->getFunction().getName());
+ LazyCallGraph::RefSCC &B = *J++;
+ ASSERT_EQ(1, B.size());
+ for (LazyCallGraph::Node &N : *B.begin())
+ Nodes.push_back(N.getFunction().getName());
std::sort(Nodes.begin(), Nodes.end());
EXPECT_EQ(3u, Nodes.size());
EXPECT_EQ("b1", Nodes[0]);
@@ -249,9 +253,10 @@ TEST(LazyCallGraphTest, BasicGraphFormation) {
EXPECT_FALSE(B.isAncestorOf(C));
EXPECT_FALSE(C.isAncestorOf(B));
- LazyCallGraph::SCC &A = *SCCI++;
- for (LazyCallGraph::Node *N : A)
- Nodes.push_back(N->getFunction().getName());
+ LazyCallGraph::RefSCC &A = *J++;
+ ASSERT_EQ(1, A.size());
+ for (LazyCallGraph::Node &N : *A.begin())
+ Nodes.push_back(N.getFunction().getName());
std::sort(Nodes.begin(), Nodes.end());
EXPECT_EQ(3u, Nodes.size());
EXPECT_EQ("a1", Nodes[0]);
@@ -265,7 +270,7 @@ TEST(LazyCallGraphTest, BasicGraphFormation) {
EXPECT_TRUE(A.isAncestorOf(C));
EXPECT_TRUE(A.isAncestorOf(D));
- EXPECT_EQ(CG.postorder_scc_end(), SCCI);
+ EXPECT_EQ(CG.postorder_ref_scc_end(), J);
}
static Function &lookupFunction(Module &M, StringRef Name) {
@@ -276,21 +281,21 @@ static Function &lookupFunction(Module &M, StringRef Name) {
}
TEST(LazyCallGraphTest, BasicGraphMutation) {
- std::unique_ptr<Module> M = parseAssembly(
- "define void @a() {\n"
- "entry:\n"
- " call void @b()\n"
- " call void @c()\n"
- " ret void\n"
- "}\n"
- "define void @b() {\n"
- "entry:\n"
- " ret void\n"
- "}\n"
- "define void @c() {\n"
- "entry:\n"
- " ret void\n"
- "}\n");
+ LLVMContext Context;
+ std::unique_ptr<Module> M = parseAssembly(Context, "define void @a() {\n"
+ "entry:\n"
+ " call void @b()\n"
+ " call void @c()\n"
+ " ret void\n"
+ "}\n"
+ "define void @b() {\n"
+ "entry:\n"
+ " ret void\n"
+ "}\n"
+ "define void @c() {\n"
+ "entry:\n"
+ " ret void\n"
+ "}\n");
LazyCallGraph CG(*M);
LazyCallGraph::Node &A = CG.get(lookupFunction(*M, "a"));
@@ -298,23 +303,23 @@ TEST(LazyCallGraphTest, BasicGraphMutation) {
EXPECT_EQ(2, std::distance(A.begin(), A.end()));
EXPECT_EQ(0, std::distance(B.begin(), B.end()));
- CG.insertEdge(B, lookupFunction(*M, "c"));
+ CG.insertEdge(B, lookupFunction(*M, "c"), LazyCallGraph::Edge::Call);
EXPECT_EQ(1, std::distance(B.begin(), B.end()));
- LazyCallGraph::Node &C = *B.begin();
+ LazyCallGraph::Node &C = B.begin()->getNode(CG);
EXPECT_EQ(0, std::distance(C.begin(), C.end()));
- CG.insertEdge(C, B.getFunction());
+ CG.insertEdge(C, B.getFunction(), LazyCallGraph::Edge::Call);
EXPECT_EQ(1, std::distance(C.begin(), C.end()));
- EXPECT_EQ(&B, &*C.begin());
+ EXPECT_EQ(&B, C.begin()->getNode());
- CG.insertEdge(C, C.getFunction());
+ CG.insertEdge(C, C.getFunction(), LazyCallGraph::Edge::Call);
EXPECT_EQ(2, std::distance(C.begin(), C.end()));
- EXPECT_EQ(&B, &*C.begin());
- EXPECT_EQ(&C, &*std::next(C.begin()));
+ EXPECT_EQ(&B, C.begin()->getNode());
+ EXPECT_EQ(&C, std::next(C.begin())->getNode());
CG.removeEdge(C, B.getFunction());
EXPECT_EQ(1, std::distance(C.begin(), C.end()));
- EXPECT_EQ(&C, &*C.begin());
+ EXPECT_EQ(&C, C.begin()->getNode());
CG.removeEdge(C, C.getFunction());
EXPECT_EQ(0, std::distance(C.begin(), C.end()));
@@ -323,83 +328,157 @@ TEST(LazyCallGraphTest, BasicGraphMutation) {
EXPECT_EQ(0, std::distance(B.begin(), B.end()));
}
+TEST(LazyCallGraphTest, InnerSCCFormation) {
+ LLVMContext Context;
+ std::unique_ptr<Module> M = parseAssembly(Context, DiamondOfTriangles);
+ LazyCallGraph CG(*M);
+
+ // Now mutate the graph to connect every node into a single RefSCC to ensure
+ // that our inner SCC formation handles the rest.
+ CG.insertEdge(lookupFunction(*M, "d1"), lookupFunction(*M, "a1"),
+ LazyCallGraph::Edge::Ref);
+
+ // Build vectors and sort them for the rest of the assertions to make them
+ // independent of order.
+ std::vector<std::string> Nodes;
+
+ // We should build a single RefSCC for the entire graph.
+ auto I = CG.postorder_ref_scc_begin();
+ LazyCallGraph::RefSCC &RC = *I++;
+ EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+ // Now walk the four SCCs which should be in post-order.
+ auto J = RC.begin();
+ LazyCallGraph::SCC &D = *J++;
+ for (LazyCallGraph::Node &N : D)
+ Nodes.push_back(N.getFunction().getName());
+ std::sort(Nodes.begin(), Nodes.end());
+ EXPECT_EQ(3u, Nodes.size());
+ EXPECT_EQ("d1", Nodes[0]);
+ EXPECT_EQ("d2", Nodes[1]);
+ EXPECT_EQ("d3", Nodes[2]);
+ Nodes.clear();
+
+ LazyCallGraph::SCC &B = *J++;
+ for (LazyCallGraph::Node &N : B)
+ Nodes.push_back(N.getFunction().getName());
+ std::sort(Nodes.begin(), Nodes.end());
+ EXPECT_EQ(3u, Nodes.size());
+ EXPECT_EQ("b1", Nodes[0]);
+ EXPECT_EQ("b2", Nodes[1]);
+ EXPECT_EQ("b3", Nodes[2]);
+ Nodes.clear();
+
+ LazyCallGraph::SCC &C = *J++;
+ for (LazyCallGraph::Node &N : C)
+ Nodes.push_back(N.getFunction().getName());
+ std::sort(Nodes.begin(), Nodes.end());
+ EXPECT_EQ(3u, Nodes.size());
+ EXPECT_EQ("c1", Nodes[0]);
+ EXPECT_EQ("c2", Nodes[1]);
+ EXPECT_EQ("c3", Nodes[2]);
+ Nodes.clear();
+
+ LazyCallGraph::SCC &A = *J++;
+ for (LazyCallGraph::Node &N : A)
+ Nodes.push_back(N.getFunction().getName());
+ std::sort(Nodes.begin(), Nodes.end());
+ EXPECT_EQ(3u, Nodes.size());
+ EXPECT_EQ("a1", Nodes[0]);
+ EXPECT_EQ("a2", Nodes[1]);
+ EXPECT_EQ("a3", Nodes[2]);
+ Nodes.clear();
+
+ EXPECT_EQ(RC.end(), J);
+}
+
TEST(LazyCallGraphTest, MultiArmSCC) {
+ LLVMContext Context;
// Two interlocking cycles. The really useful thing about this SCC is that it
// will require Tarjan's DFS to backtrack and finish processing all of the
- // children of each node in the SCC.
- std::unique_ptr<Module> M = parseAssembly(
- "define void @a() {\n"
- "entry:\n"
- " call void @b()\n"
- " call void @d()\n"
- " ret void\n"
- "}\n"
- "define void @b() {\n"
- "entry:\n"
- " call void @c()\n"
- " ret void\n"
- "}\n"
- "define void @c() {\n"
- "entry:\n"
- " call void @a()\n"
- " ret void\n"
- "}\n"
- "define void @d() {\n"
- "entry:\n"
- " call void @e()\n"
- " ret void\n"
- "}\n"
- "define void @e() {\n"
- "entry:\n"
- " call void @a()\n"
- " ret void\n"
- "}\n");
+ // children of each node in the SCC. Since this involves call edges, both
+ // Tarjan implementations will have to successfully navigate the structure.
+ std::unique_ptr<Module> M = parseAssembly(Context, "define void @f1() {\n"
+ "entry:\n"
+ " call void @f2()\n"
+ " call void @f4()\n"
+ " ret void\n"
+ "}\n"
+ "define void @f2() {\n"
+ "entry:\n"
+ " call void @f3()\n"
+ " ret void\n"
+ "}\n"
+ "define void @f3() {\n"
+ "entry:\n"
+ " call void @f1()\n"
+ " ret void\n"
+ "}\n"
+ "define void @f4() {\n"
+ "entry:\n"
+ " call void @f5()\n"
+ " ret void\n"
+ "}\n"
+ "define void @f5() {\n"
+ "entry:\n"
+ " call void @f1()\n"
+ " ret void\n"
+ "}\n");
LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
- auto SCCI = CG.postorder_scc_begin();
- LazyCallGraph::SCC &SCC = *SCCI++;
- EXPECT_EQ(CG.postorder_scc_end(), SCCI);
-
- LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
- LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
- LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
- LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
- LazyCallGraph::Node &E = *CG.lookup(lookupFunction(*M, "e"));
- EXPECT_EQ(&SCC, CG.lookupSCC(A));
- EXPECT_EQ(&SCC, CG.lookupSCC(B));
- EXPECT_EQ(&SCC, CG.lookupSCC(C));
- EXPECT_EQ(&SCC, CG.lookupSCC(D));
- EXPECT_EQ(&SCC, CG.lookupSCC(E));
+ auto I = CG.postorder_ref_scc_begin();
+ LazyCallGraph::RefSCC &RC = *I++;
+ EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+ LazyCallGraph::Node &N1 = *CG.lookup(lookupFunction(*M, "f1"));
+ LazyCallGraph::Node &N2 = *CG.lookup(lookupFunction(*M, "f2"));
+ LazyCallGraph::Node &N3 = *CG.lookup(lookupFunction(*M, "f3"));
+ LazyCallGraph::Node &N4 = *CG.lookup(lookupFunction(*M, "f4"));
+ LazyCallGraph::Node &N5 = *CG.lookup(lookupFunction(*M, "f4"));
+ EXPECT_EQ(&RC, CG.lookupRefSCC(N1));
+ EXPECT_EQ(&RC, CG.lookupRefSCC(N2));
+ EXPECT_EQ(&RC, CG.lookupRefSCC(N3));
+ EXPECT_EQ(&RC, CG.lookupRefSCC(N4));
+ EXPECT_EQ(&RC, CG.lookupRefSCC(N5));
+
+ ASSERT_EQ(1, RC.size());
+
+ LazyCallGraph::SCC &C = *RC.begin();
+ EXPECT_EQ(&C, CG.lookupSCC(N1));
+ EXPECT_EQ(&C, CG.lookupSCC(N2));
+ EXPECT_EQ(&C, CG.lookupSCC(N3));
+ EXPECT_EQ(&C, CG.lookupSCC(N4));
+ EXPECT_EQ(&C, CG.lookupSCC(N5));
}
-TEST(LazyCallGraphTest, OutgoingSCCEdgeInsertion) {
- std::unique_ptr<Module> M = parseAssembly(
- "define void @a() {\n"
- "entry:\n"
- " call void @b()\n"
- " call void @c()\n"
- " ret void\n"
- "}\n"
- "define void @b() {\n"
- "entry:\n"
- " call void @d()\n"
- " ret void\n"
- "}\n"
- "define void @c() {\n"
- "entry:\n"
- " call void @d()\n"
- " ret void\n"
- "}\n"
- "define void @d() {\n"
- "entry:\n"
- " ret void\n"
- "}\n");
+TEST(LazyCallGraphTest, OutgoingEdgeMutation) {
+ LLVMContext Context;
+ std::unique_ptr<Module> M = parseAssembly(Context, "define void @a() {\n"
+ "entry:\n"
+ " call void @b()\n"
+ " call void @c()\n"
+ " ret void\n"
+ "}\n"
+ "define void @b() {\n"
+ "entry:\n"
+ " call void @d()\n"
+ " ret void\n"
+ "}\n"
+ "define void @c() {\n"
+ "entry:\n"
+ " call void @d()\n"
+ " ret void\n"
+ "}\n"
+ "define void @d() {\n"
+ "entry:\n"
+ " ret void\n"
+ "}\n");
LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
- for (LazyCallGraph::SCC &C : CG.postorder_sccs())
- (void)C;
+ for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
+ (void)RC;
LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
@@ -409,24 +488,96 @@ TEST(LazyCallGraphTest, OutgoingSCCEdgeInsertion) {
LazyCallGraph::SCC &BC = *CG.lookupSCC(B);
LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
LazyCallGraph::SCC &DC = *CG.lookupSCC(D);
- EXPECT_TRUE(AC.isAncestorOf(BC));
- EXPECT_TRUE(AC.isAncestorOf(CC));
- EXPECT_TRUE(AC.isAncestorOf(DC));
- EXPECT_TRUE(DC.isDescendantOf(AC));
- EXPECT_TRUE(DC.isDescendantOf(BC));
- EXPECT_TRUE(DC.isDescendantOf(CC));
+ LazyCallGraph::RefSCC &ARC = *CG.lookupRefSCC(A);
+ LazyCallGraph::RefSCC &BRC = *CG.lookupRefSCC(B);
+ LazyCallGraph::RefSCC &CRC = *CG.lookupRefSCC(C);
+ LazyCallGraph::RefSCC &DRC = *CG.lookupRefSCC(D);
+ EXPECT_TRUE(ARC.isParentOf(BRC));
+ EXPECT_TRUE(ARC.isParentOf(CRC));
+ EXPECT_FALSE(ARC.isParentOf(DRC));
+ EXPECT_TRUE(ARC.isAncestorOf(DRC));
+ EXPECT_FALSE(DRC.isChildOf(ARC));
+ EXPECT_TRUE(DRC.isDescendantOf(ARC));
+ EXPECT_TRUE(DRC.isChildOf(BRC));
+ EXPECT_TRUE(DRC.isChildOf(CRC));
EXPECT_EQ(2, std::distance(A.begin(), A.end()));
- AC.insertOutgoingEdge(A, D);
+ ARC.insertOutgoingEdge(A, D, LazyCallGraph::Edge::Call);
EXPECT_EQ(3, std::distance(A.begin(), A.end()));
- EXPECT_TRUE(AC.isParentOf(DC));
+ const LazyCallGraph::Edge &NewE = A[D];
+ EXPECT_TRUE(NewE);
+ EXPECT_TRUE(NewE.isCall());
+ EXPECT_EQ(&D, NewE.getNode());
+
+ // Only the parent and child tests sholud have changed. The rest of the graph
+ // remains the same.
+ EXPECT_TRUE(ARC.isParentOf(DRC));
+ EXPECT_TRUE(ARC.isAncestorOf(DRC));
+ EXPECT_TRUE(DRC.isChildOf(ARC));
+ EXPECT_TRUE(DRC.isDescendantOf(ARC));
+ EXPECT_EQ(&AC, CG.lookupSCC(A));
+ EXPECT_EQ(&BC, CG.lookupSCC(B));
+ EXPECT_EQ(&CC, CG.lookupSCC(C));
+ EXPECT_EQ(&DC, CG.lookupSCC(D));
+ EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
+ EXPECT_EQ(&BRC, CG.lookupRefSCC(B));
+ EXPECT_EQ(&CRC, CG.lookupRefSCC(C));
+ EXPECT_EQ(&DRC, CG.lookupRefSCC(D));
+
+ ARC.switchOutgoingEdgeToRef(A, D);
+ EXPECT_FALSE(NewE.isCall());
+
+ // Verify the graph remains the same.
+ EXPECT_TRUE(ARC.isParentOf(DRC));
+ EXPECT_TRUE(ARC.isAncestorOf(DRC));
+ EXPECT_TRUE(DRC.isChildOf(ARC));
+ EXPECT_TRUE(DRC.isDescendantOf(ARC));
EXPECT_EQ(&AC, CG.lookupSCC(A));
EXPECT_EQ(&BC, CG.lookupSCC(B));
EXPECT_EQ(&CC, CG.lookupSCC(C));
EXPECT_EQ(&DC, CG.lookupSCC(D));
+ EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
+ EXPECT_EQ(&BRC, CG.lookupRefSCC(B));
+ EXPECT_EQ(&CRC, CG.lookupRefSCC(C));
+ EXPECT_EQ(&DRC, CG.lookupRefSCC(D));
+
+ ARC.switchOutgoingEdgeToCall(A, D);
+ EXPECT_TRUE(NewE.isCall());
+
+ // Verify the graph remains the same.
+ EXPECT_TRUE(ARC.isParentOf(DRC));
+ EXPECT_TRUE(ARC.isAncestorOf(DRC));
+ EXPECT_TRUE(DRC.isChildOf(ARC));
+ EXPECT_TRUE(DRC.isDescendantOf(ARC));
+ EXPECT_EQ(&AC, CG.lookupSCC(A));
+ EXPECT_EQ(&BC, CG.lookupSCC(B));
+ EXPECT_EQ(&CC, CG.lookupSCC(C));
+ EXPECT_EQ(&DC, CG.lookupSCC(D));
+ EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
+ EXPECT_EQ(&BRC, CG.lookupRefSCC(B));
+ EXPECT_EQ(&CRC, CG.lookupRefSCC(C));
+ EXPECT_EQ(&DRC, CG.lookupRefSCC(D));
+
+ ARC.removeOutgoingEdge(A, D);
+ EXPECT_EQ(2, std::distance(A.begin(), A.end()));
+
+ // Now the parent and child tests fail again but the rest remains the same.
+ EXPECT_FALSE(ARC.isParentOf(DRC));
+ EXPECT_TRUE(ARC.isAncestorOf(DRC));
+ EXPECT_FALSE(DRC.isChildOf(ARC));
+ EXPECT_TRUE(DRC.isDescendantOf(ARC));
+ EXPECT_EQ(&AC, CG.lookupSCC(A));
+ EXPECT_EQ(&BC, CG.lookupSCC(B));
+ EXPECT_EQ(&CC, CG.lookupSCC(C));
+ EXPECT_EQ(&DC, CG.lookupSCC(D));
+ EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
+ EXPECT_EQ(&BRC, CG.lookupRefSCC(B));
+ EXPECT_EQ(&CRC, CG.lookupRefSCC(C));
+ EXPECT_EQ(&DRC, CG.lookupRefSCC(D));
}
-TEST(LazyCallGraphTest, IncomingSCCEdgeInsertion) {
+TEST(LazyCallGraphTest, IncomingEdgeInsertion) {
+ LLVMContext Context;
// We want to ensure we can add edges even across complex diamond graphs, so
// we use the diamond of triangles graph defined above. The ascii diagram is
// repeated here for easy reference.
@@ -443,12 +594,12 @@ TEST(LazyCallGraphTest, IncomingSCCEdgeInsertion) {
// / \ |
// a3--a2 |
//
- std::unique_ptr<Module> M = parseAssembly(DiamondOfTriangles);
+ std::unique_ptr<Module> M = parseAssembly(Context, DiamondOfTriangles);
LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
- for (LazyCallGraph::SCC &C : CG.postorder_sccs())
- (void)C;
+ for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
+ (void)RC;
LazyCallGraph::Node &A1 = *CG.lookup(lookupFunction(*M, "a1"));
LazyCallGraph::Node &A2 = *CG.lookup(lookupFunction(*M, "a2"));
@@ -462,18 +613,18 @@ TEST(LazyCallGraphTest, IncomingSCCEdgeInsertion) {
LazyCallGraph::Node &D1 = *CG.lookup(lookupFunction(*M, "d1"));
LazyCallGraph::Node &D2 = *CG.lookup(lookupFunction(*M, "d2"));
LazyCallGraph::Node &D3 = *CG.lookup(lookupFunction(*M, "d3"));
- LazyCallGraph::SCC &AC = *CG.lookupSCC(A1);
- LazyCallGraph::SCC &BC = *CG.lookupSCC(B1);
- LazyCallGraph::SCC &CC = *CG.lookupSCC(C1);
- LazyCallGraph::SCC &DC = *CG.lookupSCC(D1);
- ASSERT_EQ(&AC, CG.lookupSCC(A2));
- ASSERT_EQ(&AC, CG.lookupSCC(A3));
- ASSERT_EQ(&BC, CG.lookupSCC(B2));
- ASSERT_EQ(&BC, CG.lookupSCC(B3));
- ASSERT_EQ(&CC, CG.lookupSCC(C2));
- ASSERT_EQ(&CC, CG.lookupSCC(C3));
- ASSERT_EQ(&DC, CG.lookupSCC(D2));
- ASSERT_EQ(&DC, CG.lookupSCC(D3));
+ LazyCallGraph::RefSCC &ARC = *CG.lookupRefSCC(A1);
+ LazyCallGraph::RefSCC &BRC = *CG.lookupRefSCC(B1);
+ LazyCallGraph::RefSCC &CRC = *CG.lookupRefSCC(C1);
+ LazyCallGraph::RefSCC &DRC = *CG.lookupRefSCC(D1);
+ ASSERT_EQ(&ARC, CG.lookupRefSCC(A2));
+ ASSERT_EQ(&ARC, CG.lookupRefSCC(A3));
+ ASSERT_EQ(&BRC, CG.lookupRefSCC(B2));
+ ASSERT_EQ(&BRC, CG.lookupRefSCC(B3));
+ ASSERT_EQ(&CRC, CG.lookupRefSCC(C2));
+ ASSERT_EQ(&CRC, CG.lookupRefSCC(C3));
+ ASSERT_EQ(&DRC, CG.lookupRefSCC(D2));
+ ASSERT_EQ(&DRC, CG.lookupRefSCC(D3));
ASSERT_EQ(1, std::distance(D2.begin(), D2.end()));
// Add an edge to make the graph:
@@ -489,47 +640,52 @@ TEST(LazyCallGraphTest, IncomingSCCEdgeInsertion) {
// a1 |
// / \ |
// a3--a2 |
- CC.insertIncomingEdge(D2, C2);
+ auto MergedRCs = CRC.insertIncomingRefEdge(D2, C2);
// Make sure we connected the nodes.
- EXPECT_EQ(2, std::distance(D2.begin(), D2.end()));
+ for (LazyCallGraph::Edge E : D2) {
+ if (E.getNode() == &D3)
+ continue;
+ EXPECT_EQ(&C2, E.getNode());
+ }
+ // And marked the D ref-SCC as no longer valid.
+ EXPECT_EQ(1u, MergedRCs.size());
+ EXPECT_EQ(&DRC, MergedRCs[0]);
// Make sure we have the correct nodes in the SCC sets.
- EXPECT_EQ(&AC, CG.lookupSCC(A1));
- EXPECT_EQ(&AC, CG.lookupSCC(A2));
- EXPECT_EQ(&AC, CG.lookupSCC(A3));
- EXPECT_EQ(&BC, CG.lookupSCC(B1));
- EXPECT_EQ(&BC, CG.lookupSCC(B2));
- EXPECT_EQ(&BC, CG.lookupSCC(B3));
- EXPECT_EQ(&CC, CG.lookupSCC(C1));
- EXPECT_EQ(&CC, CG.lookupSCC(C2));
- EXPECT_EQ(&CC, CG.lookupSCC(C3));
- EXPECT_EQ(&CC, CG.lookupSCC(D1));
- EXPECT_EQ(&CC, CG.lookupSCC(D2));
- EXPECT_EQ(&CC, CG.lookupSCC(D3));
+ EXPECT_EQ(&ARC, CG.lookupRefSCC(A1));
+ EXPECT_EQ(&ARC, CG.lookupRefSCC(A2));
+ EXPECT_EQ(&ARC, CG.lookupRefSCC(A3));
+ EXPECT_EQ(&BRC, CG.lookupRefSCC(B1));
+ EXPECT_EQ(&BRC, CG.lookupRefSCC(B2));
+ EXPECT_EQ(&BRC, CG.lookupRefSCC(B3));
+ EXPECT_EQ(&CRC, CG.lookupRefSCC(C1));
+ EXPECT_EQ(&CRC, CG.lookupRefSCC(C2));
+ EXPECT_EQ(&CRC, CG.lookupRefSCC(C3));
+ EXPECT_EQ(&CRC, CG.lookupRefSCC(D1));
+ EXPECT_EQ(&CRC, CG.lookupRefSCC(D2));
+ EXPECT_EQ(&CRC, CG.lookupRefSCC(D3));
// And that ancestry tests have been updated.
- EXPECT_TRUE(AC.isParentOf(BC));
- EXPECT_TRUE(AC.isParentOf(CC));
- EXPECT_FALSE(AC.isAncestorOf(DC));
- EXPECT_FALSE(BC.isAncestorOf(DC));
- EXPECT_FALSE(CC.isAncestorOf(DC));
+ EXPECT_TRUE(ARC.isParentOf(CRC));
+ EXPECT_TRUE(BRC.isParentOf(CRC));
}
-TEST(LazyCallGraphTest, IncomingSCCEdgeInsertionMidTraversal) {
+TEST(LazyCallGraphTest, IncomingEdgeInsertionMidTraversal) {
+ LLVMContext Context;
// This is the same fundamental test as the previous, but we perform it
- // having only partially walked the SCCs of the graph.
- std::unique_ptr<Module> M = parseAssembly(DiamondOfTriangles);
+ // having only partially walked the RefSCCs of the graph.
+ std::unique_ptr<Module> M = parseAssembly(Context, DiamondOfTriangles);
LazyCallGraph CG(*M);
- // Walk the SCCs until we find the one containing 'c1'.
- auto SCCI = CG.postorder_scc_begin(), SCCE = CG.postorder_scc_end();
- ASSERT_NE(SCCI, SCCE);
- LazyCallGraph::SCC &DC = *SCCI;
- ASSERT_NE(&DC, nullptr);
- ++SCCI;
- ASSERT_NE(SCCI, SCCE);
- LazyCallGraph::SCC &CC = *SCCI;
- ASSERT_NE(&CC, nullptr);
+ // Walk the RefSCCs until we find the one containing 'c1'.
+ auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
+ ASSERT_NE(I, E);
+ LazyCallGraph::RefSCC &DRC = *I;
+ ASSERT_NE(&DRC, nullptr);
+ ++I;
+ ASSERT_NE(I, E);
+ LazyCallGraph::RefSCC &CRC = *I;
+ ASSERT_NE(&CRC, nullptr);
ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "a1")));
ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "a2")));
@@ -543,178 +699,607 @@ TEST(LazyCallGraphTest, IncomingSCCEdgeInsertionMidTraversal) {
LazyCallGraph::Node &D1 = *CG.lookup(lookupFunction(*M, "d1"));
LazyCallGraph::Node &D2 = *CG.lookup(lookupFunction(*M, "d2"));
LazyCallGraph::Node &D3 = *CG.lookup(lookupFunction(*M, "d3"));
- ASSERT_EQ(&CC, CG.lookupSCC(C1));
- ASSERT_EQ(&CC, CG.lookupSCC(C2));
- ASSERT_EQ(&CC, CG.lookupSCC(C3));
- ASSERT_EQ(&DC, CG.lookupSCC(D1));
- ASSERT_EQ(&DC, CG.lookupSCC(D2));
- ASSERT_EQ(&DC, CG.lookupSCC(D3));
+ ASSERT_EQ(&CRC, CG.lookupRefSCC(C1));
+ ASSERT_EQ(&CRC, CG.lookupRefSCC(C2));
+ ASSERT_EQ(&CRC, CG.lookupRefSCC(C3));
+ ASSERT_EQ(&DRC, CG.lookupRefSCC(D1));
+ ASSERT_EQ(&DRC, CG.lookupRefSCC(D2));
+ ASSERT_EQ(&DRC, CG.lookupRefSCC(D3));
ASSERT_EQ(1, std::distance(D2.begin(), D2.end()));
- CC.insertIncomingEdge(D2, C2);
- EXPECT_EQ(2, std::distance(D2.begin(), D2.end()));
+ auto MergedRCs = CRC.insertIncomingRefEdge(D2, C2);
+ // Make sure we connected the nodes.
+ for (LazyCallGraph::Edge E : D2) {
+ if (E.getNode() == &D3)
+ continue;
+ EXPECT_EQ(&C2, E.getNode());
+ }
+ // And marked the D ref-SCC as no longer valid.
+ EXPECT_EQ(1u, MergedRCs.size());
+ EXPECT_EQ(&DRC, MergedRCs[0]);
+
+ // Make sure we have the correct nodes in the RefSCCs.
+ EXPECT_EQ(&CRC, CG.lookupRefSCC(C1));
+ EXPECT_EQ(&CRC, CG.lookupRefSCC(C2));
+ EXPECT_EQ(&CRC, CG.lookupRefSCC(C3));
+ EXPECT_EQ(&CRC, CG.lookupRefSCC(D1));
+ EXPECT_EQ(&CRC, CG.lookupRefSCC(D2));
+ EXPECT_EQ(&CRC, CG.lookupRefSCC(D3));
+
+ // Check that we can form the last two RefSCCs now in a coherent way.
+ ++I;
+ EXPECT_NE(I, E);
+ LazyCallGraph::RefSCC &BRC = *I;
+ EXPECT_NE(&BRC, nullptr);
+ EXPECT_EQ(&BRC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "b1"))));
+ EXPECT_EQ(&BRC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "b2"))));
+ EXPECT_EQ(&BRC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "b3"))));
+ EXPECT_TRUE(BRC.isParentOf(CRC));
+ ++I;
+ EXPECT_NE(I, E);
+ LazyCallGraph::RefSCC &ARC = *I;
+ EXPECT_NE(&ARC, nullptr);
+ EXPECT_EQ(&ARC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "a1"))));
+ EXPECT_EQ(&ARC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "a2"))));
+ EXPECT_EQ(&ARC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "a3"))));
+ EXPECT_TRUE(ARC.isParentOf(CRC));
+ ++I;
+ EXPECT_EQ(E, I);
+}
- // Make sure we have the correct nodes in the SCC sets.
- EXPECT_EQ(&CC, CG.lookupSCC(C1));
- EXPECT_EQ(&CC, CG.lookupSCC(C2));
- EXPECT_EQ(&CC, CG.lookupSCC(C3));
- EXPECT_EQ(&CC, CG.lookupSCC(D1));
- EXPECT_EQ(&CC, CG.lookupSCC(D2));
- EXPECT_EQ(&CC, CG.lookupSCC(D3));
-
- // Check that we can form the last two SCCs now in a coherent way.
- ++SCCI;
- EXPECT_NE(SCCI, SCCE);
- LazyCallGraph::SCC &BC = *SCCI;
- EXPECT_NE(&BC, nullptr);
- EXPECT_EQ(&BC, CG.lookupSCC(*CG.lookup(lookupFunction(*M, "b1"))));
- EXPECT_EQ(&BC, CG.lookupSCC(*CG.lookup(lookupFunction(*M, "b2"))));
- EXPECT_EQ(&BC, CG.lookupSCC(*CG.lookup(lookupFunction(*M, "b3"))));
- ++SCCI;
- EXPECT_NE(SCCI, SCCE);
- LazyCallGraph::SCC &AC = *SCCI;
- EXPECT_NE(&AC, nullptr);
- EXPECT_EQ(&AC, CG.lookupSCC(*CG.lookup(lookupFunction(*M, "a1"))));
- EXPECT_EQ(&AC, CG.lookupSCC(*CG.lookup(lookupFunction(*M, "a2"))));
- EXPECT_EQ(&AC, CG.lookupSCC(*CG.lookup(lookupFunction(*M, "a3"))));
- ++SCCI;
- EXPECT_EQ(SCCI, SCCE);
+TEST(LazyCallGraphTest, InternalEdgeMutation) {
+ LLVMContext Context;
+ std::unique_ptr<Module> M = parseAssembly(Context, "define void @a() {\n"
+ "entry:\n"
+ " call void @b()\n"
+ " ret void\n"
+ "}\n"
+ "define void @b() {\n"
+ "entry:\n"
+ " call void @c()\n"
+ " ret void\n"
+ "}\n"
+ "define void @c() {\n"
+ "entry:\n"
+ " call void @a()\n"
+ " ret void\n"
+ "}\n");
+ LazyCallGraph CG(*M);
+
+ // Force the graph to be fully expanded.
+ auto I = CG.postorder_ref_scc_begin();
+ LazyCallGraph::RefSCC &RC = *I++;
+ EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+ LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
+ LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
+ LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
+ EXPECT_EQ(&RC, CG.lookupRefSCC(A));
+ EXPECT_EQ(&RC, CG.lookupRefSCC(B));
+ EXPECT_EQ(&RC, CG.lookupRefSCC(C));
+ EXPECT_EQ(1, RC.size());
+ EXPECT_EQ(&*RC.begin(), CG.lookupSCC(A));
+ EXPECT_EQ(&*RC.begin(), CG.lookupSCC(B));
+ EXPECT_EQ(&*RC.begin(), CG.lookupSCC(C));
+
+ // Insert an edge from 'a' to 'c'. Nothing changes about the graph.
+ RC.insertInternalRefEdge(A, C);
+ EXPECT_EQ(2, std::distance(A.begin(), A.end()));
+ EXPECT_EQ(&RC, CG.lookupRefSCC(A));
+ EXPECT_EQ(&RC, CG.lookupRefSCC(B));
+ EXPECT_EQ(&RC, CG.lookupRefSCC(C));
+ EXPECT_EQ(1, RC.size());
+ EXPECT_EQ(&*RC.begin(), CG.lookupSCC(A));
+ EXPECT_EQ(&*RC.begin(), CG.lookupSCC(B));
+ EXPECT_EQ(&*RC.begin(), CG.lookupSCC(C));
+
+ // Switch the call edge from 'b' to 'c' to a ref edge. This will break the
+ // call cycle and cause us to form more SCCs. The RefSCC will remain the same
+ // though.
+ RC.switchInternalEdgeToRef(B, C);
+ EXPECT_EQ(&RC, CG.lookupRefSCC(A));
+ EXPECT_EQ(&RC, CG.lookupRefSCC(B));
+ EXPECT_EQ(&RC, CG.lookupRefSCC(C));
+ auto J = RC.begin();
+ // The SCCs must be in *post-order* which means successors before
+ // predecessors. At this point we have call edges from C to A and from A to
+ // B. The only valid postorder is B, A, C.
+ EXPECT_EQ(&*J++, CG.lookupSCC(B));
+ EXPECT_EQ(&*J++, CG.lookupSCC(A));
+ EXPECT_EQ(&*J++, CG.lookupSCC(C));
+ EXPECT_EQ(RC.end(), J);
+
+ // Test turning the ref edge from A to C into a call edge. This will form an
+ // SCC out of A and C. Since we previously had a call edge from C to A, the
+ // C SCC should be preserved and have A merged into it while the A SCC should
+ // be invalidated.
+ LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
+ LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
+ auto InvalidatedSCCs = RC.switchInternalEdgeToCall(A, C);
+ ASSERT_EQ(1u, InvalidatedSCCs.size());
+ EXPECT_EQ(&AC, InvalidatedSCCs[0]);
+ EXPECT_EQ(2, CC.size());
+ EXPECT_EQ(&CC, CG.lookupSCC(A));
+ EXPECT_EQ(&CC, CG.lookupSCC(C));
+ J = RC.begin();
+ EXPECT_EQ(&*J++, CG.lookupSCC(B));
+ EXPECT_EQ(&*J++, CG.lookupSCC(C));
+ EXPECT_EQ(RC.end(), J);
}
-TEST(LazyCallGraphTest, InterSCCEdgeRemoval) {
+TEST(LazyCallGraphTest, InternalEdgeRemoval) {
+ LLVMContext Context;
+ // A nice fully connected (including self-edges) RefSCC.
std::unique_ptr<Module> M = parseAssembly(
- "define void @a() {\n"
- "entry:\n"
- " call void @b()\n"
- " ret void\n"
- "}\n"
- "define void @b() {\n"
- "entry:\n"
- " ret void\n"
- "}\n");
+ Context, "define void @a(i8** %ptr) {\n"
+ "entry:\n"
+ " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
+ " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
+ " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+ " ret void\n"
+ "}\n"
+ "define void @b(i8** %ptr) {\n"
+ "entry:\n"
+ " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
+ " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
+ " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+ " ret void\n"
+ "}\n"
+ "define void @c(i8** %ptr) {\n"
+ "entry:\n"
+ " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
+ " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
+ " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+ " ret void\n"
+ "}\n");
LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
- for (LazyCallGraph::SCC &C : CG.postorder_sccs())
- (void)C;
+ auto I = CG.postorder_ref_scc_begin();
+ LazyCallGraph::RefSCC &RC = *I++;
+ EXPECT_EQ(CG.postorder_ref_scc_end(), I);
LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
- LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
- LazyCallGraph::SCC &BC = *CG.lookupSCC(B);
+ LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
+ EXPECT_EQ(&RC, CG.lookupRefSCC(A));
+ EXPECT_EQ(&RC, CG.lookupRefSCC(B));
+ EXPECT_EQ(&RC, CG.lookupRefSCC(C));
+
+ // Remove the edge from b -> a, which should leave the 3 functions still in
+ // a single connected component because of a -> b -> c -> a.
+ SmallVector<LazyCallGraph::RefSCC *, 1> NewRCs =
+ RC.removeInternalRefEdge(B, A);
+ EXPECT_EQ(0u, NewRCs.size());
+ EXPECT_EQ(&RC, CG.lookupRefSCC(A));
+ EXPECT_EQ(&RC, CG.lookupRefSCC(B));
+ EXPECT_EQ(&RC, CG.lookupRefSCC(C));
+
+ // Remove the edge from c -> a, which should leave 'a' in the original RefSCC
+ // and form a new RefSCC for 'b' and 'c'.
+ NewRCs = RC.removeInternalRefEdge(C, A);
+ EXPECT_EQ(1u, NewRCs.size());
+ EXPECT_EQ(&RC, CG.lookupRefSCC(A));
+ EXPECT_EQ(1, std::distance(RC.begin(), RC.end()));
+ LazyCallGraph::RefSCC *RC2 = CG.lookupRefSCC(B);
+ EXPECT_EQ(RC2, CG.lookupRefSCC(C));
+ EXPECT_EQ(RC2, NewRCs[0]);
+}
+
+TEST(LazyCallGraphTest, InternalCallEdgeToRef) {
+ LLVMContext Context;
+ // A nice fully connected (including self-edges) SCC (and RefSCC)
+ std::unique_ptr<Module> M = parseAssembly(Context, "define void @a() {\n"
+ "entry:\n"
+ " call void @a()\n"
+ " call void @b()\n"
+ " call void @c()\n"
+ " ret void\n"
+ "}\n"
+ "define void @b() {\n"
+ "entry:\n"
+ " call void @a()\n"
+ " call void @b()\n"
+ " call void @c()\n"
+ " ret void\n"
+ "}\n"
+ "define void @c() {\n"
+ "entry:\n"
+ " call void @a()\n"
+ " call void @b()\n"
+ " call void @c()\n"
+ " ret void\n"
+ "}\n");
+ LazyCallGraph CG(*M);
+
+ // Force the graph to be fully expanded.
+ auto I = CG.postorder_ref_scc_begin();
+ LazyCallGraph::RefSCC &RC = *I++;
+ EXPECT_EQ(CG.postorder_ref_scc_end(), I);
- EXPECT_EQ("b", A.begin()->getFunction().getName());
- EXPECT_EQ(B.end(), B.begin());
- EXPECT_EQ(&AC, &*BC.parent_begin());
+ EXPECT_EQ(1, RC.size());
+ LazyCallGraph::SCC &CallC = *RC.begin();
- AC.removeInterSCCEdge(A, B);
+ LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
+ LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
+ LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
+ EXPECT_EQ(&CallC, CG.lookupSCC(A));
+ EXPECT_EQ(&CallC, CG.lookupSCC(B));
+ EXPECT_EQ(&CallC, CG.lookupSCC(C));
+
+ // Remove the call edge from b -> a to a ref edge, which should leave the
+ // 3 functions still in a single connected component because of a -> b ->
+ // c -> a.
+ RC.switchInternalEdgeToRef(B, A);
+ EXPECT_EQ(1, RC.size());
+ EXPECT_EQ(&CallC, CG.lookupSCC(A));
+ EXPECT_EQ(&CallC, CG.lookupSCC(B));
+ EXPECT_EQ(&CallC, CG.lookupSCC(C));
- EXPECT_EQ(A.end(), A.begin());
- EXPECT_EQ(B.end(), B.begin());
- EXPECT_EQ(BC.parent_end(), BC.parent_begin());
+ // Remove the edge from c -> a, which should leave 'a' in the original SCC
+ // and form a new SCC for 'b' and 'c'.
+ RC.switchInternalEdgeToRef(C, A);
+ EXPECT_EQ(2, RC.size());
+ EXPECT_EQ(&CallC, CG.lookupSCC(A));
+ LazyCallGraph::SCC &BCallC = *CG.lookupSCC(B);
+ EXPECT_NE(&BCallC, &CallC);
+ EXPECT_EQ(&BCallC, CG.lookupSCC(C));
+ auto J = RC.find(CallC);
+ EXPECT_EQ(&CallC, &*J);
+ --J;
+ EXPECT_EQ(&BCallC, &*J);
+ EXPECT_EQ(RC.begin(), J);
+
+ // Remove the edge from c -> b, which should leave 'b' in the original SCC
+ // and form a new SCC for 'c'. It shouldn't change 'a's SCC.
+ RC.switchInternalEdgeToRef(C, B);
+ EXPECT_EQ(3, RC.size());
+ EXPECT_EQ(&CallC, CG.lookupSCC(A));
+ EXPECT_EQ(&BCallC, CG.lookupSCC(B));
+ LazyCallGraph::SCC &CCallC = *CG.lookupSCC(C);
+ EXPECT_NE(&CCallC, &CallC);
+ EXPECT_NE(&CCallC, &BCallC);
+ J = RC.find(CallC);
+ EXPECT_EQ(&CallC, &*J);
+ --J;
+ EXPECT_EQ(&BCallC, &*J);
+ --J;
+ EXPECT_EQ(&CCallC, &*J);
+ EXPECT_EQ(RC.begin(), J);
}
-TEST(LazyCallGraphTest, IntraSCCEdgeInsertion) {
- std::unique_ptr<Module> M1 = parseAssembly(
- "define void @a() {\n"
- "entry:\n"
- " call void @b()\n"
- " ret void\n"
- "}\n"
- "define void @b() {\n"
- "entry:\n"
- " call void @c()\n"
- " ret void\n"
- "}\n"
- "define void @c() {\n"
- "entry:\n"
- " call void @a()\n"
- " ret void\n"
- "}\n");
- LazyCallGraph CG1(*M1);
+TEST(LazyCallGraphTest, InternalRefEdgeToCall) {
+ LLVMContext Context;
+ // Basic tests for making a ref edge a call. This hits the basics of the
+ // process only.
+ std::unique_ptr<Module> M =
+ parseAssembly(Context, "define void @a() {\n"
+ "entry:\n"
+ " call void @b()\n"
+ " call void @c()\n"
+ " store void()* @d, void()** undef\n"
+ " ret void\n"
+ "}\n"
+ "define void @b() {\n"
+ "entry:\n"
+ " store void()* @c, void()** undef\n"
+ " call void @d()\n"
+ " ret void\n"
+ "}\n"
+ "define void @c() {\n"
+ "entry:\n"
+ " store void()* @b, void()** undef\n"
+ " call void @d()\n"
+ " ret void\n"
+ "}\n"
+ "define void @d() {\n"
+ "entry:\n"
+ " store void()* @a, void()** undef\n"
+ " ret void\n"
+ "}\n");
+ LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
- auto SCCI = CG1.postorder_scc_begin();
- LazyCallGraph::SCC &SCC = *SCCI++;
- EXPECT_EQ(CG1.postorder_scc_end(), SCCI);
-
- LazyCallGraph::Node &A = *CG1.lookup(lookupFunction(*M1, "a"));
- LazyCallGraph::Node &B = *CG1.lookup(lookupFunction(*M1, "b"));
- LazyCallGraph::Node &C = *CG1.lookup(lookupFunction(*M1, "c"));
- EXPECT_EQ(&SCC, CG1.lookupSCC(A));
- EXPECT_EQ(&SCC, CG1.lookupSCC(B));
- EXPECT_EQ(&SCC, CG1.lookupSCC(C));
-
- // Insert an edge from 'a' to 'c'. Nothing changes about the SCCs.
- SCC.insertIntraSCCEdge(A, C);
- EXPECT_EQ(2, std::distance(A.begin(), A.end()));
- EXPECT_EQ(&SCC, CG1.lookupSCC(A));
- EXPECT_EQ(&SCC, CG1.lookupSCC(B));
- EXPECT_EQ(&SCC, CG1.lookupSCC(C));
+ auto I = CG.postorder_ref_scc_begin();
+ LazyCallGraph::RefSCC &RC = *I++;
+ EXPECT_EQ(CG.postorder_ref_scc_end(), I);
- // Insert a self edge from 'a' back to 'a'.
- SCC.insertIntraSCCEdge(A, A);
- EXPECT_EQ(3, std::distance(A.begin(), A.end()));
- EXPECT_EQ(&SCC, CG1.lookupSCC(A));
- EXPECT_EQ(&SCC, CG1.lookupSCC(B));
- EXPECT_EQ(&SCC, CG1.lookupSCC(C));
+ LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
+ LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
+ LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
+ LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
+ LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
+ LazyCallGraph::SCC &BC = *CG.lookupSCC(B);
+ LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
+ LazyCallGraph::SCC &DC = *CG.lookupSCC(D);
+
+ // Check the initial post-order. Note that B and C could be flipped here (and
+ // in our mutation) without changing the nature of this test.
+ ASSERT_EQ(4, RC.size());
+ EXPECT_EQ(&DC, &RC[0]);
+ EXPECT_EQ(&BC, &RC[1]);
+ EXPECT_EQ(&CC, &RC[2]);
+ EXPECT_EQ(&AC, &RC[3]);
+
+ // Switch the ref edge from A -> D to a call edge. This should have no
+ // effect as it is already in postorder and no new cycles are formed.
+ auto MergedCs = RC.switchInternalEdgeToCall(A, D);
+ EXPECT_EQ(0u, MergedCs.size());
+ ASSERT_EQ(4, RC.size());
+ EXPECT_EQ(&DC, &RC[0]);
+ EXPECT_EQ(&BC, &RC[1]);
+ EXPECT_EQ(&CC, &RC[2]);
+ EXPECT_EQ(&AC, &RC[3]);
+
+ // Switch B -> C to a call edge. This doesn't form any new cycles but does
+ // require reordering the SCCs.
+ MergedCs = RC.switchInternalEdgeToCall(B, C);
+ EXPECT_EQ(0u, MergedCs.size());
+ ASSERT_EQ(4, RC.size());
+ EXPECT_EQ(&DC, &RC[0]);
+ EXPECT_EQ(&CC, &RC[1]);
+ EXPECT_EQ(&BC, &RC[2]);
+ EXPECT_EQ(&AC, &RC[3]);
+
+ // Switch C -> B to a call edge. This forms a cycle and forces merging SCCs.
+ MergedCs = RC.switchInternalEdgeToCall(C, B);
+ ASSERT_EQ(1u, MergedCs.size());
+ EXPECT_EQ(&CC, MergedCs[0]);
+ ASSERT_EQ(3, RC.size());
+ EXPECT_EQ(&DC, &RC[0]);
+ EXPECT_EQ(&BC, &RC[1]);
+ EXPECT_EQ(&AC, &RC[2]);
+ EXPECT_EQ(2, BC.size());
+ EXPECT_EQ(&BC, CG.lookupSCC(B));
+ EXPECT_EQ(&BC, CG.lookupSCC(C));
}
-TEST(LazyCallGraphTest, IntraSCCEdgeRemoval) {
- // A nice fully connected (including self-edges) SCC.
- std::unique_ptr<Module> M1 = parseAssembly(
- "define void @a() {\n"
- "entry:\n"
- " call void @a()\n"
- " call void @b()\n"
- " call void @c()\n"
- " ret void\n"
- "}\n"
- "define void @b() {\n"
- "entry:\n"
- " call void @a()\n"
- " call void @b()\n"
- " call void @c()\n"
- " ret void\n"
- "}\n"
- "define void @c() {\n"
- "entry:\n"
- " call void @a()\n"
- " call void @b()\n"
- " call void @c()\n"
- " ret void\n"
- "}\n");
- LazyCallGraph CG1(*M1);
+TEST(LazyCallGraphTest, InternalRefEdgeToCallNoCycleInterleaved) {
+ LLVMContext Context;
+ // Test for having a post-order prior to changing a ref edge to a call edge
+ // with SCCs connecting to the source and connecting to the target, but not
+ // connecting to both, interleaved between the source and target. This
+ // ensures we correctly partition the range rather than simply moving one or
+ // the other.
+ std::unique_ptr<Module> M =
+ parseAssembly(Context, "define void @a() {\n"
+ "entry:\n"
+ " call void @b1()\n"
+ " call void @c1()\n"
+ " ret void\n"
+ "}\n"
+ "define void @b1() {\n"
+ "entry:\n"
+ " call void @c1()\n"
+ " call void @b2()\n"
+ " ret void\n"
+ "}\n"
+ "define void @c1() {\n"
+ "entry:\n"
+ " call void @b2()\n"
+ " call void @c2()\n"
+ " ret void\n"
+ "}\n"
+ "define void @b2() {\n"
+ "entry:\n"
+ " call void @c2()\n"
+ " call void @b3()\n"
+ " ret void\n"
+ "}\n"
+ "define void @c2() {\n"
+ "entry:\n"
+ " call void @b3()\n"
+ " call void @c3()\n"
+ " ret void\n"
+ "}\n"
+ "define void @b3() {\n"
+ "entry:\n"
+ " call void @c3()\n"
+ " call void @d()\n"
+ " ret void\n"
+ "}\n"
+ "define void @c3() {\n"
+ "entry:\n"
+ " store void()* @b1, void()** undef\n"
+ " call void @d()\n"
+ " ret void\n"
+ "}\n"
+ "define void @d() {\n"
+ "entry:\n"
+ " store void()* @a, void()** undef\n"
+ " ret void\n"
+ "}\n");
+ LazyCallGraph CG(*M);
// Force the graph to be fully expanded.
- auto SCCI = CG1.postorder_scc_begin();
- LazyCallGraph::SCC &SCC = *SCCI++;
- EXPECT_EQ(CG1.postorder_scc_end(), SCCI);
+ auto I = CG.postorder_ref_scc_begin();
+ LazyCallGraph::RefSCC &RC = *I++;
+ EXPECT_EQ(CG.postorder_ref_scc_end(), I);
- LazyCallGraph::Node &A = *CG1.lookup(lookupFunction(*M1, "a"));
- LazyCallGraph::Node &B = *CG1.lookup(lookupFunction(*M1, "b"));
- LazyCallGraph::Node &C = *CG1.lookup(lookupFunction(*M1, "c"));
- EXPECT_EQ(&SCC, CG1.lookupSCC(A));
- EXPECT_EQ(&SCC, CG1.lookupSCC(B));
- EXPECT_EQ(&SCC, CG1.lookupSCC(C));
+ LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
+ LazyCallGraph::Node &B1 = *CG.lookup(lookupFunction(*M, "b1"));
+ LazyCallGraph::Node &B2 = *CG.lookup(lookupFunction(*M, "b2"));
+ LazyCallGraph::Node &B3 = *CG.lookup(lookupFunction(*M, "b3"));
+ LazyCallGraph::Node &C1 = *CG.lookup(lookupFunction(*M, "c1"));
+ LazyCallGraph::Node &C2 = *CG.lookup(lookupFunction(*M, "c2"));
+ LazyCallGraph::Node &C3 = *CG.lookup(lookupFunction(*M, "c3"));
+ LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
+ LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
+ LazyCallGraph::SCC &B1C = *CG.lookupSCC(B1);
+ LazyCallGraph::SCC &B2C = *CG.lookupSCC(B2);
+ LazyCallGraph::SCC &B3C = *CG.lookupSCC(B3);
+ LazyCallGraph::SCC &C1C = *CG.lookupSCC(C1);
+ LazyCallGraph::SCC &C2C = *CG.lookupSCC(C2);
+ LazyCallGraph::SCC &C3C = *CG.lookupSCC(C3);
+ LazyCallGraph::SCC &DC = *CG.lookupSCC(D);
- // Remove the edge from b -> a, which should leave the 3 functions still in
- // a single connected component because of a -> b -> c -> a.
- SmallVector<LazyCallGraph::SCC *, 1> NewSCCs = SCC.removeIntraSCCEdge(B, A);
- EXPECT_EQ(0u, NewSCCs.size());
- EXPECT_EQ(&SCC, CG1.lookupSCC(A));
- EXPECT_EQ(&SCC, CG1.lookupSCC(B));
- EXPECT_EQ(&SCC, CG1.lookupSCC(C));
+ // Several call edges are initially present to force a particual post-order.
+ // Remove them now, leaving an interleaved post-order pattern.
+ RC.switchInternalEdgeToRef(B3, C3);
+ RC.switchInternalEdgeToRef(C2, B3);
+ RC.switchInternalEdgeToRef(B2, C2);
+ RC.switchInternalEdgeToRef(C1, B2);
+ RC.switchInternalEdgeToRef(B1, C1);
+
+ // Check the initial post-order. We ensure this order with the extra edges
+ // that are nuked above.
+ ASSERT_EQ(8, RC.size());
+ EXPECT_EQ(&DC, &RC[0]);
+ EXPECT_EQ(&C3C, &RC[1]);
+ EXPECT_EQ(&B3C, &RC[2]);
+ EXPECT_EQ(&C2C, &RC[3]);
+ EXPECT_EQ(&B2C, &RC[4]);
+ EXPECT_EQ(&C1C, &RC[5]);
+ EXPECT_EQ(&B1C, &RC[6]);
+ EXPECT_EQ(&AC, &RC[7]);
+
+ // Switch C3 -> B1 to a call edge. This doesn't form any new cycles but does
+ // require reordering the SCCs in the face of tricky internal node
+ // structures.
+ auto MergedCs = RC.switchInternalEdgeToCall(C3, B1);
+ EXPECT_EQ(0u, MergedCs.size());
+ ASSERT_EQ(8, RC.size());
+ EXPECT_EQ(&DC, &RC[0]);
+ EXPECT_EQ(&B3C, &RC[1]);
+ EXPECT_EQ(&B2C, &RC[2]);
+ EXPECT_EQ(&B1C, &RC[3]);
+ EXPECT_EQ(&C3C, &RC[4]);
+ EXPECT_EQ(&C2C, &RC[5]);
+ EXPECT_EQ(&C1C, &RC[6]);
+ EXPECT_EQ(&AC, &RC[7]);
+}
- // Remove the edge from c -> a, which should leave 'a' in the original SCC
- // and form a new SCC for 'b' and 'c'.
- NewSCCs = SCC.removeIntraSCCEdge(C, A);
- EXPECT_EQ(1u, NewSCCs.size());
- EXPECT_EQ(&SCC, CG1.lookupSCC(A));
- EXPECT_EQ(1, std::distance(SCC.begin(), SCC.end()));
- LazyCallGraph::SCC *SCC2 = CG1.lookupSCC(B);
- EXPECT_EQ(SCC2, CG1.lookupSCC(C));
- EXPECT_EQ(SCC2, NewSCCs[0]);
+TEST(LazyCallGraphTest, InternalRefEdgeToCallBothPartitionAndMerge) {
+ LLVMContext Context;
+ // Test for having a postorder where between the source and target are all
+ // three kinds of other SCCs:
+ // 1) One connected to the target only that have to be shifted below the
+ // source.
+ // 2) One connected to the source only that have to be shifted below the
+ // target.
+ // 3) One connected to both source and target that has to remain and get
+ // merged away.
+ //
+ // To achieve this we construct a heavily connected graph to force
+ // a particular post-order. Then we remove the forcing edges and connect
+ // a cycle.
+ //
+ // Diagram for the graph we want on the left and the graph we use to force
+ // the ordering on the right. Edges ponit down or right.
+ //
+ // A | A |
+ // / \ | / \ |
+ // B E | B \ |
+ // |\ | | |\ | |
+ // | D | | C-D-E |
+ // | \| | | \| |
+ // C F | \ F |
+ // \ / | \ / |
+ // G | G |
+ //
+ // And we form a cycle by connecting F to B.
+ std::unique_ptr<Module> M =
+ parseAssembly(Context, "define void @a() {\n"
+ "entry:\n"
+ " call void @b()\n"
+ " call void @e()\n"
+ " ret void\n"
+ "}\n"
+ "define void @b() {\n"
+ "entry:\n"
+ " call void @c()\n"
+ " call void @d()\n"
+ " ret void\n"
+ "}\n"
+ "define void @c() {\n"
+ "entry:\n"
+ " call void @d()\n"
+ " call void @g()\n"
+ " ret void\n"
+ "}\n"
+ "define void @d() {\n"
+ "entry:\n"
+ " call void @e()\n"
+ " call void @f()\n"
+ " ret void\n"
+ "}\n"
+ "define void @e() {\n"
+ "entry:\n"
+ " call void @f()\n"
+ " ret void\n"
+ "}\n"
+ "define void @f() {\n"
+ "entry:\n"
+ " store void()* @b, void()** undef\n"
+ " call void @g()\n"
+ " ret void\n"
+ "}\n"
+ "define void @g() {\n"
+ "entry:\n"
+ " store void()* @a, void()** undef\n"
+ " ret void\n"
+ "}\n");
+ LazyCallGraph CG(*M);
+
+ // Force the graph to be fully expanded.
+ auto I = CG.postorder_ref_scc_begin();
+ LazyCallGraph::RefSCC &RC = *I++;
+ EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+ LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
+ LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
+ LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
+ LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
+ LazyCallGraph::Node &E = *CG.lookup(lookupFunction(*M, "e"));
+ LazyCallGraph::Node &F = *CG.lookup(lookupFunction(*M, "f"));
+ LazyCallGraph::Node &G = *CG.lookup(lookupFunction(*M, "g"));
+ LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
+ LazyCallGraph::SCC &BC = *CG.lookupSCC(B);
+ LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
+ LazyCallGraph::SCC &DC = *CG.lookupSCC(D);
+ LazyCallGraph::SCC &EC = *CG.lookupSCC(E);
+ LazyCallGraph::SCC &FC = *CG.lookupSCC(F);
+ LazyCallGraph::SCC &GC = *CG.lookupSCC(G);
+
+ // Remove the extra edges that were used to force a particular post-order.
+ RC.switchInternalEdgeToRef(C, D);
+ RC.switchInternalEdgeToRef(D, E);
+
+ // Check the initial post-order. We ensure this order with the extra edges
+ // that are nuked above.
+ ASSERT_EQ(7, RC.size());
+ EXPECT_EQ(&GC, &RC[0]);
+ EXPECT_EQ(&FC, &RC[1]);
+ EXPECT_EQ(&EC, &RC[2]);
+ EXPECT_EQ(&DC, &RC[3]);
+ EXPECT_EQ(&CC, &RC[4]);
+ EXPECT_EQ(&BC, &RC[5]);
+ EXPECT_EQ(&AC, &RC[6]);
+
+ // Switch F -> B to a call edge. This merges B, D, and F into a single SCC,
+ // and has to place the C and E SCCs on either side of it:
+ // A A |
+ // / \ / \ |
+ // B E | E |
+ // |\ | \ / |
+ // | D | -> B |
+ // | \| / \ |
+ // C F C | |
+ // \ / \ / |
+ // G G |
+ auto MergedCs = RC.switchInternalEdgeToCall(F, B);
+ ASSERT_EQ(2u, MergedCs.size());
+ EXPECT_EQ(&FC, MergedCs[0]);
+ EXPECT_EQ(&DC, MergedCs[1]);
+ EXPECT_EQ(3, BC.size());
+
+ // And make sure the postorder was updated.
+ ASSERT_EQ(5, RC.size());
+ EXPECT_EQ(&GC, &RC[0]);
+ EXPECT_EQ(&CC, &RC[1]);
+ EXPECT_EQ(&BC, &RC[2]);
+ EXPECT_EQ(&EC, &RC[3]);
+ EXPECT_EQ(&AC, &RC[4]);
}
}
diff --git a/unittests/Analysis/LoopPassManagerTest.cpp b/unittests/Analysis/LoopPassManagerTest.cpp
new file mode 100644
index 000000000000..5858e174aabb
--- /dev/null
+++ b/unittests/Analysis/LoopPassManagerTest.cpp
@@ -0,0 +1,205 @@
+//===- llvm/unittest/Analysis/LoopPassManagerTest.cpp - LPM tests ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "gtest/gtest.h"
+#include "llvm/Analysis/LoopPassManager.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/SourceMgr.h"
+
+using namespace llvm;
+
+namespace {
+
+class TestLoopAnalysis {
+ /// \brief Private static data to provide unique ID.
+ static char PassID;
+
+ int &Runs;
+
+public:
+ struct Result {
+ Result(int Count) : BlockCount(Count) {}
+ int BlockCount;
+ };
+
+ /// \brief Returns an opaque, unique ID for this pass type.
+ static void *ID() { return (void *)&PassID; }
+
+ /// \brief Returns the name of the analysis.
+ static StringRef name() { return "TestLoopAnalysis"; }
+
+ TestLoopAnalysis(int &Runs) : Runs(Runs) {}
+
+ /// \brief Run the analysis pass over the loop and return a result.
+ Result run(Loop &L, AnalysisManager<Loop> &AM) {
+ ++Runs;
+ int Count = 0;
+
+ for (auto I = L.block_begin(), E = L.block_end(); I != E; ++I)
+ ++Count;
+ return Result(Count);
+ }
+};
+
+char TestLoopAnalysis::PassID;
+
+class TestLoopPass {
+ std::vector<StringRef> &VisitedLoops;
+ int &AnalyzedBlockCount;
+ bool OnlyUseCachedResults;
+
+public:
+ TestLoopPass(std::vector<StringRef> &VisitedLoops, int &AnalyzedBlockCount,
+ bool OnlyUseCachedResults = false)
+ : VisitedLoops(VisitedLoops), AnalyzedBlockCount(AnalyzedBlockCount),
+ OnlyUseCachedResults(OnlyUseCachedResults) {}
+
+ PreservedAnalyses run(Loop &L, AnalysisManager<Loop> &AM) {
+ VisitedLoops.push_back(L.getName());
+
+ if (OnlyUseCachedResults) {
+ // Hack to force the use of the cached interface.
+ if (auto *AR = AM.getCachedResult<TestLoopAnalysis>(L))
+ AnalyzedBlockCount += AR->BlockCount;
+ } else {
+ // Typical path just runs the analysis as needed.
+ auto &AR = AM.getResult<TestLoopAnalysis>(L);
+ AnalyzedBlockCount += AR.BlockCount;
+ }
+
+ return PreservedAnalyses::all();
+ }
+
+ static StringRef name() { return "TestLoopPass"; }
+};
+
+// A test loop pass that invalidates the analysis for loops with the given name.
+class TestLoopInvalidatingPass {
+ StringRef Name;
+
+public:
+ TestLoopInvalidatingPass(StringRef LoopName) : Name(LoopName) {}
+
+ PreservedAnalyses run(Loop &L, AnalysisManager<Loop> &AM) {
+ return L.getName() == Name ? getLoopPassPreservedAnalyses()
+ : PreservedAnalyses::all();
+ }
+
+ static StringRef name() { return "TestLoopInvalidatingPass"; }
+};
+
+std::unique_ptr<Module> parseIR(LLVMContext &C, const char *IR) {
+ SMDiagnostic Err;
+ return parseAssemblyString(IR, Err, C);
+}
+
+class LoopPassManagerTest : public ::testing::Test {
+protected:
+ LLVMContext Context;
+ std::unique_ptr<Module> M;
+
+public:
+ LoopPassManagerTest()
+ : M(parseIR(Context, "define void @f() {\n"
+ "entry:\n"
+ " br label %loop.0\n"
+ "loop.0:\n"
+ " br i1 undef, label %loop.0.0, label %end\n"
+ "loop.0.0:\n"
+ " br i1 undef, label %loop.0.0, label %loop.0.1\n"
+ "loop.0.1:\n"
+ " br i1 undef, label %loop.0.1, label %loop.0\n"
+ "end:\n"
+ " ret void\n"
+ "}\n"
+ "\n"
+ "define void @g() {\n"
+ "entry:\n"
+ " br label %loop.g.0\n"
+ "loop.g.0:\n"
+ " br i1 undef, label %loop.g.0, label %end\n"
+ "end:\n"
+ " ret void\n"
+ "}\n")) {}
+};
+
+#define EXPECT_N_ELEMENTS_EQ(N, EXPECTED, ACTUAL) \
+ do { \
+ EXPECT_EQ(N##UL, ACTUAL.size()); \
+ for (int I = 0; I < N; ++I) \
+ EXPECT_TRUE(EXPECTED[I] == ACTUAL[I]) << "Element " << I << " is " \
+ << ACTUAL[I] << ". Expected " \
+ << EXPECTED[I] << "."; \
+ } while (0)
+
+TEST_F(LoopPassManagerTest, Basic) {
+ LoopAnalysisManager LAM(true);
+ int LoopAnalysisRuns = 0;
+ LAM.registerPass([&] { return TestLoopAnalysis(LoopAnalysisRuns); });
+
+ FunctionAnalysisManager FAM(true);
+ // We need DominatorTreeAnalysis for LoopAnalysis.
+ FAM.registerPass([&] { return DominatorTreeAnalysis(); });
+ FAM.registerPass([&] { return LoopAnalysis(); });
+ FAM.registerPass([&] { return LoopAnalysisManagerFunctionProxy(LAM); });
+ LAM.registerPass([&] { return FunctionAnalysisManagerLoopProxy(FAM); });
+
+ ModuleAnalysisManager MAM(true);
+ MAM.registerPass([&] { return FunctionAnalysisManagerModuleProxy(FAM); });
+ FAM.registerPass([&] { return ModuleAnalysisManagerFunctionProxy(MAM); });
+
+ ModulePassManager MPM(true);
+ FunctionPassManager FPM(true);
+
+ // Visit all of the loops.
+ std::vector<StringRef> VisitedLoops1;
+ int AnalyzedBlockCount1 = 0;
+ {
+ LoopPassManager LPM;
+ LPM.addPass(TestLoopPass(VisitedLoops1, AnalyzedBlockCount1));
+
+ FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));
+ }
+
+ // Only use cached analyses.
+ std::vector<StringRef> VisitedLoops2;
+ int AnalyzedBlockCount2 = 0;
+ {
+ LoopPassManager LPM;
+ LPM.addPass(TestLoopInvalidatingPass("loop.g.0"));
+ LPM.addPass(TestLoopPass(VisitedLoops2, AnalyzedBlockCount2,
+ /*OnlyUseCachedResults=*/true));
+
+ FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));
+ }
+
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+ MPM.run(*M, MAM);
+
+ StringRef ExpectedLoops[] = {"loop.0.0", "loop.0.1", "loop.0", "loop.g.0"};
+
+ // Validate the counters and order of loops visited.
+ // loop.0 has 3 blocks whereas loop.0.0, loop.0.1, and loop.g.0 each have 1.
+ EXPECT_N_ELEMENTS_EQ(4, ExpectedLoops, VisitedLoops1);
+ EXPECT_EQ(6, AnalyzedBlockCount1);
+
+ EXPECT_N_ELEMENTS_EQ(4, ExpectedLoops, VisitedLoops2);
+ // The block from loop.g.0 won't be counted, since it wasn't cached.
+ EXPECT_EQ(5, AnalyzedBlockCount2);
+
+ // The first LPM runs the loop analysis for all four loops, the second uses
+ // cached results for everything.
+ EXPECT_EQ(4, LoopAnalysisRuns);
+}
+}
diff --git a/unittests/Analysis/Makefile b/unittests/Analysis/Makefile
deleted file mode 100644
index 527f4525e87e..000000000000
--- a/unittests/Analysis/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- unittests/Analysis/Makefile -------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TESTNAME = Analysis
-LINK_COMPONENTS := analysis asmparser
-
-include $(LEVEL)/Makefile.config
-include $(LLVM_SRC_ROOT)/unittests/Makefile.unittest
diff --git a/unittests/Analysis/MixedTBAATest.cpp b/unittests/Analysis/MixedTBAATest.cpp
index d0cfa59f6459..d70324f2c6ab 100644
--- a/unittests/Analysis/MixedTBAATest.cpp
+++ b/unittests/Analysis/MixedTBAATest.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
+#include "llvm/Analysis/AliasAnalysisEvaluator.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
diff --git a/unittests/Analysis/ScalarEvolutionTest.cpp b/unittests/Analysis/ScalarEvolutionTest.cpp
index 938dafe60384..c4cd74f01c72 100644
--- a/unittests/Analysis/ScalarEvolutionTest.cpp
+++ b/unittests/Analysis/ScalarEvolutionTest.cpp
@@ -237,5 +237,32 @@ TEST_F(ScalarEvolutionsTest, SCEVMultiplyAddRecs) {
EXPECT_EQ(Product->getOperand(8), SE.getAddExpr(Sum));
}
+TEST_F(ScalarEvolutionsTest, SimplifiedPHI) {
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context),
+ std::vector<Type *>(), false);
+ Function *F = cast<Function>(M.getOrInsertFunction("f", FTy));
+ BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
+ BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
+ BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
+ BranchInst::Create(LoopBB, EntryBB);
+ BranchInst::Create(LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)),
+ LoopBB);
+ ReturnInst::Create(Context, nullptr, ExitBB);
+ auto *Ty = Type::getInt32Ty(Context);
+ auto *PN = PHINode::Create(Ty, 2, "", &*LoopBB->begin());
+ PN->addIncoming(Constant::getNullValue(Ty), EntryBB);
+ PN->addIncoming(UndefValue::get(Ty), LoopBB);
+ ScalarEvolution SE = buildSE(*F);
+ auto *S1 = SE.getSCEV(PN);
+ auto *S2 = SE.getSCEV(PN);
+ auto *ZeroConst = SE.getConstant(Ty, 0);
+
+ // At some point, only the first call to getSCEV returned the simplified
+ // SCEVConstant and later calls just returned a SCEVUnknown referencing the
+ // PHI node.
+ EXPECT_EQ(S1, ZeroConst);
+ EXPECT_EQ(S1, S2);
+}
+
} // end anonymous namespace
} // end namespace llvm
diff --git a/unittests/Analysis/UnrollAnalyzer.cpp b/unittests/Analysis/UnrollAnalyzer.cpp
new file mode 100644
index 000000000000..6d11ab1f2f1b
--- /dev/null
+++ b/unittests/Analysis/UnrollAnalyzer.cpp
@@ -0,0 +1,331 @@
+//===- UnrollAnalyzerTest.cpp - UnrollAnalyzer unit tests -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Analysis/LoopUnrollAnalyzer.h"
+#include "llvm/IR/Dominators.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+namespace llvm {
+void initializeUnrollAnalyzerTestPass(PassRegistry &);
+
+static SmallVector<DenseMap<Value *, Constant *>, 16> SimplifiedValuesVector;
+static unsigned TripCount = 0;
+
+namespace {
+struct UnrollAnalyzerTest : public FunctionPass {
+ static char ID;
+ bool runOnFunction(Function &F) override {
+ LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+ ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
+
+ Function::iterator FI = F.begin();
+ FI++; // First basic block is entry - skip it.
+ BasicBlock *Header = &*FI++;
+ Loop *L = LI->getLoopFor(Header);
+ BasicBlock *Exiting = L->getExitingBlock();
+
+ SimplifiedValuesVector.clear();
+ TripCount = SE->getSmallConstantTripCount(L, Exiting);
+ for (unsigned Iteration = 0; Iteration < TripCount; Iteration++) {
+ DenseMap<Value *, Constant *> SimplifiedValues;
+ UnrolledInstAnalyzer Analyzer(Iteration, SimplifiedValues, *SE, L);
+ for (auto *BB : L->getBlocks())
+ for (Instruction &I : *BB)
+ Analyzer.visit(I);
+ SimplifiedValuesVector.push_back(SimplifiedValues);
+ }
+ return false;
+ }
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addRequired<LoopInfoWrapperPass>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
+ AU.setPreservesAll();
+ }
+ UnrollAnalyzerTest() : FunctionPass(ID) {
+ initializeUnrollAnalyzerTestPass(*PassRegistry::getPassRegistry());
+ }
+};
+}
+
+char UnrollAnalyzerTest::ID = 0;
+
+std::unique_ptr<Module> makeLLVMModule(LLVMContext &Context,
+ UnrollAnalyzerTest *P,
+ const char *ModuleStr) {
+ SMDiagnostic Err;
+ return parseAssemblyString(ModuleStr, Err, Context);
+}
+
+TEST(UnrollAnalyzerTest, BasicSimplifications) {
+ const char *ModuleStr =
+ "target datalayout = \"e-m:o-i64:64-f80:128-n8:16:32:64-S128\"\n"
+ "define i64 @propagate_loop_phis() {\n"
+ "entry:\n"
+ " br label %loop\n"
+ "loop:\n"
+ " %iv = phi i64 [ 0, %entry ], [ %inc, %loop ]\n"
+ " %x0 = phi i64 [ 0, %entry ], [ %x2, %loop ]\n"
+ " %x1 = or i64 %x0, 1\n"
+ " %x2 = or i64 %x1, 2\n"
+ " %inc = add nuw nsw i64 %iv, 1\n"
+ " %cond = icmp sge i64 %inc, 8\n"
+ " br i1 %cond, label %loop.end, label %loop\n"
+ "loop.end:\n"
+ " %x.lcssa = phi i64 [ %x2, %loop ]\n"
+ " ret i64 %x.lcssa\n"
+ "}\n";
+ UnrollAnalyzerTest *P = new UnrollAnalyzerTest();
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, P, ModuleStr);
+ legacy::PassManager Passes;
+ Passes.add(P);
+ Passes.run(*M);
+
+ // Perform checks
+ Module::iterator MI = M->begin();
+ Function *F = &*MI++;
+ Function::iterator FI = F->begin();
+ FI++; // First basic block is entry - skip it.
+ BasicBlock *Header = &*FI++;
+
+ BasicBlock::iterator BBI = Header->begin();
+ std::advance(BBI, 4);
+ Instruction *Y1 = &*BBI++;
+ Instruction *Y2 = &*BBI++;
+ // Check simplification expected on the 1st iteration.
+ // Check that "%inc = add nuw nsw i64 %iv, 1" is simplified to 1
+ auto I1 = SimplifiedValuesVector[0].find(Y1);
+ EXPECT_TRUE(I1 != SimplifiedValuesVector[0].end());
+ EXPECT_EQ(cast<ConstantInt>((*I1).second)->getZExtValue(), 1U);
+
+  // Check that "%cond = icmp sge i64 %inc, 8" is simplified to false
+ auto I2 = SimplifiedValuesVector[0].find(Y2);
+ EXPECT_TRUE(I2 != SimplifiedValuesVector[0].end());
+ EXPECT_FALSE(cast<ConstantInt>((*I2).second)->getZExtValue());
+
+ // Check simplification expected on the last iteration.
+ // Check that "%inc = add nuw nsw i64 %iv, 1" is simplified to 8
+ I1 = SimplifiedValuesVector[TripCount - 1].find(Y1);
+ EXPECT_TRUE(I1 != SimplifiedValuesVector[TripCount - 1].end());
+ EXPECT_EQ(cast<ConstantInt>((*I1).second)->getZExtValue(), TripCount);
+
+  // Check that "%cond = icmp sge i64 %inc, 8" is simplified to true
+ I2 = SimplifiedValuesVector[TripCount - 1].find(Y2);
+ EXPECT_TRUE(I2 != SimplifiedValuesVector[TripCount - 1].end());
+ EXPECT_TRUE(cast<ConstantInt>((*I2).second)->getZExtValue());
+}
+
+TEST(UnrollAnalyzerTest, OuterLoopSimplification) {
+ const char *ModuleStr =
+ "target datalayout = \"e-m:o-i64:64-f80:128-n8:16:32:64-S128\"\n"
+ "define void @foo() {\n"
+ "entry:\n"
+ " br label %outer.loop\n"
+ "outer.loop:\n"
+ " %iv.outer = phi i64 [ 0, %entry ], [ %iv.outer.next, %outer.loop.latch ]\n"
+ " %iv.outer.next = add nuw nsw i64 %iv.outer, 1\n"
+ " br label %inner.loop\n"
+ "inner.loop:\n"
+ " %iv.inner = phi i64 [ 0, %outer.loop ], [ %iv.inner.next, %inner.loop ]\n"
+ " %iv.inner.next = add nuw nsw i64 %iv.inner, 1\n"
+ " %exitcond.inner = icmp eq i64 %iv.inner.next, 1000\n"
+ " br i1 %exitcond.inner, label %outer.loop.latch, label %inner.loop\n"
+ "outer.loop.latch:\n"
+ " %exitcond.outer = icmp eq i64 %iv.outer.next, 40\n"
+ " br i1 %exitcond.outer, label %exit, label %outer.loop\n"
+ "exit:\n"
+ " ret void\n"
+ "}\n";
+
+ UnrollAnalyzerTest *P = new UnrollAnalyzerTest();
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, P, ModuleStr);
+ legacy::PassManager Passes;
+ Passes.add(P);
+ Passes.run(*M);
+
+ Module::iterator MI = M->begin();
+ Function *F = &*MI++;
+ Function::iterator FI = F->begin();
+ FI++;
+ BasicBlock *Header = &*FI++;
+ BasicBlock *InnerBody = &*FI++;
+
+ BasicBlock::iterator BBI = Header->begin();
+ BBI++;
+ Instruction *Y1 = &*BBI;
+ BBI = InnerBody->begin();
+ BBI++;
+ Instruction *Y2 = &*BBI;
+ // Check that we can simplify IV of the outer loop, but can't simplify the IV
+ // of the inner loop if we only know the iteration number of the outer loop.
+ //
+ // Y1 is %iv.outer.next, Y2 is %iv.inner.next
+ auto I1 = SimplifiedValuesVector[0].find(Y1);
+ EXPECT_TRUE(I1 != SimplifiedValuesVector[0].end());
+ auto I2 = SimplifiedValuesVector[0].find(Y2);
+ EXPECT_TRUE(I2 == SimplifiedValuesVector[0].end());
+}
+TEST(UnrollAnalyzerTest, CmpSimplifications) {
+ const char *ModuleStr =
+ "target datalayout = \"e-m:o-i64:64-f80:128-n8:16:32:64-S128\"\n"
+ "define void @branch_iv_trunc() {\n"
+ "entry:\n"
+ " br label %for.body\n"
+ "for.body:\n"
+ " %indvars.iv = phi i64 [ 0, %entry ], [ %tmp3, %for.body ]\n"
+ " %tmp2 = trunc i64 %indvars.iv to i32\n"
+ " %cmp3 = icmp eq i32 %tmp2, 5\n"
+ " %tmp3 = add nuw nsw i64 %indvars.iv, 1\n"
+ " %exitcond = icmp eq i64 %tmp3, 10\n"
+ " br i1 %exitcond, label %for.end, label %for.body\n"
+ "for.end:\n"
+ " ret void\n"
+ "}\n";
+ UnrollAnalyzerTest *P = new UnrollAnalyzerTest();
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, P, ModuleStr);
+ legacy::PassManager Passes;
+ Passes.add(P);
+ Passes.run(*M);
+
+ // Perform checks
+ Module::iterator MI = M->begin();
+ Function *F = &*MI++;
+ Function::iterator FI = F->begin();
+ FI++; // First basic block is entry - skip it.
+ BasicBlock *Header = &*FI++;
+
+ BasicBlock::iterator BBI = Header->begin();
+ BBI++;
+ Instruction *Y1 = &*BBI++;
+ Instruction *Y2 = &*BBI++;
+ // Check simplification expected on the 5th iteration.
+ // Check that "%tmp2 = trunc i64 %indvars.iv to i32" is simplified to 5
+ // and "%cmp3 = icmp eq i32 %tmp2, 5" is simplified to 1 (i.e. true).
+ auto I1 = SimplifiedValuesVector[5].find(Y1);
+ EXPECT_TRUE(I1 != SimplifiedValuesVector[5].end());
+ EXPECT_EQ(cast<ConstantInt>((*I1).second)->getZExtValue(), 5U);
+ auto I2 = SimplifiedValuesVector[5].find(Y2);
+ EXPECT_TRUE(I2 != SimplifiedValuesVector[5].end());
+ EXPECT_EQ(cast<ConstantInt>((*I2).second)->getZExtValue(), 1U);
+}
+TEST(UnrollAnalyzerTest, PtrCmpSimplifications) {
+ const char *ModuleStr =
+ "target datalayout = \"e-m:o-i64:64-f80:128-n8:16:32:64-S128\"\n"
+ "define void @ptr_cmp(i8 *%a) {\n"
+ "entry:\n"
+ " %limit = getelementptr i8, i8* %a, i64 40\n"
+ " %start.iv2 = getelementptr i8, i8* %a, i64 7\n"
+ " br label %loop.body\n"
+ "loop.body:\n"
+ " %iv.0 = phi i8* [ %a, %entry ], [ %iv.1, %loop.body ]\n"
+ " %iv2.0 = phi i8* [ %start.iv2, %entry ], [ %iv2.1, %loop.body ]\n"
+ " %cmp = icmp eq i8* %iv2.0, %iv.0\n"
+ " %iv.1 = getelementptr inbounds i8, i8* %iv.0, i64 1\n"
+ " %iv2.1 = getelementptr inbounds i8, i8* %iv2.0, i64 1\n"
+ " %exitcond = icmp ne i8* %iv.1, %limit\n"
+ " br i1 %exitcond, label %loop.body, label %loop.exit\n"
+ "loop.exit:\n"
+ " ret void\n"
+ "}\n";
+ UnrollAnalyzerTest *P = new UnrollAnalyzerTest();
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, P, ModuleStr);
+ legacy::PassManager Passes;
+ Passes.add(P);
+ Passes.run(*M);
+
+ // Perform checks
+ Module::iterator MI = M->begin();
+ Function *F = &*MI++;
+ Function::iterator FI = F->begin();
+ FI++; // First basic block is entry - skip it.
+ BasicBlock *Header = &*FI;
+
+ BasicBlock::iterator BBI = Header->begin();
+ std::advance(BBI, 2);
+ Instruction *Y1 = &*BBI;
+ // Check simplification expected on the 5th iteration.
+ // Check that "%cmp = icmp eq i8* %iv2.0, %iv.0" is simplified to 0.
+ auto I1 = SimplifiedValuesVector[5].find(Y1);
+ EXPECT_TRUE(I1 != SimplifiedValuesVector[5].end());
+ EXPECT_EQ(cast<ConstantInt>((*I1).second)->getZExtValue(), 0U);
+}
+TEST(UnrollAnalyzerTest, CastSimplifications) {
+ const char *ModuleStr =
+ "target datalayout = \"e-m:o-i64:64-f80:128-n8:16:32:64-S128\"\n"
+ "@known_constant = internal unnamed_addr constant [10 x i32] [i32 0, i32 1, i32 0, i32 1, i32 0, i32 259, i32 0, i32 1, i32 0, i32 1], align 16\n"
+ "define void @const_load_cast() {\n"
+ "entry:\n"
+ " br label %loop\n"
+ "\n"
+ "loop:\n"
+ " %iv = phi i64 [ 0, %entry ], [ %inc, %loop ]\n"
+ " %array_const_idx = getelementptr inbounds [10 x i32], [10 x i32]* @known_constant, i64 0, i64 %iv\n"
+ " %const_array_element = load i32, i32* %array_const_idx, align 4\n"
+ " %se = sext i32 %const_array_element to i64\n"
+ " %ze = zext i32 %const_array_element to i64\n"
+ " %tr = trunc i32 %const_array_element to i8\n"
+ " %inc = add nuw nsw i64 %iv, 1\n"
+ " %exitcond86.i = icmp eq i64 %inc, 10\n"
+ " br i1 %exitcond86.i, label %loop.end, label %loop\n"
+ "\n"
+ "loop.end:\n"
+ " ret void\n"
+ "}\n";
+
+ UnrollAnalyzerTest *P = new UnrollAnalyzerTest();
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, P, ModuleStr);
+ legacy::PassManager Passes;
+ Passes.add(P);
+ Passes.run(*M);
+
+ // Perform checks
+ Module::iterator MI = M->begin();
+ Function *F = &*MI++;
+ Function::iterator FI = F->begin();
+ FI++; // First basic block is entry - skip it.
+ BasicBlock *Header = &*FI++;
+
+ BasicBlock::iterator BBI = Header->begin();
+ std::advance(BBI, 3);
+ Instruction *Y1 = &*BBI++;
+ Instruction *Y2 = &*BBI++;
+ Instruction *Y3 = &*BBI++;
+ // Check simplification expected on the 5th iteration.
+ // "%se = sext i32 %const_array_element to i64" should be simplified to 259,
+ // "%ze = zext i32 %const_array_element to i64" should be simplified to 259,
+ // "%tr = trunc i32 %const_array_element to i8" should be simplified to 3.
+ auto I1 = SimplifiedValuesVector[5].find(Y1);
+ EXPECT_TRUE(I1 != SimplifiedValuesVector[5].end());
+ EXPECT_EQ(cast<ConstantInt>((*I1).second)->getZExtValue(), 259U);
+ auto I2 = SimplifiedValuesVector[5].find(Y2);
+ EXPECT_TRUE(I2 != SimplifiedValuesVector[5].end());
+ EXPECT_EQ(cast<ConstantInt>((*I2).second)->getZExtValue(), 259U);
+ auto I3 = SimplifiedValuesVector[5].find(Y3);
+ EXPECT_TRUE(I3 != SimplifiedValuesVector[5].end());
+ EXPECT_EQ(cast<ConstantInt>((*I3).second)->getZExtValue(), 3U);
+}
+
+} // end namespace llvm
+
+INITIALIZE_PASS_BEGIN(UnrollAnalyzerTest, "unrollanalyzertestpass",
+ "unrollanalyzertestpass", false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
+INITIALIZE_PASS_END(UnrollAnalyzerTest, "unrollanalyzertestpass",
+ "unrollanalyzertestpass", false, false)
diff --git a/unittests/Analysis/ValueTrackingTest.cpp b/unittests/Analysis/ValueTrackingTest.cpp
index 3af856ea2039..f429c3b99149 100644
--- a/unittests/Analysis/ValueTrackingTest.cpp
+++ b/unittests/Analysis/ValueTrackingTest.cpp
@@ -25,7 +25,7 @@ class MatchSelectPatternTest : public testing::Test {
protected:
void parseAssembly(const char *Assembly) {
SMDiagnostic Error;
- M = parseAssemblyString(Assembly, Error, getGlobalContext());
+ M = parseAssemblyString(Assembly, Error, Context);
std::string errMsg;
raw_string_ostream os(errMsg);
@@ -59,6 +59,7 @@ protected:
EXPECT_EQ(P.Ordered, R.Ordered);
}
+ LLVMContext Context;
std::unique_ptr<Module> M;
Instruction *A, *B;
};