author    Dimitry Andric <dim@FreeBSD.org>  2019-01-20 11:41:25 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2019-01-20 11:41:25 +0000
commit    d9484dd61cc151c4f34c31e07f693fefa66316b5 (patch)
tree      ab0560b3da293f1fafd3269c59692e929418f5c2 /contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
parent    79e0962d4c3cf1f0acf359a9d69cb3ac68c414c4 (diff)
parent    d8e91e46262bc44006913e6796843909f1ac7bcd (diff)
Merge llvm trunk r351319, resolve conflicts, and update FREEBSD-Xlist.
Notes: svn path=/projects/clang800-import/; revision=343210
Diffstat (limited to 'contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp')
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp  |  68
1 file changed, 35 insertions(+), 33 deletions(-)
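
Most of this diff adapts DSE to the LocationSize type that replaced raw uint64_t sizes on MemoryLocation in this import: a size may now be precise, an upper bound, or unknown, so callers must check isPrecise() before reading a value with getValue(). A minimal sketch of that pattern, assuming LLVM-8-era headers (the helper name is illustrative, not part of the commit):

#include "llvm/Analysis/MemoryLocation.h"

using namespace llvm;

// getPreciseSize is a hypothetical helper: it extracts an exact access
// size only when LocationSize guarantees one. Upper-bound or unknown
// sizes would make the byte-range comparisons in isOverwrite() unsound,
// which is why the patched code bails out with OW_Unknown for them.
static bool getPreciseSize(const MemoryLocation &Loc, uint64_t &Size) {
  if (!Loc.Size.isPrecise())
    return false;             // unknown, or only an upper bound
  Size = Loc.Size.getValue(); // safe: the size is exactly known
  return true;
}
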
diff --git a/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 9a7405e98e7d..469930ca6a19 100644
--- a/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -71,7 +71,7 @@ using namespace llvm;
STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
STATISTIC(NumFastStores, "Number of stores deleted");
-STATISTIC(NumFastOther , "Number of other instrs removed");
+STATISTIC(NumFastOther, "Number of other instrs removed");
STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
STATISTIC(NumModifiedStores, "Number of stores modified");
@@ -349,11 +349,14 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
InstOverlapIntervalsTy &IOL,
AliasAnalysis &AA,
const Function *F) {
- // If we don't know the sizes of either access, then we can't do a comparison.
- if (Later.Size == MemoryLocation::UnknownSize ||
- Earlier.Size == MemoryLocation::UnknownSize)
+ // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
+ // get imprecise values here, though (except for unknown sizes).
+ if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise())
return OW_Unknown;
+ const uint64_t LaterSize = Later.Size.getValue();
+ const uint64_t EarlierSize = Earlier.Size.getValue();
+
const Value *P1 = Earlier.Ptr->stripPointerCasts();
const Value *P2 = Later.Ptr->stripPointerCasts();
@@ -361,7 +364,7 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
// the later store was larger than the earlier store.
if (P1 == P2 || AA.isMustAlias(P1, P2)) {
// Make sure that the Later size is >= the Earlier size.
- if (Later.Size >= Earlier.Size)
+ if (LaterSize >= EarlierSize)
return OW_Complete;
}
@@ -379,7 +382,7 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
// If the "Later" store is to a recognizable object, get its size.
uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, F);
if (ObjectSize != MemoryLocation::UnknownSize)
- if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
+ if (ObjectSize == LaterSize && ObjectSize >= EarlierSize)
return OW_Complete;
// Okay, we have stores to two completely different pointers. Try to
@@ -410,8 +413,8 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
//
// We have to be careful here as *Off is signed while *.Size is unsigned.
if (EarlierOff >= LaterOff &&
- Later.Size >= Earlier.Size &&
- uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
+ LaterSize >= EarlierSize &&
+ uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
return OW_Complete;
// We may now overlap, although the overlap is not complete. There might also
@@ -420,21 +423,21 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
// Note: The correctness of this logic depends on the fact that this function
// is not even called providing DepWrite when there are any intervening reads.
if (EnablePartialOverwriteTracking &&
- LaterOff < int64_t(EarlierOff + Earlier.Size) &&
- int64_t(LaterOff + Later.Size) >= EarlierOff) {
+ LaterOff < int64_t(EarlierOff + EarlierSize) &&
+ int64_t(LaterOff + LaterSize) >= EarlierOff) {
// Insert our part of the overlap into the map.
auto &IM = IOL[DepWrite];
LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
- << ", " << int64_t(EarlierOff + Earlier.Size)
+ << ", " << int64_t(EarlierOff + EarlierSize)
<< ") Later [" << LaterOff << ", "
- << int64_t(LaterOff + Later.Size) << ")\n");
+ << int64_t(LaterOff + LaterSize) << ")\n");
// Make sure that we only insert non-overlapping intervals and combine
// adjacent intervals. The intervals are stored in the map with the ending
// offset as the key (in the half-open sense) and the starting offset as
// the value.
- int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + Later.Size;
+ int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize;
// Find any intervals ending at, or after, LaterIntStart which start
// before LaterIntEnd.
@@ -464,10 +467,10 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
ILI = IM.begin();
if (ILI->second <= EarlierOff &&
- ILI->first >= int64_t(EarlierOff + Earlier.Size)) {
+ ILI->first >= int64_t(EarlierOff + EarlierSize)) {
LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
<< EarlierOff << ", "
- << int64_t(EarlierOff + Earlier.Size)
+ << int64_t(EarlierOff + EarlierSize)
<< ") Composite Later [" << ILI->second << ", "
<< ILI->first << ")\n");
++NumCompletePartials;
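
For context on the bookkeeping in the two hunks above: IOL maps each earlier write to the set of byte ranges that later stores have already overwritten, stored as half-open intervals keyed by end offset with the start offset as the value. A standalone sketch of that convention, using only standard C++ (names are illustrative, not LLVM's):

#include <cstdint>
#include <map>

// Mirrors DSE's convention: key = interval end (exclusive), value =
// interval start, so lower_bound(Start) finds the first recorded
// interval that ends at or after a query point.
using OverlapIntervals = std::map<int64_t, int64_t>;

// True when a single recorded interval covers all of [Start, End),
// i.e. the partial overwrites have merged into a full overwrite.
static bool coversRange(const OverlapIntervals &IM, int64_t Start,
                        int64_t End) {
  auto ILI = IM.lower_bound(Start); // first interval ending >= Start
  return ILI != IM.end() && ILI->second <= Start && ILI->first >= End;
}
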
@@ -478,13 +481,13 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
// Check for an earlier store which writes to all the memory locations that
// the later store writes to.
if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
- int64_t(EarlierOff + Earlier.Size) > LaterOff &&
- uint64_t(LaterOff - EarlierOff) + Later.Size <= Earlier.Size) {
+ int64_t(EarlierOff + EarlierSize) > LaterOff &&
+ uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) {
LLVM_DEBUG(dbgs() << "DSE: Partial overwrite an earlier load ["
<< EarlierOff << ", "
- << int64_t(EarlierOff + Earlier.Size)
+ << int64_t(EarlierOff + EarlierSize)
<< ") by a later store [" << LaterOff << ", "
- << int64_t(LaterOff + Later.Size) << ")\n");
+ << int64_t(LaterOff + LaterSize) << ")\n");
// TODO: Maybe come up with a better name?
return OW_PartialEarlierWithFullLater;
}
@@ -498,8 +501,8 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
// In this case we may want to trim the size of earlier to avoid generating
// writes to addresses which will definitely be overwritten later
if (!EnablePartialOverwriteTracking &&
- (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + Earlier.Size) &&
- int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size)))
+ (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) &&
+ int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize)))
return OW_End;
// Finally, we also need to check if the later store overwrites the beginning
@@ -512,9 +515,8 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
// of earlier to avoid generating writes to addresses which will definitely
// be overwritten later.
if (!EnablePartialOverwriteTracking &&
- (LaterOff <= EarlierOff && int64_t(LaterOff + Later.Size) > EarlierOff)) {
- assert(int64_t(LaterOff + Later.Size) <
- int64_t(EarlierOff + Earlier.Size) &&
+ (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) {
+ assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) &&
"Expect to be handled as OW_Complete");
return OW_Begin;
}
@@ -641,7 +643,7 @@ static void findUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
BasicBlock *Pred = *I;
if (Pred == BB) continue;
- TerminatorInst *PredTI = Pred->getTerminator();
+ Instruction *PredTI = Pred->getTerminator();
if (PredTI->getNumSuccessors() != 1)
continue;
@@ -832,7 +834,7 @@ static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
continue;
}
- if (auto CS = CallSite(&*BBI)) {
+ if (auto *Call = dyn_cast<CallBase>(&*BBI)) {
// Remove allocation function calls from the list of dead stack objects;
// there can't be any references before the definition.
if (isAllocLikeFn(&*BBI, TLI))
@@ -840,15 +842,15 @@ static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
// If this call does not access memory, it can't be loading any of our
// pointers.
- if (AA->doesNotAccessMemory(CS))
+ if (AA->doesNotAccessMemory(Call))
continue;
// If the call might load from any of our allocas, then any store above
// the call is live.
DeadStackObjects.remove_if([&](Value *I) {
// See if the call site touches the value.
- return isRefSet(AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI,
- BB.getParent())));
+ return isRefSet(AA->getModRefInfo(
+ Call, I, getPointerSize(I, DL, *TLI, BB.getParent())));
});
// If all of the allocas were clobbered by the call then we're not going
@@ -1002,11 +1004,10 @@ static bool removePartiallyOverlappedStores(AliasAnalysis *AA,
Instruction *EarlierWrite = OI.first;
MemoryLocation Loc = getLocForWrite(EarlierWrite);
assert(isRemovable(EarlierWrite) && "Expect only removable instruction");
- assert(Loc.Size != MemoryLocation::UnknownSize && "Unexpected mem loc");
const Value *Ptr = Loc.Ptr->stripPointerCasts();
int64_t EarlierStart = 0;
- int64_t EarlierSize = int64_t(Loc.Size);
+ int64_t EarlierSize = int64_t(Loc.Size.getValue());
GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
OverlapIntervalsTy &IntervalMap = OI.second;
Changed |=
@@ -1203,8 +1204,9 @@ static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
assert(!EnablePartialOverwriteTracking && "Do not expect to perform "
"when partial-overwrite "
"tracking is enabled");
- int64_t EarlierSize = DepLoc.Size;
- int64_t LaterSize = Loc.Size;
+ // The overwrite result is known, so these must be known, too.
+ int64_t EarlierSize = DepLoc.Size.getValue();
+ int64_t LaterSize = Loc.Size.getValue();
bool IsOverwriteEnd = (OR == OW_End);
MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
InstWriteOffset, LaterSize, IsOverwriteEnd);
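
The remaining mechanical changes track two other API migrations in this LLVM cycle: TerminatorInst was folded into Instruction (so getTerminator() now returns a plain Instruction *), and the CallSite wrapper gave way to the CallBase instruction class. A hedged sketch of the CallBase pattern used in handleEndBlock, with a made-up helper name for illustration:

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/InstrTypes.h" // CallBase

using namespace llvm;

// callMayReadLoc models the updated handleEndBlock query: CallBase is an
// Instruction subclass, so a plain dyn_cast replaces the old CallSite
// wrapper, and the alias-analysis interface takes the call directly.
static bool callMayReadLoc(Instruction *I, AliasAnalysis &AA,
                           const MemoryLocation &Loc) {
  if (const auto *Call = dyn_cast<CallBase>(I))
    return isRefSet(AA.getModRefInfo(Call, Loc));
  return false; // not a call; nothing to query
}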