author     Dimitry Andric <dim@FreeBSD.org>    2024-07-26 22:04:10 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2024-07-26 22:04:10 +0000
commit     ac9a064cb179f3425b310fa2847f8764ac970a4d (patch)
tree       6f945cdaa68c2b4c688dcf9fec4f922d35f4d1a4 /llvm/lib/CodeGen/AtomicExpandPass.cpp
parent     4df029cc74e5ec124f14a5682e44999ce4f086df (diff)
Vendor import of llvm-project main llvmorg-19-init-18630-gf2ccf80136a0,
the last commit before the upstream release/19.x branch was created.
(refs: vendor/llvm-project/llvmorg-19-init-18630-gf2ccf80136a0, vendor/llvm-project/main)
Diffstat (limited to 'llvm/lib/CodeGen/AtomicExpandPass.cpp')
-rw-r--r--  llvm/lib/CodeGen/AtomicExpandPass.cpp  266
1 file changed, 175 insertions, 91 deletions
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index ccf3e9ec6492..ebcf76175a36 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -19,8 +19,9 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
+#include "llvm/CodeGen/AtomicExpand.h"
#include "llvm/CodeGen/AtomicExpandUtils.h"
-#include "llvm/CodeGen/RuntimeLibcalls.h"
+#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
@@ -36,6 +37,8 @@
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
@@ -59,19 +62,10 @@ using namespace llvm;
namespace {
-class AtomicExpand : public FunctionPass {
+class AtomicExpandImpl {
const TargetLowering *TLI = nullptr;
const DataLayout *DL = nullptr;
-public:
- static char ID; // Pass identification, replacement for typeid
-
- AtomicExpand() : FunctionPass(ID) {
- initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
- }
-
- bool runOnFunction(Function &F) override;
-
private:
bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
@@ -124,47 +118,77 @@ private:
friend bool
llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
CreateCmpXchgInstFun CreateCmpXchg);
+
+public:
+ bool run(Function &F, const TargetMachine *TM);
+};
+
+class AtomicExpandLegacy : public FunctionPass {
+public:
+ static char ID; // Pass identification, replacement for typeid
+
+ AtomicExpandLegacy() : FunctionPass(ID) {
+ initializeAtomicExpandLegacyPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F) override;
};
// IRBuilder to be used for replacement atomic instructions.
-struct ReplacementIRBuilder : IRBuilder<InstSimplifyFolder> {
+struct ReplacementIRBuilder
+ : IRBuilder<InstSimplifyFolder, IRBuilderCallbackInserter> {
+ MDNode *MMRAMD = nullptr;
+
// Preserves the DebugLoc from I, and preserves still valid metadata.
+ // Enable StrictFP builder mode when appropriate.
explicit ReplacementIRBuilder(Instruction *I, const DataLayout &DL)
- : IRBuilder(I->getContext(), DL) {
+ : IRBuilder(I->getContext(), DL,
+ IRBuilderCallbackInserter(
+ [this](Instruction *I) { addMMRAMD(I); })) {
SetInsertPoint(I);
this->CollectMetadataToCopy(I, {LLVMContext::MD_pcsections});
+ if (BB->getParent()->getAttributes().hasFnAttr(Attribute::StrictFP))
+ this->setIsFPConstrained(true);
+
+ MMRAMD = I->getMetadata(LLVMContext::MD_mmra);
+ }
+
+ void addMMRAMD(Instruction *I) {
+ if (canInstructionHaveMMRAs(*I))
+ I->setMetadata(LLVMContext::MD_mmra, MMRAMD);
}
};
} // end anonymous namespace
-char AtomicExpand::ID = 0;
+char AtomicExpandLegacy::ID = 0;
-char &llvm::AtomicExpandID = AtomicExpand::ID;
+char &llvm::AtomicExpandID = AtomicExpandLegacy::ID;
-INITIALIZE_PASS(AtomicExpand, DEBUG_TYPE, "Expand Atomic instructions", false,
- false)
-
-FunctionPass *llvm::createAtomicExpandPass() { return new AtomicExpand(); }
+INITIALIZE_PASS_BEGIN(AtomicExpandLegacy, DEBUG_TYPE,
+ "Expand Atomic instructions", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
+INITIALIZE_PASS_END(AtomicExpandLegacy, DEBUG_TYPE,
+ "Expand Atomic instructions", false, false)
// Helper functions to retrieve the size of atomic instructions.
static unsigned getAtomicOpSize(LoadInst *LI) {
- const DataLayout &DL = LI->getModule()->getDataLayout();
+ const DataLayout &DL = LI->getDataLayout();
return DL.getTypeStoreSize(LI->getType());
}
static unsigned getAtomicOpSize(StoreInst *SI) {
- const DataLayout &DL = SI->getModule()->getDataLayout();
+ const DataLayout &DL = SI->getDataLayout();
return DL.getTypeStoreSize(SI->getValueOperand()->getType());
}
static unsigned getAtomicOpSize(AtomicRMWInst *RMWI) {
- const DataLayout &DL = RMWI->getModule()->getDataLayout();
+ const DataLayout &DL = RMWI->getDataLayout();
return DL.getTypeStoreSize(RMWI->getValOperand()->getType());
}
static unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {
- const DataLayout &DL = CASI->getModule()->getDataLayout();
+ const DataLayout &DL = CASI->getDataLayout();
return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
}
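Note: the ReplacementIRBuilder change above routes every newly created instruction through an IRBuilderCallbackInserter so that !mmra metadata is re-attached automatically. A minimal standalone sketch of that pattern, assuming LLVM headers are available; the helper name and the metadata kind "example.tag" are invented for illustration:

#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Every instruction created through B is handed to the callback, mirroring
// ReplacementIRBuilder::addMMRAMD above.
static void emitTaggedFence(Instruction *InsertPt, MDNode *Tag) {
  const DataLayout &DL = InsertPt->getModule()->getDataLayout();
  IRBuilder<InstSimplifyFolder, IRBuilderCallbackInserter> B(
      InsertPt->getContext(), InstSimplifyFolder(DL),
      IRBuilderCallbackInserter(
          [Tag](Instruction *NewI) { NewI->setMetadata("example.tag", Tag); }));
  B.SetInsertPoint(InsertPt);
  B.CreateFence(AtomicOrdering::SequentiallyConsistent); // receives the tag
}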
@@ -179,17 +203,12 @@ static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
}
-bool AtomicExpand::runOnFunction(Function &F) {
- auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
- if (!TPC)
- return false;
-
- auto &TM = TPC->getTM<TargetMachine>();
- const auto *Subtarget = TM.getSubtargetImpl(F);
+bool AtomicExpandImpl::run(Function &F, const TargetMachine *TM) {
+ const auto *Subtarget = TM->getSubtargetImpl(F);
if (!Subtarget->enableAtomicExpand())
return false;
TLI = Subtarget->getTargetLowering();
- DL = &F.getParent()->getDataLayout();
+ DL = &F.getDataLayout();
SmallVector<Instruction *, 1> AtomicInsts;
@@ -322,16 +341,6 @@ bool AtomicExpand::runOnFunction(Function &F) {
if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
MadeChange = true;
} else {
- AtomicRMWInst::BinOp Op = RMWI->getOperation();
- unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
- unsigned ValueSize = getAtomicOpSize(RMWI);
- if (ValueSize < MinCASSize &&
- (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
- Op == AtomicRMWInst::And)) {
- RMWI = widenPartwordAtomicRMW(RMWI);
- MadeChange = true;
- }
-
MadeChange |= tryExpandAtomicRMW(RMWI);
}
} else if (CASI)
@@ -340,7 +349,33 @@ bool AtomicExpand::runOnFunction(Function &F) {
return MadeChange;
}
-bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order) {
+bool AtomicExpandLegacy::runOnFunction(Function &F) {
+
+ auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
+ if (!TPC)
+ return false;
+ auto *TM = &TPC->getTM<TargetMachine>();
+ AtomicExpandImpl AE;
+ return AE.run(F, TM);
+}
+
+FunctionPass *llvm::createAtomicExpandLegacyPass() {
+ return new AtomicExpandLegacy();
+}
+
+PreservedAnalyses AtomicExpandPass::run(Function &F,
+ FunctionAnalysisManager &AM) {
+ AtomicExpandImpl AE;
+
+ bool Changed = AE.run(F, TM);
+ if (!Changed)
+ return PreservedAnalyses::all();
+
+ return PreservedAnalyses::none();
+}
+
+bool AtomicExpandImpl::bracketInstWithFences(Instruction *I,
+ AtomicOrdering Order) {
ReplacementIRBuilder Builder(I, *DL);
auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);
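Note: with the legacy wrapper and the new-pass-manager entry point above, the expansion can now also be scheduled from a FunctionPassManager. A rough usage sketch, assuming a valid TargetMachine pointer and that llvm/CodeGen/AtomicExpand.h declares an AtomicExpandPass constructor taking the TargetMachine used by the run() method shown above:

#include "llvm/CodeGen/AtomicExpand.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Target/TargetMachine.h"

// Run atomic expansion on a single function under the new pass manager.
static void expandAtomics(llvm::Function &F, llvm::TargetMachine *TM) {
  llvm::PassBuilder PB(TM);
  llvm::FunctionAnalysisManager FAM;
  PB.registerFunctionAnalyses(FAM); // make the standard analyses available

  llvm::FunctionPassManager FPM;
  FPM.addPass(llvm::AtomicExpandPass(TM)); // pass keeps the TargetMachine handle
  FPM.run(F, FAM);
}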
@@ -355,8 +390,8 @@ bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order) {
}
/// Get the iX type with the same bitwidth as T.
-IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
- const DataLayout &DL) {
+IntegerType *
+AtomicExpandImpl::getCorrespondingIntegerType(Type *T, const DataLayout &DL) {
EVT VT = TLI->getMemValueType(DL, T);
unsigned BitWidth = VT.getStoreSizeInBits();
assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
@@ -366,7 +401,7 @@ IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
/// Convert an atomic load of a non-integral type to an integer load of the
/// equivalent bitwidth. See the function comment on
/// convertAtomicStoreToIntegerType for background.
-LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
+LoadInst *AtomicExpandImpl::convertAtomicLoadToIntegerType(LoadInst *LI) {
auto *M = LI->getModule();
Type *NewTy = getCorrespondingIntegerType(LI->getType(), M->getDataLayout());
@@ -387,7 +422,7 @@ LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
}
AtomicRMWInst *
-AtomicExpand::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
+AtomicExpandImpl::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
auto *M = RMWI->getModule();
Type *NewTy =
getCorrespondingIntegerType(RMWI->getType(), M->getDataLayout());
@@ -400,9 +435,9 @@ AtomicExpand::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
? Builder.CreatePtrToInt(Val, NewTy)
: Builder.CreateBitCast(Val, NewTy);
- auto *NewRMWI =
- Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, Addr, NewVal,
- RMWI->getAlign(), RMWI->getOrdering());
+ auto *NewRMWI = Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, Addr, NewVal,
+ RMWI->getAlign(), RMWI->getOrdering(),
+ RMWI->getSyncScopeID());
NewRMWI->setVolatile(RMWI->isVolatile());
LLVM_DEBUG(dbgs() << "Replaced " << *RMWI << " with " << *NewRMWI << "\n");
@@ -414,7 +449,7 @@ AtomicExpand::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
return NewRMWI;
}
-bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
+bool AtomicExpandImpl::tryExpandAtomicLoad(LoadInst *LI) {
switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
case TargetLoweringBase::AtomicExpansionKind::None:
return false;
@@ -436,7 +471,7 @@ bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
}
}
-bool AtomicExpand::tryExpandAtomicStore(StoreInst *SI) {
+bool AtomicExpandImpl::tryExpandAtomicStore(StoreInst *SI) {
switch (TLI->shouldExpandAtomicStoreInIR(SI)) {
case TargetLoweringBase::AtomicExpansionKind::None:
return false;
@@ -451,7 +486,7 @@ bool AtomicExpand::tryExpandAtomicStore(StoreInst *SI) {
}
}
-bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
+bool AtomicExpandImpl::expandAtomicLoadToLL(LoadInst *LI) {
ReplacementIRBuilder Builder(LI, *DL);
// On some architectures, load-linked instructions are atomic for larger
@@ -467,7 +502,7 @@ bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
return true;
}
-bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
+bool AtomicExpandImpl::expandAtomicLoadToCmpXchg(LoadInst *LI) {
ReplacementIRBuilder Builder(LI, *DL);
AtomicOrdering Order = LI->getOrdering();
if (Order == AtomicOrdering::Unordered)
@@ -496,7 +531,7 @@ bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
/// instruction select from the original atomic store, but as a migration
/// mechanism, we convert back to the old format which the backends understand.
/// Each backend will need individual work to recognize the new format.
-StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
+StoreInst *AtomicExpandImpl::convertAtomicStoreToIntegerType(StoreInst *SI) {
ReplacementIRBuilder Builder(SI, *DL);
auto *M = SI->getModule();
Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
@@ -514,7 +549,7 @@ StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
return NewSI;
}
-void AtomicExpand::expandAtomicStore(StoreInst *SI) {
+void AtomicExpandImpl::expandAtomicStore(StoreInst *SI) {
// This function is only called on atomic stores that are too large to be
// atomic if implemented as a native store. So we replace them by an
// atomic swap, that can be implemented for example as a ldrex/strex on ARM
@@ -542,9 +577,9 @@ static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
Value *&Success, Value *&NewLoaded) {
Type *OrigTy = NewVal->getType();
- // This code can go away when cmpxchg supports FP types.
+ // This code can go away when cmpxchg supports FP and vector types.
assert(!OrigTy->isPointerTy());
- bool NeedBitcast = OrigTy->isFloatingPointTy();
+ bool NeedBitcast = OrigTy->isFloatingPointTy() || OrigTy->isVectorTy();
if (NeedBitcast) {
IntegerType *IntTy = Builder.getIntNTy(OrigTy->getPrimitiveSizeInBits());
NewVal = Builder.CreateBitCast(NewVal, IntTy);
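Note: the check above now bitcasts vector as well as FP operands to an integer of the same width before emitting the cmpxchg. As a plain-C++ illustration of the underlying cmpxchg-loop expansion (not LLVM code; names invented for the sketch), a float fetch-add built on an integer compare-exchange looks roughly like this:

#include <atomic>
#include <bit>
#include <cstdint>

// Emulate 'atomicrmw fadd float' with a 32-bit compare-exchange loop,
// bit-casting between float and uint32_t the way createCmpXchgInstFun does.
static float atomic_fadd(std::atomic<uint32_t> *Addr, float Operand) {
  uint32_t OldBits = Addr->load(std::memory_order_relaxed);
  for (;;) {
    float OldVal = std::bit_cast<float>(OldBits);
    uint32_t NewBits = std::bit_cast<uint32_t>(OldVal + Operand);
    // On failure, OldBits is refreshed with the currently stored value.
    if (Addr->compare_exchange_weak(OldBits, NewBits))
      return OldVal;
  }
}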
@@ -561,7 +596,7 @@ static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
NewLoaded = Builder.CreateBitCast(NewLoaded, OrigTy);
}
-bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
+bool AtomicExpandImpl::tryExpandAtomicRMW(AtomicRMWInst *AI) {
LLVMContext &Ctx = AI->getModule()->getContext();
TargetLowering::AtomicExpansionKind Kind = TLI->shouldExpandAtomicRMWInIR(AI);
switch (Kind) {
@@ -607,6 +642,17 @@ bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
return true;
}
case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic: {
+ unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
+ unsigned ValueSize = getAtomicOpSize(AI);
+ if (ValueSize < MinCASSize) {
+ AtomicRMWInst::BinOp Op = AI->getOperation();
+ // Widen And/Or/Xor and give the target another chance at expanding it.
+ if (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
+ Op == AtomicRMWInst::And) {
+ tryExpandAtomicRMW(widenPartwordAtomicRMW(AI));
+ return true;
+ }
+ }
expandAtomicRMWToMaskedIntrinsic(AI);
return true;
}
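Note: the MaskedIntrinsic path above first widens sub-word and/or/xor operations to the minimum cmpxchg width and re-runs expansion. A hedged plain-C++ sketch of what that widening amounts to for a byte-sized operation on a little-endian target (helper names invented; the real pass transforms IR rather than raw pointers):

#include <atomic>
#include <cstdint>

// Byte-wide atomic OR done as a 32-bit atomic on the containing aligned word.
// OR needs no mask: zero bits outside the byte leave the rest untouched.
static uint8_t widened_or_u8(uint8_t *Addr, uint8_t Val) {
  auto *Word = reinterpret_cast<std::atomic<uint32_t> *>(
      reinterpret_cast<uintptr_t>(Addr) & ~uintptr_t(3));
  unsigned Shift = (reinterpret_cast<uintptr_t>(Addr) & 3) * 8;
  uint32_t Old = Word->fetch_or(uint32_t(Val) << Shift);
  return uint8_t(Old >> Shift);
}

// Byte-wide atomic AND: bits outside the byte must stay 1 to be preserved,
// mirroring the CreateOr(ValOperand_Shifted, Inv_Mask) step in
// widenPartwordAtomicRMW further down.
static uint8_t widened_and_u8(uint8_t *Addr, uint8_t Val) {
  auto *Word = reinterpret_cast<std::atomic<uint32_t> *>(
      reinterpret_cast<uintptr_t>(Addr) & ~uintptr_t(3));
  unsigned Shift = (reinterpret_cast<uintptr_t>(Addr) & 3) * 8;
  uint32_t Mask = uint32_t(0xFF) << Shift;
  uint32_t Old = Word->fetch_and((uint32_t(Val) << Shift) | ~Mask);
  return uint8_t(Old >> Shift);
}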
@@ -700,7 +746,7 @@ static PartwordMaskValues createMaskInstrs(IRBuilderBase &Builder,
unsigned ValueSize = DL.getTypeStoreSize(ValueType);
PMV.ValueType = PMV.IntValueType = ValueType;
- if (PMV.ValueType->isFloatingPointTy())
+ if (PMV.ValueType->isFloatingPointTy() || PMV.ValueType->isVectorTy())
PMV.IntValueType =
Type::getIntNTy(Ctx, ValueType->getPrimitiveSizeInBits());
@@ -719,7 +765,7 @@ static PartwordMaskValues createMaskInstrs(IRBuilderBase &Builder,
assert(ValueSize < MinWordSize);
PointerType *PtrTy = cast<PointerType>(Addr->getType());
- IntegerType *IntTy = DL.getIntPtrType(Ctx, PtrTy->getAddressSpace());
+ IntegerType *IntTy = DL.getIndexType(Ctx, PtrTy->getAddressSpace());
Value *PtrLSB;
if (AddrAlign < MinWordSize) {
@@ -843,8 +889,15 @@ static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
/// way as a typical atomicrmw expansion. The only difference here is
/// that the operation inside of the loop may operate upon only a
/// part of the value.
-void AtomicExpand::expandPartwordAtomicRMW(
+void AtomicExpandImpl::expandPartwordAtomicRMW(
AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
+ // Widen And/Or/Xor and give the target another chance at expanding it.
+ AtomicRMWInst::BinOp Op = AI->getOperation();
+ if (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
+ Op == AtomicRMWInst::And) {
+ tryExpandAtomicRMW(widenPartwordAtomicRMW(AI));
+ return;
+ }
AtomicOrdering MemOpOrder = AI->getOrdering();
SyncScope::ID SSID = AI->getSyncScopeID();
@@ -855,18 +908,17 @@ void AtomicExpand::expandPartwordAtomicRMW(
AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);
Value *ValOperand_Shifted = nullptr;
- if (AI->getOperation() == AtomicRMWInst::Xchg ||
- AI->getOperation() == AtomicRMWInst::Add ||
- AI->getOperation() == AtomicRMWInst::Sub ||
- AI->getOperation() == AtomicRMWInst::Nand) {
+ if (Op == AtomicRMWInst::Xchg || Op == AtomicRMWInst::Add ||
+ Op == AtomicRMWInst::Sub || Op == AtomicRMWInst::Nand) {
+ Value *ValOp = Builder.CreateBitCast(AI->getValOperand(), PMV.IntValueType);
ValOperand_Shifted =
- Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
- PMV.ShiftAmt, "ValOperand_Shifted");
+ Builder.CreateShl(Builder.CreateZExt(ValOp, PMV.WordType), PMV.ShiftAmt,
+ "ValOperand_Shifted");
}
auto PerformPartwordOp = [&](IRBuilderBase &Builder, Value *Loaded) {
- return performMaskedAtomicOp(AI->getOperation(), Builder, Loaded,
- ValOperand_Shifted, AI->getValOperand(), PMV);
+ return performMaskedAtomicOp(Op, Builder, Loaded, ValOperand_Shifted,
+ AI->getValOperand(), PMV);
};
Value *OldResult;
@@ -886,8 +938,38 @@ void AtomicExpand::expandPartwordAtomicRMW(
AI->eraseFromParent();
}
+/// Copy metadata that's safe to preserve when widening atomics.
+static void copyMetadataForAtomic(Instruction &Dest,
+ const Instruction &Source) {
+ SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
+ Source.getAllMetadata(MD);
+ LLVMContext &Ctx = Dest.getContext();
+ MDBuilder MDB(Ctx);
+
+ for (auto [ID, N] : MD) {
+ switch (ID) {
+ case LLVMContext::MD_dbg:
+ case LLVMContext::MD_tbaa:
+ case LLVMContext::MD_tbaa_struct:
+ case LLVMContext::MD_alias_scope:
+ case LLVMContext::MD_noalias:
+ case LLVMContext::MD_access_group:
+ case LLVMContext::MD_mmra:
+ Dest.setMetadata(ID, N);
+ break;
+ default:
+ if (ID == Ctx.getMDKindID("amdgpu.no.remote.memory"))
+ Dest.setMetadata(ID, N);
+ else if (ID == Ctx.getMDKindID("amdgpu.no.fine.grained.memory"))
+ Dest.setMetadata(ID, N);
+
+ break;
+ }
+ }
+}
+
// Widen the bitwise atomicrmw (or/xor/and) to the minimum supported width.
-AtomicRMWInst *AtomicExpand::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
+AtomicRMWInst *AtomicExpandImpl::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
ReplacementIRBuilder Builder(AI, *DL);
AtomicRMWInst::BinOp Op = AI->getOperation();
@@ -907,14 +989,15 @@ AtomicRMWInst *AtomicExpand::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
if (Op == AtomicRMWInst::And)
NewOperand =
- Builder.CreateOr(PMV.Inv_Mask, ValOperand_Shifted, "AndOperand");
+ Builder.CreateOr(ValOperand_Shifted, PMV.Inv_Mask, "AndOperand");
else
NewOperand = ValOperand_Shifted;
AtomicRMWInst *NewAI = Builder.CreateAtomicRMW(
Op, PMV.AlignedAddr, NewOperand, PMV.AlignedAddrAlignment,
AI->getOrdering(), AI->getSyncScopeID());
- // TODO: Preserve metadata
+
+ copyMetadataForAtomic(*NewAI, *AI);
Value *FinalOldResult = extractMaskedValue(Builder, NewAI, PMV);
AI->replaceAllUsesWith(FinalOldResult);
@@ -922,7 +1005,7 @@ AtomicRMWInst *AtomicExpand::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
return NewAI;
}
-bool AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
+bool AtomicExpandImpl::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
// The basic idea here is that we're expanding a cmpxchg of a
// smaller memory size up to a word-sized cmpxchg. To do this, we
// need to add a retry-loop for strong cmpxchg, so that
@@ -1047,7 +1130,7 @@ bool AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
return true;
}
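Note: a hedged plain-C++ illustration of the retry loop described in the expandPartwordCmpXchg comment above: a strong byte-sized compare-exchange built from a word-sized one, retrying whenever only the surrounding bytes changed (names invented; little-endian assumed):

#include <atomic>
#include <cstdint>

static bool cmpxchg_u8_via_u32(uint8_t *Addr, uint8_t &Expected, uint8_t Desired) {
  auto *Word = reinterpret_cast<std::atomic<uint32_t> *>(
      reinterpret_cast<uintptr_t>(Addr) & ~uintptr_t(3));
  unsigned Shift = (reinterpret_cast<uintptr_t>(Addr) & 3) * 8;
  uint32_t Mask = uint32_t(0xFF) << Shift;

  uint32_t Full = Word->load(std::memory_order_relaxed);
  for (;;) {
    uint32_t Want = (Full & ~Mask) | (uint32_t(Expected) << Shift);
    uint32_t New = (Full & ~Mask) | (uint32_t(Desired) << Shift);
    if (Word->compare_exchange_strong(Want, New))
      return true; // the target byte matched and was replaced
    // Want now holds the observed word. If the byte itself differed, the
    // cmpxchg genuinely failed; otherwise only neighbouring bytes changed.
    if ((Want & Mask) != (uint32_t(Expected) << Shift)) {
      Expected = uint8_t((Want & Mask) >> Shift);
      return false;
    }
    Full = Want; // retry with the updated surrounding bytes
  }
}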
-void AtomicExpand::expandAtomicOpToLLSC(
+void AtomicExpandImpl::expandAtomicOpToLLSC(
Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign,
AtomicOrdering MemOpOrder,
function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
@@ -1059,7 +1142,7 @@ void AtomicExpand::expandAtomicOpToLLSC(
I->eraseFromParent();
}
-void AtomicExpand::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
+void AtomicExpandImpl::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
ReplacementIRBuilder Builder(AI, *DL);
PartwordMaskValues PMV =
@@ -1085,7 +1168,8 @@ void AtomicExpand::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
AI->eraseFromParent();
}
-void AtomicExpand::expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI) {
+void AtomicExpandImpl::expandAtomicCmpXchgToMaskedIntrinsic(
+ AtomicCmpXchgInst *CI) {
ReplacementIRBuilder Builder(CI, *DL);
PartwordMaskValues PMV = createMaskInstrs(
@@ -1112,7 +1196,7 @@ void AtomicExpand::expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI) {
CI->eraseFromParent();
}
-Value *AtomicExpand::insertRMWLLSCLoop(
+Value *AtomicExpandImpl::insertRMWLLSCLoop(
IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
AtomicOrdering MemOpOrder,
function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
@@ -1121,7 +1205,7 @@ Value *AtomicExpand::insertRMWLLSCLoop(
Function *F = BB->getParent();
assert(AddrAlign >=
- F->getParent()->getDataLayout().getTypeStoreSize(ResultTy) &&
+ F->getDataLayout().getTypeStoreSize(ResultTy) &&
"Expected at least natural alignment at this point.");
// Given: atomicrmw some_op iN* %addr, iN %incr ordering
@@ -1168,7 +1252,7 @@ Value *AtomicExpand::insertRMWLLSCLoop(
/// way to represent a pointer cmpxchg so that we can update backends one by
/// one.
AtomicCmpXchgInst *
-AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
+AtomicExpandImpl::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
auto *M = CI->getModule();
Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
M->getDataLayout());
@@ -1201,7 +1285,7 @@ AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
return NewCI;
}
-bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
+bool AtomicExpandImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
AtomicOrdering FailureOrder = CI->getFailureOrdering();
Value *Addr = CI->getPointerOperand();
@@ -1447,7 +1531,7 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
return true;
}
-bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
+bool AtomicExpandImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {
auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
if (!C)
return false;
@@ -1467,7 +1551,7 @@ bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
}
}
-bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
+bool AtomicExpandImpl::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
tryExpandAtomicLoad(ResultingLoad);
return true;
@@ -1475,7 +1559,7 @@ bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
return false;
}
-Value *AtomicExpand::insertRMWCmpXchgLoop(
+Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
AtomicOrdering MemOpOrder, SyncScope::ID SSID,
function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
@@ -1536,7 +1620,7 @@ Value *AtomicExpand::insertRMWCmpXchgLoop(
return NewLoaded;
}
-bool AtomicExpand::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
+bool AtomicExpandImpl::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
unsigned ValueSize = getAtomicOpSize(CI);
@@ -1561,13 +1645,13 @@ bool AtomicExpand::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
// Note: This function is exposed externally by AtomicExpandUtils.h
bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
CreateCmpXchgInstFun CreateCmpXchg) {
- ReplacementIRBuilder Builder(AI, AI->getModule()->getDataLayout());
+ ReplacementIRBuilder Builder(AI, AI->getDataLayout());
Builder.setIsFPConstrained(
AI->getFunction()->hasFnAttribute(Attribute::StrictFP));
// FIXME: If FP exceptions are observable, we should force them off for the
// loop for the FP atomics.
- Value *Loaded = AtomicExpand::insertRMWCmpXchgLoop(
+ Value *Loaded = AtomicExpandImpl::insertRMWCmpXchgLoop(
Builder, AI->getType(), AI->getPointerOperand(), AI->getAlign(),
AI->getOrdering(), AI->getSyncScopeID(),
[&](IRBuilderBase &Builder, Value *Loaded) {
@@ -1601,7 +1685,7 @@ static bool canUseSizedAtomicCall(unsigned Size, Align Alignment,
Size <= LargestSize;
}
-void AtomicExpand::expandAtomicLoadToLibcall(LoadInst *I) {
+void AtomicExpandImpl::expandAtomicLoadToLibcall(LoadInst *I) {
static const RTLIB::Libcall Libcalls[6] = {
RTLIB::ATOMIC_LOAD, RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
@@ -1614,7 +1698,7 @@ void AtomicExpand::expandAtomicLoadToLibcall(LoadInst *I) {
report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Load");
}
-void AtomicExpand::expandAtomicStoreToLibcall(StoreInst *I) {
+void AtomicExpandImpl::expandAtomicStoreToLibcall(StoreInst *I) {
static const RTLIB::Libcall Libcalls[6] = {
RTLIB::ATOMIC_STORE, RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
@@ -1627,7 +1711,7 @@ void AtomicExpand::expandAtomicStoreToLibcall(StoreInst *I) {
report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Store");
}
-void AtomicExpand::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
+void AtomicExpandImpl::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
static const RTLIB::Libcall Libcalls[6] = {
RTLIB::ATOMIC_COMPARE_EXCHANGE, RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
@@ -1705,7 +1789,7 @@ static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
llvm_unreachable("Unexpected AtomicRMW operation.");
}
-void AtomicExpand::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
+void AtomicExpandImpl::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation());
unsigned Size = getAtomicOpSize(I);
@@ -1744,7 +1828,7 @@ void AtomicExpand::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
// ATOMIC libcalls to be emitted. All of the other arguments besides
// 'I' are extracted from the Instruction subclass by the
// caller. Depending on the particular call, some will be null.
-bool AtomicExpand::expandAtomicOpToLibcall(
+bool AtomicExpandImpl::expandAtomicOpToLibcall(
Instruction *I, unsigned Size, Align Alignment, Value *PointerOperand,
Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {
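Note: the RTLIB::ATOMIC_* entries used by these expansions correspond to the __atomic_* support routines of the atomic library ABI. A hedged sketch of the generic (size-parameterised) entry points; consult the libatomic/compiler-rt documentation for the authoritative signatures:

#include <cstddef>

// Generic __atomic_* libcalls; 'memorder' carries the C11 memory_order value.
extern "C" {
void __atomic_load(size_t size, void *ptr, void *ret, int memorder);
void __atomic_store(size_t size, void *ptr, void *val, int memorder);
void __atomic_exchange(size_t size, void *ptr, void *val, void *ret, int memorder);
bool __atomic_compare_exchange(size_t size, void *ptr, void *expected,
                               void *desired, int success_memorder,
                               int failure_memorder);
}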