about summary refs log tree commit diff
path: root/llvm/lib/MCA
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/lib/MCA')
-rw-r--r--  llvm/lib/MCA/HardwareUnits/LSUnit.cpp    | 13
-rw-r--r--  llvm/lib/MCA/Stages/DispatchStage.cpp    |  2
-rw-r--r--  llvm/lib/MCA/Stages/InOrderIssueStage.cpp |  2
3 files changed, 9 insertions, 8 deletions
diff --git a/llvm/lib/MCA/HardwareUnits/LSUnit.cpp b/llvm/lib/MCA/HardwareUnits/LSUnit.cpp
index 07be7b077bc9..121d320f10e6 100644
--- a/llvm/lib/MCA/HardwareUnits/LSUnit.cpp
+++ b/llvm/lib/MCA/HardwareUnits/LSUnit.cpp
@@ -68,7 +68,8 @@ void LSUnitBase::dump() const {
unsigned LSUnit::dispatch(const InstRef &IR) {
const InstrDesc &Desc = IR.getInstruction()->getDesc();
- unsigned IsMemBarrier = Desc.HasSideEffects;
+ bool IsStoreBarrier = IR.getInstruction()->isAStoreBarrier();
+ bool IsLoadBarrier = IR.getInstruction()->isALoadBarrier();
assert((Desc.MayLoad || Desc.MayStore) && "Not a memory operation!");
if (Desc.MayLoad)
@@ -111,12 +112,12 @@ unsigned LSUnit::dispatch(const InstRef &IR) {
CurrentStoreGroupID = NewGID;
- if (IsMemBarrier)
+ if (IsStoreBarrier)
CurrentStoreBarrierGroupID = NewGID;
if (Desc.MayLoad) {
CurrentLoadGroupID = NewGID;
- if (IsMemBarrier)
+ if (IsLoadBarrier)
CurrentLoadBarrierGroupID = NewGID;
}
@@ -141,7 +142,7 @@ unsigned LSUnit::dispatch(const InstRef &IR) {
// However that group has already started execution, so we cannot add
// this load to it.
bool ShouldCreateANewGroup =
- IsMemBarrier || !ImmediateLoadDominator ||
+ IsLoadBarrier || !ImmediateLoadDominator ||
CurrentLoadBarrierGroupID == ImmediateLoadDominator ||
ImmediateLoadDominator <= CurrentStoreGroupID ||
getGroup(ImmediateLoadDominator).isExecuting();
@@ -161,7 +162,7 @@ unsigned LSUnit::dispatch(const InstRef &IR) {
}
// A load barrier may not pass a previous load or load barrier.
- if (IsMemBarrier) {
+ if (IsLoadBarrier) {
if (ImmediateLoadDominator) {
MemoryGroup &LoadGroup = getGroup(ImmediateLoadDominator);
LLVM_DEBUG(dbgs() << "[LSUnit]: GROUP DEP: ("
@@ -181,7 +182,7 @@ unsigned LSUnit::dispatch(const InstRef &IR) {
}
CurrentLoadGroupID = NewGID;
- if (IsMemBarrier)
+ if (IsLoadBarrier)
CurrentLoadBarrierGroupID = NewGID;
return NewGID;
}
diff --git a/llvm/lib/MCA/Stages/DispatchStage.cpp b/llvm/lib/MCA/Stages/DispatchStage.cpp
index 5385142698e6..66228bd5a862 100644
--- a/llvm/lib/MCA/Stages/DispatchStage.cpp
+++ b/llvm/lib/MCA/Stages/DispatchStage.cpp
@@ -30,7 +30,7 @@ DispatchStage::DispatchStage(const MCSubtargetInfo &Subtarget,
unsigned MaxDispatchWidth, RetireControlUnit &R,
RegisterFile &F)
: DispatchWidth(MaxDispatchWidth), AvailableEntries(MaxDispatchWidth),
- CarryOver(0U), CarriedOver(), STI(Subtarget), RCU(R), PRF(F) {
+ CarryOver(0U), STI(Subtarget), RCU(R), PRF(F) {
if (!DispatchWidth)
DispatchWidth = Subtarget.getSchedModel().IssueWidth;
}
diff --git a/llvm/lib/MCA/Stages/InOrderIssueStage.cpp b/llvm/lib/MCA/Stages/InOrderIssueStage.cpp
index fa5c0fc66b9e..abfbc80f17c9 100644
--- a/llvm/lib/MCA/Stages/InOrderIssueStage.cpp
+++ b/llvm/lib/MCA/Stages/InOrderIssueStage.cpp
@@ -47,7 +47,7 @@ InOrderIssueStage::InOrderIssueStage(const MCSubtargetInfo &STI,
RegisterFile &PRF, CustomBehaviour &CB,
LSUnit &LSU)
: STI(STI), PRF(PRF), RM(STI.getSchedModel()), CB(CB), LSU(LSU),
- NumIssued(), SI(), CarryOver(), Bandwidth(), LastWriteBackCycle() {}
+ NumIssued(), CarryOver(), Bandwidth(), LastWriteBackCycle() {}
unsigned InOrderIssueStage::getIssueWidth() const {
return STI.getSchedModel().IssueWidth;