Diffstat (limited to 'contrib/llvm/lib/CodeGen/GlobalISel')
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp        |   3
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/Combiner.cpp            |  81
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp      |  41
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp        | 606
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp   | 106
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp |  44
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp  | 101
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp   |  51
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp           |  11
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp     | 643
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp       | 245
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/Localizer.cpp           |  23
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp    | 638
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp       |  67
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/RegisterBank.cpp        |   1
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp    |  41
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalISel/Utils.cpp               |  99
17 files changed, 1937 insertions(+), 864 deletions(-)
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 114c068749eb..07de31bec660 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -38,6 +38,9 @@ bool CallLowering::lowerCall(
ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
i < NumFixedArgs};
setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CS);
+ // We don't currently support swifterror or swiftself args.
+ if (OrigArg.Flags.isSwiftError() || OrigArg.Flags.isSwiftSelf())
+ return false;
OrigArgs.push_back(OrigArg);
++i;
}
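
Returning false from CallLowering::lowerCall reports the call as unsupported, which lets the pipeline fall back to SelectionDAG instead of miscompiling Swift calling-convention arguments. A minimal sketch of the same guard pattern, assuming only the ISD::ArgFlagsTy accessors used above (argIsSupported is a hypothetical helper, not part of the diff):

    // Hypothetical helper: reject argument flags the port cannot handle yet.
    // Bailing out of lowerCall with `false` triggers the SelectionDAG fallback.
    static bool argIsSupported(const ISD::ArgFlagsTy &Flags) {
      return !Flags.isSwiftError() && !Flags.isSwiftSelf();
    }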
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/Combiner.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/Combiner.cpp
new file mode 100644
index 000000000000..0bc5b87de150
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/Combiner.cpp
@@ -0,0 +1,81 @@
+//===-- lib/CodeGen/GlobalISel/Combiner.cpp ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains common code to combine machine functions at the generic
+// level.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/Combiner.h"
+#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
+#include "llvm/CodeGen/GlobalISel/GISelWorkList.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "gi-combiner"
+
+using namespace llvm;
+
+Combiner::Combiner(CombinerInfo &Info, const TargetPassConfig *TPC)
+ : CInfo(Info), TPC(TPC) {
+ (void)this->TPC; // FIXME: Remove when used.
+}
+
+bool Combiner::combineMachineInstrs(MachineFunction &MF) {
+ // If the ISel pipeline failed, do not bother running this pass.
+ // FIXME: Should this be here or in individual combiner passes?
+ if (MF.getProperties().hasProperty(
+ MachineFunctionProperties::Property::FailedISel))
+ return false;
+
+ MRI = &MF.getRegInfo();
+ Builder.setMF(MF);
+
+ LLVM_DEBUG(dbgs() << "Generic MI Combiner for: " << MF.getName() << '\n');
+
+ MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr);
+
+ bool MFChanged = false;
+ bool Changed;
+
+ do {
+ // Collect all instructions. Do a post-order traversal of the basic blocks
+ // and insert into the work list bottom-up, so that pop_back_val visits the
+ // instructions in top-down RPO order.
+ Changed = false;
+ GISelWorkList<512> WorkList;
+ for (MachineBasicBlock *MBB : post_order(&MF)) {
+ if (MBB->empty())
+ continue;
+ for (auto MII = MBB->rbegin(), MIE = MBB->rend(); MII != MIE;) {
+ MachineInstr *CurMI = &*MII;
+ ++MII;
+ // Erase dead insts before even adding to the list.
+ if (isTriviallyDead(*CurMI, *MRI)) {
+ LLVM_DEBUG(dbgs() << *CurMI << "Is dead; erasing.\n");
+ CurMI->eraseFromParentAndMarkDBGValuesForRemoval();
+ continue;
+ }
+ WorkList.insert(CurMI);
+ }
+ }
+ // Main Loop. Process the instructions here.
+ while (!WorkList.empty()) {
+ MachineInstr *CurrInst = WorkList.pop_back_val();
+ LLVM_DEBUG(dbgs() << "Try combining " << *CurrInst << "\n";);
+ Changed |= CInfo.combine(*CurrInst, Builder);
+ }
+ MFChanged |= Changed;
+ } while (Changed);
+
+ return MFChanged;
+}
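
The main loop above hands every instruction to CInfo.combine(). As an illustration, a minimal CombinerInfo could simply delegate to CombinerHelper; this sketch assumes only the virtual combine(MachineInstr &, MachineIRBuilder &) hook exercised above, MyCombinerInfo is a hypothetical name, and any base-class constructor arguments are omitted:

    // Minimal sketch of a CombinerInfo implementation (hypothetical).
    class MyCombinerInfo : public CombinerInfo {
    public:
      bool combine(MachineInstr &MI, MachineIRBuilder &B) const override {
        CombinerHelper Helper(B);     // see CombinerHelper.cpp below
        return Helper.tryCombine(MI); // currently just copy propagation
      }
    };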
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
new file mode 100644
index 000000000000..44e904a6391b
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -0,0 +1,41 @@
+//===- lib/CodeGen/GlobalISel/CombinerHelper.cpp -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+#define DEBUG_TYPE "gi-combine"
+
+using namespace llvm;
+
+CombinerHelper::CombinerHelper(MachineIRBuilder &B) :
+ Builder(B), MRI(Builder.getMF().getRegInfo()) {}
+
+bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
+ if (MI.getOpcode() != TargetOpcode::COPY)
+ return false;
+ unsigned DstReg = MI.getOperand(0).getReg();
+ unsigned SrcReg = MI.getOperand(1).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+ LLT SrcTy = MRI.getType(SrcReg);
+ // Simple Copy Propagation.
+ // a(sx) = COPY b(sx) -> Replace all uses of a with b.
+ if (DstTy.isValid() && SrcTy.isValid() && DstTy == SrcTy) {
+ MI.eraseFromParent();
+ MRI.replaceRegWith(DstReg, SrcReg);
+ return true;
+ }
+ return false;
+}
+
+bool CombinerHelper::tryCombine(MachineInstr &MI) {
+ return tryCombineCopy(MI);
+}
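
Concretely, the rule erases the COPY and rewrites every use of its destination register to its source whenever both carry the same LLT. A before/after sketch in generic MIR (vreg numbers hypothetical):

    // Before:
    //   %1:_(s32) = COPY %0:_(s32)
    //   %2:_(s32) = G_ADD %1:_(s32), %1:_(s32)
    // After tryCombineCopy (the COPY is gone, %1 is replaced by %0):
    //   %2:_(s32) = G_ADD %0:_(s32), %0:_(s32)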
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index a329a71e2c95..bafb7a05536d 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -26,6 +26,7 @@
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
@@ -102,37 +103,103 @@ IRTranslator::IRTranslator() : MachineFunctionPass(ID) {
}
void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<StackProtector>();
AU.addRequired<TargetPassConfig>();
+ getSelectionDAGFallbackAnalysisUsage(AU);
MachineFunctionPass::getAnalysisUsage(AU);
}
-unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
- unsigned &ValReg = ValToVReg[&Val];
+static void computeValueLLTs(const DataLayout &DL, Type &Ty,
+ SmallVectorImpl<LLT> &ValueTys,
+ SmallVectorImpl<uint64_t> *Offsets = nullptr,
+ uint64_t StartingOffset = 0) {
+ // Given a struct type, recursively traverse the elements.
+ if (StructType *STy = dyn_cast<StructType>(&Ty)) {
+ const StructLayout *SL = DL.getStructLayout(STy);
+ for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
+ computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
+ StartingOffset + SL->getElementOffset(I));
+ return;
+ }
+ // Given an array type, recursively traverse the elements.
+ if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
+ Type *EltTy = ATy->getElementType();
+ uint64_t EltSize = DL.getTypeAllocSize(EltTy);
+ for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
+ computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
+ StartingOffset + i * EltSize);
+ return;
+ }
+ // Interpret void as zero return values.
+ if (Ty.isVoidTy())
+ return;
+ // Base case: we can get an LLT for this LLVM IR type.
+ ValueTys.push_back(getLLTForType(Ty, DL));
+ if (Offsets != nullptr)
+ Offsets->push_back(StartingOffset * 8);
+}
+
+IRTranslator::ValueToVRegInfo::VRegListT &
+IRTranslator::allocateVRegs(const Value &Val) {
+ assert(!VMap.contains(Val) && "Value already allocated in VMap");
+ auto *Regs = VMap.getVRegs(Val);
+ auto *Offsets = VMap.getOffsets(Val);
+ SmallVector<LLT, 4> SplitTys;
+ computeValueLLTs(*DL, *Val.getType(), SplitTys,
+ Offsets->empty() ? Offsets : nullptr);
+ for (unsigned i = 0; i < SplitTys.size(); ++i)
+ Regs->push_back(0);
+ return *Regs;
+}
+
+ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
+ auto VRegsIt = VMap.findVRegs(Val);
+ if (VRegsIt != VMap.vregs_end())
+ return *VRegsIt->second;
- if (ValReg)
- return ValReg;
+ if (Val.getType()->isVoidTy())
+ return *VMap.getVRegs(Val);
+
+ // Create entry for this type.
+ auto *VRegs = VMap.getVRegs(Val);
+ auto *Offsets = VMap.getOffsets(Val);
- // Fill ValRegsSequence with the sequence of registers
- // we need to concat together to produce the value.
assert(Val.getType()->isSized() &&
"Don't know how to create an empty vreg");
- unsigned VReg =
- MRI->createGenericVirtualRegister(getLLTForType(*Val.getType(), *DL));
- ValReg = VReg;
- if (auto CV = dyn_cast<Constant>(&Val)) {
- bool Success = translate(*CV, VReg);
+ SmallVector<LLT, 4> SplitTys;
+ computeValueLLTs(*DL, *Val.getType(), SplitTys,
+ Offsets->empty() ? Offsets : nullptr);
+
+ if (!isa<Constant>(Val)) {
+ for (auto Ty : SplitTys)
+ VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
+ return *VRegs;
+ }
+
+ if (Val.getType()->isAggregateType()) {
+ // UndefValue, ConstantAggregateZero
+ auto &C = cast<Constant>(Val);
+ unsigned Idx = 0;
+ while (auto Elt = C.getAggregateElement(Idx++)) {
+ auto EltRegs = getOrCreateVRegs(*Elt);
+ std::copy(EltRegs.begin(), EltRegs.end(), std::back_inserter(*VRegs));
+ }
+ } else {
+ assert(SplitTys.size() == 1 && "unexpectedly split LLT");
+ VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
+ bool Success = translate(cast<Constant>(Val), VRegs->front());
if (!Success) {
OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
MF->getFunction().getSubprogram(),
&MF->getFunction().getEntryBlock());
R << "unable to translate constant: " << ore::NV("Type", Val.getType());
reportTranslationError(*MF, *TPC, *ORE, R);
- return VReg;
+ return *VRegs;
}
}
- return VReg;
+ return *VRegs;
}
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
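
To make the new splitting concrete: computeValueLLTs flattens an aggregate into one LLT per leaf field, recording each field's offset in bits, and getOrCreateVRegs then allocates one generic vreg per entry. A worked example with a hypothetical struct (offsets assume a DataLayout where i64 is 8-byte aligned):

    // %t = type { i32, i64 }
    // computeValueLLTs(DL, Ty, ValueTys, &Offsets) produces:
    //   ValueTys = { s32, s64 }
    //   Offsets  = { 0, 64 }  // bit offsets; SL->getElementOffset(1) == 8 bytes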
@@ -164,6 +231,20 @@ unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
} else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
Alignment = LI->getAlignment();
ValTy = LI->getType();
+ } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
+ // TODO(PR27168): This instruction has no alignment attribute, but unlike
+ // the default alignment for load/store, the default here is to assume
+ // it has NATURAL alignment, not DataLayout-specified alignment.
+ const DataLayout &DL = AI->getModule()->getDataLayout();
+ Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
+ ValTy = AI->getCompareOperand()->getType();
+ } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
+ // TODO(PR27168): This instruction has no alignment attribute, but unlike
+ // the default alignment for load/store, the default here is to assume
+ // it has NATURAL alignment, not DataLayout-specified alignment.
+ const DataLayout &DL = AI->getModule()->getDataLayout();
+ Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
+ ValTy = AI->getType();
} else {
OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
R << "unable to translate memop: " << ore::NV("Opcode", &I);
@@ -243,7 +324,11 @@ bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
// The target may mess up with the insertion point, but
// this is not important as a return is the last instruction
// of the block anyway.
- return CLI->lowerReturn(MIRBuilder, Ret, !Ret ? 0 : getOrCreateVReg(*Ret));
+
+ // FIXME: this interface should simplify when CallLowering gets adapted to
+ // multiple VRegs per Value.
+ unsigned VReg = Ret ? packRegs(*Ret, MIRBuilder) : 0;
+ return CLI->lowerReturn(MIRBuilder, Ret, VReg);
}
bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
@@ -342,15 +427,23 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
if (DL->getTypeStoreSize(LI.getType()) == 0)
return true;
- unsigned Res = getOrCreateVReg(LI);
- unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
+ ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
+ ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
+ unsigned Base = getOrCreateVReg(*LI.getPointerOperand());
+
+ for (unsigned i = 0; i < Regs.size(); ++i) {
+ unsigned Addr = 0;
+ MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);
+
+ MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
+ unsigned BaseAlign = getMemOpAlignment(LI);
+ auto MMO = MF->getMachineMemOperand(
+ Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
+ MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
+ LI.getSyncScopeID(), LI.getOrdering());
+ MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
+ }
- MIRBuilder.buildLoad(
- Res, Addr,
- *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
- Flags, DL->getTypeStoreSize(LI.getType()),
- getMemOpAlignment(LI), AAMDNodes(), nullptr,
- LI.getSyncScopeID(), LI.getOrdering()));
return true;
}
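
With that representation, an aggregate load becomes one G_LOAD per component, addressed through a GEP materialized at Offsets[i] / 8 bytes and carrying a per-component MachineMemOperand whose alignment is MinAlign(base alignment, byte offset). Roughly, for the hypothetical { i32, i64 } example (vreg numbers invented, 64-bit pointers):

    // %v = load { i32, i64 }, { i32, i64 }* %p, align 8
    // translates to something like:
    //   %lo:_(s32)  = G_LOAD %p(p0)      :: (load 4 from %p, align 8)
    //   %off:_(s64) = G_CONSTANT i64 8
    //   %q:_(p0)    = G_GEP %p, %off
    //   %hi:_(s64)  = G_LOAD %q(p0)      :: (load 8 from %p + 8, align 8)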
@@ -363,50 +456,61 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
return true;
- unsigned Val = getOrCreateVReg(*SI.getValueOperand());
- unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
-
- MIRBuilder.buildStore(
- Val, Addr,
- *MF->getMachineMemOperand(
- MachinePointerInfo(SI.getPointerOperand()), Flags,
- DL->getTypeStoreSize(SI.getValueOperand()->getType()),
- getMemOpAlignment(SI), AAMDNodes(), nullptr, SI.getSyncScopeID(),
- SI.getOrdering()));
+ ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
+ ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
+ unsigned Base = getOrCreateVReg(*SI.getPointerOperand());
+
+ for (unsigned i = 0; i < Vals.size(); ++i) {
+ unsigned Addr = 0;
+ MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);
+
+ MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
+ unsigned BaseAlign = getMemOpAlignment(SI);
+ auto MMO = MF->getMachineMemOperand(
+ Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
+ MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
+ SI.getSyncScopeID(), SI.getOrdering());
+ MIRBuilder.buildStore(Vals[i], Addr, *MMO);
+ }
return true;
}
-bool IRTranslator::translateExtractValue(const User &U,
- MachineIRBuilder &MIRBuilder) {
+static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
const Value *Src = U.getOperand(0);
Type *Int32Ty = Type::getInt32Ty(U.getContext());
- SmallVector<Value *, 1> Indices;
-
- // If Src is a single element ConstantStruct, translate extractvalue
- // to that element to avoid inserting a cast instruction.
- if (auto CS = dyn_cast<ConstantStruct>(Src))
- if (CS->getNumOperands() == 1) {
- unsigned Res = getOrCreateVReg(*CS->getOperand(0));
- ValToVReg[&U] = Res;
- return true;
- }
// getIndexedOffsetInType is designed for GEPs, so the first index is the
// usual array element rather than looking into the actual aggregate.
+ SmallVector<Value *, 1> Indices;
Indices.push_back(ConstantInt::get(Int32Ty, 0));
if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
for (auto Idx : EVI->indices())
Indices.push_back(ConstantInt::get(Int32Ty, Idx));
+ } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
+ for (auto Idx : IVI->indices())
+ Indices.push_back(ConstantInt::get(Int32Ty, Idx));
} else {
for (unsigned i = 1; i < U.getNumOperands(); ++i)
Indices.push_back(U.getOperand(i));
}
- uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);
+ return 8 * static_cast<uint64_t>(
+ DL.getIndexedOffsetInType(Src->getType(), Indices));
+}
- unsigned Res = getOrCreateVReg(U);
- MIRBuilder.buildExtract(Res, getOrCreateVReg(*Src), Offset);
+bool IRTranslator::translateExtractValue(const User &U,
+ MachineIRBuilder &MIRBuilder) {
+ const Value *Src = U.getOperand(0);
+ uint64_t Offset = getOffsetFromIndices(U, *DL);
+ ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
+ ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
+ unsigned Idx = std::lower_bound(Offsets.begin(), Offsets.end(), Offset) -
+ Offsets.begin();
+ auto &DstRegs = allocateVRegs(U);
+
+ for (unsigned i = 0; i < DstRegs.size(); ++i)
+ DstRegs[i] = SrcRegs[Idx++];
return true;
}
@@ -414,37 +518,33 @@ bool IRTranslator::translateExtractValue(const User &U,
bool IRTranslator::translateInsertValue(const User &U,
MachineIRBuilder &MIRBuilder) {
const Value *Src = U.getOperand(0);
- Type *Int32Ty = Type::getInt32Ty(U.getContext());
- SmallVector<Value *, 1> Indices;
-
- // getIndexedOffsetInType is designed for GEPs, so the first index is the
- // usual array element rather than looking into the actual aggregate.
- Indices.push_back(ConstantInt::get(Int32Ty, 0));
-
- if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
- for (auto Idx : IVI->indices())
- Indices.push_back(ConstantInt::get(Int32Ty, Idx));
- } else {
- for (unsigned i = 2; i < U.getNumOperands(); ++i)
- Indices.push_back(U.getOperand(i));
+ uint64_t Offset = getOffsetFromIndices(U, *DL);
+ auto &DstRegs = allocateVRegs(U);
+ ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
+ ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
+ ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
+ auto InsertedIt = InsertedRegs.begin();
+
+ for (unsigned i = 0; i < DstRegs.size(); ++i) {
+ if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
+ DstRegs[i] = *InsertedIt++;
+ else
+ DstRegs[i] = SrcRegs[i];
}
- uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);
-
- unsigned Res = getOrCreateVReg(U);
- unsigned Inserted = getOrCreateVReg(*U.getOperand(1));
- MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), Inserted, Offset);
-
return true;
}
bool IRTranslator::translateSelect(const User &U,
MachineIRBuilder &MIRBuilder) {
- unsigned Res = getOrCreateVReg(U);
unsigned Tst = getOrCreateVReg(*U.getOperand(0));
- unsigned Op0 = getOrCreateVReg(*U.getOperand(1));
- unsigned Op1 = getOrCreateVReg(*U.getOperand(2));
- MIRBuilder.buildSelect(Res, Tst, Op0, Op1);
+ ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U);
+ ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
+ ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
+
+ for (unsigned i = 0; i < ResRegs.size(); ++i)
+ MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i]);
+
return true;
}
@@ -453,15 +553,16 @@ bool IRTranslator::translateBitCast(const User &U,
// If we're bitcasting to the source type, we can reuse the source vreg.
if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
getLLTForType(*U.getType(), *DL)) {
- // Get the source vreg now, to avoid invalidating ValToVReg.
unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
- unsigned &Reg = ValToVReg[&U];
+ auto &Regs = *VMap.getVRegs(U);
// If we already assigned a vreg for this bitcast, we can't change that.
// Emit a copy to satisfy the users we already emitted.
- if (Reg)
- MIRBuilder.buildCopy(Reg, SrcReg);
- else
- Reg = SrcReg;
+ if (!Regs.empty())
+ MIRBuilder.buildCopy(Regs[0], SrcReg);
+ else {
+ Regs.push_back(SrcReg);
+ VMap.getOffsets(U)->push_back(0);
+ }
return true;
}
return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
@@ -516,10 +617,6 @@ bool IRTranslator::translateGetElementPtr(const User &U,
Offset = 0;
}
- // N = N + Idx * ElementSize;
- unsigned ElementSizeReg =
- getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize));
-
unsigned IdxReg = getOrCreateVReg(*Idx);
if (MRI->getType(IdxReg) != OffsetTy) {
unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
@@ -527,11 +624,20 @@ bool IRTranslator::translateGetElementPtr(const User &U,
IdxReg = NewIdxReg;
}
- unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
- MIRBuilder.buildMul(OffsetReg, ElementSizeReg, IdxReg);
+ // N = N + Idx * ElementSize;
+ // Avoid doing it for ElementSize of 1.
+ unsigned GepOffsetReg;
+ if (ElementSize != 1) {
+ unsigned ElementSizeReg =
+ getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize));
+
+ GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
+ MIRBuilder.buildMul(GepOffsetReg, ElementSizeReg, IdxReg);
+ } else
+ GepOffsetReg = IdxReg;
unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
- MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
+ MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
BaseReg = NewBaseReg;
}
}
@@ -607,14 +713,10 @@ void IRTranslator::getStackGuard(unsigned DstReg,
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
MachineIRBuilder &MIRBuilder) {
- LLT Ty = getLLTForType(*CI.getOperand(0)->getType(), *DL);
- LLT s1 = LLT::scalar(1);
- unsigned Width = Ty.getSizeInBits();
- unsigned Res = MRI->createGenericVirtualRegister(Ty);
- unsigned Overflow = MRI->createGenericVirtualRegister(s1);
+ ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
auto MIB = MIRBuilder.buildInstr(Op)
- .addDef(Res)
- .addDef(Overflow)
+ .addDef(ResRegs[0])
+ .addDef(ResRegs[1])
.addUse(getOrCreateVReg(*CI.getOperand(0)))
.addUse(getOrCreateVReg(*CI.getOperand(1)));
@@ -624,7 +726,6 @@ bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
MIB.addUse(Zero);
}
- MIRBuilder.buildSequence(getOrCreateVReg(CI), {Res, Overflow}, {0, Width});
return true;
}
@@ -647,7 +748,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
const Value *Address = DI.getAddress();
if (!Address || isa<UndefValue>(Address)) {
- DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
+ LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
return true;
}
@@ -741,6 +842,11 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
.addDef(getOrCreateVReg(CI))
.addUse(getOrCreateVReg(*CI.getArgOperand(0)));
return true;
+ case Intrinsic::fabs:
+ MIRBuilder.buildInstr(TargetOpcode::G_FABS)
+ .addDef(getOrCreateVReg(CI))
+ .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
+ return true;
case Intrinsic::fma:
MIRBuilder.buildInstr(TargetOpcode::G_FMA)
.addDef(getOrCreateVReg(CI))
@@ -748,6 +854,25 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
.addUse(getOrCreateVReg(*CI.getArgOperand(1)))
.addUse(getOrCreateVReg(*CI.getArgOperand(2)));
return true;
+ case Intrinsic::fmuladd: {
+ const TargetMachine &TM = MF->getTarget();
+ const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
+ unsigned Dst = getOrCreateVReg(CI);
+ unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
+ unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
+ unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
+ if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
+ TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
+ // TODO: Revisit this to see if we should move this part of the
+ // lowering to the combiner.
+ MIRBuilder.buildInstr(TargetOpcode::G_FMA, Dst, Op0, Op1, Op2);
+ } else {
+ LLT Ty = getLLTForType(*CI.getType(), *DL);
+ auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, Ty, Op0, Op1);
+ MIRBuilder.buildInstr(TargetOpcode::G_FADD, Dst, FMul, Op2);
+ }
+ return true;
+ }
case Intrinsic::memcpy:
case Intrinsic::memmove:
case Intrinsic::memset:
@@ -807,6 +932,34 @@ bool IRTranslator::translateInlineAsm(const CallInst &CI,
return true;
}
+unsigned IRTranslator::packRegs(const Value &V,
+ MachineIRBuilder &MIRBuilder) {
+ ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
+ ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
+ LLT BigTy = getLLTForType(*V.getType(), *DL);
+
+ if (Regs.size() == 1)
+ return Regs[0];
+
+ unsigned Dst = MRI->createGenericVirtualRegister(BigTy);
+ MIRBuilder.buildUndef(Dst);
+ for (unsigned i = 0; i < Regs.size(); ++i) {
+ unsigned NewDst = MRI->createGenericVirtualRegister(BigTy);
+ MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]);
+ Dst = NewDst;
+ }
+ return Dst;
+}
+
+void IRTranslator::unpackRegs(const Value &V, unsigned Src,
+ MachineIRBuilder &MIRBuilder) {
+ ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
+ ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
+
+ for (unsigned i = 0; i < Regs.size(); ++i)
+ MIRBuilder.buildExtract(Regs[i], Src, Offsets[i]);
+}
+
bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
const CallInst &CI = cast<CallInst>(U);
auto TII = MF->getTarget().getIntrinsicInfo();
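
packRegs and unpackRegs bridge the split representation to interfaces such as CallLowering and the intrinsic builders, which still expect a single vreg per Value: packRegs folds the components into one wide register through buildUndef plus a chain of G_INSERTs, and unpackRegs recovers them with G_EXTRACTs. A sketch for the { i32, i64 } example (vreg numbers hypothetical; BigTy comes out as s128 here):

    // packRegs({%lo:s32 @ 0, %hi:s64 @ 64}):
    //   %u0:_(s128) = G_IMPLICIT_DEF                 // buildUndef
    //   %u1:_(s128) = G_INSERT %u0, %lo(s32), 0
    //   %u2:_(s128) = G_INSERT %u1, %hi(s64), 64
    // unpackRegs is the inverse:
    //   %lo:_(s32) = G_EXTRACT %src(s128), 0
    //   %hi:_(s64) = G_EXTRACT %src(s128), 64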
@@ -826,16 +979,24 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
}
+ bool IsSplitType = valueIsSplit(CI);
if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
- unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
+ unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
+ getLLTForType(*CI.getType(), *DL))
+ : getOrCreateVReg(CI);
+
SmallVector<unsigned, 8> Args;
for (auto &Arg: CI.arg_operands())
- Args.push_back(getOrCreateVReg(*Arg));
+ Args.push_back(packRegs(*Arg, MIRBuilder));
MF->getFrameInfo().setHasCalls(true);
- return CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
+ bool Success = CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
return getOrCreateVReg(*CI.getCalledValue());
});
+
+ if (IsSplitType)
+ unpackRegs(CI, Res, MIRBuilder);
+ return Success;
}
assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
@@ -843,7 +1004,14 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
if (translateKnownIntrinsic(CI, ID, MIRBuilder))
return true;
- unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
+ unsigned Res = 0;
+ if (!CI.getType()->isVoidTy()) {
+ if (IsSplitType)
+ Res =
+ MRI->createGenericVirtualRegister(getLLTForType(*CI.getType(), *DL));
+ else
+ Res = getOrCreateVReg(CI);
+ }
MachineInstrBuilder MIB =
MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());
@@ -851,9 +1019,12 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
// Some intrinsics take metadata parameters. Reject them.
if (isa<MetadataAsValue>(Arg))
return false;
- MIB.addUse(getOrCreateVReg(*Arg));
+ MIB.addUse(packRegs(*Arg, MIRBuilder));
}
+ if (IsSplitType)
+ unpackRegs(CI, Res, MIRBuilder);
+
// Add a MachineMemOperand if it is a target mem intrinsic.
const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
TargetLowering::IntrinsicInfo Info;
@@ -897,15 +1068,18 @@ bool IRTranslator::translateInvoke(const User &U,
MCSymbol *BeginSymbol = Context.createTempSymbol();
MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
- unsigned Res = I.getType()->isVoidTy() ? 0 : getOrCreateVReg(I);
+ unsigned Res =
+ MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
SmallVector<unsigned, 8> Args;
for (auto &Arg: I.arg_operands())
- Args.push_back(getOrCreateVReg(*Arg));
+ Args.push_back(packRegs(*Arg, MIRBuilder));
if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
[&]() { return getOrCreateVReg(*I.getCalledValue()); }))
return false;
+ unpackRegs(I, Res, MIRBuilder);
+
MCSymbol *EndSymbol = Context.createTempSymbol();
MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
@@ -964,27 +1138,18 @@ bool IRTranslator::translateLandingPad(const User &U,
return false;
MBB.addLiveIn(ExceptionReg);
- unsigned VReg = MRI->createGenericVirtualRegister(Tys[0]),
- Tmp = MRI->createGenericVirtualRegister(Ty);
- MIRBuilder.buildCopy(VReg, ExceptionReg);
- MIRBuilder.buildInsert(Tmp, Undef, VReg, 0);
+ ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
+ MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
if (!SelectorReg)
return false;
MBB.addLiveIn(SelectorReg);
-
- // N.b. the exception selector register always has pointer type and may not
- // match the actual IR-level type in the landingpad so an extra cast is
- // needed.
unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
MIRBuilder.buildCopy(PtrVReg, SelectorReg);
+ MIRBuilder.buildCast(ResRegs[1], PtrVReg);
- VReg = MRI->createGenericVirtualRegister(Tys[1]);
- MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT).addDef(VReg).addUse(PtrVReg);
- MIRBuilder.buildInsert(getOrCreateVReg(LP), Tmp, VReg,
- Tys[0].getSizeInBits());
return true;
}
@@ -992,6 +1157,9 @@ bool IRTranslator::translateAlloca(const User &U,
MachineIRBuilder &MIRBuilder) {
auto &AI = cast<AllocaInst>(U);
+ if (AI.isSwiftError())
+ return false;
+
if (AI.isStaticAlloca()) {
unsigned Res = getOrCreateVReg(AI);
int FI = getOrCreateFrameIndex(AI);
@@ -999,6 +1167,10 @@ bool IRTranslator::translateAlloca(const User &U,
return true;
}
+ // FIXME: support stack probing for Windows.
+ if (MF->getTarget().getTargetTriple().isOSWindows())
+ return false;
+
// Now we're in the harder dynamic case.
Type *Ty = AI.getAllocatedType();
unsigned Align =
@@ -1070,9 +1242,16 @@ bool IRTranslator::translateInsertElement(const User &U,
// not a legal vector type in LLT.
if (U.getType()->getVectorNumElements() == 1) {
unsigned Elt = getOrCreateVReg(*U.getOperand(1));
- ValToVReg[&U] = Elt;
+ auto &Regs = *VMap.getVRegs(U);
+ if (Regs.empty()) {
+ Regs.push_back(Elt);
+ VMap.getOffsets(U)->push_back(0);
+ } else {
+ MIRBuilder.buildCopy(Regs[0], Elt);
+ }
return true;
}
+
unsigned Res = getOrCreateVReg(U);
unsigned Val = getOrCreateVReg(*U.getOperand(0));
unsigned Elt = getOrCreateVReg(*U.getOperand(1));
@@ -1087,7 +1266,13 @@ bool IRTranslator::translateExtractElement(const User &U,
// not a legal vector type in LLT.
if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
unsigned Elt = getOrCreateVReg(*U.getOperand(0));
- ValToVReg[&U] = Elt;
+ auto &Regs = *VMap.getVRegs(U);
+ if (Regs.empty()) {
+ Regs.push_back(Elt);
+ VMap.getOffsets(U)->push_back(0);
+ } else {
+ MIRBuilder.buildCopy(Regs[0], Elt);
+ }
return true;
}
unsigned Res = getOrCreateVReg(U);
@@ -1109,17 +1294,115 @@ bool IRTranslator::translateShuffleVector(const User &U,
bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
const PHINode &PI = cast<PHINode>(U);
- auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI);
- MIB.addDef(getOrCreateVReg(PI));
- PendingPHIs.emplace_back(&PI, MIB.getInstr());
+ SmallVector<MachineInstr *, 4> Insts;
+ for (auto Reg : getOrCreateVRegs(PI)) {
+ auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, Reg);
+ Insts.push_back(MIB.getInstr());
+ }
+
+ PendingPHIs.emplace_back(&PI, std::move(Insts));
+ return true;
+}
+
+bool IRTranslator::translateAtomicCmpXchg(const User &U,
+ MachineIRBuilder &MIRBuilder) {
+ const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
+
+ if (I.isWeak())
+ return false;
+
+ auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
+ : MachineMemOperand::MONone;
+ Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
+
+ Type *ResType = I.getType();
+ Type *ValType = ResType->Type::getStructElementType(0);
+
+ auto Res = getOrCreateVRegs(I);
+ unsigned OldValRes = Res[0];
+ unsigned SuccessRes = Res[1];
+ unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
+ unsigned Cmp = getOrCreateVReg(*I.getCompareOperand());
+ unsigned NewVal = getOrCreateVReg(*I.getNewValOperand());
+
+ MIRBuilder.buildAtomicCmpXchgWithSuccess(
+ OldValRes, SuccessRes, Addr, Cmp, NewVal,
+ *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
+ Flags, DL->getTypeStoreSize(ValType),
+ getMemOpAlignment(I), AAMDNodes(), nullptr,
+ I.getSyncScopeID(), I.getSuccessOrdering(),
+ I.getFailureOrdering()));
+ return true;
+}
+
+bool IRTranslator::translateAtomicRMW(const User &U,
+ MachineIRBuilder &MIRBuilder) {
+ const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
+
+ auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
+ : MachineMemOperand::MONone;
+ Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
+
+ Type *ResType = I.getType();
+
+ unsigned Res = getOrCreateVReg(I);
+ unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
+ unsigned Val = getOrCreateVReg(*I.getValOperand());
+
+ unsigned Opcode = 0;
+ switch (I.getOperation()) {
+ default:
+ llvm_unreachable("Unknown atomicrmw op");
+ return false;
+ case AtomicRMWInst::Xchg:
+ Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
+ break;
+ case AtomicRMWInst::Add:
+ Opcode = TargetOpcode::G_ATOMICRMW_ADD;
+ break;
+ case AtomicRMWInst::Sub:
+ Opcode = TargetOpcode::G_ATOMICRMW_SUB;
+ break;
+ case AtomicRMWInst::And:
+ Opcode = TargetOpcode::G_ATOMICRMW_AND;
+ break;
+ case AtomicRMWInst::Nand:
+ Opcode = TargetOpcode::G_ATOMICRMW_NAND;
+ break;
+ case AtomicRMWInst::Or:
+ Opcode = TargetOpcode::G_ATOMICRMW_OR;
+ break;
+ case AtomicRMWInst::Xor:
+ Opcode = TargetOpcode::G_ATOMICRMW_XOR;
+ break;
+ case AtomicRMWInst::Max:
+ Opcode = TargetOpcode::G_ATOMICRMW_MAX;
+ break;
+ case AtomicRMWInst::Min:
+ Opcode = TargetOpcode::G_ATOMICRMW_MIN;
+ break;
+ case AtomicRMWInst::UMax:
+ Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
+ break;
+ case AtomicRMWInst::UMin:
+ Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
+ break;
+ }
+
+ MIRBuilder.buildAtomicRMW(
+ Opcode, Res, Addr, Val,
+ *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
+ Flags, DL->getTypeStoreSize(ResType),
+ getMemOpAlignment(I), AAMDNodes(), nullptr,
+ I.getSyncScopeID(), I.getOrdering()));
return true;
}
void IRTranslator::finishPendingPhis() {
- for (std::pair<const PHINode *, MachineInstr *> &Phi : PendingPHIs) {
+ for (auto &Phi : PendingPHIs) {
const PHINode *PI = Phi.first;
- MachineInstrBuilder MIB(*MF, Phi.second);
+ ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
// All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
// won't create extra control flow here, otherwise we need to find the
@@ -1133,17 +1416,27 @@ void IRTranslator::finishPendingPhis() {
continue;
HandledPreds.insert(IRPred);
- unsigned ValReg = getOrCreateVReg(*PI->getIncomingValue(i));
+ ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
- assert(Pred->isSuccessor(MIB->getParent()) &&
+ assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) &&
"incorrect CFG at MachineBasicBlock level");
- MIB.addUse(ValReg);
- MIB.addMBB(Pred);
+ for (unsigned j = 0; j < ValRegs.size(); ++j) {
+ MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
+ MIB.addUse(ValRegs[j]);
+ MIB.addMBB(Pred);
+ }
}
}
}
}
+bool IRTranslator::valueIsSplit(const Value &V,
+ SmallVectorImpl<uint64_t> *Offsets) {
+ SmallVector<LLT, 4> SplitTys;
+ computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
+ return SplitTys.size() > 1;
+}
+
bool IRTranslator::translate(const Instruction &Inst) {
CurBuilder.setDebugLoc(Inst.getDebugLoc());
switch(Inst.getOpcode()) {
@@ -1162,9 +1455,15 @@ bool IRTranslator::translate(const Constant &C, unsigned Reg) {
EntryBuilder.buildFConstant(Reg, *CF);
else if (isa<UndefValue>(C))
EntryBuilder.buildUndef(Reg);
- else if (isa<ConstantPointerNull>(C))
- EntryBuilder.buildConstant(Reg, 0);
- else if (auto GV = dyn_cast<GlobalValue>(&C))
+ else if (isa<ConstantPointerNull>(C)) {
+ // As we are trying to build a constant val of 0 into a pointer,
+ // insert a cast to make them correct with respect to types.
+ unsigned NullSize = DL->getTypeSizeInBits(C.getType());
+ auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
+ auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
+ unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
+ EntryBuilder.buildCast(Reg, ZeroReg);
+ } else if (auto GV = dyn_cast<GlobalValue>(&C))
EntryBuilder.buildGlobalValue(Reg, GV);
else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
if (!CAZ->getType()->isVectorTy())
@@ -1196,23 +1495,6 @@ bool IRTranslator::translate(const Constant &C, unsigned Reg) {
default:
return false;
}
- } else if (auto CS = dyn_cast<ConstantStruct>(&C)) {
- // Return the element if it is a single element ConstantStruct.
- if (CS->getNumOperands() == 1) {
- unsigned EltReg = getOrCreateVReg(*CS->getOperand(0));
- EntryBuilder.buildCast(Reg, EltReg);
- return true;
- }
- SmallVector<unsigned, 4> Ops;
- SmallVector<uint64_t, 4> Indices;
- uint64_t Offset = 0;
- for (unsigned i = 0; i < CS->getNumOperands(); ++i) {
- unsigned OpReg = getOrCreateVReg(*CS->getOperand(i));
- Ops.push_back(OpReg);
- Indices.push_back(Offset);
- Offset += MRI->getType(OpReg).getSizeInBits();
- }
- EntryBuilder.buildSequence(Reg, Ops, Indices);
} else if (auto CV = dyn_cast<ConstantVector>(&C)) {
if (CV->getNumOperands() == 1)
return translate(*CV->getOperand(0), Reg);
@@ -1231,7 +1513,7 @@ void IRTranslator::finalizeFunction() {
// Release the memory used by the different maps we
// needed during the translation.
PendingPHIs.clear();
- ValToVReg.clear();
+ VMap.reset();
FrameIndices.clear();
MachinePreds.clear();
// MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
@@ -1291,8 +1573,22 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
for (const Argument &Arg: F.args()) {
if (DL->getTypeStoreSize(Arg.getType()) == 0)
continue; // Don't handle zero sized types.
- VRegArgs.push_back(getOrCreateVReg(Arg));
+ VRegArgs.push_back(
+ MRI->createGenericVirtualRegister(getLLTForType(*Arg.getType(), *DL)));
}
+
+ // We don't currently support translating swifterror or swiftself functions.
+ for (auto &Arg : F.args()) {
+ if (Arg.hasSwiftErrorAttr() || Arg.hasSwiftSelfAttr()) {
+ OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
+ F.getSubprogram(), &F.getEntryBlock());
+ R << "unable to lower arguments due to swifterror/swiftself: "
+ << ore::NV("Prototype", F.getType());
+ reportTranslationError(*MF, *TPC, *ORE, R);
+ return false;
+ }
+ }
+
if (!CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs)) {
OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
F.getSubprogram(), &F.getEntryBlock());
@@ -1301,14 +1597,28 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
return false;
}
+ auto ArgIt = F.arg_begin();
+ for (auto &VArg : VRegArgs) {
+ // If the argument is an unsplit scalar then don't use unpackRegs to avoid
+ // creating redundant copies.
+ if (!valueIsSplit(*ArgIt, VMap.getOffsets(*ArgIt))) {
+ auto &VRegs = *VMap.getVRegs(cast<Value>(*ArgIt));
+ assert(VRegs.empty() && "VRegs already populated?");
+ VRegs.push_back(VArg);
+ } else {
+ unpackRegs(*ArgIt, VArg, EntryBuilder);
+ }
+ ArgIt++;
+ }
+
// And translate the function!
- for (const BasicBlock &BB: F) {
+ for (const BasicBlock &BB : F) {
MachineBasicBlock &MBB = getMBB(BB);
// Set the insertion point of all the following translations to
// the end of this basic block.
CurBuilder.setMBB(MBB);
- for (const Instruction &Inst: BB) {
+ for (const Instruction &Inst : BB) {
if (translate(Inst))
continue;
@@ -1358,5 +1668,9 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
assert(&MF->front() == &NewEntryBB &&
"New entry wasn't next in the list of basic block!");
+ // Initialize stack protector information.
+ StackProtector &SP = getAnalysis<StackProtector>();
+ SP.copyToMachineFrameInfo(MF->getFrameInfo());
+
return false;
}
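
One subtlety in the constant handling above: G_CONSTANT produces an integer-typed value, so a ConstantPointerNull is now materialized as an integer zero of pointer width followed by a cast. A sketch for a 64-bit target (vreg numbers hypothetical; buildCast picks G_INTTOPTR for an integer-to-pointer pair):

    //   %zero:_(s64) = G_CONSTANT i64 0
    //   %null:_(p0)  = G_INTTOPTR %zero(s64)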
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
index 422cc2219aa8..c83c791327e4 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
@@ -12,7 +12,6 @@
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/ADT/PostOrderIterator.h"
-#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
@@ -57,23 +56,17 @@ InstructionSelect::InstructionSelect() : MachineFunctionPass(ID) {
void InstructionSelect::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetPassConfig>();
+ getSelectionDAGFallbackAnalysisUsage(AU);
MachineFunctionPass::getAnalysisUsage(AU);
}
bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
- const MachineRegisterInfo &MRI = MF.getRegInfo();
-
- // No matter what happens, whether we successfully select the function or not,
- // nothing is going to use the vreg types after us. Make sure they disappear.
- auto ClearVRegTypesOnReturn =
- make_scope_exit([&]() { MRI.getVRegToType().clear(); });
-
// If the ISel pipeline failed, do not bother running that pass.
if (MF.getProperties().hasProperty(
MachineFunctionProperties::Property::FailedISel))
return false;
- DEBUG(dbgs() << "Selecting function: " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Selecting function: " << MF.getName() << '\n');
const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
const InstructionSelector *ISel = MF.getSubtarget().getInstructionSelector();
@@ -85,23 +78,18 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
// FIXME: There are many other MF/MFI fields we need to initialize.
+ MachineRegisterInfo &MRI = MF.getRegInfo();
#ifndef NDEBUG
// Check that our input is fully legal: we require the function to have the
// Legalized property, so it should be.
- // FIXME: This should be in the MachineVerifier, but it can't use the
- // LegalizerInfo as it's currently in the separate GlobalISel library.
- // The RegBankSelected property is already checked in the verifier. Note
- // that it has the same layering problem, but we only use inline methods so
- // end up not needing to link against the GlobalISel library.
- if (const LegalizerInfo *MLI = MF.getSubtarget().getLegalizerInfo())
- for (MachineBasicBlock &MBB : MF)
- for (MachineInstr &MI : MBB)
- if (isPreISelGenericOpcode(MI.getOpcode()) && !MLI->isLegal(MI, MRI)) {
- reportGISelFailure(MF, TPC, MORE, "gisel-select",
- "instruction is not legal", MI);
- return false;
- }
-
+ // FIXME: This should be in the MachineVerifier, as the RegBankSelected
+ // property check already is.
+ if (!DisableGISelLegalityCheck)
+ if (const MachineInstr *MI = machineFunctionIsIllegal(MF)) {
+ reportGISelFailure(MF, TPC, MORE, "gisel-select",
+ "instruction is not legal", *MI);
+ return false;
+ }
#endif
// FIXME: We could introduce new blocks and will need to fix the outer loop.
// Until then, keep track of the number of blocks to assert that we don't.
@@ -129,12 +117,12 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
else
--MII;
- DEBUG(dbgs() << "Selecting: \n " << MI);
+ LLVM_DEBUG(dbgs() << "Selecting: \n " << MI);
// We could have folded this instruction away already, making it dead.
// If so, erase it.
if (isTriviallyDead(MI, MRI)) {
- DEBUG(dbgs() << "Is dead; erasing.\n");
+ LLVM_DEBUG(dbgs() << "Is dead; erasing.\n");
MI.eraseFromParentAndMarkDBGValuesForRemoval();
continue;
}
@@ -147,7 +135,7 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
}
// Dump the range of instructions that MI expanded into.
- DEBUG({
+ LLVM_DEBUG({
auto InsertedBegin = ReachedBegin ? MBB->begin() : std::next(MII);
dbgs() << "Into:\n";
for (auto &InsertedMI : make_range(InsertedBegin, AfterIt))
@@ -159,30 +147,63 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
+ for (MachineBasicBlock &MBB : MF) {
+ if (MBB.empty())
+ continue;
+
+ // Try to find redundant copies between vregs of the same register class.
+ bool ReachedBegin = false;
+ for (auto MII = std::prev(MBB.end()), Begin = MBB.begin(); !ReachedBegin;) {
+ // Check this instruction.
+ MachineInstr &MI = *MII;
+
+ // And have our iterator point to the next instruction, if there is one.
+ if (MII == Begin)
+ ReachedBegin = true;
+ else
+ --MII;
+ if (MI.getOpcode() != TargetOpcode::COPY)
+ continue;
+ unsigned SrcReg = MI.getOperand(1).getReg();
+ unsigned DstReg = MI.getOperand(0).getReg();
+ if (TargetRegisterInfo::isVirtualRegister(SrcReg) &&
+ TargetRegisterInfo::isVirtualRegister(DstReg)) {
+ auto SrcRC = MRI.getRegClass(SrcReg);
+ auto DstRC = MRI.getRegClass(DstReg);
+ if (SrcRC == DstRC) {
+ MRI.replaceRegWith(DstReg, SrcReg);
+ MI.eraseFromParentAndMarkDBGValuesForRemoval();
+ }
+ }
+ }
+ }
+
// Now that selection is complete, there are no more generic vregs. Verify
// that the size of the now-constrained vreg is unchanged and that it has a
// register class.
- for (auto &VRegToType : MRI.getVRegToType()) {
- unsigned VReg = VRegToType.first;
- auto *RC = MRI.getRegClassOrNull(VReg);
+ for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
+ unsigned VReg = TargetRegisterInfo::index2VirtReg(I);
+
MachineInstr *MI = nullptr;
if (!MRI.def_empty(VReg))
MI = &*MRI.def_instr_begin(VReg);
else if (!MRI.use_empty(VReg))
MI = &*MRI.use_instr_begin(VReg);
+ if (!MI)
+ continue;
- if (MI && !RC) {
+ const TargetRegisterClass *RC = MRI.getRegClassOrNull(VReg);
+ if (!RC) {
reportGISelFailure(MF, TPC, MORE, "gisel-select",
"VReg has no regclass after selection", *MI);
return false;
- } else if (!RC)
- continue;
+ }
- if (VRegToType.second.isValid() &&
- VRegToType.second.getSizeInBits() > TRI.getRegSizeInBits(*RC)) {
- reportGISelFailure(MF, TPC, MORE, "gisel-select",
- "VReg has explicit size different from class size",
- *MI);
+ const LLT Ty = MRI.getType(VReg);
+ if (Ty.isValid() && Ty.getSizeInBits() > TRI.getRegSizeInBits(*RC)) {
+ reportGISelFailure(
+ MF, TPC, MORE, "gisel-select",
+ "VReg's low-level type and register class have different sizes", *MI);
return false;
}
}
@@ -199,6 +220,12 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
auto &TLI = *MF.getSubtarget().getTargetLowering();
TLI.finalizeLowering(MF);
+ LLVM_DEBUG({
+ dbgs() << "Rules covered by selecting function: " << MF.getName() << ":";
+ for (auto RuleID : CoverageInfo.covered())
+ dbgs() << " id" << RuleID;
+ dbgs() << "\n\n";
+ });
CoverageInfo.emit(CoveragePrefix,
MF.getSubtarget()
.getTargetLowering()
@@ -206,6 +233,11 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
.getTarget()
.getBackendName());
+ // If we successfully selected the function nothing is going to use the vreg
+ // types after us (otherwise MIRPrinter would need them). Make sure the types
+ // disappear.
+ MRI.clearVirtRegTypes();
+
// FIXME: Should we accurately track changes?
return true;
}
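
The effect of the new post-selection cleanup loop: a COPY whose source and destination are virtual registers of the same register class is folded away by rewriting uses of the destination. A sketch with AArch64-flavoured register classes (instruction and vreg names hypothetical):

    // Before:                       After:
    //   %1:gpr32 = COPY %0:gpr32      (COPY erased)
    //   %2:gpr32 = ADDWrr %1, %1      %2:gpr32 = ADDWrr %0, %0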
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
index 88669bd68c00..5e77fcbb0ed9 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
@@ -46,50 +46,6 @@ bool InstructionSelector::constrainOperandRegToRegClass(
constrainRegToClass(MRI, TII, RBI, I, I.getOperand(OpIdx).getReg(), RC);
}
-bool InstructionSelector::constrainSelectedInstRegOperands(
- MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI,
- const RegisterBankInfo &RBI) const {
- MachineBasicBlock &MBB = *I.getParent();
- MachineFunction &MF = *MBB.getParent();
- MachineRegisterInfo &MRI = MF.getRegInfo();
-
- for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
- MachineOperand &MO = I.getOperand(OpI);
-
- // There's nothing to be done on non-register operands.
- if (!MO.isReg())
- continue;
-
- DEBUG(dbgs() << "Converting operand: " << MO << '\n');
- assert(MO.isReg() && "Unsupported non-reg operand");
-
- unsigned Reg = MO.getReg();
- // Physical registers don't need to be constrained.
- if (TRI.isPhysicalRegister(Reg))
- continue;
-
- // Register operands with a value of 0 (e.g. predicate operands) don't need
- // to be constrained.
- if (Reg == 0)
- continue;
-
- // If the operand is a vreg, we should constrain its regclass, and only
- // insert COPYs if that's impossible.
- // constrainOperandRegClass does that for us.
- MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(),
- Reg, OpI));
-
- // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
- // done.
- if (MO.isUse()) {
- int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
- if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
- I.tieOperands(DefIdx, OpI);
- }
- }
- return true;
-}
-
bool InstructionSelector::isOperandImmEqual(
const MachineOperand &MO, int64_t Value,
const MachineRegisterInfo &MRI) const {
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
new file mode 100644
index 000000000000..344f573a67f5
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
@@ -0,0 +1,101 @@
+//===- lib/CodeGen/GlobalISel/LegalityPredicates.cpp - Predicates --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A library of predicate factories to use for LegalityPredicate.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
+
+using namespace llvm;
+
+LegalityPredicate LegalityPredicates::typeIs(unsigned TypeIdx, LLT Type) {
+ return
+ [=](const LegalityQuery &Query) { return Query.Types[TypeIdx] == Type; };
+}
+
+LegalityPredicate
+LegalityPredicates::typeInSet(unsigned TypeIdx,
+ std::initializer_list<LLT> TypesInit) {
+ SmallVector<LLT, 4> Types = TypesInit;
+ return [=](const LegalityQuery &Query) {
+ return std::find(Types.begin(), Types.end(), Query.Types[TypeIdx]) != Types.end();
+ };
+}
+
+LegalityPredicate LegalityPredicates::typePairInSet(
+ unsigned TypeIdx0, unsigned TypeIdx1,
+ std::initializer_list<std::pair<LLT, LLT>> TypesInit) {
+ SmallVector<std::pair<LLT, LLT>, 4> Types = TypesInit;
+ return [=](const LegalityQuery &Query) {
+ std::pair<LLT, LLT> Match = {Query.Types[TypeIdx0], Query.Types[TypeIdx1]};
+ return std::find(Types.begin(), Types.end(), Match) != Types.end();
+ };
+}
+
+LegalityPredicate LegalityPredicates::typePairAndMemSizeInSet(
+ unsigned TypeIdx0, unsigned TypeIdx1, unsigned MMOIdx,
+ std::initializer_list<TypePairAndMemSize> TypesAndMemSizeInit) {
+ SmallVector<TypePairAndMemSize, 4> TypesAndMemSize = TypesAndMemSizeInit;
+ return [=](const LegalityQuery &Query) {
+ TypePairAndMemSize Match = {Query.Types[TypeIdx0], Query.Types[TypeIdx1],
+ Query.MMODescrs[MMOIdx].Size};
+ return std::find(TypesAndMemSize.begin(), TypesAndMemSize.end(), Match) !=
+ TypesAndMemSize.end();
+ };
+}
+
+LegalityPredicate LegalityPredicates::isScalar(unsigned TypeIdx) {
+ return [=](const LegalityQuery &Query) {
+ return Query.Types[TypeIdx].isScalar();
+ };
+}
+
+LegalityPredicate LegalityPredicates::narrowerThan(unsigned TypeIdx,
+ unsigned Size) {
+ return [=](const LegalityQuery &Query) {
+ const LLT &QueryTy = Query.Types[TypeIdx];
+ return QueryTy.isScalar() && QueryTy.getSizeInBits() < Size;
+ };
+}
+
+LegalityPredicate LegalityPredicates::widerThan(unsigned TypeIdx,
+ unsigned Size) {
+ return [=](const LegalityQuery &Query) {
+ const LLT &QueryTy = Query.Types[TypeIdx];
+ return QueryTy.isScalar() && QueryTy.getSizeInBits() > Size;
+ };
+}
+
+LegalityPredicate LegalityPredicates::sizeNotPow2(unsigned TypeIdx) {
+ return [=](const LegalityQuery &Query) {
+ const LLT &QueryTy = Query.Types[TypeIdx];
+ return QueryTy.isScalar() && !isPowerOf2_32(QueryTy.getSizeInBits());
+ };
+}
+
+LegalityPredicate LegalityPredicates::memSizeInBytesNotPow2(unsigned MMOIdx) {
+ return [=](const LegalityQuery &Query) {
+ return !isPowerOf2_32(Query.MMODescrs[MMOIdx].Size /* In Bytes */);
+ };
+}
+
+LegalityPredicate LegalityPredicates::numElementsNotPow2(unsigned TypeIdx) {
+ return [=](const LegalityQuery &Query) {
+ const LLT &QueryTy = Query.Types[TypeIdx];
+ return QueryTy.isVector() && !isPowerOf2_32(QueryTy.getNumElements());
+ };
+}
+
+LegalityPredicate LegalityPredicates::atomicOrderingAtLeastOrStrongerThan(
+ unsigned MMOIdx, AtomicOrdering Ordering) {
+ return [=](const LegalityQuery &Query) {
+ return isAtLeastOrStrongerThan(Query.MMODescrs[MMOIdx].Ordering, Ordering);
+ };
+}
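
These factories are designed to be composed into a target's legalization rules. A hypothetical usage sketch in the rule-builder style they target (getActionDefinitionsBuilder, legalIf, and widenScalarIf are assumed from the builder API, which is not part of this diff):

    // Hypothetical ruleset: s32/s64 adds are legal, smaller scalars widen.
    using namespace LegalityPredicates;
    getActionDefinitionsBuilder(TargetOpcode::G_ADD)
        .legalIf(typeInSet(0, {LLT::scalar(32), LLT::scalar(64)}))
        .widenScalarIf(narrowerThan(0, 32),
                       LegalizeMutations::widenScalarToNextPow2(0, 32));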
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp
new file mode 100644
index 000000000000..a29b32ecdc03
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp
@@ -0,0 +1,51 @@
+//===- lib/CodeGen/GlobalISel/LegalizeMutations.cpp - Mutations ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A library of mutation factories to use for LegalityMutation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
+
+using namespace llvm;
+
+LegalizeMutation LegalizeMutations::changeTo(unsigned TypeIdx, LLT Ty) {
+ return
+ [=](const LegalityQuery &Query) { return std::make_pair(TypeIdx, Ty); };
+}
+
+LegalizeMutation LegalizeMutations::changeTo(unsigned TypeIdx,
+ unsigned FromTypeIdx) {
+ return [=](const LegalityQuery &Query) {
+ return std::make_pair(TypeIdx, Query.Types[FromTypeIdx]);
+ };
+}
+
+LegalizeMutation LegalizeMutations::widenScalarToNextPow2(unsigned TypeIdx,
+ unsigned Min) {
+ return [=](const LegalityQuery &Query) {
+ unsigned NewSizeInBits =
+ 1 << Log2_32_Ceil(Query.Types[TypeIdx].getSizeInBits());
+ if (NewSizeInBits < Min)
+ NewSizeInBits = Min;
+ return std::make_pair(TypeIdx, LLT::scalar(NewSizeInBits));
+ };
+}
+
+LegalizeMutation LegalizeMutations::moreElementsToNextPow2(unsigned TypeIdx,
+ unsigned Min) {
+ return [=](const LegalityQuery &Query) {
+ const LLT &VecTy = Query.Types[TypeIdx];
+ unsigned NewNumElements = 1 << Log2_32_Ceil(VecTy.getNumElements());
+ if (NewNumElements < Min)
+ NewNumElements = Min;
+ return std::make_pair(
+ TypeIdx, LLT::vector(NewNumElements, VecTy.getScalarSizeInBits()));
+ };
+}
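
A worked example of the rounding arithmetic in widenScalarToNextPow2 (query values hypothetical):

    // widenScalarToNextPow2(0, /*Min=*/32) on a query with Types[0] == s12:
    //   Log2_32_Ceil(12) == 4  ->  1 << 4 == 16 bits
    //   16 < Min (32)          ->  NewSizeInBits = 32
    //   result: std::make_pair(0, LLT::scalar(32))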
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp
index f09b0d9f11e7..9a2aac998a84 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp
@@ -47,6 +47,7 @@ Legalizer::Legalizer() : MachineFunctionPass(ID) {
void Legalizer::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetPassConfig>();
+ getSelectionDAGFallbackAnalysisUsage(AU);
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -72,7 +73,7 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
if (MF.getProperties().hasProperty(
MachineFunctionProperties::Property::FailedISel))
return false;
- DEBUG(dbgs() << "Legalize Machine IR for: " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Legalize Machine IR for: " << MF.getName() << '\n');
init(MF);
const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr);
@@ -112,7 +113,7 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
else
InstList.insert(MI);
}
- DEBUG(dbgs() << ".. .. New MI: " << *MI;);
+ LLVM_DEBUG(dbgs() << ".. .. New MI: " << *MI;);
});
const LegalizerInfo &LInfo(Helper.getLegalizerInfo());
LegalizationArtifactCombiner ArtCombiner(Helper.MIRBuilder, MF.getRegInfo(), LInfo);
@@ -127,7 +128,7 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
MachineInstr &MI = *InstList.pop_back_val();
assert(isPreISelGenericOpcode(MI.getOpcode()) && "Expecting generic opcode");
if (isTriviallyDead(MI, MRI)) {
- DEBUG(dbgs() << MI << "Is dead; erasing.\n");
+ LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
MI.eraseFromParentAndMarkDBGValuesForRemoval();
continue;
}
@@ -148,7 +149,7 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
MachineInstr &MI = *ArtifactList.pop_back_val();
assert(isPreISelGenericOpcode(MI.getOpcode()) && "Expecting generic opcode");
if (isTriviallyDead(MI, MRI)) {
- DEBUG(dbgs() << MI << "Is dead; erasing.\n");
+ LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
RemoveDeadInstFromLists(&MI);
MI.eraseFromParentAndMarkDBGValuesForRemoval();
continue;
@@ -156,7 +157,7 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
SmallVector<MachineInstr *, 4> DeadInstructions;
if (ArtCombiner.tryCombineInstruction(MI, DeadInstructions)) {
for (auto *DeadMI : DeadInstructions) {
- DEBUG(dbgs() << ".. Erasing Dead Instruction " << *DeadMI);
+ LLVM_DEBUG(dbgs() << ".. Erasing Dead Instruction " << *DeadMI);
RemoveDeadInstFromLists(DeadMI);
DeadMI->eraseFromParentAndMarkDBGValuesForRemoval();
}
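
Editor's note: both loops above are instances of the same worklist shape: pop an instruction, skip it if it is trivially dead, otherwise transform it and let the insertion observer queue any follow-up work. A toy sketch of that control flow (plain C++; Inst and the containers are stand-ins, not LLVM types):

    #include <vector>

    struct Inst { bool Dead = false; }; // toy stand-in for MachineInstr

    int main() {
      std::vector<Inst> Storage(3);
      Storage[1].Dead = true;
      std::vector<Inst *> Work = {&Storage[0], &Storage[1], &Storage[2]};
      unsigned Processed = 0;
      while (!Work.empty()) {
        Inst *I = Work.back();
        Work.pop_back();
        if (I->Dead)
          continue;  // mirrors the isTriviallyDead() early-outs above
        ++Processed; // a real combine runs here and may push new work
      }
      return Processed == 2 ? 0 : 1;
    }
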
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 6bebe180fefd..87086af121b7 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -26,6 +26,7 @@
#define DEBUG_TYPE "legalizer"
using namespace llvm;
+using namespace LegalizeActions;
LegalizerHelper::LegalizerHelper(MachineFunction &MF)
: MRI(MF.getRegInfo()), LI(*MF.getSubtarget().getLegalizerInfo()) {
@@ -34,34 +35,34 @@ LegalizerHelper::LegalizerHelper(MachineFunction &MF)
LegalizerHelper::LegalizeResult
LegalizerHelper::legalizeInstrStep(MachineInstr &MI) {
- DEBUG(dbgs() << "Legalizing: "; MI.print(dbgs()));
+ LLVM_DEBUG(dbgs() << "Legalizing: "; MI.print(dbgs()));
- auto Action = LI.getAction(MI, MRI);
- switch (std::get<0>(Action)) {
- case LegalizerInfo::Legal:
- DEBUG(dbgs() << ".. Already legal\n");
+ auto Step = LI.getAction(MI, MRI);
+ switch (Step.Action) {
+ case Legal:
+ LLVM_DEBUG(dbgs() << ".. Already legal\n");
return AlreadyLegal;
- case LegalizerInfo::Libcall:
- DEBUG(dbgs() << ".. Convert to libcall\n");
+ case Libcall:
+ LLVM_DEBUG(dbgs() << ".. Convert to libcall\n");
return libcall(MI);
- case LegalizerInfo::NarrowScalar:
- DEBUG(dbgs() << ".. Narrow scalar\n");
- return narrowScalar(MI, std::get<1>(Action), std::get<2>(Action));
- case LegalizerInfo::WidenScalar:
- DEBUG(dbgs() << ".. Widen scalar\n");
- return widenScalar(MI, std::get<1>(Action), std::get<2>(Action));
- case LegalizerInfo::Lower:
- DEBUG(dbgs() << ".. Lower\n");
- return lower(MI, std::get<1>(Action), std::get<2>(Action));
- case LegalizerInfo::FewerElements:
- DEBUG(dbgs() << ".. Reduce number of elements\n");
- return fewerElementsVector(MI, std::get<1>(Action), std::get<2>(Action));
- case LegalizerInfo::Custom:
- DEBUG(dbgs() << ".. Custom legalization\n");
+ case NarrowScalar:
+ LLVM_DEBUG(dbgs() << ".. Narrow scalar\n");
+ return narrowScalar(MI, Step.TypeIdx, Step.NewType);
+ case WidenScalar:
+ LLVM_DEBUG(dbgs() << ".. Widen scalar\n");
+ return widenScalar(MI, Step.TypeIdx, Step.NewType);
+ case Lower:
+ LLVM_DEBUG(dbgs() << ".. Lower\n");
+ return lower(MI, Step.TypeIdx, Step.NewType);
+ case FewerElements:
+ LLVM_DEBUG(dbgs() << ".. Reduce number of elements\n");
+ return fewerElementsVector(MI, Step.TypeIdx, Step.NewType);
+ case Custom:
+ LLVM_DEBUG(dbgs() << ".. Custom legalization\n");
return LI.legalizeCustom(MI, MRI, MIRBuilder) ? Legalized
: UnableToLegalize;
default:
- DEBUG(dbgs() << ".. Unable to legalize\n");
+ LLVM_DEBUG(dbgs() << ".. Unable to legalize\n");
return UnableToLegalize;
}
}
@@ -103,6 +104,9 @@ static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) {
return Size == 64 ? RTLIB::REM_F64 : RTLIB::REM_F32;
case TargetOpcode::G_FPOW:
return Size == 64 ? RTLIB::POW_F64 : RTLIB::POW_F32;
+ case TargetOpcode::G_FMA:
+ assert((Size == 32 || Size == 64) && "Unsupported size");
+ return Size == 64 ? RTLIB::FMA_F64 : RTLIB::FMA_F32;
}
llvm_unreachable("Unknown libcall function");
}
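
Editor's note: the new G_FMA case relies on the fused multiply-add libcalls (RTLIB::FMA_F32/FMA_F64, i.e. the standard fmaf/fma entry points), which compute a*b + c with a single rounding; that fusion is why a plain multiply-then-add is not an equivalent lowering. A quick standalone check of the operation those entry points compute, assuming only standard <cmath>:

    #include <cassert>
    #include <cmath>

    int main() {
      assert(std::fma(2.0, 3.0, 4.0) == 10.0);     // double, as for s64 G_FMA
      assert(std::fma(2.0f, 3.0f, 4.0f) == 10.0f); // float, as for s32 G_FMA
    }
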
@@ -123,13 +127,47 @@ llvm::createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
return LegalizerHelper::Legalized;
}
+// Useful for libcalls where all operands have the same type.
static LegalizerHelper::LegalizeResult
simpleLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, unsigned Size,
Type *OpType) {
auto Libcall = getRTLibDesc(MI.getOpcode(), Size);
+
+ SmallVector<CallLowering::ArgInfo, 3> Args;
+ for (unsigned i = 1; i < MI.getNumOperands(); i++)
+ Args.push_back({MI.getOperand(i).getReg(), OpType});
return createLibcall(MIRBuilder, Libcall, {MI.getOperand(0).getReg(), OpType},
- {{MI.getOperand(1).getReg(), OpType},
- {MI.getOperand(2).getReg(), OpType}});
+ Args);
+}
+
+static RTLIB::Libcall getConvRTLibDesc(unsigned Opcode, Type *ToType,
+ Type *FromType) {
+ auto ToMVT = MVT::getVT(ToType);
+ auto FromMVT = MVT::getVT(FromType);
+
+ switch (Opcode) {
+ case TargetOpcode::G_FPEXT:
+ return RTLIB::getFPEXT(FromMVT, ToMVT);
+ case TargetOpcode::G_FPTRUNC:
+ return RTLIB::getFPROUND(FromMVT, ToMVT);
+ case TargetOpcode::G_FPTOSI:
+ return RTLIB::getFPTOSINT(FromMVT, ToMVT);
+ case TargetOpcode::G_FPTOUI:
+ return RTLIB::getFPTOUINT(FromMVT, ToMVT);
+ case TargetOpcode::G_SITOFP:
+ return RTLIB::getSINTTOFP(FromMVT, ToMVT);
+ case TargetOpcode::G_UITOFP:
+ return RTLIB::getUINTTOFP(FromMVT, ToMVT);
+ }
+ llvm_unreachable("Unsupported libcall function");
+}
+
+static LegalizerHelper::LegalizeResult
+conversionLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, Type *ToType,
+ Type *FromType) {
+ RTLIB::Libcall Libcall = getConvRTLibDesc(MI.getOpcode(), ToType, FromType);
+ return createLibcall(MIRBuilder, Libcall, {MI.getOperand(0).getReg(), ToType},
+ {{MI.getOperand(1).getReg(), FromType}});
}
LegalizerHelper::LegalizeResult
@@ -157,6 +195,7 @@ LegalizerHelper::libcall(MachineInstr &MI) {
case TargetOpcode::G_FSUB:
case TargetOpcode::G_FMUL:
case TargetOpcode::G_FDIV:
+ case TargetOpcode::G_FMA:
case TargetOpcode::G_FPOW:
case TargetOpcode::G_FREM: {
Type *HLTy = Size == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx);
@@ -165,6 +204,59 @@ LegalizerHelper::libcall(MachineInstr &MI) {
return Status;
break;
}
+ case TargetOpcode::G_FPEXT: {
+ // FIXME: Support other floating point types (half, fp128 etc)
+ unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
+ unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
+ if (ToSize != 64 || FromSize != 32)
+ return UnableToLegalize;
+ LegalizeResult Status = conversionLibcall(
+ MI, MIRBuilder, Type::getDoubleTy(Ctx), Type::getFloatTy(Ctx));
+ if (Status != Legalized)
+ return Status;
+ break;
+ }
+ case TargetOpcode::G_FPTRUNC: {
+ // FIXME: Support other floating point types (half, fp128 etc)
+ unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
+ unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
+ if (ToSize != 32 || FromSize != 64)
+ return UnableToLegalize;
+ LegalizeResult Status = conversionLibcall(
+ MI, MIRBuilder, Type::getFloatTy(Ctx), Type::getDoubleTy(Ctx));
+ if (Status != Legalized)
+ return Status;
+ break;
+ }
+ case TargetOpcode::G_FPTOSI:
+ case TargetOpcode::G_FPTOUI: {
+ // FIXME: Support other types
+ unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
+ unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
+ if (ToSize != 32 || (FromSize != 32 && FromSize != 64))
+ return UnableToLegalize;
+ LegalizeResult Status = conversionLibcall(
+ MI, MIRBuilder, Type::getInt32Ty(Ctx),
+ FromSize == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx));
+ if (Status != Legalized)
+ return Status;
+ break;
+ }
+ case TargetOpcode::G_SITOFP:
+ case TargetOpcode::G_UITOFP: {
+ // FIXME: Support other types
+ unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
+ unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
+ if (FromSize != 32 || (ToSize != 32 && ToSize != 64))
+ return UnableToLegalize;
+ LegalizeResult Status = conversionLibcall(
+ MI, MIRBuilder,
+ ToSize == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx),
+ Type::getInt32Ty(Ctx));
+ if (Status != Legalized)
+ return Status;
+ break;
+ }
}
MI.eraseFromParent();
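
Editor's note: for the float/int widths accepted above, the RTLIB getters resolve to the usual compiler-rt/libgcc entry points. A hedged sketch of that mapping (convName is a hypothetical helper for illustration; the names are the conventional runtime ones, not taken from this diff):

    #include <cstring>

    static const char *convName(const char *Op) {
      if (!std::strcmp(Op, "G_FPEXT"))   return "__extendsfdf2"; // s32 -> s64
      if (!std::strcmp(Op, "G_FPTRUNC")) return "__truncdfsf2";  // s64 -> s32
      if (!std::strcmp(Op, "G_FPTOSI"))  return "__fixdfsi";     // f64 -> s32
      if (!std::strcmp(Op, "G_FPTOUI"))  return "__fixunsdfsi";  // f64 -> u32
      if (!std::strcmp(Op, "G_SITOFP"))  return "__floatsidf";   // s32 -> f64
      if (!std::strcmp(Op, "G_UITOFP"))  return "__floatunsidf"; // u32 -> f64
      return nullptr;
    }

    int main() { return convName("G_FPEXT") ? 0 : 1; }
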
@@ -180,8 +272,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
MIRBuilder.setInstr(MI);
- int64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
- int64_t NarrowSize = NarrowTy.getSizeInBits();
+ uint64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
+ uint64_t NarrowSize = NarrowTy.getSizeInBits();
switch (MI.getOpcode()) {
default:
@@ -194,11 +286,9 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
int NumParts = SizeOp0 / NarrowSize;
SmallVector<unsigned, 2> DstRegs;
- for (int i = 0; i < NumParts; ++i) {
- unsigned Dst = MRI.createGenericVirtualRegister(NarrowTy);
- MIRBuilder.buildUndef(Dst);
- DstRegs.push_back(Dst);
- }
+ for (int i = 0; i < NumParts; ++i)
+ DstRegs.push_back(
+ MIRBuilder.buildUndef(NarrowTy)->getOperand(0).getReg());
MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs);
MI.eraseFromParent();
return Legalized;
@@ -249,8 +339,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);
unsigned OpReg = MI.getOperand(0).getReg();
- int64_t OpStart = MI.getOperand(2).getImm();
- int64_t OpSize = MRI.getType(OpReg).getSizeInBits();
+ uint64_t OpStart = MI.getOperand(2).getImm();
+ uint64_t OpSize = MRI.getType(OpReg).getSizeInBits();
for (int i = 0; i < NumParts; ++i) {
unsigned SrcStart = i * NarrowSize;
@@ -265,7 +355,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
// OpSegStart is where this destination segment would start in OpReg if it
// extended infinitely in both directions.
- int64_t ExtractOffset, SegSize;
+ int64_t ExtractOffset;
+ uint64_t SegSize;
if (OpStart < SrcStart) {
ExtractOffset = 0;
SegSize = std::min(NarrowSize, OpStart + OpSize - SrcStart);
@@ -301,8 +392,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);
unsigned OpReg = MI.getOperand(2).getReg();
- int64_t OpStart = MI.getOperand(3).getImm();
- int64_t OpSize = MRI.getType(OpReg).getSizeInBits();
+ uint64_t OpStart = MI.getOperand(3).getImm();
+ uint64_t OpSize = MRI.getType(OpReg).getSizeInBits();
for (int i = 0; i < NumParts; ++i) {
unsigned DstStart = i * NarrowSize;
@@ -319,7 +410,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
// OpSegStart is where this destination segment would start in OpReg if it
// extended infinitely in both directions.
- int64_t ExtractOffset, InsertOffset, SegSize;
+ int64_t ExtractOffset, InsertOffset;
+ uint64_t SegSize;
if (OpStart < DstStart) {
InsertOffset = 0;
ExtractOffset = DstStart - OpStart;
@@ -353,6 +445,14 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
// NarrowSize.
if (SizeOp0 % NarrowSize != 0)
return UnableToLegalize;
+
+ const auto &MMO = **MI.memoperands_begin();
+ // This implementation doesn't work for atomics. Give up instead of doing
+ // something invalid.
+ if (MMO.getOrdering() != AtomicOrdering::NotAtomic ||
+ MMO.getFailureOrdering() != AtomicOrdering::NotAtomic)
+ return UnableToLegalize;
+
int NumParts = SizeOp0 / NarrowSize;
LLT OffsetTy = LLT::scalar(
MRI.getType(MI.getOperand(1).getReg()).getScalarSizeInBits());
@@ -363,12 +463,16 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
unsigned SrcReg = 0;
unsigned Adjustment = i * NarrowSize / 8;
+ MachineMemOperand *SplitMMO = MIRBuilder.getMF().getMachineMemOperand(
+ MMO.getPointerInfo().getWithOffset(Adjustment), MMO.getFlags(),
+ NarrowSize / 8, i == 0 ? MMO.getAlignment() : NarrowSize / 8,
+ MMO.getAAInfo(), MMO.getRanges(), MMO.getSyncScopeID(),
+ MMO.getOrdering(), MMO.getFailureOrdering());
+
MIRBuilder.materializeGEP(SrcReg, MI.getOperand(1).getReg(), OffsetTy,
Adjustment);
- // TODO: This is conservatively correct, but we probably want to split the
- // memory operands in the future.
- MIRBuilder.buildLoad(DstReg, SrcReg, **MI.memoperands_begin());
+ MIRBuilder.buildLoad(DstReg, SrcReg, *SplitMMO);
DstRegs.push_back(DstReg);
}
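
Editor's note: each part of the split access now carries its own memory operand: the pointer info is advanced by the byte offset, the size shrinks to NarrowSize/8, and only the first part may keep the original, larger alignment. A standalone sketch of that arithmetic with hypothetical numbers (a 64-bit access split into two 32-bit parts on an 8-byte-aligned base):

    #include <cstdio>

    int main() {
      unsigned NarrowSize = 32, NumParts = 2, OrigAlign = 8;
      for (unsigned i = 0; i < NumParts; ++i) {
        unsigned Bytes = NarrowSize / 8;
        unsigned Offset = i * Bytes;                 // getWithOffset(Adjustment)
        unsigned Align = i == 0 ? OrigAlign : Bytes; // first part keeps alignment
        std::printf("part %u: offset %u, size %u, align %u\n", i, Offset, Bytes,
                    Align);
      }
    }
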
@@ -382,6 +486,14 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
// NarrowSize.
if (SizeOp0 % NarrowSize != 0)
return UnableToLegalize;
+
+ const auto &MMO = **MI.memoperands_begin();
+ // This implementation doesn't work for atomics. Give up instead of doing
+ // something invalid.
+ if (MMO.getOrdering() != AtomicOrdering::NotAtomic ||
+ MMO.getFailureOrdering() != AtomicOrdering::NotAtomic)
+ return UnableToLegalize;
+
int NumParts = SizeOp0 / NarrowSize;
LLT OffsetTy = LLT::scalar(
MRI.getType(MI.getOperand(1).getReg()).getScalarSizeInBits());
@@ -393,12 +505,16 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
unsigned DstReg = 0;
unsigned Adjustment = i * NarrowSize / 8;
+ MachineMemOperand *SplitMMO = MIRBuilder.getMF().getMachineMemOperand(
+ MMO.getPointerInfo().getWithOffset(Adjustment), MMO.getFlags(),
+ NarrowSize / 8, i == 0 ? MMO.getAlignment() : NarrowSize / 8,
+ MMO.getAAInfo(), MMO.getRanges(), MMO.getSyncScopeID(),
+ MMO.getOrdering(), MMO.getFailureOrdering());
+
MIRBuilder.materializeGEP(DstReg, MI.getOperand(1).getReg(), OffsetTy,
Adjustment);
- // TODO: This is conservatively correct, but we probably want to split the
- // memory operands in the future.
- MIRBuilder.buildStore(SrcRegs[i], DstReg, **MI.memoperands_begin());
+ MIRBuilder.buildStore(SrcRegs[i], DstReg, *SplitMMO);
}
MI.eraseFromParent();
return Legalized;
@@ -475,6 +591,22 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
}
}
+void LegalizerHelper::widenScalarSrc(MachineInstr &MI, LLT WideTy,
+ unsigned OpIdx, unsigned ExtOpcode) {
+ MachineOperand &MO = MI.getOperand(OpIdx);
+ auto ExtB = MIRBuilder.buildInstr(ExtOpcode, WideTy, MO.getReg());
+ MO.setReg(ExtB->getOperand(0).getReg());
+}
+
+void LegalizerHelper::widenScalarDst(MachineInstr &MI, LLT WideTy,
+ unsigned OpIdx, unsigned TruncOpcode) {
+ MachineOperand &MO = MI.getOperand(OpIdx);
+ unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
+ MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
+ MIRBuilder.buildInstr(TruncOpcode, MO.getReg(), DstExt);
+ MO.setReg(DstExt);
+}
+
LegalizerHelper::LegalizeResult
LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
MIRBuilder.setInstr(MI);
@@ -482,303 +614,201 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
switch (MI.getOpcode()) {
default:
return UnableToLegalize;
+
case TargetOpcode::G_ADD:
case TargetOpcode::G_AND:
case TargetOpcode::G_MUL:
case TargetOpcode::G_OR:
case TargetOpcode::G_XOR:
case TargetOpcode::G_SUB:
- case TargetOpcode::G_SHL: {
// Perform operation at larger width (any extension is fine here, high bits
// don't affect the result) and then truncate the result back to the
// original type.
- unsigned Src1Ext = MRI.createGenericVirtualRegister(WideTy);
- unsigned Src2Ext = MRI.createGenericVirtualRegister(WideTy);
- MIRBuilder.buildAnyExt(Src1Ext, MI.getOperand(1).getReg());
- MIRBuilder.buildAnyExt(Src2Ext, MI.getOperand(2).getReg());
-
- unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
- MIRBuilder.buildInstr(MI.getOpcode())
- .addDef(DstExt)
- .addUse(Src1Ext)
- .addUse(Src2Ext);
-
- MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
- MI.eraseFromParent();
+ widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
+ widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
+ widenScalarDst(MI, WideTy);
+ MIRBuilder.recordInsertion(&MI);
return Legalized;
- }
+
+ case TargetOpcode::G_SHL:
+ widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
+ // The "number of bits to shift" operand must preserve its value as an
+ // unsigned integer:
+ widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
+ widenScalarDst(MI, WideTy);
+ MIRBuilder.recordInsertion(&MI);
+ return Legalized;
+
case TargetOpcode::G_SDIV:
- case TargetOpcode::G_UDIV:
case TargetOpcode::G_SREM:
- case TargetOpcode::G_UREM:
+ widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
+ widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
+ widenScalarDst(MI, WideTy);
+ MIRBuilder.recordInsertion(&MI);
+ return Legalized;
+
case TargetOpcode::G_ASHR:
- case TargetOpcode::G_LSHR: {
- unsigned ExtOp = MI.getOpcode() == TargetOpcode::G_SDIV ||
- MI.getOpcode() == TargetOpcode::G_SREM ||
- MI.getOpcode() == TargetOpcode::G_ASHR
- ? TargetOpcode::G_SEXT
- : TargetOpcode::G_ZEXT;
-
- unsigned LHSExt = MRI.createGenericVirtualRegister(WideTy);
- MIRBuilder.buildInstr(ExtOp).addDef(LHSExt).addUse(
- MI.getOperand(1).getReg());
-
- unsigned RHSExt = MRI.createGenericVirtualRegister(WideTy);
- MIRBuilder.buildInstr(ExtOp).addDef(RHSExt).addUse(
- MI.getOperand(2).getReg());
-
- unsigned ResExt = MRI.createGenericVirtualRegister(WideTy);
- MIRBuilder.buildInstr(MI.getOpcode())
- .addDef(ResExt)
- .addUse(LHSExt)
- .addUse(RHSExt);
-
- MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), ResExt);
- MI.eraseFromParent();
+ widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
+ // The "number of bits to shift" operand must preserve its value as an
+ // unsigned integer:
+ widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
+ widenScalarDst(MI, WideTy);
+ MIRBuilder.recordInsertion(&MI);
return Legalized;
- }
- case TargetOpcode::G_SELECT: {
+
+ case TargetOpcode::G_UDIV:
+ case TargetOpcode::G_UREM:
+ case TargetOpcode::G_LSHR:
+ widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT);
+ widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
+ widenScalarDst(MI, WideTy);
+ MIRBuilder.recordInsertion(&MI);
+ return Legalized;
+
+ case TargetOpcode::G_SELECT:
if (TypeIdx != 0)
return UnableToLegalize;
-
// Perform operation at larger width (any extension is fine here, high bits
// don't affect the result) and then truncate the result back to the
// original type.
- unsigned Src1Ext = MRI.createGenericVirtualRegister(WideTy);
- unsigned Src2Ext = MRI.createGenericVirtualRegister(WideTy);
- MIRBuilder.buildAnyExt(Src1Ext, MI.getOperand(2).getReg());
- MIRBuilder.buildAnyExt(Src2Ext, MI.getOperand(3).getReg());
-
- unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
- MIRBuilder.buildInstr(TargetOpcode::G_SELECT)
- .addDef(DstExt)
- .addReg(MI.getOperand(1).getReg())
- .addUse(Src1Ext)
- .addUse(Src2Ext);
-
- MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
- MI.eraseFromParent();
+ widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
+ widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ANYEXT);
+ widenScalarDst(MI, WideTy);
+ MIRBuilder.recordInsertion(&MI);
return Legalized;
- }
+
case TargetOpcode::G_FPTOSI:
- case TargetOpcode::G_FPTOUI: {
+ case TargetOpcode::G_FPTOUI:
if (TypeIdx != 0)
return UnableToLegalize;
-
- unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
- MIRBuilder.buildInstr(MI.getOpcode())
- .addDef(DstExt)
- .addUse(MI.getOperand(1).getReg());
-
- MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
- MI.eraseFromParent();
+ widenScalarDst(MI, WideTy);
+ MIRBuilder.recordInsertion(&MI);
return Legalized;
- }
+
case TargetOpcode::G_SITOFP:
- case TargetOpcode::G_UITOFP: {
if (TypeIdx != 1)
return UnableToLegalize;
+ widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
+ MIRBuilder.recordInsertion(&MI);
+ return Legalized;
- unsigned Src = MI.getOperand(1).getReg();
- unsigned SrcExt = MRI.createGenericVirtualRegister(WideTy);
-
- if (MI.getOpcode() == TargetOpcode::G_SITOFP) {
- MIRBuilder.buildSExt(SrcExt, Src);
- } else {
- assert(MI.getOpcode() == TargetOpcode::G_UITOFP && "Unexpected conv op");
- MIRBuilder.buildZExt(SrcExt, Src);
- }
-
- MIRBuilder.buildInstr(MI.getOpcode())
- .addDef(MI.getOperand(0).getReg())
- .addUse(SrcExt);
-
- MI.eraseFromParent();
+ case TargetOpcode::G_UITOFP:
+ if (TypeIdx != 1)
+ return UnableToLegalize;
+ widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT);
+ MIRBuilder.recordInsertion(&MI);
return Legalized;
- }
- case TargetOpcode::G_INSERT: {
+
+ case TargetOpcode::G_INSERT:
if (TypeIdx != 0)
return UnableToLegalize;
-
- unsigned Src = MI.getOperand(1).getReg();
- unsigned SrcExt = MRI.createGenericVirtualRegister(WideTy);
- MIRBuilder.buildAnyExt(SrcExt, Src);
-
- unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
- auto MIB = MIRBuilder.buildInsert(DstExt, SrcExt, MI.getOperand(2).getReg(),
- MI.getOperand(3).getImm());
- for (unsigned OpNum = 4; OpNum < MI.getNumOperands(); OpNum += 2) {
- MIB.addReg(MI.getOperand(OpNum).getReg());
- MIB.addImm(MI.getOperand(OpNum + 1).getImm());
- }
-
- MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
- MI.eraseFromParent();
+ widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
+ widenScalarDst(MI, WideTy);
+ MIRBuilder.recordInsertion(&MI);
return Legalized;
- }
- case TargetOpcode::G_LOAD: {
- assert(alignTo(MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(), 8) ==
- WideTy.getSizeInBits() &&
- "illegal to increase number of bytes loaded");
-
- unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
- MIRBuilder.buildLoad(DstExt, MI.getOperand(1).getReg(),
- **MI.memoperands_begin());
- MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
- MI.eraseFromParent();
+
+ case TargetOpcode::G_LOAD:
+    // For some types like i24, we might try to widen to i32. To properly
+    // handle this we should use a dedicated extending load; until then,
+    // avoid trying to legalize.
+ if (alignTo(MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(), 8) !=
+ WideTy.getSizeInBits())
+ return UnableToLegalize;
+ LLVM_FALLTHROUGH;
+ case TargetOpcode::G_SEXTLOAD:
+ case TargetOpcode::G_ZEXTLOAD:
+ widenScalarDst(MI, WideTy);
+ MIRBuilder.recordInsertion(&MI);
return Legalized;
- }
+
case TargetOpcode::G_STORE: {
if (MRI.getType(MI.getOperand(0).getReg()) != LLT::scalar(1) ||
WideTy != LLT::scalar(8))
return UnableToLegalize;
- auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
- auto Content = TLI.getBooleanContents(false, false);
-
- unsigned ExtOp = TargetOpcode::G_ANYEXT;
- if (Content == TargetLoweringBase::ZeroOrOneBooleanContent)
- ExtOp = TargetOpcode::G_ZEXT;
- else if (Content == TargetLoweringBase::ZeroOrNegativeOneBooleanContent)
- ExtOp = TargetOpcode::G_SEXT;
- else
- ExtOp = TargetOpcode::G_ANYEXT;
-
- unsigned SrcExt = MRI.createGenericVirtualRegister(WideTy);
- MIRBuilder.buildInstr(ExtOp).addDef(SrcExt).addUse(
- MI.getOperand(0).getReg());
- MIRBuilder.buildStore(SrcExt, MI.getOperand(1).getReg(),
- **MI.memoperands_begin());
- MI.eraseFromParent();
+ widenScalarSrc(MI, WideTy, 0, TargetOpcode::G_ZEXT);
+ MIRBuilder.recordInsertion(&MI);
return Legalized;
}
case TargetOpcode::G_CONSTANT: {
- unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
- MIRBuilder.buildConstant(DstExt, *MI.getOperand(1).getCImm());
- MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
- MI.eraseFromParent();
+ MachineOperand &SrcMO = MI.getOperand(1);
+ LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
+ const APInt &Val = SrcMO.getCImm()->getValue().sext(WideTy.getSizeInBits());
+ SrcMO.setCImm(ConstantInt::get(Ctx, Val));
+
+ widenScalarDst(MI, WideTy);
+ MIRBuilder.recordInsertion(&MI);
return Legalized;
}
case TargetOpcode::G_FCONSTANT: {
- unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
- const ConstantFP *CFP = MI.getOperand(1).getFPImm();
- APFloat Val = CFP->getValueAPF();
+ MachineOperand &SrcMO = MI.getOperand(1);
LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
- auto LLT2Sem = [](LLT Ty) {
- switch (Ty.getSizeInBits()) {
- case 32:
- return &APFloat::IEEEsingle();
- break;
- case 64:
- return &APFloat::IEEEdouble();
- break;
- default:
- llvm_unreachable("Unhandled fp widen type");
- }
- };
+ APFloat Val = SrcMO.getFPImm()->getValueAPF();
bool LosesInfo;
- Val.convert(*LLT2Sem(WideTy), APFloat::rmTowardZero, &LosesInfo);
- MIRBuilder.buildFConstant(DstExt, *ConstantFP::get(Ctx, Val));
- MIRBuilder.buildFPTrunc(MI.getOperand(0).getReg(), DstExt);
- MI.eraseFromParent();
+ switch (WideTy.getSizeInBits()) {
+ case 32:
+ Val.convert(APFloat::IEEEsingle(), APFloat::rmTowardZero, &LosesInfo);
+ break;
+ case 64:
+ Val.convert(APFloat::IEEEdouble(), APFloat::rmTowardZero, &LosesInfo);
+ break;
+ default:
+ llvm_unreachable("Unhandled fp widen type");
+ }
+ SrcMO.setFPImm(ConstantFP::get(Ctx, Val));
+
+ widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
+ MIRBuilder.recordInsertion(&MI);
return Legalized;
}
- case TargetOpcode::G_BRCOND: {
- unsigned TstExt = MRI.createGenericVirtualRegister(WideTy);
- MIRBuilder.buildAnyExt(TstExt, MI.getOperand(0).getReg());
- MIRBuilder.buildBrCond(TstExt, *MI.getOperand(1).getMBB());
- MI.eraseFromParent();
+ case TargetOpcode::G_BRCOND:
+ widenScalarSrc(MI, WideTy, 0, TargetOpcode::G_ANYEXT);
+ MIRBuilder.recordInsertion(&MI);
return Legalized;
- }
- case TargetOpcode::G_FCMP: {
- unsigned Op0Ext, Op1Ext, DstReg;
- unsigned Cmp1 = MI.getOperand(2).getReg();
- unsigned Cmp2 = MI.getOperand(3).getReg();
- if (TypeIdx == 0) {
- Op0Ext = Cmp1;
- Op1Ext = Cmp2;
- DstReg = MRI.createGenericVirtualRegister(WideTy);
- } else {
- Op0Ext = MRI.createGenericVirtualRegister(WideTy);
- Op1Ext = MRI.createGenericVirtualRegister(WideTy);
- DstReg = MI.getOperand(0).getReg();
- MIRBuilder.buildInstr(TargetOpcode::G_FPEXT, Op0Ext, Cmp1);
- MIRBuilder.buildInstr(TargetOpcode::G_FPEXT, Op1Ext, Cmp2);
- }
- MIRBuilder.buildFCmp(
- static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()),
- DstReg, Op0Ext, Op1Ext);
+
+ case TargetOpcode::G_FCMP:
if (TypeIdx == 0)
- MIRBuilder.buildInstr(TargetOpcode::G_TRUNC, MI.getOperand(0).getReg(),
- DstReg);
- MI.eraseFromParent();
- return Legalized;
- }
- case TargetOpcode::G_ICMP: {
- bool IsSigned = CmpInst::isSigned(
- static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()));
- unsigned Cmp1 = MI.getOperand(2).getReg();
- unsigned Cmp2 = MI.getOperand(3).getReg();
- unsigned Op0Ext, Op1Ext, DstReg;
- if (TypeIdx == 0) {
- Op0Ext = Cmp1;
- Op1Ext = Cmp2;
- DstReg = MRI.createGenericVirtualRegister(WideTy);
- } else {
- Op0Ext = MRI.createGenericVirtualRegister(WideTy);
- Op1Ext = MRI.createGenericVirtualRegister(WideTy);
- DstReg = MI.getOperand(0).getReg();
- if (IsSigned) {
- MIRBuilder.buildSExt(Op0Ext, Cmp1);
- MIRBuilder.buildSExt(Op1Ext, Cmp2);
- } else {
- MIRBuilder.buildZExt(Op0Ext, Cmp1);
- MIRBuilder.buildZExt(Op1Ext, Cmp2);
- }
+ widenScalarDst(MI, WideTy);
+ else {
+ widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_FPEXT);
+ widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_FPEXT);
}
- MIRBuilder.buildICmp(
- static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()),
- DstReg, Op0Ext, Op1Ext);
+ MIRBuilder.recordInsertion(&MI);
+ return Legalized;
+
+ case TargetOpcode::G_ICMP:
if (TypeIdx == 0)
- MIRBuilder.buildInstr(TargetOpcode::G_TRUNC, MI.getOperand(0).getReg(),
- DstReg);
- MI.eraseFromParent();
+ widenScalarDst(MI, WideTy);
+ else {
+ unsigned ExtOpcode = CmpInst::isSigned(static_cast<CmpInst::Predicate>(
+ MI.getOperand(1).getPredicate()))
+ ? TargetOpcode::G_SEXT
+ : TargetOpcode::G_ZEXT;
+ widenScalarSrc(MI, WideTy, 2, ExtOpcode);
+ widenScalarSrc(MI, WideTy, 3, ExtOpcode);
+ }
+ MIRBuilder.recordInsertion(&MI);
return Legalized;
- }
- case TargetOpcode::G_GEP: {
+
+ case TargetOpcode::G_GEP:
assert(TypeIdx == 1 && "unable to legalize pointer of GEP");
- unsigned OffsetExt = MRI.createGenericVirtualRegister(WideTy);
- MIRBuilder.buildSExt(OffsetExt, MI.getOperand(2).getReg());
- MI.getOperand(2).setReg(OffsetExt);
+ widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
+ MIRBuilder.recordInsertion(&MI);
return Legalized;
- }
+
case TargetOpcode::G_PHI: {
assert(TypeIdx == 0 && "Expecting only Idx 0");
- auto getExtendedReg = [&](unsigned Reg, MachineBasicBlock &MBB) {
- auto FirstTermIt = MBB.getFirstTerminator();
- MIRBuilder.setInsertPt(MBB, FirstTermIt);
- MachineInstr *DefMI = MRI.getVRegDef(Reg);
- MachineInstrBuilder MIB;
- if (DefMI->getOpcode() == TargetOpcode::G_TRUNC)
- MIB = MIRBuilder.buildAnyExtOrTrunc(WideTy,
- DefMI->getOperand(1).getReg());
- else
- MIB = MIRBuilder.buildAnyExt(WideTy, Reg);
- return MIB->getOperand(0).getReg();
- };
- auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, WideTy);
- for (auto OpIt = MI.operands_begin() + 1, OpE = MI.operands_end();
- OpIt != OpE;) {
- unsigned Reg = OpIt++->getReg();
- MachineBasicBlock *OpMBB = OpIt++->getMBB();
- MIB.addReg(getExtendedReg(Reg, *OpMBB));
- MIB.addMBB(OpMBB);
+
+ for (unsigned I = 1; I < MI.getNumOperands(); I += 2) {
+ MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB();
+ MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator());
+ widenScalarSrc(MI, WideTy, I, TargetOpcode::G_ANYEXT);
}
- auto *MBB = MI.getParent();
- MIRBuilder.setInsertPt(*MBB, MBB->getFirstNonPHI());
- MIRBuilder.buildTrunc(MI.getOperand(0).getReg(),
- MIB->getOperand(0).getReg());
- MI.eraseFromParent();
+
+ MachineBasicBlock &MBB = *MI.getParent();
+ MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI());
+ widenScalarDst(MI, WideTy);
+ MIRBuilder.recordInsertion(&MI);
return Legalized;
}
}
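
Editor's note: the recurring comment "any extension is fine here, high bits don't affect the result" is a modular-arithmetic fact: for add, sub, mul and the bitwise ops, the low N bits of the wide result depend only on the low N bits of the inputs. A standalone check with deliberately garbage high bits (plain C++, hypothetical values):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t A = 0xF0, B = 0x23;
      // "Any-extend": the high bits are arbitrary garbage.
      uint32_t WideA = 0xAB00u | A, WideB = 0xCD00u | B;
      uint8_t Narrow = uint8_t(A + B);          // compute at s8
      uint8_t ViaWide = uint8_t(WideA + WideB); // compute wide, then truncate
      assert(Narrow == ViaWide);                // low bits agree: 0x13
    }
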
@@ -874,11 +904,10 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
}
ConstantFP &ZeroForNegation =
*cast<ConstantFP>(ConstantFP::getZeroValueForNegation(ZeroTy));
- unsigned Zero = MRI.createGenericVirtualRegister(Ty);
- MIRBuilder.buildFConstant(Zero, ZeroForNegation);
+ auto Zero = MIRBuilder.buildFConstant(Ty, ZeroForNegation);
MIRBuilder.buildInstr(TargetOpcode::G_FSUB)
.addDef(Res)
- .addUse(Zero)
+ .addUse(Zero->getOperand(0).getReg())
.addUse(MI.getOperand(1).getReg());
MI.eraseFromParent();
return Legalized;
@@ -887,7 +916,7 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
// Lower (G_FSUB LHS, RHS) to (G_FADD LHS, (G_FNEG RHS)).
// First, check if G_FNEG is marked as Lower. If so, we may
// end up with an infinite loop as G_FSUB is used to legalize G_FNEG.
- if (LI.getAction({G_FNEG, Ty}).first == LegalizerInfo::Lower)
+ if (LI.getAction({G_FNEG, {Ty}}).Action == Lower)
return UnableToLegalize;
unsigned Res = MI.getOperand(0).getReg();
unsigned LHS = MI.getOperand(1).getReg();
@@ -913,6 +942,48 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
MI.eraseFromParent();
return Legalized;
}
+ case TargetOpcode::G_LOAD:
+ case TargetOpcode::G_SEXTLOAD:
+ case TargetOpcode::G_ZEXTLOAD: {
+ // Lower to a memory-width G_LOAD and a G_SEXT/G_ZEXT/G_ANYEXT
+ unsigned DstReg = MI.getOperand(0).getReg();
+ unsigned PtrReg = MI.getOperand(1).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+ auto &MMO = **MI.memoperands_begin();
+
+ if (DstTy.getSizeInBits() == MMO.getSize() /* in bytes */ * 8) {
+ // In the case of G_LOAD, this was a non-extending load already and we're
+ // about to lower to the same instruction.
+ if (MI.getOpcode() == TargetOpcode::G_LOAD)
+ return UnableToLegalize;
+ MIRBuilder.buildLoad(DstReg, PtrReg, MMO);
+ MI.eraseFromParent();
+ return Legalized;
+ }
+
+ if (DstTy.isScalar()) {
+ unsigned TmpReg = MRI.createGenericVirtualRegister(
+ LLT::scalar(MMO.getSize() /* in bytes */ * 8));
+ MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
+ switch (MI.getOpcode()) {
+ default:
+ llvm_unreachable("Unexpected opcode");
+ case TargetOpcode::G_LOAD:
+ MIRBuilder.buildAnyExt(DstReg, TmpReg);
+ break;
+ case TargetOpcode::G_SEXTLOAD:
+ MIRBuilder.buildSExt(DstReg, TmpReg);
+ break;
+ case TargetOpcode::G_ZEXTLOAD:
+ MIRBuilder.buildZExt(DstReg, TmpReg);
+ break;
+ }
+ MI.eraseFromParent();
+ return Legalized;
+ }
+
+ return UnableToLegalize;
+ }
}
}
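
Editor's note: the three extension choices above differ only in what they promise about the high bits of the loaded value. A standalone illustration for an s8 load widened to 32 bits (plain C++ values, not MIR):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t Mem = 0x80;      // the byte in memory
      int32_t S = int8_t(Mem); // G_SEXTLOAD: sign bit replicated, 0xFFFFFF80
      uint32_t Z = Mem;        // G_ZEXTLOAD: high bits zeroed, 0x00000080
      assert(S == -128 && Z == 0x80u);
      // The G_LOAD path uses G_ANYEXT: the high 24 bits are unspecified.
    }
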
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
index 9c27c59a0654..ae061b64a38c 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
@@ -24,12 +24,86 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <map>
+#include <numeric>
+
using namespace llvm;
+using namespace LegalizeActions;
+
+#define DEBUG_TYPE "legalizer-info"
+
+cl::opt<bool> llvm::DisableGISelLegalityCheck(
+ "disable-gisel-legality-check",
+ cl::desc("Don't verify that MIR is fully legal between GlobalISel passes"),
+ cl::Hidden);
+
+raw_ostream &LegalityQuery::print(raw_ostream &OS) const {
+ OS << Opcode << ", Tys={";
+ for (const auto &Type : Types) {
+ OS << Type << ", ";
+ }
+  OS << "}, MMOs={";
+ for (const auto &MMODescr : MMODescrs) {
+ OS << MMODescr.Size << ", ";
+ }
+ OS << "}";
+
+ return OS;
+}
+
+LegalizeActionStep LegalizeRuleSet::apply(const LegalityQuery &Query) const {
+ LLVM_DEBUG(dbgs() << "Applying legalizer ruleset to: "; Query.print(dbgs());
+ dbgs() << "\n");
+ if (Rules.empty()) {
+ LLVM_DEBUG(dbgs() << ".. fallback to legacy rules (no rules defined)\n");
+ return {LegalizeAction::UseLegacyRules, 0, LLT{}};
+ }
+ for (const auto &Rule : Rules) {
+ if (Rule.match(Query)) {
+ LLVM_DEBUG(dbgs() << ".. match\n");
+ std::pair<unsigned, LLT> Mutation = Rule.determineMutation(Query);
+ LLVM_DEBUG(dbgs() << ".. .. " << (unsigned)Rule.getAction() << ", "
+ << Mutation.first << ", " << Mutation.second << "\n");
+ assert((Query.Types[Mutation.first] != Mutation.second ||
+ Rule.getAction() == Lower ||
+ Rule.getAction() == MoreElements ||
+ Rule.getAction() == FewerElements) &&
+ "Simple loop detected");
+ return {Rule.getAction(), Mutation.first, Mutation.second};
+ } else
+ LLVM_DEBUG(dbgs() << ".. no match\n");
+ }
+ LLVM_DEBUG(dbgs() << ".. unsupported\n");
+ return {LegalizeAction::Unsupported, 0, LLT{}};
+}
+
+bool LegalizeRuleSet::verifyTypeIdxsCoverage(unsigned NumTypeIdxs) const {
+#ifndef NDEBUG
+ if (Rules.empty()) {
+ LLVM_DEBUG(
+ dbgs() << ".. type index coverage check SKIPPED: no rules defined\n");
+ return true;
+ }
+ const int64_t FirstUncovered = TypeIdxsCovered.find_first_unset();
+ if (FirstUncovered < 0) {
+ LLVM_DEBUG(dbgs() << ".. type index coverage check SKIPPED:"
+ " user-defined predicate detected\n");
+ return true;
+ }
+ const bool AllCovered = (FirstUncovered >= NumTypeIdxs);
+ LLVM_DEBUG(dbgs() << ".. the first uncovered type index: " << FirstUncovered
+ << ", " << (AllCovered ? "OK" : "FAIL") << "\n");
+ return AllCovered;
+#else
+ return true;
+#endif
+}
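
Editor's note: LegalizeRuleSet::apply() above is a first-match scan: rules are tried in the order they were added, and the first matching predicate decides both the action and the mutation. A minimal standalone sketch of that evaluation order (toy Rule and Action types, not the LLVM ones):

    #include <functional>
    #include <vector>

    enum class Action { Legal, WidenScalar, Unsupported };

    struct Rule {
      std::function<bool(unsigned)> Match; // predicate over the scalar size
      Action A;
    };

    static Action apply(const std::vector<Rule> &Rules, unsigned Size) {
      for (const Rule &R : Rules)
        if (R.Match(Size))
          return R.A;             // first match wins
      return Action::Unsupported; // mirrors the fallthrough above
    }

    int main() {
      std::vector<Rule> Rules = {
          {[](unsigned S) { return S == 32 || S == 64; }, Action::Legal},
          {[](unsigned S) { return S < 32; }, Action::WidenScalar},
      };
      return apply(Rules, 16) == Action::WidenScalar ? 0 : 1;
    }
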
LegalizerInfo::LegalizerInfo() : TablesInitialized(false) {
// Set defaults.
@@ -104,15 +179,16 @@ void LegalizerInfo::computeTables() {
if (TypeIdx < ScalarSizeChangeStrategies[OpcodeIdx].size() &&
ScalarSizeChangeStrategies[OpcodeIdx][TypeIdx] != nullptr)
S = ScalarSizeChangeStrategies[OpcodeIdx][TypeIdx];
- std::sort(ScalarSpecifiedActions.begin(), ScalarSpecifiedActions.end());
+ llvm::sort(ScalarSpecifiedActions.begin(),
+ ScalarSpecifiedActions.end());
checkPartialSizeAndActionsVector(ScalarSpecifiedActions);
setScalarAction(Opcode, TypeIdx, S(ScalarSpecifiedActions));
}
// 2. Handle pointer types
for (auto PointerSpecifiedActions : AddressSpace2SpecifiedActions) {
- std::sort(PointerSpecifiedActions.second.begin(),
- PointerSpecifiedActions.second.end());
+ llvm::sort(PointerSpecifiedActions.second.begin(),
+ PointerSpecifiedActions.second.end());
checkPartialSizeAndActionsVector(PointerSpecifiedActions.second);
// For pointer types, we assume that there isn't a meaningful way
// to change the number of bits used in the pointer.
@@ -124,8 +200,8 @@ void LegalizerInfo::computeTables() {
// 3. Handle vector types
SizeAndActionsVec ElementSizesSeen;
for (auto VectorSpecifiedActions : ElemSize2SpecifiedActions) {
- std::sort(VectorSpecifiedActions.second.begin(),
- VectorSpecifiedActions.second.end());
+ llvm::sort(VectorSpecifiedActions.second.begin(),
+ VectorSpecifiedActions.second.end());
const uint16_t ElementSize = VectorSpecifiedActions.first;
ElementSizesSeen.push_back({ElementSize, Legal});
checkPartialSizeAndActionsVector(VectorSpecifiedActions.second);
@@ -143,7 +219,7 @@ void LegalizerInfo::computeTables() {
Opcode, TypeIdx, ElementSize,
moreToWiderTypesAndLessToWidest(NumElementsActions));
}
- std::sort(ElementSizesSeen.begin(), ElementSizesSeen.end());
+ llvm::sort(ElementSizesSeen.begin(), ElementSizesSeen.end());
SizeChangeStrategy VectorElementSizeChangeStrategy =
&unsupportedForDifferentSizes;
if (TypeIdx < VectorElementSizeChangeStrategies[OpcodeIdx].size() &&
@@ -162,8 +238,8 @@ void LegalizerInfo::computeTables() {
// probably going to need specialized lookup structures for various types before
// we have any hope of doing well with something like <13 x i3>. Even the common
// cases should do better than what we have now.
-std::pair<LegalizerInfo::LegalizeAction, LLT>
-LegalizerInfo::getAction(const InstrAspect &Aspect) const {
+std::pair<LegalizeAction, LLT>
+LegalizerInfo::getAspectAction(const InstrAspect &Aspect) const {
assert(TablesInitialized && "backend forgot to call computeTables");
// These *have* to be implemented for now, they're the fundamental basis of
// how everything else is transformed.
@@ -186,9 +262,88 @@ static LLT getTypeFromTypeIdx(const MachineInstr &MI,
return MRI.getType(MI.getOperand(OpIdx).getReg());
}
-std::tuple<LegalizerInfo::LegalizeAction, unsigned, LLT>
+unsigned LegalizerInfo::getOpcodeIdxForOpcode(unsigned Opcode) const {
+ assert(Opcode >= FirstOp && Opcode <= LastOp && "Unsupported opcode");
+ return Opcode - FirstOp;
+}
+
+unsigned LegalizerInfo::getActionDefinitionsIdx(unsigned Opcode) const {
+ unsigned OpcodeIdx = getOpcodeIdxForOpcode(Opcode);
+ if (unsigned Alias = RulesForOpcode[OpcodeIdx].getAlias()) {
+ LLVM_DEBUG(dbgs() << ".. opcode " << Opcode << " is aliased to " << Alias
+ << "\n");
+ OpcodeIdx = getOpcodeIdxForOpcode(Alias);
+ LLVM_DEBUG(dbgs() << ".. opcode " << Alias << " is aliased to "
+ << RulesForOpcode[OpcodeIdx].getAlias() << "\n");
+ assert(RulesForOpcode[OpcodeIdx].getAlias() == 0 && "Cannot chain aliases");
+ }
+
+ return OpcodeIdx;
+}
+
+const LegalizeRuleSet &
+LegalizerInfo::getActionDefinitions(unsigned Opcode) const {
+ unsigned OpcodeIdx = getActionDefinitionsIdx(Opcode);
+ return RulesForOpcode[OpcodeIdx];
+}
+
+LegalizeRuleSet &LegalizerInfo::getActionDefinitionsBuilder(unsigned Opcode) {
+ unsigned OpcodeIdx = getActionDefinitionsIdx(Opcode);
+ auto &Result = RulesForOpcode[OpcodeIdx];
+  assert(!Result.isAliasedByAnother() &&
+         "Modifying this opcode will modify aliases");
+ return Result;
+}
+
+LegalizeRuleSet &LegalizerInfo::getActionDefinitionsBuilder(
+ std::initializer_list<unsigned> Opcodes) {
+  assert(Opcodes.begin() != Opcodes.end() &&
+         Opcodes.begin() + 1 != Opcodes.end() &&
+         "Initializer list must have at least two opcodes");
+
+  unsigned Representative = *Opcodes.begin();
+
+ for (auto I = Opcodes.begin() + 1, E = Opcodes.end(); I != E; ++I)
+ aliasActionDefinitions(Representative, *I);
+
+ auto &Return = getActionDefinitionsBuilder(Representative);
+ Return.setIsAliasedByAnother();
+ return Return;
+}
+
+void LegalizerInfo::aliasActionDefinitions(unsigned OpcodeTo,
+ unsigned OpcodeFrom) {
+ assert(OpcodeTo != OpcodeFrom && "Cannot alias to self");
+ assert(OpcodeTo >= FirstOp && OpcodeTo <= LastOp && "Unsupported opcode");
+ const unsigned OpcodeFromIdx = getOpcodeIdxForOpcode(OpcodeFrom);
+ RulesForOpcode[OpcodeFromIdx].aliasTo(OpcodeTo);
+}
+
+LegalizeActionStep
+LegalizerInfo::getAction(const LegalityQuery &Query) const {
+ LegalizeActionStep Step = getActionDefinitions(Query.Opcode).apply(Query);
+ if (Step.Action != LegalizeAction::UseLegacyRules) {
+ return Step;
+ }
+
+ for (unsigned i = 0; i < Query.Types.size(); ++i) {
+ auto Action = getAspectAction({Query.Opcode, i, Query.Types[i]});
+ if (Action.first != Legal) {
+ LLVM_DEBUG(dbgs() << ".. (legacy) Type " << i
+ << " Action=" << (unsigned)Action.first << ", "
+ << Action.second << "\n");
+ return {Action.first, i, Action.second};
+ } else
+ LLVM_DEBUG(dbgs() << ".. (legacy) Type " << i << " Legal\n");
+ }
+ LLVM_DEBUG(dbgs() << ".. (legacy) Legal\n");
+ return {Legal, 0, LLT{}};
+}
+
+LegalizeActionStep
LegalizerInfo::getAction(const MachineInstr &MI,
const MachineRegisterInfo &MRI) const {
+ SmallVector<LLT, 2> Types;
SmallBitVector SeenTypes(8);
const MCOperandInfo *OpInfo = MI.getDesc().OpInfo;
// FIXME: probably we'll need to cache the results here somehow?
@@ -205,16 +359,20 @@ LegalizerInfo::getAction(const MachineInstr &MI,
SeenTypes.set(TypeIdx);
LLT Ty = getTypeFromTypeIdx(MI, MRI, i, TypeIdx);
- auto Action = getAction({MI.getOpcode(), TypeIdx, Ty});
- if (Action.first != Legal)
- return std::make_tuple(Action.first, TypeIdx, Action.second);
+ Types.push_back(Ty);
}
- return std::make_tuple(Legal, 0, LLT{});
+
+ SmallVector<LegalityQuery::MemDesc, 2> MemDescrs;
+ for (const auto &MMO : MI.memoperands())
+ MemDescrs.push_back(
+ {MMO->getSize() /* in bytes */ * 8, MMO->getOrdering()});
+
+ return getAction({MI.getOpcode(), Types, MemDescrs});
}
bool LegalizerInfo::isLegal(const MachineInstr &MI,
const MachineRegisterInfo &MRI) const {
- return std::get<0>(getAction(MI, MRI)) == Legal;
+ return getAction(MI, MRI).Action == Legal;
}
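
Editor's note: getActionDefinitionsIdx() resolves at most one level of aliasing: an opcode either owns its rule set or names the representative that does, and chains are rejected by the assert. A standalone sketch of that lookup (toy table; 0 means the opcode owns its rules):

    #include <cassert>
    #include <vector>

    int main() {
      // Alias[i] is the alias target of opcode i, or 0 if i owns its rules.
      std::vector<unsigned> Alias = {0, 0, 1}; // opcode 2 aliases opcode 1
      unsigned Op = 2;
      if (unsigned A = Alias[Op]) {
        Op = A;
        assert(Alias[Op] == 0 && "Cannot chain aliases"); // as asserted above
      }
      assert(Op == 1);
    }
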
bool LegalizerInfo::legalizeCustom(MachineInstr &MI, MachineRegisterInfo &MRI,
@@ -312,17 +470,18 @@ LegalizerInfo::findAction(const SizeAndActionsVec &Vec, const uint32_t Size) {
case Unsupported:
return {Size, Unsupported};
case NotFound:
+ case UseLegacyRules:
llvm_unreachable("NotFound");
}
llvm_unreachable("Action has an unknown enum value");
}
-std::pair<LegalizerInfo::LegalizeAction, LLT>
+std::pair<LegalizeAction, LLT>
LegalizerInfo::findScalarLegalAction(const InstrAspect &Aspect) const {
assert(Aspect.Type.isScalar() || Aspect.Type.isPointer());
if (Aspect.Opcode < FirstOp || Aspect.Opcode > LastOp)
return {NotFound, LLT()};
- const unsigned OpcodeIdx = Aspect.Opcode - FirstOp;
+ const unsigned OpcodeIdx = getOpcodeIdxForOpcode(Aspect.Opcode);
if (Aspect.Type.isPointer() &&
AddrSpace2PointerActions[OpcodeIdx].find(Aspect.Type.getAddressSpace()) ==
AddrSpace2PointerActions[OpcodeIdx].end()) {
@@ -346,14 +505,14 @@ LegalizerInfo::findScalarLegalAction(const InstrAspect &Aspect) const {
SizeAndAction.first)};
}
-std::pair<LegalizerInfo::LegalizeAction, LLT>
+std::pair<LegalizeAction, LLT>
LegalizerInfo::findVectorLegalAction(const InstrAspect &Aspect) const {
assert(Aspect.Type.isVector());
// First legalize the vector element size, then legalize the number of
// lanes in the vector.
if (Aspect.Opcode < FirstOp || Aspect.Opcode > LastOp)
return {NotFound, Aspect.Type};
- const unsigned OpcodeIdx = Aspect.Opcode - FirstOp;
+ const unsigned OpcodeIdx = getOpcodeIdxForOpcode(Aspect.Opcode);
const unsigned TypeIdx = Aspect.Idx;
if (TypeIdx >= ScalarInVectorActions[OpcodeIdx].size())
return {NotFound, Aspect.Type};
@@ -380,3 +539,53 @@ LegalizerInfo::findVectorLegalAction(const InstrAspect &Aspect) const {
LLT::vector(NumElementsAndAction.first,
IntermediateType.getScalarSizeInBits())};
}
+
+/// \pre Type indices of every opcode form a dense set starting from 0.
+void LegalizerInfo::verify(const MCInstrInfo &MII) const {
+#ifndef NDEBUG
+ std::vector<unsigned> FailedOpcodes;
+ for (unsigned Opcode = FirstOp; Opcode <= LastOp; ++Opcode) {
+ const MCInstrDesc &MCID = MII.get(Opcode);
+ const unsigned NumTypeIdxs = std::accumulate(
+ MCID.opInfo_begin(), MCID.opInfo_end(), 0U,
+ [](unsigned Acc, const MCOperandInfo &OpInfo) {
+ return OpInfo.isGenericType()
+ ? std::max(OpInfo.getGenericTypeIndex() + 1U, Acc)
+ : Acc;
+ });
+ LLVM_DEBUG(dbgs() << MII.getName(Opcode) << " (opcode " << Opcode
+ << "): " << NumTypeIdxs << " type ind"
+ << (NumTypeIdxs == 1 ? "ex" : "ices") << "\n");
+ const LegalizeRuleSet &RuleSet = getActionDefinitions(Opcode);
+ if (!RuleSet.verifyTypeIdxsCoverage(NumTypeIdxs))
+ FailedOpcodes.push_back(Opcode);
+ }
+ if (!FailedOpcodes.empty()) {
+ errs() << "The following opcodes have ill-defined legalization rules:";
+ for (unsigned Opcode : FailedOpcodes)
+ errs() << " " << MII.getName(Opcode);
+ errs() << "\n";
+
+ report_fatal_error("ill-defined LegalizerInfo"
+ ", try -debug-only=legalizer-info for details");
+ }
+#endif
+}
+
+#ifndef NDEBUG
+// FIXME: This should be in the MachineVerifier, but it can't use the
+// LegalizerInfo as it's currently in the separate GlobalISel library.
+// Note that the RegBankSelected property, already checked in the verifier,
+// has the same layering problem, but it only uses inline methods so we
+// end up not needing to link against the GlobalISel library.
+const MachineInstr *llvm::machineFunctionIsIllegal(const MachineFunction &MF) {
+ if (const LegalizerInfo *MLI = MF.getSubtarget().getLegalizerInfo()) {
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ for (const MachineBasicBlock &MBB : MF)
+ for (const MachineInstr &MI : MBB)
+ if (isPreISelGenericOpcode(MI.getOpcode()) && !MLI->isLegal(MI, MRI))
+ return &MI;
+ }
+ return nullptr;
+}
+#endif
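
Editor's note: the coverage check needs the number of type indices an opcode uses; verify() derives it as one past the largest generic type index appearing on any operand. A standalone sketch of that accumulate (toy operand list instead of MCOperandInfo; -1 marks a non-generic operand):

    #include <algorithm>
    #include <cassert>
    #include <numeric>
    #include <vector>

    int main() {
      std::vector<int> OpTypeIdx = {0, 0, 1, -1}; // e.g. dst, src, idx, imm
      unsigned NumTypeIdxs = std::accumulate(
          OpTypeIdx.begin(), OpTypeIdx.end(), 0u, [](unsigned Acc, int Idx) {
            return Idx >= 0 ? std::max(unsigned(Idx) + 1u, Acc) : Acc;
          });
      assert(NumTypeIdxs == 2);
    }
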
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/Localizer.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/Localizer.cpp
index 8e16470b6f90..52b340753a50 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/Localizer.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/Localizer.cpp
@@ -44,6 +44,11 @@ bool Localizer::shouldLocalize(const MachineInstr &MI) {
}
}
+void Localizer::getAnalysisUsage(AnalysisUsage &AU) const {
+ getSelectionDAGFallbackAnalysisUsage(AU);
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
bool Localizer::isLocalUse(MachineOperand &MOUse, const MachineInstr &Def,
MachineBasicBlock *&InsertMBB) {
MachineInstr &MIUse = *MOUse.getParent();
@@ -59,7 +64,7 @@ bool Localizer::runOnMachineFunction(MachineFunction &MF) {
MachineFunctionProperties::Property::FailedISel))
return false;
- DEBUG(dbgs() << "Localize instructions for: " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Localize instructions for: " << MF.getName() << '\n');
init(MF);
@@ -73,7 +78,7 @@ bool Localizer::runOnMachineFunction(MachineFunction &MF) {
for (MachineInstr &MI : MBB) {
if (LocalizedInstrs.count(&MI) || !shouldLocalize(MI))
continue;
- DEBUG(dbgs() << "Should localize: " << MI);
+ LLVM_DEBUG(dbgs() << "Should localize: " << MI);
assert(MI.getDesc().getNumDefs() == 1 &&
"More than one definition not supported yet");
unsigned Reg = MI.getOperand(0).getReg();
@@ -85,12 +90,12 @@ bool Localizer::runOnMachineFunction(MachineFunction &MF) {
MachineOperand &MOUse = *MOIt++;
// Check if the use is already local.
MachineBasicBlock *InsertMBB;
- DEBUG(MachineInstr &MIUse = *MOUse.getParent();
- dbgs() << "Checking use: " << MIUse
- << " #Opd: " << MIUse.getOperandNo(&MOUse) << '\n');
+ LLVM_DEBUG(MachineInstr &MIUse = *MOUse.getParent();
+ dbgs() << "Checking use: " << MIUse
+ << " #Opd: " << MIUse.getOperandNo(&MOUse) << '\n');
if (isLocalUse(MOUse, MI, InsertMBB))
continue;
- DEBUG(dbgs() << "Fixing non-local use\n");
+ LLVM_DEBUG(dbgs() << "Fixing non-local use\n");
Changed = true;
auto MBBAndReg = std::make_pair(InsertMBB, Reg);
auto NewVRegIt = MBBWithLocalDef.find(MBBAndReg);
@@ -111,10 +116,10 @@ bool Localizer::runOnMachineFunction(MachineFunction &MF) {
LocalizedMI->getOperand(0).setReg(NewReg);
NewVRegIt =
MBBWithLocalDef.insert(std::make_pair(MBBAndReg, NewReg)).first;
- DEBUG(dbgs() << "Inserted: " << *LocalizedMI);
+ LLVM_DEBUG(dbgs() << "Inserted: " << *LocalizedMI);
}
- DEBUG(dbgs() << "Update use with: " << printReg(NewVRegIt->second)
- << '\n');
+ LLVM_DEBUG(dbgs() << "Update use with: " << printReg(NewVRegIt->second)
+ << '\n');
// Update the user reg.
MOUse.setReg(NewVRegIt->second);
}
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 475bb82e5b9c..9df931eb81b3 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -22,96 +22,103 @@
using namespace llvm;
-void MachineIRBuilder::setMF(MachineFunction &MF) {
- this->MF = &MF;
- this->MBB = nullptr;
- this->MRI = &MF.getRegInfo();
- this->TII = MF.getSubtarget().getInstrInfo();
- this->DL = DebugLoc();
- this->II = MachineBasicBlock::iterator();
- this->InsertedInstr = nullptr;
-}
-
-void MachineIRBuilder::setMBB(MachineBasicBlock &MBB) {
- this->MBB = &MBB;
- this->II = MBB.end();
+void MachineIRBuilderBase::setMF(MachineFunction &MF) {
+ State.MF = &MF;
+ State.MBB = nullptr;
+ State.MRI = &MF.getRegInfo();
+ State.TII = MF.getSubtarget().getInstrInfo();
+ State.DL = DebugLoc();
+ State.II = MachineBasicBlock::iterator();
+ State.InsertedInstr = nullptr;
+}
+
+void MachineIRBuilderBase::setMBB(MachineBasicBlock &MBB) {
+ State.MBB = &MBB;
+ State.II = MBB.end();
assert(&getMF() == MBB.getParent() &&
"Basic block is in a different function");
}
-void MachineIRBuilder::setInstr(MachineInstr &MI) {
+void MachineIRBuilderBase::setInstr(MachineInstr &MI) {
assert(MI.getParent() && "Instruction is not part of a basic block");
setMBB(*MI.getParent());
- this->II = MI.getIterator();
+ State.II = MI.getIterator();
}
-void MachineIRBuilder::setInsertPt(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator II) {
+void MachineIRBuilderBase::setInsertPt(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator II) {
assert(MBB.getParent() == &getMF() &&
"Basic block is in a different function");
- this->MBB = &MBB;
- this->II = II;
+ State.MBB = &MBB;
+ State.II = II;
}
-void MachineIRBuilder::recordInsertions(
+void MachineIRBuilderBase::recordInsertion(MachineInstr *InsertedInstr) const {
+ if (State.InsertedInstr)
+ State.InsertedInstr(InsertedInstr);
+}
+
+void MachineIRBuilderBase::recordInsertions(
std::function<void(MachineInstr *)> Inserted) {
- InsertedInstr = std::move(Inserted);
+ State.InsertedInstr = std::move(Inserted);
}
-void MachineIRBuilder::stopRecordingInsertions() {
- InsertedInstr = nullptr;
+void MachineIRBuilderBase::stopRecordingInsertions() {
+ State.InsertedInstr = nullptr;
}
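
Editor's note: the refactor funnels every notification through recordInsertion(), which fires only while an observer is installed. A toy model of that observer protocol (plain C++; ToyBuilder and the int "instructions" are stand-ins, not the LLVM types):

    #include <functional>
    #include <vector>

    struct ToyBuilder {
      std::function<void(int)> Observer; // plays the role of State.InsertedInstr
      void recordInsertions(std::function<void(int)> F) { Observer = std::move(F); }
      void stopRecordingInsertions() { Observer = nullptr; }
      void insert(int Inst) {
        if (Observer) // recordInsertion(): notify only if someone listens
          Observer(Inst);
      }
    };

    int main() {
      ToyBuilder B;
      std::vector<int> Seen;
      B.recordInsertions([&](int I) { Seen.push_back(I); });
      B.insert(1); // observed
      B.stopRecordingInsertions();
      B.insert(2); // not observed
      return Seen.size() == 1 ? 0 : 1;
    }
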
//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------
-MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opcode) {
+MachineInstrBuilder MachineIRBuilderBase::buildInstr(unsigned Opcode) {
return insertInstr(buildInstrNoInsert(Opcode));
}
-MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
- MachineInstrBuilder MIB = BuildMI(getMF(), DL, getTII().get(Opcode));
+MachineInstrBuilder MachineIRBuilderBase::buildInstrNoInsert(unsigned Opcode) {
+ MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
return MIB;
}
-
-MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
+MachineInstrBuilder MachineIRBuilderBase::insertInstr(MachineInstrBuilder MIB) {
getMBB().insert(getInsertPt(), MIB);
- if (InsertedInstr)
- InsertedInstr(MIB);
+ recordInsertion(MIB);
return MIB;
}
MachineInstrBuilder
-MachineIRBuilder::buildDirectDbgValue(unsigned Reg, const MDNode *Variable,
- const MDNode *Expr) {
+MachineIRBuilderBase::buildDirectDbgValue(unsigned Reg, const MDNode *Variable,
+ const MDNode *Expr) {
assert(isa<DILocalVariable>(Variable) && "not a variable");
assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
- assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
- "Expected inlined-at fields to agree");
- return insertInstr(BuildMI(getMF(), DL, getTII().get(TargetOpcode::DBG_VALUE),
+ assert(
+ cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
+ "Expected inlined-at fields to agree");
+ return insertInstr(BuildMI(getMF(), getDL(),
+ getTII().get(TargetOpcode::DBG_VALUE),
/*IsIndirect*/ false, Reg, Variable, Expr));
}
-MachineInstrBuilder
-MachineIRBuilder::buildIndirectDbgValue(unsigned Reg, const MDNode *Variable,
- const MDNode *Expr) {
+MachineInstrBuilder MachineIRBuilderBase::buildIndirectDbgValue(
+ unsigned Reg, const MDNode *Variable, const MDNode *Expr) {
assert(isa<DILocalVariable>(Variable) && "not a variable");
assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
- assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
- "Expected inlined-at fields to agree");
- return insertInstr(BuildMI(getMF(), DL, getTII().get(TargetOpcode::DBG_VALUE),
+ assert(
+ cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
+ "Expected inlined-at fields to agree");
+ return insertInstr(BuildMI(getMF(), getDL(),
+ getTII().get(TargetOpcode::DBG_VALUE),
/*IsIndirect*/ true, Reg, Variable, Expr));
}
-MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
- const MDNode *Variable,
- const MDNode *Expr) {
+MachineInstrBuilder
+MachineIRBuilderBase::buildFIDbgValue(int FI, const MDNode *Variable,
+ const MDNode *Expr) {
assert(isa<DILocalVariable>(Variable) && "not a variable");
assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
- assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
- "Expected inlined-at fields to agree");
+ assert(
+ cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
+ "Expected inlined-at fields to agree");
return buildInstr(TargetOpcode::DBG_VALUE)
.addFrameIndex(FI)
.addImm(0)
@@ -119,13 +126,13 @@ MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
.addMetadata(Expr);
}
-MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
- const MDNode *Variable,
- const MDNode *Expr) {
+MachineInstrBuilder MachineIRBuilderBase::buildConstDbgValue(
+ const Constant &C, const MDNode *Variable, const MDNode *Expr) {
assert(isa<DILocalVariable>(Variable) && "not a variable");
assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
- assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
- "Expected inlined-at fields to agree");
+ assert(
+ cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
+ "Expected inlined-at fields to agree");
auto MIB = buildInstr(TargetOpcode::DBG_VALUE);
if (auto *CI = dyn_cast<ConstantInt>(&C)) {
if (CI->getBitWidth() > 64)
@@ -142,17 +149,18 @@ MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
return MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
}
-MachineInstrBuilder MachineIRBuilder::buildFrameIndex(unsigned Res, int Idx) {
- assert(MRI->getType(Res).isPointer() && "invalid operand type");
+MachineInstrBuilder MachineIRBuilderBase::buildFrameIndex(unsigned Res,
+ int Idx) {
+ assert(getMRI()->getType(Res).isPointer() && "invalid operand type");
return buildInstr(TargetOpcode::G_FRAME_INDEX)
.addDef(Res)
.addFrameIndex(Idx);
}
-MachineInstrBuilder MachineIRBuilder::buildGlobalValue(unsigned Res,
- const GlobalValue *GV) {
- assert(MRI->getType(Res).isPointer() && "invalid operand type");
- assert(MRI->getType(Res).getAddressSpace() ==
+MachineInstrBuilder
+MachineIRBuilderBase::buildGlobalValue(unsigned Res, const GlobalValue *GV) {
+ assert(getMRI()->getType(Res).isPointer() && "invalid operand type");
+ assert(getMRI()->getType(Res).getAddressSpace() ==
GV->getType()->getAddressSpace() &&
"address space mismatch");
@@ -161,29 +169,20 @@ MachineInstrBuilder MachineIRBuilder::buildGlobalValue(unsigned Res,
.addGlobalAddress(GV);
}
-MachineInstrBuilder MachineIRBuilder::buildBinaryOp(unsigned Opcode, unsigned Res, unsigned Op0,
- unsigned Op1) {
- assert((MRI->getType(Res).isScalar() || MRI->getType(Res).isVector()) &&
+void MachineIRBuilderBase::validateBinaryOp(unsigned Res, unsigned Op0,
+ unsigned Op1) {
+ assert((getMRI()->getType(Res).isScalar() ||
+ getMRI()->getType(Res).isVector()) &&
"invalid operand type");
- assert(MRI->getType(Res) == MRI->getType(Op0) &&
- MRI->getType(Res) == MRI->getType(Op1) && "type mismatch");
-
- return buildInstr(Opcode)
- .addDef(Res)
- .addUse(Op0)
- .addUse(Op1);
-}
-
-MachineInstrBuilder MachineIRBuilder::buildAdd(unsigned Res, unsigned Op0,
- unsigned Op1) {
- return buildBinaryOp(TargetOpcode::G_ADD, Res, Op0, Op1);
+ assert(getMRI()->getType(Res) == getMRI()->getType(Op0) &&
+ getMRI()->getType(Res) == getMRI()->getType(Op1) && "type mismatch");
}
-MachineInstrBuilder MachineIRBuilder::buildGEP(unsigned Res, unsigned Op0,
- unsigned Op1) {
- assert(MRI->getType(Res).isPointer() &&
- MRI->getType(Res) == MRI->getType(Op0) && "type mismatch");
- assert(MRI->getType(Op1).isScalar() && "invalid offset type");
+MachineInstrBuilder MachineIRBuilderBase::buildGEP(unsigned Res, unsigned Op0,
+ unsigned Op1) {
+ assert(getMRI()->getType(Res).isPointer() &&
+ getMRI()->getType(Res) == getMRI()->getType(Op0) && "type mismatch");
+ assert(getMRI()->getType(Op1).isScalar() && "invalid offset type");
return buildInstr(TargetOpcode::G_GEP)
.addDef(Res)
@@ -192,8 +191,8 @@ MachineInstrBuilder MachineIRBuilder::buildGEP(unsigned Res, unsigned Op0,
}
Optional<MachineInstrBuilder>
-MachineIRBuilder::materializeGEP(unsigned &Res, unsigned Op0,
- const LLT &ValueTy, uint64_t Value) {
+MachineIRBuilderBase::materializeGEP(unsigned &Res, unsigned Op0,
+ const LLT &ValueTy, uint64_t Value) {
assert(Res == 0 && "Res is a result argument");
assert(ValueTy.isScalar() && "invalid offset type");
@@ -202,17 +201,18 @@ MachineIRBuilder::materializeGEP(unsigned &Res, unsigned Op0,
return None;
}
- Res = MRI->createGenericVirtualRegister(MRI->getType(Op0));
- unsigned TmpReg = MRI->createGenericVirtualRegister(ValueTy);
+ Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
+ unsigned TmpReg = getMRI()->createGenericVirtualRegister(ValueTy);
buildConstant(TmpReg, Value);
return buildGEP(Res, Op0, TmpReg);
}
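
A usage sketch for materializeGEP (builder and register names assumed): a zero
offset emits no instructions and returns None, so callers should read the
result out of Res rather than rely on the returned builder.

    unsigned Res = 0; // result argument, must be 0 on entry
    if (MIRBuilder.materializeGEP(Res, Ptr, LLT::scalar(64), Offset))
      ; // a G_CONSTANT plus a G_GEP were emitted; Res = Ptr + Offset
    else
      ; // Offset was 0; Res simply refers to Ptr
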
-MachineInstrBuilder MachineIRBuilder::buildPtrMask(unsigned Res, unsigned Op0,
- uint32_t NumBits) {
- assert(MRI->getType(Res).isPointer() &&
- MRI->getType(Res) == MRI->getType(Op0) && "type mismatch");
+MachineInstrBuilder MachineIRBuilderBase::buildPtrMask(unsigned Res,
+ unsigned Op0,
+ uint32_t NumBits) {
+ assert(getMRI()->getType(Res).isPointer() &&
+ getMRI()->getType(Res) == getMRI()->getType(Op0) && "type mismatch");
return buildInstr(TargetOpcode::G_PTR_MASK)
.addDef(Res)
@@ -220,92 +220,88 @@ MachineInstrBuilder MachineIRBuilder::buildPtrMask(unsigned Res, unsigned Op0,
.addImm(NumBits);
}
-MachineInstrBuilder MachineIRBuilder::buildSub(unsigned Res, unsigned Op0,
- unsigned Op1) {
- return buildBinaryOp(TargetOpcode::G_SUB, Res, Op0, Op1);
-}
-
-MachineInstrBuilder MachineIRBuilder::buildMul(unsigned Res, unsigned Op0,
- unsigned Op1) {
- return buildBinaryOp(TargetOpcode::G_MUL, Res, Op0, Op1);
-}
-
-MachineInstrBuilder MachineIRBuilder::buildAnd(unsigned Res, unsigned Op0,
- unsigned Op1) {
- return buildBinaryOp(TargetOpcode::G_AND, Res, Op0, Op1);
-}
-
-MachineInstrBuilder MachineIRBuilder::buildOr(unsigned Res, unsigned Op0,
- unsigned Op1) {
- return buildBinaryOp(TargetOpcode::G_OR, Res, Op0, Op1);
-}
-
-MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
+MachineInstrBuilder MachineIRBuilderBase::buildBr(MachineBasicBlock &Dest) {
return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}
-MachineInstrBuilder MachineIRBuilder::buildBrIndirect(unsigned Tgt) {
- assert(MRI->getType(Tgt).isPointer() && "invalid branch destination");
+MachineInstrBuilder MachineIRBuilderBase::buildBrIndirect(unsigned Tgt) {
+ assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}
-MachineInstrBuilder MachineIRBuilder::buildCopy(unsigned Res, unsigned Op) {
- assert(MRI->getType(Res) == LLT() || MRI->getType(Op) == LLT() ||
- MRI->getType(Res) == MRI->getType(Op));
+MachineInstrBuilder MachineIRBuilderBase::buildCopy(unsigned Res, unsigned Op) {
+ assert(getMRI()->getType(Res) == LLT() || getMRI()->getType(Op) == LLT() ||
+ getMRI()->getType(Res) == getMRI()->getType(Op));
return buildInstr(TargetOpcode::COPY).addDef(Res).addUse(Op);
}
-MachineInstrBuilder MachineIRBuilder::buildConstant(unsigned Res,
- const ConstantInt &Val) {
- LLT Ty = MRI->getType(Res);
+MachineInstrBuilder
+MachineIRBuilderBase::buildConstant(unsigned Res, const ConstantInt &Val) {
+ LLT Ty = getMRI()->getType(Res);
assert((Ty.isScalar() || Ty.isPointer()) && "invalid operand type");
const ConstantInt *NewVal = &Val;
if (Ty.getSizeInBits() != Val.getBitWidth())
- NewVal = ConstantInt::get(MF->getFunction().getContext(),
+ NewVal = ConstantInt::get(getMF().getFunction().getContext(),
Val.getValue().sextOrTrunc(Ty.getSizeInBits()));
return buildInstr(TargetOpcode::G_CONSTANT).addDef(Res).addCImm(NewVal);
}
-MachineInstrBuilder MachineIRBuilder::buildConstant(unsigned Res,
- int64_t Val) {
- auto IntN = IntegerType::get(MF->getFunction().getContext(),
- MRI->getType(Res).getSizeInBits());
+MachineInstrBuilder MachineIRBuilderBase::buildConstant(unsigned Res,
+ int64_t Val) {
+ auto IntN = IntegerType::get(getMF().getFunction().getContext(),
+ getMRI()->getType(Res).getSizeInBits());
ConstantInt *CI = ConstantInt::get(IntN, Val, true);
return buildConstant(Res, *CI);
}
-MachineInstrBuilder MachineIRBuilder::buildFConstant(unsigned Res,
- const ConstantFP &Val) {
- assert(MRI->getType(Res).isScalar() && "invalid operand type");
+MachineInstrBuilder
+MachineIRBuilderBase::buildFConstant(unsigned Res, const ConstantFP &Val) {
+ assert(getMRI()->getType(Res).isScalar() && "invalid operand type");
return buildInstr(TargetOpcode::G_FCONSTANT).addDef(Res).addFPImm(&Val);
}
-MachineInstrBuilder MachineIRBuilder::buildBrCond(unsigned Tst,
- MachineBasicBlock &Dest) {
- assert(MRI->getType(Tst).isScalar() && "invalid operand type");
+MachineInstrBuilder MachineIRBuilderBase::buildFConstant(unsigned Res,
+ double Val) {
+ LLT DstTy = getMRI()->getType(Res);
+ auto &Ctx = getMF().getFunction().getContext();
+ auto *CFP =
+ ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getSizeInBits()));
+ return buildFConstant(Res, *CFP);
+}
+
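
The new double overload sizes the immediate from the destination register's
LLT via getAPFloatFromSize (added to Utils.cpp further down in this diff). A
sketch with assumed names:

    unsigned Res = MRI.createGenericVirtualRegister(LLT::scalar(16));
    MIRBuilder.buildFConstant(Res, 1.0); // G_FCONSTANT carrying IEEE-half 1.0
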
+MachineInstrBuilder MachineIRBuilderBase::buildBrCond(unsigned Tst,
+ MachineBasicBlock &Dest) {
+ assert(getMRI()->getType(Tst).isScalar() && "invalid operand type");
return buildInstr(TargetOpcode::G_BRCOND).addUse(Tst).addMBB(&Dest);
}
-MachineInstrBuilder MachineIRBuilder::buildLoad(unsigned Res, unsigned Addr,
- MachineMemOperand &MMO) {
- assert(MRI->getType(Res).isValid() && "invalid operand type");
- assert(MRI->getType(Addr).isPointer() && "invalid operand type");
+MachineInstrBuilder MachineIRBuilderBase::buildLoad(unsigned Res, unsigned Addr,
+ MachineMemOperand &MMO) {
+ return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
+}
+
+MachineInstrBuilder
+MachineIRBuilderBase::buildLoadInstr(unsigned Opcode, unsigned Res,
+ unsigned Addr, MachineMemOperand &MMO) {
+ assert(getMRI()->getType(Res).isValid() && "invalid operand type");
+ assert(getMRI()->getType(Addr).isPointer() && "invalid operand type");
- return buildInstr(TargetOpcode::G_LOAD)
+ return buildInstr(Opcode)
.addDef(Res)
.addUse(Addr)
.addMemOperand(&MMO);
}
-MachineInstrBuilder MachineIRBuilder::buildStore(unsigned Val, unsigned Addr,
- MachineMemOperand &MMO) {
- assert(MRI->getType(Val).isValid() && "invalid operand type");
- assert(MRI->getType(Addr).isPointer() && "invalid operand type");
+MachineInstrBuilder MachineIRBuilderBase::buildStore(unsigned Val,
+ unsigned Addr,
+ MachineMemOperand &MMO) {
+ assert(getMRI()->getType(Val).isValid() && "invalid operand type");
+ assert(getMRI()->getType(Addr).isPointer() && "invalid operand type");
return buildInstr(TargetOpcode::G_STORE)
.addUse(Val)
@@ -313,15 +309,16 @@ MachineInstrBuilder MachineIRBuilder::buildStore(unsigned Val, unsigned Addr,
.addMemOperand(&MMO);
}
-MachineInstrBuilder MachineIRBuilder::buildUAdde(unsigned Res,
- unsigned CarryOut,
- unsigned Op0, unsigned Op1,
- unsigned CarryIn) {
- assert(MRI->getType(Res).isScalar() && "invalid operand type");
- assert(MRI->getType(Res) == MRI->getType(Op0) &&
- MRI->getType(Res) == MRI->getType(Op1) && "type mismatch");
- assert(MRI->getType(CarryOut).isScalar() && "invalid operand type");
- assert(MRI->getType(CarryOut) == MRI->getType(CarryIn) && "type mismatch");
+MachineInstrBuilder MachineIRBuilderBase::buildUAdde(unsigned Res,
+ unsigned CarryOut,
+ unsigned Op0, unsigned Op1,
+ unsigned CarryIn) {
+ assert(getMRI()->getType(Res).isScalar() && "invalid operand type");
+ assert(getMRI()->getType(Res) == getMRI()->getType(Op0) &&
+ getMRI()->getType(Res) == getMRI()->getType(Op1) && "type mismatch");
+ assert(getMRI()->getType(CarryOut).isScalar() && "invalid operand type");
+ assert(getMRI()->getType(CarryOut) == getMRI()->getType(CarryIn) &&
+ "type mismatch");
return buildInstr(TargetOpcode::G_UADDE)
.addDef(Res)
@@ -331,58 +328,64 @@ MachineInstrBuilder MachineIRBuilder::buildUAdde(unsigned Res,
.addUse(CarryIn);
}
-MachineInstrBuilder MachineIRBuilder::buildAnyExt(unsigned Res, unsigned Op) {
+MachineInstrBuilder MachineIRBuilderBase::buildAnyExt(unsigned Res,
+ unsigned Op) {
validateTruncExt(Res, Op, true);
return buildInstr(TargetOpcode::G_ANYEXT).addDef(Res).addUse(Op);
}
-MachineInstrBuilder MachineIRBuilder::buildSExt(unsigned Res, unsigned Op) {
+MachineInstrBuilder MachineIRBuilderBase::buildSExt(unsigned Res, unsigned Op) {
validateTruncExt(Res, Op, true);
return buildInstr(TargetOpcode::G_SEXT).addDef(Res).addUse(Op);
}
-MachineInstrBuilder MachineIRBuilder::buildZExt(unsigned Res, unsigned Op) {
+MachineInstrBuilder MachineIRBuilderBase::buildZExt(unsigned Res, unsigned Op) {
validateTruncExt(Res, Op, true);
return buildInstr(TargetOpcode::G_ZEXT).addDef(Res).addUse(Op);
}
-MachineInstrBuilder
-MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc, unsigned Res, unsigned Op) {
+MachineInstrBuilder MachineIRBuilderBase::buildExtOrTrunc(unsigned ExtOpc,
+ unsigned Res,
+ unsigned Op) {
assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
TargetOpcode::G_SEXT == ExtOpc) &&
"Expecting Extending Opc");
- assert(MRI->getType(Res).isScalar() || MRI->getType(Res).isVector());
- assert(MRI->getType(Res).isScalar() == MRI->getType(Op).isScalar());
+ assert(getMRI()->getType(Res).isScalar() ||
+ getMRI()->getType(Res).isVector());
+ assert(getMRI()->getType(Res).isScalar() == getMRI()->getType(Op).isScalar());
unsigned Opcode = TargetOpcode::COPY;
- if (MRI->getType(Res).getSizeInBits() > MRI->getType(Op).getSizeInBits())
+ if (getMRI()->getType(Res).getSizeInBits() >
+ getMRI()->getType(Op).getSizeInBits())
Opcode = ExtOpc;
- else if (MRI->getType(Res).getSizeInBits() < MRI->getType(Op).getSizeInBits())
+ else if (getMRI()->getType(Res).getSizeInBits() <
+ getMRI()->getType(Op).getSizeInBits())
Opcode = TargetOpcode::G_TRUNC;
else
- assert(MRI->getType(Res) == MRI->getType(Op));
+ assert(getMRI()->getType(Res) == getMRI()->getType(Op));
return buildInstr(Opcode).addDef(Res).addUse(Op);
}
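
buildExtOrTrunc compares the two LLT widths and picks the extension opcode, a
G_TRUNC, or a plain COPY, so the *OrTrunc wrappers below coerce a value to a
target width without size checks at the call site. Sketch (names assumed):

    unsigned Wide = MRI.createGenericVirtualRegister(LLT::scalar(64));
    MIRBuilder.buildSExtOrTrunc(Wide, Src); // G_SEXT if Src is narrower,
                                            // G_TRUNC if wider, COPY if equal
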
-MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(unsigned Res,
- unsigned Op) {
+MachineInstrBuilder MachineIRBuilderBase::buildSExtOrTrunc(unsigned Res,
+ unsigned Op) {
return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}
-MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(unsigned Res,
- unsigned Op) {
+MachineInstrBuilder MachineIRBuilderBase::buildZExtOrTrunc(unsigned Res,
+ unsigned Op) {
return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}
-MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(unsigned Res,
- unsigned Op) {
+MachineInstrBuilder MachineIRBuilderBase::buildAnyExtOrTrunc(unsigned Res,
+ unsigned Op) {
return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}
-MachineInstrBuilder MachineIRBuilder::buildCast(unsigned Dst, unsigned Src) {
- LLT SrcTy = MRI->getType(Src);
- LLT DstTy = MRI->getType(Dst);
+MachineInstrBuilder MachineIRBuilderBase::buildCast(unsigned Dst,
+ unsigned Src) {
+ LLT SrcTy = getMRI()->getType(Src);
+ LLT DstTy = getMRI()->getType(Dst);
if (SrcTy == DstTy)
return buildCopy(Dst, Src);
@@ -399,17 +402,18 @@ MachineInstrBuilder MachineIRBuilder::buildCast(unsigned Dst, unsigned Src) {
return buildInstr(Opcode).addDef(Dst).addUse(Src);
}
-MachineInstrBuilder MachineIRBuilder::buildExtract(unsigned Res, unsigned Src,
- uint64_t Index) {
+MachineInstrBuilder
+MachineIRBuilderBase::buildExtract(unsigned Res, unsigned Src, uint64_t Index) {
#ifndef NDEBUG
- assert(MRI->getType(Src).isValid() && "invalid operand type");
- assert(MRI->getType(Res).isValid() && "invalid operand type");
- assert(Index + MRI->getType(Res).getSizeInBits() <=
- MRI->getType(Src).getSizeInBits() &&
+ assert(getMRI()->getType(Src).isValid() && "invalid operand type");
+ assert(getMRI()->getType(Res).isValid() && "invalid operand type");
+ assert(Index + getMRI()->getType(Res).getSizeInBits() <=
+ getMRI()->getType(Src).getSizeInBits() &&
"extracting off end of register");
#endif
- if (MRI->getType(Res).getSizeInBits() == MRI->getType(Src).getSizeInBits()) {
+ if (getMRI()->getType(Res).getSizeInBits() ==
+ getMRI()->getType(Src).getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
return buildCast(Res, Src);
}
@@ -420,25 +424,25 @@ MachineInstrBuilder MachineIRBuilder::buildExtract(unsigned Res, unsigned Src,
.addImm(Index);
}
-void MachineIRBuilder::buildSequence(unsigned Res, ArrayRef<unsigned> Ops,
- ArrayRef<uint64_t> Indices) {
+void MachineIRBuilderBase::buildSequence(unsigned Res, ArrayRef<unsigned> Ops,
+ ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
assert(Ops.size() == Indices.size() && "incompatible args");
assert(!Ops.empty() && "invalid trivial sequence");
assert(std::is_sorted(Indices.begin(), Indices.end()) &&
"sequence offsets must be in ascending order");
- assert(MRI->getType(Res).isValid() && "invalid operand type");
+ assert(getMRI()->getType(Res).isValid() && "invalid operand type");
for (auto Op : Ops)
- assert(MRI->getType(Op).isValid() && "invalid operand type");
+ assert(getMRI()->getType(Op).isValid() && "invalid operand type");
#endif
- LLT ResTy = MRI->getType(Res);
- LLT OpTy = MRI->getType(Ops[0]);
+ LLT ResTy = getMRI()->getType(Res);
+ LLT OpTy = getMRI()->getType(Ops[0]);
unsigned OpSize = OpTy.getSizeInBits();
bool MaybeMerge = true;
for (unsigned i = 0; i < Ops.size(); ++i) {
- if (MRI->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
+ if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
MaybeMerge = false;
break;
}
@@ -449,31 +453,32 @@ void MachineIRBuilder::buildSequence(unsigned Res, ArrayRef<unsigned> Ops,
return;
}
- unsigned ResIn = MRI->createGenericVirtualRegister(ResTy);
+ unsigned ResIn = getMRI()->createGenericVirtualRegister(ResTy);
buildUndef(ResIn);
for (unsigned i = 0; i < Ops.size(); ++i) {
- unsigned ResOut =
- i + 1 == Ops.size() ? Res : MRI->createGenericVirtualRegister(ResTy);
+ unsigned ResOut = i + 1 == Ops.size()
+ ? Res
+ : getMRI()->createGenericVirtualRegister(ResTy);
buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
ResIn = ResOut;
}
}
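
buildSequence emits a single G_MERGE_VALUES when all parts share one type and
are packed contiguously, and otherwise falls back to a G_IMPLICIT_DEF followed
by a chain of G_INSERTs. Sketch (names assumed):

    // Two s32 halves at offsets 0 and 32 merge into one s64.
    MIRBuilder.buildSequence(Res64, {Lo, Hi}, {0, 32});
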
-MachineInstrBuilder MachineIRBuilder::buildUndef(unsigned Res) {
+MachineInstrBuilder MachineIRBuilderBase::buildUndef(unsigned Res) {
return buildInstr(TargetOpcode::G_IMPLICIT_DEF).addDef(Res);
}
-MachineInstrBuilder MachineIRBuilder::buildMerge(unsigned Res,
- ArrayRef<unsigned> Ops) {
+MachineInstrBuilder MachineIRBuilderBase::buildMerge(unsigned Res,
+ ArrayRef<unsigned> Ops) {
#ifndef NDEBUG
assert(!Ops.empty() && "invalid trivial sequence");
- LLT Ty = MRI->getType(Ops[0]);
+ LLT Ty = getMRI()->getType(Ops[0]);
for (auto Reg : Ops)
- assert(MRI->getType(Reg) == Ty && "type mismatch in input list");
- assert(Ops.size() * MRI->getType(Ops[0]).getSizeInBits() ==
- MRI->getType(Res).getSizeInBits() &&
+ assert(getMRI()->getType(Reg) == Ty && "type mismatch in input list");
+ assert(Ops.size() * getMRI()->getType(Ops[0]).getSizeInBits() ==
+ getMRI()->getType(Res).getSizeInBits() &&
"input operands do not cover output register");
#endif
@@ -487,16 +492,16 @@ MachineInstrBuilder MachineIRBuilder::buildMerge(unsigned Res,
return MIB;
}
-MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<unsigned> Res,
- unsigned Op) {
+MachineInstrBuilder MachineIRBuilderBase::buildUnmerge(ArrayRef<unsigned> Res,
+ unsigned Op) {
#ifndef NDEBUG
assert(!Res.empty() && "invalid trivial sequence");
- LLT Ty = MRI->getType(Res[0]);
+ LLT Ty = getMRI()->getType(Res[0]);
for (auto Reg : Res)
- assert(MRI->getType(Reg) == Ty && "type mismatch in input list");
- assert(Res.size() * MRI->getType(Res[0]).getSizeInBits() ==
- MRI->getType(Op).getSizeInBits() &&
+ assert(getMRI()->getType(Reg) == Ty && "type mismatch in input list");
+ assert(Res.size() * getMRI()->getType(Res[0]).getSizeInBits() ==
+ getMRI()->getType(Op).getSizeInBits() &&
"input operands do not cover output register");
#endif
@@ -507,13 +512,15 @@ MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<unsigned> Res,
return MIB;
}
-MachineInstrBuilder MachineIRBuilder::buildInsert(unsigned Res, unsigned Src,
- unsigned Op, unsigned Index) {
- assert(Index + MRI->getType(Op).getSizeInBits() <=
- MRI->getType(Res).getSizeInBits() &&
+MachineInstrBuilder MachineIRBuilderBase::buildInsert(unsigned Res,
+ unsigned Src, unsigned Op,
+ unsigned Index) {
+ assert(Index + getMRI()->getType(Op).getSizeInBits() <=
+ getMRI()->getType(Res).getSizeInBits() &&
"insertion past the end of a register");
- if (MRI->getType(Res).getSizeInBits() == MRI->getType(Op).getSizeInBits()) {
+ if (getMRI()->getType(Res).getSizeInBits() ==
+ getMRI()->getType(Op).getSizeInBits()) {
return buildCast(Res, Op);
}
@@ -524,9 +531,9 @@ MachineInstrBuilder MachineIRBuilder::buildInsert(unsigned Res, unsigned Src,
.addImm(Index);
}
-MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
- unsigned Res,
- bool HasSideEffects) {
+MachineInstrBuilder MachineIRBuilderBase::buildIntrinsic(Intrinsic::ID ID,
+ unsigned Res,
+ bool HasSideEffects) {
auto MIB =
buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
: TargetOpcode::G_INTRINSIC);
@@ -536,28 +543,30 @@ MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
return MIB;
}
-MachineInstrBuilder MachineIRBuilder::buildTrunc(unsigned Res, unsigned Op) {
+MachineInstrBuilder MachineIRBuilderBase::buildTrunc(unsigned Res,
+ unsigned Op) {
validateTruncExt(Res, Op, false);
return buildInstr(TargetOpcode::G_TRUNC).addDef(Res).addUse(Op);
}
-MachineInstrBuilder MachineIRBuilder::buildFPTrunc(unsigned Res, unsigned Op) {
+MachineInstrBuilder MachineIRBuilderBase::buildFPTrunc(unsigned Res,
+ unsigned Op) {
validateTruncExt(Res, Op, false);
return buildInstr(TargetOpcode::G_FPTRUNC).addDef(Res).addUse(Op);
}
-MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
- unsigned Res, unsigned Op0,
- unsigned Op1) {
+MachineInstrBuilder MachineIRBuilderBase::buildICmp(CmpInst::Predicate Pred,
+ unsigned Res, unsigned Op0,
+ unsigned Op1) {
#ifndef NDEBUG
- assert(MRI->getType(Op0) == MRI->getType(Op0) && "type mismatch");
+  assert(getMRI()->getType(Op0) == getMRI()->getType(Op1) && "type mismatch");
assert(CmpInst::isIntPredicate(Pred) && "invalid predicate");
- if (MRI->getType(Op0).isScalar() || MRI->getType(Op0).isPointer())
- assert(MRI->getType(Res).isScalar() && "type mismatch");
+ if (getMRI()->getType(Op0).isScalar() || getMRI()->getType(Op0).isPointer())
+ assert(getMRI()->getType(Res).isScalar() && "type mismatch");
else
- assert(MRI->getType(Res).isVector() &&
- MRI->getType(Res).getNumElements() ==
- MRI->getType(Op0).getNumElements() &&
+ assert(getMRI()->getType(Res).isVector() &&
+ getMRI()->getType(Res).getNumElements() ==
+ getMRI()->getType(Op0).getNumElements() &&
"type mismatch");
#endif
@@ -568,20 +577,21 @@ MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
.addUse(Op1);
}
-MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
- unsigned Res, unsigned Op0,
- unsigned Op1) {
+MachineInstrBuilder MachineIRBuilderBase::buildFCmp(CmpInst::Predicate Pred,
+ unsigned Res, unsigned Op0,
+ unsigned Op1) {
#ifndef NDEBUG
- assert((MRI->getType(Op0).isScalar() || MRI->getType(Op0).isVector()) &&
+ assert((getMRI()->getType(Op0).isScalar() ||
+ getMRI()->getType(Op0).isVector()) &&
"invalid operand type");
- assert(MRI->getType(Op0) == MRI->getType(Op1) && "type mismatch");
+ assert(getMRI()->getType(Op0) == getMRI()->getType(Op1) && "type mismatch");
assert(CmpInst::isFPPredicate(Pred) && "invalid predicate");
- if (MRI->getType(Op0).isScalar())
- assert(MRI->getType(Res).isScalar() && "type mismatch");
+ if (getMRI()->getType(Op0).isScalar())
+ assert(getMRI()->getType(Res).isScalar() && "type mismatch");
else
- assert(MRI->getType(Res).isVector() &&
- MRI->getType(Res).getNumElements() ==
- MRI->getType(Op0).getNumElements() &&
+ assert(getMRI()->getType(Res).isVector() &&
+ getMRI()->getType(Res).getNumElements() ==
+ getMRI()->getType(Op0).getNumElements() &&
"type mismatch");
#endif
@@ -592,21 +602,23 @@ MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
.addUse(Op1);
}
-MachineInstrBuilder MachineIRBuilder::buildSelect(unsigned Res, unsigned Tst,
- unsigned Op0, unsigned Op1) {
+MachineInstrBuilder MachineIRBuilderBase::buildSelect(unsigned Res,
+ unsigned Tst,
+ unsigned Op0,
+ unsigned Op1) {
#ifndef NDEBUG
- LLT ResTy = MRI->getType(Res);
+ LLT ResTy = getMRI()->getType(Res);
assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
"invalid operand type");
- assert(ResTy == MRI->getType(Op0) && ResTy == MRI->getType(Op1) &&
+ assert(ResTy == getMRI()->getType(Op0) && ResTy == getMRI()->getType(Op1) &&
"type mismatch");
if (ResTy.isScalar() || ResTy.isPointer())
- assert(MRI->getType(Tst).isScalar() && "type mismatch");
+ assert(getMRI()->getType(Tst).isScalar() && "type mismatch");
else
- assert((MRI->getType(Tst).isScalar() ||
- (MRI->getType(Tst).isVector() &&
- MRI->getType(Tst).getNumElements() ==
- MRI->getType(Op0).getNumElements())) &&
+ assert((getMRI()->getType(Tst).isScalar() ||
+ (getMRI()->getType(Tst).isVector() &&
+ getMRI()->getType(Tst).getNumElements() ==
+ getMRI()->getType(Op0).getNumElements())) &&
"type mismatch");
#endif
@@ -617,15 +629,14 @@ MachineInstrBuilder MachineIRBuilder::buildSelect(unsigned Res, unsigned Tst,
.addUse(Op1);
}
-MachineInstrBuilder MachineIRBuilder::buildInsertVectorElement(unsigned Res,
- unsigned Val,
- unsigned Elt,
- unsigned Idx) {
+MachineInstrBuilder
+MachineIRBuilderBase::buildInsertVectorElement(unsigned Res, unsigned Val,
+ unsigned Elt, unsigned Idx) {
#ifndef NDEBUG
- LLT ResTy = MRI->getType(Res);
- LLT ValTy = MRI->getType(Val);
- LLT EltTy = MRI->getType(Elt);
- LLT IdxTy = MRI->getType(Idx);
+ LLT ResTy = getMRI()->getType(Res);
+ LLT ValTy = getMRI()->getType(Val);
+ LLT EltTy = getMRI()->getType(Elt);
+ LLT IdxTy = getMRI()->getType(Idx);
assert(ResTy.isVector() && ValTy.isVector() && "invalid operand type");
assert(IdxTy.isScalar() && "invalid operand type");
assert(ResTy.getNumElements() == ValTy.getNumElements() && "type mismatch");
@@ -639,13 +650,13 @@ MachineInstrBuilder MachineIRBuilder::buildInsertVectorElement(unsigned Res,
.addUse(Idx);
}
-MachineInstrBuilder MachineIRBuilder::buildExtractVectorElement(unsigned Res,
- unsigned Val,
- unsigned Idx) {
+MachineInstrBuilder
+MachineIRBuilderBase::buildExtractVectorElement(unsigned Res, unsigned Val,
+ unsigned Idx) {
#ifndef NDEBUG
- LLT ResTy = MRI->getType(Res);
- LLT ValTy = MRI->getType(Val);
- LLT IdxTy = MRI->getType(Idx);
+ LLT ResTy = getMRI()->getType(Res);
+ LLT ValTy = getMRI()->getType(Val);
+ LLT IdxTy = getMRI()->getType(Idx);
assert(ValTy.isVector() && "invalid operand type");
assert((ResTy.isScalar() || ResTy.isPointer()) && "invalid operand type");
assert(IdxTy.isScalar() && "invalid operand type");
@@ -658,15 +669,42 @@ MachineInstrBuilder MachineIRBuilder::buildExtractVectorElement(unsigned Res,
.addUse(Idx);
}
+MachineInstrBuilder MachineIRBuilderBase::buildAtomicCmpXchgWithSuccess(
+ unsigned OldValRes, unsigned SuccessRes, unsigned Addr, unsigned CmpVal,
+ unsigned NewVal, MachineMemOperand &MMO) {
+#ifndef NDEBUG
+ LLT OldValResTy = getMRI()->getType(OldValRes);
+ LLT SuccessResTy = getMRI()->getType(SuccessRes);
+ LLT AddrTy = getMRI()->getType(Addr);
+ LLT CmpValTy = getMRI()->getType(CmpVal);
+ LLT NewValTy = getMRI()->getType(NewVal);
+ assert(OldValResTy.isScalar() && "invalid operand type");
+ assert(SuccessResTy.isScalar() && "invalid operand type");
+ assert(AddrTy.isPointer() && "invalid operand type");
+ assert(CmpValTy.isValid() && "invalid operand type");
+ assert(NewValTy.isValid() && "invalid operand type");
+ assert(OldValResTy == CmpValTy && "type mismatch");
+ assert(OldValResTy == NewValTy && "type mismatch");
+#endif
+
+ return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
+ .addDef(OldValRes)
+ .addDef(SuccessRes)
+ .addUse(Addr)
+ .addUse(CmpVal)
+ .addUse(NewVal)
+ .addMemOperand(&MMO);
+}
+
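
This carries both results of an IR-level cmpxchg in one generic instruction; a
translator would use it roughly as follows (register names assumed):

    MIRBuilder.buildAtomicCmpXchgWithSuccess(OldVal, Success, Addr, Cmp, New,
                                             *MMO);
    // i.e. OldVal, Success = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, Cmp, New
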
MachineInstrBuilder
-MachineIRBuilder::buildAtomicCmpXchg(unsigned OldValRes, unsigned Addr,
- unsigned CmpVal, unsigned NewVal,
- MachineMemOperand &MMO) {
+MachineIRBuilderBase::buildAtomicCmpXchg(unsigned OldValRes, unsigned Addr,
+ unsigned CmpVal, unsigned NewVal,
+ MachineMemOperand &MMO) {
#ifndef NDEBUG
- LLT OldValResTy = MRI->getType(OldValRes);
- LLT AddrTy = MRI->getType(Addr);
- LLT CmpValTy = MRI->getType(CmpVal);
- LLT NewValTy = MRI->getType(NewVal);
+ LLT OldValResTy = getMRI()->getType(OldValRes);
+ LLT AddrTy = getMRI()->getType(Addr);
+ LLT CmpValTy = getMRI()->getType(CmpVal);
+ LLT NewValTy = getMRI()->getType(NewVal);
assert(OldValResTy.isScalar() && "invalid operand type");
assert(AddrTy.isPointer() && "invalid operand type");
assert(CmpValTy.isValid() && "invalid operand type");
@@ -683,14 +721,102 @@ MachineIRBuilder::buildAtomicCmpXchg(unsigned OldValRes, unsigned Addr,
.addMemOperand(&MMO);
}
-void MachineIRBuilder::validateTruncExt(unsigned Dst, unsigned Src,
- bool IsExtend) {
+MachineInstrBuilder
+MachineIRBuilderBase::buildAtomicRMW(unsigned Opcode, unsigned OldValRes,
+ unsigned Addr, unsigned Val,
+ MachineMemOperand &MMO) {
+#ifndef NDEBUG
+ LLT OldValResTy = getMRI()->getType(OldValRes);
+ LLT AddrTy = getMRI()->getType(Addr);
+ LLT ValTy = getMRI()->getType(Val);
+ assert(OldValResTy.isScalar() && "invalid operand type");
+ assert(AddrTy.isPointer() && "invalid operand type");
+ assert(ValTy.isValid() && "invalid operand type");
+ assert(OldValResTy == ValTy && "type mismatch");
+#endif
+
+ return buildInstr(Opcode)
+ .addDef(OldValRes)
+ .addUse(Addr)
+ .addUse(Val)
+ .addMemOperand(&MMO);
+}
+
+MachineInstrBuilder
+MachineIRBuilderBase::buildAtomicRMWXchg(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder
+MachineIRBuilderBase::buildAtomicRMWAdd(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder
+MachineIRBuilderBase::buildAtomicRMWSub(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder
+MachineIRBuilderBase::buildAtomicRMWAnd(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder
+MachineIRBuilderBase::buildAtomicRMWNand(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder
+MachineIRBuilderBase::buildAtomicRMWOr(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder
+MachineIRBuilderBase::buildAtomicRMWXor(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder
+MachineIRBuilderBase::buildAtomicRMWMax(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder
+MachineIRBuilderBase::buildAtomicRMWMin(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder
+MachineIRBuilderBase::buildAtomicRMWUmax(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
+ MMO);
+}
+MachineInstrBuilder
+MachineIRBuilderBase::buildAtomicRMWUmin(unsigned OldValRes, unsigned Addr,
+ unsigned Val, MachineMemOperand &MMO) {
+ return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
+ MMO);
+}
+
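
Each wrapper above is a thin alias onto buildAtomicRMW with the matching
G_ATOMICRMW_* opcode, e.g. (names assumed):

    MIRBuilder.buildAtomicRMWAdd(OldVal, Addr, Val, *MMO);
    // same as buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldVal, Addr,
    //                        Val, *MMO)
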
+void MachineIRBuilderBase::validateTruncExt(unsigned Dst, unsigned Src,
+ bool IsExtend) {
#ifndef NDEBUG
- LLT SrcTy = MRI->getType(Src);
- LLT DstTy = MRI->getType(Dst);
+ LLT SrcTy = getMRI()->getType(Src);
+ LLT DstTy = getMRI()->getType(Dst);
if (DstTy.isVector()) {
- assert(SrcTy.isVector() && "mismatched cast between vecot and non-vector");
+ assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
"different number of elements in a trunc/ext");
} else
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
index 006c9ea23034..9e2d48d1dc42 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
@@ -30,6 +30,7 @@
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
@@ -75,7 +76,7 @@ RegBankSelect::RegBankSelect(Mode RunningMode)
if (RegBankSelectMode.getNumOccurrences() != 0) {
OptMode = RegBankSelectMode;
if (RegBankSelectMode != RunningMode)
- DEBUG(dbgs() << "RegBankSelect mode overrided by command line\n");
+      LLVM_DEBUG(dbgs() << "RegBankSelect mode overridden by command line\n");
}
}
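
The DEBUG -> LLVM_DEBUG changes throughout this diff track the tree-wide macro
rename (hence the new llvm-config.h includes); behavior is unchanged, with the
output still gated on an asserts build and the pass's DEBUG_TYPE, e.g. (an
assumed invocation):

    llc -debug-only=regbankselect ... # prints these dbgs() messages
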
@@ -104,6 +105,7 @@ void RegBankSelect::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<MachineBranchProbabilityInfo>();
}
AU.addRequired<TargetPassConfig>();
+ getSelectionDAGFallbackAnalysisUsage(AU);
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -122,11 +124,11 @@ bool RegBankSelect::assignmentMatch(
// Reg is free of assignment, a simple assignment will make the
// register bank to match.
OnlyAssign = CurRegBank == nullptr;
- DEBUG(dbgs() << "Does assignment already match: ";
- if (CurRegBank) dbgs() << *CurRegBank; else dbgs() << "none";
- dbgs() << " against ";
- assert(DesiredRegBrank && "The mapping must be valid");
- dbgs() << *DesiredRegBrank << '\n';);
+ LLVM_DEBUG(dbgs() << "Does assignment already match: ";
+ if (CurRegBank) dbgs() << *CurRegBank; else dbgs() << "none";
+ dbgs() << " against ";
+ assert(DesiredRegBrank && "The mapping must be valid");
+ dbgs() << *DesiredRegBrank << '\n';);
return CurRegBank == DesiredRegBrank;
}
@@ -159,8 +161,8 @@ bool RegBankSelect::repairReg(
// same types because the type is a placeholder when this function is called.
MachineInstr *MI =
MIRBuilder.buildInstrNoInsert(TargetOpcode::COPY).addDef(Dst).addUse(Src);
- DEBUG(dbgs() << "Copy: " << printReg(Src) << " to: " << printReg(Dst)
- << '\n');
+ LLVM_DEBUG(dbgs() << "Copy: " << printReg(Src) << " to: " << printReg(Dst)
+ << '\n');
// TODO:
// Check if MI is legal. if not, we need to legalize all the
// instructions we are going to insert.
@@ -245,7 +247,7 @@ const RegisterBankInfo::InstructionMapping &RegBankSelect::findBestMapping(
MappingCost CurCost =
computeMapping(MI, *CurMapping, LocalRepairPts, &Cost);
if (CurCost < Cost) {
- DEBUG(dbgs() << "New best: " << CurCost << '\n');
+ LLVM_DEBUG(dbgs() << "New best: " << CurCost << '\n');
Cost = CurCost;
BestMapping = CurMapping;
RepairPts.clear();
@@ -397,11 +399,11 @@ RegBankSelect::MappingCost RegBankSelect::computeMapping(
MappingCost Cost(MBFI ? MBFI->getBlockFreq(MI.getParent()) : 1);
bool Saturated = Cost.addLocalCost(InstrMapping.getCost());
assert(!Saturated && "Possible mapping saturated the cost");
- DEBUG(dbgs() << "Evaluating mapping cost for: " << MI);
- DEBUG(dbgs() << "With: " << InstrMapping << '\n');
+ LLVM_DEBUG(dbgs() << "Evaluating mapping cost for: " << MI);
+ LLVM_DEBUG(dbgs() << "With: " << InstrMapping << '\n');
RepairPts.clear();
if (BestCost && Cost > *BestCost) {
- DEBUG(dbgs() << "Mapping is too expensive from the start\n");
+ LLVM_DEBUG(dbgs() << "Mapping is too expensive from the start\n");
return Cost;
}
@@ -417,17 +419,17 @@ RegBankSelect::MappingCost RegBankSelect::computeMapping(
unsigned Reg = MO.getReg();
if (!Reg)
continue;
- DEBUG(dbgs() << "Opd" << OpIdx << '\n');
+ LLVM_DEBUG(dbgs() << "Opd" << OpIdx << '\n');
const RegisterBankInfo::ValueMapping &ValMapping =
InstrMapping.getOperandMapping(OpIdx);
// If Reg is already properly mapped, this is free.
bool Assign;
if (assignmentMatch(Reg, ValMapping, Assign)) {
- DEBUG(dbgs() << "=> is free (match).\n");
+ LLVM_DEBUG(dbgs() << "=> is free (match).\n");
continue;
}
if (Assign) {
- DEBUG(dbgs() << "=> is free (simple assignment).\n");
+ LLVM_DEBUG(dbgs() << "=> is free (simple assignment).\n");
RepairPts.emplace_back(RepairingPlacement(MI, OpIdx, *TRI, *this,
RepairingPlacement::Reassign));
continue;
@@ -446,7 +448,7 @@ RegBankSelect::MappingCost RegBankSelect::computeMapping(
// Check that the materialization of the repairing is possible.
if (!RepairPt.canMaterialize()) {
- DEBUG(dbgs() << "Mapping involves impossible repairing\n");
+ LLVM_DEBUG(dbgs() << "Mapping involves impossible repairing\n");
return MappingCost::ImpossibleCost();
}
@@ -473,7 +475,7 @@ RegBankSelect::MappingCost RegBankSelect::computeMapping(
// This is an impossible to repair cost.
if (RepairCost == std::numeric_limits<unsigned>::max())
- continue;
+ return MappingCost::ImpossibleCost();
// Bias used for splitting: 5%.
const uint64_t PercentageForBias = 5;
@@ -509,7 +511,7 @@ RegBankSelect::MappingCost RegBankSelect::computeMapping(
// Stop looking into what it takes to repair, this is already
// too expensive.
if (BestCost && Cost > *BestCost) {
- DEBUG(dbgs() << "Mapping is too expensive, stop processing\n");
+ LLVM_DEBUG(dbgs() << "Mapping is too expensive, stop processing\n");
return Cost;
}
@@ -519,7 +521,7 @@ RegBankSelect::MappingCost RegBankSelect::computeMapping(
break;
}
}
- DEBUG(dbgs() << "Total cost is: " << Cost << "\n");
+ LLVM_DEBUG(dbgs() << "Total cost is: " << Cost << "\n");
return Cost;
}
@@ -559,14 +561,14 @@ bool RegBankSelect::applyMapping(
}
// Second, rewrite the instruction.
- DEBUG(dbgs() << "Actual mapping of the operands: " << OpdMapper << '\n');
+ LLVM_DEBUG(dbgs() << "Actual mapping of the operands: " << OpdMapper << '\n');
RBI->applyMapping(OpdMapper);
return true;
}
bool RegBankSelect::assignInstr(MachineInstr &MI) {
- DEBUG(dbgs() << "Assign: " << MI);
+ LLVM_DEBUG(dbgs() << "Assign: " << MI);
// Remember the repairing placement for all the operands.
SmallVector<RepairingPlacement, 4> RepairPts;
@@ -587,7 +589,7 @@ bool RegBankSelect::assignInstr(MachineInstr &MI) {
// Make sure the mapping is valid for MI.
assert(BestMapping->verify(MI) && "Invalid instruction mapping");
- DEBUG(dbgs() << "Best Mapping: " << *BestMapping << '\n');
+ LLVM_DEBUG(dbgs() << "Best Mapping: " << *BestMapping << '\n');
// After this call, MI may not be valid anymore.
// Do not use it.
@@ -600,7 +602,7 @@ bool RegBankSelect::runOnMachineFunction(MachineFunction &MF) {
MachineFunctionProperties::Property::FailedISel))
return false;
- DEBUG(dbgs() << "Assign register banks for: " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Assign register banks for: " << MF.getName() << '\n');
const Function &F = MF.getFunction();
Mode SaveOptMode = OptMode;
if (F.hasFnAttribute(Attribute::OptimizeNone))
@@ -610,20 +612,13 @@ bool RegBankSelect::runOnMachineFunction(MachineFunction &MF) {
#ifndef NDEBUG
// Check that our input is fully legal: we require the function to have the
// Legalized property, so it should be.
- // FIXME: This should be in the MachineVerifier, but it can't use the
- // LegalizerInfo as it's currently in the separate GlobalISel library.
- const MachineRegisterInfo &MRI = MF.getRegInfo();
- if (const LegalizerInfo *MLI = MF.getSubtarget().getLegalizerInfo()) {
- for (MachineBasicBlock &MBB : MF) {
- for (MachineInstr &MI : MBB) {
- if (isPreISelGenericOpcode(MI.getOpcode()) && !MLI->isLegal(MI, MRI)) {
- reportGISelFailure(MF, *TPC, *MORE, "gisel-regbankselect",
- "instruction is not legal", MI);
- return false;
- }
- }
+ // FIXME: This should be in the MachineVerifier.
+ if (!DisableGISelLegalityCheck)
+ if (const MachineInstr *MI = machineFunctionIsIllegal(MF)) {
+ reportGISelFailure(MF, *TPC, *MORE, "gisel-regbankselect",
+ "instruction is not legal", *MI);
+ return false;
}
- }
#endif
// Walk the function and assign register banks to all operands.
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/RegisterBank.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/RegisterBank.cpp
index 4d3ae69d3a9d..16f67a217ce1 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/RegisterBank.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/RegisterBank.cpp
@@ -12,6 +12,7 @@
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/Config/llvm-config.h"
#define DEBUG_TYPE "registerbank"
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
index b3d9209ae6eb..dd15567ef1c1 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
@@ -22,6 +22,7 @@
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -72,7 +73,7 @@ bool RegisterBankInfo::verify(const TargetRegisterInfo &TRI) const {
const RegisterBank &RegBank = getRegBank(Idx);
assert(Idx == RegBank.getID() &&
"ID does not match the index in the array");
- DEBUG(dbgs() << "Verify " << RegBank << '\n');
+ LLVM_DEBUG(dbgs() << "Verify " << RegBank << '\n');
assert(RegBank.verify(TRI) && "RegBank is invalid");
}
#endif // NDEBUG
@@ -403,18 +404,18 @@ RegisterBankInfo::getInstrAlternativeMappings(const MachineInstr &MI) const {
void RegisterBankInfo::applyDefaultMapping(const OperandsMapper &OpdMapper) {
MachineInstr &MI = OpdMapper.getMI();
MachineRegisterInfo &MRI = OpdMapper.getMRI();
- DEBUG(dbgs() << "Applying default-like mapping\n");
+ LLVM_DEBUG(dbgs() << "Applying default-like mapping\n");
for (unsigned OpIdx = 0,
EndIdx = OpdMapper.getInstrMapping().getNumOperands();
OpIdx != EndIdx; ++OpIdx) {
- DEBUG(dbgs() << "OpIdx " << OpIdx);
+ LLVM_DEBUG(dbgs() << "OpIdx " << OpIdx);
MachineOperand &MO = MI.getOperand(OpIdx);
if (!MO.isReg()) {
- DEBUG(dbgs() << " is not a register, nothing to be done\n");
+ LLVM_DEBUG(dbgs() << " is not a register, nothing to be done\n");
continue;
}
if (!MO.getReg()) {
- DEBUG(dbgs() << " is %%noreg, nothing to be done\n");
+ LLVM_DEBUG(dbgs() << " is %%noreg, nothing to be done\n");
continue;
}
assert(OpdMapper.getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns !=
@@ -426,14 +427,14 @@ void RegisterBankInfo::applyDefaultMapping(const OperandsMapper &OpdMapper) {
iterator_range<SmallVectorImpl<unsigned>::const_iterator> NewRegs =
OpdMapper.getVRegs(OpIdx);
if (NewRegs.begin() == NewRegs.end()) {
- DEBUG(dbgs() << " has not been repaired, nothing to be done\n");
+ LLVM_DEBUG(dbgs() << " has not been repaired, nothing to be done\n");
continue;
}
unsigned OrigReg = MO.getReg();
unsigned NewReg = *NewRegs.begin();
- DEBUG(dbgs() << " changed, replace " << printReg(OrigReg, nullptr));
+ LLVM_DEBUG(dbgs() << " changed, replace " << printReg(OrigReg, nullptr));
MO.setReg(NewReg);
- DEBUG(dbgs() << " with " << printReg(NewReg, nullptr));
+ LLVM_DEBUG(dbgs() << " with " << printReg(NewReg, nullptr));
// The OperandsMapper creates plain scalar, we may have to fix that.
// Check if the types match and if not, fix that.
@@ -447,35 +448,27 @@ void RegisterBankInfo::applyDefaultMapping(const OperandsMapper &OpdMapper) {
assert(OrigTy.getSizeInBits() <= NewTy.getSizeInBits() &&
"Types with difference size cannot be handled by the default "
"mapping");
- DEBUG(dbgs() << "\nChange type of new opd from " << NewTy << " to "
- << OrigTy);
+ LLVM_DEBUG(dbgs() << "\nChange type of new opd from " << NewTy << " to "
+ << OrigTy);
MRI.setType(NewReg, OrigTy);
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
}
}
unsigned RegisterBankInfo::getSizeInBits(unsigned Reg,
const MachineRegisterInfo &MRI,
const TargetRegisterInfo &TRI) const {
- const TargetRegisterClass *RC = nullptr;
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
// The size is not directly available for physical registers.
// Instead, we need to access a register class that contains Reg and
// get the size of that register class.
- RC = &getMinimalPhysRegClass(Reg, TRI);
- } else {
- LLT Ty = MRI.getType(Reg);
- unsigned RegSize = Ty.isValid() ? Ty.getSizeInBits() : 0;
- // If Reg is not a generic register, query the register class to
- // get its size.
- if (RegSize)
- return RegSize;
- // Since Reg is not a generic register, it must have a register class.
- RC = MRI.getRegClass(Reg);
+    // Because this is expensive, getMinimalPhysRegClass caches its result.
+ auto *RC = &getMinimalPhysRegClass(Reg, TRI);
+ assert(RC && "Expecting Register class");
+ return TRI.getRegSizeInBits(*RC);
}
- assert(RC && "Unable to deduce the register class");
- return TRI.getRegSizeInBits(*RC);
+ return TRI.getRegSizeInBits(Reg, MRI);
}
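
Both branches now end in TargetRegisterInfo; a sketch of the two overloads
relied upon here:

    TRI.getRegSizeInBits(*RC);      // physical register, via its reg class
    TRI.getRegSizeInBits(Reg, MRI); // virtual register, LLT or reg class
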
//------------------------------------------------------------------------------
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index ef990b49aceb..1a5f88743d5f 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -11,12 +11,14 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
@@ -42,20 +44,94 @@ unsigned llvm::constrainRegToClass(MachineRegisterInfo &MRI,
return Reg;
}
-
unsigned llvm::constrainOperandRegClass(
const MachineFunction &MF, const TargetRegisterInfo &TRI,
MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
- unsigned Reg, unsigned OpIdx) {
+ const MachineOperand &RegMO, unsigned OpIdx) {
+ unsigned Reg = RegMO.getReg();
// Assume physical registers are properly constrained.
assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
"PhysReg not implemented");
const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
+ // Some of the target independent instructions, like COPY, may not impose any
+ // register class constraints on some of their operands: If it's a use, we can
+ // skip constraining as the instruction defining the register would constrain
+ // it.
+
+ // We can't constrain unallocatable register classes, because we can't create
+  // virtual registers for these classes, so we need to let targets handle this
+ // case.
+ if (RegClass && !RegClass->isAllocatable())
+ RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);
+
+ if (!RegClass) {
+ assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
+ "Register class constraint is required unless either the "
+ "instruction is target independent or the operand is a use");
+ // FIXME: Just bailing out like this here could be not enough, unless we
+ // expect the users of this function to do the right thing for PHIs and
+ // COPY:
+ // v1 = COPY v0
+ // v2 = COPY v1
+  // v1 here may end up not being constrained at all. Note that reproducing
+  // the issue likely requires a destination pattern of a selection rule that
+  // produces such extra copies, not just an input GMIR containing them:
+  // every existing target using selectImpl handles copies before calling it,
+  // so they never reach this function.
+ return Reg;
+ }
return constrainRegToClass(MRI, TII, RBI, InsertPt, Reg, *RegClass);
}
+bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
+ const TargetInstrInfo &TII,
+ const TargetRegisterInfo &TRI,
+ const RegisterBankInfo &RBI) {
+ assert(!isPreISelGenericOpcode(I.getOpcode()) &&
+ "A selected instruction is expected");
+ MachineBasicBlock &MBB = *I.getParent();
+ MachineFunction &MF = *MBB.getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
+ MachineOperand &MO = I.getOperand(OpI);
+
+ // There's nothing to be done on non-register operands.
+ if (!MO.isReg())
+ continue;
+
+ LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
+ assert(MO.isReg() && "Unsupported non-reg operand");
+
+ unsigned Reg = MO.getReg();
+ // Physical registers don't need to be constrained.
+ if (TRI.isPhysicalRegister(Reg))
+ continue;
+
+ // Register operands with a value of 0 (e.g. predicate operands) don't need
+ // to be constrained.
+ if (Reg == 0)
+ continue;
+
+ // If the operand is a vreg, we should constrain its regclass, and only
+ // insert COPYs if that's impossible.
+ // constrainOperandRegClass does that for us.
+ MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(),
+ MO, OpI));
+
+ // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
+ // done.
+ if (MO.isUse()) {
+ int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
+ if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
+ I.tieOperands(DefIdx, OpI);
+ }
+ }
+ return true;
+}
+
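
A typical caller is a target InstructionSelector that has just swapped a
generic opcode for a real one and wants every virtual register operand
constrained to the classes named in the MCInstrDesc (opcode illustrative):

    I.setDesc(TII.get(/* e.g. */ AArch64::ADDWrr));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
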
bool llvm::isTriviallyDead(const MachineInstr &MI,
const MachineRegisterInfo &MRI) {
// If we can move an instruction, we can remove it. Otherwise, it has
@@ -101,7 +177,7 @@ void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
MI.getDebugLoc(), MI.getParent());
R << Msg;
// Printing MI is expensive; only do it if expensive remarks are enabled.
- if (MORE.allowExtraAnalysis(PassName))
+ if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
R << ": " << ore::MNV("Inst", MI);
reportGISelFailure(MF, TPC, MORE, R);
}
@@ -145,3 +221,20 @@ llvm::MachineInstr *llvm::getOpcodeDef(unsigned Opcode, unsigned Reg,
}
return DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}
+
+APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
+ if (Size == 32)
+ return APFloat(float(Val));
+ if (Size == 64)
+ return APFloat(Val);
+ if (Size != 16)
+ llvm_unreachable("Unsupported FPConstant size");
+ bool Ignored;
+ APFloat APF(Val);
+ APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
+ return APF;
+}
+
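
Only 16-, 32- and 64-bit widths are supported; the 16-bit case rounds through
an explicit conversion. Usage sketch:

    APFloat H = getAPFloatFromSize(0.5, 16); // IEEEhalf, nearest-ties-to-even
    APFloat F = getAPFloatFromSize(0.5, 32); // APFloat(float(0.5))
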
+void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
+ AU.addPreserved<StackProtector>();
+}