Diffstat (limited to 'lib/Target/X86/X86ISelLowering.h')
 lib/Target/X86/X86ISelLowering.h | 77 ++++++++++++++++++++++++------
 1 file changed, 62 insertions(+), 15 deletions(-)
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index e0be03bc3f9d..6f7e90008de4 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -17,7 +17,6 @@
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
-#include "llvm/Target/TargetOptions.h"
namespace llvm {
class X86Subtarget;
@@ -144,6 +143,10 @@ namespace llvm {
/// relative displacements.
WrapperRIP,
+ /// Copies a 64-bit value from an MMX vector to the low word
+ /// of an XMM vector, with the high word zero filled.
+ MOVQ2DQ,
+
/// Copies a 64-bit value from the low word of an XMM vector
/// to an MMX vector.
MOVDQ2Q,
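For illustration, a minimal sketch of how a lowering routine might build the new node (the helper and its name are hypothetical, not part of this patch):

    // Move a 64-bit MMX value into the low quadword of an XMM register;
    // X86ISD::MOVQ2DQ zero-fills the high quadword of the v2i64 result.
    SDValue moveMMXToXMM(SDValue Mmx, const SDLoc &DL, SelectionDAG &DAG) {
      return DAG.getNode(X86ISD::MOVQ2DQ, DL, MVT::v2i64, Mmx);
    }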
@@ -422,7 +425,8 @@ namespace llvm {
// Tests the types of FP values, for scalar types.
VFPCLASSS,
- // Broadcast scalar to vector.
+ // Broadcast (splat) scalar or element 0 of a vector. If the operand is
+ // a vector, this node may change the vector length as part of the splat.
VBROADCAST,
// Broadcast mask to vector.
VBROADCASTM,
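As a hedged illustration of the two operand forms the updated comment allows (node shapes only; the notation is informal):

    // scalar source: every result lane equals the scalar operand
    //   v4f32 = X86ISD::VBROADCAST f32:%s    -> <s, s, s, s>
    // vector source: element 0 is splatted, and the result length (8)
    // may differ from the source length (4)
    //   v8f32 = X86ISD::VBROADCAST v4f32:%v  -> <v[0], v[0], ... x8>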
@@ -611,6 +615,9 @@ namespace llvm {
// extract_vector_elt, store.
VEXTRACT_STORE,
+ // Scalar broadcast from memory.
+ VBROADCAST_LOAD,
+
// Store FP control word into i16 memory.
FNSTCW16m,
@@ -680,6 +687,9 @@ namespace llvm {
bool isCalleePop(CallingConv::ID CallingConv,
bool is64Bit, bool IsVarArg, bool GuaranteeTCO);
+ /// If Op is a constant whose elements are all the same constant or
+ /// undefined, return true and store that value in \p SplatVal.
+ bool isConstantSplat(SDValue Op, APInt &SplatVal);
} // end namespace X86
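A hedged usage sketch for the new helper (the surrounding fold is hypothetical):

    // If the AND mask is a splat of all-ones, the AND is a no-op.
    APInt SplatVal;
    if (X86::isConstantSplat(N->getOperand(1), SplatVal) &&
        SplatVal.isAllOnesValue())
      return N->getOperand(0); // x & splat(-1) --> x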
//===--------------------------------------------------------------------===//
@@ -792,6 +802,17 @@ namespace llvm {
/// and some i16 instructions are slow.
bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const override;
+ /// Return 1 if we can compute the negated form of the specified expression
+ /// for the same cost as the expression itself, or 2 if we can compute the
+ /// negated form more cheaply than the expression itself. Else return 0.
+ char isNegatibleForFree(SDValue Op, SelectionDAG &DAG, bool LegalOperations,
+ bool ForCodeSize, unsigned Depth) const override;
+
+ /// If isNegatibleForFree returns nonzero, return the newly negated expression.
+ SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
+ bool LegalOperations, bool ForCodeSize,
+ unsigned Depth) const override;
+
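The expected calling pattern for this pair of hooks, sketched on a hypothetical (fsub X, Y) -> (fadd X, (fneg Y)) fold:

    // Only negate Y when doing so is free (1) or strictly cheaper (2).
    if (TLI.isNegatibleForFree(Y, DAG, LegalOperations, ForCodeSize,
                               /*Depth=*/0)) {
      SDValue NegY = TLI.getNegatedExpression(Y, DAG, LegalOperations,
                                              ForCodeSize, /*Depth=*/0);
      return DAG.getNode(ISD::FADD, DL, VT, X, NegY);
    }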
MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *MBB) const override;
@@ -840,6 +861,13 @@ namespace llvm {
bool hasAndNot(SDValue Y) const override;
+ bool hasBitTest(SDValue X, SDValue Y) const override;
+
+ bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
+ SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
+ unsigned OldShiftOpcode, unsigned NewShiftOpcode,
+ SelectionDAG &DAG) const override;
+
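For context, the source-level patterns these two hooks arbitrate between; the forms are equivalent for in-range shift amounts, and x86's BT instruction matches the second:

    bool testMaskShiftedLeft(unsigned X, unsigned Y) {
      return ((1u << Y) & X) != 0;  // constant on the shift's LHS
    }
    bool testValueShiftedRight(unsigned X, unsigned Y) {
      return ((X >> Y) & 1u) != 0;  // hoisted form, selectable as BT
    }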
bool shouldFoldConstantShiftPairToMask(const SDNode *N,
CombineLevel Level) const override;
@@ -863,11 +891,7 @@ namespace llvm {
return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
}
- bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
- if (DAG.getMachineFunction().getFunction().hasMinSize())
- return false;
- return true;
- }
+ bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;
bool shouldSplatInsEltVarIndex(EVT VT) const override;
@@ -913,6 +937,10 @@ namespace llvm {
TargetLoweringOpt &TLO,
unsigned Depth) const override;
+ SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
+ SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
+ SelectionDAG &DAG, unsigned Depth) const override;
+
const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const override;
SDValue unwrapAddress(SDValue N) const override;
@@ -1090,11 +1118,12 @@ namespace llvm {
bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
Type *Ty) const override;
- bool reduceSelectOfFPConstantLoads(bool IsFPSetCC) const override;
+ bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const override;
bool convertSelectOfConstantsToMath(EVT VT) const override;
- bool decomposeMulByConstant(EVT VT, SDValue C) const override;
+ bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
+ SDValue C) const override;
bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
bool IsSigned) const override;
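For context, a sketch of the kind of decomposition decomposeMulByConstant authorizes (the constant 9 is an arbitrary example of 2^N + 1):

    unsigned mulBy9(unsigned X) {
      return (X << 3) + X; // X * 9 decomposed into a shift and an add
    }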
@@ -1136,8 +1165,8 @@ namespace llvm {
return nullptr; // nothing to do, move along.
}
- unsigned getRegisterByName(const char* RegName, EVT VT,
- SelectionDAG &DAG) const override;
+ Register getRegisterByName(const char* RegName, EVT VT,
+ const MachineFunction &MF) const override;
/// If a physical register, this returns the register that receives the
/// exception address on entry to an EH pad.
@@ -1189,12 +1218,18 @@ namespace llvm {
CallingConv::ID CC,
EVT VT) const override;
+ unsigned getVectorTypeBreakdownForCallingConv(
+ LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
+ unsigned &NumIntermediates, MVT &RegisterVT) const override;
+
bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
bool supportSwiftError() const override;
StringRef getStackProbeSymbolName(MachineFunction &MF) const override;
+ unsigned getStackProbeSize(MachineFunction &MF) const;
+
bool hasVectorBlend() const override { return true; }
unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
@@ -1326,6 +1361,12 @@ namespace llvm {
SDValue LowerGC_TRANSITION_START(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGC_TRANSITION_END(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
+ RTLIB::Libcall Call) const;
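A minimal sketch of what a libcall-based f128 lowering typically looks like, assuming the TargetLowering::makeLibCall interface with MakeLibCallOptions; the body shown is illustrative, the real one lives in X86ISelLowering.cpp:

    SDValue lowerViaLibcall(SDValue Op, SelectionDAG &DAG, RTLIB::Libcall Call,
                            const TargetLowering &TLI) {
      SmallVector<SDValue, 2> Ops(Op->op_begin(), Op->op_end());
      TargetLowering::MakeLibCallOptions CallOptions;
      return TLI.makeLibCall(DAG, Call, Op.getValueType(), Ops, CallOptions,
                             SDLoc(Op)).first;
    }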
SDValue
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
@@ -1372,6 +1413,9 @@ namespace llvm {
LoadInst *
lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const override;
+ bool lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const override;
+ bool lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const override;
+
bool needsCmpXchgNb(Type *MemType) const;
void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
@@ -1462,6 +1506,9 @@ namespace llvm {
/// Reassociate floating point divisions into multiply by reciprocal.
unsigned combineRepeatedFPDivisors() const override;
+
+ SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
+ SmallVectorImpl<SDNode *> &Created) const override;
};
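For reference, the conventional shape of the signed divide-by-power-of-two expansion that BuildSDIVPow2 customizes, shown for x / 8 on a 32-bit int (assumes arithmetic right shift of negative values):

    int sdivBy8(int X) {
      int Bias = (X >> 31) & 7; // 7 when X is negative, 0 otherwise
      return (X + Bias) >> 3;   // rounds toward zero, as sdiv requires
    }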
namespace X86 {
@@ -1625,24 +1672,24 @@ namespace llvm {
/// mask. This is the reverse process to canWidenShuffleElements, but can
/// always succeed.
template <typename T>
- void scaleShuffleMask(int Scale, ArrayRef<T> Mask,
+ void scaleShuffleMask(size_t Scale, ArrayRef<T> Mask,
SmallVectorImpl<T> &ScaledMask) {
assert(0 < Scale && "Unexpected scaling factor");
size_t NumElts = Mask.size();
ScaledMask.assign(NumElts * Scale, -1);
- for (int i = 0; i != (int)NumElts; ++i) {
+ for (size_t i = 0; i != NumElts; ++i) {
int M = Mask[i];
// Repeat sentinel values in every mask element.
if (M < 0) {
- for (int s = 0; s != Scale; ++s)
+ for (size_t s = 0; s != Scale; ++s)
ScaledMask[(Scale * i) + s] = M;
continue;
}
// Scale mask element and increment across each mask element.
- for (int s = 0; s != Scale; ++s)
+ for (size_t s = 0; s != Scale; ++s)
ScaledMask[(Scale * i) + s] = (Scale * M) + s;
}
}
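A hypothetical usage of the template after the signature change, widening a 4-element mask by a factor of 2:

    int Mask[] = {0, -1, 3, 2};
    SmallVector<int, 8> Scaled;
    scaleShuffleMask<int>(2, makeArrayRef(Mask), Scaled);
    // Scaled == {0, 1, -1, -1, 6, 7, 4, 5}; the -1 sentinel is repeated.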