path: root/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
author    Dimitry Andric <dim@FreeBSD.org>    2018-07-30 16:33:32 +0000
committer Dimitry Andric <dim@FreeBSD.org>    2018-07-30 16:33:32 +0000
commit    51315c45ff5643a27f9c84b816db54ee870ba29b (patch)
tree      1d87443fa0e53d3e6b315ce25787e64be0906bf7 /contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
parent    6dfd050075216be8538ae375a22d30db72916f7e (diff)
parent    eb11fae6d08f479c0799db45860a98af528fa6e7 (diff)
Merge llvm trunk r338150, and resolve conflicts.
Notes: svn path=/projects/clang700-import/; revision=336916
Diffstat (limited to 'contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h')
-rw-r--r-- contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h 31
1 file changed, 28 insertions(+), 3 deletions(-)
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
index b3215a84829e..9b8d6435515b 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -20,7 +20,6 @@
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
-#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
@@ -31,6 +30,7 @@
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
+#include "llvm/Support/MachineValueType.h"
#include <utility>
namespace llvm {
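For illustration only (not part of the commit): the hunks above move the MachineValueType.h include from llvm/CodeGen to llvm/Support, so code that names MVT values now pulls in the Support header. A minimal standalone sketch, assuming LLVM 7-era headers are on the include path:

    #include "llvm/Support/MachineValueType.h"  // new location of MVT after this merge

    int main() {
      llvm::MVT VT = llvm::MVT::i32;            // a simple scalar machine value type
      return VT.getSizeInBits() == 32 ? 0 : 1;  // exits 0 when i32 is 32 bits wide
    }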
@@ -71,6 +71,9 @@ namespace llvm {
/// unsigned integers with round toward zero.
FCTIDUZ, FCTIWUZ,
+ /// Floating-point-to-integer conversion instructions
+ FP_TO_UINT_IN_VSR, FP_TO_SINT_IN_VSR,
+
/// VEXTS, ByteWidth - takes an input in VSFRC and produces an output in
/// VSFRC that is sign-extended from ByteWidth to a 64-bit integer.
VEXTS,
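For illustration (my example, not from the patch): FP_TO_UINT_IN_VSR and FP_TO_SINT_IN_VSR model a floating-point-to-integer conversion whose result stays in a vector-scalar register, the shape produced for a source-level conversion that only feeds a store, as in this standalone sketch:

    #include <cstdint>

    // fptosi feeding a store: newer PowerPC cores can keep the converted
    // integer in a VSR instead of round-tripping it through a GPR.
    void convertAndStore(double D, int64_t *P) {
      *P = static_cast<int64_t>(D);
    }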
@@ -186,6 +189,9 @@ namespace llvm {
/// Direct move from a GPR to a VSX register (zero)
MTVSRZ,
+ /// Direct move of 2 consecutive GPRs to a VSX register.
+ BUILD_FP128,
+
/// Extract a subvector from signed integer vector and convert to FP.
/// It is primarily used to convert a (widened) illegal integer vector
/// type to a legal floating point vector type.
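For illustration (an analogy I am assuming, not the lowering code itself): BUILD_FP128 glues two consecutive 64-bit GPR halves into one 128-bit VSX value; at the bit level that is the same join this standalone sketch performs with the GCC/Clang __int128 extension:

    #include <cstdint>

    // Join a high and a low 64-bit half into one 128-bit value.
    unsigned __int128 joinHalves(uint64_t Hi, uint64_t Lo) {
      return (static_cast<unsigned __int128>(Hi) << 64) | Lo;
    }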
@@ -426,6 +432,9 @@ namespace llvm {
/// an xxswapd.
STXVD2X,
+ /// Store scalar integers from VSR.
+ ST_VSR_SCAL_INT,
+
/// QBRC, CHAIN = QVLFSb CHAIN, Ptr
/// The 4xf32 load used for v4i1 constants.
QVLFSb,
@@ -565,6 +574,8 @@ namespace llvm {
bool useSoftFloat() const override;
+ bool hasSPE() const;
+
MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
return MVT::i32;
}
@@ -765,7 +776,7 @@ namespace llvm {
bool isFPExtFree(EVT DestVT, EVT SrcVT) const override;
- /// \brief Returns true if it is beneficial to convert a load of a constant
+ /// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
Type *Ty) const override;
@@ -822,7 +833,7 @@ namespace llvm {
FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
const TargetLibraryInfo *LibInfo) const override;
- /// \brief Returns true if an argument of type Ty needs to be passed in a
+ /// Returns true if an argument of type Ty needs to be passed in a
/// contiguous block of registers in calling convention CallConv.
bool functionArgumentNeedsConsecutiveRegisters(
Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override {
@@ -860,6 +871,12 @@ namespace llvm {
unsigned JTI,
MCContext &Ctx) const override;
+ unsigned getNumRegistersForCallingConv(LLVMContext &Context,
+ EVT VT) const override;
+
+ MVT getRegisterTypeForCallingConv(LLVMContext &Context,
+ EVT VT) const override;
+
private:
struct ReuseLoadInfo {
SDValue Ptr;
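For illustration (my sketch, not the LLVM implementation): getNumRegistersForCallingConv answers how many registers a value of type VT occupies under a given calling convention, which conceptually is a ceiling division of the type width by the register width:

    #include <cstdio>

    // ceil(TypeBits / RegBits): e.g. a 128-bit value in 64-bit registers -> 2
    static unsigned numRegsFor(unsigned TypeBits, unsigned RegBits) {
      return (TypeBits + RegBits - 1) / RegBits;
    }

    int main() {
      std::printf("%u\n", numRegsFor(128, 64)); // prints 2
      return 0;
    }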
@@ -884,6 +901,11 @@ namespace llvm {
}
};
+ bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
+ // Addrspacecasts are always noops.
+ return true;
+ }
+
bool canReuseLoadAddress(SDValue Op, EVT MemVT, ReuseLoadInfo &RLI,
SelectionDAG &DAG,
ISD::LoadExtType ET = ISD::NON_EXTLOAD) const;
@@ -1054,10 +1076,12 @@ namespace llvm {
SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue DAGCombineBuildVector(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue combineStoreFPToInt(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue combineFPToIntToFP(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue combineSHL(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue combineSRA(SDNode *N, DAGCombinerInfo &DCI) const;
@@ -1096,6 +1120,7 @@ namespace llvm {
// tail call. This will cause the optimizers to attempt to move, or
// duplicate return instructions to help enable tail call optimizations.
bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
+ bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
}; // end class PPCTargetLowering
namespace PPC {
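For illustration (my example, not from the patch): isMaskAndCmp0FoldingBeneficial is asked about the "(x & mask) == 0" shape, which PowerPC can often evaluate with a single record-form and-immediate (andi.) setting CR0, as in the predicate this standalone sketch compiles down to:

    #include <cstdint>
    #include <cstdio>

    // The mask-and-compare-to-zero shape the hook refers to.
    static bool lowByteClear(uint32_t Flags) {
      return (Flags & 0xFFu) == 0;
    }

    int main() {
      std::printf("%d\n", lowByteClear(0x100)); // prints 1: the low byte is clear
      return 0;
    }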