Diffstat (limited to 'contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp')
-rw-r--r--  contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp | 257
1 file changed, 186 insertions(+), 71 deletions(-)
diff --git a/contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp b/contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 30305af21152..e80ef7176c21 100644
--- a/contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -18,11 +18,15 @@
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
@@ -35,7 +39,7 @@ using namespace llvm;
/// InitLibcallNames - Set default libcall names.
///
-static void InitLibcallNames(const char **Names, const TargetMachine &TM) {
+static void InitLibcallNames(const char **Names, const Triple &TT) {
Names[RTLIB::SHL_I16] = "__ashlhi3";
Names[RTLIB::SHL_I32] = "__ashlsi3";
Names[RTLIB::SHL_I64] = "__ashldi3";
@@ -78,16 +82,16 @@ static void InitLibcallNames(const char **Names, const TargetMachine &TM) {
Names[RTLIB::UREM_I128] = "__umodti3";
// These are generally not available.
- Names[RTLIB::SDIVREM_I8] = 0;
- Names[RTLIB::SDIVREM_I16] = 0;
- Names[RTLIB::SDIVREM_I32] = 0;
- Names[RTLIB::SDIVREM_I64] = 0;
- Names[RTLIB::SDIVREM_I128] = 0;
- Names[RTLIB::UDIVREM_I8] = 0;
- Names[RTLIB::UDIVREM_I16] = 0;
- Names[RTLIB::UDIVREM_I32] = 0;
- Names[RTLIB::UDIVREM_I64] = 0;
- Names[RTLIB::UDIVREM_I128] = 0;
+ Names[RTLIB::SDIVREM_I8] = nullptr;
+ Names[RTLIB::SDIVREM_I16] = nullptr;
+ Names[RTLIB::SDIVREM_I32] = nullptr;
+ Names[RTLIB::SDIVREM_I64] = nullptr;
+ Names[RTLIB::SDIVREM_I128] = nullptr;
+ Names[RTLIB::UDIVREM_I8] = nullptr;
+ Names[RTLIB::UDIVREM_I16] = nullptr;
+ Names[RTLIB::UDIVREM_I32] = nullptr;
+ Names[RTLIB::UDIVREM_I64] = nullptr;
+ Names[RTLIB::UDIVREM_I128] = nullptr;
Names[RTLIB::NEG_I32] = "__negsi2";
Names[RTLIB::NEG_I64] = "__negdi2";
@@ -201,6 +205,11 @@ static void InitLibcallNames(const char **Names, const TargetMachine &TM) {
Names[RTLIB::FLOOR_F80] = "floorl";
Names[RTLIB::FLOOR_F128] = "floorl";
Names[RTLIB::FLOOR_PPCF128] = "floorl";
+ Names[RTLIB::ROUND_F32] = "roundf";
+ Names[RTLIB::ROUND_F64] = "round";
+ Names[RTLIB::ROUND_F80] = "roundl";
+ Names[RTLIB::ROUND_F128] = "roundl";
+ Names[RTLIB::ROUND_PPCF128] = "roundl";
Names[RTLIB::COPYSIGN_F32] = "copysignf";
Names[RTLIB::COPYSIGN_F64] = "copysign";
Names[RTLIB::COPYSIGN_F80] = "copysignl";
@@ -211,6 +220,10 @@ static void InitLibcallNames(const char **Names, const TargetMachine &TM) {
Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
Names[RTLIB::FPEXT_F16_F32] = "__gnu_h2f_ieee";
Names[RTLIB::FPROUND_F32_F16] = "__gnu_f2h_ieee";
+ Names[RTLIB::FPROUND_F64_F16] = "__truncdfhf2";
+ Names[RTLIB::FPROUND_F80_F16] = "__truncxfhf2";
+ Names[RTLIB::FPROUND_F128_F16] = "__trunctfhf2";
+ Names[RTLIB::FPROUND_PPCF128_F16] = "__trunctfhf2";
Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2";
Names[RTLIB::FPROUND_F128_F32] = "__trunctfsf2";
@@ -375,7 +388,7 @@ static void InitLibcallNames(const char **Names, const TargetMachine &TM) {
Names[RTLIB::SYNC_FETCH_AND_UMIN_8] = "__sync_fetch_and_umin_8";
Names[RTLIB::SYNC_FETCH_AND_UMIN_16] = "__sync_fetch_and_umin_16";
- if (Triple(TM.getTargetTriple()).getEnvironment() == Triple::GNU) {
+ if (TT.getEnvironment() == Triple::GNU) {
Names[RTLIB::SINCOS_F32] = "sincosf";
Names[RTLIB::SINCOS_F64] = "sincos";
Names[RTLIB::SINCOS_F80] = "sincosl";
@@ -383,18 +396,18 @@ static void InitLibcallNames(const char **Names, const TargetMachine &TM) {
Names[RTLIB::SINCOS_PPCF128] = "sincosl";
} else {
// These are generally not available.
- Names[RTLIB::SINCOS_F32] = 0;
- Names[RTLIB::SINCOS_F64] = 0;
- Names[RTLIB::SINCOS_F80] = 0;
- Names[RTLIB::SINCOS_F128] = 0;
- Names[RTLIB::SINCOS_PPCF128] = 0;
+ Names[RTLIB::SINCOS_F32] = nullptr;
+ Names[RTLIB::SINCOS_F64] = nullptr;
+ Names[RTLIB::SINCOS_F80] = nullptr;
+ Names[RTLIB::SINCOS_F128] = nullptr;
+ Names[RTLIB::SINCOS_PPCF128] = nullptr;
}
- if (Triple(TM.getTargetTriple()).getOS() != Triple::OpenBSD) {
+ if (TT.getOS() != Triple::OpenBSD) {
Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = "__stack_chk_fail";
} else {
// These are generally not available.
- Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = 0;
+ Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = nullptr;
}
}
@@ -409,7 +422,10 @@ static void InitLibcallCallingConvs(CallingConv::ID *CCs) {
/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
- if (OpVT == MVT::f32) {
+ if (OpVT == MVT::f16) {
+ if (RetVT == MVT::f32)
+ return FPEXT_F16_F32;
+ } else if (OpVT == MVT::f32) {
if (RetVT == MVT::f64)
return FPEXT_F32_F64;
if (RetVT == MVT::f128)
@@ -425,7 +441,18 @@ RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
- if (RetVT == MVT::f32) {
+ if (RetVT == MVT::f16) {
+ if (OpVT == MVT::f32)
+ return FPROUND_F32_F16;
+ if (OpVT == MVT::f64)
+ return FPROUND_F64_F16;
+ if (OpVT == MVT::f80)
+ return FPROUND_F80_F16;
+ if (OpVT == MVT::f128)
+ return FPROUND_F128_F16;
+ if (OpVT == MVT::ppcf128)
+ return FPROUND_PPCF128_F16;
+ } else if (RetVT == MVT::f32) {
if (OpVT == MVT::f64)
return FPROUND_F64_F32;
if (OpVT == MVT::f80)
@@ -659,25 +686,29 @@ static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
/// NOTE: The constructor takes ownership of TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm,
const TargetLoweringObjectFile *tlof)
- : TM(tm), TD(TM.getDataLayout()), TLOF(*tlof) {
+ : TM(tm), DL(TM.getDataLayout()), TLOF(*tlof) {
initActions();
// Perform these initializations only once.
- IsLittleEndian = TD->isLittleEndian();
+ IsLittleEndian = DL->isLittleEndian();
MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove = 8;
MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize
= MaxStoresPerMemmoveOptSize = 4;
UseUnderscoreSetJmp = false;
UseUnderscoreLongJmp = false;
SelectIsExpensive = false;
+ HasMultipleConditionRegisters = false;
+ HasExtractBitsInsn = false;
IntDivIsCheap = false;
Pow2DivIsCheap = false;
JumpIsExpensive = false;
PredictableSelectIsExpensive = false;
+ MaskAndBranchFoldingIsLegal = false;
StackPointerRegisterToSaveRestore = 0;
ExceptionPointerRegister = 0;
ExceptionSelectorRegister = 0;
BooleanContents = UndefinedBooleanContent;
+ BooleanFloatContents = UndefinedBooleanContent;
BooleanVectorContents = UndefinedBooleanContent;
SchedPreferenceInfo = Sched::ILP;
JumpBufSize = 0;
@@ -690,7 +721,7 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm,
SupportJumpTables = true;
MinimumJumpTableEntries = 4;
- InitLibcallNames(LibcallRoutineNames, TM);
+ InitLibcallNames(LibcallRoutineNames, Triple(TM.getTargetTriple()));
InitCmpLibcallCCs(CmpLibcallCCs);
InitLibcallCallingConvs(LibcallCallingConvs);
}
@@ -718,6 +749,10 @@ void TargetLoweringBase::initActions() {
setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand);
}
+ // Most backends expect to see the node which just returns the value loaded.
+ setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
+ (MVT::SimpleValueType)VT, Expand);
+
// These operations default to expand.
setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::CONCAT_VECTORS, (MVT::SimpleValueType)VT, Expand);
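The new default reflects that most backends only select the plain compare-and-swap node; when the "with success" form is expanded, the boolean result is typically recovered by comparing the loaded value against the expected one. A rough C++-level analogue of that semantics (illustration only, not the SelectionDAG expansion itself):

    #include <atomic>

    // Returns the success flag and the previously stored value, deriving success
    // from a plain compare-and-swap followed by a comparison, mirroring Expand.
    bool cmpxchgWithSuccess(std::atomic<int> &A, int Expected, int Desired,
                            int &Loaded) {
      Loaded = Expected;
      A.compare_exchange_strong(Loaded, Desired); // Loaded now holds the old value.
      return Loaded == Expected;                  // Success derived by comparison.
    }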
@@ -727,8 +762,15 @@ void TargetLoweringBase::initActions() {
// These operations default to expand for vector types.
if (VT >= MVT::FIRST_VECTOR_VALUETYPE &&
- VT <= MVT::LAST_VECTOR_VALUETYPE)
+ VT <= MVT::LAST_VECTOR_VALUETYPE) {
setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG,
+ (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG,
+ (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG,
+ (MVT::SimpleValueType)VT, Expand);
+ }
}
// Most targets ignore the @llvm.prefetch intrinsic.
@@ -754,6 +796,7 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::FCEIL, MVT::f16, Expand);
setOperationAction(ISD::FRINT, MVT::f16, Expand);
setOperationAction(ISD::FTRUNC, MVT::f16, Expand);
+ setOperationAction(ISD::FROUND, MVT::f16, Expand);
setOperationAction(ISD::FLOG , MVT::f32, Expand);
setOperationAction(ISD::FLOG2, MVT::f32, Expand);
setOperationAction(ISD::FLOG10, MVT::f32, Expand);
@@ -764,6 +807,7 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::FCEIL, MVT::f32, Expand);
setOperationAction(ISD::FRINT, MVT::f32, Expand);
setOperationAction(ISD::FTRUNC, MVT::f32, Expand);
+ setOperationAction(ISD::FROUND, MVT::f32, Expand);
setOperationAction(ISD::FLOG , MVT::f64, Expand);
setOperationAction(ISD::FLOG2, MVT::f64, Expand);
setOperationAction(ISD::FLOG10, MVT::f64, Expand);
@@ -774,6 +818,7 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::FCEIL, MVT::f64, Expand);
setOperationAction(ISD::FRINT, MVT::f64, Expand);
setOperationAction(ISD::FTRUNC, MVT::f64, Expand);
+ setOperationAction(ISD::FROUND, MVT::f64, Expand);
setOperationAction(ISD::FLOG , MVT::f128, Expand);
setOperationAction(ISD::FLOG2, MVT::f128, Expand);
setOperationAction(ISD::FLOG10, MVT::f128, Expand);
@@ -784,6 +829,7 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::FCEIL, MVT::f128, Expand);
setOperationAction(ISD::FRINT, MVT::f128, Expand);
setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
+ setOperationAction(ISD::FROUND, MVT::f128, Expand);
// Default ISD::TRAP to expand (which turns it into abort).
setOperationAction(ISD::TRAP, MVT::Other, Expand);
@@ -799,7 +845,7 @@ MVT TargetLoweringBase::getPointerTy(uint32_t AS) const {
}
unsigned TargetLoweringBase::getPointerSizeInBits(uint32_t AS) const {
- return TD->getPointerSizeInBits(AS);
+ return DL->getPointerSizeInBits(AS);
}
unsigned TargetLoweringBase::getPointerTypeSizeInBits(Type *Ty) const {
@@ -808,7 +854,7 @@ unsigned TargetLoweringBase::getPointerTypeSizeInBits(Type *Ty) const {
}
MVT TargetLoweringBase::getScalarShiftAmountTy(EVT LHSTy) const {
- return MVT::getIntegerVT(8*TD->getPointerSize(0));
+ return MVT::getIntegerVT(8*DL->getPointerSize(0));
}
EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy) const {
@@ -894,6 +940,58 @@ bool TargetLoweringBase::isLegalRC(const TargetRegisterClass *RC) const {
return false;
}
+/// Replace/modify any TargetFrameIndex operands with a target-dependent
+/// sequence of memory operands that is recognized by PrologEpilogInserter.
+MachineBasicBlock*
+TargetLoweringBase::emitPatchPoint(MachineInstr *MI,
+ MachineBasicBlock *MBB) const {
+ MachineFunction &MF = *MI->getParent()->getParent();
+
+ // MI changes inside this loop as we grow operands.
+ for(unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
+ MachineOperand &MO = MI->getOperand(OperIdx);
+ if (!MO.isFI())
+ continue;
+
+ // foldMemoryOperand builds a new MI after replacing a single FI operand
+ // with the canonical set of five x86 addressing-mode operands.
+ int FI = MO.getIndex();
+ MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());
+
+ // Copy operands before the frame-index.
+ for (unsigned i = 0; i < OperIdx; ++i)
+ MIB.addOperand(MI->getOperand(i));
+ // Add frame index operands: direct-mem-ref tag, #FI, offset.
+ MIB.addImm(StackMaps::DirectMemRefOp);
+ MIB.addOperand(MI->getOperand(OperIdx));
+ MIB.addImm(0);
+ // Copy the operands after the frame index.
+ for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
+ MIB.addOperand(MI->getOperand(i));
+
+ // Inherit previous memory operands.
+ MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");
+
+ // Add a new memory operand for this FI.
+ const MachineFrameInfo &MFI = *MF.getFrameInfo();
+ assert(MFI.getObjectOffset(FI) != -1);
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
+ MachineMemOperand::MOLoad,
+ TM.getDataLayout()->getPointerSize(),
+ MFI.getObjectAlignment(FI));
+ MIB->addMemOperand(MF, MMO);
+
+ // Replace the instruction and update the operand index.
+ MBB->insert(MachineBasicBlock::iterator(MI), MIB);
+ OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;
+ MI->eraseFromParent();
+ MI = MIB;
+ }
+ return MBB;
+}
+
/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
std::pair<const TargetRegisterClass*, uint8_t>
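For a stackmap or patchpoint, the loop above rewrites every frame-index operand into the three-operand form that StackMaps and PrologEpilogInserter understand. Illustratively (operand names hypothetical), a use of frame index #3

    // before:  STACKMAP <id>, <shadow-bytes>, %vreg1, <fi#3>, ...
    // after:   STACKMAP <id>, <shadow-bytes>, %vreg1, DirectMemRefOp, <fi#3>, 0, ...

The original instruction is erased, the rebuilt one inherits and extends its memory operands, and OperIdx is advanced past the rewritten frame-index operand so the scan does not revisit it.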
@@ -938,7 +1036,7 @@ void TargetLoweringBase::computeRegisterProperties() {
// Find the largest integer register class.
unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
- for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg)
+ for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
// Every integer value type larger than this largest register takes twice as
@@ -1009,27 +1107,35 @@ void TargetLoweringBase::computeRegisterProperties() {
}
}
+ if (!isTypeLegal(MVT::f16)) {
+ NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
+ RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
+ TransformToType[MVT::f16] = MVT::i16;
+ ValueTypeActions.setTypeAction(MVT::f16, TypeSoftenFloat);
+ }
+
// Loop over all of the vector value types to see which need transformations.
for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
- MVT VT = (MVT::SimpleValueType)i;
- if (isTypeLegal(VT)) continue;
+ MVT VT = (MVT::SimpleValueType) i;
+ if (isTypeLegal(VT))
+ continue;
- // Determine if there is a legal wider type. If so, we should promote to
- // that wider vector type.
MVT EltVT = VT.getVectorElementType();
unsigned NElts = VT.getVectorNumElements();
- if (NElts != 1 && !shouldSplitVectorElementType(EltVT)) {
- bool IsLegalWiderType = false;
- // First try to promote the elements of integer vectors. If no legal
- // promotion was found, fallback to the widen-vector method.
- for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
- MVT SVT = (MVT::SimpleValueType)nVT;
+ bool IsLegalWiderType = false;
+ LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
+ switch (PreferredAction) {
+ case TypePromoteInteger: {
+ // Try to promote the elements of integer vectors. If no legal
+ // promotion was found, fall through to the widen-vector method.
+ for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
+ MVT SVT = (MVT::SimpleValueType) nVT;
// Promote vectors of integers to vectors with the same number
// of elements, with a wider element type.
if (SVT.getVectorElementType().getSizeInBits() > EltVT.getSizeInBits()
- && SVT.getVectorNumElements() == NElts &&
- isTypeLegal(SVT) && SVT.getScalarType().isInteger()) {
+ && SVT.getVectorNumElements() == NElts && isTypeLegal(SVT)
+ && SVT.getScalarType().isInteger()) {
TransformToType[i] = SVT;
RegisterTypeForVT[i] = SVT;
NumRegistersForVT[i] = 1;
@@ -1038,15 +1144,15 @@ void TargetLoweringBase::computeRegisterProperties() {
break;
}
}
-
- if (IsLegalWiderType) continue;
-
+ if (IsLegalWiderType)
+ break;
+ }
+ case TypeWidenVector: {
// Try to widen the vector.
- for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
- MVT SVT = (MVT::SimpleValueType)nVT;
- if (SVT.getVectorElementType() == EltVT &&
- SVT.getVectorNumElements() > NElts &&
- isTypeLegal(SVT)) {
+ for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
+ MVT SVT = (MVT::SimpleValueType) nVT;
+ if (SVT.getVectorElementType() == EltVT
+ && SVT.getVectorNumElements() > NElts && isTypeLegal(SVT)) {
TransformToType[i] = SVT;
RegisterTypeForVT[i] = SVT;
NumRegistersForVT[i] = 1;
@@ -1055,27 +1161,34 @@ void TargetLoweringBase::computeRegisterProperties() {
break;
}
}
- if (IsLegalWiderType) continue;
+ if (IsLegalWiderType)
+ break;
}
-
- MVT IntermediateVT;
- MVT RegisterVT;
- unsigned NumIntermediates;
- NumRegistersForVT[i] =
- getVectorTypeBreakdownMVT(VT, IntermediateVT, NumIntermediates,
- RegisterVT, this);
- RegisterTypeForVT[i] = RegisterVT;
-
- MVT NVT = VT.getPow2VectorType();
- if (NVT == VT) {
- // Type is already a power of 2. The default action is to split.
- TransformToType[i] = MVT::Other;
- unsigned NumElts = VT.getVectorNumElements();
- ValueTypeActions.setTypeAction(VT,
- NumElts > 1 ? TypeSplitVector : TypeScalarizeVector);
- } else {
- TransformToType[i] = NVT;
- ValueTypeActions.setTypeAction(VT, TypeWidenVector);
+ case TypeSplitVector:
+ case TypeScalarizeVector: {
+ MVT IntermediateVT;
+ MVT RegisterVT;
+ unsigned NumIntermediates;
+ NumRegistersForVT[i] = getVectorTypeBreakdownMVT(VT, IntermediateVT,
+ NumIntermediates, RegisterVT, this);
+ RegisterTypeForVT[i] = RegisterVT;
+
+ MVT NVT = VT.getPow2VectorType();
+ if (NVT == VT) {
+ // Type is already a power of 2. The default action is to split.
+ TransformToType[i] = MVT::Other;
+ if (PreferredAction == TypeScalarizeVector)
+ ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
+ else
+ ValueTypeActions.setTypeAction(VT, TypeSplitVector);
+ } else {
+ TransformToType[i] = NVT;
+ ValueTypeActions.setTypeAction(VT, TypeWidenVector);
+ }
+ break;
+ }
+ default:
+ llvm_unreachable("Unknown vector legalization action!");
}
}
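The rewritten loop now dispatches on getPreferredVectorAction(VT) and deliberately falls through: a failed integer promotion tries widening next, and only then does the split/scalarize fallback run. A worked example on a hypothetical target (purely illustrative):

    // v4i8  illegal, v4i32 legal   -> TypePromoteInteger: transformed to v4i32
    // v2f32 illegal, v4f32 legal   -> TypeWidenVector:    widened to v4f32
    // v8i64 illegal, nothing wider -> split in half (TransformToType = MVT::Other)
    // v1i64 illegal                -> scalarized to i64 when the preferred
    //                                 action is TypeScalarizeVector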
@@ -1087,7 +1200,7 @@ void TargetLoweringBase::computeRegisterProperties() {
for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
const TargetRegisterClass* RRC;
uint8_t Cost;
- tie(RRC, Cost) = findRepresentativeClass((MVT::SimpleValueType)i);
+ std::tie(RRC, Cost) = findRepresentativeClass((MVT::SimpleValueType)i);
RepRegClassForVT[i] = RRC;
RepRegClassCostForVT[i] = Cost;
}
@@ -1230,7 +1343,7 @@ void llvm::GetReturnInfo(Type* ReturnType, AttributeSet attr,
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty) const {
- return TD->getCallFrameTypeAlignment(Ty);
+ return DL->getABITypeAlignment(Ty);
}
//===----------------------------------------------------------------------===//
@@ -1258,7 +1371,7 @@ int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
case Mul: return ISD::MUL;
case FMul: return ISD::FMUL;
case UDiv: return ISD::UDIV;
- case SDiv: return ISD::UDIV;
+ case SDiv: return ISD::SDIV;
case FDiv: return ISD::FDIV;
case URem: return ISD::UREM;
case SRem: return ISD::SREM;
@@ -1364,6 +1477,8 @@ bool TargetLoweringBase::isLegalAddressingMode(const AddrMode &AM,
return false;
// Allow 2*r as r+r.
break;
+ default: // Don't allow n * r
+ return false;
}
return true;
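With the new default case, the generic implementation only accepts scales of 0, 1, or 2, and 2 only when it can be refolded as r+r; any other scaled-index form must be claimed by a target override. Sketch of the outcomes (illustrative values; the queried AddrMode describes BaseGV + BaseOffs + BaseReg + Scale*IndexReg):

    // {Scale = 0}                        -> legal: "i", "r", or "r + i"
    // {Scale = 1}                        -> legal as "r + r" or "r + i"; "r + r + i" is rejected
    // {Scale = 2, no base reg or offset} -> legal, folded as "r + r"
    // {Scale = 4}                        -> now rejected by the default; targets with
    //                                       scaled addressing (e.g. X86) override this hook.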