diff options
Diffstat (limited to 'contrib/llvm/lib/Target/ARM/ARMSubtarget.h')
-rw-r--r-- | contrib/llvm/lib/Target/ARM/ARMSubtarget.h | 24 |
1 file changed, 23 insertions, 1 deletion
diff --git a/contrib/llvm/lib/Target/ARM/ARMSubtarget.h b/contrib/llvm/lib/Target/ARM/ARMSubtarget.h index 74aee9a8ed38..11841b4467a2 100644 --- a/contrib/llvm/lib/Target/ARM/ARMSubtarget.h +++ b/contrib/llvm/lib/Target/ARM/ARMSubtarget.h @@ -68,7 +68,7 @@ protected: CortexR5, CortexR52, CortexR7, - ExynosM1, + Exynos, Krait, Kryo, Swift @@ -106,6 +106,7 @@ protected: ARMv82a, ARMv83a, ARMv84a, + ARMv85a, ARMv8a, ARMv8mBaseline, ARMv8mMainline, @@ -153,6 +154,7 @@ protected: bool HasV8_2aOps = false; bool HasV8_3aOps = false; bool HasV8_4aOps = false; + bool HasV8_5aOps = false; bool HasV8MBaselineOps = false; bool HasV8MMainlineOps = false; @@ -227,6 +229,9 @@ protected: /// HasFullFP16 - True if subtarget supports half-precision FP operations bool HasFullFP16 = false; + /// HasFP16FML - True if subtarget supports half-precision FP fml operations + bool HasFP16FML = false; + /// HasD16 - True if subtarget is limited to 16 double precision /// FP registers for VFPv3. bool HasD16 = false; @@ -353,6 +358,9 @@ protected: /// If true, loading into a D subregister will be penalized. bool SlowLoadDSubregister = false; + /// If true, use a wider stride when allocating VFP registers. + bool UseWideStrideVFP = false; + /// If true, the AGU and NEON/FPU units are multiplexed. bool HasMuxedUnits = false; @@ -408,6 +416,9 @@ protected: /// UseSjLjEH - If true, the target uses SjLj exception handling (e.g. iOS). bool UseSjLjEH = false; + /// Has speculation barrier + bool HasSB = false; + /// Implicitly convert an instruction to a different one if its immediates /// cannot be encoded. For example, ADD r0, r1, #FFFFFFFF -> SUB r0, r1, #1. bool NegativeImmediates = true; @@ -432,6 +443,9 @@ protected: /// operand cycle returned by the itinerary data for pre-ISel operands. int PreISelOperandLatencyAdjustment = 2; + /// What alignment is preferred for loop bodies, in log2(bytes). 
+ unsigned PrefLoopAlignment = 0; + /// IsLittle - The target is Little Endian bool IsLittle; @@ -529,6 +543,7 @@ public: bool hasV8_2aOps() const { return HasV8_2aOps; } bool hasV8_3aOps() const { return HasV8_3aOps; } bool hasV8_4aOps() const { return HasV8_4aOps; } + bool hasV8_5aOps() const { return HasV8_5aOps; } bool hasV8MBaselineOps() const { return HasV8MBaselineOps; } bool hasV8MMainlineOps() const { return HasV8MMainlineOps; } @@ -596,6 +611,7 @@ public: bool hasVMLxHazards() const { return HasVMLxHazards; } bool hasSlowOddRegister() const { return SlowOddRegister; } bool hasSlowLoadDSubregister() const { return SlowLoadDSubregister; } + bool useWideStrideVFP() const { return UseWideStrideVFP; } bool hasMuxedUnits() const { return HasMuxedUnits; } bool dontWidenVMOVS() const { return DontWidenVMOVS; } bool useSplatVFPToNeon() const { return SplatVFPToNeon; } @@ -612,12 +628,14 @@ public: bool hasDSP() const { return HasDSP; } bool useNaClTrap() const { return UseNaClTrap; } bool useSjLjEH() const { return UseSjLjEH; } + bool hasSB() const { return HasSB; } bool genLongCalls() const { return GenLongCalls; } bool genExecuteOnly() const { return GenExecuteOnly; } bool hasFP16() const { return HasFP16; } bool hasD16() const { return HasD16; } bool hasFullFP16() const { return HasFullFP16; } + bool hasFP16FML() const { return HasFP16FML; } bool hasFuseAES() const { return HasFuseAES; } bool hasFuseLiterals() const { return HasFuseLiterals; } @@ -796,6 +814,10 @@ public: bool allowPositionIndependentMovt() const { return isROPI() || !isTargetELF(); } + + unsigned getPrefLoopAlignment() const { + return PrefLoopAlignment; + } }; } // end namespace llvm |