Diffstat (limited to 'test/CodeGen/ARM')
-rw-r--r--  test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll | 5
-rw-r--r--  test/CodeGen/ARM/2009-08-21-PostRAKill4.ll | 26
-rw-r--r--  test/CodeGen/ARM/2009-09-01-PostRAProlog.ll | 106
-rw-r--r--  test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll | 2
-rw-r--r--  test/CodeGen/ARM/2009-11-02-NegativeLane.ll | 3
-rw-r--r--  test/CodeGen/ARM/2010-03-18-ldm-rtrn.ll | 4
-rw-r--r--  test/CodeGen/ARM/2010-04-07-DbgValueOtherTargets.ll | 43
-rw-r--r--  test/CodeGen/ARM/2010-05-17-DAGCombineAssert.ll | 17
-rw-r--r--  test/CodeGen/ARM/2010-06-28-DAGCombineUndef.ll | 10
-rw-r--r--  test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll | 6
-rw-r--r--  test/CodeGen/ARM/2010-09-21-OptCmpBug.ll | 84
-rw-r--r--  test/CodeGen/ARM/2010-09-29-mc-asm-header-test.ll | 13
-rw-r--r--  test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll | 37
-rw-r--r--  test/CodeGen/ARM/2010-10-25-ifcvt-ldm.ll | 31
-rw-r--r--  test/CodeGen/ARM/2010-11-15-SpillEarlyClobber.ll | 85
-rw-r--r--  test/CodeGen/ARM/2010-11-29-PrologueBug.ll | 28
-rw-r--r--  test/CodeGen/ARM/2010-11-30-reloc-movt.ll | 42
-rw-r--r--  test/CodeGen/ARM/2010-12-07-PEIBug.ll | 40
-rw-r--r--  test/CodeGen/ARM/2010-12-08-tpsoft.ll | 52
-rw-r--r--  test/CodeGen/ARM/2010-12-13-reloc-pic.ll | 100
-rw-r--r--  test/CodeGen/ARM/2010-12-15-elf-lcomm.ll | 35
-rw-r--r--  test/CodeGen/ARM/2010-12-17-LocalStackSlotCrash.ll | 15
-rw-r--r--  test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll | 127
-rw-r--r--  test/CodeGen/ARM/2011-02-04-AntidepMultidef.ll | 128
-rw-r--r--  test/CodeGen/ARM/2011-02-07-AntidepClobber.ll | 89
-rw-r--r--  test/CodeGen/ARM/align.ll | 4
-rw-r--r--  test/CodeGen/ARM/arguments.ll | 4
-rw-r--r--  test/CodeGen/ARM/arm-and-tst-peephole.ll | 112
-rw-r--r--  test/CodeGen/ARM/atomic-cmp.ll | 17
-rw-r--r--  test/CodeGen/ARM/bfi.ll | 32
-rw-r--r--  test/CodeGen/ARM/bits.ll | 17
-rw-r--r--  test/CodeGen/ARM/bswap-inline-asm.ll | 9
-rw-r--r--  test/CodeGen/ARM/bx_fold.ll | 5
-rw-r--r--  test/CodeGen/ARM/call-tc.ll | 96
-rw-r--r--  test/CodeGen/ARM/clz.ll | 6
-rw-r--r--  test/CodeGen/ARM/code-placement.ll | 54
-rw-r--r--  test/CodeGen/ARM/constants.ll | 13
-rw-r--r--  test/CodeGen/ARM/crash.ll | 29
-rw-r--r--  test/CodeGen/ARM/div.ll | 2
-rw-r--r--  test/CodeGen/ARM/fabss.ll | 2
-rw-r--r--  test/CodeGen/ARM/fadds.ll | 2
-rw-r--r--  test/CodeGen/ARM/fast-isel-crash.ll | 21
-rw-r--r--  test/CodeGen/ARM/fast-isel-static.ll | 30
-rw-r--r--  test/CodeGen/ARM/fast-isel.ll | 31
-rw-r--r--  test/CodeGen/ARM/fcopysign.ll | 53
-rw-r--r--  test/CodeGen/ARM/fdivs.ll | 2
-rw-r--r--  test/CodeGen/ARM/fmacs.ll | 55
-rw-r--r--  test/CodeGen/ARM/fmscs.ll | 39
-rw-r--r--  test/CodeGen/ARM/fmuls.ll | 2
-rw-r--r--  test/CodeGen/ARM/fnegs.ll | 20
-rw-r--r--  test/CodeGen/ARM/fnmacs.ll | 31
-rw-r--r--  test/CodeGen/ARM/fnmscs.ll | 64
-rw-r--r--  test/CodeGen/ARM/fp.ll | 2
-rw-r--r--  test/CodeGen/ARM/fpcmp-opt.ll | 1
-rw-r--r--  test/CodeGen/ARM/fpcmp_ueq.ll | 10
-rw-r--r--  test/CodeGen/ARM/fpconsts.ll | 8
-rw-r--r--  test/CodeGen/ARM/fpconv.ll | 2
-rw-r--r--  test/CodeGen/ARM/global-merge.ll | 23
-rw-r--r--  test/CodeGen/ARM/hello.ll | 2
-rw-r--r--  test/CodeGen/ARM/ifcvt10.ll | 43
-rw-r--r--  test/CodeGen/ARM/ifcvt11.ll | 59
-rw-r--r--  test/CodeGen/ARM/ifcvt6.ll | 7
-rw-r--r--  test/CodeGen/ARM/ifcvt7.ll | 10
-rw-r--r--  test/CodeGen/ARM/ifcvt8.ll | 4
-rw-r--r--  test/CodeGen/ARM/inlineasm3.ll | 4
-rw-r--r--  test/CodeGen/ARM/ispositive.ll | 2
-rw-r--r--  test/CodeGen/ARM/ldm.ll | 11
-rw-r--r--  test/CodeGen/ARM/ldst-f32-2-i32.ll | 28
-rw-r--r--  test/CodeGen/ARM/load-global.ll | 50
-rw-r--r--  test/CodeGen/ARM/long.ll | 8
-rw-r--r--  test/CodeGen/ARM/long_shift.ll | 10
-rw-r--r--  test/CodeGen/ARM/lsr-code-insertion.ll | 2
-rw-r--r--  test/CodeGen/ARM/lsr-on-unrolled-loops.ll | 23
-rw-r--r--  test/CodeGen/ARM/machine-licm.ll | 66
-rw-r--r--  test/CodeGen/ARM/mul_const.ll | 2
-rw-r--r--  test/CodeGen/ARM/mult-alt-generic-arm.ll | 323
-rw-r--r--  test/CodeGen/ARM/neon_div.ll | 48
-rw-r--r--  test/CodeGen/ARM/pack.ll | 69
-rw-r--r--  test/CodeGen/ARM/phi.ll | 23
-rw-r--r--  test/CodeGen/ARM/prefetch.ll | 61
-rw-r--r--  test/CodeGen/ARM/reg_sequence.ll | 39
-rw-r--r--  test/CodeGen/ARM/remat.ll | 65
-rw-r--r--  test/CodeGen/ARM/rev.ll | 41
-rw-r--r--  test/CodeGen/ARM/select-imm.ll | 58
-rw-r--r--  test/CodeGen/ARM/select.ll | 4
-rw-r--r--  test/CodeGen/ARM/select_xform.ll | 63
-rw-r--r--  test/CodeGen/ARM/shifter_operand.ll | 72
-rw-r--r--  test/CodeGen/ARM/spill-q.ll | 36
-rw-r--r--  test/CodeGen/ARM/stm.ll | 5
-rw-r--r--  test/CodeGen/ARM/str_pre-2.ll | 5
-rw-r--r--  test/CodeGen/ARM/tail-opts.ll | 9
-rw-r--r--  test/CodeGen/ARM/thumb1-varalloc.ll | 40
-rw-r--r--  test/CodeGen/ARM/umulo-32.ll | 14
-rw-r--r--  test/CodeGen/ARM/unaligned_load_store.ll | 3
-rw-r--r--  test/CodeGen/ARM/vbits.ll | 42
-rw-r--r--  test/CodeGen/ARM/vceq.ll | 11
-rw-r--r--  test/CodeGen/ARM/vcge.ll | 41
-rw-r--r--  test/CodeGen/ARM/vcgt.ll | 28
-rw-r--r--  test/CodeGen/ARM/vcombine.ll | 38
-rw-r--r--  test/CodeGen/ARM/vcvt.ll | 20
-rw-r--r--  test/CodeGen/ARM/vdup.ll | 18
-rw-r--r--  test/CodeGen/ARM/vector-DAGCombine.ll | 107
-rw-r--r--  test/CodeGen/ARM/vext.ll | 59
-rw-r--r--  test/CodeGen/ARM/vget_lane.ll | 37
-rw-r--r--  test/CodeGen/ARM/vld1.ll | 50
-rw-r--r--  test/CodeGen/ARM/vld2.ll | 59
-rw-r--r--  test/CodeGen/ARM/vld3.ll | 48
-rw-r--r--  test/CodeGen/ARM/vld4.ll | 62
-rw-r--r--  test/CodeGen/ARM/vlddup.ll | 212
-rw-r--r--  test/CodeGen/ARM/vldlane.ll | 204
-rw-r--r--  test/CodeGen/ARM/vmov.ll | 70
-rw-r--r--  test/CodeGen/ARM/vmul.ll | 72
-rw-r--r--  test/CodeGen/ARM/vrev.ll | 18
-rw-r--r--  test/CodeGen/ARM/vst1.ll | 41
-rw-r--r--  test/CodeGen/ARM/vst2.ll | 55
-rw-r--r--  test/CodeGen/ARM/vst3.ll | 47
-rw-r--r--  test/CodeGen/ARM/vst4.ll | 58
-rw-r--r--  test/CodeGen/ARM/vstlane.ll | 161
118 files changed, 4035 insertions(+), 758 deletions(-)
diff --git a/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll b/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
index ee63656b26d3..3694aaad5549 100644
--- a/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
+++ b/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2 | FileCheck %s
+; RUN: llc < %s -mtriple=arm-apple-darwin -mattr=+v6,+vfp2 | FileCheck %s
@quant_coef = external global [6 x [4 x [4 x i32]]] ; <[6 x [4 x [4 x i32]]]*> [#uses=1]
@dequant_coef = external global [6 x [4 x [4 x i32]]] ; <[6 x [4 x [4 x i32]]]*> [#uses=1]
@@ -8,8 +8,9 @@
define fastcc i32 @dct_luma_sp(i32 %block_x, i32 %block_y, i32* %coeff_cost) {
entry:
; Make sure to use base-updating stores for saving callee-saved registers.
+; CHECK: push
; CHECK-NOT: sub sp
-; CHECK: vstmdb sp!
+; CHECK: push
%predicted_block = alloca [4 x [4 x i32]], align 4 ; <[4 x [4 x i32]]*> [#uses=1]
br label %cond_next489
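
The hunk above replaces the old vstmdb-based check with `push`: the point of the test is that callee-saved registers are saved with a single base-updating store rather than a `sub sp` adjustment followed by individual stores. A minimal C sketch of the kind of function that exercises this (hypothetical, not the original source; any function with enough values live across a call will do):

    int use5(int, int, int, int, int);

    int prologue_demo(int a, int b, int c, int d, int e) {
        /* The call clobbers r0-r3, so a..e must live in callee-saved
           registers; saving them should emit one base-updating push,
           not "sub sp" plus separate stores. */
        int s = use5(a, b, c, d, e);
        return s + a + b + c + d + e;
    }
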
diff --git a/test/CodeGen/ARM/2009-08-21-PostRAKill4.ll b/test/CodeGen/ARM/2009-08-21-PostRAKill4.ll
deleted file mode 100644
index 5cfc68d09408..000000000000
--- a/test/CodeGen/ARM/2009-08-21-PostRAKill4.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -asm-verbose=false -O3 -relocation-model=pic -disable-fp-elim -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -post-RA-scheduler
-
-; ModuleID = '<stdin>'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
-target triple = "armv7-apple-darwin9"
-
-@.str = external constant [36 x i8], align 1 ; <[36 x i8]*> [#uses=0]
-@.str1 = external constant [31 x i8], align 1 ; <[31 x i8]*> [#uses=1]
-@.str2 = external constant [4 x i8], align 1 ; <[4 x i8]*> [#uses=1]
-
-declare i32 @getUnknown(i32, ...) nounwind
-
-declare void @llvm.va_start(i8*) nounwind
-
-declare void @llvm.va_end(i8*) nounwind
-
-declare i32 @printf(i8* nocapture, ...) nounwind
-
-define i32 @main() nounwind {
-entry:
- %0 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([31 x i8]* @.str1, i32 0, i32 0), i32 1, i32 1, i32 1, i32 1, i32 1, i32 1) nounwind ; <i32> [#uses=0]
- %1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([31 x i8]* @.str1, i32 0, i32 0), i32 -128, i32 116, i32 116, i32 -3852, i32 -31232, i32 -1708916736) nounwind ; <i32> [#uses=0]
- %2 = tail call i32 (i32, ...)* @getUnknown(i32 undef, i32 116, i32 116, i32 -3852, i32 -31232, i32 30556, i32 -1708916736) nounwind ; <i32> [#uses=1]
- %3 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @.str2, i32 0, i32 0), i32 %2) nounwind ; <i32> [#uses=0]
- ret i32 0
-}
diff --git a/test/CodeGen/ARM/2009-09-01-PostRAProlog.ll b/test/CodeGen/ARM/2009-09-01-PostRAProlog.ll
deleted file mode 100644
index 06a152d56e4d..000000000000
--- a/test/CodeGen/ARM/2009-09-01-PostRAProlog.ll
+++ /dev/null
@@ -1,106 +0,0 @@
-; RUN: llc -asm-verbose=false -O3 -relocation-model=pic -disable-fp-elim -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 < %s | FileCheck %s
-
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
-target triple = "thumbv7-apple-darwin9"
-
-@history = internal global [2 x [56 x i32]] [[56 x i32] [i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0], [56 x i32] [i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0]] ; <[2 x [56 x i32]]*> [#uses=3]
-@nodes = internal global i64 0 ; <i64*> [#uses=4]
-@.str = private constant [9 x i8] c"##-<=>+#\00", align 1 ; <[9 x i8]*> [#uses=2]
-@.str1 = private constant [6 x i8] c"%c%d\0A\00", align 1 ; <[6 x i8]*> [#uses=1]
-@.str2 = private constant [16 x i8] c"Fhourstones 2.0\00", align 1 ; <[16 x i8]*> [#uses=1]
-@.str3 = private constant [54 x i8] c"Using %d transposition table entries with %d probes.\0A\00", align 1 ; <[54 x i8]*> [#uses=1]
-@.str4 = private constant [31 x i8] c"Solving %d-ply position after \00", align 1 ; <[31 x i8]*> [#uses=1]
-@.str5 = private constant [7 x i8] c" . . .\00", align 1 ; <[7 x i8]*> [#uses=1]
-@.str6 = private constant [28 x i8] c"score = %d (%c) work = %d\0A\00", align 1 ; <[28 x i8]*> [#uses=1]
-@.str7 = private constant [36 x i8] c"%lu pos / %lu msec = %.1f Kpos/sec\0A\00", align 1 ; <[36 x i8]*> [#uses=1]
-@plycnt = internal global i32 0 ; <i32*> [#uses=21]
-@dias = internal global [19 x i32] zeroinitializer ; <[19 x i32]*> [#uses=43]
-@columns = internal global [128 x i32] zeroinitializer ; <[128 x i32]*> [#uses=18]
-@height = internal global [128 x i32] zeroinitializer ; <[128 x i32]*> [#uses=21]
-@rows = internal global [8 x i32] zeroinitializer ; <[8 x i32]*> [#uses=20]
-@colthr = internal global [128 x i32] zeroinitializer ; <[128 x i32]*> [#uses=5]
-@moves = internal global [44 x i32] zeroinitializer ; <[44 x i32]*> [#uses=9]
-@.str8 = private constant [3 x i8] c"%d\00", align 1 ; <[3 x i8]*> [#uses=1]
-@he = internal global i8* null ; <i8**> [#uses=9]
-@hits = internal global i64 0 ; <i64*> [#uses=8]
-@posed = internal global i64 0 ; <i64*> [#uses=7]
-@ht = internal global i32* null ; <i32**> [#uses=5]
-@.str16 = private constant [19 x i8] c"store rate = %.3f\0A\00", align 1 ; <[19 x i8]*> [#uses=1]
-@.str117 = private constant [45 x i8] c"- %5.3f < %5.3f = %5.3f > %5.3f + %5.3f\0A\00", align 1 ; <[45 x i8]*> [#uses=1]
-@.str218 = private constant [6 x i8] c"%7d%c\00", align 1 ; <[6 x i8]*> [#uses=1]
-@.str319 = private constant [30 x i8] c"Failed to allocate %u bytes.\0A\00", align 1 ; <[30 x i8]*> [#uses=1]
-
-declare i32 @puts(i8* nocapture) nounwind
-
-declare i32 @getchar() nounwind
-
-define internal i32 @transpose() nounwind readonly {
-; CHECK: push
-entry:
- %0 = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 1), align 4 ; <i32> [#uses=1]
- %1 = shl i32 %0, 7 ; <i32> [#uses=1]
- %2 = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 2), align 4 ; <i32> [#uses=1]
- %3 = or i32 %1, %2 ; <i32> [#uses=1]
- %4 = shl i32 %3, 7 ; <i32> [#uses=1]
- %5 = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 3), align 4 ; <i32> [#uses=1]
- %6 = or i32 %4, %5 ; <i32> [#uses=3]
- %7 = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 7), align 4 ; <i32> [#uses=1]
- %8 = shl i32 %7, 7 ; <i32> [#uses=1]
- %9 = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 6), align 4 ; <i32> [#uses=1]
- %10 = or i32 %8, %9 ; <i32> [#uses=1]
- %11 = shl i32 %10, 7 ; <i32> [#uses=1]
- %12 = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 5), align 4 ; <i32> [#uses=1]
- %13 = or i32 %11, %12 ; <i32> [#uses=3]
- %14 = icmp ugt i32 %6, %13 ; <i1> [#uses=2]
- %.pn2.in.i = select i1 %14, i32 %6, i32 %13 ; <i32> [#uses=1]
- %.pn1.in.i = select i1 %14, i32 %13, i32 %6 ; <i32> [#uses=1]
- %.pn2.i = shl i32 %.pn2.in.i, 7 ; <i32> [#uses=1]
- %.pn3.i = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 4) ; <i32> [#uses=1]
- %.pn.in.in.i = or i32 %.pn2.i, %.pn3.i ; <i32> [#uses=1]
- %.pn.in.i = zext i32 %.pn.in.in.i to i64 ; <i64> [#uses=1]
- %.pn.i = shl i64 %.pn.in.i, 21 ; <i64> [#uses=1]
- %.pn1.i = zext i32 %.pn1.in.i to i64 ; <i64> [#uses=1]
- %iftmp.22.0.i = or i64 %.pn.i, %.pn1.i ; <i64> [#uses=2]
- %15 = lshr i64 %iftmp.22.0.i, 17 ; <i64> [#uses=1]
- %16 = trunc i64 %15 to i32 ; <i32> [#uses=2]
- %17 = urem i64 %iftmp.22.0.i, 1050011 ; <i64> [#uses=1]
- %18 = trunc i64 %17 to i32 ; <i32> [#uses=1]
- %19 = urem i32 %16, 179 ; <i32> [#uses=1]
- %20 = or i32 %19, 131072 ; <i32> [#uses=1]
- %21 = load i32** @ht, align 4 ; <i32*> [#uses=1]
- br label %bb5
-
-bb: ; preds = %bb5
- %22 = getelementptr inbounds i32* %21, i32 %x.0 ; <i32*> [#uses=1]
- %23 = load i32* %22, align 4 ; <i32> [#uses=1]
- %24 = icmp eq i32 %23, %16 ; <i1> [#uses=1]
- br i1 %24, label %bb1, label %bb2
-
-bb1: ; preds = %bb
- %25 = load i8** @he, align 4 ; <i8*> [#uses=1]
- %26 = getelementptr inbounds i8* %25, i32 %x.0 ; <i8*> [#uses=1]
- %27 = load i8* %26, align 1 ; <i8> [#uses=1]
- %28 = sext i8 %27 to i32 ; <i32> [#uses=1]
- ret i32 %28
-
-bb2: ; preds = %bb
- %29 = add nsw i32 %20, %x.0 ; <i32> [#uses=3]
- %30 = add i32 %29, -1050011 ; <i32> [#uses=1]
- %31 = icmp sgt i32 %29, 1050010 ; <i1> [#uses=1]
- %. = select i1 %31, i32 %30, i32 %29 ; <i32> [#uses=1]
- %32 = add i32 %33, 1 ; <i32> [#uses=1]
- br label %bb5
-
-bb5: ; preds = %bb2, %entry
- %33 = phi i32 [ 0, %entry ], [ %32, %bb2 ] ; <i32> [#uses=2]
- %x.0 = phi i32 [ %18, %entry ], [ %., %bb2 ] ; <i32> [#uses=3]
- %34 = icmp sgt i32 %33, 7 ; <i1> [#uses=1]
- br i1 %34, label %bb7, label %bb
-
-bb7: ; preds = %bb5
- ret i32 -128
-}
-
-declare noalias i8* @calloc(i32, i32) nounwind
-
-declare void @llvm.memset.i64(i8* nocapture, i8, i64, i32) nounwind
diff --git a/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll b/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll
index 4aa879dc4092..0fe3b39a622d 100644
--- a/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll
+++ b/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll
@@ -5,7 +5,7 @@
define void @foo(%0* noalias nocapture sret %agg.result, double %x.0, double %y.0) nounwind {
; CHECK: foo:
-; CHECK: bl __adddf3
+; CHECK: bl __aeabi_dadd
; CHECK-NOT: strd
; CHECK: mov
%x76 = fmul double %y.0, 0.000000e+00 ; <double> [#uses=1]
diff --git a/test/CodeGen/ARM/2009-11-02-NegativeLane.ll b/test/CodeGen/ARM/2009-11-02-NegativeLane.ll
index 89c9037bd9f6..ca5ae8b62e8b 100644
--- a/test/CodeGen/ARM/2009-11-02-NegativeLane.ll
+++ b/test/CodeGen/ARM/2009-11-02-NegativeLane.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mcpu=cortex-a8 < %s | grep vdup.16
+; RUN: llc -mcpu=cortex-a8 < %s | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
target triple = "armv7-eabi"
@@ -7,6 +7,7 @@ entry:
br i1 undef, label %return, label %bb
bb: ; preds = %bb, %entry
+; CHECK: vld1.16 {d16[], d17[]}
%0 = load i16* undef, align 2
%1 = insertelement <8 x i16> undef, i16 %0, i32 2
%2 = insertelement <8 x i16> %1, i16 undef, i32 3
diff --git a/test/CodeGen/ARM/2010-03-18-ldm-rtrn.ll b/test/CodeGen/ARM/2010-03-18-ldm-rtrn.ll
index 31525eff4461..d9e1a1486a3c 100644
--- a/test/CodeGen/ARM/2010-03-18-ldm-rtrn.ll
+++ b/test/CodeGen/ARM/2010-03-18-ldm-rtrn.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=armv4-unknown-eabi | FileCheck %s
+; RUN: llc < %s -mtriple=armv4-unknown-eabi | FileCheck %s -check-prefix=V4
; RUN: llc < %s -mtriple=armv5-unknown-eabi | FileCheck %s
; RUN: llc < %s -mtriple=armv6-unknown-eabi | FileCheck %s
@@ -7,6 +7,8 @@ entry:
%0 = tail call i32 @foo(i32 %a) nounwind ; <i32> [#uses=1]
%1 = add nsw i32 %0, 3 ; <i32> [#uses=1]
; CHECK: ldmia sp!, {r11, pc}
+; V4: pop
+; V4-NEXT: mov pc, lr
ret i32 %1
}
diff --git a/test/CodeGen/ARM/2010-04-07-DbgValueOtherTargets.ll b/test/CodeGen/ARM/2010-04-07-DbgValueOtherTargets.ll
index 8a24cfa39785..642268992062 100644
--- a/test/CodeGen/ARM/2010-04-07-DbgValueOtherTargets.ll
+++ b/test/CodeGen/ARM/2010-04-07-DbgValueOtherTargets.ll
@@ -1,33 +1,28 @@
; RUN: llc -O0 -march=arm -asm-verbose < %s | FileCheck %s
; Check that DEBUG_VALUE comments come through on a variety of targets.
-%tart.reflect.ComplexType = type { double, double }
-
-@.type.SwitchStmtTest = constant %tart.reflect.ComplexType { double 3.0, double 2.0 }
-
-define i32 @"main(tart.core.String[])->int32"(i32 %args) {
+define i32 @main() nounwind ssp {
entry:
; CHECK: DEBUG_VALUE
- tail call void @llvm.dbg.value(metadata !14, i64 0, metadata !8)
- tail call void @"tart.reflect.ComplexType.create->tart.core.Object"(%tart.reflect.ComplexType* @.type.SwitchStmtTest) ; <%tart.core.Object*> [#uses=2]
- ret i32 3
+ call void @llvm.dbg.value(metadata !6, i64 0, metadata !7), !dbg !9
+ ret i32 0, !dbg !10
}
+declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
+
declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
-declare void @"tart.reflect.ComplexType.create->tart.core.Object"(%tart.reflect.ComplexType*) nounwind readnone
-!0 = metadata !{i32 458769, i32 0, i32 1, metadata !"sm.c", metadata !"/Volumes/MacOS9/tests/", metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, i1 false, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 458790, metadata !0, metadata !"", metadata !0, i32 0, i64 192, i64 64, i64 0, i32 0, metadata !2} ; [ DW_TAG_const_type ]
-!2 = metadata !{i32 458771, metadata !0, metadata !"C", metadata !0, i32 1, i64 192, i64 64, i64 0, i32 0, null, metadata !3, i32 0, null} ; [ DW_TAG_structure_type ]
-!3 = metadata !{metadata !4, metadata !6, metadata !7}
-!4 = metadata !{i32 458765, metadata !2, metadata !"x", metadata !0, i32 1, i64 64, i64 64, i64 0, i32 0, metadata !5} ; [ DW_TAG_member ]
-!5 = metadata !{i32 458788, metadata !0, metadata !"double", metadata !0, i32 0, i64 64, i64 64, i64 0, i32 0, i32 4} ; [ DW_TAG_base_type ]
-!6 = metadata !{i32 458765, metadata !2, metadata !"y", metadata !0, i32 1, i64 64, i64 64, i64 64, i32 0, metadata !5} ; [ DW_TAG_member ]
-!7 = metadata !{i32 458765, metadata !2, metadata !"z", metadata !0, i32 1, i64 64, i64 64, i64 128, i32 0, metadata !5} ; [ DW_TAG_member ]
-!8 = metadata !{i32 459008, metadata !9, metadata !"t", metadata !0, i32 5, metadata !2} ; [ DW_TAG_auto_variable ]
-!9 = metadata !{i32 458763, metadata !10} ; [ DW_TAG_lexical_block ]
-!10 = metadata !{i32 458798, i32 0, metadata !0, metadata !"foo", metadata !"foo", metadata !"foo", metadata !0, i32 4, metadata !11, i1 false, i1 true, i32 0, i32 0, null} ; [ DW_TAG_subprogram ]
-!11 = metadata !{i32 458773, metadata !0, metadata !"", metadata !0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !12, i32 0, null} ; [ DW_TAG_subroutine_type ]
-!12 = metadata !{metadata !13}
-!13 = metadata !{i32 458788, metadata !0, metadata !"int", metadata !0, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!14 = metadata !{%tart.reflect.ComplexType* @.type.SwitchStmtTest}
+!llvm.dbg.sp = !{!0}
+
+!0 = metadata !{i32 589870, i32 0, metadata !1, metadata !"main", metadata !"main", metadata !"", metadata !1, i32 2, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, i32 ()* @main} ; [ DW_TAG_subprogram ]
+!1 = metadata !{i32 589865, metadata !"/tmp/x.c", metadata !"/Users/manav", metadata !2} ; [ DW_TAG_file_type ]
+!2 = metadata !{i32 589841, i32 0, i32 12, metadata !"/tmp/x.c", metadata !"/Users/manav", metadata !"clang version 2.9 (trunk 120996)", i1 true, i1 false, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
+!3 = metadata !{i32 589845, metadata !1, metadata !"", metadata !1, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !4, i32 0, null} ; [ DW_TAG_subroutine_type ]
+!4 = metadata !{metadata !5}
+!5 = metadata !{i32 589860, metadata !2, metadata !"int", metadata !1, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
+!6 = metadata !{i32 0}
+!7 = metadata !{i32 590080, metadata !8, metadata !"i", metadata !1, i32 3, metadata !5, i32 0} ; [ DW_TAG_auto_variable ]
+!8 = metadata !{i32 589835, metadata !0, i32 2, i32 12, metadata !1, i32 0} ; [ DW_TAG_lexical_block ]
+!9 = metadata !{i32 3, i32 11, metadata !8, null}
+!10 = metadata !{i32 4, i32 2, metadata !8, null}
+
diff --git a/test/CodeGen/ARM/2010-05-17-DAGCombineAssert.ll b/test/CodeGen/ARM/2010-05-17-DAGCombineAssert.ll
deleted file mode 100644
index 2a4bbd1d8cc6..000000000000
--- a/test/CodeGen/ARM/2010-05-17-DAGCombineAssert.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -mtriple=armv7-eabi -mcpu=cortex-a8
-; PR7158
-
-define arm_aapcs_vfpcc i32 @main() nounwind {
-bb.nph55.bb.nph55.split_crit_edge:
- br label %bb3
-
-bb3: ; preds = %bb3, %bb.nph55.bb.nph55.split_crit_edge
- br i1 undef, label %bb.i19, label %bb3
-
-bb.i19: ; preds = %bb.i19, %bb3
- %0 = insertelement <4 x float> undef, float undef, i32 3 ; <<4 x float>> [#uses=3]
- %1 = fmul <4 x float> %0, %0 ; <<4 x float>> [#uses=1]
- %2 = bitcast <4 x float> %1 to <2 x double> ; <<2 x double>> [#uses=0]
- %3 = fmul <4 x float> %0, undef ; <<4 x float>> [#uses=0]
- br label %bb.i19
-}
diff --git a/test/CodeGen/ARM/2010-06-28-DAGCombineUndef.ll b/test/CodeGen/ARM/2010-06-28-DAGCombineUndef.ll
deleted file mode 100644
index ad2810b5bb9a..000000000000
--- a/test/CodeGen/ARM/2010-06-28-DAGCombineUndef.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon
-
-define void @main() nounwind {
-entry:
- store <2 x i64> undef, <2 x i64>* undef, align 16
- %0 = load <16 x i8>* undef, align 16 ; <<16 x i8>> [#uses=1]
- %1 = or <16 x i8> zeroinitializer, %0 ; <<16 x i8>> [#uses=1]
- store <16 x i8> %1, <16 x i8>* undef, align 16
- ret void
-}
diff --git a/test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll b/test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll
index ffc47ebdf196..b9d5600d2ad8 100644
--- a/test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll
+++ b/test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll
@@ -10,9 +10,9 @@ target triple = "thumbv7-apple-darwin10"
; %reg1028 gets allocated %Q0, and if %reg1030 is reloaded for the partial
; redef, it cannot also get %Q0.
-; CHECK: vld1.64 {d0, d1}, [r{{.}}]
-; CHECK-NOT: vld1.64 {d0, d1}
-; CHECK: vmov.f64 d3, d0
+; CHECK: vld1.64 {d16, d17}, [r{{.}}]
+; CHECK-NOT: vld1.64 {d16, d17}
+; CHECK: vmov.f64 d19, d16
define i32 @test(i8* %arg) nounwind {
entry:
diff --git a/test/CodeGen/ARM/2010-09-21-OptCmpBug.ll b/test/CodeGen/ARM/2010-09-21-OptCmpBug.ll
new file mode 100644
index 000000000000..d2820918626a
--- /dev/null
+++ b/test/CodeGen/ARM/2010-09-21-OptCmpBug.ll
@@ -0,0 +1,84 @@
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin10
+
+declare noalias i8* @malloc(i32) nounwind
+
+define internal void @gl_DrawPixels(i32 %width, i32 %height, i32 %format, i32 %type, i8* %pixels) nounwind {
+entry:
+ br i1 undef, label %bb3.i, label %bb3
+
+bb3.i: ; preds = %entry
+ unreachable
+
+gl_error.exit: ; preds = %bb22
+ ret void
+
+bb3: ; preds = %entry
+ br i1 false, label %bb5, label %bb4
+
+bb4: ; preds = %bb3
+ br label %bb5
+
+bb5: ; preds = %bb4, %bb3
+ br i1 undef, label %bb19, label %bb22
+
+bb19: ; preds = %bb5
+ switch i32 %type, label %bb3.i6.i [
+ i32 5120, label %bb1.i13
+ i32 5121, label %bb1.i13
+ i32 6656, label %bb9.i.i6
+ ]
+
+bb9.i.i6: ; preds = %bb19
+ br label %bb1.i13
+
+bb3.i6.i: ; preds = %bb19
+ unreachable
+
+bb1.i13: ; preds = %bb9.i.i6, %bb19, %bb19
+ br i1 undef, label %bb3.i17, label %bb2.i16
+
+bb2.i16: ; preds = %bb1.i13
+ unreachable
+
+bb3.i17: ; preds = %bb1.i13
+ br i1 undef, label %bb4.i18, label %bb23.i
+
+bb4.i18: ; preds = %bb3.i17
+ %0 = mul nsw i32 %height, %width
+ %1 = and i32 %0, 7
+ %not..i = icmp ne i32 %1, 0
+ %2 = zext i1 %not..i to i32
+ %storemerge2.i = add i32 0, %2
+ %3 = call noalias i8* @malloc(i32 %storemerge2.i) nounwind
+ br i1 undef, label %bb3.i9, label %bb9.i
+
+bb9.i: ; preds = %bb4.i18
+ br i1 undef, label %bb13.i19, label %bb.i24.i
+
+bb13.i19: ; preds = %bb9.i
+ br i1 undef, label %bb14.i20, label %bb15.i
+
+bb14.i20: ; preds = %bb13.i19
+ unreachable
+
+bb15.i: ; preds = %bb13.i19
+ unreachable
+
+bb.i24.i: ; preds = %bb.i24.i, %bb9.i
+ %storemerge1.i21.i = phi i32 [ %4, %bb.i24.i ], [ 0, %bb9.i ]
+ %4 = add i32 %storemerge1.i21.i, 1
+ %exitcond47.i = icmp eq i32 %4, %storemerge2.i
+ br i1 %exitcond47.i, label %bb22, label %bb.i24.i
+
+bb23.i: ; preds = %bb3.i17
+ unreachable
+
+bb3.i9: ; preds = %bb4.i18
+ unreachable
+
+bb22: ; preds = %bb.i24.i, %bb5
+ br i1 undef, label %gl_error.exit, label %bb23
+
+bb23: ; preds = %bb22
+ ret void
+}
diff --git a/test/CodeGen/ARM/2010-09-29-mc-asm-header-test.ll b/test/CodeGen/ARM/2010-09-29-mc-asm-header-test.ll
new file mode 100644
index 000000000000..bda14bcb1520
--- /dev/null
+++ b/test/CodeGen/ARM/2010-09-29-mc-asm-header-test.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -mtriple=arm-linux-gnueabi | FileCheck %s
+; This tests that MC/asm header conversion is smooth
+;
+; CHECK: .syntax unified
+; CHECK: .eabi_attribute 20, 1
+; CHECK: .eabi_attribute 21, 1
+; CHECK: .eabi_attribute 23, 3
+; CHECK: .eabi_attribute 24, 1
+; CHECK: .eabi_attribute 25, 1
+
+define i32 @f(i64 %z) {
+ ret i32 0
+}
diff --git a/test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll b/test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll
new file mode 100644
index 000000000000..ee443febcc1e
--- /dev/null
+++ b/test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll
@@ -0,0 +1,37 @@
+; RUN: llc %s -mtriple=arm-linux-gnueabi -filetype=obj -o - | \
+; RUN: elf-dump --dump-section-data | FileCheck -check-prefix=BASIC %s
+; RUN: llc %s -mtriple=armv7-linux-gnueabi -march=arm -mcpu=cortex-a8 \
+; RUN: -mattr=-neon -mattr=+vfp2 \
+; RUN: -arm-reserve-r9 -filetype=obj -o - | \
+; RUN: elf-dump --dump-section-data | FileCheck -check-prefix=CORTEXA8 %s
+
+
+; This tests that the expected ARM attributes are emitted.
+;
+; BASIC: .ARM.attributes
+; BASIC-NEXT: 0x70000003
+; BASIC-NEXT: 0x00000000
+; BASIC-NEXT: 0x00000000
+; BASIC-NEXT: 0x0000003c
+; BASIC-NEXT: 0x00000020
+; BASIC-NEXT: 0x00000000
+; BASIC-NEXT: 0x00000000
+; BASIC-NEXT: 0x00000001
+; BASIC-NEXT: 0x00000000
+; BASIC-NEXT: '411f0000 00616561 62690001 15000000 06020801 09011401 15011703 18011901'
+
+; CORTEXA8: .ARM.attributes
+; CORTEXA8-NEXT: 0x70000003
+; CORTEXA8-NEXT: 0x00000000
+; CORTEXA8-NEXT: 0x00000000
+; CORTEXA8-NEXT: 0x0000003c
+; CORTEXA8-NEXT: 0x0000002f
+; CORTEXA8-NEXT: 0x00000000
+; CORTEXA8-NEXT: 0x00000000
+; CORTEXA8-NEXT: 0x00000001
+; CORTEXA8-NEXT: 0x00000000
+; CORTEXA8-NEXT: '412e0000 00616561 62690001 24000000 05434f52 5445582d 41380006 0a074108 0109020a 02140115 01170318 011901'
+
+define i32 @f(i64 %z) {
+ ret i32 0
+}
diff --git a/test/CodeGen/ARM/2010-10-25-ifcvt-ldm.ll b/test/CodeGen/ARM/2010-10-25-ifcvt-ldm.ll
new file mode 100644
index 000000000000..163c9b030ec8
--- /dev/null
+++ b/test/CodeGen/ARM/2010-10-25-ifcvt-ldm.ll
@@ -0,0 +1,31 @@
+; RUN: llc < %s -mtriple=armv6-apple-darwin -mcpu=arm1136jf-s | FileCheck %s
+; Radar 8589805: Counting the number of microcoded operations, such as for an
+; LDM instruction, was causing an assertion failure because the microop count
+; was being treated as an instruction count.
+
+; CHECK: push
+; CHECK: ldmia
+; CHECK: ldmia
+; CHECK: ldmia
+
+define i32 @test(i32 %x) {
+entry:
+ %0 = tail call signext i16 undef(i32* undef)
+ switch i32 undef, label %bb3 [
+ i32 0, label %bb4
+ i32 1, label %bb1
+ i32 2, label %bb2
+ ]
+
+bb1:
+ ret i32 1
+
+bb2:
+ ret i32 2
+
+bb3:
+ ret i32 1
+
+bb4:
+ ret i32 3
+}
diff --git a/test/CodeGen/ARM/2010-11-15-SpillEarlyClobber.ll b/test/CodeGen/ARM/2010-11-15-SpillEarlyClobber.ll
new file mode 100644
index 000000000000..04220949027f
--- /dev/null
+++ b/test/CodeGen/ARM/2010-11-15-SpillEarlyClobber.ll
@@ -0,0 +1,85 @@
+; RUN: llc < %s -verify-machineinstrs -spiller=standard
+; RUN: llc < %s -verify-machineinstrs -spiller=inline
+; PR8612
+;
+; This test has an inline asm with early-clobber arguments.
+; It is big enough that one of the early clobber registers is spilled.
+;
+; All the spillers would get the live ranges wrong when spilling an early
+; clobber, allowing the undef register to be allocated to the same register as
+; the early clobber.
+;
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32"
+target triple = "armv7-eabi"
+
+%0 = type { i32, i32 }
+
+define void @foo(i32* %in) nounwind {
+entry:
+ br label %bb.i
+
+bb.i: ; preds = %bb.i, %entry
+ br i1 undef, label %bb10.preheader.i, label %bb.i
+
+bb10.preheader.i: ; preds = %bb.i
+ br label %bb10.i
+
+bb10.i: ; preds = %bb10.i, %bb10.preheader.i
+ br i1 undef, label %bb27.i, label %bb10.i
+
+bb27.i: ; preds = %bb10.i
+ br label %bb28.i
+
+bb28.i: ; preds = %bb28.i, %bb27.i
+ br i1 undef, label %presymmetry.exit, label %bb28.i
+
+presymmetry.exit: ; preds = %bb28.i
+ %tmp175387 = or i32 undef, 12
+ %scevgep101.i = getelementptr i32* %in, i32 undef
+ %tmp189401 = or i32 undef, 7
+ %scevgep97.i = getelementptr i32* %in, i32 undef
+ %tmp198410 = or i32 undef, 1
+ %scevgep.i48 = getelementptr i32* %in, i32 undef
+ %0 = load i32* %scevgep.i48, align 4
+ %1 = add nsw i32 %0, 0
+ store i32 %1, i32* undef, align 4
+ %asmtmp.i.i33.i.i.i = tail call %0 asm "smull\09$0, $1, $2, $3", "=&r,=&r,%r,r,~{cc}"(i32 undef, i32 1518500250) nounwind
+ %asmresult1.i.i34.i.i.i = extractvalue %0 %asmtmp.i.i33.i.i.i, 1
+ %2 = shl i32 %asmresult1.i.i34.i.i.i, 1
+ %3 = load i32* null, align 4
+ %4 = load i32* undef, align 4
+ %5 = sub nsw i32 %3, %4
+ %6 = load i32* undef, align 4
+ %7 = load i32* null, align 4
+ %8 = sub nsw i32 %6, %7
+ %9 = load i32* %scevgep97.i, align 4
+ %10 = load i32* undef, align 4
+ %11 = sub nsw i32 %9, %10
+ %12 = load i32* null, align 4
+ %13 = load i32* %scevgep101.i, align 4
+ %14 = sub nsw i32 %12, %13
+ %15 = load i32* %scevgep.i48, align 4
+ %16 = load i32* null, align 4
+ %17 = add nsw i32 %16, %15
+ %18 = sub nsw i32 %15, %16
+ %19 = load i32* undef, align 4
+ %20 = add nsw i32 %19, %2
+ %21 = sub nsw i32 %19, %2
+ %22 = add nsw i32 %14, %5
+ %23 = sub nsw i32 %5, %14
+ %24 = add nsw i32 %11, %8
+ %25 = sub nsw i32 %8, %11
+ %26 = add nsw i32 %21, %23
+ store i32 %26, i32* %scevgep.i48, align 4
+ %27 = sub nsw i32 %25, %18
+ store i32 %27, i32* null, align 4
+ %28 = sub nsw i32 %23, %21
+ store i32 %28, i32* undef, align 4
+ %29 = add nsw i32 %18, %25
+ store i32 %29, i32* undef, align 4
+ %30 = add nsw i32 %17, %22
+ store i32 %30, i32* %scevgep101.i, align 4
+ %31 = add nsw i32 %20, %24
+ store i32 %31, i32* null, align 4
+ unreachable
+}
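
For context, the `=&r` constraints in the inline asm above are early-clobber outputs: the outputs are written before the inputs are fully consumed, so the allocator must not give them an input's register. A hedged C sketch of the same pattern, mirroring the test's `smull` asm (not taken from the PR8612 source itself):

    static inline int smull_hi(int a, int b) {
        int lo, hi;
        /* "=&r" marks lo and hi early-clobber: smull writes them while
           a and b are still live, so they need distinct registers. */
        __asm__("smull %0, %1, %2, %3"
                : "=&r"(lo), "=&r"(hi)
                : "r"(a), "r"(b)
                : "cc");              /* the test marks cc clobbered too */
        return hi;
    }
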
diff --git a/test/CodeGen/ARM/2010-11-29-PrologueBug.ll b/test/CodeGen/ARM/2010-11-29-PrologueBug.ll
new file mode 100644
index 000000000000..8d7541feae94
--- /dev/null
+++ b/test/CodeGen/ARM/2010-11-29-PrologueBug.ll
@@ -0,0 +1,28 @@
+; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin | FileCheck %s --check-prefix=THUMB2
+; rdar://8690640
+
+define i32* @t(i32* %x) nounwind {
+entry:
+; ARM: t:
+; ARM: push
+; ARM: mov r7, sp
+; ARM: bl _foo
+; ARM: bl _foo
+; ARM: bl _foo
+; ARM: ldmia sp!, {r7, pc}
+
+; THUMB2: t:
+; THUMB2: push
+; THUMB2: mov r7, sp
+; THUMB2: blx _foo
+; THUMB2: blx _foo
+; THUMB2: blx _foo
+; THUMB2: pop
+ %0 = tail call i32* @foo(i32* %x) nounwind
+ %1 = tail call i32* @foo(i32* %0) nounwind
+ %2 = tail call i32* @foo(i32* %1) nounwind
+ ret i32* %2
+}
+
+declare i32* @foo(i32*)
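
The checks above pin down the Darwin frame setup: `push` followed by `mov r7, sp` must come before the first call, with a matching base-updating pop (or `ldmia sp!` into pc) at the return. A rough C equivalent of the IR in this test:

    int *foo(int *);

    int *t(int *x) {
        /* Three chained calls, as in the test: the prologue must be
           push + "mov r7, sp" before the first bl/blx. */
        return foo(foo(foo(x)));
    }
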
diff --git a/test/CodeGen/ARM/2010-11-30-reloc-movt.ll b/test/CodeGen/ARM/2010-11-30-reloc-movt.ll
new file mode 100644
index 000000000000..930cd8d41563
--- /dev/null
+++ b/test/CodeGen/ARM/2010-11-30-reloc-movt.ll
@@ -0,0 +1,42 @@
+; RUN: llc %s -mtriple=armv7-linux-gnueabi -filetype=obj -o - | \
+; RUN: elf-dump --dump-section-data | FileCheck -check-prefix=OBJ %s
+
+target triple = "armv7-none-linux-gnueabi"
+
+@a = external global i8
+
+define arm_aapcs_vfpcc i32 @barf() nounwind {
+entry:
+ %0 = tail call arm_aapcs_vfpcc i32 @foo(i8* @a) nounwind
+ ret i32 %0
+; OBJ: '.text'
+; OBJ-NEXT: 'sh_type'
+; OBJ-NEXT: 'sh_flags'
+; OBJ-NEXT: 'sh_addr'
+; OBJ-NEXT: 'sh_offset'
+; OBJ-NEXT: 'sh_size'
+; OBJ-NEXT: 'sh_link'
+; OBJ-NEXT: 'sh_info'
+; OBJ-NEXT: 'sh_addralign'
+; OBJ-NEXT: 'sh_entsize'
+; OBJ-NEXT: '_section_data', '00482de9 000000e3 000040e3 feffffeb 0088bde8'
+
+; OBJ: Relocation 0x00000000
+; OBJ-NEXT: 'r_offset', 0x00000004
+; OBJ-NEXT: 'r_sym', 0x00000007
+; OBJ-NEXT: 'r_type', 0x0000002b
+
+; OBJ: Relocation 0x00000001
+; OBJ-NEXT: 'r_offset', 0x00000008
+; OBJ-NEXT: 'r_sym'
+; OBJ-NEXT: 'r_type', 0x0000002c
+
+; OBJ: # Relocation 0x00000002
+; OBJ-NEXT: 'r_offset', 0x0000000c
+; OBJ-NEXT: 'r_sym', 0x00000008
+; OBJ-NEXT: 'r_type', 0x0000001c
+
+}
+
+declare arm_aapcs_vfpcc i32 @foo(i8*)
+
diff --git a/test/CodeGen/ARM/2010-12-07-PEIBug.ll b/test/CodeGen/ARM/2010-12-07-PEIBug.ll
new file mode 100644
index 000000000000..c65952be3c64
--- /dev/null
+++ b/test/CodeGen/ARM/2010-12-07-PEIBug.ll
@@ -0,0 +1,40 @@
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -mcpu=cortex-a8 | FileCheck %s
+; rdar://8728956
+
+define hidden void @foo() nounwind ssp {
+entry:
+; CHECK: foo:
+; CHECK: push {r7, lr}
+; CHECK-NEXT: mov r7, sp
+; CHECK-NEXT: vpush {d8}
+; CHECK-NEXT: vpush {d10, d11}
+ %tmp40 = load <4 x i8>* undef
+ %tmp41 = extractelement <4 x i8> %tmp40, i32 2
+ %conv42 = zext i8 %tmp41 to i32
+ %conv43 = sitofp i32 %conv42 to float
+ %div44 = fdiv float %conv43, 2.560000e+02
+ %vecinit45 = insertelement <4 x float> undef, float %div44, i32 2
+ %vecinit46 = insertelement <4 x float> %vecinit45, float 1.000000e+00, i32 3
+ store <4 x float> %vecinit46, <4 x float>* undef
+ br i1 undef, label %if.then105, label %if.else109
+
+if.then105: ; preds = %entry
+ br label %if.end114
+
+if.else109: ; preds = %entry
+ br label %if.end114
+
+if.end114: ; preds = %if.else109, %if.then105
+ %call185 = call float @bar()
+ %vecinit186 = insertelement <4 x float> undef, float %call185, i32 1
+ %call189 = call float @bar()
+ %vecinit190 = insertelement <4 x float> %vecinit186, float %call189, i32 2
+ %vecinit191 = insertelement <4 x float> %vecinit190, float 1.000000e+00, i32 3
+ store <4 x float> %vecinit191, <4 x float>* undef
+; CHECK: vpop {d10, d11}
+; CHECK-NEXT: vpop {d8}
+; CHECK-NEXT: pop {r7, pc}
+ ret void
+}
+
+declare hidden float @bar() nounwind readnone ssp
diff --git a/test/CodeGen/ARM/2010-12-08-tpsoft.ll b/test/CodeGen/ARM/2010-12-08-tpsoft.ll
new file mode 100644
index 000000000000..b8ed8199d398
--- /dev/null
+++ b/test/CodeGen/ARM/2010-12-08-tpsoft.ll
@@ -0,0 +1,52 @@
+; RUN: llc %s -mtriple=armv7-linux-gnueabi -o - | \
+; RUN: FileCheck -check-prefix=ELFASM %s
+; RUN: llc %s -mtriple=armv7-linux-gnueabi -filetype=obj -o - | \
+; RUN: elf-dump --dump-section-data | FileCheck -check-prefix=ELFOBJ %s
+
+;; Make sure that bl __aeabi_read_tp is materialized and fixed up correctly
+;; in the obj case.
+
+@i = external thread_local global i32
+@a = external global i8
+@b = external global [10 x i8]
+
+define arm_aapcs_vfpcc i32 @main() nounwind {
+entry:
+ %0 = load i32* @i, align 4
+ switch i32 %0, label %bb2 [
+ i32 12, label %bb
+ i32 13, label %bb1
+ ]
+
+bb: ; preds = %entry
+ %1 = tail call arm_aapcs_vfpcc i32 @foo(i8* @a) nounwind
+ ret i32 %1
+; ELFASM: bl __aeabi_read_tp
+
+
+; ELFOBJ: '.text'
+; ELFOBJ-NEXT: 'sh_type'
+; ELFOBJ-NEXT: 'sh_flags'
+; ELFOBJ-NEXT: 'sh_addr'
+; ELFOBJ-NEXT: 'sh_offset'
+; ELFOBJ-NEXT: 'sh_size'
+; ELFOBJ-NEXT: 'sh_link'
+; ELFOBJ-NEXT: 'sh_info'
+; ELFOBJ-NEXT: 'sh_addralign'
+; ELFOBJ-NEXT: 'sh_entsize'
+;;; BL __aeabi_read_tp is ---+
+;;; V
+; ELFOBJ-NEXT: 00482de9 3c009fe5 00109fe7 feffffeb
+
+
+bb1: ; preds = %entry
+ %2 = tail call arm_aapcs_vfpcc i32 @bar(i32* bitcast ([10 x i8]* @b to i32*)) nounwind
+ ret i32 %2
+
+bb2: ; preds = %entry
+ ret i32 -1
+}
+
+declare arm_aapcs_vfpcc i32 @foo(i8*)
+
+declare arm_aapcs_vfpcc i32 @bar(i32*)
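
In the soft thread-pointer model used here, every thread-local access is lowered to a call to the EABI helper `__aeabi_read_tp`, which returns the thread pointer in r0; the test then checks that the resulting `bl` gets the right fixup in the object file. A small C sketch of what produces that call:

    __thread int i;   /* thread_local, as in the IR above */

    int read_i(void) {
        /* With a soft thread pointer this compiles to
           "bl __aeabi_read_tp" followed by a load at a fixed
           offset from the returned thread pointer. */
        return i;
    }
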
diff --git a/test/CodeGen/ARM/2010-12-13-reloc-pic.ll b/test/CodeGen/ARM/2010-12-13-reloc-pic.ll
new file mode 100644
index 000000000000..d5aefbee197c
--- /dev/null
+++ b/test/CodeGen/ARM/2010-12-13-reloc-pic.ll
@@ -0,0 +1,100 @@
+; RUN: llc %s -mtriple=armv7-linux-gnueabi -relocation-model=pic -filetype=obj -o - | \
+; RUN: elf-dump --dump-section-data | FileCheck -check-prefix=PIC01 %s
+
+;; FIXME: Reduce this test further, or even better,
+;; redo as .s -> .o test once ARM AsmParser is working better
+
+; ModuleID = 'large2.pnacl.bc'
+target triple = "armv7-none-linux-gnueabi"
+
+%struct._Bigint = type { %struct._Bigint*, i32, i32, i32, i32, [1 x i32] }
+%struct.__FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, %struct._reent*, i8*, i32 (%struct._reent*, i8*, i8*, i32)*, i32 (%struct._reent*, i8*, i8*, i32)*, i32 (%struct._reent*, i8*, i32, i32)*, i32 (%struct._reent*, i8*)*, %struct.__sbuf, i8*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i32, %struct._flock_t, %struct._mbstate_t, i32 }
+%struct.__sbuf = type { i8*, i32 }
+%struct.__tm = type { i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+%struct._atexit = type { %struct._atexit*, i32, [32 x void ()*], %struct._on_exit_args* }
+%struct._flock_t = type { i32, i32, i32, i32, i32 }
+%struct._glue = type { %struct._glue*, i32, %struct.__FILE* }
+%struct._mbstate_t = type { i32, %union.anon }
+%struct._misc_reent = type { i8*, %struct._mbstate_t, %struct._mbstate_t, %struct._mbstate_t, [8 x i8], i32, %struct._mbstate_t, %struct._mbstate_t, %struct._mbstate_t, %struct._mbstate_t, %struct._mbstate_t }
+%struct._mprec = type { %struct._Bigint*, i32, %struct._Bigint*, %struct._Bigint** }
+%struct._on_exit_args = type { [32 x i8*], [32 x i8*], i32, i32 }
+%struct._rand48 = type { [3 x i16], [3 x i16], i16, i64 }
+%struct._reent = type { %struct.__FILE*, %struct.__FILE*, %struct.__FILE*, i32, i32, i8*, i32, i32, i8*, %struct._mprec*, void (%struct._reent*)*, i32, i32, i8*, %struct._rand48*, %struct.__tm*, i8*, void (i32)**, %struct._atexit*, %struct._atexit, %struct._glue, %struct.__FILE*, %struct._misc_reent*, i8* }
+%union.anon = type { i32 }
+
+@buf = constant [2 x i8] c"x\00", align 4
+@_impure_ptr = external thread_local global %struct._reent*
+@.str = private constant [22 x i8] c"This should fault...\0A\00", align 4
+@.str1 = private constant [40 x i8] c"We're still running. This is not good.\0A\00", align 4
+
+define i32 @main() nounwind {
+entry:
+ %0 = load %struct._reent** @_impure_ptr, align 4
+ %1 = getelementptr inbounds %struct._reent* %0, i32 0, i32 1
+ %2 = load %struct.__FILE** %1, align 4
+ %3 = bitcast %struct.__FILE* %2 to i8*
+ %4 = tail call i32 @fwrite(i8* getelementptr inbounds ([22 x i8]* @.str, i32 0, i32 0), i32 1, i32 21, i8* %3) nounwind
+ %5 = load %struct._reent** @_impure_ptr, align 4
+ %6 = getelementptr inbounds %struct._reent* %5, i32 0, i32 1
+ %7 = load %struct.__FILE** %6, align 4
+ %8 = tail call i32 @fflush(%struct.__FILE* %7) nounwind
+ store i8 121, i8* getelementptr inbounds ([2 x i8]* @buf, i32 0, i32 0), align 4
+ %9 = load %struct._reent** @_impure_ptr, align 4
+ %10 = getelementptr inbounds %struct._reent* %9, i32 0, i32 1
+ %11 = load %struct.__FILE** %10, align 4
+ %12 = bitcast %struct.__FILE* %11 to i8*
+ %13 = tail call i32 @fwrite(i8* getelementptr inbounds ([40 x i8]* @.str1, i32 0, i32 0), i32 1, i32 39, i8* %12) nounwind
+ ret i32 1
+}
+
+
+; PIC01: Relocation 0x00000000
+; PIC01-NEXT: 'r_offset', 0x0000001c
+; PIC01-NEXT: 'r_sym'
+; PIC01-NEXT: 'r_type', 0x0000001b
+
+
+; PIC01: Relocation 0x00000001
+; PIC01-NEXT: 'r_offset', 0x00000038
+; PIC01-NEXT: 'r_sym'
+; PIC01-NEXT: 'r_type', 0x0000001b
+
+; PIC01: Relocation 0x00000002
+; PIC01-NEXT: 'r_offset', 0x00000044
+; PIC01-NEXT: 'r_sym'
+; PIC01-NEXT: 'r_type', 0x0000001b
+
+; PIC01: Relocation 0x00000003
+; PIC01-NEXT: 'r_offset', 0x00000070
+; PIC01-NEXT: 'r_sym'
+; PIC01-NEXT: 'r_type', 0x0000001b
+
+; PIC01: Relocation 0x00000004
+; PIC01-NEXT: 'r_offset', 0x0000007c
+; PIC01-NEXT: 'r_sym'
+; PIC01-NEXT: 'r_type', 0x00000019
+
+
+; PIC01: Relocation 0x00000005
+; PIC01-NEXT: 'r_offset', 0x00000080
+; PIC01-NEXT: 'r_sym'
+; PIC01-NEXT: 'r_type', 0x00000018
+
+; PIC01: Relocation 0x00000006
+; PIC01-NEXT: 'r_offset', 0x00000084
+; PIC01-NEXT: 'r_sym'
+; PIC01-NEXT: 'r_type', 0x00000068
+
+; PIC01: Relocation 0x00000007
+; PIC01-NEXT: 'r_offset', 0x00000088
+; PIC01-NEXT: 'r_sym'
+; PIC01-NEXT: 'r_type', 0x0000001a
+
+; PIC01: Relocation 0x00000008
+; PIC01-NEXT: 'r_offset', 0x0000008c
+; PIC01-NEXT: 'r_sym'
+; PIC01-NEXT: 'r_type', 0x00000018
+
+declare i32 @fwrite(i8* nocapture, i32, i32, i8* nocapture) nounwind
+
+declare i32 @fflush(%struct.__FILE* nocapture) nounwind
diff --git a/test/CodeGen/ARM/2010-12-15-elf-lcomm.ll b/test/CodeGen/ARM/2010-12-15-elf-lcomm.ll
new file mode 100644
index 000000000000..eaa34e7960fb
--- /dev/null
+++ b/test/CodeGen/ARM/2010-12-15-elf-lcomm.ll
@@ -0,0 +1,35 @@
+; RUN: llc %s -mtriple=armv7-linux-gnueabi -filetype=obj -o - | \
+; RUN: elf-dump --dump-section-data | FileCheck -check-prefix=OBJ %s
+; RUN: llc %s -mtriple=armv7-linux-gnueabi -o - | \
+; RUN: FileCheck -check-prefix=ASM %s
+
+
+@dummy = internal global i32 666
+@array00 = internal global [20 x i32] zeroinitializer
+@sum = internal global i32 55
+@STRIDE = internal global i32 8
+
+; ASM: .type array00,%object @ @array00
+; ASM-NEXT: .lcomm array00,80 @ @array00
+; ASM-NEXT: .type _MergedGlobals,%object @ @_MergedGlobals
+
+
+
+; OBJ: Section 0x00000003
+; OBJ-NEXT: '.bss'
+
+; OBJ: 'array00'
+; OBJ-NEXT: 'st_value', 0x00000000
+; OBJ-NEXT: 'st_size', 0x00000050
+; OBJ-NEXT: 'st_bind', 0x00000000
+; OBJ-NEXT: 'st_type', 0x00000001
+; OBJ-NEXT: 'st_other', 0x00000000
+; OBJ-NEXT: 'st_shndx', 0x00000003
+
+define i32 @main(i32 %argc) nounwind {
+ %1 = load i32* @sum, align 4
+ %2 = getelementptr [20 x i32]* @array00, i32 0, i32 %argc
+ %3 = load i32* %2, align 4
+ %4 = add i32 %1, %3
+ ret i32 %4;
+}
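
The test checks two emission paths: the internal, zero-initialized `@array00` is emitted via `.lcomm` into `.bss` with the expected ELF symbol attributes, while the small non-zero internal globals are merged into `_MergedGlobals`. Roughly the C this corresponds to (a sketch, not the original source):

    static int dummy = 666;
    static int array00[20];   /* zero-init, 80 bytes -> .lcomm array00,80 */
    static int sum = 55;
    static int STRIDE = 8;

    int main_like(int argc) {
        return sum + array00[argc];
    }
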
diff --git a/test/CodeGen/ARM/2010-12-17-LocalStackSlotCrash.ll b/test/CodeGen/ARM/2010-12-17-LocalStackSlotCrash.ll
new file mode 100644
index 000000000000..a2f50b587b22
--- /dev/null
+++ b/test/CodeGen/ARM/2010-12-17-LocalStackSlotCrash.ll
@@ -0,0 +1,15 @@
+; RUN: llc < %s -mtriple=armv6-apple-darwin10
+; <rdar://problem/8782198>
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:64-n32"
+target triple = "armv6-apple-darwin10"
+
+define void @func() nounwind optsize {
+entry:
+ %buf = alloca [8096 x i8], align 1
+ br label %bb
+
+bb:
+ %p.2 = getelementptr [8096 x i8]* %buf, i32 0, i32 0
+ store i8 undef, i8* %p.2, align 1
+ ret void
+}
diff --git a/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll b/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll
new file mode 100644
index 000000000000..99baad2d38d1
--- /dev/null
+++ b/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll
@@ -0,0 +1,127 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32"
+target triple = "thumbv7-apple-darwin10"
+
+@x1 = internal global i8 1
+@x2 = internal global i8 1
+@x3 = internal global i8 1
+@x4 = internal global i8 1
+@x5 = global i8 1
+
+; Check debug info output for merged global.
+; DW_AT_location
+; DW_OP_addr
+; DW_OP_plus
+; .long __MergedGlobals
+; DW_OP_constu
+; offset
+
+;CHECK: .byte 7 @ Abbrev [7] 0x1a5:0x13 DW_TAG_variable
+;CHECK-NEXT: .ascii "x2" @ DW_AT_name
+;CHECK-NEXT: .byte 0
+;CHECK-NEXT: .long 93 @ DW_AT_type
+;CHECK-NEXT: .byte 1 @ DW_AT_decl_file
+;CHECK-NEXT: .byte 6 @ DW_AT_decl_line
+;CHECK-NEXT: .byte 8 @ DW_AT_location
+;CHECK-NEXT: .byte 3
+;CHECK-NEXT: .long __MergedGlobals
+;CHECK-NEXT: .byte 16
+;CHECK-NEXT: .byte 1
+;CHECK-NEXT: .byte 34
+
+define zeroext i8 @get1(i8 zeroext %a) nounwind optsize {
+entry:
+ tail call void @llvm.dbg.value(metadata !{i8 %a}, i64 0, metadata !10), !dbg !30
+ %0 = load i8* @x1, align 4, !dbg !30
+ tail call void @llvm.dbg.value(metadata !{i8 %0}, i64 0, metadata !11), !dbg !30
+ store i8 %a, i8* @x1, align 4, !dbg !30
+ ret i8 %0, !dbg !31
+}
+
+declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
+
+define zeroext i8 @get2(i8 zeroext %a) nounwind optsize {
+entry:
+ tail call void @llvm.dbg.value(metadata !{i8 %a}, i64 0, metadata !18), !dbg !32
+ %0 = load i8* @x2, align 4, !dbg !32
+ tail call void @llvm.dbg.value(metadata !{i8 %0}, i64 0, metadata !19), !dbg !32
+ store i8 %a, i8* @x2, align 4, !dbg !32
+ ret i8 %0, !dbg !33
+}
+
+define zeroext i8 @get3(i8 zeroext %a) nounwind optsize {
+entry:
+ tail call void @llvm.dbg.value(metadata !{i8 %a}, i64 0, metadata !21), !dbg !34
+ %0 = load i8* @x3, align 4, !dbg !34
+ tail call void @llvm.dbg.value(metadata !{i8 %0}, i64 0, metadata !22), !dbg !34
+ store i8 %a, i8* @x3, align 4, !dbg !34
+ ret i8 %0, !dbg !35
+}
+
+define zeroext i8 @get4(i8 zeroext %a) nounwind optsize {
+entry:
+ tail call void @llvm.dbg.value(metadata !{i8 %a}, i64 0, metadata !24), !dbg !36
+ %0 = load i8* @x4, align 4, !dbg !36
+ tail call void @llvm.dbg.value(metadata !{i8 %0}, i64 0, metadata !25), !dbg !36
+ store i8 %a, i8* @x4, align 4, !dbg !36
+ ret i8 %0, !dbg !37
+}
+
+define zeroext i8 @get5(i8 zeroext %a) nounwind optsize {
+entry:
+ tail call void @llvm.dbg.value(metadata !{i8 %a}, i64 0, metadata !27), !dbg !38
+ %0 = load i8* @x5, align 4, !dbg !38
+ tail call void @llvm.dbg.value(metadata !{i8 %0}, i64 0, metadata !28), !dbg !38
+ store i8 %a, i8* @x5, align 4, !dbg !38
+ ret i8 %0, !dbg !39
+}
+
+!llvm.dbg.sp = !{!0, !6, !7, !8, !9}
+!llvm.dbg.lv.get1 = !{!10, !11}
+!llvm.dbg.gv = !{!13, !14, !15, !16, !17}
+!llvm.dbg.lv.get2 = !{!18, !19}
+!llvm.dbg.lv.get3 = !{!21, !22}
+!llvm.dbg.lv.get4 = !{!24, !25}
+!llvm.dbg.lv.get5 = !{!27, !28}
+
+!0 = metadata !{i32 589870, i32 0, metadata !1, metadata !"get1", metadata !"get1", metadata !"get1", metadata !1, i32 4, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i8 (i8)* @get1} ; [ DW_TAG_subprogram ]
+!1 = metadata !{i32 589865, metadata !"foo.c", metadata !"/tmp/", metadata !2} ; [ DW_TAG_file_type ]
+!2 = metadata !{i32 589841, i32 0, i32 1, metadata !"foo.c", metadata !"/tmp/", metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build 2369.8)", i1 true, i1 true, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
+!3 = metadata !{i32 589845, metadata !1, metadata !"", metadata !1, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !4, i32 0, null} ; [ DW_TAG_subroutine_type ]
+!4 = metadata !{metadata !5, metadata !5}
+!5 = metadata !{i32 589860, metadata !1, metadata !"_Bool", metadata !1, i32 0, i64 8, i64 8, i64 0, i32 0, i32 2} ; [ DW_TAG_base_type ]
+!6 = metadata !{i32 589870, i32 0, metadata !1, metadata !"get2", metadata !"get2", metadata !"get2", metadata !1, i32 7, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i8 (i8)* @get2} ; [ DW_TAG_subprogram ]
+!7 = metadata !{i32 589870, i32 0, metadata !1, metadata !"get3", metadata !"get3", metadata !"get3", metadata !1, i32 10, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i8 (i8)* @get3} ; [ DW_TAG_subprogram ]
+!8 = metadata !{i32 589870, i32 0, metadata !1, metadata !"get4", metadata !"get4", metadata !"get4", metadata !1, i32 13, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i8 (i8)* @get4} ; [ DW_TAG_subprogram ]
+!9 = metadata !{i32 589870, i32 0, metadata !1, metadata !"get5", metadata !"get5", metadata !"get5", metadata !1, i32 16, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i8 (i8)* @get5} ; [ DW_TAG_subprogram ]
+!10 = metadata !{i32 590081, metadata !0, metadata !"a", metadata !1, i32 4, metadata !5, i32 0} ; [ DW_TAG_arg_variable ]
+!11 = metadata !{i32 590080, metadata !12, metadata !"b", metadata !1, i32 4, metadata !5, i32 0} ; [ DW_TAG_auto_variable ]
+!12 = metadata !{i32 589835, metadata !0, i32 4, i32 0, metadata !1, i32 0} ; [ DW_TAG_lexical_block ]
+!13 = metadata !{i32 589876, i32 0, metadata !1, metadata !"x1", metadata !"x1", metadata !"", metadata !1, i32 3, metadata !5, i1 true, i1 true, i8* @x1} ; [ DW_TAG_variable ]
+!14 = metadata !{i32 589876, i32 0, metadata !1, metadata !"x2", metadata !"x2", metadata !"", metadata !1, i32 6, metadata !5, i1 true, i1 true, i8* @x2} ; [ DW_TAG_variable ]
+!15 = metadata !{i32 589876, i32 0, metadata !1, metadata !"x3", metadata !"x3", metadata !"", metadata !1, i32 9, metadata !5, i1 true, i1 true, i8* @x3} ; [ DW_TAG_variable ]
+!16 = metadata !{i32 589876, i32 0, metadata !1, metadata !"x4", metadata !"x4", metadata !"", metadata !1, i32 12, metadata !5, i1 true, i1 true, i8* @x4} ; [ DW_TAG_variable ]
+!17 = metadata !{i32 589876, i32 0, metadata !1, metadata !"x5", metadata !"x5", metadata !"", metadata !1, i32 15, metadata !5, i1 false, i1 true, i8* @x5} ; [ DW_TAG_variable ]
+!18 = metadata !{i32 590081, metadata !6, metadata !"a", metadata !1, i32 7, metadata !5, i32 0} ; [ DW_TAG_arg_variable ]
+!19 = metadata !{i32 590080, metadata !20, metadata !"b", metadata !1, i32 7, metadata !5, i32 0} ; [ DW_TAG_auto_variable ]
+!20 = metadata !{i32 589835, metadata !6, i32 7, i32 0, metadata !1, i32 1} ; [ DW_TAG_lexical_block ]
+!21 = metadata !{i32 590081, metadata !7, metadata !"a", metadata !1, i32 10, metadata !5, i32 0} ; [ DW_TAG_arg_variable ]
+!22 = metadata !{i32 590080, metadata !23, metadata !"b", metadata !1, i32 10, metadata !5, i32 0} ; [ DW_TAG_auto_variable ]
+!23 = metadata !{i32 589835, metadata !7, i32 10, i32 0, metadata !1, i32 2} ; [ DW_TAG_lexical_block ]
+!24 = metadata !{i32 590081, metadata !8, metadata !"a", metadata !1, i32 13, metadata !5, i32 0} ; [ DW_TAG_arg_variable ]
+!25 = metadata !{i32 590080, metadata !26, metadata !"b", metadata !1, i32 13, metadata !5, i32 0} ; [ DW_TAG_auto_variable ]
+!26 = metadata !{i32 589835, metadata !8, i32 13, i32 0, metadata !1, i32 3} ; [ DW_TAG_lexical_block ]
+!27 = metadata !{i32 590081, metadata !9, metadata !"a", metadata !1, i32 16, metadata !5, i32 0} ; [ DW_TAG_arg_variable ]
+!28 = metadata !{i32 590080, metadata !29, metadata !"b", metadata !1, i32 16, metadata !5, i32 0} ; [ DW_TAG_auto_variable ]
+!29 = metadata !{i32 589835, metadata !9, i32 16, i32 0, metadata !1, i32 4} ; [ DW_TAG_lexical_block ]
+!30 = metadata !{i32 4, i32 0, metadata !0, null}
+!31 = metadata !{i32 4, i32 0, metadata !12, null}
+!32 = metadata !{i32 7, i32 0, metadata !6, null}
+!33 = metadata !{i32 7, i32 0, metadata !20, null}
+!34 = metadata !{i32 10, i32 0, metadata !7, null}
+!35 = metadata !{i32 10, i32 0, metadata !23, null}
+!36 = metadata !{i32 13, i32 0, metadata !8, null}
+!37 = metadata !{i32 13, i32 0, metadata !26, null}
+!38 = metadata !{i32 16, i32 0, metadata !9, null}
+!39 = metadata !{i32 16, i32 0, metadata !29, null}
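
The expected DWARF bytes above encode the location of `x2` after global merging: `DW_OP_addr` (0x03) with the address of `__MergedGlobals`, then `DW_OP_constu` (0x10) with the offset 1, then `DW_OP_plus` (0x22, i.e. 34). A hedged C sketch of the source shape that triggers the merge:

    static char x1 = 1, x2 = 1, x3 = 1, x4 = 1;  /* merged into __MergedGlobals */
    char x5 = 1;

    char get2(char a) {
        /* The debug location for x2 must point into the merged blob:
           address of __MergedGlobals plus x2's offset. */
        char b = x2;
        x2 = a;
        return b;
    }
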
diff --git a/test/CodeGen/ARM/2011-02-04-AntidepMultidef.ll b/test/CodeGen/ARM/2011-02-04-AntidepMultidef.ll
new file mode 100644
index 000000000000..85a113755bf4
--- /dev/null
+++ b/test/CodeGen/ARM/2011-02-04-AntidepMultidef.ll
@@ -0,0 +1,128 @@
+; RUN: llc < %s -asm-verbose=false -O3 -mtriple=armv6-apple-darwin -relocation-model=pic -mcpu=arm1136jf-s | FileCheck %s
+; rdar://8959122 illegal register operands for UMULL instruction
+; in cfrac nightly test.
+; Armv6 generates a umull that must write to two distinct destination regs.
+
+; ModuleID = 'bugpoint-reduced-simplified.bc'
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:64-n32"
+target triple = "armv6-apple-darwin10"
+
+define void @ptoa() nounwind {
+entry:
+ br i1 false, label %bb3, label %bb
+
+bb: ; preds = %entry
+ br label %bb3
+
+bb3: ; preds = %bb, %entry
+ %0 = call noalias i8* @malloc() nounwind
+ br i1 undef, label %bb46, label %bb8
+
+bb8: ; preds = %bb3
+ %1 = getelementptr inbounds i8* %0, i32 0
+ store i8 0, i8* %1, align 1
+ %2 = call i32 @ptou() nounwind
+ ; CHECK: umull [[REGISTER:lr|r[0-9]+]],
+ ; CHECK-NOT: [[REGISTER]],
+ ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
+ ; CHECK: umull [[REGISTER:lr|r[0-9]+]],
+ ; CHECK-NOT: [[REGISTER]],
+ ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
+ %3 = udiv i32 %2, 10
+ %4 = urem i32 %3, 10
+ %5 = icmp ult i32 %4, 10
+ %6 = trunc i32 %4 to i8
+ %7 = or i8 %6, 48
+ %8 = add i8 %6, 87
+ %iftmp.5.0.1 = select i1 %5, i8 %7, i8 %8
+ store i8 %iftmp.5.0.1, i8* undef, align 1
+ ; CHECK: umull [[REGISTER:lr|r[0-9]+]],
+ ; CHECK-NOT: [[REGISTER]],
+ ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
+ ; CHECK: umull [[REGISTER:lr|r[0-9]+]],
+ ; CHECK-NOT: [[REGISTER]],
+ ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
+ %9 = udiv i32 %2, 100
+ %10 = urem i32 %9, 10
+ %11 = icmp ult i32 %10, 10
+ %12 = trunc i32 %10 to i8
+ %13 = or i8 %12, 48
+ %14 = add i8 %12, 87
+ %iftmp.5.0.2 = select i1 %11, i8 %13, i8 %14
+ store i8 %iftmp.5.0.2, i8* undef, align 1
+ ; CHECK: umull [[REGISTER:lr|r[0-9]+]],
+ ; CHECK-NOT: [[REGISTER]],
+ ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
+ ; CHECK: umull [[REGISTER:lr|r[0-9]+]],
+ ; CHECK-NOT: [[REGISTER]],
+ ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
+ %15 = udiv i32 %2, 10000
+ %16 = urem i32 %15, 10
+ %17 = icmp ult i32 %16, 10
+ %18 = trunc i32 %16 to i8
+ %19 = or i8 %18, 48
+ %20 = add i8 %18, 87
+ %iftmp.5.0.4 = select i1 %17, i8 %19, i8 %20
+ store i8 %iftmp.5.0.4, i8* null, align 1
+ ; CHECK: umull [[REGISTER:lr|r[0-9]+]],
+ ; CHECK-NOT: [[REGISTER]],
+ ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
+ ; CHECK: umull [[REGISTER:lr|r[0-9]+]],
+ ; CHECK-NOT: [[REGISTER]],
+ ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
+ %21 = udiv i32 %2, 100000
+ %22 = urem i32 %21, 10
+ %23 = icmp ult i32 %22, 10
+ %iftmp.5.0.5 = select i1 %23, i8 0, i8 undef
+ store i8 %iftmp.5.0.5, i8* undef, align 1
+ ; CHECK: umull [[REGISTER:lr|r[0-9]+]],
+ ; CHECK-NOT: [[REGISTER]],
+ ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
+ ; CHECK: umull [[REGISTER:lr|r[0-9]+]],
+ ; CHECK-NOT: [[REGISTER]],
+ ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
+ %24 = udiv i32 %2, 1000000
+ %25 = urem i32 %24, 10
+ %26 = icmp ult i32 %25, 10
+ %27 = trunc i32 %25 to i8
+ %28 = or i8 %27, 48
+ %29 = add i8 %27, 87
+ %iftmp.5.0.6 = select i1 %26, i8 %28, i8 %29
+ store i8 %iftmp.5.0.6, i8* undef, align 1
+ ; CHECK: umull [[REGISTER:lr|r[0-9]+]],
+ ; CHECK-NOT: [[REGISTER]],
+ ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
+ ; CHECK: umull [[REGISTER:lr|r[0-9]+]],
+ ; CHECK-NOT: [[REGISTER]],
+ ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
+ %30 = udiv i32 %2, 10000000
+ %31 = urem i32 %30, 10
+ %32 = icmp ult i32 %31, 10
+ %33 = trunc i32 %31 to i8
+ %34 = or i8 %33, 48
+ %35 = add i8 %33, 87
+ %iftmp.5.0.7 = select i1 %32, i8 %34, i8 %35
+ store i8 %iftmp.5.0.7, i8* undef, align 1
+ ; CHECK: umull [[REGISTER:lr|r[0-9]+]],
+ ; CHECK-NOT: [[REGISTER]],
+ ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
+ ; CHECK: umull [[REGISTER:lr|r[0-9]+]],
+ ; CHECK-NOT: [[REGISTER]],
+ ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}}
+ %36 = udiv i32 %2, 100000000
+ %37 = urem i32 %36, 10
+ %38 = icmp ult i32 %37, 10
+ %39 = trunc i32 %37 to i8
+ %40 = or i8 %39, 48
+ %41 = add i8 %39, 87
+ %iftmp.5.0.8 = select i1 %38, i8 %40, i8 %41
+ store i8 %iftmp.5.0.8, i8* null, align 1
+ unreachable
+
+bb46: ; preds = %bb3
+ ret void
+}
+
+declare noalias i8* @malloc() nounwind
+
+declare i32 @ptou()
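
Each `udiv` by a constant above is lowered to a magic-number multiply, i.e. a `umull`, and per the test's premise the two destination registers of that `umull` must be distinct on ARMv6; the CHECK/CHECK-NOT pairs verify the post-RA scheduler never folds them together. A minimal C sketch of one such step:

    unsigned tens_digit(unsigned u) {
        /* Division by a constant becomes a umull-based multiply by a
           magic number; RdLo and RdHi of that umull must differ. */
        return (u / 10) % 10;
    }
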
diff --git a/test/CodeGen/ARM/2011-02-07-AntidepClobber.ll b/test/CodeGen/ARM/2011-02-07-AntidepClobber.ll
new file mode 100644
index 000000000000..f3d788818afc
--- /dev/null
+++ b/test/CodeGen/ARM/2011-02-07-AntidepClobber.ll
@@ -0,0 +1,89 @@
+; RUN: llc < %s -asm-verbose=false -O3 -mtriple=armv5e-none-linux-gnueabi | FileCheck %s
+; PR8986: PostRA antidependence breaker must respect "earlyclobber".
+; armv5e generates mulv5 that cannot use the same reg for src/dest.
+
+; ModuleID = '<stdin>'
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32"
+target triple = "armv5e-none-linux-gnueabi"
+
+define hidden fastcc void @storeAtts() nounwind {
+entry:
+ %.SV116 = alloca i8**
+ br i1 undef, label %meshBB520, label %meshBB464
+
+bb15: ; preds = %meshBB424
+ br i1 undef, label %bb216, label %meshBB396
+
+bb22: ; preds = %meshBB396
+ br label %cBB564
+
+cBB564: ; preds = %cBB564, %bb22
+ br label %cBB564
+
+poolStoreString.exit.thread: ; preds = %meshBB424
+ ret void
+
+bb78: ; preds = %meshBB412
+ unreachable
+
+bb129: ; preds = %meshBB540
+ br i1 undef, label %bb131.loopexit, label %meshBB540
+
+bb131.loopexit: ; preds = %bb129
+ br label %bb131
+
+bb131: ; preds = %bb135, %bb131.loopexit
+ br i1 undef, label %bb134, label %meshBB396
+
+bb134: ; preds = %bb131
+ unreachable
+
+bb135: ; preds = %meshBB396
+ %uriHash.1.phi.load = load i32* undef
+ %.load120 = load i8*** %.SV116
+ %.phi24 = load i8* null
+ %.phi26 = load i8** null
+ store i8 %.phi24, i8* %.phi26, align 1
+ %0 = getelementptr inbounds i8* %.phi26, i32 1
+ store i8* %0, i8** %.load120, align 4
+ ; CHECK: mul [[REGISTER:lr|r[0-9]+]],
+ ; CHECK-NOT: [[REGISTER]],
+ ; CHECK: {{(lr|r[0-9]+)$}}
+ %1 = mul i32 %uriHash.1.phi.load, 1000003
+ %2 = xor i32 0, %1
+ store i32 %2, i32* null
+ %3 = load i8* null, align 1
+ %4 = icmp eq i8 %3, 0
+ store i8* %0, i8** undef
+ br i1 %4, label %meshBB472, label %bb131
+
+bb212: ; preds = %meshBB540
+ unreachable
+
+bb216: ; preds = %bb15
+ ret void
+
+meshBB396: ; preds = %bb131, %bb15
+ br i1 undef, label %bb135, label %bb22
+
+meshBB412: ; preds = %meshBB464
+ br i1 undef, label %meshBB504, label %bb78
+
+meshBB424: ; preds = %meshBB464
+ br i1 undef, label %poolStoreString.exit.thread, label %bb15
+
+meshBB464: ; preds = %entry
+ br i1 undef, label %meshBB424, label %meshBB412
+
+meshBB472: ; preds = %meshBB504, %bb135
+ unreachable
+
+meshBB504: ; preds = %meshBB412
+ br label %meshBB472
+
+meshBB520: ; preds = %entry
+ br label %meshBB540
+
+meshBB540: ; preds = %meshBB520, %bb129
+ br i1 undef, label %bb212, label %bb129
+}
diff --git a/test/CodeGen/ARM/align.ll b/test/CodeGen/ARM/align.ll
index d4d01288f29b..d57c159b85cb 100644
--- a/test/CodeGen/ARM/align.ll
+++ b/test/CodeGen/ARM/align.ll
@@ -22,7 +22,7 @@
@e = global i64 4
;ELF: .align 3
;ELF: e
-;DARWIN: .align 2
+;DARWIN: .align 3
;DARWIN: _e:
@f = global float 5.0
@@ -34,7 +34,7 @@
@g = global double 6.0
;ELF: .align 3
;ELF: g:
-;DARWIN: .align 2
+;DARWIN: .align 3
;DARWIN: _g:
@bar = common global [75 x i8] zeroinitializer, align 128
diff --git a/test/CodeGen/ARM/arguments.ll b/test/CodeGen/ARM/arguments.ll
index bb7853e66ef4..c7fcb9755d9e 100644
--- a/test/CodeGen/ARM/arguments.ll
+++ b/test/CodeGen/ARM/arguments.ll
@@ -13,8 +13,8 @@ define i32 @f1(i32 %a, i64 %b) {
; test that allocating the double to r2/r3 makes r1 unavailable on gnueabi.
define i32 @f2() nounwind optsize {
; ELF: f2:
-; ELF: mov r0, #128
-; ELF: str r0, [sp]
+; ELF: mov [[REGISTER:(r[0-9]+)]], #128
+; ELF: str [[REGISTER]], [sp]
; DARWIN: f2:
; DARWIN: mov r3, #128
entry:
diff --git a/test/CodeGen/ARM/arm-and-tst-peephole.ll b/test/CodeGen/ARM/arm-and-tst-peephole.ll
new file mode 100644
index 000000000000..50c638b73931
--- /dev/null
+++ b/test/CodeGen/ARM/arm-and-tst-peephole.ll
@@ -0,0 +1,112 @@
+; RUN: llc < %s -march=arm | FileCheck -check-prefix=ARM %s
+; RUN: llc < %s -march=thumb | FileCheck -check-prefix=THUMB %s
+; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck -check-prefix=T2 %s
+
+; FIXME: The -march=thumb test doesn't change if -disable-peephole is specified.
+
+%struct.Foo = type { i8* }
+
+; ARM: foo
+; THUMB: foo
+; T2: foo
+define %struct.Foo* @foo(%struct.Foo* %this, i32 %acc) nounwind readonly align 2 {
+entry:
+ %scevgep = getelementptr %struct.Foo* %this, i32 1
+ br label %tailrecurse
+
+tailrecurse: ; preds = %sw.bb, %entry
+ %lsr.iv2 = phi %struct.Foo* [ %scevgep3, %sw.bb ], [ %scevgep, %entry ]
+ %lsr.iv = phi i32 [ %lsr.iv.next, %sw.bb ], [ 1, %entry ]
+ %acc.tr = phi i32 [ %or, %sw.bb ], [ %acc, %entry ]
+ %lsr.iv24 = bitcast %struct.Foo* %lsr.iv2 to i8**
+ %scevgep5 = getelementptr i8** %lsr.iv24, i32 -1
+ %tmp2 = load i8** %scevgep5
+ %0 = ptrtoint i8* %tmp2 to i32
+
+; ARM: ands r12, r12, #3
+; ARM-NEXT: beq
+
+; THUMB: movs r5, #3
+; THUMB-NEXT: ands r5, r4
+; THUMB-NEXT: cmp r5, #0
+; THUMB-NEXT: beq
+
+; T2: ands r12, r12, #3
+; T2-NEXT: beq
+
+ %and = and i32 %0, 3
+ %tst = icmp eq i32 %and, 0
+ br i1 %tst, label %sw.bb, label %tailrecurse.switch
+
+tailrecurse.switch: ; preds = %tailrecurse
+ switch i32 %and, label %sw.epilog [
+ i32 1, label %sw.bb
+ i32 3, label %sw.bb6
+ i32 2, label %sw.bb8
+ ]
+
+sw.bb: ; preds = %tailrecurse.switch, %tailrecurse
+ %shl = shl i32 %acc.tr, 1
+ %or = or i32 %and, %shl
+ %lsr.iv.next = add i32 %lsr.iv, 1
+ %scevgep3 = getelementptr %struct.Foo* %lsr.iv2, i32 1
+ br label %tailrecurse
+
+sw.bb6: ; preds = %tailrecurse.switch
+ ret %struct.Foo* %lsr.iv2
+
+sw.bb8: ; preds = %tailrecurse.switch
+ %tmp1 = add i32 %acc.tr, %lsr.iv
+ %add.ptr11 = getelementptr inbounds %struct.Foo* %this, i32 %tmp1
+ ret %struct.Foo* %add.ptr11
+
+sw.epilog: ; preds = %tailrecurse.switch
+ ret %struct.Foo* undef
+}
+
+; Another test that exercises the AND/TST peephole optimization and also
+; generates a predicated ANDS instruction. Check that the predicate is printed
+; after the "S" modifier on the instruction.
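+; In unified assembly syntax the "S" precedes the condition code, so the
+; expected output is "andsne" rather than "andnes".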
+
+%struct.S = type { i8* (i8*)*, [1 x i8] }
+
+; ARM: bar
+; THUMB: bar
+; T2: bar
+define internal zeroext i8 @bar(%struct.S* %x, %struct.S* nocapture %y) nounwind readonly {
+entry:
+ %0 = getelementptr inbounds %struct.S* %x, i32 0, i32 1, i32 0
+ %1 = load i8* %0, align 1
+ %2 = zext i8 %1 to i32
+; ARM: ands
+; THUMB: ands
+; T2: ands
+ %3 = and i32 %2, 112
+ %4 = icmp eq i32 %3, 0
+ br i1 %4, label %return, label %bb
+
+bb: ; preds = %entry
+ %5 = getelementptr inbounds %struct.S* %y, i32 0, i32 1, i32 0
+ %6 = load i8* %5, align 1
+ %7 = zext i8 %6 to i32
+; ARM: andsne
+; THUMB: ands
+; T2: andsne
+ %8 = and i32 %7, 112
+ %9 = icmp eq i32 %8, 0
+ br i1 %9, label %return, label %bb2
+
+bb2: ; preds = %bb
+ %10 = icmp eq i32 %3, 16
+ %11 = icmp eq i32 %8, 16
+ %or.cond = or i1 %10, %11
+ br i1 %or.cond, label %bb4, label %return
+
+bb4: ; preds = %bb2
+ %12 = ptrtoint %struct.S* %x to i32
+ %phitmp = trunc i32 %12 to i8
+ ret i8 %phitmp
+
+return: ; preds = %bb2, %bb, %entry
+ ret i8 1
+}
diff --git a/test/CodeGen/ARM/atomic-cmp.ll b/test/CodeGen/ARM/atomic-cmp.ll
new file mode 100644
index 000000000000..f31aa7bc58e3
--- /dev/null
+++ b/test/CodeGen/ARM/atomic-cmp.ll
@@ -0,0 +1,17 @@
+; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s -check-prefix=ARM
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin | FileCheck %s -check-prefix=T2
+; rdar://8964854
+
+define i8 @t(i8* %a, i8 %b, i8 %c) nounwind {
+; ARM: t:
+; ARM: ldrexb
+; ARM: strexb
+
+; T2: t:
+; T2: ldrexb
+; T2: strexb
+ %tmp0 = tail call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* %a, i8 %b, i8 %c)
+ ret i8 %tmp0
+}
+
+declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* nocapture, i8, i8) nounwind
diff --git a/test/CodeGen/ARM/bfi.ll b/test/CodeGen/ARM/bfi.ll
index 59e2b43a9172..946db1909fe5 100644
--- a/test/CodeGen/ARM/bfi.ll
+++ b/test/CodeGen/ARM/bfi.ll
@@ -16,10 +16,10 @@ entry:
ret void
}
-define i32 @f2(i32 %A, i32 %B) nounwind readnone optsize {
+define i32 @f2(i32 %A, i32 %B) nounwind {
entry:
; CHECK: f2
-; CHECK: mov r1, r1, lsr #7
+; CHECK: lsr{{.*}}#7
; CHECK: bfi r0, r1, #7, #16
%and = and i32 %A, -8388481 ; <i32> [#uses=1]
%and2 = and i32 %B, 8388480 ; <i32> [#uses=1]
@@ -27,10 +27,10 @@ entry:
ret i32 %or
}
-define i32 @f3(i32 %A, i32 %B) nounwind readnone optsize {
+define i32 @f3(i32 %A, i32 %B) nounwind {
entry:
; CHECK: f3
-; CHECK: mov r2, r0, lsr #7
+; CHECK: lsr{{.*}} #7
; CHECK: mov r0, r1
; CHECK: bfi r0, r2, #7, #16
%and = and i32 %A, 8388480 ; <i32> [#uses=1]
@@ -38,3 +38,27 @@ entry:
%or = or i32 %and2, %and ; <i32> [#uses=1]
ret i32 %or
}
+
+; rdar://8752056
+define i32 @f4(i32 %a) nounwind {
+; CHECK: f4
+; CHECK: movw r1, #3137
+; CHECK: bfi r1, r0, #15, #5
+ %1 = shl i32 %a, 15
+ %ins7 = and i32 %1, 1015808
+ %ins12 = or i32 %ins7, 3137
+ ret i32 %ins12
+}
+
+; rdar://8458663
+define i32 @f5(i32 %a, i32 %b) nounwind {
+entry:
+; CHECK: f5:
+; CHECK-NOT: bfc
+; CHECK: bfi r0, r1, #20, #4
+ %0 = and i32 %a, -15728641
+ %1 = shl i32 %b, 20
+ %2 = and i32 %1, 15728640
+ %3 = or i32 %2, %0
+ ret i32 %3
+}
diff --git a/test/CodeGen/ARM/bits.ll b/test/CodeGen/ARM/bits.ll
index 9e94efe3f9db..ce1b2ad5fad3 100644
--- a/test/CodeGen/ARM/bits.ll
+++ b/test/CodeGen/ARM/bits.ll
@@ -1,36 +1,41 @@
-; RUN: llc < %s -march=arm > %t
-; RUN: grep and %t | count 1
-; RUN: grep orr %t | count 1
-; RUN: grep eor %t | count 1
-; RUN: grep mov.*lsl %t | count 1
-; RUN: grep mov.*asr %t | count 1
+; RUN: llc < %s -march=arm | FileCheck %s
define i32 @f1(i32 %a, i32 %b) {
entry:
+; CHECK: f1
+; CHECK: and r0, r1, r0
%tmp2 = and i32 %b, %a ; <i32> [#uses=1]
ret i32 %tmp2
}
define i32 @f2(i32 %a, i32 %b) {
entry:
+; CHECK: f2
+; CHECK: orr r0, r1, r0
%tmp2 = or i32 %b, %a ; <i32> [#uses=1]
ret i32 %tmp2
}
define i32 @f3(i32 %a, i32 %b) {
entry:
+; CHECK: f3
+; CHECK: eor r0, r1, r0
%tmp2 = xor i32 %b, %a ; <i32> [#uses=1]
ret i32 %tmp2
}
define i32 @f4(i32 %a, i32 %b) {
entry:
+; CHECK: f4
+; CHECK: lsl
%tmp3 = shl i32 %a, %b ; <i32> [#uses=1]
ret i32 %tmp3
}
define i32 @f5(i32 %a, i32 %b) {
entry:
+; CHECK: f5
+; CHECK: asr
%tmp3 = ashr i32 %a, %b ; <i32> [#uses=1]
ret i32 %tmp3
}
diff --git a/test/CodeGen/ARM/bswap-inline-asm.ll b/test/CodeGen/ARM/bswap-inline-asm.ll
new file mode 100644
index 000000000000..472213d5f85f
--- /dev/null
+++ b/test/CodeGen/ARM/bswap-inline-asm.ll
@@ -0,0 +1,9 @@
+; RUN: llc < %s -mtriple=arm-apple-darwin -mattr=+v6 | FileCheck %s
+
+define i32 @t1(i32 %x) nounwind {
+; CHECK: t1:
+; CHECK-NOT: InlineAsm
+; CHECK: rev
+ %asmtmp = tail call i32 asm "rev $0, $1\0A", "=l,l"(i32 %x) nounwind
+ ret i32 %asmtmp
+}
diff --git a/test/CodeGen/ARM/bx_fold.ll b/test/CodeGen/ARM/bx_fold.ll
index 0e3e070a818f..09f1aae0a9f0 100644
--- a/test/CodeGen/ARM/bx_fold.ll
+++ b/test/CodeGen/ARM/bx_fold.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=arm
-; RUN: llc < %s -march=arm | not grep bx
+; RUN: llc < %s -mtriple=armv5t-apple-darwin | FileCheck %s
define void @test(i32 %Ptr, i8* %L) {
entry:
@@ -24,6 +23,8 @@ bb1: ; preds = %bb, %entry
br i1 %bothcond, label %bb, label %bb18
bb18: ; preds = %bb1
+; CHECK-NOT: bx
+; CHECK: ldmia sp!
ret void
}
diff --git a/test/CodeGen/ARM/call-tc.ll b/test/CodeGen/ARM/call-tc.ll
index db5afe3f56cb..a77aba037be5 100644
--- a/test/CodeGen/ARM/call-tc.ll
+++ b/test/CodeGen/ARM/call-tc.ll
@@ -1,8 +1,6 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin -march=arm | FileCheck %s -check-prefix=CHECKV4
-; RUN: llc < %s -march=arm -mtriple=arm-apple-darwin -mattr=+v5t | FileCheck %s -check-prefix=CHECKV5
-; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi\
-; RUN: -relocation-model=pic | FileCheck %s -check-prefix=CHECKELF
-; XFAIL: *
+; RUN: llc < %s -mtriple=armv6-apple-darwin -mattr=+vfp2 -arm-tail-calls | FileCheck %s -check-prefix=CHECKV6
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi -relocation-model=pic -mattr=+vfp2 -arm-tail-calls | FileCheck %s -check-prefix=CHECKELF
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -arm-tail-calls | FileCheck %s -check-prefix=CHECKT2
@t = weak global i32 ()* null ; <i32 ()**> [#uses=1]
@@ -10,40 +8,80 @@ declare void @g(i32, i32, i32, i32)
define void @t1() {
; CHECKELF: t1:
-; CHECKELF: PLT
+; CHECKELF: bl g(PLT)
call void @g( i32 1, i32 2, i32 3, i32 4 )
ret void
}
define void @t2() {
-; CHECKV4: t2:
-; CHECKV4: bx r0 @ TAILCALL
-; CHECKV5: t2:
-; CHECKV5: bx r0 @ TAILCALL
+; CHECKV6: t2:
+; CHECKV6: bx r0 @ TAILCALL
%tmp = load i32 ()** @t ; <i32 ()*> [#uses=1]
%tmp.upgrd.2 = tail call i32 %tmp( ) ; <i32> [#uses=0]
ret void
}
-define i32* @t3(i32, i32, i32*, i32*, i32*) nounwind {
-; CHECKV4: t3:
-; CHECKV4: bx r{{.*}}
-BB0:
- %5 = inttoptr i32 %0 to i32* ; <i32*> [#uses=1]
- %t35 = volatile load i32* %5 ; <i32> [#uses=1]
- %6 = inttoptr i32 %t35 to i32** ; <i32**> [#uses=1]
- %7 = getelementptr i32** %6, i32 86 ; <i32**> [#uses=1]
- %8 = load i32** %7 ; <i32*> [#uses=1]
- %9 = bitcast i32* %8 to i32* (i32, i32*, i32, i32*, i32*, i32*)* ; <i32* (i32, i32*, i32, i32*, i32*, i32*)*> [#uses=1]
- %10 = call i32* %9(i32 %0, i32* null, i32 %1, i32* %2, i32* %3, i32* %4) ; <i32*> [#uses=1]
- ret i32* %10
-}
-
-define void @t4() {
-; CHECKV4: t4:
-; CHECKV4: b _t2 @ TAILCALL
-; CHECKV5: t4:
-; CHECKV5: b _t2 @ TAILCALL
+define void @t3() {
+; CHECKV6: t3:
+; CHECKV6: b _t2 @ TAILCALL
+; CHECKELF: t3:
+; CHECKELF: b t2(PLT) @ TAILCALL
tail call void @t2( ) ; <i32> [#uses=0]
ret void
}
+
+; Sibcall optimization of expanded libcalls. rdar://8707777
+define double @t4(double %a) nounwind readonly ssp {
+entry:
+; CHECKV6: t4:
+; CHECKV6: b _sin @ TAILCALL
+; CHECKELF: t4:
+; CHECKELF: b sin(PLT) @ TAILCALL
+ %0 = tail call double @sin(double %a) nounwind readonly ; <double> [#uses=1]
+ ret double %0
+}
+
+define float @t5(float %a) nounwind readonly ssp {
+entry:
+; CHECKV6: t5:
+; CHECKV6: b _sinf @ TAILCALL
+; CHECKELF: t5:
+; CHECKELF: b sinf(PLT) @ TAILCALL
+ %0 = tail call float @sinf(float %a) nounwind readonly ; <float> [#uses=1]
+ ret float %0
+}
+
+declare float @sinf(float) nounwind readonly
+
+declare double @sin(double) nounwind readonly
+
+define i32 @t6(i32 %a, i32 %b) nounwind readnone {
+entry:
+; CHECKV6: t6:
+; CHECKV6: b ___divsi3 @ TAILCALL
+; CHECKELF: t6:
+; CHECKELF: b __aeabi_idiv(PLT) @ TAILCALL
+ %0 = sdiv i32 %a, %b
+ ret i32 %0
+}
+
+; Make sure the tail call instruction isn't deleted
+; rdar://8309338
+declare void @foo() nounwind
+
+define void @t7() nounwind {
+entry:
+; CHECKT2: t7:
+; CHECKT2: blxeq _foo
+; CHECKT2-NEXT: pop.w
+; CHECKT2-NEXT: b.w _foo
+ br i1 undef, label %bb, label %bb1.lr.ph
+
+bb1.lr.ph:
+ tail call void @foo() nounwind
+ unreachable
+
+bb:
+ tail call void @foo() nounwind
+ ret void
+}
diff --git a/test/CodeGen/ARM/clz.ll b/test/CodeGen/ARM/clz.ll
index d2235c9221ce..e381e0029819 100644
--- a/test/CodeGen/ARM/clz.ll
+++ b/test/CodeGen/ARM/clz.ll
@@ -1,8 +1,10 @@
-; RUN: llc < %s -march=arm -mattr=+v5t | grep clz
+; RUN: llc < %s -march=arm -mattr=+v5t | FileCheck %s
declare i32 @llvm.ctlz.i32(i32)
define i32 @test(i32 %x) {
- %tmp.1 = call i32 @llvm.ctlz.i32( i32 %x ) ; <i32> [#uses=1]
+; CHECK: test
+; CHECK: clz r0, r0
+ %tmp.1 = call i32 @llvm.ctlz.i32( i32 %x )
ret i32 %tmp.1
}
diff --git a/test/CodeGen/ARM/code-placement.ll b/test/CodeGen/ARM/code-placement.ll
index 25c556889fc4..845be8c20ea5 100644
--- a/test/CodeGen/ARM/code-placement.ll
+++ b/test/CodeGen/ARM/code-placement.ll
@@ -1,12 +1,13 @@
-; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
+; RUN: llc < %s -mtriple=armv7-apple-darwin -cgp-critical-edge-splitting=0 | FileCheck %s
; PHI elimination shouldn't break backedge.
; rdar://8263994
%struct.list_data_s = type { i16, i16 }
%struct.list_head = type { %struct.list_head*, %struct.list_data_s* }
-define arm_apcscc %struct.list_head* @t(%struct.list_head* %list) nounwind {
+define arm_apcscc %struct.list_head* @t1(%struct.list_head* %list) nounwind {
entry:
+; CHECK: t1:
%0 = icmp eq %struct.list_head* %list, null
br i1 %0, label %bb2, label %bb
@@ -27,3 +28,52 @@ bb2:
%next.0.lcssa = phi %struct.list_head* [ null, %entry ], [ %list_addr.05, %bb ]
ret %struct.list_head* %next.0.lcssa
}
+
+; Optimize the loop entry and eliminate intra-loop branches.
+; rdar://8117827
+define i32 @t2(i32 %passes, i32* nocapture %src, i32 %size) nounwind readonly {
+entry:
+; CHECK: t2:
+; CHECK: beq LBB1_[[RET:.]]
+ %0 = icmp eq i32 %passes, 0 ; <i1> [#uses=1]
+ br i1 %0, label %bb5, label %bb.nph15
+
+; CHECK: LBB1_[[PREHDR:.]]: @ %bb2.preheader
+bb1: ; preds = %bb2.preheader, %bb1
+; CHECK: LBB1_[[BB1:.]]: @ %bb1
+; CHECK: bne LBB1_[[BB1]]
+ %indvar = phi i32 [ %indvar.next, %bb1 ], [ 0, %bb2.preheader ] ; <i32> [#uses=2]
+ %sum.08 = phi i32 [ %2, %bb1 ], [ %sum.110, %bb2.preheader ] ; <i32> [#uses=1]
+ %tmp17 = sub i32 %i.07, %indvar ; <i32> [#uses=1]
+ %scevgep = getelementptr i32* %src, i32 %tmp17 ; <i32*> [#uses=1]
+ %1 = load i32* %scevgep, align 4 ; <i32> [#uses=1]
+ %2 = add nsw i32 %1, %sum.08 ; <i32> [#uses=2]
+ %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
+ %exitcond = icmp eq i32 %indvar.next, %size ; <i1> [#uses=1]
+ br i1 %exitcond, label %bb3, label %bb1
+
+bb3: ; preds = %bb1, %bb2.preheader
+; CHECK: LBB1_[[BB3:.]]: @ %bb3
+; CHECK: bne LBB1_[[PREHDR]]
+; CHECK-NOT: b LBB1_
+ %sum.0.lcssa = phi i32 [ %sum.110, %bb2.preheader ], [ %2, %bb1 ] ; <i32> [#uses=2]
+ %3 = add i32 %pass.011, 1 ; <i32> [#uses=2]
+ %exitcond18 = icmp eq i32 %3, %passes ; <i1> [#uses=1]
+ br i1 %exitcond18, label %bb5, label %bb2.preheader
+
+bb.nph15: ; preds = %entry
+ %i.07 = add i32 %size, -1 ; <i32> [#uses=2]
+ %4 = icmp sgt i32 %i.07, -1 ; <i1> [#uses=1]
+ br label %bb2.preheader
+
+bb2.preheader: ; preds = %bb3, %bb.nph15
+ %pass.011 = phi i32 [ 0, %bb.nph15 ], [ %3, %bb3 ] ; <i32> [#uses=1]
+ %sum.110 = phi i32 [ 0, %bb.nph15 ], [ %sum.0.lcssa, %bb3 ] ; <i32> [#uses=2]
+ br i1 %4, label %bb1, label %bb3
+
+; CHECK: LBB1_[[RET]]: @ %bb5
+; CHECK: ldmia sp!
+bb5: ; preds = %bb3, %entry
+ %sum.1.lcssa = phi i32 [ 0, %entry ], [ %sum.0.lcssa, %bb3 ] ; <i32> [#uses=1]
+ ret i32 %sum.1.lcssa
+}
diff --git a/test/CodeGen/ARM/constants.ll b/test/CodeGen/ARM/constants.ll
index ce919361619a..542cf02f2a90 100644
--- a/test/CodeGen/ARM/constants.ll
+++ b/test/CodeGen/ARM/constants.ll
@@ -14,34 +14,33 @@ define i32 @f2() {
define i32 @f3() {
; CHECK: f3
-; CHECK: mov r0{{.*}}256
+; CHECK: mov r0, #1, 24
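+; "#1, 24" is ARM's rotated-immediate notation: 1 rotated right by 24 bits
+; is 1 << 8 = 256. Likewise below: 255 ror 2 = 0xC000003F = -1073741761,
+; and 63 ror 28 = 63 << 4 = 1008.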
ret i32 256
}
define i32 @f4() {
; CHECK: f4
-; CHECK: orr{{.*}}256
+; CHECK: orr{{.*}}#1, 24
ret i32 257
}
define i32 @f5() {
; CHECK: f5
-; CHECK: mov r0, {{.*}}-1073741761
+; CHECK: mov r0, #255, 2
ret i32 -1073741761
}
define i32 @f6() {
; CHECK: f6
-; CHECK: mov r0, {{.*}}1008
+; CHECK: mov r0, #63, 28
ret i32 1008
}
define void @f7(i32 %a) {
; CHECK: f7
; CHECK: cmp r0, #1, 16
- %b = icmp ugt i32 %a, 65536 ; <i1> [#uses=1]
+ %b = icmp ugt i32 %a, 65536
br i1 %b, label %r, label %r
-
-r: ; preds = %0, %0
+r:
ret void
}
diff --git a/test/CodeGen/ARM/crash.ll b/test/CodeGen/ARM/crash.ll
new file mode 100644
index 000000000000..4b6876df4a03
--- /dev/null
+++ b/test/CodeGen/ARM/crash.ll
@@ -0,0 +1,29 @@
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin10
+
+; <rdar://problem/8529919>
+%struct.foo = type { i32, i32 }
+
+define void @func() nounwind {
+entry:
+ %tmp = load i32* undef, align 4
+ br label %bb1
+
+bb1:
+ %tmp1 = and i32 %tmp, 16
+ %tmp2 = icmp eq i32 %tmp1, 0
+ %invok.1.i = select i1 %tmp2, i32 undef, i32 0
+ %tmp119 = add i32 %invok.1.i, 0
+ br i1 undef, label %bb2, label %exit
+
+bb2:
+ %tmp120 = add i32 %tmp119, 0
+ %scevgep810.i = getelementptr %struct.foo* null, i32 %tmp120, i32 1
+ store i32 undef, i32* %scevgep810.i, align 4
+ br i1 undef, label %bb2, label %bb3
+
+bb3:
+ br i1 %tmp2, label %bb2, label %bb2
+
+exit:
+ ret void
+}
diff --git a/test/CodeGen/ARM/div.ll b/test/CodeGen/ARM/div.ll
index 448b437ddf46..3d29e05a0ccf 100644
--- a/test/CodeGen/ARM/div.ll
+++ b/test/CodeGen/ARM/div.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | FileCheck %s -check-prefix=CHECK-ARM
+; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s -check-prefix=CHECK-ARM
define i32 @f1(i32 %a, i32 %b) {
entry:
diff --git a/test/CodeGen/ARM/fabss.ll b/test/CodeGen/ARM/fabss.ll
index dfc1e0a957c3..f03282bdab7f 100644
--- a/test/CodeGen/ARM/fabss.ll
+++ b/test/CodeGen/ARM/fabss.ll
@@ -24,4 +24,4 @@ declare float @fabsf(float)
; CORTEXA8: test:
; CORTEXA8: vabs.f32 d1, d1
; CORTEXA9: test:
-; CORTEXA9: vabs.f32 s0, s0
+; CORTEXA9: vabs.f32 s1, s1
diff --git a/test/CodeGen/ARM/fadds.ll b/test/CodeGen/ARM/fadds.ll
index 113f0e29bd15..749690e98d0f 100644
--- a/test/CodeGen/ARM/fadds.ll
+++ b/test/CodeGen/ARM/fadds.ll
@@ -20,4 +20,4 @@ entry:
; CORTEXA8: test:
; CORTEXA8: vadd.f32 d0, d1, d0
; CORTEXA9: test:
-; CORTEXA9: vadd.f32 s0, s0, s1
+; CORTEXA9: vadd.f32 s0, s1, s0
diff --git a/test/CodeGen/ARM/fast-isel-crash.ll b/test/CodeGen/ARM/fast-isel-crash.ll
new file mode 100644
index 000000000000..370c70f174fd
--- /dev/null
+++ b/test/CodeGen/ARM/fast-isel-crash.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s -O0 -mtriple=thumbv7-apple-darwin
+
+%union.anon = type { <16 x i32> }
+
+@__md0 = external global [137 x i8]
+
+define internal void @stretch(<4 x i8> addrspace(1)* %src, <4 x i8> addrspace(1)* %dst, i32 %width, i32 %height, i32 %iLS, i32 %oLS, <2 x float> %c, <4 x float> %param) nounwind {
+entry:
+ ret void
+}
+
+define internal i32 @_Z13get_global_idj(i32 %dim) nounwind ssp {
+entry:
+ ret i32 undef
+}
+
+define void @wrap(i8 addrspace(1)* addrspace(1)* %arglist, i32 addrspace(1)* %gtid) nounwind ssp {
+entry:
+ call void @stretch(<4 x i8> addrspace(1)* undef, <4 x i8> addrspace(1)* undef, i32 undef, i32 undef, i32 undef, i32 undef, <2 x float> undef, <4 x float> undef)
+ ret void
+}
diff --git a/test/CodeGen/ARM/fast-isel-static.ll b/test/CodeGen/ARM/fast-isel-static.ll
new file mode 100644
index 000000000000..8f58480be164
--- /dev/null
+++ b/test/CodeGen/ARM/fast-isel-static.ll
@@ -0,0 +1,30 @@
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -O0 -relocation-model=static -arm-long-calls | FileCheck -check-prefix=LONG %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -O0 -relocation-model=static | FileCheck -check-prefix=NORM %s
+
+define void @myadd(float* %sum, float* %addend) nounwind {
+entry:
+ %sum.addr = alloca float*, align 4
+ %addend.addr = alloca float*, align 4
+ store float* %sum, float** %sum.addr, align 4
+ store float* %addend, float** %addend.addr, align 4
+ %tmp = load float** %sum.addr, align 4
+ %tmp1 = load float* %tmp
+ %tmp2 = load float** %addend.addr, align 4
+ %tmp3 = load float* %tmp2
+ %add = fadd float %tmp1, %tmp3
+ %tmp4 = load float** %sum.addr, align 4
+ store float %add, float* %tmp4
+ ret void
+}
+
+define i32 @main(i32 %argc, i8** %argv) nounwind {
+entry:
+ %ztot = alloca float, align 4
+ %z = alloca float, align 4
+ store float 0.000000e+00, float* %ztot, align 4
+ store float 1.000000e+00, float* %z, align 4
+; LONG: blx r2
+; NORM: blx _myadd
+ call void @myadd(float* %ztot, float* %z)
+ ret i32 0
+}
diff --git a/test/CodeGen/ARM/fast-isel.ll b/test/CodeGen/ARM/fast-isel.ll
index 3bee84d84de4..dd806ec6f1ae 100644
--- a/test/CodeGen/ARM/fast-isel.ll
+++ b/test/CodeGen/ARM/fast-isel.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -O0 -arm-fast-isel -fast-isel-abort -mtriple=armv7-apple-darwin
-; RUN: llc < %s -O0 -arm-fast-isel -fast-isel-abort -mtriple=thumbv7-apple-darwin
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=armv7-apple-darwin
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=thumbv7-apple-darwin
; Very basic fast-isel functionality.
-define i32 @add(i32 %a, i32 %b) nounwind ssp {
+define i32 @add(i32 %a, i32 %b) nounwind {
entry:
%a.addr = alloca i32, align 4
%b.addr = alloca i32, align 4
@@ -13,27 +13,4 @@ entry:
%tmp1 = load i32* %b.addr
%add = add nsw i32 %tmp, %tmp1
ret i32 %add
-}
-
-define i32* @foo(i32* %p, i32* %q, i32** %z) nounwind {
-entry:
- %r = load i32* %p
- %s = load i32* %q
- %y = load i32** %z
- br label %fast
-
-fast:
- %t0 = add i32 %r, %s
- %t1 = mul i32 %t0, %s
- %t2 = sub i32 %t1, %s
- %t3 = and i32 %t2, %s
- %t4 = xor i32 %t3, 3
- %t5 = xor i32 %t4, %s
- %t6 = add i32 %t5, 2
- %t7 = getelementptr i32* %y, i32 1
- %t8 = getelementptr i32* %t7, i32 %t6
- br label %exit
-
-exit:
- ret i32* %t8
-}
+}
\ No newline at end of file
diff --git a/test/CodeGen/ARM/fcopysign.ll b/test/CodeGen/ARM/fcopysign.ll
index a6d741087a89..1050cd265998 100644
--- a/test/CodeGen/ARM/fcopysign.ll
+++ b/test/CodeGen/ARM/fcopysign.ll
@@ -1,18 +1,45 @@
-; RUN: llc < %s -march=arm | grep bic | count 2
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2 | \
-; RUN: grep vneg | count 2
+; RUN: llc < %s -mtriple=armv7-apple-darwin -mcpu=cortex-a8 | FileCheck %s -check-prefix=SOFT
+; RUN: llc < %s -mtriple=armv7-gnueabi -float-abi=hard -mcpu=cortex-a8 | FileCheck %s -check-prefix=HARD
-define float @test1(float %x, double %y) {
- %tmp = fpext float %x to double
- %tmp2 = tail call double @copysign( double %tmp, double %y )
- %tmp3 = fptrunc double %tmp2 to float
- ret float %tmp3
+; rdar://8984306
+define float @test1(float %x, float %y) nounwind {
+entry:
+; SOFT: test1:
+; SOFT: lsr r1, r1, #31
+; SOFT: bfi r0, r1, #31, #1
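+; The soft-float sequence implements copysign with integer bit twiddling:
+; lsr moves the sign bit of %y down to bit 0, and bfi inserts it into bit
+; 31 of %x, i.e. (x & 0x7fffffff) | (y & 0x80000000).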
+
+; HARD: test1:
+; HARD: vabs.f32 d0, d0
+; HARD: cmp r0, #0
+; HARD: vneglt.f32 s0, s0
+ %0 = tail call float @copysignf(float %x, float %y) nounwind
+ ret float %0
+}
+
+define double @test2(double %x, double %y) nounwind {
+entry:
+; SOFT: test2:
+; SOFT: lsr r2, r3, #31
+; SOFT: bfi r1, r2, #31, #1
+
+; HARD: test2:
+; HARD: vabs.f64 d0, d0
+; HARD: cmp r1, #0
+; HARD: vneglt.f64 d0, d0
+ %0 = tail call double @copysign(double %x, double %y) nounwind
+ ret double %0
}
-define double @test2(double %x, float %y) {
- %tmp = fpext float %y to double
- %tmp2 = tail call double @copysign( double %x, double %tmp )
- ret double %tmp2
+define double @test3(double %x, double %y, double %z) nounwind {
+entry:
+; SOFT: test3:
+; SOFT: vabs.f64
+; SOFT: cmp {{.*}}, #0
+; SOFT: vneglt.f64
+ %0 = fmul double %x, %y
+ %1 = tail call double @copysign(double %0, double %z) nounwind
+ ret double %1
}
-declare double @copysign(double, double)
+declare double @copysign(double, double) nounwind
+declare float @copysignf(float, float) nounwind
diff --git a/test/CodeGen/ARM/fdivs.ll b/test/CodeGen/ARM/fdivs.ll
index 9af1217de1d0..0c3149579297 100644
--- a/test/CodeGen/ARM/fdivs.ll
+++ b/test/CodeGen/ARM/fdivs.ll
@@ -20,4 +20,4 @@ entry:
; CORTEXA8: test:
; CORTEXA8: vdiv.f32 s0, s1, s0
; CORTEXA9: test:
-; CORTEXA9: vdiv.f32 s0, s0, s1
+; CORTEXA9: vdiv.f32 s0, s1, s0
diff --git a/test/CodeGen/ARM/fmacs.ll b/test/CodeGen/ARM/fmacs.ll
index c4ceca9828b0..fb83ef626af6 100644
--- a/test/CodeGen/ARM/fmacs.ll
+++ b/test/CodeGen/ARM/fmacs.ll
@@ -1,24 +1,51 @@
; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NFP0
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=CORTEXA8
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s -check-prefix=CORTEXA9
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NEON
+; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8
-define float @test(float %acc, float %a, float %b) {
+define float @t1(float %acc, float %a, float %b) {
entry:
+; VFP2: t1:
+; VFP2: vmla.f32
+
+; NEON: t1:
+; NEON: vmla.f32
+
+; A8: t1:
+; A8: vmul.f32
+; A8: vadd.f32
%0 = fmul float %a, %b
%1 = fadd float %acc, %0
ret float %1
}
-; VFP2: test:
-; VFP2: vmla.f32 s2, s1, s0
+define double @t2(double %acc, double %a, double %b) {
+entry:
+; VFP2: t2:
+; VFP2: vmla.f64
+
+; NEON: t2:
+; NEON: vmla.f64
-; NFP1: test:
-; NFP1: vmul.f32 d0, d1, d0
-; NFP0: test:
-; NFP0: vmla.f32 s2, s1, s0
+; A8: t2:
+; A8: vmul.f64
+; A8: vadd.f64
+ %0 = fmul double %a, %b
+ %1 = fadd double %acc, %0
+ ret double %1
+}
-; CORTEXA8: test:
-; CORTEXA8: vmul.f32 d0, d1, d0
-; CORTEXA9: test:
-; CORTEXA9: vmla.f32 s0, s1, s2
+define float @t3(float %acc, float %a, float %b) {
+entry:
+; VFP2: t3:
+; VFP2: vmla.f32
+
+; NEON: t3:
+; NEON: vmla.f32
+
+; A8: t3:
+; A8: vmul.f32
+; A8: vadd.f32
+ %0 = fmul float %a, %b
+ %1 = fadd float %0, %acc
+ ret float %1
+}
diff --git a/test/CodeGen/ARM/fmscs.ll b/test/CodeGen/ARM/fmscs.ll
index 103ce334519b..a182833a7a2c 100644
--- a/test/CodeGen/ARM/fmscs.ll
+++ b/test/CodeGen/ARM/fmscs.ll
@@ -1,24 +1,35 @@
; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NFP0
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=CORTEXA8
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s -check-prefix=CORTEXA9
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NEON
+; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8
-define float @test(float %acc, float %a, float %b) {
+define float @t1(float %acc, float %a, float %b) {
entry:
+; VFP2: t1:
+; VFP2: vnmls.f32
+
+; NEON: t1:
+; NEON: vnmls.f32
+
+; A8: t1:
+; A8: vmul.f32
+; A8: vsub.f32
%0 = fmul float %a, %b
%1 = fsub float %0, %acc
ret float %1
}
-; VFP2: test:
-; VFP2: vnmls.f32 s2, s1, s0
+define double @t2(double %acc, double %a, double %b) {
+entry:
+; VFP2: t2:
+; VFP2: vnmls.f64
-; NFP1: test:
-; NFP1: vnmls.f32 s2, s1, s0
-; NFP0: test:
-; NFP0: vnmls.f32 s2, s1, s0
+; NEON: t2:
+; NEON: vnmls.f64
-; CORTEXA8: test:
-; CORTEXA8: vnmls.f32 s2, s1, s0
-; CORTEXA9: test:
-; CORTEXA9: vnmls.f32 s0, s1, s2
+; A8: t2:
+; A8: vmul.f64
+; A8: vsub.f64
+ %0 = fmul double %a, %b
+ %1 = fsub double %0, %acc
+ ret double %1
+}
diff --git a/test/CodeGen/ARM/fmuls.ll b/test/CodeGen/ARM/fmuls.ll
index bfafd20c8602..ef4e3e52818e 100644
--- a/test/CodeGen/ARM/fmuls.ll
+++ b/test/CodeGen/ARM/fmuls.ll
@@ -20,4 +20,4 @@ entry:
; CORTEXA8: test:
; CORTEXA8: vmul.f32 d0, d1, d0
; CORTEXA9: test:
-; CORTEXA9: vmul.f32 s0, s0, s1
+; CORTEXA9: vmul.f32 s0, s1, s0
diff --git a/test/CodeGen/ARM/fnegs.ll b/test/CodeGen/ARM/fnegs.ll
index c15005e6e8ab..418b59803d30 100644
--- a/test/CodeGen/ARM/fnegs.ll
+++ b/test/CodeGen/ARM/fnegs.ll
@@ -13,19 +13,19 @@ entry:
ret float %retval
}
; VFP2: test1:
-; VFP2: vneg.f32 s1, s0
+; VFP2: vneg.f32 s{{.*}}, s{{.*}}
; NFP1: test1:
-; NFP1: vneg.f32 d1, d0
+; NFP1: vneg.f32 d{{.*}}, d{{.*}}
; NFP0: test1:
-; NFP0: vneg.f32 s1, s0
+; NFP0: vneg.f32 s{{.*}}, s{{.*}}
; CORTEXA8: test1:
-; CORTEXA8: vneg.f32 d1, d0
+; CORTEXA8: vneg.f32 d{{.*}}, d{{.*}}
; CORTEXA9: test1:
-; CORTEXA9: vneg.f32 s1, s0
+; CORTEXA9: vneg.f32 s{{.*}}, s{{.*}}
define float @test2(float* %a) {
entry:
@@ -37,17 +37,17 @@ entry:
ret float %retval
}
; VFP2: test2:
-; VFP2: vneg.f32 s1, s0
+; VFP2: vneg.f32 s{{.*}}, s{{.*}}
; NFP1: test2:
-; NFP1: vneg.f32 d1, d0
+; NFP1: vneg.f32 d{{.*}}, d{{.*}}
; NFP0: test2:
-; NFP0: vneg.f32 s1, s0
+; NFP0: vneg.f32 s{{.*}}, s{{.*}}
; CORTEXA8: test2:
-; CORTEXA8: vneg.f32 d1, d0
+; CORTEXA8: vneg.f32 d{{.*}}, d{{.*}}
; CORTEXA9: test2:
-; CORTEXA9: vneg.f32 s1, s0
+; CORTEXA9: vneg.f32 s{{.*}}, s{{.*}}
diff --git a/test/CodeGen/ARM/fnmacs.ll b/test/CodeGen/ARM/fnmacs.ll
index 1d1d06a70ea6..1763d46e06c4 100644
--- a/test/CodeGen/ARM/fnmacs.ll
+++ b/test/CodeGen/ARM/fnmacs.ll
@@ -1,20 +1,35 @@
; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NEON
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=NEONFP
+; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8
-define float @test(float %acc, float %a, float %b) {
+define float @t1(float %acc, float %a, float %b) {
entry:
+; VFP2: t1:
; VFP2: vmls.f32
-; NEON: vmls.f32
-; NEONFP-NOT: vmls
-; NEONFP-NOT: vmov.f32
-; NEONFP: vmul.f32
-; NEONFP: vsub.f32
-; NEONFP: vmov
+; NEON: t1:
+; NEON: vmls.f32
+; A8: t1:
+; A8: vmul.f32
+; A8: vsub.f32
%0 = fmul float %a, %b
%1 = fsub float %acc, %0
ret float %1
}
+define double @t2(double %acc, double %a, double %b) {
+entry:
+; VFP2: t2:
+; VFP2: vmls.f64
+
+; NEON: t2:
+; NEON: vmls.f64
+
+; A8: t2:
+; A8: vmul.f64
+; A8: vsub.f64
+ %0 = fmul double %a, %b
+ %1 = fsub double %acc, %0
+ ret double %1
+}
diff --git a/test/CodeGen/ARM/fnmscs.ll b/test/CodeGen/ARM/fnmscs.ll
index 0b47edd5f1f1..76c806761f75 100644
--- a/test/CodeGen/ARM/fnmscs.ll
+++ b/test/CodeGen/ARM/fnmscs.ll
@@ -1,23 +1,71 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s
+; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NEON
+; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8
-define float @test1(float %acc, float %a, float %b) nounwind {
-; CHECK: vnmla.f32 s{{.*}}, s{{.*}}, s{{.*}}
+define float @t1(float %acc, float %a, float %b) nounwind {
entry:
+; VFP2: t1:
+; VFP2: vnmla.f32
+
+; NEON: t1:
+; NEON: vnmla.f32
+
+; A8: t1:
+; A8: vnmul.f32 s0, s{{[01]}}, s{{[01]}}
+; A8: vsub.f32 d0, d0, d1
%0 = fmul float %a, %b
%1 = fsub float -0.0, %0
%2 = fsub float %1, %acc
ret float %2
}
-define float @test2(float %acc, float %a, float %b) nounwind {
-; CHECK: vnmla.f32 s{{.*}}, s{{.*}}, s{{.*}}
+define float @t2(float %acc, float %a, float %b) nounwind {
entry:
+; VFP2: t2:
+; VFP2: vnmla.f32
+
+; NEON: t2:
+; NEON: vnmla.f32
+
+; A8: t2:
+; A8: vnmul.f32 s0, s{{[01]}}, s{{[01]}}
+; A8: vsub.f32 d0, d0, d1
%0 = fmul float %a, %b
%1 = fmul float -1.0, %0
%2 = fsub float %1, %acc
ret float %2
}
+define double @t3(double %acc, double %a, double %b) nounwind {
+entry:
+; VFP2: t3:
+; VFP2: vnmla.f64
+
+; NEON: t3:
+; NEON: vnmla.f64
+
+; A8: t3:
+; A8: vnmul.f64 d16, d1{{[67]}}, d1{{[67]}}
+; A8: vsub.f64 d16, d16, d17
+ %0 = fmul double %a, %b
+ %1 = fsub double -0.0, %0
+ %2 = fsub double %1, %acc
+ ret double %2
+}
+
+define double @t4(double %acc, double %a, double %b) nounwind {
+entry:
+; VFP2: t4:
+; VFP2: vnmla.f64
+
+; NEON: t4:
+; NEON: vnmla.f64
+
+; A8: t4:
+; A8: vnmul.f64 d16, d1{{[67]}}, d1{{[67]}}
+; A8: vsub.f64 d16, d16, d17
+ %0 = fmul double %a, %b
+ %1 = fmul double -1.0, %0
+ %2 = fsub double %1, %acc
+ ret double %2
+}
diff --git a/test/CodeGen/ARM/fp.ll b/test/CodeGen/ARM/fp.ll
index 8fbd45b97579..b6e9c3c22e75 100644
--- a/test/CodeGen/ARM/fp.ll
+++ b/test/CodeGen/ARM/fp.ll
@@ -51,7 +51,7 @@ entry:
define float @h2() {
;CHECK: h2:
-;CHECK: 1065353216
+;CHECK: mov r0, #254, 10
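+; 254 rotated right by 10 bits is 0x3F800000, the IEEE-754 bit pattern of
+; 1.0f, so the constant can be materialized with a single mov instead of a
+; constant-pool load.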
entry:
ret float 1.000000e+00
}
diff --git a/test/CodeGen/ARM/fpcmp-opt.ll b/test/CodeGen/ARM/fpcmp-opt.ll
index 64350591b87f..65b921bdf655 100644
--- a/test/CodeGen/ARM/fpcmp-opt.ll
+++ b/test/CodeGen/ARM/fpcmp-opt.ll
@@ -38,6 +38,7 @@ entry:
; FINITE: t2:
; FINITE-NOT: vldr
; FINITE: ldrd r0, [r0]
+; FINITE-NOT: b LBB
; FINITE: cmp r0, #0
; FINITE: cmpeq r1, #0
; FINITE-NOT: vcmpe.f32
diff --git a/test/CodeGen/ARM/fpcmp_ueq.ll b/test/CodeGen/ARM/fpcmp_ueq.ll
index 67f70e9eb5ed..2e6b3e3167ae 100644
--- a/test/CodeGen/ARM/fpcmp_ueq.ll
+++ b/test/CodeGen/ARM/fpcmp_ueq.ll
@@ -1,8 +1,14 @@
-; RUN: llc < %s -march=arm | grep moveq
-; RUN: llc < %s -march=arm -mattr=+vfp2 | grep movvs
+; RUN: llc < %s -mtriple=arm-apple-darwin | grep moveq
+; RUN: llc < %s -mtriple=armv7-apple-darwin -mcpu=cortex-a8 | FileCheck %s
define i32 @f7(float %a, float %b) {
entry:
+; CHECK: f7:
+; CHECK: vcmpe.f32
+; CHECK: vmrs apsr_nzcv, fpscr
+; CHECK: movweq
+; CHECK-NOT: vmrs
+; CHECK: movwvs
%tmp = fcmp ueq float %a,%b
%retval = select i1 %tmp, i32 666, i32 42
ret i32 %retval
diff --git a/test/CodeGen/ARM/fpconsts.ll b/test/CodeGen/ARM/fpconsts.ll
index f1d6a16f3edb..638dde9d8a0f 100644
--- a/test/CodeGen/ARM/fpconsts.ll
+++ b/test/CodeGen/ARM/fpconsts.ll
@@ -3,7 +3,7 @@
define float @t1(float %x) nounwind readnone optsize {
entry:
; CHECK: t1:
-; CHECK: vmov.f32 s1, #4.000000e+00
+; CHECK: vmov.f32 s{{.*}}, #4.000000e+00
%0 = fadd float %x, 4.000000e+00
ret float %0
}
@@ -11,7 +11,7 @@ entry:
define double @t2(double %x) nounwind readnone optsize {
entry:
; CHECK: t2:
-; CHECK: vmov.f64 d1, #3.000000e+00
+; CHECK: vmov.f64 d{{.*}}, #3.000000e+00
%0 = fadd double %x, 3.000000e+00
ret double %0
}
@@ -19,7 +19,7 @@ entry:
define double @t3(double %x) nounwind readnone optsize {
entry:
; CHECK: t3:
-; CHECK: vmov.f64 d1, #-1.300000e+01
+; CHECK: vmov.f64 d{{.*}}, #-1.300000e+01
%0 = fmul double %x, -1.300000e+01
ret double %0
}
@@ -27,7 +27,7 @@ entry:
define float @t4(float %x) nounwind readnone optsize {
entry:
; CHECK: t4:
-; CHECK: vmov.f32 s1, #-2.400000e+01
+; CHECK: vmov.f32 s{{.*}}, #-2.400000e+01
%0 = fmul float %x, -2.400000e+01
ret float %0
}
diff --git a/test/CodeGen/ARM/fpconv.ll b/test/CodeGen/ARM/fpconv.ll
index bf197a46cb77..1b4c008bb775 100644
--- a/test/CodeGen/ARM/fpconv.ll
+++ b/test/CodeGen/ARM/fpconv.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s --check-prefix=CHECK-VFP
-; RUN: llc < %s -march=arm | FileCheck %s
+; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s
define float @f1(double %x) {
;CHECK-VFP: f1:
diff --git a/test/CodeGen/ARM/global-merge.ll b/test/CodeGen/ARM/global-merge.ll
new file mode 100644
index 000000000000..28bf2214740a
--- /dev/null
+++ b/test/CodeGen/ARM/global-merge.ll
@@ -0,0 +1,23 @@
+; RUN: llc < %s -mtriple=thumb-apple-darwin | FileCheck %s
+; Test the ARMGlobalMerge pass. Use a Thumb target because it has a small
+; value for the maximum offset (127).
+
+; A local array that exceeds the maximum offset should not be merged.
+; CHECK: g0:
+@g0 = internal global [32 x i32] [ i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 1, i32 2 ]
+
+; CHECK: _MergedGlobals:
+@g1 = internal global i32 1
+@g2 = internal global i32 2
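+; Once merged, both globals can be addressed from a single base register,
+; roughly:
+;   ldr r0, =_MergedGlobals
+;   ldr r1, [r0]      @ g1
+;   ldr r2, [r0, #4]  @ g2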
+
+; Make sure that the complete variable fits within the range of the maximum
+; offset. Having the starting offset in range is not sufficient.
+; When this works properly, @g3 is placed in a separate chunk of merged globals.
+; CHECK: _MergedGlobals1:
+@g3 = internal global [30 x i32] [ i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10 ]
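+; ([30 x i32] is 120 bytes; placed after the 8 bytes of @g1 and @g2, the
+; end of @g3 would land at offset 128, just past the 127-byte limit.)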
+
+; Global variables that can be placed in BSS should be kept together in a
+; separate pool of merged globals.
+; CHECK: _MergedGlobals2
+@g4 = internal global i32 0
+@g5 = internal global i32 0
diff --git a/test/CodeGen/ARM/hello.ll b/test/CodeGen/ARM/hello.ll
index ccdc7bf4c140..bfed7a6630b4 100644
--- a/test/CodeGen/ARM/hello.ll
+++ b/test/CodeGen/ARM/hello.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm
; RUN: llc < %s -mtriple=arm-linux-gnueabi | grep mov | count 1
; RUN: llc < %s -mtriple=arm-linux-gnu --disable-fp-elim | \
-; RUN: grep mov | count 3
+; RUN: grep mov | count 2
; RUN: llc < %s -mtriple=arm-apple-darwin | grep mov | count 2
@str = internal constant [12 x i8] c"Hello World\00"
diff --git a/test/CodeGen/ARM/ifcvt10.ll b/test/CodeGen/ARM/ifcvt10.ll
new file mode 100644
index 000000000000..75428ac21655
--- /dev/null
+++ b/test/CodeGen/ARM/ifcvt10.ll
@@ -0,0 +1,43 @@
+; RUN: llc < %s -mtriple=arm-apple-darwin -mcpu=cortex-a9 | FileCheck %s
+; rdar://8402126
+; Make sure the if-converter does not predicate vldmia and ldmia. These are
+; micro-coded and would have a long issue latency even when predicated on a
+; false predicate.
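+; For example, an "ldmiane sp!, {r7, pc}" would still pay the long issue
+; latency even when the condition is false.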
+
+define void @t(double %a, double %b, double %c, double %d, i32* nocapture %solutions, double* nocapture %x) nounwind {
+entry:
+; CHECK: t:
+; CHECK: vpop {d8}
+; CHECK-NOT: vpopne
+; CHECK: ldmia sp!, {r7, pc}
+; CHECK: vpop {d8}
+; CHECK: ldmia sp!, {r7, pc}
+ br i1 undef, label %if.else, label %if.then
+
+if.then: ; preds = %entry
+ %mul73 = fmul double undef, 0.000000e+00
+ %sub76 = fsub double %mul73, undef
+ store double %sub76, double* undef, align 4
+ %call88 = tail call double @cos(double 0.000000e+00) nounwind
+ %mul89 = fmul double undef, %call88
+ %sub92 = fsub double %mul89, undef
+ store double %sub92, double* undef, align 4
+ ret void
+
+if.else: ; preds = %entry
+ %tmp101 = tail call double @llvm.pow.f64(double undef, double 0x3FD5555555555555)
+ %add112 = fadd double %tmp101, undef
+ %mul118 = fmul double %add112, undef
+ store double 0.000000e+00, double* %x, align 4
+ ret void
+}
+
+declare double @acos(double)
+
+declare double @sqrt(double) readnone
+
+declare double @cos(double) readnone
+
+declare double @fabs(double)
+
+declare double @llvm.pow.f64(double, double) nounwind readonly
diff --git a/test/CodeGen/ARM/ifcvt11.ll b/test/CodeGen/ARM/ifcvt11.ll
new file mode 100644
index 000000000000..63f8557d555b
--- /dev/null
+++ b/test/CodeGen/ARM/ifcvt11.ll
@@ -0,0 +1,59 @@
+; RUN: llc < %s -mtriple=arm-apple-darwin -mcpu=cortex-a8 | FileCheck %s
+; rdar://8598427
+; Adjust the if-converter heuristics to avoid predicating vmrs, which can
+; cause a significant regression.
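+; vmrs transfers the FP status flags from fpscr to apsr_nzcv and is
+; expensive enough that predicating it to fold away a branch is rarely a
+; win.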
+
+%struct.xyz_t = type { double, double, double }
+
+define i32 @effie(i32 %tsets, %struct.xyz_t* nocapture %p, i32 %a, i32 %b, i32 %c) nounwind readonly noinline {
+; CHECK: effie:
+entry:
+ %0 = icmp sgt i32 %tsets, 0
+ br i1 %0, label %bb.nph, label %bb6
+
+bb.nph: ; preds = %entry
+ %1 = add nsw i32 %b, %a
+ %2 = add nsw i32 %1, %c
+ br label %bb
+
+bb: ; preds = %bb4, %bb.nph
+; CHECK: vcmpe.f64
+; CHECK: vmrs apsr_nzcv, fpscr
+ %r.19 = phi i32 [ 0, %bb.nph ], [ %r.0, %bb4 ]
+ %n.08 = phi i32 [ 0, %bb.nph ], [ %10, %bb4 ]
+ %scevgep10 = getelementptr inbounds %struct.xyz_t* %p, i32 %n.08, i32 0
+ %scevgep11 = getelementptr %struct.xyz_t* %p, i32 %n.08, i32 1
+ %3 = load double* %scevgep10, align 4
+ %4 = load double* %scevgep11, align 4
+ %5 = fcmp uge double %3, %4
+ br i1 %5, label %bb3, label %bb1
+
+bb1: ; preds = %bb
+; CHECK-NOT: it
+; CHECK-NOT: vcmpemi
+; CHECK-NOT: vmrsmi
+; CHECK: vcmpe.f64
+; CHECK: vmrs apsr_nzcv, fpscr
+ %scevgep12 = getelementptr %struct.xyz_t* %p, i32 %n.08, i32 2
+ %6 = load double* %scevgep12, align 4
+ %7 = fcmp uge double %3, %6
+ br i1 %7, label %bb3, label %bb2
+
+bb2: ; preds = %bb1
+ %8 = add nsw i32 %2, %r.19
+ br label %bb4
+
+bb3: ; preds = %bb1, %bb
+ %9 = add nsw i32 %r.19, 1
+ br label %bb4
+
+bb4: ; preds = %bb3, %bb2
+ %r.0 = phi i32 [ %9, %bb3 ], [ %8, %bb2 ]
+ %10 = add nsw i32 %n.08, 1
+ %exitcond = icmp eq i32 %10, %tsets
+ br i1 %exitcond, label %bb6, label %bb
+
+bb6: ; preds = %bb4, %entry
+ %r.1.lcssa = phi i32 [ 0, %entry ], [ %r.0, %bb4 ]
+ ret i32 %r.1.lcssa
+}
diff --git a/test/CodeGen/ARM/ifcvt6.ll b/test/CodeGen/ARM/ifcvt6.ll
index e2c0ba398c68..5edf32fd1af6 100644
--- a/test/CodeGen/ARM/ifcvt6.ll
+++ b/test/CodeGen/ARM/ifcvt6.ll
@@ -1,10 +1,9 @@
-; RUN: llc < %s -march=arm -mtriple=arm-apple-darwin | \
-; RUN: grep cmpne | count 1
-; RUN: llc < %s -march=arm -mtriple=arm-apple-darwin | \
-; RUN: grep ldmiahi | count 1
+; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
define void @foo(i32 %X, i32 %Y) {
entry:
+; CHECK: cmpne
+; CHECK: ldmiahi sp!
%tmp1 = icmp ult i32 %X, 4 ; <i1> [#uses=1]
%tmp4 = icmp eq i32 %Y, 0 ; <i1> [#uses=1]
%tmp7 = or i1 %tmp4, %tmp1 ; <i1> [#uses=1]
diff --git a/test/CodeGen/ARM/ifcvt7.ll b/test/CodeGen/ARM/ifcvt7.ll
index eb97085ac004..62e13557cfdc 100644
--- a/test/CodeGen/ARM/ifcvt7.ll
+++ b/test/CodeGen/ARM/ifcvt7.ll
@@ -1,14 +1,12 @@
-; RUN: llc < %s -march=arm -mtriple=arm-apple-darwin | \
-; RUN: grep cmpeq | count 1
-; RUN: llc < %s -march=arm -mtriple=arm-apple-darwin | \
-; RUN: grep moveq | count 1
-; RUN: llc < %s -march=arm -mtriple=arm-apple-darwin | \
-; RUN: grep ldmiaeq | count 1
+; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
; FIXME: Need post-ifcvt branch folding to get rid of the extra br at end of BB1.
%struct.quad_struct = type { i32, i32, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct* }
define fastcc i32 @CountTree(%struct.quad_struct* %tree) {
+; CHECK: cmpeq
+; CHECK: moveq
+; CHECK: ldmiaeq sp!
entry:
br label %tailrecurse
diff --git a/test/CodeGen/ARM/ifcvt8.ll b/test/CodeGen/ARM/ifcvt8.ll
index 1e39060e69f2..5fdfc4ea6805 100644
--- a/test/CodeGen/ARM/ifcvt8.ll
+++ b/test/CodeGen/ARM/ifcvt8.ll
@@ -1,11 +1,11 @@
-; RUN: llc < %s -march=arm -mtriple=arm-apple-darwin | \
-; RUN: grep ldmiane | count 1
+; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
%struct.SString = type { i8*, i32, i32 }
declare void @abort()
define fastcc void @t(%struct.SString* %word, i8 signext %c) {
+; CHECK: ldmiane sp!
entry:
%tmp1 = icmp eq %struct.SString* %word, null ; <i1> [#uses=1]
br i1 %tmp1, label %cond_true, label %cond_false
diff --git a/test/CodeGen/ARM/inlineasm3.ll b/test/CodeGen/ARM/inlineasm3.ll
index 687e138c1b4e..9f77ad1f794c 100644
--- a/test/CodeGen/ARM/inlineasm3.ll
+++ b/test/CodeGen/ARM/inlineasm3.ll
@@ -7,7 +7,7 @@ define void @t() nounwind {
entry:
; CHECK: vmov.I64 q15, #0
; CHECK: vmov.32 d30[0], r0
-; CHECK: vmov q0, q15
+; CHECK: vmov q8, q15
%tmp = alloca %struct.int32x4_t, align 16
call void asm sideeffect "vmov.I64 q15, #0\0Avmov.32 d30[0], $1\0Avmov ${0:q}, q15\0A", "=*w,r,~{d31},~{d30}"(%struct.int32x4_t* %tmp, i32 8192) nounwind
ret void
@@ -18,7 +18,7 @@ entry:
define void @t2() nounwind {
entry:
-; CHECK: vmov d30, d0
+; CHECK: vmov d30, d16
; CHECK: vmov.32 r0, d30[0]
%asmtmp2 = tail call i32 asm sideeffect "vmov d30, $1\0Avmov.32 $0, d30[0]\0A", "=r,w,~{d30}"(<2 x i32> undef) nounwind
ret void
diff --git a/test/CodeGen/ARM/ispositive.ll b/test/CodeGen/ARM/ispositive.ll
index 245ed516f70b..2f1a2cfd7786 100644
--- a/test/CodeGen/ARM/ispositive.ll
+++ b/test/CodeGen/ARM/ispositive.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm | FileCheck %s
define i32 @test1(i32 %X) {
-; CHECK: mov r0, r0, lsr #31
+; CHECK: lsr{{.*}}#31
entry:
icmp slt i32 %X, 0 ; <i1>:0 [#uses=1]
zext i1 %0 to i32 ; <i32>:1 [#uses=1]
diff --git a/test/CodeGen/ARM/ldm.ll b/test/CodeGen/ARM/ldm.ll
index 78201a6b341a..2f1b85ebbb04 100644
--- a/test/CodeGen/ARM/ldm.ll
+++ b/test/CodeGen/ARM/ldm.ll
@@ -1,10 +1,13 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s
+; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
+; RUN: llc < %s -mtriple=armv4t-apple-darwin | FileCheck %s -check-prefix=V4T
@X = external global [0 x i32] ; <[0 x i32]*> [#uses=5]
define i32 @t1() {
; CHECK: t1:
; CHECK: ldmia
+; V4T: t1:
+; V4T: ldmia
%tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 0) ; <i32> [#uses=1]
%tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 1) ; <i32> [#uses=1]
%tmp4 = tail call i32 @f1( i32 %tmp, i32 %tmp3 ) ; <i32> [#uses=1]
@@ -14,6 +17,8 @@ define i32 @t1() {
define i32 @t2() {
; CHECK: t2:
; CHECK: ldmia
+; V4T: t2:
+; V4T: ldmia
%tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 2) ; <i32> [#uses=1]
%tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 3) ; <i32> [#uses=1]
%tmp5 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 4) ; <i32> [#uses=1]
@@ -25,6 +30,10 @@ define i32 @t3() {
; CHECK: t3:
; CHECK: ldmib
; CHECK: ldmia sp!
+; V4T: t3:
+; V4T: ldmib
+; V4T: pop
+; V4T-NEXT: bx lr
%tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 1) ; <i32> [#uses=1]
%tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 2) ; <i32> [#uses=1]
%tmp5 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 3) ; <i32> [#uses=1]
diff --git a/test/CodeGen/ARM/ldst-f32-2-i32.ll b/test/CodeGen/ARM/ldst-f32-2-i32.ll
new file mode 100644
index 000000000000..2d016f6cd423
--- /dev/null
+++ b/test/CodeGen/ARM/ldst-f32-2-i32.ll
@@ -0,0 +1,28 @@
+; RUN: llc < %s -mtriple=armv7-apple-darwin -mcpu=cortex-a8 | FileCheck %s
+; Check that the f32 load / store pair is optimized to an i32 load / store.
+; rdar://8944252
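+; Since the value is only copied and never used in FP arithmetic, going
+; through an integer register avoids a round trip through the VFP register
+; file.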
+
+define void @t(i32 %width, float* nocapture %src, float* nocapture %dst, i32 %index) nounwind {
+; CHECK: t:
+entry:
+ %src6 = bitcast float* %src to i8*
+ %0 = icmp eq i32 %width, 0
+ br i1 %0, label %return, label %bb
+
+bb:
+; CHECK: ldr [[REGISTER:(r[0-9]+)]], [r1], r3
+; CHECK: str [[REGISTER]], [r2], #4
+ %j.05 = phi i32 [ %2, %bb ], [ 0, %entry ]
+ %tmp = mul i32 %j.05, %index
+ %uglygep = getelementptr i8* %src6, i32 %tmp
+ %src_addr.04 = bitcast i8* %uglygep to float*
+ %dst_addr.03 = getelementptr float* %dst, i32 %j.05
+ %1 = load float* %src_addr.04, align 4
+ store float %1, float* %dst_addr.03, align 4
+ %2 = add i32 %j.05, 1
+ %exitcond = icmp eq i32 %2, %width
+ br i1 %exitcond, label %return, label %bb
+
+return:
+ ret void
+}
diff --git a/test/CodeGen/ARM/load-global.ll b/test/CodeGen/ARM/load-global.ll
new file mode 100644
index 000000000000..15a415df731d
--- /dev/null
+++ b/test/CodeGen/ARM/load-global.ll
@@ -0,0 +1,50 @@
+; RUN: llc < %s -mtriple=armv6-apple-darwin -relocation-model=static | FileCheck %s -check-prefix=STATIC
+; RUN: llc < %s -mtriple=armv6-apple-darwin -relocation-model=dynamic-no-pic | FileCheck %s -check-prefix=DYNAMIC
+; RUN: llc < %s -mtriple=armv6-apple-darwin -relocation-model=pic | FileCheck %s -check-prefix=PIC
+; RUN: llc < %s -mtriple=thumbv6-apple-darwin -relocation-model=pic | FileCheck %s -check-prefix=PIC_T
+; RUN: llc < %s -mtriple=armv7-apple-darwin -relocation-model=pic | FileCheck %s -check-prefix=PIC_V7
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi -relocation-model=pic | FileCheck %s -check-prefix=LINUX
+
+@G = external global i32
+
+define i32 @test1() {
+; STATIC: _test1:
+; STATIC: ldr r0, LCPI0_0
+; STATIC: ldr r0, [r0]
+; STATIC: .long _G
+
+; DYNAMIC: _test1:
+; DYNAMIC: ldr r0, LCPI0_0
+; DYNAMIC: ldr r0, [r0]
+; DYNAMIC: ldr r0, [r0]
+; DYNAMIC: .long L_G$non_lazy_ptr
+
+; PIC: _test1
+; PIC: ldr r0, LCPI0_0
+; PIC: ldr r0, [pc, r0]
+; PIC: ldr r0, [r0]
+; PIC: .long L_G$non_lazy_ptr-(LPC0_0+8)
+
+; PIC_T: _test1
+; PIC_T: ldr.n r0, LCPI0_0
+; PIC_T: add r0, pc
+; PIC_T: ldr r0, [r0]
+; PIC_T: ldr r0, [r0]
+; PIC_T: .long L_G$non_lazy_ptr-(LPC0_0+4)
+
+; PIC_V7: _test1
+; PIC_V7: movw r0, :lower16:(L_G$non_lazy_ptr-(LPC0_0+8))
+; PIC_V7: movt r0, :upper16:(L_G$non_lazy_ptr-(LPC0_0+8))
+; PIC_V7: ldr r0, [pc, r0]
+; PIC_V7: ldr r0, [r0]
+
+; LINUX: test1
+; LINUX: ldr r0, .LCPI0_0
+; LINUX: ldr r1, .LCPI0_1
+; LINUX: add r0, pc, r0
+; LINUX: ldr r0, [r1, r0]
+; LINUX: ldr r0, [r0]
+; LINUX: .long G(GOT)
+ %tmp = load i32* @G
+ ret i32 %tmp
+}
diff --git a/test/CodeGen/ARM/long.ll b/test/CodeGen/ARM/long.ll
index 16ef7cc2cb6c..74f8d783377d 100644
--- a/test/CodeGen/ARM/long.ll
+++ b/test/CodeGen/ARM/long.ll
@@ -14,22 +14,22 @@ entry:
define i64 @f3() {
; CHECK: f3:
-; CHECK: mvn{{.*}}-2147483648
+; CHECK: mvn r0, #2, 2
entry:
ret i64 2147483647
}
define i64 @f4() {
; CHECK: f4:
-; CHECK: -2147483648
+; CHECK: mov r0, #2, 2
entry:
ret i64 2147483648
}
define i64 @f5() {
; CHECK: f5:
-; CHECK: mvn
-; CHECK: mvn{{.*}}-2147483648
+; CHECK: mvn r0, #0
+; CHECK: mvn r1, #2, 2
entry:
ret i64 9223372036854775807
}
diff --git a/test/CodeGen/ARM/long_shift.ll b/test/CodeGen/ARM/long_shift.ll
index 1ec4d15f6672..5e4f5730f8d2 100644
--- a/test/CodeGen/ARM/long_shift.ll
+++ b/test/CodeGen/ARM/long_shift.ll
@@ -2,8 +2,8 @@
define i64 @f0(i64 %A, i64 %B) {
; CHECK: f0
-; CHECK: movs r3, r3, lsr #1
-; CHECK-NEXT: mov r2, r2, rrx
+; CHECK: lsrs r3, r3, #1
+; CHECK-NEXT: rrx r2, r2
; CHECK-NEXT: subs r0, r0, r2
; CHECK-NEXT: sbc r1, r1, r3
%tmp = bitcast i64 %A to i64
@@ -14,7 +14,7 @@ define i64 @f0(i64 %A, i64 %B) {
define i32 @f1(i64 %x, i64 %y) {
; CHECK: f1
-; CHECK: mov r0, r0, lsl r2
+; CHECK: lsl{{.*}}r2
%a = shl i64 %x, %y
%b = trunc i64 %a to i32
ret i32 %b
@@ -22,7 +22,7 @@ define i32 @f1(i64 %x, i64 %y) {
define i32 @f2(i64 %x, i64 %y) {
; CHECK: f2
-; CHECK: mov r0, r0, lsr r2
+; CHECK: lsr{{.*}}r2
; CHECK-NEXT: rsb r3, r2, #32
; CHECK-NEXT: subs r2, r2, #32
; CHECK-NEXT: orr r0, r0, r1, lsl r3
@@ -34,7 +34,7 @@ define i32 @f2(i64 %x, i64 %y) {
define i32 @f3(i64 %x, i64 %y) {
; CHECK: f3
-; CHECK: mov r0, r0, lsr r2
+; CHECK: lsr{{.*}}r2
; CHECK-NEXT: rsb r3, r2, #32
; CHECK-NEXT: subs r2, r2, #32
; CHECK-NEXT: orr r0, r0, r1, lsl r3
diff --git a/test/CodeGen/ARM/lsr-code-insertion.ll b/test/CodeGen/ARM/lsr-code-insertion.ll
index b8c543b1bd18..1bbb96deeefe 100644
--- a/test/CodeGen/ARM/lsr-code-insertion.ll
+++ b/test/CodeGen/ARM/lsr-code-insertion.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -stats |& grep {38.*Number of machine instrs printed}
+; RUN: llc < %s -stats |& grep {39.*Number of machine instrs printed}
; RUN: llc < %s -stats |& not grep {.*Number of re-materialization}
; This test really wants to check that the resultant "cond_true" block only
; has a single store in it, and that cond_true55 only has code to materialize
diff --git a/test/CodeGen/ARM/lsr-on-unrolled-loops.ll b/test/CodeGen/ARM/lsr-on-unrolled-loops.ll
index 866be423c2cb..9882690da268 100644
--- a/test/CodeGen/ARM/lsr-on-unrolled-loops.ll
+++ b/test/CodeGen/ARM/lsr-on-unrolled-loops.ll
@@ -4,14 +4,14 @@
; constant offset addressing, so that each of the following stores
; uses the same register.
-; CHECK: vstr.32 s{{.*}}, [r{{.*}}, #-128]
-; CHECK: vstr.32 s{{.*}}, [r{{.*}}, #-96]
-; CHECK: vstr.32 s{{.*}}, [r{{.*}}, #-64]
-; CHECK: vstr.32 s{{.*}}, [r{{.*}}, #-32]
-; CHECK: vstr.32 s{{.*}}, [r{{.*}}]
-; CHECK: vstr.32 s{{.*}}, [r{{.*}}, #32]
-; CHECK: vstr.32 s{{.*}}, [r{{.*}}, #64]
-; CHECK: vstr.32 s{{.*}}, [r{{.*}}, #96]
+; CHECK: vstr.32 s{{.*}}, [{{(r[0-9]+)|(lr)}}, #-128]
+; CHECK: vstr.32 s{{.*}}, [{{(r[0-9]+)|(lr)}}, #-96]
+; CHECK: vstr.32 s{{.*}}, [{{(r[0-9]+)|(lr)}}, #-64]
+; CHECK: vstr.32 s{{.*}}, [{{(r[0-9]+)|(lr)}}, #-32]
+; CHECK: vstr.32 s{{.*}}, [{{(r[0-9]+)|(lr)}}]
+; CHECK: vstr.32 s{{.*}}, [{{(r[0-9]+)|(lr)}}, #32]
+; CHECK: vstr.32 s{{.*}}, [{{(r[0-9]+)|(lr)}}, #64]
+; CHECK: vstr.32 s{{.*}}, [{{(r[0-9]+)|(lr)}}, #96]
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
@@ -624,12 +624,11 @@ bb23: ; preds = %bb22, %bb20, %bb9,
bb24: ; preds = %bb23
; LSR should use count-down iteration to avoid requiring the trip count
-; in a register, and it shouldn't require any reloads here.
+; in a register.
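+; Counting down lets the loop backedge test against zero ("subs ..., #1"
+; followed by "bne"), whereas counting up would also need the trip count
+; live in a register for a separate cmp.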
; CHECK: @ %bb24
-; CHECK-NEXT: @ in Loop: Header=BB1_1 Depth=1
-; CHECK-NEXT: sub{{.*}} [[REGISTER:(r[0-9]+)|(lr)]], #1
-; CHECK-NEXT: bne.w
+; CHECK: subs{{.*}} {{(r[0-9]+)|(lr)}}, #1
+; CHECK: bne.w
%92 = icmp eq i32 %tmp81, %indvar78 ; <i1> [#uses=1]
%indvar.next79 = add i32 %indvar78, 1 ; <i32> [#uses=1]
diff --git a/test/CodeGen/ARM/machine-licm.ll b/test/CodeGen/ARM/machine-licm.ll
new file mode 100644
index 000000000000..8656c5bbd72c
--- /dev/null
+++ b/test/CodeGen/ARM/machine-licm.ll
@@ -0,0 +1,66 @@
+; RUN: llc < %s -mtriple=thumb-apple-darwin -relocation-model=pic -disable-fp-elim | FileCheck %s -check-prefix=THUMB
+; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=pic -disable-fp-elim | FileCheck %s -check-prefix=ARM
+; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=pic -disable-fp-elim -mattr=+v6t2 | FileCheck %s -check-prefix=MOVT
+; rdar://7353541
+; rdar://7354376
+; rdar://8887598
+
+; The generated code is nowhere near ideal. It does not recognize that the
+; two constant-pool entries being loaded can be merged into one.
+
+@GV = external global i32 ; <i32*> [#uses=2]
+
+define void @t(i32* nocapture %vals, i32 %c) nounwind {
+entry:
+; ARM: t:
+; ARM: ldr [[REGISTER_1:r[0-9]+]], LCPI0_0
+; Unfortunately, ARM codegen currently does not CSE the ldr from the
+; constant pool. The issue is that the result can be read by either an
+; "add pc" or a "ldr [pc]", so it is messy to add the pseudo instructions
+; needed to make sure they are CSE'ed at the same time as the "ldr cp".
+; ARM: ldr r{{[0-9]+}}, LCPI0_1
+; ARM: LPC0_0:
+; ARM: ldr r{{[0-9]+}}, [pc, [[REGISTER_1]]]
+; ARM: ldr r{{[0-9]+}}, [r{{[0-9]+}}]
+
+; MOVT: t:
+; MOVT: movw [[REGISTER_2:r[0-9]+]], :lower16:(L_GV$non_lazy_ptr-(LPC0_0+8))
+; MOVT: movt [[REGISTER_2]], :upper16:(L_GV$non_lazy_ptr-(LPC0_0+8))
+; MOVT: LPC0_0:
+; MOVT: ldr r{{[0-9]+}}, [pc, [[REGISTER_2]]]
+; MOVT: ldr r{{[0-9]+}}, [r{{[0-9]+}}]
+
+; THUMB: t:
+ %0 = icmp eq i32 %c, 0 ; <i1> [#uses=1]
+ br i1 %0, label %return, label %bb.nph
+
+bb.nph: ; preds = %entry
+; ARM: LCPI0_0:
+; ARM: LCPI0_1:
+; ARM: .section
+
+; THUMB: BB#1
+; THUMB: ldr.n r2, LCPI0_0
+; THUMB: add r2, pc
+; THUMB: ldr r{{[0-9]+}}, [r2]
+; THUMB: LBB0_2
+; THUMB: LCPI0_0:
+; THUMB-NOT: LCPI0_1:
+; THUMB: .section
+ %.pre = load i32* @GV, align 4 ; <i32> [#uses=1]
+ br label %bb
+
+bb: ; preds = %bb, %bb.nph
+ %1 = phi i32 [ %.pre, %bb.nph ], [ %3, %bb ] ; <i32> [#uses=1]
+ %i.03 = phi i32 [ 0, %bb.nph ], [ %4, %bb ] ; <i32> [#uses=2]
+ %scevgep = getelementptr i32* %vals, i32 %i.03 ; <i32*> [#uses=1]
+ %2 = load i32* %scevgep, align 4 ; <i32> [#uses=1]
+ %3 = add nsw i32 %1, %2 ; <i32> [#uses=2]
+ store i32 %3, i32* @GV, align 4
+ %4 = add i32 %i.03, 1 ; <i32> [#uses=2]
+ %exitcond = icmp eq i32 %4, %c ; <i1> [#uses=1]
+ br i1 %exitcond, label %return, label %bb
+
+return: ; preds = %bb, %entry
+ ret void
+}
diff --git a/test/CodeGen/ARM/mul_const.ll b/test/CodeGen/ARM/mul_const.ll
index 8c102464612c..3cb8a8e816f6 100644
--- a/test/CodeGen/ARM/mul_const.ll
+++ b/test/CodeGen/ARM/mul_const.ll
@@ -36,7 +36,7 @@ define i32 @t12288(i32 %v) nounwind readnone {
entry:
; CHECK: t12288:
; CHECK: add r0, r0, r0, lsl #1
-; CHECK: mov r0, r0, lsl #12
+; CHECK: lsl{{.*}}#12
%0 = mul i32 %v, 12288
ret i32 %0
}
diff --git a/test/CodeGen/ARM/mult-alt-generic-arm.ll b/test/CodeGen/ARM/mult-alt-generic-arm.ll
new file mode 100644
index 000000000000..a8104db337f5
--- /dev/null
+++ b/test/CodeGen/ARM/mult-alt-generic-arm.ll
@@ -0,0 +1,323 @@
+; RUN: llc < %s -march=arm
+; ModuleID = 'mult-alt-generic.c'
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32"
+target triple = "arm"
+
+@mout0 = common global i32 0, align 4
+@min1 = common global i32 0, align 4
+@marray = common global [2 x i32] zeroinitializer, align 4
+
+define arm_aapcscc void @single_m() nounwind {
+entry:
+ call void asm "foo $1,$0", "=*m,*m"(i32* @mout0, i32* @min1) nounwind
+ ret void
+}
+
+define arm_aapcscc void @single_o() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ %index = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ store i32 1, i32* %index, align 4
+ ret void
+}
+
+define arm_aapcscc void @single_V() nounwind {
+entry:
+ ret void
+}
+
+define arm_aapcscc void @single_lt() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ %in1 = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ store i32 1, i32* %in1, align 4
+ %tmp = load i32* %in1, align 4
+ %0 = call i32 asm "foo $1,$0", "=r,<r"(i32 %tmp) nounwind
+ store i32 %0, i32* %out0, align 4
+ %tmp1 = load i32* %in1, align 4
+ %1 = call i32 asm "foo $1,$0", "=r,r<"(i32 %tmp1) nounwind
+ store i32 %1, i32* %out0, align 4
+ ret void
+}
+
+define arm_aapcscc void @single_gt() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ %in1 = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ store i32 1, i32* %in1, align 4
+ %tmp = load i32* %in1, align 4
+ %0 = call i32 asm "foo $1,$0", "=r,>r"(i32 %tmp) nounwind
+ store i32 %0, i32* %out0, align 4
+ %tmp1 = load i32* %in1, align 4
+ %1 = call i32 asm "foo $1,$0", "=r,r>"(i32 %tmp1) nounwind
+ store i32 %1, i32* %out0, align 4
+ ret void
+}
+
+define arm_aapcscc void @single_r() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ %in1 = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ store i32 1, i32* %in1, align 4
+ %tmp = load i32* %in1, align 4
+ %0 = call i32 asm "foo $1,$0", "=r,r"(i32 %tmp) nounwind
+ store i32 %0, i32* %out0, align 4
+ ret void
+}
+
+define arm_aapcscc void @single_i() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ %0 = call i32 asm "foo $1,$0", "=r,i"(i32 1) nounwind
+ store i32 %0, i32* %out0, align 4
+ ret void
+}
+
+define arm_aapcscc void @single_n() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ %0 = call i32 asm "foo $1,$0", "=r,n"(i32 1) nounwind
+ store i32 %0, i32* %out0, align 4
+ ret void
+}
+
+define arm_aapcscc void @single_E() nounwind {
+entry:
+ %out0 = alloca double, align 8
+ store double 0.000000e+000, double* %out0, align 8
+; No lowering support.
+; %0 = call double asm "foo $1,$0", "=r,E"(double 1.000000e+001) nounwind
+; store double %0, double* %out0, align 8
+ ret void
+}
+
+define arm_aapcscc void @single_F() nounwind {
+entry:
+ %out0 = alloca double, align 8
+ store double 0.000000e+000, double* %out0, align 8
+; No lowering support.
+; %0 = call double asm "foo $1,$0", "=r,F"(double 1.000000e+000) nounwind
+; store double %0, double* %out0, align 8
+ ret void
+}
+
+define arm_aapcscc void @single_s() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ ret void
+}
+
+define arm_aapcscc void @single_g() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ %in1 = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ store i32 1, i32* %in1, align 4
+ %tmp = load i32* %in1, align 4
+ %0 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp) nounwind
+ store i32 %0, i32* %out0, align 4
+ %tmp1 = load i32* @min1, align 4
+ %1 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp1) nounwind
+ store i32 %1, i32* %out0, align 4
+ %2 = call i32 asm "foo $1,$0", "=r,imr"(i32 1) nounwind
+ store i32 %2, i32* %out0, align 4
+ ret void
+}
+
+define arm_aapcscc void @single_X() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ %in1 = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ store i32 1, i32* %in1, align 4
+ %tmp = load i32* %in1, align 4
+ %0 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp) nounwind
+ store i32 %0, i32* %out0, align 4
+ %tmp1 = load i32* @min1, align 4
+ %1 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp1) nounwind
+ store i32 %1, i32* %out0, align 4
+ %2 = call i32 asm "foo $1,$0", "=r,X"(i32 1) nounwind
+ store i32 %2, i32* %out0, align 4
+ %3 = call i32 asm "foo $1,$0", "=r,X"(i32* getelementptr inbounds ([2 x i32]* @marray, i32 0, i32 0)) nounwind
+ store i32 %3, i32* %out0, align 4
+; No lowering support.
+; %4 = call i32 asm "foo $1,$0", "=r,X"(double 1.000000e+001) nounwind
+; store i32 %4, i32* %out0, align 4
+; %5 = call i32 asm "foo $1,$0", "=r,X"(double 1.000000e+000) nounwind
+; store i32 %5, i32* %out0, align 4
+ ret void
+}
+
+define arm_aapcscc void @single_p() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ %0 = call i32 asm "foo $1,$0", "=r,r"(i32* getelementptr inbounds ([2 x i32]* @marray, i32 0, i32 0)) nounwind
+ store i32 %0, i32* %out0, align 4
+ ret void
+}
+
+define arm_aapcscc void @multi_m() nounwind {
+entry:
+ %tmp = load i32* @min1, align 4
+ call void asm "foo $1,$0", "=*m|r,m|r"(i32* @mout0, i32 %tmp) nounwind
+ ret void
+}
+
+define arm_aapcscc void @multi_o() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ %index = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ store i32 1, i32* %index, align 4
+ ret void
+}
+
+define arm_aapcscc void @multi_V() nounwind {
+entry:
+ ret void
+}
+
+define arm_aapcscc void @multi_lt() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ %in1 = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ store i32 1, i32* %in1, align 4
+ %tmp = load i32* %in1, align 4
+ %0 = call i32 asm "foo $1,$0", "=r|r,r|<r"(i32 %tmp) nounwind
+ store i32 %0, i32* %out0, align 4
+ %tmp1 = load i32* %in1, align 4
+ %1 = call i32 asm "foo $1,$0", "=r|r,r|r<"(i32 %tmp1) nounwind
+ store i32 %1, i32* %out0, align 4
+ ret void
+}
+
+define arm_aapcscc void @multi_gt() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ %in1 = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ store i32 1, i32* %in1, align 4
+ %tmp = load i32* %in1, align 4
+ %0 = call i32 asm "foo $1,$0", "=r|r,r|>r"(i32 %tmp) nounwind
+ store i32 %0, i32* %out0, align 4
+ %tmp1 = load i32* %in1, align 4
+ %1 = call i32 asm "foo $1,$0", "=r|r,r|r>"(i32 %tmp1) nounwind
+ store i32 %1, i32* %out0, align 4
+ ret void
+}
+
+define arm_aapcscc void @multi_r() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ %in1 = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ store i32 1, i32* %in1, align 4
+ %tmp = load i32* %in1, align 4
+ %0 = call i32 asm "foo $1,$0", "=r|r,r|m"(i32 %tmp) nounwind
+ store i32 %0, i32* %out0, align 4
+ ret void
+}
+
+define arm_aapcscc void @multi_i() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ %0 = call i32 asm "foo $1,$0", "=r|r,r|i"(i32 1) nounwind
+ store i32 %0, i32* %out0, align 4
+ ret void
+}
+
+define arm_aapcscc void @multi_n() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ %0 = call i32 asm "foo $1,$0", "=r|r,r|n"(i32 1) nounwind
+ store i32 %0, i32* %out0, align 4
+ ret void
+}
+
+define arm_aapcscc void @multi_E() nounwind {
+entry:
+ %out0 = alloca double, align 8
+ store double 0.000000e+000, double* %out0, align 8
+; No lowering support.
+; %0 = call double asm "foo $1,$0", "=r|r,r|E"(double 1.000000e+001) nounwind
+; store double %0, double* %out0, align 8
+ ret void
+}
+
+define arm_aapcscc void @multi_F() nounwind {
+entry:
+ %out0 = alloca double, align 8
+ store double 0.000000e+000, double* %out0, align 8
+; No lowering support.
+; %0 = call double asm "foo $1,$0", "=r|r,r|F"(double 1.000000e+000) nounwind
+; store double %0, double* %out0, align 8
+ ret void
+}
+
+define arm_aapcscc void @multi_s() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ ret void
+}
+
+define arm_aapcscc void @multi_g() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ %in1 = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ store i32 1, i32* %in1, align 4
+ %tmp = load i32* %in1, align 4
+ %0 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp) nounwind
+ store i32 %0, i32* %out0, align 4
+ %tmp1 = load i32* @min1, align 4
+ %1 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp1) nounwind
+ store i32 %1, i32* %out0, align 4
+ %2 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 1) nounwind
+ store i32 %2, i32* %out0, align 4
+ ret void
+}
+
+define arm_aapcscc void @multi_X() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ %in1 = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ store i32 1, i32* %in1, align 4
+ %tmp = load i32* %in1, align 4
+ %0 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp) nounwind
+ store i32 %0, i32* %out0, align 4
+ %tmp1 = load i32* @min1, align 4
+ %1 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp1) nounwind
+ store i32 %1, i32* %out0, align 4
+ %2 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 1) nounwind
+ store i32 %2, i32* %out0, align 4
+ %3 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32* getelementptr inbounds ([2 x i32]* @marray, i32 0, i32 0)) nounwind
+ store i32 %3, i32* %out0, align 4
+; No lowering support.
+; %4 = call i32 asm "foo $1,$0", "=r|r,r|X"(double 1.000000e+001) nounwind
+; store i32 %4, i32* %out0, align 4
+; %5 = call i32 asm "foo $1,$0", "=r|r,r|X"(double 1.000000e+000) nounwind
+; store i32 %5, i32* %out0, align 4
+ ret void
+}
+
+define arm_aapcscc void @multi_p() nounwind {
+entry:
+ %out0 = alloca i32, align 4
+ store i32 0, i32* %out0, align 4
+ %0 = call i32 asm "foo $1,$0", "=r|r,r|r"(i32* getelementptr inbounds ([2 x i32]* @marray, i32 0, i32 0)) nounwind
+ store i32 %0, i32* %out0, align 4
+ ret void
+}
diff --git a/test/CodeGen/ARM/neon_div.ll b/test/CodeGen/ARM/neon_div.ll
new file mode 100644
index 000000000000..e33797079093
--- /dev/null
+++ b/test/CodeGen/ARM/neon_div.ll
@@ -0,0 +1,48 @@
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+
+define <8 x i8> @sdivi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK: vrecpe.f32
+;CHECK: vrecpe.f32
+;CHECK: vmovn.i32
+;CHECK: vmovn.i32
+;CHECK: vmovn.i16
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = sdiv <8 x i8> %tmp1, %tmp2
+ ret <8 x i8> %tmp3
+}
+
+define <8 x i8> @udivi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK: vrecpe.f32
+;CHECK: vrecps.f32
+;CHECK: vrecpe.f32
+;CHECK: vrecps.f32
+;CHECK: vmovn.i32
+;CHECK: vmovn.i32
+;CHECK: vqmovun.s16
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = udiv <8 x i8> %tmp1, %tmp2
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @sdivi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK: vrecpe.f32
+;CHECK: vrecps.f32
+;CHECK: vmovn.i32
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = sdiv <4 x i16> %tmp1, %tmp2
+ ret <4 x i16> %tmp3
+}
+
+define <4 x i16> @udivi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK: vrecpe.f32
+;CHECK: vrecps.f32
+;CHECK: vrecps.f32
+;CHECK: vmovn.i32
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = udiv <4 x i16> %tmp1, %tmp2
+ ret <4 x i16> %tmp3
+}
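
NEON has no integer divide, so these tests expect division of small
integer vectors to be lowered through f32: widen, take a reciprocal
estimate (vrecpe), refine it with Newton-Raphson steps (vrecps), multiply,
and narrow back. A scalar C sketch of the idea, where recip_estimate() is
a hypothetical stand-in for vrecpe.f32:

    /* Low-precision stand-in for vrecpe.f32; real hardware returns
       only about 8 good bits of 1/d. */
    static float recip_estimate(float d) {
        return 1.0f / d;
    }

    /* Each refinement line mirrors one vrecps (which computes
       2.0f - d * x) followed by a multiply. */
    float approx_div(float n, float d) {
        float x = recip_estimate(d);
        x = x * (2.0f - d * x);   /* first Newton-Raphson step */
        x = x * (2.0f - d * x);   /* second step: enough bits for i8/i16 */
        return n * x;
    }
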
diff --git a/test/CodeGen/ARM/pack.ll b/test/CodeGen/ARM/pack.ll
index 4905dc28cf48..90151767b919 100644
--- a/test/CodeGen/ARM/pack.ll
+++ b/test/CodeGen/ARM/pack.ll
@@ -3,87 +3,78 @@
; CHECK: test1
; CHECK: pkhbt r0, r0, r1, lsl #16
define i32 @test1(i32 %X, i32 %Y) {
- %tmp1 = and i32 %X, 65535 ; <i32> [#uses=1]
- %tmp4 = shl i32 %Y, 16 ; <i32> [#uses=1]
- %tmp5 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1]
- ret i32 %tmp5
-}
-
-; CHECK: test1a
-; CHECK: pkhbt r0, r0, r1, lsl #16
-define i32 @test1a(i32 %X, i32 %Y) {
- %tmp19 = and i32 %X, 65535 ; <i32> [#uses=1]
- %tmp37 = shl i32 %Y, 16 ; <i32> [#uses=1]
- %tmp5 = or i32 %tmp37, %tmp19 ; <i32> [#uses=1]
+ %tmp1 = and i32 %X, 65535
+ %tmp4 = shl i32 %Y, 16
+ %tmp5 = or i32 %tmp4, %tmp1
ret i32 %tmp5
}
; CHECK: test2
; CHECK: pkhbt r0, r0, r1, lsl #12
define i32 @test2(i32 %X, i32 %Y) {
- %tmp1 = and i32 %X, 65535 ; <i32> [#uses=1]
- %tmp3 = shl i32 %Y, 12 ; <i32> [#uses=1]
- %tmp4 = and i32 %tmp3, -65536 ; <i32> [#uses=1]
- %tmp57 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1]
+ %tmp1 = and i32 %X, 65535
+ %tmp3 = shl i32 %Y, 12
+ %tmp4 = and i32 %tmp3, -65536
+ %tmp57 = or i32 %tmp4, %tmp1
ret i32 %tmp57
}
; CHECK: test3
; CHECK: pkhbt r0, r0, r1, lsl #18
define i32 @test3(i32 %X, i32 %Y) {
- %tmp19 = and i32 %X, 65535 ; <i32> [#uses=1]
- %tmp37 = shl i32 %Y, 18 ; <i32> [#uses=1]
- %tmp5 = or i32 %tmp37, %tmp19 ; <i32> [#uses=1]
+ %tmp19 = and i32 %X, 65535
+ %tmp37 = shl i32 %Y, 18
+ %tmp5 = or i32 %tmp37, %tmp19
ret i32 %tmp5
}
; CHECK: test4
; CHECK: pkhbt r0, r0, r1
define i32 @test4(i32 %X, i32 %Y) {
- %tmp1 = and i32 %X, 65535 ; <i32> [#uses=1]
- %tmp3 = and i32 %Y, -65536 ; <i32> [#uses=1]
- %tmp46 = or i32 %tmp3, %tmp1 ; <i32> [#uses=1]
+ %tmp1 = and i32 %X, 65535
+ %tmp3 = and i32 %Y, -65536
+ %tmp46 = or i32 %tmp3, %tmp1
ret i32 %tmp46
}
; CHECK: test5
; CHECK: pkhtb r0, r0, r1, asr #16
define i32 @test5(i32 %X, i32 %Y) {
- %tmp17 = and i32 %X, -65536 ; <i32> [#uses=1]
- %tmp2 = bitcast i32 %Y to i32 ; <i32> [#uses=1]
- %tmp4 = lshr i32 %tmp2, 16 ; <i32> [#uses=2]
- %tmp5 = or i32 %tmp4, %tmp17 ; <i32> [#uses=1]
+ %tmp17 = and i32 %X, -65536
+ %tmp2 = bitcast i32 %Y to i32
+ %tmp4 = lshr i32 %tmp2, 16
+ %tmp5 = or i32 %tmp4, %tmp17
ret i32 %tmp5
}
; CHECK: test5a
; CHECK: pkhtb r0, r0, r1, asr #16
define i32 @test5a(i32 %X, i32 %Y) {
- %tmp110 = and i32 %X, -65536 ; <i32> [#uses=1]
- %tmp37 = lshr i32 %Y, 16 ; <i32> [#uses=1]
- %tmp39 = bitcast i32 %tmp37 to i32 ; <i32> [#uses=1]
- %tmp5 = or i32 %tmp39, %tmp110 ; <i32> [#uses=1]
+ %tmp110 = and i32 %X, -65536
+ %tmp37 = lshr i32 %Y, 16
+ %tmp39 = bitcast i32 %tmp37 to i32
+ %tmp5 = or i32 %tmp39, %tmp110
ret i32 %tmp5
}
; CHECK: test6
; CHECK: pkhtb r0, r0, r1, asr #12
define i32 @test6(i32 %X, i32 %Y) {
- %tmp1 = and i32 %X, -65536 ; <i32> [#uses=1]
- %tmp37 = lshr i32 %Y, 12 ; <i32> [#uses=1]
- %tmp38 = bitcast i32 %tmp37 to i32 ; <i32> [#uses=1]
- %tmp4 = and i32 %tmp38, 65535 ; <i32> [#uses=1]
- %tmp59 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1]
+ %tmp1 = and i32 %X, -65536
+ %tmp37 = lshr i32 %Y, 12
+ %tmp38 = bitcast i32 %tmp37 to i32
+ %tmp4 = and i32 %tmp38, 65535
+ %tmp59 = or i32 %tmp4, %tmp1
ret i32 %tmp59
}
; CHECK: test7
; CHECK: pkhtb r0, r0, r1, asr #18
define i32 @test7(i32 %X, i32 %Y) {
- %tmp1 = and i32 %X, -65536 ; <i32> [#uses=1]
- %tmp3 = ashr i32 %Y, 18 ; <i32> [#uses=1]
- %tmp4 = and i32 %tmp3, 65535 ; <i32> [#uses=1]
- %tmp57 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1]
+ %tmp1 = and i32 %X, -65536
+ %tmp3 = ashr i32 %Y, 18
+ %tmp4 = and i32 %tmp3, 65535
+ %tmp57 = or i32 %tmp4, %tmp1
ret i32 %tmp57
}
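
All of the pkhbt/pkhtb patterns above reduce to two C idioms: pkhbt keeps
the bottom halfword of one operand and inserts a (possibly left-shifted)
top halfword from the other; pkhtb keeps the top halfword and inserts a
(possibly arithmetically right-shifted) bottom halfword. The canonical
forms, as a sketch:

    #include <stdint.h>

    /* pkhbt rd, rn, rm, lsl #16: bottom 16 bits from x, top from y. */
    uint32_t pack_bt(uint32_t x, uint32_t y) {
        return (x & 0xFFFFu) | (y << 16);
    }

    /* pkhtb rd, rn, rm, asr #16: top 16 bits from x, bottom from the
       arithmetically shifted y. */
    uint32_t pack_tb(uint32_t x, int32_t y) {
        return (x & 0xFFFF0000u) | ((uint32_t)(y >> 16) & 0xFFFFu);
    }
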
diff --git a/test/CodeGen/ARM/phi.ll b/test/CodeGen/ARM/phi.ll
new file mode 100644
index 000000000000..29e17c095a74
--- /dev/null
+++ b/test/CodeGen/ARM/phi.ll
@@ -0,0 +1,23 @@
+; RUN: llc -march=arm < %s | FileCheck %s
+; <rdar://problem/8686347>
+
+define i32 @test1(i1 %a, i32* %b) {
+; CHECK: test1
+entry:
+ br i1 %a, label %lblock, label %rblock
+
+lblock:
+ %lbranch = getelementptr i32* %b, i32 1
+ br label %end
+
+rblock:
+ %rbranch = getelementptr i32* %b, i32 1
+ br label %end
+
+end:
+; CHECK: ldr r0, [r1, #4]
+ %gep = phi i32* [%lbranch, %lblock], [%rbranch, %rblock]
+ %r = load i32* %gep
+; CHECK-NEXT: bx lr
+ ret i32 %r
+}
\ No newline at end of file
diff --git a/test/CodeGen/ARM/prefetch.ll b/test/CodeGen/ARM/prefetch.ll
new file mode 100644
index 000000000000..895b27b749db
--- /dev/null
+++ b/test/CodeGen/ARM/prefetch.ll
@@ -0,0 +1,61 @@
+; RUN: llc < %s -march=thumb -mattr=-thumb2 | not grep pld
+; RUN: llc < %s -march=thumb -mattr=+v7a | FileCheck %s -check-prefix=THUMB2
+; RUN: llc < %s -march=arm -mattr=+v7a,+mp | FileCheck %s -check-prefix=ARM-MP
+; rdar://8601536
+
+define void @t1(i8* %ptr) nounwind {
+entry:
+; ARM-MP: t1:
+; ARM-MP: pldw [r0]
+; ARM-MP: pld [r0]
+
+; THUMB2: t1:
+; THUMB2-NOT: pldw [r0]
+; THUMB2: pld [r0]
+ tail call void @llvm.prefetch( i8* %ptr, i32 1, i32 3 )
+ tail call void @llvm.prefetch( i8* %ptr, i32 0, i32 3 )
+ ret void
+}
+
+define void @t2(i8* %ptr) nounwind {
+entry:
+; ARM-MP: t2:
+; ARM-MP: pld [r0, #1023]
+
+; THUMB2: t2:
+; THUMB2: pld [r0, #1023]
+ %tmp = getelementptr i8* %ptr, i32 1023
+ tail call void @llvm.prefetch( i8* %tmp, i32 0, i32 3 )
+ ret void
+}
+
+define void @t3(i32 %base, i32 %offset) nounwind {
+entry:
+; ARM-MP: t3:
+; ARM-MP: pld [r0, r1, lsr #2]
+
+; THUMB2: t3:
+; THUMB2: lsrs r1, r1, #2
+; THUMB2: pld [r0, r1]
+ %tmp1 = lshr i32 %offset, 2
+ %tmp2 = add i32 %base, %tmp1
+ %tmp3 = inttoptr i32 %tmp2 to i8*
+ tail call void @llvm.prefetch( i8* %tmp3, i32 0, i32 3 )
+ ret void
+}
+
+define void @t4(i32 %base, i32 %offset) nounwind {
+entry:
+; ARM-MP: t4:
+; ARM-MP: pld [r0, r1, lsl #2]
+
+; THUMB2: t4:
+; THUMB2: pld [r0, r1, lsl #2]
+ %tmp1 = shl i32 %offset, 2
+ %tmp2 = add i32 %base, %tmp1
+ %tmp3 = inttoptr i32 %tmp2 to i8*
+ tail call void @llvm.prefetch( i8* %tmp3, i32 0, i32 3 )
+ ret void
+}
+
+declare void @llvm.prefetch(i8*, i32, i32) nounwind
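
The llvm.prefetch intrinsic used here takes (address, rw, locality), the
same shape as the GCC/Clang builtin; rw = 1 is a prepare-for-write hint,
which only cores with the MP extension can express as pldw (hence the
THUMB2-NOT check). A C-level equivalent of the @t1 body:

    /* One write hint and one read hint, both with maximal temporal
       locality (3). With +mp these can lower to pldw and pld. */
    void prefetch_both(const char *ptr) {
        __builtin_prefetch(ptr, 1, 3);   /* write hint -> pldw */
        __builtin_prefetch(ptr, 0, 3);   /* read hint  -> pld  */
    }
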
diff --git a/test/CodeGen/ARM/reg_sequence.ll b/test/CodeGen/ARM/reg_sequence.ll
index 2e4f10d8a63d..53214fd4c302 100644
--- a/test/CodeGen/ARM/reg_sequence.ll
+++ b/test/CodeGen/ARM/reg_sequence.ll
@@ -46,8 +46,8 @@ entry:
; CHECK: t2:
; CHECK: vld1.16
; CHECK-NOT: vmov
-; CHECK: vld1.16
; CHECK: vmul.i16
+; CHECK: vld1.16
; CHECK: vmul.i16
; CHECK-NOT: vmov
; CHECK: vst1.16
@@ -75,7 +75,8 @@ define <8 x i8> @t3(i8* %A, i8* %B) nounwind {
; CHECK: t3:
; CHECK: vld3.8
; CHECK: vmul.i8
-; CHECK-NOT: vmov
+; CHECK: vmov r
+; CHECK-NOT: vmov d
; CHECK: vst3.8
%tmp1 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A, i32 1) ; <%struct.__neon_int8x8x3_t> [#uses=2]
%tmp2 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 0 ; <<8 x i8>> [#uses=1]
@@ -122,9 +123,9 @@ return1:
return2:
; CHECK: %return2
; CHECK: vadd.i32
-; CHECK: vmov q1, q3
+; CHECK: vmov q9, q11
; CHECK-NOT: vmov
-; CHECK: vst2.32 {d0, d1, d2, d3}
+; CHECK: vst2.32 {d16, d17, d18, d19}
%tmp100 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 0 ; <<4 x i32>> [#uses=1]
%tmp101 = extractvalue %struct.__neon_int32x4x2_t %tmp5, 1 ; <<4 x i32>> [#uses=1]
%tmp102 = add <4 x i32> %tmp100, %tmp101 ; <<4 x i32>> [#uses=1]
@@ -136,9 +137,9 @@ return2:
define <8 x i16> @t5(i16* %A, <8 x i16>* %B) nounwind {
; CHECK: t5:
; CHECK: vldmia
-; CHECK: vmov q1, q0
+; CHECK: vmov q9, q8
; CHECK-NOT: vmov
-; CHECK: vld2.16 {d0[1], d2[1]}, [r0]
+; CHECK: vld2.16 {d16[1], d18[1]}, [r0]
; CHECK-NOT: vmov
; CHECK: vadd.i16
%tmp0 = bitcast i16* %A to i8* ; <i8*> [#uses=1]
@@ -153,8 +154,8 @@ define <8 x i16> @t5(i16* %A, <8 x i16>* %B) nounwind {
define <8 x i8> @t6(i8* %A, <8 x i8>* %B) nounwind {
; CHECK: t6:
; CHECK: vldr.64
-; CHECK: vmov d1, d0
-; CHECK-NEXT: vld2.8 {d0[1], d1[1]}
+; CHECK: vmov d17, d16
+; CHECK-NEXT: vld2.8 {d16[1], d17[1]}
%tmp1 = load <8 x i8>* %B ; <<8 x i8>> [#uses=2]
%tmp2 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 1) ; <%struct.__neon_int8x8x2_t> [#uses=2]
%tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0 ; <<8 x i8>> [#uses=1]
@@ -168,10 +169,10 @@ entry:
; CHECK: t7:
; CHECK: vld2.32
; CHECK: vst2.32
-; CHECK: vld1.32 {d0, d1},
-; CHECK: vmov q1, q0
+; CHECK: vld1.32 {d16, d17},
+; CHECK: vmov q9, q8
; CHECK-NOT: vmov
-; CHECK: vuzp.32 q0, q1
+; CHECK: vuzp.32 q8, q9
; CHECK: vst1.32
%0 = bitcast i32* %iptr to i8* ; <i8*> [#uses=2]
%1 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %0, i32 1) ; <%struct.__neon_int32x4x2_t> [#uses=2]
@@ -188,7 +189,7 @@ entry:
; PR7156
define arm_aapcs_vfpcc i32 @t8() nounwind {
; CHECK: t8:
-; CHECK: vrsqrte.f32 q0, q0
+; CHECK: vrsqrte.f32 q8, q8
bb.nph55.bb.nph55.split_crit_edge:
br label %bb3
@@ -238,10 +239,10 @@ bb14: ; preds = %bb6
define arm_aapcs_vfpcc float @t9(%0* nocapture, %3* nocapture) nounwind {
; CHECK: t9:
; CHECK: vldr.64
-; CHECK-NOT: vmov d{{.*}}, d0
-; CHECK: vmov.i32 d1
-; CHECK-NEXT: vstmia r0, {d0, d1}
-; CHECK-NEXT: vstmia r0, {d0, d1}
+; CHECK-NOT: vmov d{{.*}}, d16
+; CHECK: vmov.i32 d17
+; CHECK-NEXT: vstmia r0, {d16, d17}
+; CHECK-NEXT: vstmia r0, {d16, d17}
%3 = bitcast double 0.000000e+00 to <2 x float> ; <<2 x float>> [#uses=2]
%4 = shufflevector <2 x float> %3, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
store <4 x float> %4, <4 x float>* undef, align 16
@@ -269,9 +270,9 @@ define arm_aapcs_vfpcc float @t9(%0* nocapture, %3* nocapture) nounwind {
define arm_aapcs_vfpcc i32 @t10() nounwind {
entry:
; CHECK: t10:
-; CHECK: vmov.i32 q1, #0x3F000000
-; CHECK: vmov d0, d1
-; CHECK: vmla.f32 q0, q0, d0[0]
+; CHECK: vmul.f32 q8, q8, d0[0]
+; CHECK: vmov.i32 q9, #0x3F000000
+; CHECK: vadd.f32 q8, q8, q8
%0 = shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
%1 = insertelement <4 x float> %0, float undef, i32 1 ; <<4 x float>> [#uses=1]
%2 = insertelement <4 x float> %1, float undef, i32 2 ; <<4 x float>> [#uses=1]
diff --git a/test/CodeGen/ARM/remat.ll b/test/CodeGen/ARM/remat.ll
deleted file mode 100644
index 6b86f1a9f368..000000000000
--- a/test/CodeGen/ARM/remat.ll
+++ /dev/null
@@ -1,65 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2 -o /dev/null -stats -info-output-file - | grep "Number of re-materialization"
-
-define i32 @main(i32 %argc, i8** nocapture %argv, double %d1, double %d2) nounwind {
-entry:
- br i1 undef, label %smvp.exit, label %bb.i3
-
-bb.i3: ; preds = %bb.i3, %bb134
- br i1 undef, label %smvp.exit, label %bb.i3
-
-smvp.exit: ; preds = %bb.i3
- %0 = fmul double %d1, 2.400000e-03 ; <double> [#uses=2]
- br i1 undef, label %bb138.preheader, label %bb159
-
-bb138.preheader: ; preds = %smvp.exit
- br label %bb138
-
-bb138: ; preds = %bb138, %bb138.preheader
- br i1 undef, label %bb138, label %bb145.loopexit
-
-bb142: ; preds = %bb.nph218.bb.nph218.split_crit_edge, %phi0.exit
- %1 = fmul double %d1, -1.200000e-03 ; <double> [#uses=1]
- %2 = fadd double %d2, %1 ; <double> [#uses=1]
- %3 = fmul double %2, %d2 ; <double> [#uses=1]
- %4 = fsub double 0.000000e+00, %3 ; <double> [#uses=1]
- br i1 %14, label %phi1.exit, label %bb.i35
-
-bb.i35: ; preds = %bb142
- %5 = call double @sin(double %15) nounwind readonly ; <double> [#uses=1]
- %6 = fmul double %5, 0x4031740AFA84AD8A ; <double> [#uses=1]
- %7 = fsub double 1.000000e+00, undef ; <double> [#uses=1]
- %8 = fdiv double %7, 6.000000e-01 ; <double> [#uses=1]
- br label %phi1.exit
-
-phi1.exit: ; preds = %bb.i35, %bb142
- %.pn = phi double [ %6, %bb.i35 ], [ 0.000000e+00, %bb142 ] ; <double> [#uses=1]
- %9 = phi double [ %8, %bb.i35 ], [ 0.000000e+00, %bb142 ] ; <double> [#uses=1]
- %10 = fmul double %.pn, %9 ; <double> [#uses=1]
- br i1 %14, label %phi0.exit, label %bb.i
-
-bb.i: ; preds = %phi1.exit
- unreachable
-
-phi0.exit: ; preds = %phi1.exit
- %11 = fsub double %4, %10 ; <double> [#uses=1]
- %12 = fadd double 0.000000e+00, %11 ; <double> [#uses=1]
- store double %12, double* undef, align 4
- br label %bb142
-
-bb145.loopexit: ; preds = %bb138
- br i1 undef, label %bb.nph218.bb.nph218.split_crit_edge, label %bb159
-
-bb.nph218.bb.nph218.split_crit_edge: ; preds = %bb145.loopexit
- %13 = fmul double %0, 0x401921FB54442D18 ; <double> [#uses=1]
- %14 = fcmp ugt double %0, 6.000000e-01 ; <i1> [#uses=2]
- %15 = fdiv double %13, 6.000000e-01 ; <double> [#uses=1]
- br label %bb142
-
-bb159: ; preds = %bb145.loopexit, %smvp.exit, %bb134
- unreachable
-
-bb166: ; preds = %bb127
- unreachable
-}
-
-declare double @sin(double) nounwind readonly
diff --git a/test/CodeGen/ARM/rev.ll b/test/CodeGen/ARM/rev.ll
index 1c12268ef86c..687bf8834c9f 100644
--- a/test/CodeGen/ARM/rev.ll
+++ b/test/CodeGen/ARM/rev.ll
@@ -1,27 +1,30 @@
-; RUN: llc < %s -march=arm -mattr=+v6 | grep rev16
-; RUN: llc < %s -march=arm -mattr=+v6 | grep revsh
+; RUN: llc < %s -march=arm -mattr=+v6 | FileCheck %s
define i32 @test1(i32 %X) {
- %tmp1 = lshr i32 %X, 8 ; <i32> [#uses=3]
- %X15 = bitcast i32 %X to i32 ; <i32> [#uses=1]
- %tmp4 = shl i32 %X15, 8 ; <i32> [#uses=2]
- %tmp2 = and i32 %tmp1, 16711680 ; <i32> [#uses=1]
- %tmp5 = and i32 %tmp4, -16777216 ; <i32> [#uses=1]
- %tmp9 = and i32 %tmp1, 255 ; <i32> [#uses=1]
- %tmp13 = and i32 %tmp4, 65280 ; <i32> [#uses=1]
- %tmp6 = or i32 %tmp5, %tmp2 ; <i32> [#uses=1]
- %tmp10 = or i32 %tmp6, %tmp13 ; <i32> [#uses=1]
- %tmp14 = or i32 %tmp10, %tmp9 ; <i32> [#uses=1]
+; CHECK: test1
+; CHECK: rev16 r0, r0
+ %tmp1 = lshr i32 %X, 8
+ %X15 = bitcast i32 %X to i32
+ %tmp4 = shl i32 %X15, 8
+ %tmp2 = and i32 %tmp1, 16711680
+ %tmp5 = and i32 %tmp4, -16777216
+ %tmp9 = and i32 %tmp1, 255
+ %tmp13 = and i32 %tmp4, 65280
+ %tmp6 = or i32 %tmp5, %tmp2
+ %tmp10 = or i32 %tmp6, %tmp13
+ %tmp14 = or i32 %tmp10, %tmp9
ret i32 %tmp14
}
define i32 @test2(i32 %X) {
- %tmp1 = lshr i32 %X, 8 ; <i32> [#uses=1]
- %tmp1.upgrd.1 = trunc i32 %tmp1 to i16 ; <i16> [#uses=1]
- %tmp3 = trunc i32 %X to i16 ; <i16> [#uses=1]
- %tmp2 = and i16 %tmp1.upgrd.1, 255 ; <i16> [#uses=1]
- %tmp4 = shl i16 %tmp3, 8 ; <i16> [#uses=1]
- %tmp5 = or i16 %tmp2, %tmp4 ; <i16> [#uses=1]
- %tmp5.upgrd.2 = sext i16 %tmp5 to i32 ; <i32> [#uses=1]
+; CHECK: test2
+; CHECK: revsh r0, r0
+ %tmp1 = lshr i32 %X, 8
+ %tmp1.upgrd.1 = trunc i32 %tmp1 to i16
+ %tmp3 = trunc i32 %X to i16
+ %tmp2 = and i16 %tmp1.upgrd.1, 255
+ %tmp4 = shl i16 %tmp3, 8
+ %tmp5 = or i16 %tmp2, %tmp4
+ %tmp5.upgrd.2 = sext i16 %tmp5 to i32
ret i32 %tmp5.upgrd.2
}
diff --git a/test/CodeGen/ARM/select-imm.ll b/test/CodeGen/ARM/select-imm.ll
index 6e15fde045fb..578834ec93bc 100644
--- a/test/CodeGen/ARM/select-imm.ll
+++ b/test/CodeGen/ARM/select-imm.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -march=arm | FileCheck %s --check-prefix=ARM
-; RUN: llc < %s -march=arm -mattr=+thumb2 | FileCheck %s --check-prefix=T2
+; RUN: llc < %s -march=arm | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -march=arm -mattr=+thumb2 | FileCheck %s --check-prefix=ARMT2
+; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s --check-prefix=THUMB2
define i32 @t1(i32 %c) nounwind readnone {
entry:
@@ -8,9 +9,13 @@ entry:
; ARM: orr r1, r1, #1, 24
; ARM: movgt r0, #123
-; T2: t1:
-; T2: movw r0, #357
-; T2: movgt r0, #123
+; ARMT2: t1:
+; ARMT2: movw r0, #357
+; ARMT2: movgt r0, #123
+
+; THUMB2: t1:
+; THUMB2: movw r0, #357
+; THUMB2: movgt r0, #123
%0 = icmp sgt i32 %c, 1
%1 = select i1 %0, i32 123, i32 357
@@ -20,13 +25,17 @@ entry:
define i32 @t2(i32 %c) nounwind readnone {
entry:
; ARM: t2:
-; ARM: mov r1, #101
-; ARM: orr r1, r1, #1, 24
-; ARM: movle r0, #123
+; ARM: mov r0, #123
+; ARM: movgt r0, #101
+; ARM: orrgt r0, r0, #1, 24
-; T2: t2:
-; T2: movw r0, #357
-; T2: movle r0, #123
+; ARMT2: t2:
+; ARMT2: mov r0, #123
+; ARMT2: movwgt r0, #357
+
+; THUMB2: t2:
+; THUMB2: mov.w r0, #123
+; THUMB2: movwgt r0, #357
%0 = icmp sgt i32 %c, 1
%1 = select i1 %0, i32 357, i32 123
@@ -39,10 +48,31 @@ entry:
; ARM: mov r0, #0
; ARM: moveq r0, #1
-; T2: t3:
-; T2: mov r0, #0
-; T2: moveq r0, #1
+; ARMT2: t3:
+; ARMT2: mov r0, #0
+; ARMT2: moveq r0, #1
+
+; THUMB2: t3:
+; THUMB2: mov.w r0, #0
+; THUMB2: moveq r0, #1
%0 = icmp eq i32 %a, 160
%1 = zext i1 %0 to i32
ret i32 %1
}
+
+define i32 @t4(i32 %a, i32 %b, i32 %x) nounwind {
+entry:
+; ARM: t4:
+; ARM: ldr
+; ARM: movlt
+
+; ARMT2: t4:
+; ARMT2: movwlt r0, #65365
+; ARMT2: movtlt r0, #65365
+
+; THUMB2: t4:
+; THUMB2: mvnlt.w r0, #11141290
+ %0 = icmp slt i32 %a, %b
+ %1 = select i1 %0, i32 4283826005, i32 %x
+ ret i32 %1
+}
diff --git a/test/CodeGen/ARM/select.ll b/test/CodeGen/ARM/select.ll
index 7413bed5c5b1..1aa0d3904125 100644
--- a/test/CodeGen/ARM/select.ll
+++ b/test/CodeGen/ARM/select.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | FileCheck %s
+; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s
; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s --check-prefix=CHECK-VFP
; RUN: llc < %s -mattr=+neon,+thumb2 -mtriple=thumbv7-apple-darwin | FileCheck %s --check-prefix=CHECK-NEON
@@ -79,9 +79,9 @@ define double @f7(double %a, double %b) {
; CHECK-NEON: movw [[REGISTER_1:r[0-9]+]], #1123
; CHECK-NEON-NEXT: movs [[REGISTER_2:r[0-9]+]], #0
; CHECK-NEON-NEXT: cmp r0, [[REGISTER_1]]
-; CHECK-NEON-NEXT: adr [[REGISTER_3:r[0-9]+]], #LCPI
; CHECK-NEON-NEXT: it eq
; CHECK-NEON-NEXT: moveq [[REGISTER_2]], #4
+; CHECK-NEON-NEXT: adr [[REGISTER_3:r[0-9]+]], #LCPI
; CHECK-NEON-NEXT: ldr
; CHECK-NEON: bx
diff --git a/test/CodeGen/ARM/select_xform.ll b/test/CodeGen/ARM/select_xform.ll
index 7fd91ceea5ad..5dabfc3a82a3 100644
--- a/test/CodeGen/ARM/select_xform.ll
+++ b/test/CodeGen/ARM/select_xform.ll
@@ -1,15 +1,60 @@
-; RUN: llc < %s -march=arm | grep mov | count 2
+; RUN: llc < %s -mtriple=arm-apple-darwin -mcpu=cortex-a8 | FileCheck %s -check-prefix=ARM
+; RUN: llc < %s -mtriple=thumb-apple-darwin -mcpu=cortex-a8 | FileCheck %s -check-prefix=T2
+; rdar://8662825
define i32 @t1(i32 %a, i32 %b, i32 %c) nounwind {
- %tmp1 = icmp sgt i32 %c, 10
- %tmp2 = select i1 %tmp1, i32 0, i32 2147483647
- %tmp3 = add i32 %tmp2, %b
- ret i32 %tmp3
+; ARM: t1:
+; ARM: sub r0, r1, #6, 2
+; ARM: movgt r0, r1
+
+; T2: t1:
+; T2: mvn r0, #-2147483648
+; T2: add r0, r1
+; T2: movgt r0, r1
+ %tmp1 = icmp sgt i32 %c, 10
+ %tmp2 = select i1 %tmp1, i32 0, i32 2147483647
+ %tmp3 = add i32 %tmp2, %b
+ ret i32 %tmp3
}
define i32 @t2(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
- %tmp1 = icmp sgt i32 %c, 10
- %tmp2 = select i1 %tmp1, i32 0, i32 10
- %tmp3 = sub i32 %b, %tmp2
- ret i32 %tmp3
+; ARM: t2:
+; ARM: sub r0, r1, #10
+; ARM: movgt r0, r1
+
+; T2: t2:
+; T2: sub.w r0, r1, #10
+; T2: movgt r0, r1
+ %tmp1 = icmp sgt i32 %c, 10
+ %tmp2 = select i1 %tmp1, i32 0, i32 10
+ %tmp3 = sub i32 %b, %tmp2
+ ret i32 %tmp3
+}
+
+define i32 @t3(i32 %a, i32 %b, i32 %x, i32 %y) nounwind {
+; ARM: t3:
+; ARM: mvnlt r2, #0
+; ARM: and r0, r2, r3
+
+; T2: t3:
+; T2: movlt.w r2, #-1
+; T2: and.w r0, r2, r3
+ %cond = icmp slt i32 %a, %b
+ %z = select i1 %cond, i32 -1, i32 %x
+ %s = and i32 %z, %y
+ ret i32 %s
+}
+
+define i32 @t4(i32 %a, i32 %b, i32 %x, i32 %y) nounwind {
+; ARM: t4:
+; ARM: movlt r2, #0
+; ARM: orr r0, r2, r3
+
+; T2: t4:
+; T2: movlt r2, #0
+; T2: orr.w r0, r2, r3
+ %cond = icmp slt i32 %a, %b
+ %z = select i1 %cond, i32 0, i32 %x
+ %s = or i32 %z, %y
+ ret i32 %s
}
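
t3 and t4 cover the select shapes where the constant arm is the identity
element of the following bitwise op (-1 for and, 0 for or), so the whole
select can become one predicated move feeding that op. The source patterns
being matched, sketched in C:

    #include <stdint.h>

    /* (a < b ? -1 : x) & y: a predicated "set to all ones" (mvnlt)
       suffices, since z & y == y when z is -1. */
    int32_t sel_and(int32_t a, int32_t b, int32_t x, int32_t y) {
        int32_t z = (a < b) ? -1 : x;
        return z & y;
    }

    /* (a < b ? 0 : x) | y: a predicated "set to zero" (movlt). */
    int32_t sel_or(int32_t a, int32_t b, int32_t x, int32_t y) {
        int32_t z = (a < b) ? 0 : x;
        return z | y;
    }
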
diff --git a/test/CodeGen/ARM/shifter_operand.ll b/test/CodeGen/ARM/shifter_operand.ll
index 2bbe9fd2602c..01e3a922f656 100644
--- a/test/CodeGen/ARM/shifter_operand.ll
+++ b/test/CodeGen/ARM/shifter_operand.ll
@@ -1,18 +1,72 @@
-; RUN: llc < %s -march=arm | grep add | grep lsl
-; RUN: llc < %s -march=arm | grep bic | grep asr
+; RUN: llc < %s -mtriple=armv7-apple-darwin -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8
+; RUN: llc < %s -mtriple=armv7-apple-darwin -mcpu=cortex-a9 | FileCheck %s -check-prefix=A9
+; rdar://8576755
define i32 @test1(i32 %X, i32 %Y, i8 %sh) {
- %shift.upgrd.1 = zext i8 %sh to i32 ; <i32> [#uses=1]
- %A = shl i32 %Y, %shift.upgrd.1 ; <i32> [#uses=1]
- %B = add i32 %X, %A ; <i32> [#uses=1]
+; A8: test1:
+; A8: add r0, r0, r1, lsl r2
+
+; A9: test1:
+; A9: add r0, r0, r1, lsl r2
+ %shift.upgrd.1 = zext i8 %sh to i32
+ %A = shl i32 %Y, %shift.upgrd.1
+ %B = add i32 %X, %A
ret i32 %B
}
define i32 @test2(i32 %X, i32 %Y, i8 %sh) {
- %shift.upgrd.2 = zext i8 %sh to i32 ; <i32> [#uses=1]
- %A = ashr i32 %Y, %shift.upgrd.2 ; <i32> [#uses=1]
- %B = xor i32 %A, -1 ; <i32> [#uses=1]
- %C = and i32 %X, %B ; <i32> [#uses=1]
+; A8: test2:
+; A8: bic r0, r0, r1, asr r2
+
+; A9: test2:
+; A9: bic r0, r0, r1, asr r2
+ %shift.upgrd.2 = zext i8 %sh to i32
+ %A = ashr i32 %Y, %shift.upgrd.2
+ %B = xor i32 %A, -1
+ %C = and i32 %X, %B
ret i32 %C
}
+
+define i32 @test3(i32 %base, i32 %base2, i32 %offset) {
+entry:
+; A8: test3:
+; A8: ldr r0, [r0, r2, lsl #2]
+; A8: ldr r1, [r1, r2, lsl #2]
+
+; lsl #2 is free
+; A9: test3:
+; A9: ldr r0, [r0, r2, lsl #2]
+; A9: ldr r1, [r1, r2, lsl #2]
+ %tmp1 = shl i32 %offset, 2
+ %tmp2 = add i32 %base, %tmp1
+ %tmp3 = inttoptr i32 %tmp2 to i32*
+ %tmp4 = add i32 %base2, %tmp1
+ %tmp5 = inttoptr i32 %tmp4 to i32*
+ %tmp6 = load i32* %tmp3
+ %tmp7 = load i32* %tmp5
+ %tmp8 = add i32 %tmp7, %tmp6
+ ret i32 %tmp8
+}
+
+declare i8* @malloc(...)
+
+define fastcc void @test4() nounwind {
+entry:
+; A8: test4:
+; A8: ldr r1, [r0, r0, lsl #2]
+; A8: str r1, [r0, r0, lsl #2]
+
+; A9: test4:
+; A9: add r0, r0, r0, lsl #2
+; A9: ldr r1, [r0]
+; A9: str r1, [r0]
+ %0 = tail call i8* (...)* @malloc(i32 undef) nounwind
+ %1 = bitcast i8* %0 to i32*
+ %2 = sext i16 undef to i32
+ %3 = getelementptr inbounds i32* %1, i32 %2
+ %4 = load i32* %3, align 4
+ %5 = add nsw i32 %4, 1
+ store i32 %5, i32* %3, align 4
+ ret void
+}
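
test3 checks that the shift from the i32 element size folds into the load
addressing mode on both cores ("lsl #2 is free"), while test4 is a case
where the A9 schedule prefers materializing the address once with an add
and reusing it for the ldr/str pair. At the source level both are plain
scaled indexing, e.g.:

    /* The "offset << 2" implied by int indexing can fold into the
       addressing mode as [rbase, rindex, lsl #2]. */
    int sum_two(const int *base, const int *base2, int offset) {
        return base[offset] + base2[offset];
    }
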
diff --git a/test/CodeGen/ARM/spill-q.ll b/test/CodeGen/ARM/spill-q.ll
index ae1ba2f73825..bf4e55cb06c4 100644
--- a/test/CodeGen/ARM/spill-q.ll
+++ b/test/CodeGen/ARM/spill-q.ll
@@ -15,11 +15,34 @@ define void @aaa(%quuz* %this, i8* %block) {
; CHECK: vst1.64 {{.*}}sp, :128
; CHECK: vld1.64 {{.*}}sp, :128
entry:
- %0 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=1]
+ %aligned_vec = alloca <4 x float>, align 16
+ %"alloca point" = bitcast i32 0 to i32
+ %vecptr = bitcast <4 x float>* %aligned_vec to i8*
+ %0 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %vecptr, i32 1) nounwind ; <<4 x float>> [#uses=1]
store float 6.300000e+01, float* undef, align 4
%1 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=1]
store float 0.000000e+00, float* undef, align 4
%2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=1]
+ %ld3 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+ store float 0.000000e+00, float* undef, align 4
+ %ld4 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+ store float 0.000000e+00, float* undef, align 4
+ %ld5 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+ store float 0.000000e+00, float* undef, align 4
+ %ld6 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+ store float 0.000000e+00, float* undef, align 4
+ %ld7 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+ store float 0.000000e+00, float* undef, align 4
+ %ld8 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+ store float 0.000000e+00, float* undef, align 4
+ %ld9 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+ store float 0.000000e+00, float* undef, align 4
+ %ld10 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+ store float 0.000000e+00, float* undef, align 4
+ %ld11 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+ store float 0.000000e+00, float* undef, align 4
+ %ld12 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+ store float 0.000000e+00, float* undef, align 4
%val173 = load <4 x float>* undef ; <<4 x float>> [#uses=1]
br label %bb4
@@ -44,7 +67,16 @@ bb4: ; preds = %bb193, %entry
%18 = fmul <4 x float> %17, %val173 ; <<4 x float>> [#uses=1]
%19 = shufflevector <4 x float> %18, <4 x float> undef, <2 x i32> <i32 2, i32 3> ; <<2 x float>> [#uses=1]
%20 = shufflevector <2 x float> %19, <2 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
- %21 = fadd <4 x float> zeroinitializer, %20 ; <<4 x float>> [#uses=2]
+ %tmp1 = fadd <4 x float> %20, %ld3
+ %tmp2 = fadd <4 x float> %tmp1, %ld4
+ %tmp3 = fadd <4 x float> %tmp2, %ld5
+ %tmp4 = fadd <4 x float> %tmp3, %ld6
+ %tmp5 = fadd <4 x float> %tmp4, %ld7
+ %tmp6 = fadd <4 x float> %tmp5, %ld8
+ %tmp7 = fadd <4 x float> %tmp6, %ld9
+ %tmp8 = fadd <4 x float> %tmp7, %ld10
+ %tmp9 = fadd <4 x float> %tmp8, %ld11
+ %21 = fadd <4 x float> %tmp9, %ld12
%22 = fcmp ogt <4 x float> %besterror.0.2264, %21 ; <<4 x i1>> [#uses=0]
%tmp = extractelement <4 x i1> %22, i32 0
br i1 %tmp, label %bb193, label %bb186
diff --git a/test/CodeGen/ARM/stm.ll b/test/CodeGen/ARM/stm.ll
index 22a7ecb4aa28..2f5fadbee28a 100644
--- a/test/CodeGen/ARM/stm.ll
+++ b/test/CodeGen/ARM/stm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin -mattr=+v6,+vfp2 | grep stm | count 2
+; RUN: llc < %s -mtriple=arm-apple-darwin -mattr=+v6,+vfp2 | FileCheck %s
@"\01LC" = internal constant [32 x i8] c"Boolean Not: %d %d %d %d %d %d\0A\00", section "__TEXT,__cstring,cstring_literals" ; <[32 x i8]*> [#uses=1]
@"\01LC1" = internal constant [26 x i8] c"Bitwise Not: %d %d %d %d\0A\00", section "__TEXT,__cstring,cstring_literals" ; <[26 x i8]*> [#uses=1]
@@ -7,6 +7,9 @@ declare i32 @printf(i8* nocapture, ...) nounwind
define i32 @main() nounwind {
entry:
+; CHECK: main
+; CHECK: push
+; CHECK: stmib
%0 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([26 x i8]* @"\01LC1", i32 0, i32 0), i32 -2, i32 -3, i32 2, i32 -6) nounwind ; <i32> [#uses=0]
%1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([32 x i8]* @"\01LC", i32 0, i32 0), i32 0, i32 1, i32 0, i32 1, i32 0, i32 1) nounwind ; <i32> [#uses=0]
ret i32 0
diff --git a/test/CodeGen/ARM/str_pre-2.ll b/test/CodeGen/ARM/str_pre-2.ll
index 553cd64fce94..465c7e676c56 100644
--- a/test/CodeGen/ARM/str_pre-2.ll
+++ b/test/CodeGen/ARM/str_pre-2.ll
@@ -1,10 +1,11 @@
-; RUN: llc < %s -mtriple=arm-linux-gnu | grep {str.*\\!}
-; RUN: llc < %s -mtriple=arm-linux-gnu | grep {ldr.*\\\[.*\], #4}
+; RUN: llc < %s -mtriple=armv6-linux-gnu | FileCheck %s
@b = external global i64*
define i64 @t(i64 %a) nounwind readonly {
entry:
+; CHECK: str lr, [sp, #-4]!
+; CHECK: ldr lr, [sp], #4
%0 = load i64** @b, align 4
%1 = load i64* %0, align 4
%2 = mul i64 %1, %a
diff --git a/test/CodeGen/ARM/tail-opts.ll b/test/CodeGen/ARM/tail-opts.ll
index 17c8baedbfa8..5b3dce386bb7 100644
--- a/test/CodeGen/ARM/tail-opts.ll
+++ b/test/CodeGen/ARM/tail-opts.ll
@@ -17,13 +17,16 @@ declare i8* @choose(i8*, i8*)
; CHECK: tail_duplicate_me:
; CHECK: qux
; CHECK: qux
-; CHECK: ldr r{{.}}, LCPI
+; CHECK: movw r{{[0-9]+}}, :lower16:_GHJK
+; CHECK: movt r{{[0-9]+}}, :upper16:_GHJK
; CHECK: str r
; CHECK-NEXT: bx r
-; CHECK: ldr r{{.}}, LCPI
+; CHECK: movw r{{[0-9]+}}, :lower16:_GHJK
+; CHECK: movt r{{[0-9]+}}, :upper16:_GHJK
; CHECK: str r
; CHECK-NEXT: bx r
-; CHECK: ldr r{{.}}, LCPI
+; CHECK: movw r{{[0-9]+}}, :lower16:_GHJK
+; CHECK: movt r{{[0-9]+}}, :upper16:_GHJK
; CHECK: str r
; CHECK-NEXT: bx r
diff --git a/test/CodeGen/ARM/thumb1-varalloc.ll b/test/CodeGen/ARM/thumb1-varalloc.ll
new file mode 100644
index 000000000000..25093fee225a
--- /dev/null
+++ b/test/CodeGen/ARM/thumb1-varalloc.ll
@@ -0,0 +1,40 @@
+; RUN: llc < %s -mtriple=thumbv6-apple-darwin | FileCheck %s
+; rdar://8819685
+
+@__bar = external hidden global i8*
+@__baz = external hidden global i8*
+
+define i8* @_foo() {
+entry:
+; CHECK: foo:
+
+ %size = alloca i32, align 4
+ %0 = load i8** @__bar, align 4
+ %1 = icmp eq i8* %0, null
+ br i1 %1, label %bb1, label %bb3
+
+bb1:
+ store i32 1026, i32* %size, align 4
+ %2 = alloca [1026 x i8], align 1
+; CHECK: mov r0, sp
+; CHECK: adds r4, r0, r4
+ %3 = getelementptr inbounds [1026 x i8]* %2, i32 0, i32 0
+ %4 = call i32 @_called_func(i8* %3, i32* %size) nounwind
+ %5 = icmp eq i32 %4, 0
+ br i1 %5, label %bb2, label %bb3
+
+bb2:
+ %6 = call i8* @strdup(i8* %3) nounwind
+ store i8* %6, i8** @__baz, align 4
+ br label %bb3
+
+bb3:
+ %.0 = phi i8* [ %0, %entry ], [ %6, %bb2 ], [ %3, %bb1 ]
+; CHECK: subs r4, #5
+; CHECK-NEXT: mov sp, r4
+; CHECK-NEXT: pop {r4, r5, r6, r7, pc}
+ ret i8* %.0
+}
+
+declare noalias i8* @strdup(i8* nocapture) nounwind
+declare i32 @_called_func(i8*, i32*) nounwind
\ No newline at end of file
diff --git a/test/CodeGen/ARM/umulo-32.ll b/test/CodeGen/ARM/umulo-32.ll
new file mode 100644
index 000000000000..aa7d28a62349
--- /dev/null
+++ b/test/CodeGen/ARM/umulo-32.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -mtriple=thumbv6-apple-darwin | FileCheck %s
+
+%umul.ty = type { i32, i1 }
+
+define i32 @func(i32 %a) nounwind {
+; CHECK: func
+; CHECK: muldi3
+ %tmp0 = tail call %umul.ty @llvm.umul.with.overflow.i32(i32 %a, i32 37)
+ %tmp1 = extractvalue %umul.ty %tmp0, 0
+ %tmp2 = select i1 undef, i32 -1, i32 %tmp1
+ ret i32 %tmp2
+}
+
+declare %umul.ty @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
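
Thumb1 has no long-multiply instruction it can use here, so the overflow
bit of llvm.umul.with.overflow.i32 comes from performing the multiply in
64 bits via the __muldi3 libcall (the symbol the CHECK expects) and
testing the high word. The equivalent logic as a C sketch:

    #include <stdint.h>

    /* Detect 32-bit unsigned multiply overflow without umull: widen,
       multiply (a libcall on Thumb1), and test the top 32 bits. */
    uint32_t mul_check(uint32_t a, uint32_t b, int *overflow) {
        uint64_t p = (uint64_t)a * (uint64_t)b;
        *overflow = (uint32_t)(p >> 32) != 0;
        return (uint32_t)p;
    }
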
diff --git a/test/CodeGen/ARM/unaligned_load_store.ll b/test/CodeGen/ARM/unaligned_load_store.ll
index e2794919d9da..b42e11f2c4ab 100644
--- a/test/CodeGen/ARM/unaligned_load_store.ll
+++ b/test/CodeGen/ARM/unaligned_load_store.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -march=arm | FileCheck %s -check-prefix=GENERIC
+; RUN: llc < %s -march=arm -pre-RA-sched=source | FileCheck %s -check-prefix=GENERIC
; RUN: llc < %s -mtriple=armv6-apple-darwin | FileCheck %s -check-prefix=DARWIN_V6
+; RUN: llc < %s -mtriple=armv6-apple-darwin -arm-strict-align | FileCheck %s -check-prefix=GENERIC
; RUN: llc < %s -mtriple=armv6-linux | FileCheck %s -check-prefix=GENERIC
; rdar://7113725
diff --git a/test/CodeGen/ARM/vbits.ll b/test/CodeGen/ARM/vbits.ll
index 293d22938a76..51f9bdf9718b 100644
--- a/test/CodeGen/ARM/vbits.ll
+++ b/test/CodeGen/ARM/vbits.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc < %s -march=arm -mattr=+neon -mcpu=cortex-a8 | FileCheck %s
define <8 x i8> @v_andi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: v_andi8:
@@ -505,3 +505,43 @@ define <4 x i32> @vtstQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
%tmp5 = sext <4 x i1> %tmp4 to <4 x i32>
ret <4 x i32> %tmp5
}
+
+define <8 x i8> @v_orrimm(<8 x i8>* %A) nounwind {
+; CHECK: v_orrimm:
+; CHECK-NOT: vmov
+; CHECK-NOT: vmvn
+; CHECK: vorr
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = or <8 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1>
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @v_orrimmQ(<16 x i8>* %A) nounwind {
+; CHECK: v_orrimmQ
+; CHECK-NOT: vmov
+; CHECK-NOT: vmvn
+; CHECK: vorr
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = or <16 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1>
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i8> @v_bicimm(<8 x i8>* %A) nounwind {
+; CHECK: v_bicimm:
+; CHECK-NOT: vmov
+; CHECK-NOT: vmvn
+; CHECK: vbic
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = and <8 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0 >
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @v_bicimmQ(<16 x i8>* %A) nounwind {
+; CHECK: v_bicimmQ:
+; CHECK-NOT: vmov
+; CHECK-NOT: vmvn
+; CHECK: vbic
+ %tmp1 = load <16 x i8>* %A
+ %tmp3 = and <16 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0 >
+ ret <16 x i8> %tmp3
+}
diff --git a/test/CodeGen/ARM/vceq.ll b/test/CodeGen/ARM/vceq.ll
index e4787518e731..051c349a06a4 100644
--- a/test/CodeGen/ARM/vceq.ll
+++ b/test/CodeGen/ARM/vceq.ll
@@ -79,3 +79,14 @@ define <4 x i32> @vceqQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
}
+
+define <8 x i8> @vceqi8Z(<8 x i8>* %A) nounwind {
+;CHECK: vceqi8Z:
+;CHECK-NOT: vmov
+;CHECK-NOT: vmvn
+;CHECK: vceq.i8
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = icmp eq <8 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
diff --git a/test/CodeGen/ARM/vcge.ll b/test/CodeGen/ARM/vcge.ll
index 2c161113c113..bf5f0b9efb2f 100644
--- a/test/CodeGen/ARM/vcge.ll
+++ b/test/CodeGen/ARM/vcge.ll
@@ -160,3 +160,44 @@ define <4 x i32> @vacgeQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
declare <2 x i32> @llvm.arm.neon.vacged(<2 x float>, <2 x float>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vacgeq(<4 x float>, <4 x float>) nounwind readnone
+
+define <8 x i8> @vcgei8Z(<8 x i8>* %A) nounwind {
+;CHECK: vcgei8Z:
+;CHECK-NOT: vmov
+;CHECK-NOT: vmvn
+;CHECK: vcge.s8
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = icmp sge <8 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <8 x i8> @vclei8Z(<8 x i8>* %A) nounwind {
+;CHECK: vclei8Z:
+;CHECK-NOT: vmov
+;CHECK-NOT: vmvn
+;CHECK: vcle.s8
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = icmp sle <8 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+; Radar 8782191
+; Floating-point comparisons against zero produce results with integer
+; elements, not floating-point elements.
+define void @test_vclez_fp() nounwind optsize {
+;CHECK: test_vclez_fp
+;CHECK: vcle.f32
+entry:
+ %0 = fcmp ole <4 x float> undef, zeroinitializer
+ %1 = sext <4 x i1> %0 to <4 x i16>
+ %2 = add <4 x i16> %1, zeroinitializer
+ %3 = shufflevector <4 x i16> %2, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = add <8 x i16> %3, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %5 = trunc <8 x i16> %4 to <8 x i8>
+ tail call void @llvm.arm.neon.vst1.v8i8(i8* undef, <8 x i8> %5, i32 1)
+ unreachable
+}
+
+declare void @llvm.arm.neon.vst1.v8i8(i8*, <8 x i8>, i32) nounwind
diff --git a/test/CodeGen/ARM/vcgt.ll b/test/CodeGen/ARM/vcgt.ll
index 194093c8418c..c3c4cb356307 100644
--- a/test/CodeGen/ARM/vcgt.ll
+++ b/test/CodeGen/ARM/vcgt.ll
@@ -161,9 +161,9 @@ define <4 x i32> @vacgtQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
; rdar://7923010
define <4 x i32> @vcgt_zext(<4 x float>* %A, <4 x float>* %B) nounwind {
;CHECK: vcgt_zext:
-;CHECK: vcgt.f32 q0
-;CHECK: vmov.i32 q1, #0x1
-;CHECK: vand q0, q0, q1
+;CHECK: vmov.i32 q10, #0x1
+;CHECK: vcgt.f32 q8
+;CHECK: vand q8, q8, q10
%tmp1 = load <4 x float>* %A
%tmp2 = load <4 x float>* %B
%tmp3 = fcmp ogt <4 x float> %tmp1, %tmp2
@@ -173,3 +173,25 @@ define <4 x i32> @vcgt_zext(<4 x float>* %A, <4 x float>* %B) nounwind {
declare <2 x i32> @llvm.arm.neon.vacgtd(<2 x float>, <2 x float>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vacgtq(<4 x float>, <4 x float>) nounwind readnone
+
+define <8 x i8> @vcgti8Z(<8 x i8>* %A) nounwind {
+;CHECK: vcgti8Z:
+;CHECK-NOT: vmov
+;CHECK-NOT: vmvn
+;CHECK: vcgt.s8
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = icmp sgt <8 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <8 x i8> @vclti8Z(<8 x i8>* %A) nounwind {
+;CHECK: vclti8Z:
+;CHECK-NOT: vmov
+;CHECK-NOT: vmvn
+;CHECK: vclt.s8
+ %tmp1 = load <8 x i8>* %A
+ %tmp3 = icmp slt <8 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
diff --git a/test/CodeGen/ARM/vcombine.ll b/test/CodeGen/ARM/vcombine.ll
index e6733051f269..527f93b6637c 100644
--- a/test/CodeGen/ARM/vcombine.ll
+++ b/test/CodeGen/ARM/vcombine.ll
@@ -1,6 +1,9 @@
-; RUN: llc < %s -march=arm -mattr=+neon
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <16 x i8> @vcombine8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+; CHECK: vcombine8
+; CHECK: vmov r0, r1, d16
+; CHECK: vmov r2, r3, d17
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -8,6 +11,9 @@ define <16 x i8> @vcombine8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <8 x i16> @vcombine16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+; CHECK: vcombine16
+; CHECK: vmov r0, r1, d16
+; CHECK: vmov r2, r3, d17
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -15,6 +21,9 @@ define <8 x i16> @vcombine16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <4 x i32> @vcombine32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+; CHECK: vcombine32
+; CHECK: vmov r0, r1, d16
+; CHECK: vmov r2, r3, d17
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
%tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -22,6 +31,9 @@ define <4 x i32> @vcombine32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <4 x float> @vcombinefloat(<2 x float>* %A, <2 x float>* %B) nounwind {
+; CHECK: vcombinefloat
+; CHECK: vmov r0, r1, d16
+; CHECK: vmov r2, r3, d17
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
%tmp3 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -29,8 +41,32 @@ define <4 x float> @vcombinefloat(<2 x float>* %A, <2 x float>* %B) nounwind {
}
define <2 x i64> @vcombine64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+; CHECK: vcombine64
+; CHECK: vmov r0, r1, d16
+; CHECK: vmov r2, r3, d17
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
%tmp3 = shufflevector <1 x i64> %tmp1, <1 x i64> %tmp2, <2 x i32> <i32 0, i32 1>
ret <2 x i64> %tmp3
}
+
+; Check for vget_low and vget_high implemented with shufflevector. PR8411.
+; They should not require storing to the stack.
+
+define <4 x i16> @vget_low16(<8 x i16>* %A) nounwind {
+; CHECK: vget_low16
+; CHECK-NOT: vst
+; CHECK: vmov r0, r1, d16
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i16> %tmp2
+}
+
+define <8 x i8> @vget_high8(<16 x i8>* %A) nounwind {
+; CHECK: vget_high8
+; CHECK-NOT: vst
+; CHECK: vmov r0, r1, d17
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %tmp2
+}
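
The shufflevector-based vget_low/vget_high forms come straight from the
NEON intrinsics, which extract one d-register half of a q register and so
should never need a round trip through the stack. For reference (requires
an ARM compiler providing arm_neon.h):

    #include <arm_neon.h>

    int16x4_t low_half(int16x8_t v)  { return vget_low_s16(v); }
    int8x8_t  high_half(int8x16_t v) { return vget_high_s8(v); }
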
diff --git a/test/CodeGen/ARM/vcvt.ll b/test/CodeGen/ARM/vcvt.ll
index f4cc5368d9aa..c078f493094b 100644
--- a/test/CodeGen/ARM/vcvt.ll
+++ b/test/CodeGen/ARM/vcvt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc < %s -march=arm -mattr=+neon,+fp16 | FileCheck %s
define <2 x i32> @vcvt_f32tos32(<2 x float>* %A) nounwind {
;CHECK: vcvt_f32tos32:
@@ -138,3 +138,21 @@ declare <4 x i32> @llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32(<4 x float>, i32) nounwi
declare <4 x float> @llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32>, i32) nounwind readnone
declare <4 x float> @llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32>, i32) nounwind readnone
+define <4 x float> @vcvt_f16tof32(<4 x i16>* %A) nounwind {
+;CHECK: vcvt_f16tof32:
+;CHECK: vcvt.f32.f16
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = call <4 x float> @llvm.arm.neon.vcvthf2fp(<4 x i16> %tmp1)
+ ret <4 x float> %tmp2
+}
+
+define <4 x i16> @vcvt_f32tof16(<4 x float>* %A) nounwind {
+;CHECK: vcvt_f32tof16:
+;CHECK: vcvt.f16.f32
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = call <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float> %tmp1)
+ ret <4 x i16> %tmp2
+}
+
+declare <4 x float> @llvm.arm.neon.vcvthf2fp(<4 x i16>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float>) nounwind readnone
diff --git a/test/CodeGen/ARM/vdup.ll b/test/CodeGen/ARM/vdup.ll
index a545f6c03d5b..e99fac1f1e67 100644
--- a/test/CodeGen/ARM/vdup.ll
+++ b/test/CodeGen/ARM/vdup.ll
@@ -162,24 +162,6 @@ define <4 x float> @v_shuffledupQfloat(float %A) nounwind {
ret <4 x float> %tmp2
}
-define <2 x float> @v_shuffledupfloat2(float* %A) nounwind {
-;CHECK: v_shuffledupfloat2:
-;CHECK: vdup.32
- %tmp0 = load float* %A
- %tmp1 = insertelement <2 x float> undef, float %tmp0, i32 0
- %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> zeroinitializer
- ret <2 x float> %tmp2
-}
-
-define <4 x float> @v_shuffledupQfloat2(float* %A) nounwind {
-;CHECK: v_shuffledupQfloat2:
-;CHECK: vdup.32
- %tmp0 = load float* %A
- %tmp1 = insertelement <4 x float> undef, float %tmp0, i32 0
- %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> zeroinitializer
- ret <4 x float> %tmp2
-}
-
define <8 x i8> @vduplane8(<8 x i8>* %A) nounwind {
;CHECK: vduplane8:
;CHECK: vdup.8
diff --git a/test/CodeGen/ARM/vector-DAGCombine.ll b/test/CodeGen/ARM/vector-DAGCombine.ll
new file mode 100644
index 000000000000..3ab0cfcbbc77
--- /dev/null
+++ b/test/CodeGen/ARM/vector-DAGCombine.ll
@@ -0,0 +1,107 @@
+; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
+
+; PR7158
+define i32 @test_pr7158() nounwind {
+bb.nph55.bb.nph55.split_crit_edge:
+ br label %bb3
+
+bb3: ; preds = %bb3, %bb.nph55.bb.nph55.split_crit_edge
+ br i1 undef, label %bb.i19, label %bb3
+
+bb.i19: ; preds = %bb.i19, %bb3
+ %0 = insertelement <4 x float> undef, float undef, i32 3 ; <<4 x float>> [#uses=3]
+ %1 = fmul <4 x float> %0, %0 ; <<4 x float>> [#uses=1]
+ %2 = bitcast <4 x float> %1 to <2 x double> ; <<2 x double>> [#uses=0]
+ %3 = fmul <4 x float> %0, undef ; <<4 x float>> [#uses=0]
+ br label %bb.i19
+}
+
+; Check that the DAG combiner does not arbitrarily modify BUILD_VECTORs
+; after legalization.
+define void @test_illegal_build_vector() nounwind {
+entry:
+ store <2 x i64> undef, <2 x i64>* undef, align 16
+ %0 = load <16 x i8>* undef, align 16 ; <<16 x i8>> [#uses=1]
+ %1 = or <16 x i8> zeroinitializer, %0 ; <<16 x i8>> [#uses=1]
+ store <16 x i8> %1, <16 x i8>* undef, align 16
+ ret void
+}
+
+; Radar 8407927: Make sure that VMOVRRD gets optimized away when the result is
+; converted back to be used as a vector type.
+; CHECK: test_vmovrrd_combine
+define <4 x i32> @test_vmovrrd_combine() nounwind {
+entry:
+ br i1 undef, label %bb1, label %bb2
+
+bb1:
+ %0 = bitcast <2 x i64> zeroinitializer to <2 x double>
+ %1 = extractelement <2 x double> %0, i32 0
+ %2 = bitcast double %1 to i64
+ %3 = insertelement <1 x i64> undef, i64 %2, i32 0
+; CHECK-NOT: vmov s
+; CHECK: vext.8
+ %4 = shufflevector <1 x i64> %3, <1 x i64> undef, <2 x i32> <i32 0, i32 1>
+ %tmp2006.3 = bitcast <2 x i64> %4 to <16 x i8>
+ %5 = shufflevector <16 x i8> %tmp2006.3, <16 x i8> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+ %tmp2004.3 = bitcast <16 x i8> %5 to <4 x i32>
+ br i1 undef, label %bb2, label %bb1
+
+bb2:
+ %result = phi <4 x i32> [ undef, %entry ], [ %tmp2004.3, %bb1 ]
+ ret <4 x i32> %result
+}
+
+; Test trying to do a ShiftCombine on illegal types.
+; The vector should be split first.
+define void @lshrIllegalType(<8 x i32>* %A) nounwind {
+ %tmp1 = load <8 x i32>* %A
+ %tmp2 = lshr <8 x i32> %tmp1, < i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+ store <8 x i32> %tmp2, <8 x i32>* %A
+ ret void
+}
+
+; Test folding a binary vector operation with constant BUILD_VECTOR
+; operands with i16 elements.
+define void @test_i16_constant_fold() nounwind optsize {
+entry:
+ %0 = sext <4 x i1> zeroinitializer to <4 x i16>
+ %1 = add <4 x i16> %0, zeroinitializer
+ %2 = shufflevector <4 x i16> %1, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %3 = add <8 x i16> %2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %4 = trunc <8 x i16> %3 to <8 x i8>
+ tail call void @llvm.arm.neon.vst1.v8i8(i8* undef, <8 x i8> %4, i32 1)
+ unreachable
+}
+
+declare void @llvm.arm.neon.vst1.v8i8(i8*, <8 x i8>, i32) nounwind
+
+; Test that loads and stores of i64 vector elements are handled as f64 values
+; so they are not split up into i32 values. Radar 8755338.
+define void @i64_buildvector(i64* %ptr, <2 x i64>* %vp) nounwind {
+; CHECK: i64_buildvector
+; CHECK: vldr.64
+ %t0 = load i64* %ptr, align 4
+ %t1 = insertelement <2 x i64> undef, i64 %t0, i32 0
+ store <2 x i64> %t1, <2 x i64>* %vp
+ ret void
+}
+
+define void @i64_insertelement(i64* %ptr, <2 x i64>* %vp) nounwind {
+; CHECK: i64_insertelement
+; CHECK: vldr.64
+ %t0 = load i64* %ptr, align 4
+ %vec = load <2 x i64>* %vp
+ %t1 = insertelement <2 x i64> %vec, i64 %t0, i32 0
+ store <2 x i64> %t1, <2 x i64>* %vp
+ ret void
+}
+
+define void @i64_extractelement(i64* %ptr, <2 x i64>* %vp) nounwind {
+; CHECK: i64_extractelement
+; CHECK: vstr.64
+ %vec = load <2 x i64>* %vp
+ %t1 = extractelement <2 x i64> %vec, i32 0
+ store i64 %t1, i64* %ptr
+ ret void
+}
diff --git a/test/CodeGen/ARM/vext.ll b/test/CodeGen/ARM/vext.ll
index e460a84f6265..55abefef0fa7 100644
--- a/test/CodeGen/ARM/vext.ll
+++ b/test/CodeGen/ARM/vext.ll
@@ -74,3 +74,62 @@ define <16 x i8> @test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
ret <16 x i8> %tmp3
}
+; Tests for the ReconstructShuffle function. Indices have to be carefully
+; chosen to reach the lowering phase as a BUILD_VECTOR.
+
+; One vector needs vext; the other can be handled by extract_subvector.
+; Also checks that interleaving of the sources is handled correctly.
+; In essence: a vext is used on %A, and something saner than a stack
+; load/store is used for the final result.
+define <4 x i16> @test_interleaved(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK: test_interleaved:
+;CHECK: vext.16
+;CHECK-NOT: vext.16
+;CHECK: vzip.16
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <4 x i32> <i32 3, i32 8, i32 5, i32 9>
+ ret <4 x i16> %tmp3
+}
+
+; An undef in the shuffle index list should still be optimizable.
+define <4 x i16> @test_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK: test_undef:
+;CHECK: vzip.16
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <4 x i32> <i32 undef, i32 8, i32 5, i32 9>
+ ret <4 x i16> %tmp3
+}
+
+; We should ignore a build_vector with more than two sources.
+; Use the illegal <32 x i16> type to produce such a shuffle after type legalization.
+; Check for the fallback to stack expansion.
+define <4 x i16> @test_multisource(<32 x i16>* %B) nounwind {
+;CHECK: test_multisource:
+;CHECK: vst1.16
+ %tmp1 = load <32 x i16>* %B
+ %tmp2 = shufflevector <32 x i16> %tmp1, <32 x i16> undef, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
+ ret <4 x i16> %tmp2
+}
+
+; We don't handle shuffles using more than half of a 128-bit vector.
+; Again, check for the fallback to stack expansion.
+define <4 x i16> @test_largespan(<8 x i16>* %B) nounwind {
+;CHECK: test_largespan:
+;CHECK: vst1.16
+ %tmp1 = load <8 x i16>* %B
+ %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ret <4 x i16> %tmp2
+}
+
+; The actual shuffle code only handles some cases; make sure we check
+; for this rather than blindly emitting a VECTOR_SHUFFLE (an infinite
+; lowering loop can result otherwise).
+define <8 x i8> @test_illegal(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK: test_illegal:
+;CHECK: vst1.8
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <8 x i32> <i32 0, i32 7, i32 5, i32 25, i32 3, i32 2, i32 2, i32 26>
+ ret <8 x i8> %tmp3
+}
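
A rough source-level analogue of the interleaved case above, assuming Clang's
__builtin_shufflevector (the helper name is made up):

    #include <arm_neon.h>

    /* Indices 8 and 9 select lanes 0 and 1 of b; the expected lowering
       is one vext.16 plus a vzip.16, not a round trip through the stack. */
    int16x4_t interleave(int16x8_t a, int16x8_t b) {
        return __builtin_shufflevector(a, b, 3, 8, 5, 9);
    }
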
diff --git a/test/CodeGen/ARM/vget_lane.ll b/test/CodeGen/ARM/vget_lane.ll
index 05e7f5090952..1fc885d61372 100644
--- a/test/CodeGen/ARM/vget_lane.ll
+++ b/test/CodeGen/ARM/vget_lane.ll
@@ -96,13 +96,14 @@ define i32 @vgetQ_lanei32(<4 x i32>* %A) nounwind {
define arm_aapcs_vfpcc void @test_vget_laneu16() nounwind {
entry:
-; CHECK: vmov.u16 r0, d0[1]
+; CHECK: vmov.u16 r0, d{{.*}}[1]
%arg0_uint16x4_t = alloca <4 x i16> ; <<4 x i16>*> [#uses=1]
%out_uint16_t = alloca i16 ; <i16*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%0 = load <4 x i16>* %arg0_uint16x4_t, align 8 ; <<4 x i16>> [#uses=1]
%1 = extractelement <4 x i16> %0, i32 1 ; <i16> [#uses=1]
- store i16 %1, i16* %out_uint16_t, align 2
+ %2 = add i16 %1, %1
+ store i16 %2, i16* %out_uint16_t, align 2
br label %return
return: ; preds = %entry
@@ -111,13 +112,14 @@ return: ; preds = %entry
define arm_aapcs_vfpcc void @test_vget_laneu8() nounwind {
entry:
-; CHECK: vmov.u8 r0, d0[1]
+; CHECK: vmov.u8 r0, d{{.*}}[1]
%arg0_uint8x8_t = alloca <8 x i8> ; <<8 x i8>*> [#uses=1]
%out_uint8_t = alloca i8 ; <i8*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%0 = load <8 x i8>* %arg0_uint8x8_t, align 8 ; <<8 x i8>> [#uses=1]
%1 = extractelement <8 x i8> %0, i32 1 ; <i8> [#uses=1]
- store i8 %1, i8* %out_uint8_t, align 1
+ %2 = add i8 %1, %1
+ store i8 %2, i8* %out_uint8_t, align 1
br label %return
return: ; preds = %entry
@@ -126,13 +128,14 @@ return: ; preds = %entry
define arm_aapcs_vfpcc void @test_vgetQ_laneu16() nounwind {
entry:
-; CHECK: vmov.u16 r0, d0[1]
+; CHECK: vmov.u16 r0, d{{.*}}[1]
%arg0_uint16x8_t = alloca <8 x i16> ; <<8 x i16>*> [#uses=1]
%out_uint16_t = alloca i16 ; <i16*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%0 = load <8 x i16>* %arg0_uint16x8_t, align 16 ; <<8 x i16>> [#uses=1]
%1 = extractelement <8 x i16> %0, i32 1 ; <i16> [#uses=1]
- store i16 %1, i16* %out_uint16_t, align 2
+ %2 = add i16 %1, %1
+ store i16 %2, i16* %out_uint16_t, align 2
br label %return
return: ; preds = %entry
@@ -141,13 +144,14 @@ return: ; preds = %entry
define arm_aapcs_vfpcc void @test_vgetQ_laneu8() nounwind {
entry:
-; CHECK: vmov.u8 r0, d0[1]
+; CHECK: vmov.u8 r0, d{{.*}}[1]
%arg0_uint8x16_t = alloca <16 x i8> ; <<16 x i8>*> [#uses=1]
%out_uint8_t = alloca i8 ; <i8*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%0 = load <16 x i8>* %arg0_uint8x16_t, align 16 ; <<16 x i8>> [#uses=1]
%1 = extractelement <16 x i8> %0, i32 1 ; <i8> [#uses=1]
- store i8 %1, i8* %out_uint8_t, align 1
+ %2 = add i8 %1, %1
+ store i8 %2, i8* %out_uint8_t, align 1
br label %return
return: ; preds = %entry
@@ -210,3 +214,20 @@ entry:
%0 = insertelement <2 x float> %arg1_float32x2_t, float %arg0_float32_t, i32 1 ; <<2 x float>> [#uses=1]
ret <2 x float> %0
}
+
+; The LLVM extractelement instruction does not require that the lane number
+; be an immediate constant. Make sure a variable lane number is handled.
+
+define i32 @vget_variable_lanes8(<8 x i8>* %A, i32 %B) nounwind {
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = extractelement <8 x i8> %tmp1, i32 %B
+ %tmp3 = sext i8 %tmp2 to i32
+ ret i32 %tmp3
+}
+
+define i32 @vgetQ_variable_lanei32(<4 x i32>* %A, i32 %B) nounwind {
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = add <4 x i32> %tmp1, %tmp1
+ %tmp3 = extractelement <4 x i32> %tmp2, i32 %B
+ ret i32 %tmp3
+}
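
In GCC/Clang vector-extension C, the variable-lane case looks roughly like
this (made-up helper). A runtime lane index cannot be encoded in a single
lane-indexed vmov, so the extract has to be lowered through memory or
address arithmetic:

    typedef int v4i32 __attribute__((vector_size(16)));

    int get_lane(v4i32 v, int i) {
        return v[i];   /* i is not a compile-time constant */
    }
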
diff --git a/test/CodeGen/ARM/vld1.ll b/test/CodeGen/ARM/vld1.ll
index 2488e8a0d0cc..c886125a2fb0 100644
--- a/test/CodeGen/ARM/vld1.ll
+++ b/test/CodeGen/ARM/vld1.ll
@@ -2,8 +2,9 @@
define <8 x i8> @vld1i8(i8* %A) nounwind {
;CHECK: vld1i8:
-;CHECK: vld1.8
- %tmp1 = call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %A, i32 1)
+;Check the alignment value. Max for this instruction is 64 bits:
+;CHECK: vld1.8 {d16}, [r0, :64]
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %A, i32 16)
ret <8 x i8> %tmp1
}
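
A note on the alignment checks in this file and the other vldN/vstN tests:
the trailing i32 argument of the NEON load/store intrinsics is an alignment
in bytes, while the `:n` address qualifier in the output is in bits, capped
at what the instruction encoding supports (and omitted when too small to
encode). A minimal sketch of that mapping, as these CHECK lines assume it
(not the actual LLVM code):

    /* align_bytes: intrinsic argument; max_bits: encoding limit (64/128/256) */
    unsigned printed_alignment(unsigned align_bytes, unsigned max_bits) {
        unsigned bits = align_bytes * 8;
        return bits > max_bits ? max_bits : bits;  /* e.g. 16 bytes -> :64 above */
    }
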
@@ -15,6 +16,18 @@ define <4 x i16> @vld1i16(i16* %A) nounwind {
ret <4 x i16> %tmp1
}
+;Check for a post-increment updating load.
+define <4 x i16> @vld1i16_update(i16** %ptr) nounwind {
+;CHECK: vld1i16_update:
+;CHECK: vld1.16 {d16}, [r1]!
+ %A = load i16** %ptr
+ %tmp0 = bitcast i16* %A to i8*
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %tmp0, i32 1)
+ %tmp2 = getelementptr i16* %A, i32 4
+ store i16* %tmp2, i16** %ptr
+ ret <4 x i16> %tmp1
+}
+
define <2 x i32> @vld1i32(i32* %A) nounwind {
;CHECK: vld1i32:
;CHECK: vld1.32
@@ -23,6 +36,18 @@ define <2 x i32> @vld1i32(i32* %A) nounwind {
ret <2 x i32> %tmp1
}
+;Check for a post-increment updating load with register increment.
+define <2 x i32> @vld1i32_update(i32** %ptr, i32 %inc) nounwind {
+;CHECK: vld1i32_update:
+;CHECK: vld1.32 {d16}, [r2], r1
+ %A = load i32** %ptr
+ %tmp0 = bitcast i32* %A to i8*
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %tmp0, i32 1)
+ %tmp2 = getelementptr i32* %A, i32 %inc
+ store i32* %tmp2, i32** %ptr
+ ret <2 x i32> %tmp1
+}
+
define <2 x float> @vld1f(float* %A) nounwind {
;CHECK: vld1f:
;CHECK: vld1.32
@@ -41,16 +66,29 @@ define <1 x i64> @vld1i64(i64* %A) nounwind {
define <16 x i8> @vld1Qi8(i8* %A) nounwind {
;CHECK: vld1Qi8:
-;CHECK: vld1.8
- %tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %A, i32 1)
+;Check the alignment value. Max for this instruction is 128 bits:
+;CHECK: vld1.8 {d16, d17}, [r0, :64]
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %A, i32 8)
+ ret <16 x i8> %tmp1
+}
+
+;Check for a post-increment updating load.
+define <16 x i8> @vld1Qi8_update(i8** %ptr) nounwind {
+;CHECK: vld1Qi8_update:
+;CHECK: vld1.8 {d16, d17}, [r1, :64]!
+ %A = load i8** %ptr
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %A, i32 8)
+ %tmp2 = getelementptr i8* %A, i32 16
+ store i8* %tmp2, i8** %ptr
ret <16 x i8> %tmp1
}
define <8 x i16> @vld1Qi16(i16* %A) nounwind {
;CHECK: vld1Qi16:
-;CHECK: vld1.16
+;Check the alignment value. Max for this instruction is 128 bits:
+;CHECK: vld1.16 {d16, d17}, [r0, :128]
%tmp0 = bitcast i16* %A to i8*
- %tmp1 = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %tmp0, i32 1)
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %tmp0, i32 32)
ret <8 x i16> %tmp1
}
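
The *_update tests check that a vector load through *ptr followed by
advancing *ptr by the amount loaded folds into one post-incrementing
instruction. An intrinsics-level sketch of the source pattern (made-up
helper, assuming arm_neon.h):

    #include <arm_neon.h>

    int16x4_t load_and_advance(const int16_t **ptr) {
        int16x4_t v = vld1_s16(*ptr);  /* vld1.16 {d16}, [r1]! */
        *ptr += 4;                     /* folded into the load */
        return v;
    }
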
diff --git a/test/CodeGen/ARM/vld2.ll b/test/CodeGen/ARM/vld2.ll
index 811f6e6db96f..29b379465db5 100644
--- a/test/CodeGen/ARM/vld2.ll
+++ b/test/CodeGen/ARM/vld2.ll
@@ -13,8 +13,9 @@
define <8 x i8> @vld2i8(i8* %A) nounwind {
;CHECK: vld2i8:
-;CHECK: vld2.8
- %tmp1 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2.v8i8(i8* %A, i32 1)
+;Check the alignment value. Max for this instruction is 128 bits:
+;CHECK: vld2.8 {d16, d17}, [r0, :64]
+ %tmp1 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2.v8i8(i8* %A, i32 8)
%tmp2 = extractvalue %struct.__neon_int8x8x2_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp1, 1
%tmp4 = add <8 x i8> %tmp2, %tmp3
@@ -23,9 +24,10 @@ define <8 x i8> @vld2i8(i8* %A) nounwind {
define <4 x i16> @vld2i16(i16* %A) nounwind {
;CHECK: vld2i16:
-;CHECK: vld2.16
+;Check the alignment value. Max for this instruction is 128 bits:
+;CHECK: vld2.16 {d16, d17}, [r0, :128]
%tmp0 = bitcast i16* %A to i8*
- %tmp1 = call %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2.v4i16(i8* %tmp0, i32 1)
+ %tmp1 = call %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2.v4i16(i8* %tmp0, i32 32)
%tmp2 = extractvalue %struct.__neon_int16x4x2_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int16x4x2_t %tmp1, 1
%tmp4 = add <4 x i16> %tmp2, %tmp3
@@ -54,11 +56,27 @@ define <2 x float> @vld2f(float* %A) nounwind {
ret <2 x float> %tmp4
}
+;Check for a post-increment updating load.
+define <2 x float> @vld2f_update(float** %ptr) nounwind {
+;CHECK: vld2f_update:
+;CHECK: vld2.32 {d16, d17}, [r1]!
+ %A = load float** %ptr
+ %tmp0 = bitcast float* %A to i8*
+ %tmp1 = call %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2.v2f32(i8* %tmp0, i32 1)
+ %tmp2 = extractvalue %struct.__neon_float32x2x2_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_float32x2x2_t %tmp1, 1
+ %tmp4 = fadd <2 x float> %tmp2, %tmp3
+ %tmp5 = getelementptr float* %A, i32 4
+ store float* %tmp5, float** %ptr
+ ret <2 x float> %tmp4
+}
+
define <1 x i64> @vld2i64(i64* %A) nounwind {
;CHECK: vld2i64:
-;CHECK: vld1.64
+;Check the alignment value. Max for this instruction is 128 bits:
+;CHECK: vld1.64 {d16, d17}, [r0, :128]
%tmp0 = bitcast i64* %A to i8*
- %tmp1 = call %struct.__neon_int64x1x2_t @llvm.arm.neon.vld2.v1i64(i8* %tmp0, i32 1)
+ %tmp1 = call %struct.__neon_int64x1x2_t @llvm.arm.neon.vld2.v1i64(i8* %tmp0, i32 32)
%tmp2 = extractvalue %struct.__neon_int64x1x2_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int64x1x2_t %tmp1, 1
%tmp4 = add <1 x i64> %tmp2, %tmp3
@@ -67,19 +85,35 @@ define <1 x i64> @vld2i64(i64* %A) nounwind {
define <16 x i8> @vld2Qi8(i8* %A) nounwind {
;CHECK: vld2Qi8:
-;CHECK: vld2.8
- %tmp1 = call %struct.__neon_int8x16x2_t @llvm.arm.neon.vld2.v16i8(i8* %A, i32 1)
+;Check the alignment value. Max for this instruction is 256 bits:
+;CHECK: vld2.8 {d16, d17, d18, d19}, [r0, :64]
+ %tmp1 = call %struct.__neon_int8x16x2_t @llvm.arm.neon.vld2.v16i8(i8* %A, i32 8)
+ %tmp2 = extractvalue %struct.__neon_int8x16x2_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_int8x16x2_t %tmp1, 1
+ %tmp4 = add <16 x i8> %tmp2, %tmp3
+ ret <16 x i8> %tmp4
+}
+
+;Check for a post-increment updating load with register increment.
+define <16 x i8> @vld2Qi8_update(i8** %ptr, i32 %inc) nounwind {
+;CHECK: vld2Qi8_update:
+;CHECK: vld2.8 {d16, d17, d18, d19}, [r2, :128], r1
+ %A = load i8** %ptr
+ %tmp1 = call %struct.__neon_int8x16x2_t @llvm.arm.neon.vld2.v16i8(i8* %A, i32 16)
%tmp2 = extractvalue %struct.__neon_int8x16x2_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x16x2_t %tmp1, 1
%tmp4 = add <16 x i8> %tmp2, %tmp3
+ %tmp5 = getelementptr i8* %A, i32 %inc
+ store i8* %tmp5, i8** %ptr
ret <16 x i8> %tmp4
}
define <8 x i16> @vld2Qi16(i16* %A) nounwind {
;CHECK: vld2Qi16:
-;CHECK: vld2.16
+;Check the alignment value. Max for this instruction is 256 bits:
+;CHECK: vld2.16 {d16, d17, d18, d19}, [r0, :128]
%tmp0 = bitcast i16* %A to i8*
- %tmp1 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2.v8i16(i8* %tmp0, i32 1)
+ %tmp1 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2.v8i16(i8* %tmp0, i32 16)
%tmp2 = extractvalue %struct.__neon_int16x8x2_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int16x8x2_t %tmp1, 1
%tmp4 = add <8 x i16> %tmp2, %tmp3
@@ -88,9 +122,10 @@ define <8 x i16> @vld2Qi16(i16* %A) nounwind {
define <4 x i32> @vld2Qi32(i32* %A) nounwind {
;CHECK: vld2Qi32:
-;CHECK: vld2.32
+;Check the alignment value. Max for this instruction is 256 bits:
+;CHECK: vld2.32 {d16, d17, d18, d19}, [r0, :256]
%tmp0 = bitcast i32* %A to i8*
- %tmp1 = call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp0, i32 1)
+ %tmp1 = call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp0, i32 64)
%tmp2 = extractvalue %struct.__neon_int32x4x2_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int32x4x2_t %tmp1, 1
%tmp4 = add <4 x i32> %tmp2, %tmp3
diff --git a/test/CodeGen/ARM/vld3.ll b/test/CodeGen/ARM/vld3.ll
index 92538c34f5b8..dde530f6df1f 100644
--- a/test/CodeGen/ARM/vld3.ll
+++ b/test/CodeGen/ARM/vld3.ll
@@ -13,8 +13,9 @@
define <8 x i8> @vld3i8(i8* %A) nounwind {
;CHECK: vld3i8:
-;CHECK: vld3.8
- %tmp1 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A, i32 1)
+;Check the alignment value. Max for this instruction is 64 bits:
+;CHECK: vld3.8 {d16, d17, d18}, [r0, :64]
+ %tmp1 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A, i32 32)
%tmp2 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 2
%tmp4 = add <8 x i8> %tmp2, %tmp3
@@ -32,6 +33,21 @@ define <4 x i16> @vld3i16(i16* %A) nounwind {
ret <4 x i16> %tmp4
}
+;Check for a post-increment updating load with register increment.
+define <4 x i16> @vld3i16_update(i16** %ptr, i32 %inc) nounwind {
+;CHECK: vld3i16_update:
+;CHECK: vld3.16 {d16, d17, d18}, [r2], r1
+ %A = load i16** %ptr
+ %tmp0 = bitcast i16* %A to i8*
+ %tmp1 = call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3.v4i16(i8* %tmp0, i32 1)
+ %tmp2 = extractvalue %struct.__neon_int16x4x3_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_int16x4x3_t %tmp1, 2
+ %tmp4 = add <4 x i16> %tmp2, %tmp3
+ %tmp5 = getelementptr i16* %A, i32 %inc
+ store i16* %tmp5, i16** %ptr
+ ret <4 x i16> %tmp4
+}
+
define <2 x i32> @vld3i32(i32* %A) nounwind {
;CHECK: vld3i32:
;CHECK: vld3.32
@@ -56,9 +72,10 @@ define <2 x float> @vld3f(float* %A) nounwind {
define <1 x i64> @vld3i64(i64* %A) nounwind {
;CHECK: vld3i64:
-;CHECK: vld1.64
+;Check the alignment value. Max for this instruction is 64 bits:
+;CHECK: vld1.64 {d16, d17, d18}, [r0, :64]
%tmp0 = bitcast i64* %A to i8*
- %tmp1 = call %struct.__neon_int64x1x3_t @llvm.arm.neon.vld3.v1i64(i8* %tmp0, i32 1)
+ %tmp1 = call %struct.__neon_int64x1x3_t @llvm.arm.neon.vld3.v1i64(i8* %tmp0, i32 16)
%tmp2 = extractvalue %struct.__neon_int64x1x3_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int64x1x3_t %tmp1, 2
%tmp4 = add <1 x i64> %tmp2, %tmp3
@@ -67,9 +84,10 @@ define <1 x i64> @vld3i64(i64* %A) nounwind {
define <16 x i8> @vld3Qi8(i8* %A) nounwind {
;CHECK: vld3Qi8:
-;CHECK: vld3.8
-;CHECK: vld3.8
- %tmp1 = call %struct.__neon_int8x16x3_t @llvm.arm.neon.vld3.v16i8(i8* %A, i32 1)
+;Check the alignment value. Max for this instruction is 64 bits:
+;CHECK: vld3.8 {d16, d18, d20}, [r0, :64]!
+;CHECK: vld3.8 {d17, d19, d21}, [r0, :64]
+ %tmp1 = call %struct.__neon_int8x16x3_t @llvm.arm.neon.vld3.v16i8(i8* %A, i32 32)
%tmp2 = extractvalue %struct.__neon_int8x16x3_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x16x3_t %tmp1, 2
%tmp4 = add <16 x i8> %tmp2, %tmp3
@@ -100,6 +118,22 @@ define <4 x i32> @vld3Qi32(i32* %A) nounwind {
ret <4 x i32> %tmp4
}
+;Check for a post-increment updating load.
+define <4 x i32> @vld3Qi32_update(i32** %ptr) nounwind {
+;CHECK: vld3Qi32_update:
+;CHECK: vld3.32 {d16, d18, d20}, [r1]!
+;CHECK: vld3.32 {d17, d19, d21}, [r1]!
+ %A = load i32** %ptr
+ %tmp0 = bitcast i32* %A to i8*
+ %tmp1 = call %struct.__neon_int32x4x3_t @llvm.arm.neon.vld3.v4i32(i8* %tmp0, i32 1)
+ %tmp2 = extractvalue %struct.__neon_int32x4x3_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_int32x4x3_t %tmp1, 2
+ %tmp4 = add <4 x i32> %tmp2, %tmp3
+ %tmp5 = getelementptr i32* %A, i32 12
+ store i32* %tmp5, i32** %ptr
+ ret <4 x i32> %tmp4
+}
+
define <4 x float> @vld3Qf(float* %A) nounwind {
;CHECK: vld3Qf:
;CHECK: vld3.32
diff --git a/test/CodeGen/ARM/vld4.ll b/test/CodeGen/ARM/vld4.ll
index d1bf957ebadc..59a73db3187e 100644
--- a/test/CodeGen/ARM/vld4.ll
+++ b/test/CodeGen/ARM/vld4.ll
@@ -13,19 +13,35 @@
define <8 x i8> @vld4i8(i8* %A) nounwind {
;CHECK: vld4i8:
-;CHECK: vld4.8
- %tmp1 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8(i8* %A, i32 1)
+;Check the alignment value. Max for this instruction is 256 bits:
+;CHECK: vld4.8 {d16, d17, d18, d19}, [r0, :64]
+ %tmp1 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8(i8* %A, i32 8)
%tmp2 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 2
%tmp4 = add <8 x i8> %tmp2, %tmp3
ret <8 x i8> %tmp4
}
+;Check for a post-increment updating load with register increment.
+define <8 x i8> @vld4i8_update(i8** %ptr, i32 %inc) nounwind {
+;CHECK: vld4i8_update:
+;CHECK: vld4.8 {d16, d17, d18, d19}, [r2, :128], r1
+ %A = load i8** %ptr
+ %tmp1 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8(i8* %A, i32 16)
+ %tmp2 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 2
+ %tmp4 = add <8 x i8> %tmp2, %tmp3
+ %tmp5 = getelementptr i8* %A, i32 %inc
+ store i8* %tmp5, i8** %ptr
+ ret <8 x i8> %tmp4
+}
+
define <4 x i16> @vld4i16(i16* %A) nounwind {
;CHECK: vld4i16:
-;CHECK: vld4.16
+;Check the alignment value. Max for this instruction is 256 bits:
+;CHECK: vld4.16 {d16, d17, d18, d19}, [r0, :128]
%tmp0 = bitcast i16* %A to i8*
- %tmp1 = call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4.v4i16(i8* %tmp0, i32 1)
+ %tmp1 = call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4.v4i16(i8* %tmp0, i32 16)
%tmp2 = extractvalue %struct.__neon_int16x4x4_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int16x4x4_t %tmp1, 2
%tmp4 = add <4 x i16> %tmp2, %tmp3
@@ -34,9 +50,10 @@ define <4 x i16> @vld4i16(i16* %A) nounwind {
define <2 x i32> @vld4i32(i32* %A) nounwind {
;CHECK: vld4i32:
-;CHECK: vld4.32
+;Check the alignment value. Max for this instruction is 256 bits:
+;CHECK: vld4.32 {d16, d17, d18, d19}, [r0, :256]
%tmp0 = bitcast i32* %A to i8*
- %tmp1 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32(i8* %tmp0, i32 1)
+ %tmp1 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32(i8* %tmp0, i32 32)
%tmp2 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 2
%tmp4 = add <2 x i32> %tmp2, %tmp3
@@ -56,9 +73,10 @@ define <2 x float> @vld4f(float* %A) nounwind {
define <1 x i64> @vld4i64(i64* %A) nounwind {
;CHECK: vld4i64:
-;CHECK: vld1.64
+;Check the alignment value. Max for this instruction is 256 bits:
+;CHECK: vld1.64 {d16, d17, d18, d19}, [r0, :256]
%tmp0 = bitcast i64* %A to i8*
- %tmp1 = call %struct.__neon_int64x1x4_t @llvm.arm.neon.vld4.v1i64(i8* %tmp0, i32 1)
+ %tmp1 = call %struct.__neon_int64x1x4_t @llvm.arm.neon.vld4.v1i64(i8* %tmp0, i32 64)
%tmp2 = extractvalue %struct.__neon_int64x1x4_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int64x1x4_t %tmp1, 2
%tmp4 = add <1 x i64> %tmp2, %tmp3
@@ -67,9 +85,10 @@ define <1 x i64> @vld4i64(i64* %A) nounwind {
define <16 x i8> @vld4Qi8(i8* %A) nounwind {
;CHECK: vld4Qi8:
-;CHECK: vld4.8
-;CHECK: vld4.8
- %tmp1 = call %struct.__neon_int8x16x4_t @llvm.arm.neon.vld4.v16i8(i8* %A, i32 1)
+;Check the alignment value. Max for this instruction is 256 bits:
+;CHECK: vld4.8 {d16, d18, d20, d22}, [r0, :256]!
+;CHECK: vld4.8 {d17, d19, d21, d23}, [r0, :256]
+ %tmp1 = call %struct.__neon_int8x16x4_t @llvm.arm.neon.vld4.v16i8(i8* %A, i32 64)
%tmp2 = extractvalue %struct.__neon_int8x16x4_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x16x4_t %tmp1, 2
%tmp4 = add <16 x i8> %tmp2, %tmp3
@@ -78,8 +97,9 @@ define <16 x i8> @vld4Qi8(i8* %A) nounwind {
define <8 x i16> @vld4Qi16(i16* %A) nounwind {
;CHECK: vld4Qi16:
-;CHECK: vld4.16
-;CHECK: vld4.16
+;Check for no alignment specifier.
+;CHECK: vld4.16 {d16, d18, d20, d22}, [r0]!
+;CHECK: vld4.16 {d17, d19, d21, d23}, [r0]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4.v8i16(i8* %tmp0, i32 1)
%tmp2 = extractvalue %struct.__neon_int16x8x4_t %tmp1, 0
@@ -88,6 +108,22 @@ define <8 x i16> @vld4Qi16(i16* %A) nounwind {
ret <8 x i16> %tmp4
}
+;Check for a post-increment updating load.
+define <8 x i16> @vld4Qi16_update(i16** %ptr) nounwind {
+;CHECK: vld4Qi16_update:
+;CHECK: vld4.16 {d16, d18, d20, d22}, [r1, :64]!
+;CHECK: vld4.16 {d17, d19, d21, d23}, [r1, :64]!
+ %A = load i16** %ptr
+ %tmp0 = bitcast i16* %A to i8*
+ %tmp1 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4.v8i16(i8* %tmp0, i32 8)
+ %tmp2 = extractvalue %struct.__neon_int16x8x4_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_int16x8x4_t %tmp1, 2
+ %tmp4 = add <8 x i16> %tmp2, %tmp3
+ %tmp5 = getelementptr i16* %A, i32 32
+ store i16* %tmp5, i16** %ptr
+ ret <8 x i16> %tmp4
+}
+
define <4 x i32> @vld4Qi32(i32* %A) nounwind {
;CHECK: vld4Qi32:
;CHECK: vld4.32
diff --git a/test/CodeGen/ARM/vlddup.ll b/test/CodeGen/ARM/vlddup.ll
new file mode 100644
index 000000000000..d0e9ac3ad3c4
--- /dev/null
+++ b/test/CodeGen/ARM/vlddup.ll
@@ -0,0 +1,212 @@
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+
+define <8 x i8> @vld1dupi8(i8* %A) nounwind {
+;CHECK: vld1dupi8:
+;Check the (default) alignment value.
+;CHECK: vld1.8 {d16[]}, [r0]
+ %tmp1 = load i8* %A, align 8
+ %tmp2 = insertelement <8 x i8> undef, i8 %tmp1, i32 0
+ %tmp3 = shufflevector <8 x i8> %tmp2, <8 x i8> undef, <8 x i32> zeroinitializer
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @vld1dupi16(i16* %A) nounwind {
+;CHECK: vld1dupi16:
+;Check the alignment value. Max for this instruction is 16 bits:
+;CHECK: vld1.16 {d16[]}, [r0, :16]
+ %tmp1 = load i16* %A, align 8
+ %tmp2 = insertelement <4 x i16> undef, i16 %tmp1, i32 0
+ %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> undef, <4 x i32> zeroinitializer
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @vld1dupi32(i32* %A) nounwind {
+;CHECK: vld1dupi32:
+;Check the alignment value. Max for this instruction is 32 bits:
+;CHECK: vld1.32 {d16[]}, [r0, :32]
+ %tmp1 = load i32* %A, align 8
+ %tmp2 = insertelement <2 x i32> undef, i32 %tmp1, i32 0
+ %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> undef, <2 x i32> zeroinitializer
+ ret <2 x i32> %tmp3
+}
+
+define <2 x float> @vld1dupf(float* %A) nounwind {
+;CHECK: vld1dupf:
+;CHECK: vld1.32 {d16[]}, [r0]
+ %tmp0 = load float* %A
+ %tmp1 = insertelement <2 x float> undef, float %tmp0, i32 0
+ %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> zeroinitializer
+ ret <2 x float> %tmp2
+}
+
+define <16 x i8> @vld1dupQi8(i8* %A) nounwind {
+;CHECK: vld1dupQi8:
+;Check the (default) alignment value.
+;CHECK: vld1.8 {d16[], d17[]}, [r0]
+ %tmp1 = load i8* %A, align 8
+ %tmp2 = insertelement <16 x i8> undef, i8 %tmp1, i32 0
+ %tmp3 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <16 x i32> zeroinitializer
+ ret <16 x i8> %tmp3
+}
+
+define <4 x float> @vld1dupQf(float* %A) nounwind {
+;CHECK: vld1dupQf:
+;CHECK: vld1.32 {d16[], d17[]}, [r0]
+ %tmp0 = load float* %A
+ %tmp1 = insertelement <4 x float> undef, float %tmp0, i32 0
+ %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> zeroinitializer
+ ret <4 x float> %tmp2
+}
+
+%struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
+%struct.__neon_int4x16x2_t = type { <4 x i16>, <4 x i16> }
+%struct.__neon_int2x32x2_t = type { <2 x i32>, <2 x i32> }
+
+define <8 x i8> @vld2dupi8(i8* %A) nounwind {
+;CHECK: vld2dupi8:
+;Check the (default) alignment value.
+;CHECK: vld2.8 {d16[], d17[]}, [r0]
+ %tmp0 = tail call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> undef, <8 x i8> undef, i32 0, i32 1)
+ %tmp1 = extractvalue %struct.__neon_int8x8x2_t %tmp0, 0
+ %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> zeroinitializer
+ %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp0, 1
+ %tmp4 = shufflevector <8 x i8> %tmp3, <8 x i8> undef, <8 x i32> zeroinitializer
+ %tmp5 = add <8 x i8> %tmp2, %tmp4
+ ret <8 x i8> %tmp5
+}
+
+define <4 x i16> @vld2dupi16(i16* %A) nounwind {
+;CHECK: vld2dupi16:
+;Check that a power-of-two alignment smaller than the total size of the memory
+;being loaded is ignored.
+;CHECK: vld2.16 {d16[], d17[]}, [r0]
+ %tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i16* %A, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
+ %tmp1 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 0
+ %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
+ %tmp3 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 1
+ %tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer
+ %tmp5 = add <4 x i16> %tmp2, %tmp4
+ ret <4 x i16> %tmp5
+}
+
+;Check for a post-increment updating load.
+define <4 x i16> @vld2dupi16_update(i16** %ptr) nounwind {
+;CHECK: vld2dupi16_update:
+;CHECK: vld2.16 {d16[], d17[]}, [r1]!
+ %A = load i16** %ptr
+ %tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i16* %A, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
+ %tmp1 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 0
+ %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
+ %tmp3 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 1
+ %tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer
+ %tmp5 = add <4 x i16> %tmp2, %tmp4
+ %tmp6 = getelementptr i16* %A, i32 2
+ store i16* %tmp6, i16** %ptr
+ ret <4 x i16> %tmp5
+}
+
+define <2 x i32> @vld2dupi32(i32* %A) nounwind {
+;CHECK: vld2dupi32:
+;Check the alignment value. Max for this instruction is 64 bits:
+;CHECK: vld2.32 {d16[], d17[]}, [r0, :64]
+ %tmp0 = tail call %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32(i32* %A, <2 x i32> undef, <2 x i32> undef, i32 0, i32 16)
+ %tmp1 = extractvalue %struct.__neon_int2x32x2_t %tmp0, 0
+ %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer
+ %tmp3 = extractvalue %struct.__neon_int2x32x2_t %tmp0, 1
+ %tmp4 = shufflevector <2 x i32> %tmp3, <2 x i32> undef, <2 x i32> zeroinitializer
+ %tmp5 = add <2 x i32> %tmp2, %tmp4
+ ret <2 x i32> %tmp5
+}
+
+declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32) nounwind readonly
+declare %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i16*, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
+declare %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32(i32*, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly
+
+%struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
+%struct.__neon_int16x4x3_t = type { <4 x i16>, <4 x i16>, <4 x i16> }
+
+;Check for a post-increment updating load with register increment.
+define <8 x i8> @vld3dupi8_update(i8** %ptr, i32 %inc) nounwind {
+;CHECK: vld3dupi8_update:
+;CHECK: vld3.8 {d16[], d17[], d18[]}, [r2], r1
+ %A = load i8** %ptr
+ %tmp0 = tail call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8* %A, <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, i32 0, i32 8)
+ %tmp1 = extractvalue %struct.__neon_int8x8x3_t %tmp0, 0
+ %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> zeroinitializer
+ %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp0, 1
+ %tmp4 = shufflevector <8 x i8> %tmp3, <8 x i8> undef, <8 x i32> zeroinitializer
+ %tmp5 = extractvalue %struct.__neon_int8x8x3_t %tmp0, 2
+ %tmp6 = shufflevector <8 x i8> %tmp5, <8 x i8> undef, <8 x i32> zeroinitializer
+ %tmp7 = add <8 x i8> %tmp2, %tmp4
+ %tmp8 = add <8 x i8> %tmp7, %tmp6
+ %tmp9 = getelementptr i8* %A, i32 %inc
+ store i8* %tmp9, i8** %ptr
+ ret <8 x i8> %tmp8
+}
+
+define <4 x i16> @vld3dupi16(i16* %A) nounwind {
+;CHECK: vld3dupi16:
+;Check the (default) alignment value. VLD3 does not support alignment.
+;CHECK: vld3.16 {d16[], d17[], d18[]}, [r0]
+ %tmp0 = tail call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i16* %A, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 8)
+ %tmp1 = extractvalue %struct.__neon_int16x4x3_t %tmp0, 0
+ %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
+ %tmp3 = extractvalue %struct.__neon_int16x4x3_t %tmp0, 1
+ %tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer
+ %tmp5 = extractvalue %struct.__neon_int16x4x3_t %tmp0, 2
+ %tmp6 = shufflevector <4 x i16> %tmp5, <4 x i16> undef, <4 x i32> zeroinitializer
+ %tmp7 = add <4 x i16> %tmp2, %tmp4
+ %tmp8 = add <4 x i16> %tmp7, %tmp6
+ ret <4 x i16> %tmp8
+}
+
+declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32) nounwind readonly
+declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i16*, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
+
+%struct.__neon_int16x4x4_t = type { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }
+%struct.__neon_int32x2x4_t = type { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> }
+
+;Check for a post-increment updating load.
+define <4 x i16> @vld4dupi16_update(i16** %ptr) nounwind {
+;CHECK: vld4dupi16_update:
+;CHECK: vld4.16 {d16[], d17[], d18[], d19[]}, [r1]!
+ %A = load i16** %ptr
+ %tmp0 = tail call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i16* %A, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 1)
+ %tmp1 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 0
+ %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
+ %tmp3 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 1
+ %tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer
+ %tmp5 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 2
+ %tmp6 = shufflevector <4 x i16> %tmp5, <4 x i16> undef, <4 x i32> zeroinitializer
+ %tmp7 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 3
+ %tmp8 = shufflevector <4 x i16> %tmp7, <4 x i16> undef, <4 x i32> zeroinitializer
+ %tmp9 = add <4 x i16> %tmp2, %tmp4
+ %tmp10 = add <4 x i16> %tmp6, %tmp8
+ %tmp11 = add <4 x i16> %tmp9, %tmp10
+ %tmp12 = getelementptr i16* %A, i32 4
+ store i16* %tmp12, i16** %ptr
+ ret <4 x i16> %tmp11
+}
+
+define <2 x i32> @vld4dupi32(i32* %A) nounwind {
+;CHECK: vld4dupi32:
+;Check the alignment value. An 8-byte alignment is allowed here even though
+;it is smaller than the total size of the memory being loaded.
+;CHECK: vld4.32 {d16[], d17[], d18[], d19[]}, [r0, :64]
+ %tmp0 = tail call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i32* %A, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 0, i32 8)
+ %tmp1 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 0
+ %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer
+ %tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 1
+ %tmp4 = shufflevector <2 x i32> %tmp3, <2 x i32> undef, <2 x i32> zeroinitializer
+ %tmp5 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 2
+ %tmp6 = shufflevector <2 x i32> %tmp5, <2 x i32> undef, <2 x i32> zeroinitializer
+ %tmp7 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 3
+ %tmp8 = shufflevector <2 x i32> %tmp7, <2 x i32> undef, <2 x i32> zeroinitializer
+ %tmp9 = add <2 x i32> %tmp2, %tmp4
+ %tmp10 = add <2 x i32> %tmp6, %tmp8
+ %tmp11 = add <2 x i32> %tmp9, %tmp10
+ ret <2 x i32> %tmp11
+}
+
+declare %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i16*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
+declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i32*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly
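
At the intrinsics level these duplicating loads correspond to the vldN_dup
family; a minimal sketch of the vld2dupi16 pattern (made-up helper):

    #include <arm_neon.h>

    int16x4_t ld2_dup_sum(const int16_t *p) {
        int16x4x2_t v = vld2_dup_s16(p);  /* vld2.16 {d16[], d17[]}, [r0] */
        return vadd_s16(v.val[0], v.val[1]);
    }
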
diff --git a/test/CodeGen/ARM/vldlane.ll b/test/CodeGen/ARM/vldlane.ll
index 31ee64fa598f..770ed071ac12 100644
--- a/test/CodeGen/ARM/vldlane.ll
+++ b/test/CodeGen/ARM/vldlane.ll
@@ -1,5 +1,80 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+define <8 x i8> @vld1lanei8(i8* %A, <8 x i8>* %B) nounwind {
+;CHECK: vld1lanei8:
+;Check the (default) alignment value.
+;CHECK: vld1.8 {d16[3]}, [r0]
+ %tmp1 = load <8 x i8>* %B
+ %tmp2 = load i8* %A, align 8
+ %tmp3 = insertelement <8 x i8> %tmp1, i8 %tmp2, i32 3
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @vld1lanei16(i16* %A, <4 x i16>* %B) nounwind {
+;CHECK: vld1lanei16:
+;Check the alignment value. Max for this instruction is 16 bits:
+;CHECK: vld1.16 {d16[2]}, [r0, :16]
+ %tmp1 = load <4 x i16>* %B
+ %tmp2 = load i16* %A, align 8
+ %tmp3 = insertelement <4 x i16> %tmp1, i16 %tmp2, i32 2
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @vld1lanei32(i32* %A, <2 x i32>* %B) nounwind {
+;CHECK: vld1lanei32:
+;Check the alignment value. Max for this instruction is 32 bits:
+;CHECK: vld1.32 {d16[1]}, [r0, :32]
+ %tmp1 = load <2 x i32>* %B
+ %tmp2 = load i32* %A, align 8
+ %tmp3 = insertelement <2 x i32> %tmp1, i32 %tmp2, i32 1
+ ret <2 x i32> %tmp3
+}
+
+define <2 x float> @vld1lanef(float* %A, <2 x float>* %B) nounwind {
+;CHECK: vld1lanef:
+;CHECK: vld1.32 {d16[1]}, [r0]
+ %tmp1 = load <2 x float>* %B
+ %tmp2 = load float* %A, align 4
+ %tmp3 = insertelement <2 x float> %tmp1, float %tmp2, i32 1
+ ret <2 x float> %tmp3
+}
+
+define <16 x i8> @vld1laneQi8(i8* %A, <16 x i8>* %B) nounwind {
+;CHECK: vld1laneQi8:
+;CHECK: vld1.8 {d17[1]}, [r0]
+ %tmp1 = load <16 x i8>* %B
+ %tmp2 = load i8* %A, align 8
+ %tmp3 = insertelement <16 x i8> %tmp1, i8 %tmp2, i32 9
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @vld1laneQi16(i16* %A, <8 x i16>* %B) nounwind {
+;CHECK: vld1laneQi16:
+;CHECK: vld1.16 {d17[1]}, [r0, :16]
+ %tmp1 = load <8 x i16>* %B
+ %tmp2 = load i16* %A, align 8
+ %tmp3 = insertelement <8 x i16> %tmp1, i16 %tmp2, i32 5
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @vld1laneQi32(i32* %A, <4 x i32>* %B) nounwind {
+;CHECK: vld1laneQi32:
+;CHECK: vld1.32 {d17[1]}, [r0, :32]
+ %tmp1 = load <4 x i32>* %B
+ %tmp2 = load i32* %A, align 8
+ %tmp3 = insertelement <4 x i32> %tmp1, i32 %tmp2, i32 3
+ ret <4 x i32> %tmp3
+}
+
+define <4 x float> @vld1laneQf(float* %A, <4 x float>* %B) nounwind {
+;CHECK: vld1laneQf:
+;CHECK: vld1.32 {d16[0]}, [r0]
+ %tmp1 = load <4 x float>* %B
+ %tmp2 = load float* %A
+ %tmp3 = insertelement <4 x float> %tmp1, float %tmp2, i32 0
+ ret <4 x float> %tmp3
+}
+
%struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
%struct.__neon_int16x4x2_t = type { <4 x i16>, <4 x i16> }
%struct.__neon_int32x2x2_t = type { <2 x i32>, <2 x i32> }
@@ -11,9 +86,10 @@
define <8 x i8> @vld2lanei8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK: vld2lanei8:
-;CHECK: vld2.8
+;Check the alignment value. Max for this instruction is 16 bits:
+;CHECK: vld2.8 {d16[1], d17[1]}, [r0, :16]
%tmp1 = load <8 x i8>* %B
- %tmp2 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 1)
+ %tmp2 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 4)
%tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0
%tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1
%tmp5 = add <8 x i8> %tmp3, %tmp4
@@ -22,10 +98,11 @@ define <8 x i8> @vld2lanei8(i8* %A, <8 x i8>* %B) nounwind {
define <4 x i16> @vld2lanei16(i16* %A, <4 x i16>* %B) nounwind {
;CHECK: vld2lanei16:
-;CHECK: vld2.16
+;Check the alignment value. Max for this instruction is 32 bits:
+;CHECK: vld2.16 {d16[1], d17[1]}, [r0, :32]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <4 x i16>* %B
- %tmp2 = call %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 1)
+ %tmp2 = call %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 8)
%tmp3 = extractvalue %struct.__neon_int16x4x2_t %tmp2, 0
%tmp4 = extractvalue %struct.__neon_int16x4x2_t %tmp2, 1
%tmp5 = add <4 x i16> %tmp3, %tmp4
@@ -44,6 +121,22 @@ define <2 x i32> @vld2lanei32(i32* %A, <2 x i32>* %B) nounwind {
ret <2 x i32> %tmp5
}
+;Check for a post-increment updating load.
+define <2 x i32> @vld2lanei32_update(i32** %ptr, <2 x i32>* %B) nounwind {
+;CHECK: vld2lanei32_update:
+;CHECK: vld2.32 {d16[1], d17[1]}, [r1]!
+ %A = load i32** %ptr
+ %tmp0 = bitcast i32* %A to i8*
+ %tmp1 = load <2 x i32>* %B
+ %tmp2 = call %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 1)
+ %tmp3 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 1
+ %tmp5 = add <2 x i32> %tmp3, %tmp4
+ %tmp6 = getelementptr i32* %A, i32 2
+ store i32* %tmp6, i32** %ptr
+ ret <2 x i32> %tmp5
+}
+
define <2 x float> @vld2lanef(float* %A, <2 x float>* %B) nounwind {
;CHECK: vld2lanef:
;CHECK: vld2.32
@@ -58,10 +151,11 @@ define <2 x float> @vld2lanef(float* %A, <2 x float>* %B) nounwind {
define <8 x i16> @vld2laneQi16(i16* %A, <8 x i16>* %B) nounwind {
;CHECK: vld2laneQi16:
-;CHECK: vld2.16
+;Check the (default) alignment.
+;CHECK: vld2.16 {d17[1], d19[1]}, [r0]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <8 x i16>* %B
- %tmp2 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1, i32 1)
+ %tmp2 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 5, i32 1)
%tmp3 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 0
%tmp4 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 1
%tmp5 = add <8 x i16> %tmp3, %tmp4
@@ -70,10 +164,11 @@ define <8 x i16> @vld2laneQi16(i16* %A, <8 x i16>* %B) nounwind {
define <4 x i32> @vld2laneQi32(i32* %A, <4 x i32>* %B) nounwind {
;CHECK: vld2laneQi32:
-;CHECK: vld2.32
+;Check the alignment value. Max for this instruction is 64 bits:
+;CHECK: vld2.32 {d17[0], d19[0]}, [r0, :64]
%tmp0 = bitcast i32* %A to i8*
%tmp1 = load <4 x i32>* %B
- %tmp2 = call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2, i32 1)
+ %tmp2 = call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2, i32 16)
%tmp3 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 0
%tmp4 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 1
%tmp5 = add <4 x i32> %tmp3, %tmp4
@@ -125,10 +220,11 @@ define <8 x i8> @vld3lanei8(i8* %A, <8 x i8>* %B) nounwind {
define <4 x i16> @vld3lanei16(i16* %A, <4 x i16>* %B) nounwind {
;CHECK: vld3lanei16:
-;CHECK: vld3.16
+;Check the (default) alignment value. VLD3 does not support alignment.
+;CHECK: vld3.16 {d16[1], d17[1], d18[1]}, [r0]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <4 x i16>* %B
- %tmp2 = call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 1)
+ %tmp2 = call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 8)
%tmp3 = extractvalue %struct.__neon_int16x4x3_t %tmp2, 0
%tmp4 = extractvalue %struct.__neon_int16x4x3_t %tmp2, 1
%tmp5 = extractvalue %struct.__neon_int16x4x3_t %tmp2, 2
@@ -167,10 +263,11 @@ define <2 x float> @vld3lanef(float* %A, <2 x float>* %B) nounwind {
define <8 x i16> @vld3laneQi16(i16* %A, <8 x i16>* %B) nounwind {
;CHECK: vld3laneQi16:
-;CHECK: vld3.16
+;Check the (default) alignment value. VLD3 does not support alignment.
+;CHECK: vld3.16 {d16[1], d18[1], d20[1]}, [r0]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <8 x i16>* %B
- %tmp2 = call %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1, i32 1)
+ %tmp2 = call %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1, i32 8)
%tmp3 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 0
%tmp4 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 1
%tmp5 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 2
@@ -179,6 +276,24 @@ define <8 x i16> @vld3laneQi16(i16* %A, <8 x i16>* %B) nounwind {
ret <8 x i16> %tmp7
}
+;Check for a post-increment updating load with register increment.
+define <8 x i16> @vld3laneQi16_update(i16** %ptr, <8 x i16>* %B, i32 %inc) nounwind {
+;CHECK: vld3laneQi16_update:
+;CHECK: vld3.16 {d16[1], d18[1], d20[1]}, [r2], r1
+ %A = load i16** %ptr
+ %tmp0 = bitcast i16* %A to i8*
+ %tmp1 = load <8 x i16>* %B
+ %tmp2 = call %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1, i32 8)
+ %tmp3 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 1
+ %tmp5 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 2
+ %tmp6 = add <8 x i16> %tmp3, %tmp4
+ %tmp7 = add <8 x i16> %tmp5, %tmp6
+ %tmp8 = getelementptr i16* %A, i32 %inc
+ store i16* %tmp8, i16** %ptr
+ ret <8 x i16> %tmp7
+}
+
define <4 x i32> @vld3laneQi32(i32* %A, <4 x i32>* %B) nounwind {
;CHECK: vld3laneQi32:
;CHECK: vld3.32
@@ -227,9 +342,10 @@ declare %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3lane.v4f32(i8*, <4 x flo
define <8 x i8> @vld4lanei8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK: vld4lanei8:
-;CHECK: vld4.8
+;Check the alignment value. Max for this instruction is 32 bits:
+;CHECK: vld4.8 {d16[1], d17[1], d18[1], d19[1]}, [r0, :32]
%tmp1 = load <8 x i8>* %B
- %tmp2 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 1)
+ %tmp2 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 8)
%tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
%tmp4 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 1
%tmp5 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 2
@@ -240,12 +356,33 @@ define <8 x i8> @vld4lanei8(i8* %A, <8 x i8>* %B) nounwind {
ret <8 x i8> %tmp9
}
+;Check for a post-increment updating load.
+define <8 x i8> @vld4lanei8_update(i8** %ptr, <8 x i8>* %B) nounwind {
+;CHECK: vld4lanei8_update:
+;CHECK: vld4.8 {d16[1], d17[1], d18[1], d19[1]}, [r1, :32]!
+ %A = load i8** %ptr
+ %tmp1 = load <8 x i8>* %B
+ %tmp2 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 8)
+ %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 1
+ %tmp5 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 2
+ %tmp6 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 3
+ %tmp7 = add <8 x i8> %tmp3, %tmp4
+ %tmp8 = add <8 x i8> %tmp5, %tmp6
+ %tmp9 = add <8 x i8> %tmp7, %tmp8
+ %tmp10 = getelementptr i8* %A, i32 4
+ store i8* %tmp10, i8** %ptr
+ ret <8 x i8> %tmp9
+}
+
define <4 x i16> @vld4lanei16(i16* %A, <4 x i16>* %B) nounwind {
;CHECK: vld4lanei16:
-;CHECK: vld4.16
+;Check that a power-of-two alignment smaller than the total size of the memory
+;being loaded is ignored.
+;CHECK: vld4.16 {d16[1], d17[1], d18[1], d19[1]}, [r0]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <4 x i16>* %B
- %tmp2 = call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 1)
+ %tmp2 = call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 4)
%tmp3 = extractvalue %struct.__neon_int16x4x4_t %tmp2, 0
%tmp4 = extractvalue %struct.__neon_int16x4x4_t %tmp2, 1
%tmp5 = extractvalue %struct.__neon_int16x4x4_t %tmp2, 2
@@ -258,10 +395,12 @@ define <4 x i16> @vld4lanei16(i16* %A, <4 x i16>* %B) nounwind {
define <2 x i32> @vld4lanei32(i32* %A, <2 x i32>* %B) nounwind {
;CHECK: vld4lanei32:
-;CHECK: vld4.32
+;Check the alignment value. An 8-byte alignment is allowed here even though
+;it is smaller than the total size of the memory being loaded.
+;CHECK: vld4.32 {d16[1], d17[1], d18[1], d19[1]}, [r0, :64]
%tmp0 = bitcast i32* %A to i8*
%tmp1 = load <2 x i32>* %B
- %tmp2 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 1)
+ %tmp2 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 8)
%tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp2, 0
%tmp4 = extractvalue %struct.__neon_int32x2x4_t %tmp2, 1
%tmp5 = extractvalue %struct.__neon_int32x2x4_t %tmp2, 2
@@ -290,10 +429,11 @@ define <2 x float> @vld4lanef(float* %A, <2 x float>* %B) nounwind {
define <8 x i16> @vld4laneQi16(i16* %A, <8 x i16>* %B) nounwind {
;CHECK: vld4laneQi16:
-;CHECK: vld4.16
+;Check the alignment value. Max for this instruction is 64 bits:
+;CHECK: vld4.16 {d16[1], d18[1], d20[1], d22[1]}, [r0, :64]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <8 x i16>* %B
- %tmp2 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1, i32 1)
+ %tmp2 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1, i32 16)
%tmp3 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 0
%tmp4 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 1
%tmp5 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 2
@@ -306,10 +446,11 @@ define <8 x i16> @vld4laneQi16(i16* %A, <8 x i16>* %B) nounwind {
define <4 x i32> @vld4laneQi32(i32* %A, <4 x i32>* %B) nounwind {
;CHECK: vld4laneQi32:
-;CHECK: vld4.32
+;Check the (default) alignment.
+;CHECK: vld4.32 {d17[0], d19[0], d21[0], d23[0]}, [r0]
%tmp0 = bitcast i32* %A to i8*
%tmp1 = load <4 x i32>* %B
- %tmp2 = call %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 1, i32 1)
+ %tmp2 = call %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2, i32 1)
%tmp3 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 0
%tmp4 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 1
%tmp5 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 2
@@ -344,3 +485,22 @@ declare %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4lane.v2f32(i8*, <2 x flo
declare %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32, i32) nounwind readonly
declare %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32, i32) nounwind readonly
declare %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32, i32) nounwind readonly
+
+; Radar 8776599: If one of the operands to a QQQQ REG_SEQUENCE is a register
+; in the QPR_VFP2 regclass, it needs to be copied to a QPR regclass because
+; we don't currently have a QQQQ_VFP2 super-regclass. (The "0" for the low
+; part of %ins67 is supposed to be loaded by a VLDRS instruction in this test.)
+define void @test_qqqq_regsequence_subreg([6 x i64] %b) nounwind {
+;CHECK: test_qqqq_regsequence_subreg
+;CHECK: vld3.16
+ %tmp63 = extractvalue [6 x i64] %b, 5
+ %tmp64 = zext i64 %tmp63 to i128
+ %tmp65 = shl i128 %tmp64, 64
+ %ins67 = or i128 %tmp65, 0
+ %tmp78 = bitcast i128 %ins67 to <8 x i16>
+ %vld3_lane = tail call %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3lane.v8i16(i8* undef, <8 x i16> undef, <8 x i16> undef, <8 x i16> %tmp78, i32 1, i32 2)
+ call void @llvm.trap()
+ unreachable
+}
+
+declare void @llvm.trap() nounwind
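
The lane loads in this file correspond to the vldN_lane intrinsics, which
reload one lane of each register while keeping the rest; a minimal sketch
(made-up helper):

    #include <arm_neon.h>

    int16x4_t ld2_lane_sum(const int16_t *p, int16x4x2_t v) {
        int16x4x2_t r = vld2_lane_s16(p, v, 1);  /* vld2.16 {d16[1], d17[1]}, [r0] */
        return vadd_s16(r.val[0], r.val[1]);
    }
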
diff --git a/test/CodeGen/ARM/vmov.ll b/test/CodeGen/ARM/vmov.ll
index 8cd94576b0c2..a86be32bd203 100644
--- a/test/CodeGen/ARM/vmov.ll
+++ b/test/CodeGen/ARM/vmov.ll
@@ -2,169 +2,169 @@
define <8 x i8> @v_movi8() nounwind {
;CHECK: v_movi8:
-;CHECK: vmov.i8 d0, #0x8
+;CHECK: vmov.i8 d{{.*}}, #0x8
ret <8 x i8> < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
}
define <4 x i16> @v_movi16a() nounwind {
;CHECK: v_movi16a:
-;CHECK: vmov.i16 d0, #0x10
+;CHECK: vmov.i16 d{{.*}}, #0x10
ret <4 x i16> < i16 16, i16 16, i16 16, i16 16 >
}
define <4 x i16> @v_movi16b() nounwind {
;CHECK: v_movi16b:
-;CHECK: vmov.i16 d0, #0x1000
+;CHECK: vmov.i16 d{{.*}}, #0x1000
ret <4 x i16> < i16 4096, i16 4096, i16 4096, i16 4096 >
}
define <4 x i16> @v_mvni16a() nounwind {
;CHECK: v_mvni16a:
-;CHECK: vmvn.i16 d0, #0x10
+;CHECK: vmvn.i16 d{{.*}}, #0x10
ret <4 x i16> < i16 65519, i16 65519, i16 65519, i16 65519 >
}
define <4 x i16> @v_mvni16b() nounwind {
;CHECK: v_mvni16b:
-;CHECK: vmvn.i16 d0, #0x1000
+;CHECK: vmvn.i16 d{{.*}}, #0x1000
ret <4 x i16> < i16 61439, i16 61439, i16 61439, i16 61439 >
}
define <2 x i32> @v_movi32a() nounwind {
;CHECK: v_movi32a:
-;CHECK: vmov.i32 d0, #0x20
+;CHECK: vmov.i32 d{{.*}}, #0x20
ret <2 x i32> < i32 32, i32 32 >
}
define <2 x i32> @v_movi32b() nounwind {
;CHECK: v_movi32b:
-;CHECK: vmov.i32 d0, #0x2000
+;CHECK: vmov.i32 d{{.*}}, #0x2000
ret <2 x i32> < i32 8192, i32 8192 >
}
define <2 x i32> @v_movi32c() nounwind {
;CHECK: v_movi32c:
-;CHECK: vmov.i32 d0, #0x200000
+;CHECK: vmov.i32 d{{.*}}, #0x200000
ret <2 x i32> < i32 2097152, i32 2097152 >
}
define <2 x i32> @v_movi32d() nounwind {
;CHECK: v_movi32d:
-;CHECK: vmov.i32 d0, #0x20000000
+;CHECK: vmov.i32 d{{.*}}, #0x20000000
ret <2 x i32> < i32 536870912, i32 536870912 >
}
define <2 x i32> @v_movi32e() nounwind {
;CHECK: v_movi32e:
-;CHECK: vmov.i32 d0, #0x20FF
+;CHECK: vmov.i32 d{{.*}}, #0x20FF
ret <2 x i32> < i32 8447, i32 8447 >
}
define <2 x i32> @v_movi32f() nounwind {
;CHECK: v_movi32f:
-;CHECK: vmov.i32 d0, #0x20FFFF
+;CHECK: vmov.i32 d{{.*}}, #0x20FFFF
ret <2 x i32> < i32 2162687, i32 2162687 >
}
define <2 x i32> @v_mvni32a() nounwind {
;CHECK: v_mvni32a:
-;CHECK: vmvn.i32 d0, #0x20
+;CHECK: vmvn.i32 d{{.*}}, #0x20
ret <2 x i32> < i32 4294967263, i32 4294967263 >
}
define <2 x i32> @v_mvni32b() nounwind {
;CHECK: v_mvni32b:
-;CHECK: vmvn.i32 d0, #0x2000
+;CHECK: vmvn.i32 d{{.*}}, #0x2000
ret <2 x i32> < i32 4294959103, i32 4294959103 >
}
define <2 x i32> @v_mvni32c() nounwind {
;CHECK: v_mvni32c:
-;CHECK: vmvn.i32 d0, #0x200000
+;CHECK: vmvn.i32 d{{.*}}, #0x200000
ret <2 x i32> < i32 4292870143, i32 4292870143 >
}
define <2 x i32> @v_mvni32d() nounwind {
;CHECK: v_mvni32d:
-;CHECK: vmvn.i32 d0, #0x20000000
+;CHECK: vmvn.i32 d{{.*}}, #0x20000000
ret <2 x i32> < i32 3758096383, i32 3758096383 >
}
define <2 x i32> @v_mvni32e() nounwind {
;CHECK: v_mvni32e:
-;CHECK: vmvn.i32 d0, #0x20FF
+;CHECK: vmvn.i32 d{{.*}}, #0x20FF
ret <2 x i32> < i32 4294958848, i32 4294958848 >
}
define <2 x i32> @v_mvni32f() nounwind {
;CHECK: v_mvni32f:
-;CHECK: vmvn.i32 d0, #0x20FFFF
+;CHECK: vmvn.i32 d{{.*}}, #0x20FFFF
ret <2 x i32> < i32 4292804608, i32 4292804608 >
}
define <1 x i64> @v_movi64() nounwind {
;CHECK: v_movi64:
-;CHECK: vmov.i64 d0, #0xFF0000FF0000FFFF
+;CHECK: vmov.i64 d{{.*}}, #0xFF0000FF0000FFFF
ret <1 x i64> < i64 18374687574888349695 >
}
define <16 x i8> @v_movQi8() nounwind {
;CHECK: v_movQi8:
-;CHECK: vmov.i8 q0, #0x8
+;CHECK: vmov.i8 q{{.*}}, #0x8
ret <16 x i8> < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
}
define <8 x i16> @v_movQi16a() nounwind {
;CHECK: v_movQi16a:
-;CHECK: vmov.i16 q0, #0x10
+;CHECK: vmov.i16 q{{.*}}, #0x10
ret <8 x i16> < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
}
define <8 x i16> @v_movQi16b() nounwind {
;CHECK: v_movQi16b:
-;CHECK: vmov.i16 q0, #0x1000
+;CHECK: vmov.i16 q{{.*}}, #0x1000
ret <8 x i16> < i16 4096, i16 4096, i16 4096, i16 4096, i16 4096, i16 4096, i16 4096, i16 4096 >
}
define <4 x i32> @v_movQi32a() nounwind {
;CHECK: v_movQi32a:
-;CHECK: vmov.i32 q0, #0x20
+;CHECK: vmov.i32 q{{.*}}, #0x20
ret <4 x i32> < i32 32, i32 32, i32 32, i32 32 >
}
define <4 x i32> @v_movQi32b() nounwind {
;CHECK: v_movQi32b:
-;CHECK: vmov.i32 q0, #0x2000
+;CHECK: vmov.i32 q{{.*}}, #0x2000
ret <4 x i32> < i32 8192, i32 8192, i32 8192, i32 8192 >
}
define <4 x i32> @v_movQi32c() nounwind {
;CHECK: v_movQi32c:
-;CHECK: vmov.i32 q0, #0x200000
+;CHECK: vmov.i32 q{{.*}}, #0x200000
ret <4 x i32> < i32 2097152, i32 2097152, i32 2097152, i32 2097152 >
}
define <4 x i32> @v_movQi32d() nounwind {
;CHECK: v_movQi32d:
-;CHECK: vmov.i32 q0, #0x20000000
+;CHECK: vmov.i32 q{{.*}}, #0x20000000
ret <4 x i32> < i32 536870912, i32 536870912, i32 536870912, i32 536870912 >
}
define <4 x i32> @v_movQi32e() nounwind {
;CHECK: v_movQi32e:
-;CHECK: vmov.i32 q0, #0x20FF
+;CHECK: vmov.i32 q{{.*}}, #0x20FF
ret <4 x i32> < i32 8447, i32 8447, i32 8447, i32 8447 >
}
define <4 x i32> @v_movQi32f() nounwind {
;CHECK: v_movQi32f:
-;CHECK: vmov.i32 q0, #0x20FFFF
+;CHECK: vmov.i32 q{{.*}}, #0x20FFFF
ret <4 x i32> < i32 2162687, i32 2162687, i32 2162687, i32 2162687 >
}
define <2 x i64> @v_movQi64() nounwind {
;CHECK: v_movQi64:
-;CHECK: vmov.i64 q0, #0xFF0000FF0000FFFF
+;CHECK: vmov.i64 q{{.*}}, #0xFF0000FF0000FFFF
ret <2 x i64> < i64 18374687574888349695, i64 18374687574888349695 >
}
@@ -173,7 +173,7 @@ define <2 x i64> @v_movQi64() nounwind {
define void @vdupn128(%struct.int8x8_t* noalias nocapture sret %agg.result) nounwind {
entry:
;CHECK: vdupn128:
-;CHECK: vmov.i8 d0, #0x80
+;CHECK: vmov.i8 d{{.*}}, #0x80
%0 = getelementptr inbounds %struct.int8x8_t* %agg.result, i32 0, i32 0 ; <<8 x i8>*> [#uses=1]
store <8 x i8> <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128>, <8 x i8>* %0, align 8
ret void
@@ -182,7 +182,7 @@ entry:
define void @vdupnneg75(%struct.int8x8_t* noalias nocapture sret %agg.result) nounwind {
entry:
;CHECK: vdupnneg75:
-;CHECK: vmov.i8 d0, #0xB5
+;CHECK: vmov.i8 d{{.*}}, #0xB5
%0 = getelementptr inbounds %struct.int8x8_t* %agg.result, i32 0, i32 0 ; <<8 x i8>*> [#uses=1]
store <8 x i8> <i8 -75, i8 -75, i8 -75, i8 -75, i8 -75, i8 -75, i8 -75, i8 -75>, <8 x i8>* %0, align 8
ret void
@@ -343,3 +343,13 @@ declare <2 x i32> @llvm.arm.neon.vqmovnu.v2i32(<2 x i64>) nounwind readnone
declare <8 x i8> @llvm.arm.neon.vqmovnsu.v8i8(<8 x i16>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqmovnsu.v4i16(<4 x i32>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vqmovnsu.v2i32(<2 x i64>) nounwind readnone
+
+; Truncating vector stores are not supported. The following should not crash.
+; Radar 8598391.
+define void @noTruncStore(<4 x i32>* %a, <4 x i16>* %b) nounwind {
+;CHECK: vmovn
+ %tmp1 = load <4 x i32>* %a, align 16
+ %tmp2 = trunc <4 x i32> %tmp1 to <4 x i16>
+ store <4 x i16> %tmp2, <4 x i16>* %b, align 8
+ ret void
+}
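
What noTruncStore expects amounts to narrowing in a register and then
storing normally; an intrinsics-level sketch (made-up helper):

    #include <arm_neon.h>

    void trunc_store(const int32_t *a, int16_t *b) {
        int16x4_t n = vmovn_s32(vld1q_s32(a));  /* vmovn.i32 */
        vst1_s16(b, n);
    }
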
diff --git a/test/CodeGen/ARM/vmul.ll b/test/CodeGen/ARM/vmul.ll
index 5383425018f8..ee033caa00d0 100644
--- a/test/CodeGen/ARM/vmul.ll
+++ b/test/CodeGen/ARM/vmul.ll
@@ -267,3 +267,75 @@ entry:
}
declare <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
+
+
+; Radar 8687140
+; VMULL needs to recognize BUILD_VECTORs with sign/zero-extended elements.
+
+define <8 x i16> @vmull_extvec_s8(<8 x i8> %arg) nounwind {
+; CHECK: vmull_extvec_s8
+; CHECK: vmull.s8
+ %tmp3 = sext <8 x i8> %arg to <8 x i16>
+ %tmp4 = mul <8 x i16> %tmp3, <i16 -12, i16 -12, i16 -12, i16 -12, i16 -12, i16 -12, i16 -12, i16 -12>
+ ret <8 x i16> %tmp4
+}
+
+define <8 x i16> @vmull_extvec_u8(<8 x i8> %arg) nounwind {
+; CHECK: vmull_extvec_u8
+; CHECK: vmull.u8
+ %tmp3 = zext <8 x i8> %arg to <8 x i16>
+ %tmp4 = mul <8 x i16> %tmp3, <i16 12, i16 12, i16 12, i16 12, i16 12, i16 12, i16 12, i16 12>
+ ret <8 x i16> %tmp4
+}
+
+define <8 x i16> @vmull_noextvec_s8(<8 x i8> %arg) nounwind {
+; Do not use VMULL if the BUILD_VECTOR element values are too big.
+; CHECK: vmull_noextvec_s8
+; CHECK: vmovl.s8
+; CHECK: vmul.i16
+ %tmp3 = sext <8 x i8> %arg to <8 x i16>
+ %tmp4 = mul <8 x i16> %tmp3, <i16 -999, i16 -999, i16 -999, i16 -999, i16 -999, i16 -999, i16 -999, i16 -999>
+ ret <8 x i16> %tmp4
+}
+
+define <8 x i16> @vmull_noextvec_u8(<8 x i8> %arg) nounwind {
+; Do not use VMULL if the BUILD_VECTOR element values are too big.
+; CHECK: vmull_noextvec_u8
+; CHECK: vmovl.u8
+; CHECK: vmul.i16
+ %tmp3 = zext <8 x i8> %arg to <8 x i16>
+ %tmp4 = mul <8 x i16> %tmp3, <i16 999, i16 999, i16 999, i16 999, i16 999, i16 999, i16 999, i16 999>
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @vmull_extvec_s16(<4 x i16> %arg) nounwind {
+; CHECK: vmull_extvec_s16
+; CHECK: vmull.s16
+ %tmp3 = sext <4 x i16> %arg to <4 x i32>
+ %tmp4 = mul <4 x i32> %tmp3, <i32 -12, i32 -12, i32 -12, i32 -12>
+ ret <4 x i32> %tmp4
+}
+
+define <4 x i32> @vmull_extvec_u16(<4 x i16> %arg) nounwind {
+; CHECK: vmull_extvec_u16
+; CHECK: vmull.u16
+ %tmp3 = zext <4 x i16> %arg to <4 x i32>
+ %tmp4 = mul <4 x i32> %tmp3, <i32 1234, i32 1234, i32 1234, i32 1234>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @vmull_extvec_s32(<2 x i32> %arg) nounwind {
+; CHECK: vmull_extvec_s32
+; CHECK: vmull.s32
+ %tmp3 = sext <2 x i32> %arg to <2 x i64>
+ %tmp4 = mul <2 x i64> %tmp3, <i64 -1234, i64 -1234>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i64> @vmull_extvec_u32(<2 x i32> %arg) nounwind {
+; CHECK: vmull_extvec_u32
+; CHECK: vmull.u32
+ %tmp3 = zext <2 x i32> %arg to <2 x i64>
+ %tmp4 = mul <2 x i64> %tmp3, <i64 1234, i64 1234>
+ ret <2 x i64> %tmp4
+}
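The dividing line in the tests above is plain range arithmetic: VMULL is usable only when the splatted constant is itself representable as a sign- or zero-extension from the narrow element type. Worked out for the i8 cases (derived from the values in the tests):

;   -12  lies in [-128, 127], the sext range of i8 -> rebuild the constant as sext(i8 -12), emit vmull.s8
;   -999 lies outside that range                   -> keep the wide multiply: vmovl.s8 + vmul.i16
; The unsigned variants test against the zext range of i8, [0, 255], in the same way.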
diff --git a/test/CodeGen/ARM/vrev.ll b/test/CodeGen/ARM/vrev.ll
index e1fe64b02d9d..f0f9e4e339b4 100644
--- a/test/CodeGen/ARM/vrev.ll
+++ b/test/CodeGen/ARM/vrev.ll
@@ -129,3 +129,21 @@ define <8 x i16> @test_vrev32Q16_undef(<8 x i16>* %A) nounwind {
%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 5, i32 4, i32 7, i32 undef>
ret <8 x i16> %tmp2
}
+
+; A vcombine feeding a VREV should still be matched as a VREV64 rather than
+; as a VEXT. Radar 8597007.
+
+define void @test_with_vcombine(<4 x float>* %v) nounwind {
+;CHECK: test_with_vcombine:
+;CHECK-NOT: vext
+;CHECK: vrev64.32
+ %tmp1 = load <4 x float>* %v, align 16
+ %tmp2 = bitcast <4 x float> %tmp1 to <2 x double>
+ %tmp3 = extractelement <2 x double> %tmp2, i32 0
+ %tmp4 = bitcast double %tmp3 to <2 x float>
+ %tmp5 = extractelement <2 x double> %tmp2, i32 1
+ %tmp6 = bitcast double %tmp5 to <2 x float>
+ %tmp7 = fadd <2 x float> %tmp6, %tmp6
+ %tmp8 = shufflevector <2 x float> %tmp4, <2 x float> %tmp7, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ store <4 x float> %tmp8, <4 x float>* %v, align 16
+ ret void
+}
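For reference, the essential pattern isolated in a minimal stand-alone function (hypothetical, not part of this patch): the mask <1, 0, 3, 2> swaps the 32-bit elements within each 64-bit half, which is exactly VREV64.32, so no VEXT is needed even when the input arrives through the vcombine/bitcast sequence above:

define <4 x float> @rev64_32(<4 x float> %x) nounwind {
  %r = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
  ret <4 x float> %r
}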
diff --git a/test/CodeGen/ARM/vst1.ll b/test/CodeGen/ARM/vst1.ll
index 2b535ada3072..364d44b7116f 100644
--- a/test/CodeGen/ARM/vst1.ll
+++ b/test/CodeGen/ARM/vst1.ll
@@ -2,9 +2,10 @@
define void @vst1i8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK: vst1i8:
-;CHECK: vst1.8
+;Check the alignment value. Max for this instruction is 64 bits:
+;CHECK: vst1.8 {d16}, [r0, :64]
%tmp1 = load <8 x i8>* %B
- call void @llvm.arm.neon.vst1.v8i8(i8* %A, <8 x i8> %tmp1, i32 1)
+ call void @llvm.arm.neon.vst1.v8i8(i8* %A, <8 x i8> %tmp1, i32 16)
ret void
}
@@ -35,6 +36,19 @@ define void @vst1f(float* %A, <2 x float>* %B) nounwind {
ret void
}
+;Check for a post-increment updating store.
+define void @vst1f_update(float** %ptr, <2 x float>* %B) nounwind {
+;CHECK: vst1f_update:
+;CHECK: vst1.32 {d16}, [r1]!
+ %A = load float** %ptr
+ %tmp0 = bitcast float* %A to i8*
+ %tmp1 = load <2 x float>* %B
+ call void @llvm.arm.neon.vst1.v2f32(i8* %tmp0, <2 x float> %tmp1, i32 1)
+ %tmp2 = getelementptr float* %A, i32 2
+ store float* %tmp2, float** %ptr
+ ret void
+}
+
define void @vst1i64(i64* %A, <1 x i64>* %B) nounwind {
;CHECK: vst1i64:
;CHECK: vst1.64
@@ -46,18 +60,33 @@ define void @vst1i64(i64* %A, <1 x i64>* %B) nounwind {
define void @vst1Qi8(i8* %A, <16 x i8>* %B) nounwind {
;CHECK: vst1Qi8:
-;CHECK: vst1.8
+;Check the alignment value. Max for this instruction is 128 bits:
+;CHECK: vst1.8 {d16, d17}, [r0, :64]
%tmp1 = load <16 x i8>* %B
- call void @llvm.arm.neon.vst1.v16i8(i8* %A, <16 x i8> %tmp1, i32 1)
+ call void @llvm.arm.neon.vst1.v16i8(i8* %A, <16 x i8> %tmp1, i32 8)
ret void
}
define void @vst1Qi16(i16* %A, <8 x i16>* %B) nounwind {
;CHECK: vst1Qi16:
-;CHECK: vst1.16
+;Check the alignment value. Max for this instruction is 128 bits:
+;CHECK: vst1.16 {d16, d17}, [r0, :128]
+ %tmp0 = bitcast i16* %A to i8*
+ %tmp1 = load <8 x i16>* %B
+ call void @llvm.arm.neon.vst1.v8i16(i8* %tmp0, <8 x i16> %tmp1, i32 32)
+ ret void
+}
+
+;Check for a post-increment updating store with register increment.
+define void @vst1Qi16_update(i16** %ptr, <8 x i16>* %B, i32 %inc) nounwind {
+;CHECK: vst1Qi16_update:
+;CHECK: vst1.16 {d16, d17}, [r1, :64], r2
+ %A = load i16** %ptr
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <8 x i16>* %B
- call void @llvm.arm.neon.vst1.v8i16(i8* %tmp0, <8 x i16> %tmp1, i32 1)
+ call void @llvm.arm.neon.vst1.v8i16(i8* %tmp0, <8 x i16> %tmp1, i32 8)
+ %tmp2 = getelementptr i16* %A, i32 %inc
+ store i16* %tmp2, i16** %ptr
ret void
}
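A note on units in the vst1 tests above: the final i32 argument of the intrinsics is an alignment in bytes, while the :NN annotation in the expected address operand is in bits, clamped to what the instruction can encode. Worked through with values taken from the tests:

;   vst1i8:  i32 16 = 16 bytes = 128 bits -> clamped to the 64-bit max of a
;            one-register vst1.8, printed as [r0, :64]
;   vst1Qi8: i32 8  =  8 bytes =  64 bits -> a two-register vst1.8 could encode
;            up to :128, but only :64 is known, so [r0, :64] again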
diff --git a/test/CodeGen/ARM/vst2.ll b/test/CodeGen/ARM/vst2.ll
index aed15fd51c56..915a84b67767 100644
--- a/test/CodeGen/ARM/vst2.ll
+++ b/test/CodeGen/ARM/vst2.ll
@@ -2,18 +2,32 @@
define void @vst2i8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK: vst2i8:
-;CHECK: vst2.8
+;Check the alignment value. Max for this instruction is 128 bits:
+;CHECK: vst2.8 {d16, d17}, [r0, :64]
%tmp1 = load <8 x i8>* %B
- call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
+ call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 8)
+ ret void
+}
+
+;Check for a post-increment updating store with register increment.
+define void @vst2i8_update(i8** %ptr, <8 x i8>* %B, i32 %inc) nounwind {
+;CHECK: vst2i8_update:
+;CHECK: vst2.8 {d16, d17}, [r1], r2
+ %A = load i8** %ptr
+ %tmp1 = load <8 x i8>* %B
+ call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 4)
+ %tmp2 = getelementptr i8* %A, i32 %inc
+ store i8* %tmp2, i8** %ptr
ret void
}
define void @vst2i16(i16* %A, <4 x i16>* %B) nounwind {
;CHECK: vst2i16:
-;CHECK: vst2.16
+;Check the alignment value. Max for this instruction is 128 bits:
+;CHECK: vst2.16 {d16, d17}, [r0, :128]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <4 x i16>* %B
- call void @llvm.arm.neon.vst2.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
+ call void @llvm.arm.neon.vst2.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 32)
ret void
}
@@ -37,36 +51,53 @@ define void @vst2f(float* %A, <2 x float>* %B) nounwind {
define void @vst2i64(i64* %A, <1 x i64>* %B) nounwind {
;CHECK: vst2i64:
-;CHECK: vst1.64
+;Check the alignment value. Max for this instruction is 128 bits:
+;CHECK: vst1.64 {d16, d17}, [r0, :128]
+ %tmp0 = bitcast i64* %A to i8*
+ %tmp1 = load <1 x i64>* %B
+ call void @llvm.arm.neon.vst2.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 32)
+ ret void
+}
+
+;Check for a post-increment updating store.
+define void @vst2i64_update(i64** %ptr, <1 x i64>* %B) nounwind {
+;CHECK: vst2i64_update:
+;CHECK: vst1.64 {d16, d17}, [r1, :64]!
+ %A = load i64** %ptr
%tmp0 = bitcast i64* %A to i8*
%tmp1 = load <1 x i64>* %B
- call void @llvm.arm.neon.vst2.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 1)
+ call void @llvm.arm.neon.vst2.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 8)
+ %tmp2 = getelementptr i64* %A, i32 2
+ store i64* %tmp2, i64** %ptr
ret void
}
define void @vst2Qi8(i8* %A, <16 x i8>* %B) nounwind {
;CHECK: vst2Qi8:
-;CHECK: vst2.8
+;Check the alignment value. Max for this instruction is 256 bits:
+;CHECK: vst2.8 {d16, d17, d18, d19}, [r0, :64]
%tmp1 = load <16 x i8>* %B
- call void @llvm.arm.neon.vst2.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1, i32 1)
+ call void @llvm.arm.neon.vst2.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1, i32 8)
ret void
}
define void @vst2Qi16(i16* %A, <8 x i16>* %B) nounwind {
;CHECK: vst2Qi16:
-;CHECK: vst2.16
+;Check the alignment value. Max for this instruction is 256 bits:
+;CHECK: vst2.16 {d16, d17, d18, d19}, [r0, :128]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <8 x i16>* %B
- call void @llvm.arm.neon.vst2.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
+ call void @llvm.arm.neon.vst2.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 16)
ret void
}
define void @vst2Qi32(i32* %A, <4 x i32>* %B) nounwind {
;CHECK: vst2Qi32:
-;CHECK: vst2.32
+;Check the alignment value. Max for this instruction is 256 bits:
+;CHECK: vst2.32 {d16, d17, d18, d19}, [r0, :256]
%tmp0 = bitcast i32* %A to i8*
%tmp1 = load <4 x i32>* %B
- call void @llvm.arm.neon.vst2.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 1)
+ call void @llvm.arm.neon.vst2.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 64)
ret void
}
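All of the _update functions in these files follow one idiom, sketched here with hypothetical names: load the current pointer, store through it, advance it with a getelementptr, and store it back. When the advance equals the number of bytes written, the expectation is the post-increment writeback form ([rN]!); any other stride requires the register form ([rN], rM):

;   %A = load i8** %ptr                              ; current base pointer
;   call void @llvm.arm.neon.vst2.v8i8(i8* %A, ...)  ; writes 2 x 8 = 16 bytes
;   %next = getelementptr i8* %A, i32 16             ; advance by exactly 16 bytes
;   store i8* %next, i8** %ptr                       ; folds to "vst2.8 {...}, [rN]!"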
diff --git a/test/CodeGen/ARM/vst3.ll b/test/CodeGen/ARM/vst3.ll
index 1feaed5a1044..d262303bc60e 100644
--- a/test/CodeGen/ARM/vst3.ll
+++ b/test/CodeGen/ARM/vst3.ll
@@ -2,9 +2,11 @@
define void @vst3i8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK: vst3i8:
-;CHECK: vst3.8
+;Check the alignment value. Max for this instruction is 64 bits:
+;This test runs at -O0 so do not check for specific register numbers.
+;CHECK: vst3.8 {d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}, :64]
%tmp1 = load <8 x i8>* %B
- call void @llvm.arm.neon.vst3.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
+ call void @llvm.arm.neon.vst3.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 32)
ret void
}
@@ -26,6 +28,19 @@ define void @vst3i32(i32* %A, <2 x i32>* %B) nounwind {
ret void
}
+;Check for a post-increment updating store.
+define void @vst3i32_update(i32** %ptr, <2 x i32>* %B) nounwind {
+;CHECK: vst3i32_update:
+;CHECK: vst3.32 {d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}]!
+ %A = load i32** %ptr
+ %tmp0 = bitcast i32* %A to i8*
+ %tmp1 = load <2 x i32>* %B
+ call void @llvm.arm.neon.vst3.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
+ %tmp2 = getelementptr i32* %A, i32 6
+ store i32* %tmp2, i32** %ptr
+ ret void
+}
+
define void @vst3f(float* %A, <2 x float>* %B) nounwind {
;CHECK: vst3f:
;CHECK: vst3.32
@@ -37,19 +52,23 @@ define void @vst3f(float* %A, <2 x float>* %B) nounwind {
define void @vst3i64(i64* %A, <1 x i64>* %B) nounwind {
;CHECK: vst3i64:
-;CHECK: vst1.64
+;Check the alignment value. Max for this instruction is 64 bits:
+;This test runs at -O0 so do not check for specific register numbers.
+;CHECK: vst1.64 {d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}, :64]
%tmp0 = bitcast i64* %A to i8*
%tmp1 = load <1 x i64>* %B
- call void @llvm.arm.neon.vst3.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 1)
+ call void @llvm.arm.neon.vst3.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 16)
ret void
}
define void @vst3Qi8(i8* %A, <16 x i8>* %B) nounwind {
;CHECK: vst3Qi8:
-;CHECK: vst3.8
-;CHECK: vst3.8
+;Check the alignment value. Max for this instruction is 64 bits:
+;This test runs at -O0 so do not check for specific register numbers.
+;CHECK: vst3.8 {d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}, :64]!
+;CHECK: vst3.8 {d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}, :64]
%tmp1 = load <16 x i8>* %B
- call void @llvm.arm.neon.vst3.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1, <16 x i8> %tmp1, i32 1)
+ call void @llvm.arm.neon.vst3.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1, <16 x i8> %tmp1, i32 32)
ret void
}
@@ -63,6 +82,20 @@ define void @vst3Qi16(i16* %A, <8 x i16>* %B) nounwind {
ret void
}
+;Check for a post-increment updating store.
+define void @vst3Qi16_update(i16** %ptr, <8 x i16>* %B) nounwind {
+;CHECK: vst3Qi16_update:
+;CHECK: vst3.16 {d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}]!
+;CHECK: vst3.16 {d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}]!
+ %A = load i16** %ptr
+ %tmp0 = bitcast i16* %A to i8*
+ %tmp1 = load <8 x i16>* %B
+ call void @llvm.arm.neon.vst3.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
+ %tmp2 = getelementptr i16* %A, i32 24
+ store i16* %tmp2, i16** %ptr
+ ret void
+}
+
define void @vst3Qi32(i32* %A, <4 x i32>* %B) nounwind {
;CHECK: vst3Qi32:
;CHECK: vst3.32
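One FileCheck detail the vst3 tests lean on: anything inside {{...}} in a CHECK line is matched as a regular expression. A pattern such as

;CHECK: vst3.8 {d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}, :64]

therefore accepts whichever d- and r-registers the -O0 register allocator happens to pick, while still pinning down the opcode and the :64 alignment.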
diff --git a/test/CodeGen/ARM/vst4.ll b/test/CodeGen/ARM/vst4.ll
index d302f097fc1f..e94acb66bf2e 100644
--- a/test/CodeGen/ARM/vst4.ll
+++ b/test/CodeGen/ARM/vst4.ll
@@ -2,27 +2,42 @@
define void @vst4i8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK: vst4i8:
-;CHECK: vst4.8
+;Check the alignment value. Max for this instruction is 256 bits:
+;CHECK: vst4.8 {d16, d17, d18, d19}, [r0, :64]
%tmp1 = load <8 x i8>* %B
- call void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
+ call void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 8)
+ ret void
+}
+
+;Check for a post-increment updating store with register increment.
+define void @vst4i8_update(i8** %ptr, <8 x i8>* %B, i32 %inc) nounwind {
+;CHECK: vst4i8_update:
+;CHECK: vst4.8 {d16, d17, d18, d19}, [r1, :128], r2
+ %A = load i8** %ptr
+ %tmp1 = load <8 x i8>* %B
+ call void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 16)
+ %tmp2 = getelementptr i8* %A, i32 %inc
+ store i8* %tmp2, i8** %ptr
ret void
}
define void @vst4i16(i16* %A, <4 x i16>* %B) nounwind {
;CHECK: vst4i16:
-;CHECK: vst4.16
+;Check the alignment value. Max for this instruction is 256 bits:
+;CHECK: vst4.16 {d16, d17, d18, d19}, [r0, :128]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <4 x i16>* %B
- call void @llvm.arm.neon.vst4.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
+ call void @llvm.arm.neon.vst4.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 16)
ret void
}
define void @vst4i32(i32* %A, <2 x i32>* %B) nounwind {
;CHECK: vst4i32:
-;CHECK: vst4.32
+;Check the alignment value. Max for this instruction is 256 bits:
+;CHECK: vst4.32 {d16, d17, d18, d19}, [r0, :256]
%tmp0 = bitcast i32* %A to i8*
%tmp1 = load <2 x i32>* %B
- call void @llvm.arm.neon.vst4.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
+ call void @llvm.arm.neon.vst4.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 32)
ret void
}
@@ -37,26 +52,29 @@ define void @vst4f(float* %A, <2 x float>* %B) nounwind {
define void @vst4i64(i64* %A, <1 x i64>* %B) nounwind {
;CHECK: vst4i64:
-;CHECK: vst1.64
+;Check the alignment value. Max for this instruction is 256 bits:
+;CHECK: vst1.64 {d16, d17, d18, d19}, [r0, :256]
%tmp0 = bitcast i64* %A to i8*
%tmp1 = load <1 x i64>* %B
- call void @llvm.arm.neon.vst4.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 1)
+ call void @llvm.arm.neon.vst4.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 64)
ret void
}
define void @vst4Qi8(i8* %A, <16 x i8>* %B) nounwind {
;CHECK: vst4Qi8:
-;CHECK: vst4.8
-;CHECK: vst4.8
+;Check the alignment value. Max for this instruction is 256 bits:
+;CHECK: vst4.8 {d16, d18, d20, d22}, [r0, :256]!
+;CHECK: vst4.8 {d17, d19, d21, d23}, [r0, :256]
%tmp1 = load <16 x i8>* %B
- call void @llvm.arm.neon.vst4.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1, <16 x i8> %tmp1, <16 x i8> %tmp1, i32 1)
+ call void @llvm.arm.neon.vst4.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1, <16 x i8> %tmp1, <16 x i8> %tmp1, i32 64)
ret void
}
define void @vst4Qi16(i16* %A, <8 x i16>* %B) nounwind {
;CHECK: vst4Qi16:
-;CHECK: vst4.16
-;CHECK: vst4.16
+;Check for no alignment specifier.
+;CHECK: vst4.16 {d16, d18, d20, d22}, [r0]!
+;CHECK: vst4.16 {d17, d19, d21, d23}, [r0]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <8 x i16>* %B
call void @llvm.arm.neon.vst4.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
@@ -83,6 +101,20 @@ define void @vst4Qf(float* %A, <4 x float>* %B) nounwind {
ret void
}
+;Check for a post-increment updating store.
+define void @vst4Qf_update(float** %ptr, <4 x float>* %B) nounwind {
+;CHECK: vst4Qf_update:
+;CHECK: vst4.32 {d16, d18, d20, d22}, [r1]!
+;CHECK: vst4.32 {d17, d19, d21, d23}, [r1]!
+ %A = load float** %ptr
+ %tmp0 = bitcast float* %A to i8*
+ %tmp1 = load <4 x float>* %B
+ call void @llvm.arm.neon.vst4.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
+ %tmp2 = getelementptr float* %A, i32 16
+ store float* %tmp2, float** %ptr
+ ret void
+}
+
declare void @llvm.arm.neon.vst4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
declare void @llvm.arm.neon.vst4.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind
declare void @llvm.arm.neon.vst4.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind
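Why the Q-register vst4 tests above expect two instructions: there is no single encoding that stores four Q registers' worth of data, so a 128-bit vst4 is split into even and odd d-register halves, the first half using writeback to advance the base pointer:

;   vst4.8 {d16, d18, d20, d22}, [r0, :256]!   ; even halves, r0 advances past them
;   vst4.8 {d17, d19, d21, d23}, [r0, :256]    ; odd halves at the updated address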
diff --git a/test/CodeGen/ARM/vstlane.ll b/test/CodeGen/ARM/vstlane.ll
index 30ec52ac6420..6cc052bbeb1c 100644
--- a/test/CodeGen/ARM/vstlane.ll
+++ b/test/CodeGen/ARM/vstlane.ll
@@ -1,19 +1,109 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+define void @vst1lanei8(i8* %A, <8 x i8>* %B) nounwind {
+;CHECK: vst1lanei8:
+;Check the (default) alignment.
+;CHECK: vst1.8 {d16[3]}, [r0]
+ %tmp1 = load <8 x i8>* %B
+ %tmp2 = extractelement <8 x i8> %tmp1, i32 3
+ store i8 %tmp2, i8* %A, align 8
+ ret void
+}
+
+define void @vst1lanei16(i16* %A, <4 x i16>* %B) nounwind {
+;CHECK: vst1lanei16:
+;Check the alignment value. Max for this instruction is 16 bits:
+;CHECK: vst1.16 {d16[2]}, [r0, :16]
+ %tmp1 = load <4 x i16>* %B
+ %tmp2 = extractelement <4 x i16> %tmp1, i32 2
+ store i16 %tmp2, i16* %A, align 8
+ ret void
+}
+
+define void @vst1lanei32(i32* %A, <2 x i32>* %B) nounwind {
+;CHECK: vst1lanei32:
+;Check the alignment value. Max for this instruction is 32 bits:
+;CHECK: vst1.32 {d16[1]}, [r0, :32]
+ %tmp1 = load <2 x i32>* %B
+ %tmp2 = extractelement <2 x i32> %tmp1, i32 1
+ store i32 %tmp2, i32* %A, align 8
+ ret void
+}
+
+define void @vst1lanef(float* %A, <2 x float>* %B) nounwind {
+;CHECK: vst1lanef:
+;CHECK: vst1.32 {d16[1]}, [r0]
+ %tmp1 = load <2 x float>* %B
+ %tmp2 = extractelement <2 x float> %tmp1, i32 1
+ store float %tmp2, float* %A
+ ret void
+}
+
+define void @vst1laneQi8(i8* %A, <16 x i8>* %B) nounwind {
+;CHECK: vst1laneQi8:
+;CHECK: vst1.8 {d17[1]}, [r0]
+ %tmp1 = load <16 x i8>* %B
+ %tmp2 = extractelement <16 x i8> %tmp1, i32 9
+ store i8 %tmp2, i8* %A, align 8
+ ret void
+}
+
+define void @vst1laneQi16(i16* %A, <8 x i16>* %B) nounwind {
+;CHECK: vst1laneQi16:
+;CHECK: vst1.16 {d17[1]}, [r0, :16]
+ %tmp1 = load <8 x i16>* %B
+ %tmp2 = extractelement <8 x i16> %tmp1, i32 5
+ store i16 %tmp2, i16* %A, align 8
+ ret void
+}
+
+define void @vst1laneQi32(i32* %A, <4 x i32>* %B) nounwind {
+;CHECK: vst1laneQi32:
+;CHECK: vst1.32 {d17[1]}, [r0, :32]
+ %tmp1 = load <4 x i32>* %B
+ %tmp2 = extractelement <4 x i32> %tmp1, i32 3
+ store i32 %tmp2, i32* %A, align 8
+ ret void
+}
+
+define void @vst1laneQf(float* %A, <4 x float>* %B) nounwind {
+;CHECK: vst1laneQf:
+;CHECK: vst1.32 {d17[1]}, [r0]
+ %tmp1 = load <4 x float>* %B
+ %tmp2 = extractelement <4 x float> %tmp1, i32 3
+ store float %tmp2, float* %A
+ ret void
+}
+
define void @vst2lanei8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK: vst2lanei8:
-;CHECK: vst2.8
+;Check the alignment value. Max for this instruction is 16 bits:
+;CHECK: vst2.8 {d16[1], d17[1]}, [r0, :16]
%tmp1 = load <8 x i8>* %B
- call void @llvm.arm.neon.vst2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 1)
+ call void @llvm.arm.neon.vst2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 4)
ret void
}
define void @vst2lanei16(i16* %A, <4 x i16>* %B) nounwind {
;CHECK: vst2lanei16:
-;CHECK: vst2.16
+;Check the alignment value. Max for this instruction is 32 bits:
+;CHECK: vst2.16 {d16[1], d17[1]}, [r0, :32]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <4 x i16>* %B
- call void @llvm.arm.neon.vst2lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 1)
+ call void @llvm.arm.neon.vst2lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 8)
+ ret void
+}
+
+;Check for a post-increment updating store with register increment.
+define void @vst2lanei16_update(i16** %ptr, <4 x i16>* %B, i32 %inc) nounwind {
+;CHECK: vst2lanei16_update:
+;CHECK: vst2.16 {d16[1], d17[1]}, [r1], r2
+ %A = load i16** %ptr
+ %tmp0 = bitcast i16* %A to i8*
+ %tmp1 = load <4 x i16>* %B
+ call void @llvm.arm.neon.vst2lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 2)
+ %tmp2 = getelementptr i16* %A, i32 %inc
+ store i16* %tmp2, i16** %ptr
ret void
}
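The per-instruction maxima quoted in these vst2lane comments are simply the total bytes the lane store touches, i.e. two lanes times the element size (derived from the values above, not stated in the patch):

;   vst2.8  lane: 2 x 1 byte  = 2 bytes -> at most :16
;   vst2.16 lane: 2 x 2 bytes = 4 bytes -> at most :32
;   vst2.32 lane: 2 x 4 bytes = 8 bytes -> at most :64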
@@ -37,19 +127,21 @@ define void @vst2lanef(float* %A, <2 x float>* %B) nounwind {
define void @vst2laneQi16(i16* %A, <8 x i16>* %B) nounwind {
;CHECK: vst2laneQi16:
-;CHECK: vst2.16
+;Check the (default) alignment.
+;CHECK: vst2.16 {d17[1], d19[1]}, [r0]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <8 x i16>* %B
- call void @llvm.arm.neon.vst2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1, i32 1)
+ call void @llvm.arm.neon.vst2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 5, i32 1)
ret void
}
define void @vst2laneQi32(i32* %A, <4 x i32>* %B) nounwind {
;CHECK: vst2laneQi32:
-;CHECK: vst2.32
+;Check the alignment value. Max for this instruction is 64 bits:
+;CHECK: vst2.32 {d17[0], d19[0]}, [r0, :64]
%tmp0 = bitcast i32* %A to i8*
%tmp1 = load <4 x i32>* %B
- call void @llvm.arm.neon.vst2lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2, i32 1)
+ call void @llvm.arm.neon.vst2lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2, i32 16)
ret void
}
@@ -81,10 +173,11 @@ define void @vst3lanei8(i8* %A, <8 x i8>* %B) nounwind {
define void @vst3lanei16(i16* %A, <4 x i16>* %B) nounwind {
;CHECK: vst3lanei16:
-;CHECK: vst3.16
+;Check that no alignment specifier is printed. VST3 does not support alignment.
+;CHECK: vst3.16 {d16[1], d17[1], d18[1]}, [r0]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <4 x i16>* %B
- call void @llvm.arm.neon.vst3lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 1)
+ call void @llvm.arm.neon.vst3lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 8)
ret void
}
@@ -108,10 +201,11 @@ define void @vst3lanef(float* %A, <2 x float>* %B) nounwind {
define void @vst3laneQi16(i16* %A, <8 x i16>* %B) nounwind {
;CHECK: vst3laneQi16:
-;CHECK: vst3.16
+;Check that no alignment specifier is printed. VST3 does not support alignment.
+;CHECK: vst3.16 {d17[2], d19[2], d21[2]}, [r0]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <8 x i16>* %B
- call void @llvm.arm.neon.vst3lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 6, i32 1)
+ call void @llvm.arm.neon.vst3lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 6, i32 8)
ret void
}
@@ -124,6 +218,19 @@ define void @vst3laneQi32(i32* %A, <4 x i32>* %B) nounwind {
ret void
}
+;Check for a post-increment updating store.
+define void @vst3laneQi32_update(i32** %ptr, <4 x i32>* %B) nounwind {
+;CHECK: vst3laneQi32_update:
+;CHECK: vst3.32 {d16[0], d18[0], d20[0]}, [r1]!
+ %A = load i32** %ptr
+ %tmp0 = bitcast i32* %A to i8*
+ %tmp1 = load <4 x i32>* %B
+ call void @llvm.arm.neon.vst3lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 0, i32 1)
+ %tmp2 = getelementptr i32* %A, i32 3
+ store i32* %tmp2, i32** %ptr
+ ret void
+}
+
define void @vst3laneQf(float* %A, <4 x float>* %B) nounwind {
;CHECK: vst3laneQf:
;CHECK: vst3.32
@@ -145,9 +252,22 @@ declare void @llvm.arm.neon.vst3lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x f
define void @vst4lanei8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK: vst4lanei8:
-;CHECK: vst4.8
+;Check the alignment value. Max for this instruction is 32 bits:
+;CHECK: vst4.8 {d16[1], d17[1], d18[1], d19[1]}, [r0, :32]
+ %tmp1 = load <8 x i8>* %B
+ call void @llvm.arm.neon.vst4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 8)
+ ret void
+}
+
+;Check for a post-increment updating store.
+define void @vst4lanei8_update(i8** %ptr, <8 x i8>* %B) nounwind {
+;CHECK: vst4lanei8_update:
+;CHECK: vst4.8 {d16[1], d17[1], d18[1], d19[1]}, [r1, :32]!
+ %A = load i8** %ptr
%tmp1 = load <8 x i8>* %B
- call void @llvm.arm.neon.vst4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 1)
+ call void @llvm.arm.neon.vst4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 8)
+ %tmp2 = getelementptr i8* %A, i32 4
+ store i8* %tmp2, i8** %ptr
ret void
}
@@ -162,10 +282,11 @@ define void @vst4lanei16(i16* %A, <4 x i16>* %B) nounwind {
define void @vst4lanei32(i32* %A, <2 x i32>* %B) nounwind {
;CHECK: vst4lanei32:
-;CHECK: vst4.32
+;Check the alignment value. Max for this instruction is 128 bits:
+;CHECK: vst4.32 {d16[1], d17[1], d18[1], d19[1]}, [r0, :128]
%tmp0 = bitcast i32* %A to i8*
%tmp1 = load <2 x i32>* %B
- call void @llvm.arm.neon.vst4lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 1)
+ call void @llvm.arm.neon.vst4lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 16)
ret void
}
@@ -180,16 +301,18 @@ define void @vst4lanef(float* %A, <2 x float>* %B) nounwind {
define void @vst4laneQi16(i16* %A, <8 x i16>* %B) nounwind {
;CHECK: vst4laneQi16:
-;CHECK: vst4.16
+;Check the alignment value. Max for this instruction is 64 bits:
+;CHECK: vst4.16 {d17[3], d19[3], d21[3], d23[3]}, [r0, :64]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <8 x i16>* %B
- call void @llvm.arm.neon.vst4lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 7, i32 1)
+ call void @llvm.arm.neon.vst4lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 7, i32 16)
ret void
}
define void @vst4laneQi32(i32* %A, <4 x i32>* %B) nounwind {
;CHECK: vst4laneQi32:
-;CHECK: vst4.32
+;Check the (default) alignment.
+;CHECK: vst4.32 {d17[0], d19[0], d21[0], d23[0]}, [r0]
%tmp0 = bitcast i32* %A to i8*
%tmp1 = load <4 x i32>* %B
call void @llvm.arm.neon.vst4lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2, i32 1)