Diffstat (limited to 'test/CodeGen/Hexagon')
168 files changed, 10482 insertions, 779 deletions
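Most of the hunks below simply migrate the test IR to the explicit-type forms of load and getelementptr introduced by LLVM around this time; the new Hexagon intrinsic tests use the same style. A minimal illustrative sketch of that syntax change (the function name @syntax_example is invented for illustration and does not appear in any file in this diff):

; Old form carried only the pointer type; the new form spells out the
; loaded/indexed type first:
;   %v = load i32* %p              becomes   %v = load i32, i32* %p
;   %q = getelementptr i32* %p, i32 1   becomes   %q = getelementptr i32, i32* %p, i32 1
define i32 @syntax_example(i32* %p) {
entry:
  %q = getelementptr inbounds i32, i32* %p, i32 1
  %v = load i32, i32* %q, align 4
  ret i32 %v
}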
diff --git a/test/CodeGen/Hexagon/BranchPredict.ll b/test/CodeGen/Hexagon/BranchPredict.ll index 5d564493e507..17d169974e5a 100644 --- a/test/CodeGen/Hexagon/BranchPredict.ll +++ b/test/CodeGen/Hexagon/BranchPredict.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s +; RUN: llc -march=hexagon -ifcvt-limit=0 < %s | FileCheck %s ; Check if the branch probabilities are reflected in the instructions: ; The basic block placement pass should place the more probable successor @@ -53,7 +53,7 @@ return: ; preds = %if.else, %if.then define i32 @foo_bar(i32 %a, i16 signext %b) nounwind { ; CHECK: if{{ *}}(!cmp.eq(r{{[0-9]*}}.new, #0)) jump:nt entry: - %0 = load i32* @j, align 4 + %0 = load i32, i32* @j, align 4 %tobool = icmp eq i32 %0, 0 br i1 %tobool, label %if.else, label %if.then, !prof !0 diff --git a/test/CodeGen/Hexagon/absaddr-store.ll b/test/CodeGen/Hexagon/absaddr-store.ll index 5c2554df8aeb..3be4b1cc2614 100644 --- a/test/CodeGen/Hexagon/absaddr-store.ll +++ b/test/CodeGen/Hexagon/absaddr-store.ll @@ -9,7 +9,7 @@ define zeroext i8 @absStoreByte() nounwind { ; CHECK: memb(##b){{ *}}={{ *}}r{{[0-9]+}} entry: - %0 = load i8* @b, align 1 + %0 = load i8, i8* @b, align 1 %conv = zext i8 %0 to i32 %mul = mul nsw i32 100, %conv %conv1 = trunc i32 %mul to i8 @@ -20,7 +20,7 @@ entry: define signext i16 @absStoreHalf() nounwind { ; CHECK: memh(##c){{ *}}={{ *}}r{{[0-9]+}} entry: - %0 = load i16* @c, align 2 + %0 = load i16, i16* @c, align 2 %conv = sext i16 %0 to i32 %mul = mul nsw i32 100, %conv %conv1 = trunc i32 %mul to i16 @@ -31,7 +31,7 @@ entry: define i32 @absStoreWord() nounwind { ; CHECK: memw(##a){{ *}}={{ *}}r{{[0-9]+}} entry: - %0 = load i32* @a, align 4 + %0 = load i32, i32* @a, align 4 %mul = mul nsw i32 100, %0 store i32 %mul, i32* @a, align 4 ret i32 %mul diff --git a/test/CodeGen/Hexagon/absimm.ll b/test/CodeGen/Hexagon/absimm.ll index b8f5edc26470..07adb3fe49d5 100644 --- a/test/CodeGen/Hexagon/absimm.ll +++ b/test/CodeGen/Hexagon/absimm.ll @@ -12,7 +12,7 @@ entry: define i32* @f2(i32* nocapture %i) nounwind { entry: ; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(##786432) - %0 = load volatile i32* inttoptr (i32 786432 to i32*), align 262144 + %0 = load volatile i32, i32* inttoptr (i32 786432 to i32*), align 262144 %1 = inttoptr i32 %0 to i32* ret i32* %1 } diff --git a/test/CodeGen/Hexagon/adde.ll b/test/CodeGen/Hexagon/adde.ll index 6d060c1b9e26..7b29e7ad8a0f 100644 --- a/test/CodeGen/Hexagon/adde.ll +++ b/test/CodeGen/Hexagon/adde.ll @@ -1,7 +1,7 @@ -; RUN: llc -march=hexagon < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-expand-condsets=0 < %s | FileCheck %s -; CHECK: r{{[0-9]+:[0-9]+}} = #0 ; CHECK: r{{[0-9]+:[0-9]+}} = #1 +; CHECK: r{{[0-9]+:[0-9]+}} = #0 ; CHECK: r{{[0-9]+:[0-9]+}} = add(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}}) ; CHECK: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}}) ; CHECK: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}}) diff --git a/test/CodeGen/Hexagon/alu64.ll b/test/CodeGen/Hexagon/alu64.ll new file mode 100644 index 000000000000..d0824a4ecadc --- /dev/null +++ b/test/CodeGen/Hexagon/alu64.ll @@ -0,0 +1,599 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s + +; CHECK-LABEL: @test00 +; CHECK: p0 = cmp.eq(r1:0, r3:2) +define i32 @test00(i64 %Rs, i64 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.C2.cmpeqp(i64 %Rs, i64 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test01 +; CHECK: p0 = cmp.gt(r1:0, r3:2) +define i32 @test01(i64 %Rs, i64 %Rt) #0 { +entry: + %0 = tail call i32 
@llvm.hexagon.C2.cmpgtp(i64 %Rs, i64 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test02 +; CHECK: p0 = cmp.gtu(r1:0, r3:2) +define i32 @test02(i64 %Rs, i64 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.C2.cmpgtup(i64 %Rs, i64 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test10 +; CHECK: r0 = cmp.eq(r0, r1) +define i32 @test10(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A4.rcmpeq(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test11 +; CHECK: r0 = !cmp.eq(r0, r1) +define i32 @test11(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A4.rcmpneq(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test12 +; CHECK: r0 = cmp.eq(r0, #23) +define i32 @test12(i32 %Rs) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A4.rcmpeqi(i32 %Rs, i32 23) + ret i32 %0 +} + +; CHECK-LABEL: @test13 +; CHECK: r0 = !cmp.eq(r0, #47) +define i32 @test13(i32 %Rs) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A4.rcmpneqi(i32 %Rs, i32 47) + ret i32 %0 +} + +; CHECK-LABEL: @test20 +; CHECK: p0 = cmpb.eq(r0, r1) +define i32 @test20(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A4.cmpbeq(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test21 +; CHECK: p0 = cmpb.gt(r0, r1) +define i32 @test21(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A4.cmpbgt(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test22 +; CHECK: p0 = cmpb.gtu(r0, r1) +define i32 @test22(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A4.cmpbgtu(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test23 +; CHECK: p0 = cmpb.eq(r0, #56) +define i32 @test23(i32 %Rs) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A4.cmpbeqi(i32 %Rs, i32 56) + ret i32 %0 +} + +; CHECK-LABEL: @test24 +; CHECK: p0 = cmpb.gt(r0, #29) +define i32 @test24(i32 %Rs) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A4.cmpbgti(i32 %Rs, i32 29) + ret i32 %0 +} + +; CHECK-LABEL: @test25 +; CHECK: p0 = cmpb.gtu(r0, #111) +define i32 @test25(i32 %Rs) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A4.cmpbgtui(i32 %Rs, i32 111) + ret i32 %0 +} + +; CHECK-LABEL: @test30 +; CHECK: p0 = cmph.eq(r0, r1) +define i32 @test30(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A4.cmpheq(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test31 +; CHECK: p0 = cmph.gt(r0, r1) +define i32 @test31(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A4.cmphgt(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test32 +; CHECK: p0 = cmph.gtu(r0, r1) +define i32 @test32(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A4.cmphgtu(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test33 +; CHECK: p0 = cmph.eq(r0, #-123) +define i32 @test33(i32 %Rs) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A4.cmpheqi(i32 %Rs, i32 -123) + ret i32 %0 +} + +; CHECK-LABEL: @test34 +; CHECK: p0 = cmph.gt(r0, #-3) +define i32 @test34(i32 %Rs) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A4.cmphgti(i32 %Rs, i32 -3) + ret i32 %0 +} + +; CHECK-LABEL: @test35 +; CHECK: p0 = cmph.gtu(r0, #13) +define i32 @test35(i32 %Rs) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A4.cmphgtui(i32 %Rs, i32 13) + ret i32 %0 +} + +; CHECK-LABEL: @test40 +; CHECK: r1:0 = vmux(p0, r3:2, r5:4) +define i64 @test40(i32 %Pu, i64 %Rs, i64 %Rt) #0 { +entry: + %0 = tail call i64 @llvm.hexagon.C2.vmux(i32 %Pu, i64 %Rs, i64 %Rt) + ret i64 %0 +} + +; CHECK-LABEL: @test41 +; CHECK: p0 = any8(vcmpb.eq(r1:0, r3:2)) +define i32 @test41(i64 %Rs, i64 %Rt) #0 { +entry: + %0 = tail call i32 
@llvm.hexagon.A4.vcmpbeq.any(i64 %Rs, i64 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test50 +; CHECK: r1:0 = add(r1:0, r3:2) +define i64 @test50(i64 %Rs, i64 %Rt) #0 { +entry: + %0 = tail call i64 @llvm.hexagon.A2.addp(i64 %Rs, i64 %Rt) + ret i64 %0 +} + +; CHECK-LABEL: @test51 +; CHECK: r1:0 = add(r1:0, r3:2):sat +define i64 @test51(i64 %Rs, i64 %Rt) #0 { +entry: + %0 = tail call i64 @llvm.hexagon.A2.addpsat(i64 %Rs, i64 %Rt) + ret i64 %0 +} + +; CHECK-LABEL: @test52 +; CHECK: r1:0 = sub(r1:0, r3:2) +define i64 @test52(i64 %Rs, i64 %Rt) #0 { +entry: + %0 = tail call i64 @llvm.hexagon.A2.subp(i64 %Rs, i64 %Rt) + ret i64 %0 +} + +; CHECK-LABEL: @test53 +; CHECK: r1:0 = add(r0, r3:2) +define i64 @test53(i32 %Rs, i64 %Rt) #0 { +entry: + %0 = tail call i64 @llvm.hexagon.A2.addsp(i32 %Rs, i64 %Rt) + ret i64 %0 +} + +; CHECK-LABEL: @test54 +; CHECK: r1:0 = and(r1:0, r3:2) +define i64 @test54(i64 %Rs, i64 %Rt) #0 { +entry: + %0 = tail call i64 @llvm.hexagon.A2.andp(i64 %Rs, i64 %Rt) + ret i64 %0 +} + +; CHECK-LABEL: @test55 +; CHECK: r1:0 = or(r1:0, r3:2) +define i64 @test55(i64 %Rs, i64 %Rt) #0 { +entry: + %0 = tail call i64 @llvm.hexagon.A2.orp(i64 %Rs, i64 %Rt) + ret i64 %0 +} + +; CHECK-LABEL: @test56 +; CHECK: r1:0 = xor(r1:0, r3:2) +define i64 @test56(i64 %Rs, i64 %Rt) #0 { +entry: + %0 = tail call i64 @llvm.hexagon.A2.xorp(i64 %Rs, i64 %Rt) + ret i64 %0 +} + +; CHECK-LABEL: @test57 +; CHECK: r1:0 = and(r1:0, ~r3:2) +define i64 @test57(i64 %Rs, i64 %Rt) #0 { +entry: + %0 = tail call i64 @llvm.hexagon.A4.andnp(i64 %Rs, i64 %Rt) + ret i64 %0 +} + +; CHECK-LABEL: @test58 +; CHECK: r1:0 = or(r1:0, ~r3:2) +define i64 @test58(i64 %Rs, i64 %Rt) #0 { +entry: + %0 = tail call i64 @llvm.hexagon.A4.ornp(i64 %Rs, i64 %Rt) + ret i64 %0 +} + +; CHECK-LABEL: @test60 +; CHECK: r0 = add(r0.l, r1.l) +define i32 @test60(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.addh.l16.ll(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test61 +; CHECK: r0 = add(r0.l, r1.h) +define i32 @test61(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.addh.l16.hl(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test62 +; CHECK: r0 = add(r0.l, r1.l):sat +define i32 @test62(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test63 +; CHECK: r0 = add(r0.l, r1.h):sat +define i32 @test63(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test64 +; CHECK: r0 = add(r0.l, r1.l):<<16 +define i32 @test64(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.addh.h16.ll(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test65 +; CHECK: r0 = add(r0.l, r1.h):<<16 +define i32 @test65(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.addh.h16.lh(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test66 +; CHECK: r0 = add(r0.h, r1.l):<<16 +define i32 @test66(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.addh.h16.hl(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test67 +; CHECK: r0 = add(r0.h, r1.h):<<16 +define i32 @test67(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.addh.h16.hh(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test68 +; CHECK: r0 = add(r0.l, r1.l):sat:<<16 +define i32 @test68(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test69 
+; CHECK: r0 = add(r0.l, r1.h):sat:<<16 +define i32 @test69(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test6A +; CHECK: r0 = add(r0.h, r1.l):sat:<<16 +define i32 @test6A(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test6B +; CHECK: r0 = add(r0.h, r1.h):sat:<<16 +define i32 @test6B(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test70 +; CHECK: r0 = sub(r0.l, r1.l) +define i32 @test70(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.subh.l16.ll(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test71 +; CHECK: r0 = sub(r0.l, r1.h) +define i32 @test71(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.subh.l16.hl(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test72 +; CHECK: r0 = sub(r0.l, r1.l):sat +define i32 @test72(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test73 +; CHECK: r0 = sub(r0.l, r1.h):sat +define i32 @test73(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test74 +; CHECK: r0 = sub(r0.l, r1.l):<<16 +define i32 @test74(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.subh.h16.ll(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test75 +; CHECK: r0 = sub(r0.l, r1.h):<<16 +define i32 @test75(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.subh.h16.lh(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test76 +; CHECK: r0 = sub(r0.h, r1.l):<<16 +define i32 @test76(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.subh.h16.hl(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test77 +; CHECK: r0 = sub(r0.h, r1.h):<<16 +define i32 @test77(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.subh.h16.hh(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test78 +; CHECK: r0 = sub(r0.l, r1.l):sat:<<16 +define i32 @test78(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test79 +; CHECK: r0 = sub(r0.l, r1.h):sat:<<16 +define i32 @test79(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test7A +; CHECK: r0 = sub(r0.h, r1.l):sat:<<16 +define i32 @test7A(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test7B +; CHECK: r0 = sub(r0.h, r1.h):sat:<<16 +define i32 @test7B(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test90 +; CHECK: r0 = and(#1, asl(r0, #2)) +define i32 @test90(i32 %Rs) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.S4.andi.asl.ri(i32 1, i32 %Rs, i32 2) + ret i32 %0 +} + +; CHECK-LABEL: @test91 +; CHECK: r0 = or(#1, asl(r0, #2)) +define i32 @test91(i32 %Rs) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.S4.ori.asl.ri(i32 1, i32 %Rs, i32 2) + ret i32 %0 +} + +; CHECK-LABEL: @test92 +; CHECK: r0 = add(#1, asl(r0, #2)) +define i32 @test92(i32 %Rs) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.S4.addi.asl.ri(i32 1, i32 %Rs, i32 2) + ret i32 %0 
+} + +; CHECK-LABEL: @test93 +; CHECK: r0 = sub(#1, asl(r0, #2)) +define i32 @test93(i32 %Rs) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.S4.subi.asl.ri(i32 1, i32 %Rs, i32 2) + ret i32 %0 +} + +; CHECK-LABEL: @test94 +; CHECK: r0 = and(#1, lsr(r0, #2)) +define i32 @test94(i32 %Rs) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.S4.andi.lsr.ri(i32 1, i32 %Rs, i32 2) + ret i32 %0 +} + +; CHECK-LABEL: @test95 +; CHECK: r0 = or(#1, lsr(r0, #2)) +define i32 @test95(i32 %Rs) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.S4.ori.lsr.ri(i32 1, i32 %Rs, i32 2) + ret i32 %0 +} + +; CHECK-LABEL: @test96 +; CHECK: r0 = add(#1, lsr(r0, #2)) +define i32 @test96(i32 %Rs) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.S4.addi.lsr.ri(i32 1, i32 %Rs, i32 2) + ret i32 %0 +} + +; CHECK-LABEL: @test97 +; CHECK: r0 = sub(#1, lsr(r0, #2)) +define i32 @test97(i32 %Rs) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.S4.subi.lsr.ri(i32 1, i32 %Rs, i32 2) + ret i32 %0 +} + +; CHECK-LABEL: @test100 +; CHECK: r1:0 = bitsplit(r0, r1) +define i64 @test100(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i64 @llvm.hexagon.A4.bitsplit(i32 %Rs, i32 %Rt) + ret i64 %0 +} + +; CHECK-LABEL: @test101 +; CHECK: r0 = modwrap(r0, r1) +define i32 @test101(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.A4.modwrapu(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test102 +; CHECK: r0 = parity(r1:0, r3:2) +define i32 @test102(i64 %Rs, i64 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.S2.parityp(i64 %Rs, i64 %Rt) + ret i32 %0 +} + +; CHECK-LABEL: @test103 +; CHECK: r0 = parity(r0, r1) +define i32 @test103(i32 %Rs, i32 %Rt) #0 { +entry: + %0 = tail call i32 @llvm.hexagon.S4.parity(i32 %Rs, i32 %Rt) + ret i32 %0 +} + +declare i32 @llvm.hexagon.C2.cmpeqp(i64, i64) #1 +declare i32 @llvm.hexagon.C2.cmpgtp(i64, i64) #1 +declare i32 @llvm.hexagon.C2.cmpgtup(i64, i64) #1 +declare i32 @llvm.hexagon.A4.rcmpeq(i32, i32) #1 +declare i32 @llvm.hexagon.A4.rcmpneq(i32, i32) #1 +declare i32 @llvm.hexagon.A4.rcmpeqi(i32, i32) #1 +declare i32 @llvm.hexagon.A4.rcmpneqi(i32, i32) #1 +declare i32 @llvm.hexagon.A4.cmpbeq(i32, i32) #1 +declare i32 @llvm.hexagon.A4.cmpbgt(i32, i32) #1 +declare i32 @llvm.hexagon.A4.cmpbgtu(i32, i32) #1 +declare i32 @llvm.hexagon.A4.cmpbeqi(i32, i32) #1 +declare i32 @llvm.hexagon.A4.cmpbgti(i32, i32) #1 +declare i32 @llvm.hexagon.A4.cmpbgtui(i32, i32) #1 +declare i32 @llvm.hexagon.A4.cmpheq(i32, i32) #1 +declare i32 @llvm.hexagon.A4.cmphgt(i32, i32) #1 +declare i32 @llvm.hexagon.A4.cmphgtu(i32, i32) #1 +declare i32 @llvm.hexagon.A4.cmpheqi(i32, i32) #1 +declare i32 @llvm.hexagon.A4.cmphgti(i32, i32) #1 +declare i32 @llvm.hexagon.A4.cmphgtui(i32, i32) #1 +declare i64 @llvm.hexagon.C2.vmux(i32, i64, i64) #1 +declare i32 @llvm.hexagon.A4.vcmpbeq.any(i64, i64) #1 +declare i64 @llvm.hexagon.A2.addp(i64, i64) #1 +declare i64 @llvm.hexagon.A2.addpsat(i64, i64) #1 +declare i64 @llvm.hexagon.A2.subp(i64, i64) #1 +declare i64 @llvm.hexagon.A2.addsp(i32, i64) #1 +declare i64 @llvm.hexagon.A2.andp(i64, i64) #1 +declare i64 @llvm.hexagon.A2.orp(i64, i64) #1 +declare i64 @llvm.hexagon.A2.xorp(i64, i64) #1 +declare i64 @llvm.hexagon.A4.ornp(i64, i64) #1 +declare i64 @llvm.hexagon.A4.andnp(i64, i64) #1 +declare i32 @llvm.hexagon.A2.addh.l16.ll(i32, i32) #1 +declare i32 @llvm.hexagon.A2.addh.l16.hl(i32, i32) #1 +declare i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32, i32) #1 +declare i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32, i32) #1 +declare i32 @llvm.hexagon.A2.addh.h16.ll(i32, i32) #1 +declare i32 
@llvm.hexagon.A2.addh.h16.lh(i32, i32) #1 +declare i32 @llvm.hexagon.A2.addh.h16.hl(i32, i32) #1 +declare i32 @llvm.hexagon.A2.addh.h16.hh(i32, i32) #1 +declare i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32, i32) #1 +declare i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32, i32) #1 +declare i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32, i32) #1 +declare i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32, i32) #1 +declare i32 @llvm.hexagon.A2.subh.l16.ll(i32, i32) #1 +declare i32 @llvm.hexagon.A2.subh.l16.hl(i32, i32) #1 +declare i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32, i32) #1 +declare i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32, i32) #1 +declare i32 @llvm.hexagon.A2.subh.h16.ll(i32, i32) #1 +declare i32 @llvm.hexagon.A2.subh.h16.lh(i32, i32) #1 +declare i32 @llvm.hexagon.A2.subh.h16.hl(i32, i32) #1 +declare i32 @llvm.hexagon.A2.subh.h16.hh(i32, i32) #1 +declare i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32, i32) #1 +declare i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32, i32) #1 +declare i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32, i32) #1 +declare i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32, i32) #1 +declare i64 @llvm.hexagon.A4.bitsplit(i32, i32) #1 +declare i32 @llvm.hexagon.A4.modwrapu(i32, i32) #1 +declare i32 @llvm.hexagon.S2.parityp(i64, i64) #1 +declare i32 @llvm.hexagon.S4.parity(i32, i32) #1 +declare i32 @llvm.hexagon.S4.andi.asl.ri(i32, i32, i32) #1 +declare i32 @llvm.hexagon.S4.ori.asl.ri(i32, i32, i32) #1 +declare i32 @llvm.hexagon.S4.addi.asl.ri(i32, i32, i32) #1 +declare i32 @llvm.hexagon.S4.subi.asl.ri(i32, i32, i32) #1 +declare i32 @llvm.hexagon.S4.andi.lsr.ri(i32, i32, i32) #1 +declare i32 @llvm.hexagon.S4.ori.lsr.ri(i32, i32, i32) #1 +declare i32 @llvm.hexagon.S4.addi.lsr.ri(i32, i32, i32) #1 +declare i32 @llvm.hexagon.S4.subi.lsr.ri(i32, i32, i32) #1 + +attributes #0 = { nounwind readnone "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { nounwind readnone } diff --git a/test/CodeGen/Hexagon/always-ext.ll b/test/CodeGen/Hexagon/always-ext.ll index 9c8d708ba877..8b4b2f5bf4f2 100644 --- a/test/CodeGen/Hexagon/always-ext.ll +++ b/test/CodeGen/Hexagon/always-ext.ll @@ -1,3 +1,4 @@ +; XFAIL: ; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s ; Check that we don't generate an invalid packet with too many instructions @@ -7,7 +8,7 @@ ; CHECK: { ; CHECK-NOT: call abort ; CHECK: memw(##0) -; CHECK: memw(r{{[0-9+]}}<<#2+##4) +; CHECK: memw(r{{[0-9+]}}<<#2 + ##4) ; CHECK: } %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111 = type { i8*, void (%struct.CuTest.1.28.31.37.40.43.52.55.67.85.111*)*, i32, i32, i8*, [23 x i32]* } @@ -23,8 +24,8 @@ entry: br i1 undef, label %for.body.us, label %for.end for.body.us: ; preds = %entry - %0 = load %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111** null, align 4 - %1 = load i32* undef, align 4 + %0 = load %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111*, %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111** null, align 4 + %1 = load i32, i32* undef, align 4 %cmp.i.us = icmp slt i32 %1, 1024 br i1 %cmp.i.us, label %CuSuiteAdd.exit.us, label %cond.false6.i.us @@ -33,7 +34,7 @@ cond.false6.i.us: ; preds = %for.body.us unreachable CuSuiteAdd.exit.us: ; preds = %for.body.us - %arrayidx.i.us = getelementptr inbounds %struct.CuSuite.2.29.32.38.41.44.53.56.68.86.112* null, i32 0, i32 1, i32 %1 + %arrayidx.i.us = getelementptr inbounds %struct.CuSuite.2.29.32.38.41.44.53.56.68.86.112, 
%struct.CuSuite.2.29.32.38.41.44.53.56.68.86.112* null, i32 0, i32 1, i32 %1 store %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111* %0, %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111** %arrayidx.i.us, align 4 call void @llvm.trap() unreachable diff --git a/test/CodeGen/Hexagon/block-addr.ll b/test/CodeGen/Hexagon/block-addr.ll index dc0d6e60fd28..eda167a67f28 100644 --- a/test/CodeGen/Hexagon/block-addr.ll +++ b/test/CodeGen/Hexagon/block-addr.ll @@ -1,7 +1,8 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s -; CHECK: r{{[0-9]+}} = CONST32(#.LJTI{{[0-9]+_[0-9]+}}) -; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}} + r{{[0-9]+<<#[0-9]+}}) +; Allow combine(..##JTI..): +; CHECK: r{{[0-9]+}}{{.*}} = {{.*}}#.LJTI +; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+<<#[0-9]+}}) ; CHECK: jumpr r{{[0-9]+}} define void @main() #0 { @@ -10,7 +11,7 @@ entry: br label %while.body while.body: - %ret.0.load17 = load volatile i32* %ret, align 4 + %ret.0.load17 = load volatile i32, i32* %ret, align 4 switch i32 %ret.0.load17, label %label6 [ i32 0, label %label0 i32 1, label %label1 @@ -21,37 +22,37 @@ while.body: ] label0: - %ret.0.load18 = load volatile i32* %ret, align 4 + %ret.0.load18 = load volatile i32, i32* %ret, align 4 %inc = add nsw i32 %ret.0.load18, 1 store volatile i32 %inc, i32* %ret, align 4 br label %while.body label1: - %ret.0.load19 = load volatile i32* %ret, align 4 + %ret.0.load19 = load volatile i32, i32* %ret, align 4 %inc2 = add nsw i32 %ret.0.load19, 1 store volatile i32 %inc2, i32* %ret, align 4 br label %while.body label2: - %ret.0.load20 = load volatile i32* %ret, align 4 + %ret.0.load20 = load volatile i32, i32* %ret, align 4 %inc4 = add nsw i32 %ret.0.load20, 1 store volatile i32 %inc4, i32* %ret, align 4 br label %while.body label3: - %ret.0.load21 = load volatile i32* %ret, align 4 + %ret.0.load21 = load volatile i32, i32* %ret, align 4 %inc6 = add nsw i32 %ret.0.load21, 1 store volatile i32 %inc6, i32* %ret, align 4 br label %while.body label4: - %ret.0.load22 = load volatile i32* %ret, align 4 + %ret.0.load22 = load volatile i32, i32* %ret, align 4 %inc8 = add nsw i32 %ret.0.load22, 1 store volatile i32 %inc8, i32* %ret, align 4 br label %while.body label5: - %ret.0.load23 = load volatile i32* %ret, align 4 + %ret.0.load23 = load volatile i32, i32* %ret, align 4 %inc10 = add nsw i32 %ret.0.load23, 1 store volatile i32 %inc10, i32* %ret, align 4 br label %while.body diff --git a/test/CodeGen/Hexagon/brev_ld.ll b/test/CodeGen/Hexagon/brev_ld.ll new file mode 100644 index 000000000000..12edb4c2b8f7 --- /dev/null +++ b/test/CodeGen/Hexagon/brev_ld.ll @@ -0,0 +1,140 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; RUN: llc -march=hexagon -verify-machineinstrs=true < %s | FileCheck %s +; Testing bitreverse load intrinsics: +; Q6_bitrev_load_update_D(inputLR, pDelay, nConvLength); +; Q6_bitrev_load_update_W(inputLR, pDelay, nConvLength); +; Q6_bitrev_load_update_H(inputLR, pDelay, nConvLength); +; Q6_bitrev_load_update_UH(inputLR, pDelay, nConvLength); +; Q6_bitrev_load_update_UB(inputLR, pDelay, nConvLength); +; Q6_bitrev_load_update_B(inputLR, pDelay, nConvLength); +; producing these instructions: +; r3:2 = memd(r0++m0:brev) +; r1 = memw(r0++m0:brev) +; r1 = memh(r0++m0:brev) +; r1 = memuh(r0++m0:brev) +; r1 = memub(r0++m0:brev) +; r1 = memb(r0++m0:brev) + +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32" +target triple = "hexagon" + +define i64 @foo(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 
signext %filtMemIndex) nounwind { +entry: + %inputLR = alloca i64, align 8 + %conv = zext i16 %filtMemLen to i32 + %shr1 = lshr i32 %conv, 1 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %1 = bitcast i64* %inputLR to i8* + %sub = sub i32 13, %shr1 + %shl = shl i32 1, %sub +; CHECK: memd(r{{[0-9]*}} ++ m{{[0-1]}}:brev) + %2 = call i8* @llvm.hexagon.brev.ldd(i8* %0, i8* %1, i32 %shl) + %3 = bitcast i8* %2 to i64* + %4 = load i64, i64* %3, align 8, !tbaa !0 + ret i64 %4 +} + +declare i8* @llvm.hexagon.brev.ldd(i8*, i8*, i32) nounwind + +define i32 @foo1(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { +entry: + %inputLR = alloca i32, align 4 + %conv = zext i16 %filtMemLen to i32 + %shr1 = lshr i32 %conv, 1 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %1 = bitcast i32* %inputLR to i8* + %sub = sub i32 14, %shr1 + %shl = shl i32 1, %sub +; CHECK: memw(r{{[0-9]*}} ++ m{{[0-1]}}:brev) + %2 = call i8* @llvm.hexagon.brev.ldw(i8* %0, i8* %1, i32 %shl) + %3 = bitcast i8* %2 to i32* + %4 = load i32, i32* %3, align 4, !tbaa !2 + ret i32 %4 +} + +declare i8* @llvm.hexagon.brev.ldw(i8*, i8*, i32) nounwind + +define signext i16 @foo2(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { +entry: + %inputLR = alloca i16, align 2 + %conv = zext i16 %filtMemLen to i32 + %shr1 = lshr i32 %conv, 1 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %1 = bitcast i16* %inputLR to i8* + %sub = sub i32 15, %shr1 + %shl = shl i32 1, %sub +; CHECK: memh(r{{[0-9]*}} ++ m0:brev) + %2 = call i8* @llvm.hexagon.brev.ldh(i8* %0, i8* %1, i32 %shl) + %3 = bitcast i8* %2 to i16* + %4 = load i16, i16* %3, align 2, !tbaa !3 + ret i16 %4 +} + +declare i8* @llvm.hexagon.brev.ldh(i8*, i8*, i32) nounwind + +define zeroext i16 @foo3(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { +entry: + %inputLR = alloca i16, align 2 + %conv = zext i16 %filtMemLen to i32 + %shr1 = lshr i32 %conv, 1 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %1 = bitcast i16* %inputLR to i8* + %sub = sub i32 15, %shr1 + %shl = shl i32 1, %sub +; CHECK: memuh(r{{[0-9]*}} ++ m0:brev) + %2 = call i8* @llvm.hexagon.brev.lduh(i8* %0, i8* %1, i32 %shl) + %3 = bitcast i8* %2 to i16* + %4 = load i16, i16* %3, align 2, !tbaa !3 + ret i16 %4 +} + +declare i8* @llvm.hexagon.brev.lduh(i8*, i8*, i32) nounwind + +define zeroext i8 @foo4(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { +entry: + %inputLR = alloca i8, align 1 + %conv = zext i16 %filtMemLen to i32 + %shr1 = lshr i32 %conv, 1 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %sub = sub nsw i32 16, %shr1 + %shl = shl i32 1, %sub +; CHECK: memub(r{{[0-9]*}} ++ m{{[0-1]}}:brev) + %1 = call i8* @llvm.hexagon.brev.ldub(i8* %0, i8* %inputLR, i32 %shl) + %2 = load i8, i8* %1, align 1, !tbaa !0 + ret i8 %2 +} + +declare i8* @llvm.hexagon.brev.ldub(i8*, i8*, i32) nounwind + +define zeroext i8 @foo5(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { 
+entry: + %inputLR = alloca i8, align 1 + %conv = zext i16 %filtMemLen to i32 + %shr1 = lshr i32 %conv, 1 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %sub = sub nsw i32 16, %shr1 + %shl = shl i32 1, %sub +; CHECK: memb(r{{[0-9]*}} ++ m{{[0-1]}}:brev) + %1 = call i8* @llvm.hexagon.brev.ldb(i8* %0, i8* %inputLR, i32 %shl) + %2 = load i8, i8* %1, align 1, !tbaa !0 + ret i8 %2 +} + +declare i8* @llvm.hexagon.brev.ldb(i8*, i8*, i32) nounwind + +!0 = !{!"omnipotent char", !1} +!1 = !{!"Simple C/C++ TBAA"} +!2 = !{!"int", !0} +!3 = !{!"short", !0} diff --git a/test/CodeGen/Hexagon/brev_st.ll b/test/CodeGen/Hexagon/brev_st.ll new file mode 100644 index 000000000000..b80579185317 --- /dev/null +++ b/test/CodeGen/Hexagon/brev_st.ll @@ -0,0 +1,112 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; RUN: llc -march=hexagon -verify-machineinstrs=true < %s | FileCheck %s +; Test these 5 bitreverse store intrinsics: +; Q6_bitrev_store_update_D(inputLR, pDelay, nConvLength); +; Q6_bitrev_store_update_W(inputLR, pDelay, nConvLength); +; Q6_bitrev_store_update_HL(inputLR, pDelay, nConvLength); +; Q6_bitrev_store_update_HH(inputLR, pDelay, nConvLength); +; Q6_bitrev_store_update_B(inputLR, pDelay, nConvLength); +; producing these instructions: +; memd(r0++m0:brev) = r1:0 +; memw(r0++m0:brev) = r0 +; memh(r0++m0:brev) = r3 +; memh(r0++m0:brev) = r3.h +; memb(r0++m0:brev) = r3 + +; ModuleID = 'brev_st.i' +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32" +target triple = "hexagon" + +define i64 @foo(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { +entry: + %conv = zext i16 %filtMemLen to i32 + %shr2 = lshr i32 %conv, 1 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %sub = sub i32 13, %shr2 + %shl = shl i32 1, %sub +; CHECK: memd(r{{[0-9]*}} ++ m{{[0-1]}}:brev) + %1 = tail call i8* @llvm.hexagon.brev.std(i8* %0, i64 undef, i32 %shl) + %2 = bitcast i8* %1 to i64* + %3 = load i64, i64* %2, align 8, !tbaa !0 + ret i64 %3 +} + +declare i8* @llvm.hexagon.brev.std(i8*, i64, i32) nounwind + +define i32 @foo1(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { +entry: + %conv = zext i16 %filtMemLen to i32 + %shr1 = lshr i32 %conv, 1 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %sub = sub i32 14, %shr1 + %shl = shl i32 1, %sub +; CHECK: memw(r{{[0-9]*}} ++ m{{[0-1]}}:brev) + %1 = tail call i8* @llvm.hexagon.brev.stw(i8* %0, i32 undef, i32 %shl) + %2 = bitcast i8* %1 to i32* + %3 = load i32, i32* %2, align 4, !tbaa !2 + ret i32 %3 +} + +declare i8* @llvm.hexagon.brev.stw(i8*, i32, i32) nounwind + +define signext i16 @foo2(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { +entry: + %conv = zext i16 %filtMemLen to i32 + %shr2 = lshr i32 %conv, 1 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %sub = sub i32 15, %shr2 + %shl = shl i32 1, %sub +; CHECK: memh(r{{[0-9]*}} ++ m{{[0-1]}}:brev) + %1 = tail call i8* @llvm.hexagon.brev.sth(i8* %0, i32 0, i32 %shl) + %2 = bitcast i8* %1 to i16* + %3 = load i16, i16* %2, align 2, !tbaa !3 + ret 
i16 %3 +} + +declare i8* @llvm.hexagon.brev.sth(i8*, i32, i32) nounwind + +define signext i16 @foo3(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { +entry: + %conv = zext i16 %filtMemLen to i32 + %shr2 = lshr i32 %conv, 1 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %sub = sub i32 15, %shr2 + %shl = shl i32 1, %sub +; CHECK: memh(r{{[0-9]*}} ++ m{{[0-1]}}:brev){{ *}}={{ *}}r{{[0-9]*}}.h + %1 = tail call i8* @llvm.hexagon.brev.sthhi(i8* %0, i32 0, i32 %shl) + %2 = bitcast i8* %1 to i16* + %3 = load i16, i16* %2, align 2, !tbaa !3 + ret i16 %3 +} + +declare i8* @llvm.hexagon.brev.sthhi(i8*, i32, i32) nounwind + +define zeroext i8 @foo5(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { +entry: + %conv = zext i16 %filtMemLen to i32 + %shr2 = lshr i32 %conv, 1 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %sub = sub nsw i32 16, %shr2 + ; CHECK: memb(r{{[0-9]*}} ++ m{{[0-1]}}:brev) + %shl = shl i32 1, %sub + %1 = tail call i8* @llvm.hexagon.brev.stb(i8* %0, i32 0, i32 %shl) + %2 = load i8, i8* %1, align 1, !tbaa !0 + ret i8 %2 +} + +declare i8* @llvm.hexagon.brev.stb(i8*, i32, i32) nounwind + +!0 = !{!"omnipotent char", !1} +!1 = !{!"Simple C/C++ TBAA"} +!2 = !{!"int", !0} +!3 = !{!"short", !0} diff --git a/test/CodeGen/Hexagon/calling-conv-2.ll b/test/CodeGen/Hexagon/calling-conv-2.ll new file mode 100644 index 000000000000..3c68c88bd711 --- /dev/null +++ b/test/CodeGen/Hexagon/calling-conv-2.ll @@ -0,0 +1,13 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 <%s | \ +; RUN: FileCheck %s --check-prefix=CHECK-ONE + +%struct.test_struct = type { i32, i8, i64 } + +; CHECK-ONE: r1 = #45 +define void @foo(%struct.test_struct* noalias nocapture sret %agg.result, i32 %a) #0 { +entry: + call void @bar(%struct.test_struct* sret %agg.result, i32 45) #2 + ret void +} + +declare void @bar(%struct.test_struct* sret, i32) #1 diff --git a/test/CodeGen/Hexagon/calling-conv.ll b/test/CodeGen/Hexagon/calling-conv.ll new file mode 100644 index 000000000000..7133c1ae7aad --- /dev/null +++ b/test/CodeGen/Hexagon/calling-conv.ll @@ -0,0 +1,73 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 <%s | \ +; RUN: FileCheck %s --check-prefix=CHECK-ONE +; RUN: llc -march=hexagon -mcpu=hexagonv5 <%s | \ +; RUN: FileCheck %s --check-prefix=CHECK-TWO +; RUN: llc -march=hexagon -mcpu=hexagonv5 <%s | \ +; RUN: FileCheck %s --check-prefix=CHECK-THREE + +%struct.test_struct = type { i32, i8, i64 } +%struct.test_struct_long = type { i8, i64 } + +@mystruct = external global %struct.test_struct*, align 4 + +; CHECK-ONE: memw(r29+#48) = r2 +; CHECK-TWO: memw(r29+#52) = r2 +; CHECK-THREE: memw(r29+#56) = r2 +; Function Attrs: nounwind +define void @foo(%struct.test_struct* noalias sret %agg.result, i32 %a, i8 zeroext %c, %struct.test_struct* byval %s, %struct.test_struct_long* byval %t) #0 { +entry: + %a.addr = alloca i32, align 4 + %c.addr = alloca i8, align 1 + %z = alloca i32, align 4 + %ret = alloca %struct.test_struct, align 8 + store i32 %a, i32* %a.addr, align 4 + store i8 %c, i8* %c.addr, align 1 + %0 = bitcast i32* %z to i8* + call void @llvm.lifetime.start(i64 4, i8* %0) #1 + store i32 45, i32* %z, align 4 + %1 = bitcast %struct.test_struct* %ret to i8* + call void @llvm.lifetime.start(i64 16, i8* %1) #1 + %2 = load i32, i32* %z, align 4 + %3 = load 
%struct.test_struct*, %struct.test_struct** @mystruct, align 4 + %4 = load %struct.test_struct*, %struct.test_struct** @mystruct, align 4 + %5 = load i8, i8* %c.addr, align 1 + %6 = load i32, i32* %a.addr, align 4 + %conv = sext i32 %6 to i64 + %add = add nsw i64 %conv, 1 + %7 = load i32, i32* %a.addr, align 4 + %add1 = add nsw i32 %7, 2 + %8 = load i32, i32* %a.addr, align 4 + %conv2 = sext i32 %8 to i64 + %add3 = add nsw i64 %conv2, 3 + %9 = load i8, i8* %c.addr, align 1 + %10 = load i8, i8* %c.addr, align 1 + %11 = load i8, i8* %c.addr, align 1 + %12 = load i32, i32* %z, align 4 + call void @bar(%struct.test_struct* sret %ret, i32 %2, %struct.test_struct* byval %3, %struct.test_struct* byval %4, i8 zeroext %5, i64 %add, i32 %add1, i64 %add3, i8 zeroext %9, i8 zeroext %10, i8 zeroext %11, i32 %12) + %x = getelementptr inbounds %struct.test_struct, %struct.test_struct* %ret, i32 0, i32 0 + store i32 20, i32* %x, align 4 + %13 = bitcast %struct.test_struct* %agg.result to i8* + %14 = bitcast %struct.test_struct* %ret to i8* + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %13, i8* %14, i32 16, i32 8, i1 false) + %15 = bitcast %struct.test_struct* %ret to i8* + call void @llvm.lifetime.end(i64 16, i8* %15) #1 + %16 = bitcast i32* %z to i8* + call void @llvm.lifetime.end(i64 4, i8* %16) #1 + ret void +} + +; Function Attrs: nounwind +declare void @llvm.lifetime.start(i64, i8* nocapture) #1 + +declare void @bar(%struct.test_struct* sret, i32, %struct.test_struct* byval, %struct.test_struct* byval, i8 zeroext, i64, i32, i64, i8 zeroext, i8 zeroext, i8 zeroext, i32) #2 + +; Function Attrs: nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #1 + +; Function Attrs: nounwind +declare void @llvm.lifetime.end(i64, i8* nocapture) #1 + +attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv4" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { nounwind } +attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv4" "unsafe-fp-math"="false" "use-soft-float"="false" } + diff --git a/test/CodeGen/Hexagon/cext-check.ll b/test/CodeGen/Hexagon/cext-check.ll index b7181d803f71..19b91c5245b2 100644 --- a/test/CodeGen/Hexagon/cext-check.ll +++ b/test/CodeGen/Hexagon/cext-check.ll @@ -7,19 +7,19 @@ define i32 @cext_test1(i32* %a) nounwind { ; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}{{ *}}+{{ *}}##4092) ; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}##300) entry: - %0 = load i32* %a, align 4 + %0 = load i32, i32* %a, align 4 %tobool = icmp ne i32 %0, 0 br i1 %tobool, label %if.then, label %if.end if.then: - %arrayidx1 = getelementptr inbounds i32* %a, i32 2000 - %1 = load i32* %arrayidx1, align 4 + %arrayidx1 = getelementptr inbounds i32, i32* %a, i32 2000 + %1 = load i32, i32* %arrayidx1, align 4 %add = add nsw i32 %1, 300000 br label %return if.end: - %arrayidx2 = getelementptr inbounds i32* %a, i32 1023 - %2 = load i32* %arrayidx2, align 4 + %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1023 + %2 = load i32, i32* %arrayidx2, align 4 %add3 = add nsw i32 %2, 300 br label %return @@ -38,15 +38,15 @@ entry: br i1 %tobool, label %if.then, label %if.end if.then: - %arrayidx = 
getelementptr inbounds i8* %a, i32 1023 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %a, i32 1023 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 300000 br label %return if.end: - %arrayidx1 = getelementptr inbounds i8* %a, i32 1024 - %1 = load i8* %arrayidx1, align 1 + %arrayidx1 = getelementptr inbounds i8, i8* %a, i32 1024 + %1 = load i8, i8* %arrayidx1, align 1 %conv2 = zext i8 %1 to i32 %add3 = add nsw i32 %conv2, 6000 br label %return diff --git a/test/CodeGen/Hexagon/cext-valid-packet2.ll b/test/CodeGen/Hexagon/cext-valid-packet2.ll index 2788a6b1c865..2eba74329960 100644 --- a/test/CodeGen/Hexagon/cext-valid-packet2.ll +++ b/test/CodeGen/Hexagon/cext-valid-packet2.ll @@ -10,31 +10,31 @@ define i32 @test(i32* nocapture %a, i32* nocapture %b, i32 %c) nounwind { entry: %add = add nsw i32 %c, 200002 - %0 = load i32* %a, align 4 + %0 = load i32, i32* %a, align 4 %add1 = add nsw i32 %0, 200000 - %arrayidx2 = getelementptr inbounds i32* %a, i32 3000 + %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 3000 store i32 %add1, i32* %arrayidx2, align 4 - %1 = load i32* %b, align 4 + %1 = load i32, i32* %b, align 4 %add4 = add nsw i32 %1, 200001 - %arrayidx5 = getelementptr inbounds i32* %a, i32 1 + %arrayidx5 = getelementptr inbounds i32, i32* %a, i32 1 store i32 %add4, i32* %arrayidx5, align 4 - %arrayidx7 = getelementptr inbounds i32* %b, i32 1 - %2 = load i32* %arrayidx7, align 4 + %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 1 + %2 = load i32, i32* %arrayidx7, align 4 %cmp = icmp sgt i32 %add4, %2 br i1 %cmp, label %if.then, label %if.else if.then: ; preds = %entry - %arrayidx8 = getelementptr inbounds i32* %a, i32 2 - %3 = load i32* %arrayidx8, align 4 - %arrayidx9 = getelementptr inbounds i32* %b, i32 2000 - %4 = load i32* %arrayidx9, align 4 + %arrayidx8 = getelementptr inbounds i32, i32* %a, i32 2 + %3 = load i32, i32* %arrayidx8, align 4 + %arrayidx9 = getelementptr inbounds i32, i32* %b, i32 2000 + %4 = load i32, i32* %arrayidx9, align 4 %sub = sub nsw i32 %3, %4 - %arrayidx10 = getelementptr inbounds i32* %a, i32 4000 + %arrayidx10 = getelementptr inbounds i32, i32* %a, i32 4000 store i32 %sub, i32* %arrayidx10, align 4 br label %if.end if.else: ; preds = %entry - %arrayidx11 = getelementptr inbounds i32* %b, i32 3200 + %arrayidx11 = getelementptr inbounds i32, i32* %b, i32 3200 store i32 %add, i32* %arrayidx11, align 4 br label %if.end diff --git a/test/CodeGen/Hexagon/circ_ld.ll b/test/CodeGen/Hexagon/circ_ld.ll new file mode 100644 index 000000000000..6d372403ca7a --- /dev/null +++ b/test/CodeGen/Hexagon/circ_ld.ll @@ -0,0 +1,135 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; Testing for these 6 variants of circular load: +; Q6_circ_load_update_B(inputLR, pDelay, -1, nConvLength, 4); +; Q6_circ_load_update_D(inputLR, pDelay, -1, nConvLength, 4); +; Q6_circ_load_update_H(inputLR, pDelay, -1, nConvLength, 4); +; Q6_circ_load_update_UB(inputLR, pDelay, -1, nConvLength, 4); +; Q6_circ_load_update_UH(inputLR, pDelay, -1, nConvLength, 4); +; Q6_circ_load_update_W(inputLR, pDelay, -1, nConvLength, 4); +; producing these: +; r0 = memb(r1++#-1:circ(m0)) +; r3:2 = memd(r1++#-8:circ(m0)) +; r0 = memh(r1++#-2:circ(m0)) +; r0 = memub(r1++#-1:circ(m0)) +; r0 = memuh(r1++#-2:circ(m0)) +; r0 = memw(r1++#-4:circ(m0)) + +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32" +target triple = "hexagon" + +define zeroext i8 @foo1(i16 
zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { +entry: + %inputLR = alloca i8, align 1 + %conv = zext i16 %filtMemLen to i32 + %shr1 = lshr i32 %conv, 1 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %or = or i32 %shr1, 33554432 +; CHECK: memb(r{{[0-9]*.}}++{{.}}#-1:circ(m{{[0-1]}})) + %1 = call i8* @llvm.hexagon.circ.ldb(i8* %0, i8* %inputLR, i32 %or, i32 -1) + %2 = load i8, i8* %1, align 1, !tbaa !0 + ret i8 %2 +} + +declare i8* @llvm.hexagon.circ.ldb(i8*, i8*, i32, i32) nounwind + +define i64 @foo2(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { +entry: + %inputLR = alloca i64, align 8 + %conv = zext i16 %filtMemLen to i32 + %shr1 = lshr i32 %conv, 1 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %1 = bitcast i64* %inputLR to i8* + %shl = shl nuw nsw i32 %shr1, 3 + %or = or i32 %shl, 83886080 +; CHECK: memd(r{{[0-9]*.}}++{{.}}#-8:circ(m{{[0-1]}})) + %2 = call i8* @llvm.hexagon.circ.ldd(i8* %0, i8* %1, i32 %or, i32 -8) + %3 = bitcast i8* %2 to i64* + %4 = load i64, i64* %3, align 8, !tbaa !0 + ret i64 %4 +} + +declare i8* @llvm.hexagon.circ.ldd(i8*, i8*, i32, i32) nounwind + +define signext i16 @foo3(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { +entry: + %inputLR = alloca i16, align 2 + %conv = zext i16 %filtMemLen to i32 + %shr1 = and i32 %conv, 65534 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %1 = bitcast i16* %inputLR to i8* + %or = or i32 %shr1, 50331648 +; CHECK: memh(r{{[0-9]*.}}++{{.}}#-2:circ(m{{[0-1]}})) + %2 = call i8* @llvm.hexagon.circ.ldh(i8* %0, i8* %1, i32 %or, i32 -2) + %3 = bitcast i8* %2 to i16* + %4 = load i16, i16* %3, align 2, !tbaa !2 + ret i16 %4 +} + +declare i8* @llvm.hexagon.circ.ldh(i8*, i8*, i32, i32) nounwind + +define zeroext i8 @foo4(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { +entry: + %inputLR = alloca i8, align 1 + %conv = zext i16 %filtMemLen to i32 + %shr1 = lshr i32 %conv, 1 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %or = or i32 %shr1, 33554432 +; CHECK: memub(r{{[0-9]*.}}++{{.}}#-1:circ(m{{[0-1]}})) + %1 = call i8* @llvm.hexagon.circ.ldub(i8* %0, i8* %inputLR, i32 %or, i32 -1) + %2 = load i8, i8* %1, align 1, !tbaa !0 + ret i8 %2 +} + +declare i8* @llvm.hexagon.circ.ldub(i8*, i8*, i32, i32) nounwind + +define zeroext i16 @foo5(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { +entry: + %inputLR = alloca i16, align 2 + %conv = zext i16 %filtMemLen to i32 + %shr1 = and i32 %conv, 65534 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %1 = bitcast i16* %inputLR to i8* + %or = or i32 %shr1, 50331648 +; CHECK: memuh(r{{[0-9]*.}}++{{.}}#-2:circ(m{{[0-1]}})) + %2 = call i8* @llvm.hexagon.circ.lduh(i8* %0, i8* %1, i32 %or, i32 -2) + %3 = bitcast i8* %2 to i16* + %4 = load i16, i16* %3, align 2, !tbaa !2 + ret i16 %4 +} + +declare i8* @llvm.hexagon.circ.lduh(i8*, i8*, i32, i32) nounwind + +define i32 @foo6(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext 
%filtMemIndex) nounwind { +entry: + %inputLR = alloca i32, align 4 + %conv = zext i16 %filtMemLen to i32 + %shr1 = lshr i32 %conv, 1 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %1 = bitcast i32* %inputLR to i8* + %shl = shl nuw nsw i32 %shr1, 2 + %or = or i32 %shl, 67108864 +; CHECK: memw(r{{[0-9]*.}}++{{.}}#-4:circ(m{{[0-1]}})) + %2 = call i8* @llvm.hexagon.circ.ldw(i8* %0, i8* %1, i32 %or, i32 -4) + %3 = bitcast i8* %2 to i32* + %4 = load i32, i32* %3, align 4, !tbaa !3 + ret i32 %4 +} + +declare i8* @llvm.hexagon.circ.ldw(i8*, i8*, i32, i32) nounwind + +!0 = !{!"omnipotent char", !1} +!1 = !{!"Simple C/C++ TBAA"} +!2 = !{!"short", !0} +!3 = !{!"int", !0} diff --git a/test/CodeGen/Hexagon/circ_ldd_bug.ll b/test/CodeGen/Hexagon/circ_ldd_bug.ll new file mode 100644 index 000000000000..d15b5c964eb7 --- /dev/null +++ b/test/CodeGen/Hexagon/circ_ldd_bug.ll @@ -0,0 +1,255 @@ +; RUN: llc -O2 < %s +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32" +target triple = "hexagon" + +; We would fail on this file with: +; Unimplemented +; UNREACHABLE executed at llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp:615! +; This happened because after unrolling a loop with a ldd_circ instruction we +; would have several TFCR and ldd_circ instruction sequences. +; %vreg0 (CRRegs) = TFCR %vreg0 (IntRegs) +; = ldd_circ( , , vreg0) +; %vreg1 (CRRegs) = TFCR %vreg1 (IntRegs) +; = ldd_circ( , , vreg0) +; The scheduler would move the CRRegs to the top of the loop. The allocator +; would try to spill the CRRegs after running out of them. We don't have code to +; spill CRRegs and the above assertion would be triggered. 
+declare i8* @llvm.hexagon.circ.ldd(i8*, i8*, i32, i32) nounwind + +define i32 @test(i16 zeroext %var0, i16* %var1, i16 signext %var2, i16* nocapture %var3) nounwind { +entry: + %var4 = alloca i64, align 8 + %conv = zext i16 %var0 to i32 + %shr5 = lshr i32 %conv, 1 + %idxprom = sext i16 %var2 to i32 + %arrayidx = getelementptr inbounds i16, i16* %var1, i32 %idxprom + %0 = bitcast i16* %var3 to i64* + %1 = load i64, i64* %0, align 8, !tbaa !1 + %2 = bitcast i16* %arrayidx to i8* + %3 = bitcast i64* %var4 to i8* + %shl = shl nuw nsw i32 %shr5, 3 + %or = or i32 %shl, 83886080 + %4 = call i8* @llvm.hexagon.circ.ldd(i8* %2, i8* %3, i32 %or, i32 -8) + %sub = add nsw i32 %shr5, -1 + %cmp6 = icmp sgt i32 %sub, 0 + %5 = load i64, i64* %var4, align 8, !tbaa !1 + %6 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 0, i64 %1, i64 %5) + br i1 %cmp6, label %for.body.lr.ph, label %for.end + +for.body.lr.ph: ; preds = %entry + %incdec.ptr = getelementptr inbounds i16, i16* %var3, i32 4 + %7 = bitcast i16* %incdec.ptr to i64* + %8 = zext i16 %var0 to i32 + %9 = lshr i32 %8, 1 + %10 = add i32 %9, -1 + %xtraiter = urem i32 %10, 8 + %lcmp = icmp ne i32 %xtraiter, 0 + br i1 %lcmp, label %unr.cmp60, label %for.body.lr.ph.split.split + +unr.cmp60: ; preds = %for.body.lr.ph + %un.tmp61 = icmp eq i32 %xtraiter, 1 + br i1 %un.tmp61, label %for.body.unr53, label %unr.cmp51 + +unr.cmp51: ; preds = %unr.cmp60 + %un.tmp52 = icmp eq i32 %xtraiter, 2 + br i1 %un.tmp52, label %for.body.unr44, label %unr.cmp42 + +unr.cmp42: ; preds = %unr.cmp51 + %un.tmp43 = icmp eq i32 %xtraiter, 3 + br i1 %un.tmp43, label %for.body.unr35, label %unr.cmp33 + +unr.cmp33: ; preds = %unr.cmp42 + %un.tmp34 = icmp eq i32 %xtraiter, 4 + br i1 %un.tmp34, label %for.body.unr26, label %unr.cmp24 + +unr.cmp24: ; preds = %unr.cmp33 + %un.tmp25 = icmp eq i32 %xtraiter, 5 + br i1 %un.tmp25, label %for.body.unr17, label %unr.cmp + +unr.cmp: ; preds = %unr.cmp24 + %un.tmp = icmp eq i32 %xtraiter, 6 + br i1 %un.tmp, label %for.body.unr13, label %for.body.unr + +for.body.unr: ; preds = %unr.cmp + %11 = call i8* @llvm.hexagon.circ.ldd(i8* %4, i8* %3, i32 %or, i32 -8) + %12 = load i64, i64* %7, align 8, !tbaa !1 + %inc.unr = add nsw i32 0, 1 + %incdec.ptr4.unr = getelementptr inbounds i64, i64* %7, i32 1 + %cmp.unr = icmp slt i32 %inc.unr, %sub + %13 = load i64, i64* %var4, align 8, !tbaa !1 + %14 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %6, i64 %12, i64 %13) + br label %for.body.unr13 + +for.body.unr13: ; preds = %for.body.unr, %unr.cmp + %15 = phi i64 [ %6, %unr.cmp ], [ %14, %for.body.unr ] + %pvar6.09.unr = phi i64* [ %7, %unr.cmp ], [ %incdec.ptr4.unr, %for.body.unr ] + %var8.0.in8.unr = phi i8* [ %4, %unr.cmp ], [ %11, %for.body.unr ] + %i.07.unr = phi i32 [ 0, %unr.cmp ], [ %inc.unr, %for.body.unr ] + %16 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr, i8* %3, i32 %or, i32 -8) + %17 = load i64, i64* %pvar6.09.unr, align 8, !tbaa !1 + %inc.unr14 = add nsw i32 %i.07.unr, 1 + %incdec.ptr4.unr15 = getelementptr inbounds i64, i64* %pvar6.09.unr, i32 1 + %cmp.unr16 = icmp slt i32 %inc.unr14, %sub + %18 = load i64, i64* %var4, align 8, !tbaa !1 + %19 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %15, i64 %17, i64 %18) + br label %for.body.unr17 + +for.body.unr17: ; preds = %for.body.unr13, %unr.cmp24 + %20 = phi i64 [ %6, %unr.cmp24 ], [ %19, %for.body.unr13 ] + %pvar6.09.unr18 = phi i64* [ %7, %unr.cmp24 ], [ %incdec.ptr4.unr15, %for.body.unr13 ] + %var8.0.in8.unr19 = phi i8* [ %4, %unr.cmp24 ], [ %16, %for.body.unr13 ] + %i.07.unr20 = phi i32 [ 0, %unr.cmp24 
], [ %inc.unr14, %for.body.unr13 ] + %21 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr19, i8* %3, i32 %or, i32 -8) + %22 = load i64, i64* %pvar6.09.unr18, align 8, !tbaa !1 + %inc.unr21 = add nsw i32 %i.07.unr20, 1 + %incdec.ptr4.unr22 = getelementptr inbounds i64, i64* %pvar6.09.unr18, i32 1 + %cmp.unr23 = icmp slt i32 %inc.unr21, %sub + %23 = load i64, i64* %var4, align 8, !tbaa !1 + %24 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %20, i64 %22, i64 %23) + br label %for.body.unr26 + +for.body.unr26: ; preds = %for.body.unr17, %unr.cmp33 + %25 = phi i64 [ %6, %unr.cmp33 ], [ %24, %for.body.unr17 ] + %pvar6.09.unr27 = phi i64* [ %7, %unr.cmp33 ], [ %incdec.ptr4.unr22, %for.body.unr17 ] + %var8.0.in8.unr28 = phi i8* [ %4, %unr.cmp33 ], [ %21, %for.body.unr17 ] + %i.07.unr29 = phi i32 [ 0, %unr.cmp33 ], [ %inc.unr21, %for.body.unr17 ] + %26 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr28, i8* %3, i32 %or, i32 -8) + %27 = load i64, i64* %pvar6.09.unr27, align 8, !tbaa !1 + %inc.unr30 = add nsw i32 %i.07.unr29, 1 + %incdec.ptr4.unr31 = getelementptr inbounds i64, i64* %pvar6.09.unr27, i32 1 + %cmp.unr32 = icmp slt i32 %inc.unr30, %sub + %28 = load i64, i64* %var4, align 8, !tbaa !1 + %29 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %25, i64 %27, i64 %28) + br label %for.body.unr35 + +for.body.unr35: ; preds = %for.body.unr26, %unr.cmp42 + %30 = phi i64 [ %6, %unr.cmp42 ], [ %29, %for.body.unr26 ] + %pvar6.09.unr36 = phi i64* [ %7, %unr.cmp42 ], [ %incdec.ptr4.unr31, %for.body.unr26 ] + %var8.0.in8.unr37 = phi i8* [ %4, %unr.cmp42 ], [ %26, %for.body.unr26 ] + %i.07.unr38 = phi i32 [ 0, %unr.cmp42 ], [ %inc.unr30, %for.body.unr26 ] + %31 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr37, i8* %3, i32 %or, i32 -8) + %32 = load i64, i64* %pvar6.09.unr36, align 8, !tbaa !1 + %inc.unr39 = add nsw i32 %i.07.unr38, 1 + %incdec.ptr4.unr40 = getelementptr inbounds i64, i64* %pvar6.09.unr36, i32 1 + %cmp.unr41 = icmp slt i32 %inc.unr39, %sub + %33 = load i64, i64* %var4, align 8, !tbaa !1 + %34 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %30, i64 %32, i64 %33) + br label %for.body.unr44 + +for.body.unr44: ; preds = %for.body.unr35, %unr.cmp51 + %35 = phi i64 [ %6, %unr.cmp51 ], [ %34, %for.body.unr35 ] + %pvar6.09.unr45 = phi i64* [ %7, %unr.cmp51 ], [ %incdec.ptr4.unr40, %for.body.unr35 ] + %var8.0.in8.unr46 = phi i8* [ %4, %unr.cmp51 ], [ %31, %for.body.unr35 ] + %i.07.unr47 = phi i32 [ 0, %unr.cmp51 ], [ %inc.unr39, %for.body.unr35 ] + %36 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr46, i8* %3, i32 %or, i32 -8) + %37 = load i64, i64* %pvar6.09.unr45, align 8, !tbaa !1 + %inc.unr48 = add nsw i32 %i.07.unr47, 1 + %incdec.ptr4.unr49 = getelementptr inbounds i64, i64* %pvar6.09.unr45, i32 1 + %cmp.unr50 = icmp slt i32 %inc.unr48, %sub + %38 = load i64, i64* %var4, align 8, !tbaa !1 + %39 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %35, i64 %37, i64 %38) + br label %for.body.unr53 + +for.body.unr53: ; preds = %for.body.unr44, %unr.cmp60 + %40 = phi i64 [ %6, %unr.cmp60 ], [ %39, %for.body.unr44 ] + %pvar6.09.unr54 = phi i64* [ %7, %unr.cmp60 ], [ %incdec.ptr4.unr49, %for.body.unr44 ] + %var8.0.in8.unr55 = phi i8* [ %4, %unr.cmp60 ], [ %36, %for.body.unr44 ] + %i.07.unr56 = phi i32 [ 0, %unr.cmp60 ], [ %inc.unr48, %for.body.unr44 ] + %41 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr55, i8* %3, i32 %or, i32 -8) + %42 = load i64, i64* %pvar6.09.unr54, align 8, !tbaa !1 + %inc.unr57 = add nsw i32 %i.07.unr56, 1 + %incdec.ptr4.unr58 = getelementptr inbounds i64, i64* 
%pvar6.09.unr54, i32 1 + %cmp.unr59 = icmp slt i32 %inc.unr57, %sub + %43 = load i64, i64* %var4, align 8, !tbaa !1 + %44 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %40, i64 %42, i64 %43) + br label %for.body.lr.ph.split + +for.body.lr.ph.split: ; preds = %for.body.unr53 + %45 = icmp ult i32 %10, 8 + br i1 %45, label %for.end.loopexit, label %for.body.lr.ph.split.split + +for.body.lr.ph.split.split: ; preds = %for.body.lr.ph.split, %for.body.lr.ph + %.unr = phi i64 [ %44, %for.body.lr.ph.split ], [ %6, %for.body.lr.ph ] + %pvar6.09.unr62 = phi i64* [ %incdec.ptr4.unr58, %for.body.lr.ph.split ], [ %7, %for.body.lr.ph ] + %var8.0.in8.unr63 = phi i8* [ %41, %for.body.lr.ph.split ], [ %4, %for.body.lr.ph ] + %i.07.unr64 = phi i32 [ %inc.unr57, %for.body.lr.ph.split ], [ 0, %for.body.lr.ph ] + %.lcssa12.unr = phi i64 [ %44, %for.body.lr.ph.split ], [ 0, %for.body.lr.ph ] + br label %for.body + +for.body: ; preds = %for.body, %for.body.lr.ph.split.split + %46 = phi i64 [ %.unr, %for.body.lr.ph.split.split ], [ %78, %for.body ] + %pvar6.09 = phi i64* [ %pvar6.09.unr62, %for.body.lr.ph.split.split ], [ %scevgep71, %for.body ] + %var8.0.in8 = phi i8* [ %var8.0.in8.unr63, %for.body.lr.ph.split.split ], [ %75, %for.body ] + %i.07 = phi i32 [ %i.07.unr64, %for.body.lr.ph.split.split ], [ %inc.7, %for.body ] + %47 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8, i8* %3, i32 %or, i32 -8) + %48 = load i64, i64* %pvar6.09, align 8, !tbaa !1 + %inc = add nsw i32 %i.07, 1 + %49 = load i64, i64* %var4, align 8, !tbaa !1 + %50 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %46, i64 %48, i64 %49) + %51 = call i8* @llvm.hexagon.circ.ldd(i8* %47, i8* %3, i32 %or, i32 -8) + %scevgep = getelementptr i64, i64* %pvar6.09, i32 1 + %52 = load i64, i64* %scevgep, align 8, !tbaa !1 + %inc.1 = add nsw i32 %inc, 1 + %53 = load i64, i64* %var4, align 8, !tbaa !1 + %54 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %50, i64 %52, i64 %53) + %55 = call i8* @llvm.hexagon.circ.ldd(i8* %51, i8* %3, i32 %or, i32 -8) + %scevgep65 = getelementptr i64, i64* %scevgep, i32 1 + %56 = load i64, i64* %scevgep65, align 8, !tbaa !1 + %inc.2 = add nsw i32 %inc.1, 1 + %57 = load i64, i64* %var4, align 8, !tbaa !1 + %58 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %54, i64 %56, i64 %57) + %59 = call i8* @llvm.hexagon.circ.ldd(i8* %55, i8* %3, i32 %or, i32 -8) + %scevgep66 = getelementptr i64, i64* %scevgep65, i32 1 + %60 = load i64, i64* %scevgep66, align 8, !tbaa !1 + %inc.3 = add nsw i32 %inc.2, 1 + %61 = load i64, i64* %var4, align 8, !tbaa !1 + %62 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %58, i64 %60, i64 %61) + %63 = call i8* @llvm.hexagon.circ.ldd(i8* %59, i8* %3, i32 %or, i32 -8) + %scevgep67 = getelementptr i64, i64* %scevgep66, i32 1 + %64 = load i64, i64* %scevgep67, align 8, !tbaa !1 + %inc.4 = add nsw i32 %inc.3, 1 + %65 = load i64, i64* %var4, align 8, !tbaa !1 + %66 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %62, i64 %64, i64 %65) + %67 = call i8* @llvm.hexagon.circ.ldd(i8* %63, i8* %3, i32 %or, i32 -8) + %scevgep68 = getelementptr i64, i64* %scevgep67, i32 1 + %68 = load i64, i64* %scevgep68, align 8, !tbaa !1 + %inc.5 = add nsw i32 %inc.4, 1 + %69 = load i64, i64* %var4, align 8, !tbaa !1 + %70 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %66, i64 %68, i64 %69) + %71 = call i8* @llvm.hexagon.circ.ldd(i8* %67, i8* %3, i32 %or, i32 -8) + %scevgep69 = getelementptr i64, i64* %scevgep68, i32 1 + %72 = load i64, i64* %scevgep69, align 8, !tbaa !1 + %inc.6 = add nsw i32 %inc.5, 1 + %73 = load i64, i64* %var4, align 8, !tbaa !1 + %74 = call i64 
@llvm.hexagon.M2.vdmacs.s1(i64 %70, i64 %72, i64 %73) + %75 = call i8* @llvm.hexagon.circ.ldd(i8* %71, i8* %3, i32 %or, i32 -8) + %scevgep70 = getelementptr i64, i64* %scevgep69, i32 1 + %76 = load i64, i64* %scevgep70, align 8, !tbaa !1 + %inc.7 = add nsw i32 %inc.6, 1 + %77 = load i64, i64* %var4, align 8, !tbaa !1 + %78 = call i64 @llvm.hexagon.M2.vdmacs.s1(i64 %74, i64 %76, i64 %77) + %cmp.7 = icmp slt i32 %inc.7, %sub + %scevgep71 = getelementptr i64, i64* %scevgep70, i32 1 + br i1 %cmp.7, label %for.body, label %for.end.loopexit.unr-lcssa + +for.end.loopexit.unr-lcssa: ; preds = %for.body + %.lcssa12.ph = phi i64 [ %78, %for.body ] + br label %for.end.loopexit + +for.end.loopexit: ; preds = %for.end.loopexit.unr-lcssa, %for.body.lr.ph.split + %.lcssa12 = phi i64 [ %44, %for.body.lr.ph.split ], [ %.lcssa12.ph, %for.end.loopexit.unr-lcssa ] + br label %for.end + +for.end: ; preds = %for.end.loopexit, %entry + %.lcssa = phi i64 [ %6, %entry ], [ %.lcssa12, %for.end.loopexit ] + %79 = call i32 @llvm.hexagon.S2.vrndpackwhs(i64 %.lcssa) + ret i32 %79 +} + +declare i64 @llvm.hexagon.M2.vdmacs.s1(i64, i64, i64) nounwind readnone + +declare i32 @llvm.hexagon.S2.vrndpackwhs(i64) nounwind readnone + +!0 = !{!"long long", !1} +!1 = !{!"omnipotent char", !2} +!2 = !{!"Simple C/C++ TBAA"} diff --git a/test/CodeGen/Hexagon/circ_ldw.ll b/test/CodeGen/Hexagon/circ_ldw.ll new file mode 100644 index 000000000000..4511a9cf69da --- /dev/null +++ b/test/CodeGen/Hexagon/circ_ldw.ll @@ -0,0 +1,18 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s +; CHECK: r{{[0-9]*}} = memw(r{{[0-9]*.}}++{{.}}#-4:circ(m0)) + + +%union.vect64 = type { i64 } +%union.vect32 = type { i32 } + +define i32* @HallowedBeThyName(%union.vect64* nocapture %pRx, %union.vect32* %pLut, %union.vect64* nocapture %pOut, i64 %dc.coerce, i32 %shift, i32 %numSamples) nounwind { +entry: + %vLutNext = alloca i32, align 4 + %0 = bitcast %union.vect32* %pLut to i8* + %1 = bitcast i32* %vLutNext to i8* + %2 = call i8* @llvm.hexagon.circ.ldw(i8* %0, i8* %1, i32 83886144, i32 -4) + %3 = bitcast i8* %2 to i32* + ret i32* %3 +} + +declare i8* @llvm.hexagon.circ.ldw(i8*, i8*, i32, i32) nounwind diff --git a/test/CodeGen/Hexagon/circ_st.ll b/test/CodeGen/Hexagon/circ_st.ll new file mode 100644 index 000000000000..244ca3bae714 --- /dev/null +++ b/test/CodeGen/Hexagon/circ_st.ll @@ -0,0 +1,108 @@ +; RUN: llc -march=hexagon -verify-machineinstrs=true < %s | FileCheck %s +; Testing for these 5 variants of circular store: +; Q6_circ_store_update_B(inputLR, pDelay, -1, nConvLength, 4); +; Q6_circ_store_update_D(inputLR, pDelay, -1, nConvLength, 4); +; Q6_circ_store_update_HL(inputLR, pDelay, -1, nConvLength, 4); +; Q6_circ_store_update_HH(inputLR, pDelay, -1, nConvLength, 4); +; Q6_circ_store_update_W(inputLR, pDelay, -1, nConvLength, 4); +; producing these +; memb(r1++#-1:circ(m0)) = r3 +; memd(r1++#-8:circ(m0)) = r1:0 +; memh(r1++#-2:circ(m0)) = r3 +; memh(r1++#-2:circ(m0)) = r3.h +; memw(r1++#-4:circ(m0)) = r0 + +; ModuleID = 'circ_st.i' +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32" +target triple = "hexagon" + +define zeroext i8 @foo1(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { +entry: + %conv = zext i16 %filtMemLen to i32 + %shr2 = lshr i32 %conv, 1 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %or = or i32 %shr2, 
33554432 +; CHECK: memb(r{{[0-9]*}}{{.}}++{{.}}#-1:circ(m{{[0-1]}})) + %1 = tail call i8* @llvm.hexagon.circ.stb(i8* %0, i32 0, i32 %or, i32 -1) + %2 = load i8, i8* %1, align 1, !tbaa !0 + ret i8 %2 +} + +declare i8* @llvm.hexagon.circ.stb(i8*, i32, i32, i32) nounwind + +define i64 @foo2(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { +entry: + %conv = zext i16 %filtMemLen to i32 + %shr1 = lshr i32 %conv, 1 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %shl = shl nuw nsw i32 %shr1, 3 + %or = or i32 %shl, 83886080 +; CHECK: memd(r{{[0-9]*}}{{.}}++{{.}}#-8:circ(m{{[0-1]}})) + %1 = tail call i8* @llvm.hexagon.circ.std(i8* %0, i64 undef, i32 %or, i32 -8) + %2 = bitcast i8* %1 to i64* + %3 = load i64, i64* %2, align 8, !tbaa !0 + ret i64 %3 +} + +declare i8* @llvm.hexagon.circ.std(i8*, i64, i32, i32) nounwind + +define signext i16 @foo3(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { +entry: + %conv = zext i16 %filtMemLen to i32 + %shr2 = and i32 %conv, 65534 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %or = or i32 %shr2, 50331648 +; CHECK: memh(r{{[0-9]*}}{{.}}++{{.}}#-2:circ(m{{[0-1]}})) + %1 = tail call i8* @llvm.hexagon.circ.sth(i8* %0, i32 0, i32 %or, i32 -2) + %2 = bitcast i8* %1 to i16* + %3 = load i16, i16* %2, align 2, !tbaa !2 + ret i16 %3 +} + +declare i8* @llvm.hexagon.circ.sth(i8*, i32, i32, i32) nounwind + +define signext i16 @foo5(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { +entry: + %conv = zext i16 %filtMemLen to i32 + %shr2 = and i32 %conv, 65534 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %or = or i32 %shr2, 50331648 +; CHECK: memh(r{{[0-9]*}}{{.}}++{{.}}#-2:circ(m{{[0-1]}})){{ *}}={{ *}}r{{[0-9]*}}.h + %1 = tail call i8* @llvm.hexagon.circ.sthhi(i8* %0, i32 0, i32 %or, i32 -2) + %2 = bitcast i8* %1 to i16* + %3 = load i16, i16* %2, align 2, !tbaa !2 + ret i16 %3 +} + +declare i8* @llvm.hexagon.circ.sthhi(i8*, i32, i32, i32) nounwind + +define i32 @foo6(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind { +entry: + %conv = zext i16 %filtMemLen to i32 + %shr1 = lshr i32 %conv, 1 + %idxprom = sext i16 %filtMemIndex to i32 + %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom + %0 = bitcast i16* %arrayidx to i8* + %shl = shl nuw nsw i32 %shr1, 2 + %or = or i32 %shl, 67108864 +; CHECK: memw(r{{[0-9]*}}{{.}}++{{.}}#-4:circ(m{{[0-1]}})) + %1 = tail call i8* @llvm.hexagon.circ.stw(i8* %0, i32 undef, i32 %or, i32 -4) + %2 = bitcast i8* %1 to i32* + %3 = load i32, i32* %2, align 4, !tbaa !3 + ret i32 %3 +} + +declare i8* @llvm.hexagon.circ.stw(i8*, i32, i32, i32) nounwind + +!0 = !{!"omnipotent char", !1} +!1 = !{!"Simple C/C++ TBAA"} +!2 = !{!"short", !0} +!3 = !{!"int", !0} diff --git a/test/CodeGen/Hexagon/clr_set_toggle.ll b/test/CodeGen/Hexagon/clr_set_toggle.ll new file mode 100644 index 000000000000..87c52956129e --- /dev/null +++ b/test/CodeGen/Hexagon/clr_set_toggle.ll @@ -0,0 +1,160 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s +; Optimized bitwise operations. 
+ +define i32 @my_clrbit(i32 %x) nounwind { +entry: +; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31) + %x.addr = alloca i32, align 4 + store i32 %x, i32* %x.addr, align 4 + %0 = load i32, i32* %x.addr, align 4 + %and = and i32 %0, 2147483647 + ret i32 %and +} + +define i64 @my_clrbit2(i64 %x) nounwind { +entry: +; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31) + %x.addr = alloca i64, align 8 + store i64 %x, i64* %x.addr, align 8 + %0 = load i64, i64* %x.addr, align 8 + %and = and i64 %0, -2147483649 + ret i64 %and +} + +define i64 @my_clrbit3(i64 %x) nounwind { +entry: +; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31) + %x.addr = alloca i64, align 8 + store i64 %x, i64* %x.addr, align 8 + %0 = load i64, i64* %x.addr, align 8 + %and = and i64 %0, 9223372036854775807 + ret i64 %and +} + +define i32 @my_clrbit4(i32 %x) nounwind { +entry: +; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #13) + %x.addr = alloca i32, align 4 + store i32 %x, i32* %x.addr, align 4 + %0 = load i32, i32* %x.addr, align 4 + %and = and i32 %0, -8193 + ret i32 %and +} + +define i64 @my_clrbit5(i64 %x) nounwind { +entry: +; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #13) + %x.addr = alloca i64, align 8 + store i64 %x, i64* %x.addr, align 8 + %0 = load i64, i64* %x.addr, align 8 + %and = and i64 %0, -8193 + ret i64 %and +} + +define i64 @my_clrbit6(i64 %x) nounwind { +entry: +; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #27) + %x.addr = alloca i64, align 8 + store i64 %x, i64* %x.addr, align 8 + %0 = load i64, i64* %x.addr, align 8 + %and = and i64 %0, -576460752303423489 + ret i64 %and +} + +define zeroext i16 @my_setbit(i16 zeroext %crc) nounwind { +entry: +; CHECK: memh(r{{[0-9]+}}+#0){{ *}}={{ *}}setbit(#15) + %crc.addr = alloca i16, align 2 + store i16 %crc, i16* %crc.addr, align 2 + %0 = load i16, i16* %crc.addr, align 2 + %conv = zext i16 %0 to i32 + %or = or i32 %conv, 32768 + %conv1 = trunc i32 %or to i16 + store i16 %conv1, i16* %crc.addr, align 2 + %1 = load i16, i16* %crc.addr, align 2 + ret i16 %1 +} + +define i32 @my_setbit2(i32 %x) nounwind { +entry: +; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #15) + %x.addr = alloca i32, align 4 + store i32 %x, i32* %x.addr, align 4 + %0 = load i32, i32* %x.addr, align 4 + %or = or i32 %0, 32768 + ret i32 %or +} + +define i64 @my_setbit3(i64 %x) nounwind { +entry: +; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #15) + %x.addr = alloca i64, align 8 + store i64 %x, i64* %x.addr, align 8 + %0 = load i64, i64* %x.addr, align 8 + %or = or i64 %0, 32768 + ret i64 %or +} + +define i32 @my_setbit4(i32 %x) nounwind { +entry: +; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #31) + %x.addr = alloca i32, align 4 + store i32 %x, i32* %x.addr, align 4 + %0 = load i32, i32* %x.addr, align 4 + %or = or i32 %0, -2147483648 + ret i32 %or +} + +define i64 @my_setbit5(i64 %x) nounwind { +entry: +; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #13) + %x.addr = alloca i64, align 8 + store i64 %x, i64* %x.addr, align 8 + %0 = load i64, i64* %x.addr, align 8 + %or = or i64 %0, 35184372088832 + ret i64 %or +} + +define zeroext i16 @my_togglebit(i16 zeroext %crc) nounwind { +entry: +; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #15) + %crc.addr = alloca i16, align 2 + store i16 %crc, i16* %crc.addr, align 2 + %0 = load i16, i16* %crc.addr, align 2 + %conv = zext i16 %0 to i32 + %xor = xor i32 %conv, 32768 + %conv1 = trunc i32 %xor to i16 + store i16 %conv1, i16* %crc.addr, align 2 + %1 = load i16, i16* %crc.addr, align 2 + ret i16 %1 +} + +define i32 @my_togglebit2(i32 %x) nounwind 
{ +entry: +; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #15) + %x.addr = alloca i32, align 4 + store i32 %x, i32* %x.addr, align 4 + %0 = load i32, i32* %x.addr, align 4 + %xor = xor i32 %0, 32768 + ret i32 %xor +} + +define i64 @my_togglebit3(i64 %x) nounwind { +entry: +; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #15) + %x.addr = alloca i64, align 8 + store i64 %x, i64* %x.addr, align 8 + %0 = load i64, i64* %x.addr, align 8 + %xor = xor i64 %0, 32768 + ret i64 %xor +} + +define i64 @my_togglebit4(i64 %x) nounwind { +entry: +; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #20) + %x.addr = alloca i64, align 8 + store i64 %x, i64* %x.addr, align 8 + %0 = load i64, i64* %x.addr, align 8 + %xor = xor i64 %0, 4503599627370496 + ret i64 %xor +} diff --git a/test/CodeGen/Hexagon/cmp-not.ll b/test/CodeGen/Hexagon/cmp-not.ll deleted file mode 100644 index abcddc38b23b..000000000000 --- a/test/CodeGen/Hexagon/cmp-not.ll +++ /dev/null @@ -1,50 +0,0 @@ -; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s -; Check that we generate matching compare insn. - -; Function Attrs: nounwind -define i32 @neqi(i32 %argc) #0 { -entry: - %p = alloca i8, align 1 - %0 = tail call i1 @llvm.hexagon.C4.cmpneqi(i32 %argc, i32 512) - %conv = zext i1 %0 to i8 - store volatile i8 %conv, i8* %p, align 1 - %p.0.p.0. = load volatile i8* %p, align 1 - %conv1 = zext i8 %p.0.p.0. to i32 - ret i32 %conv1 -} -; CHECK: p{{[0-3]}}{{ *}} = !cmp.eq(r{{[0-9]+}}, ##512) - -; Function Attrs: nounwind readnone -declare i1 @llvm.hexagon.C4.cmpneqi(i32, i32) #1 - -; Function Attrs: nounwind -define i32 @ngti(i32 %argc) #0 { -entry: - %p = alloca i8, align 1 - %0 = tail call i1 @llvm.hexagon.C4.cmpltei(i32 %argc, i32 4) - %conv = zext i1 %0 to i8 - store volatile i8 %conv, i8* %p, align 1 - %p.0.p.0. = load volatile i8* %p, align 1 - %conv1 = zext i8 %p.0.p.0. to i32 - ret i32 %conv1 -} -; CHECK: p{{[0-3]}}{{ *}} = !cmp.gt(r{{[0-9]+}}, #4) - -; Function Attrs: nounwind readnone -declare i1 @llvm.hexagon.C4.cmpltei(i32, i32) #1 - -; Function Attrs: nounwind -define i32 @ngtui(i32 %argc) #0 { -entry: - %p = alloca i8, align 1 - %0 = tail call i1 @llvm.hexagon.C4.cmplteui(i32 %argc, i32 4) - %conv = zext i1 %0 to i8 - store volatile i8 %conv, i8* %p, align 1 - %p.0.p.0. = load volatile i8* %p, align 1 - %conv1 = zext i8 %p.0.p.0. to i32 - ret i32 %conv1 -} -; CHECK: p{{[0-3]}}{{ *}} = !cmp.gtu(r{{[0-9]+}}, #4) - -; Function Attrs: nounwind readnone -declare i1 @llvm.hexagon.C4.cmplteui(i32, i32) #1 diff --git a/test/CodeGen/Hexagon/cmp-to-predreg.ll b/test/CodeGen/Hexagon/cmp-to-predreg.ll index d430b901866d..2b65343ab2cf 100644 --- a/test/CodeGen/Hexagon/cmp-to-predreg.ll +++ b/test/CodeGen/Hexagon/cmp-to-predreg.ll @@ -2,7 +2,7 @@ ; Check that we generate compare to predicate register. 
define i32 @compare1(i32 %a, i32 %b) nounwind { -; CHECK: p{{[0-3]}}{{ *}}={{ *}}!cmp.eq(r{{[0-9]+}},{{ *}}r{{[0-9]+}}) +; CHECK: p{{[0-3]}}{{ *}}={{ *[!]?}}cmp.eq(r{{[0-9]+}},{{ *}}r{{[0-9]+}}) entry: %cmp = icmp ne i32 %a, %b %add = add nsw i32 %a, %b @@ -12,7 +12,7 @@ entry: } define i32 @compare2(i32 %a) nounwind { -; CHECK: p{{[0-3]}}{{ *}}={{ *}}!cmp.eq(r{{[0-9]+}},{{ *}}#10) +; CHECK: p{{[0-3]}}{{ *}}={{ *[!]?}}cmp.eq(r{{[0-9]+}},{{ *}}#10) entry: %cmp = icmp ne i32 %a, 10 %add = add nsw i32 %a, 10 diff --git a/test/CodeGen/Hexagon/cmp_pred.ll b/test/CodeGen/Hexagon/cmp_pred.ll index 37db3b499f63..39549a1f2d54 100644 --- a/test/CodeGen/Hexagon/cmp_pred.ll +++ b/test/CodeGen/Hexagon/cmp_pred.ll @@ -1,3 +1,4 @@ +; XFAIL: ; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s ; Generate various cmpb instruction followed by if (p0) .. if (!p0)... target triple = "hexagon" diff --git a/test/CodeGen/Hexagon/cmp_pred2.ll b/test/CodeGen/Hexagon/cmp_pred2.ll index a20b9f09b6e0..28f3e1bac8d1 100644 --- a/test/CodeGen/Hexagon/cmp_pred2.ll +++ b/test/CodeGen/Hexagon/cmp_pred2.ll @@ -11,7 +11,7 @@ entry: br i1 %cmp, label %if.then, label %entry.if.end_crit_edge entry.if.end_crit_edge: - %.pre = load i32* @c, align 4 + %.pre = load i32, i32* @c, align 4 br label %if.end if.then: @@ -32,7 +32,7 @@ entry: br i1 %cmp, label %entry.if.end_crit_edge, label %if.then entry.if.end_crit_edge: - %.pre = load i32* @c, align 4 + %.pre = load i32, i32* @c, align 4 br label %if.end if.then: @@ -53,7 +53,7 @@ entry: br i1 %cmp, label %entry.if.end_crit_edge, label %if.then entry.if.end_crit_edge: - %.pre = load i32* @c, align 4 + %.pre = load i32, i32* @c, align 4 br label %if.end if.then: @@ -73,7 +73,7 @@ entry: br i1 %cmp, label %if.then, label %entry.if.end_crit_edge entry.if.end_crit_edge: - %.pre = load i32* @c, align 4 + %.pre = load i32, i32* @c, align 4 br label %if.end if.then: diff --git a/test/CodeGen/Hexagon/cmp_pred_reg.ll b/test/CodeGen/Hexagon/cmp_pred_reg.ll index 37db3b499f63..39549a1f2d54 100644 --- a/test/CodeGen/Hexagon/cmp_pred_reg.ll +++ b/test/CodeGen/Hexagon/cmp_pred_reg.ll @@ -1,3 +1,4 @@ +; XFAIL: ; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s ; Generate various cmpb instruction followed by if (p0) .. if (!p0)... target triple = "hexagon" diff --git a/test/CodeGen/Hexagon/cmpb_pred.ll b/test/CodeGen/Hexagon/cmpb_pred.ll index 0960da1fa060..1a43e6291696 100644 --- a/test/CodeGen/Hexagon/cmpb_pred.ll +++ b/test/CodeGen/Hexagon/cmpb_pred.ll @@ -1,3 +1,4 @@ +; XFAIL: ; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s ; Generate various cmpb instruction followed by if (p0) .. if (!p0)... 
target triple = "hexagon" @@ -16,7 +17,7 @@ entry: define i32 @Func_3b(i32) nounwind readonly { entry: ; CHECK-NOT: mux - %1 = load i8* @Enum_global, align 1 + %1 = load i8, i8* @Enum_global, align 1 %2 = trunc i32 %0 to i8 %cmp = icmp ne i8 %1, %2 %selv = zext i1 %cmp to i32 @@ -35,7 +36,7 @@ entry: define i32 @Func_3d(i32) nounwind readonly { entry: ; CHECK-NOT: mux - %1 = load i8* @Enum_global, align 1 + %1 = load i8, i8* @Enum_global, align 1 %2 = trunc i32 %0 to i8 %cmp = icmp eq i8 %1, %2 %selv = zext i1 %cmp to i32 @@ -45,7 +46,7 @@ entry: define i32 @Func_3e(i32) nounwind readonly { entry: ; CHECK-NOT: mux - %1 = load i8* @Enum_global, align 1 + %1 = load i8, i8* @Enum_global, align 1 %2 = trunc i32 %0 to i8 %cmp = icmp eq i8 %1, %2 %selv = zext i1 %cmp to i32 diff --git a/test/CodeGen/Hexagon/combine.ll b/test/CodeGen/Hexagon/combine.ll index 721998596c81..2e320d977d62 100644 --- a/test/CodeGen/Hexagon/combine.ll +++ b/test/CodeGen/Hexagon/combine.ll @@ -6,8 +6,8 @@ define void @foo() nounwind { entry: - %0 = load i32* @j, align 4 - %1 = load i64* @k, align 8 + %0 = load i32, i32* @j, align 4 + %1 = load i64, i64* @k, align 8 %conv = trunc i64 %1 to i32 %2 = call i64 @llvm.hexagon.A2.combinew(i32 %0, i32 %conv) store i64 %2, i64* @k, align 8 diff --git a/test/CodeGen/Hexagon/combine_ir.ll b/test/CodeGen/Hexagon/combine_ir.ll index e100cf7196f1..634a5c82a916 100644 --- a/test/CodeGen/Hexagon/combine_ir.ll +++ b/test/CodeGen/Hexagon/combine_ir.ll @@ -4,7 +4,7 @@ define void @word(i32* nocapture %a) nounwind { entry: - %0 = load i32* %a, align 4 + %0 = load i32, i32* %a, align 4 %1 = zext i32 %0 to i64 tail call void @bar(i64 %1) nounwind ret void @@ -17,10 +17,10 @@ declare void @bar(i64) define void @halfword(i16* nocapture %a) nounwind { entry: - %0 = load i16* %a, align 2 + %0 = load i16, i16* %a, align 2 %1 = zext i16 %0 to i64 - %add.ptr = getelementptr inbounds i16* %a, i32 1 - %2 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %a, i32 1 + %2 = load i16, i16* %add.ptr, align 2 %3 = zext i16 %2 to i64 %4 = shl nuw nsw i64 %3, 16 %ins = or i64 %4, %1 @@ -33,10 +33,10 @@ entry: define void @byte(i8* nocapture %a) nounwind { entry: - %0 = load i8* %a, align 1 + %0 = load i8, i8* %a, align 1 %1 = zext i8 %0 to i64 - %add.ptr = getelementptr inbounds i8* %a, i32 1 - %2 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %a, i32 1 + %2 = load i8, i8* %add.ptr, align 1 %3 = zext i8 %2 to i64 %4 = shl nuw nsw i64 %3, 8 %ins = or i64 %4, %1 diff --git a/test/CodeGen/Hexagon/convertdptoint.ll b/test/CodeGen/Hexagon/convertdptoint.ll index fa068c4c8a51..a09c2fd14b12 100644 --- a/test/CodeGen/Hexagon/convertdptoint.ll +++ b/test/CodeGen/Hexagon/convertdptoint.ll @@ -14,13 +14,13 @@ entry: store i32 0, i32* %retval store double 1.540000e+01, double* %a, align 8 store double 9.100000e+00, double* %b, align 8 - %0 = load double* %a, align 8 - %1 = load double* %b, align 8 + %0 = load double, double* %a, align 8 + %1 = load double, double* %b, align 8 %add = fadd double %0, %1 store double %add, double* %c, align 8 - %2 = load double* %c, align 8 + %2 = load double, double* %c, align 8 %conv = fptosi double %2 to i32 store i32 %conv, i32* %i, align 4 - %3 = load i32* %i, align 4 + %3 = load i32, i32* %i, align 4 ret i32 %3 } diff --git a/test/CodeGen/Hexagon/convertdptoll.ll b/test/CodeGen/Hexagon/convertdptoll.ll index 1b4dd86bd01b..f46d46cf76b1 100644 --- a/test/CodeGen/Hexagon/convertdptoll.ll +++ b/test/CodeGen/Hexagon/convertdptoll.ll @@ 
-14,14 +14,14 @@ entry: store i32 0, i32* %retval store double 1.540000e+01, double* %a, align 8 store double 9.100000e+00, double* %b, align 8 - %0 = load double* %a, align 8 - %1 = load double* %b, align 8 + %0 = load double, double* %a, align 8 + %1 = load double, double* %b, align 8 %add = fadd double %0, %1 store double %add, double* %c, align 8 - %2 = load double* %c, align 8 + %2 = load double, double* %c, align 8 %conv = fptosi double %2 to i64 store i64 %conv, i64* %i, align 8 - %3 = load i64* %i, align 8 + %3 = load i64, i64* %i, align 8 %conv1 = trunc i64 %3 to i32 ret i32 %conv1 } diff --git a/test/CodeGen/Hexagon/convertsptoint.ll b/test/CodeGen/Hexagon/convertsptoint.ll index b8a9d6c8083c..7593e57d852f 100644 --- a/test/CodeGen/Hexagon/convertsptoint.ll +++ b/test/CodeGen/Hexagon/convertsptoint.ll @@ -14,13 +14,13 @@ entry: store i32 0, i32* %retval store float 0x402ECCCCC0000000, float* %a, align 4 store float 0x4022333340000000, float* %b, align 4 - %0 = load float* %a, align 4 - %1 = load float* %b, align 4 + %0 = load float, float* %a, align 4 + %1 = load float, float* %b, align 4 %add = fadd float %0, %1 store float %add, float* %c, align 4 - %2 = load float* %c, align 4 + %2 = load float, float* %c, align 4 %conv = fptosi float %2 to i32 store i32 %conv, i32* %i, align 4 - %3 = load i32* %i, align 4 + %3 = load i32, i32* %i, align 4 ret i32 %3 } diff --git a/test/CodeGen/Hexagon/convertsptoll.ll b/test/CodeGen/Hexagon/convertsptoll.ll index 1c4df94784aa..d8432cbc812b 100644 --- a/test/CodeGen/Hexagon/convertsptoll.ll +++ b/test/CodeGen/Hexagon/convertsptoll.ll @@ -14,14 +14,14 @@ entry: store i32 0, i32* %retval store float 0x402ECCCCC0000000, float* %a, align 4 store float 0x4022333340000000, float* %b, align 4 - %0 = load float* %a, align 4 - %1 = load float* %b, align 4 + %0 = load float, float* %a, align 4 + %1 = load float, float* %b, align 4 %add = fadd float %0, %1 store float %add, float* %c, align 4 - %2 = load float* %c, align 4 + %2 = load float, float* %c, align 4 %conv = fptosi float %2 to i64 store i64 %conv, i64* %i, align 8 - %3 = load i64* %i, align 8 + %3 = load i64, i64* %i, align 8 %conv1 = trunc i64 %3 to i32 ret i32 %conv1 } diff --git a/test/CodeGen/Hexagon/ctlz-cttz-ctpop.ll b/test/CodeGen/Hexagon/ctlz-cttz-ctpop.ll index e942f8d0c5dd..b8f483298f8c 100644 --- a/test/CodeGen/Hexagon/ctlz-cttz-ctpop.ll +++ b/test/CodeGen/Hexagon/ctlz-cttz-ctpop.ll @@ -1,8 +1,10 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s -; CHECK: r{{[0-9]+}}:{{[0-9]+}} |= lsr(r{{[0-9]+}}:{{[0-9]+}}, #4) -; CHECK: r{{[0-9]+}}:{{[0-9]+}} &= lsr(r{{[0-9]+}}:{{[0-9]+}}, #2) -; CHECK: r{{[0-9]+}} += lsr(r{{[0-9]+}}, #4) +; CHECK-DAG: ct0({{r[0-9]*:[0-9]*}}) +; CHECK-DAG: cl0({{r[0-9]*:[0-9]*}}) +; CHECK-DAG: ct0({{r[0-9]*}}) +; CHECK-DAG: cl0({{r[0-9]*}}) +; CHECK-DAG: r{{[0-9]+}} += lsr(r{{[0-9]+}}, #4) define i32 @foo(i64 %a, i32 %b) nounwind { entry: diff --git a/test/CodeGen/Hexagon/dadd.ll b/test/CodeGen/Hexagon/dadd.ll index 602978ac01d3..5fcd705bab23 100644 --- a/test/CodeGen/Hexagon/dadd.ll +++ b/test/CodeGen/Hexagon/dadd.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s ; Check that we generate double precision floating point add in V5. 
-; CHECK: r{{[0-9]+}}:{{[0-9]+}} = dfadd(r{{[0-9]+}}:{{[0-9]+}}, r{{[0-9]+}}:{{[0-9]+}}) +; CHECK: call __hexagon_adddf3 define i32 @main() nounwind { @@ -11,8 +11,8 @@ entry: %c = alloca double, align 8 store double 1.540000e+01, double* %a, align 8 store double 9.100000e+00, double* %b, align 8 - %0 = load double* %a, align 8 - %1 = load double* %b, align 8 + %0 = load double, double* %a, align 8 + %1 = load double, double* %b, align 8 %add = fadd double %0, %1 store double %add, double* %c, align 8 ret i32 0 diff --git a/test/CodeGen/Hexagon/dmul.ll b/test/CodeGen/Hexagon/dmul.ll index d7437739ee90..1b79e0aa7d70 100644 --- a/test/CodeGen/Hexagon/dmul.ll +++ b/test/CodeGen/Hexagon/dmul.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s ; Check that we generate double precision floating point multiply in V5. -; CHECK: r{{[0-9]+}}:{{[0-9]+}} = dfmpy(r{{[0-9]+}}:{{[0-9]+}}, r{{[0-9]+}}:{{[0-9]+}}) +; CHECK: call __hexagon_muldf3 define i32 @main() nounwind { entry: @@ -10,8 +10,8 @@ entry: %c = alloca double, align 8 store double 1.540000e+01, double* %a, align 8 store double 9.100000e+00, double* %b, align 8 - %0 = load double* %b, align 8 - %1 = load double* %a, align 8 + %0 = load double, double* %b, align 8 + %1 = load double, double* %a, align 8 %mul = fmul double %0, %1 store double %mul, double* %c, align 8 ret i32 0 diff --git a/test/CodeGen/Hexagon/double.ll b/test/CodeGen/Hexagon/double.ll index c3b6f378ec8a..b4d025cd7fd0 100644 --- a/test/CodeGen/Hexagon/double.ll +++ b/test/CodeGen/Hexagon/double.ll @@ -10,13 +10,13 @@ entry: store double* %acc, double** %acc.addr, align 4 store double %num, double* %num.addr, align 8 store double %num2, double* %num2.addr, align 8 - %0 = load double** %acc.addr, align 4 - %1 = load double* %0 - %2 = load double* %num.addr, align 8 + %0 = load double*, double** %acc.addr, align 4 + %1 = load double, double* %0 + %2 = load double, double* %num.addr, align 8 %add = fadd double %1, %2 - %3 = load double* %num2.addr, align 8 + %3 = load double, double* %num2.addr, align 8 %sub = fsub double %add, %3 - %4 = load double** %acc.addr, align 4 + %4 = load double*, double** %acc.addr, align 4 store double %sub, double* %4 ret void } diff --git a/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll b/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll index 54e7ce3bcdd3..6bf8224904ec 100644 --- a/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll +++ b/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll @@ -14,13 +14,13 @@ entry: store i32 0, i32* %retval store double 1.540000e+01, double* %a, align 8 store double 9.100000e+00, double* %b, align 8 - %0 = load double* %a, align 8 - %1 = load double* %b, align 8 + %0 = load double, double* %a, align 8 + %1 = load double, double* %b, align 8 %add = fadd double %0, %1 store double %add, double* %c, align 8 - %2 = load double* %c, align 8 + %2 = load double, double* %c, align 8 %conv = fptosi double %2 to i32 store i32 %conv, i32* %i, align 4 - %3 = load i32* %i, align 4 + %3 = load i32, i32* %i, align 4 ret i32 %3 } diff --git a/test/CodeGen/Hexagon/dsub.ll b/test/CodeGen/Hexagon/dsub.ll index 4f9d39ed0b24..8b37301d84fb 100644 --- a/test/CodeGen/Hexagon/dsub.ll +++ b/test/CodeGen/Hexagon/dsub.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s ; Check that we generate double precision floating point subtract in V5. 
-; CHECK: r{{[0-9]+}}:{{[0-9]+}} = dfsub(r{{[0-9]+}}:{{[0-9]+}}, r{{[0-9]+}}:{{[0-9]+}}) +; CHECK: call __hexagon_subdf3 define i32 @main() nounwind { entry: @@ -10,8 +10,8 @@ entry: %c = alloca double, align 8 store double 1.540000e+01, double* %a, align 8 store double 9.100000e+00, double* %b, align 8 - %0 = load double* %b, align 8 - %1 = load double* %a, align 8 + %0 = load double, double* %b, align 8 + %1 = load double, double* %a, align 8 %sub = fsub double %0, %1 store double %sub, double* %c, align 8 ret i32 0 diff --git a/test/CodeGen/Hexagon/dualstore.ll b/test/CodeGen/Hexagon/dualstore.ll index f7d7e8bbe75d..33d9ce9b9351 100644 --- a/test/CodeGen/Hexagon/dualstore.ll +++ b/test/CodeGen/Hexagon/dualstore.ll @@ -1,17 +1,12 @@ -; RUN: llc -march=hexagon -mcpu=hexagonv4 -disable-hexagon-misched < %s | FileCheck %s +; RUN: llc -march=hexagon -disable-hexagon-misched < %s | FileCheck %s ; Check that we generate dual stores in one packet in V4 -; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#{{[0-9]+}}){{ *}}={{ *}}##500000 -; CHECK-NEXT: memw(r{{[0-9]+}}{{ *}}+{{ *}}#{{[0-9]+}}){{ *}}={{ *}}##100000 -; CHECK-NEXT: } +; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#{{[0-9]+}}){{ *}}= +; CHECK-NEXT: memw(r{{[0-9]+}}{{ *}}+{{ *}}#{{[0-9]+}}){{ *}}= -@Reg = global i32 0, align 4 -define i32 @main() nounwind { +define i32 @main(i32 %v, i32* %p1, i32* %p2) nounwind { entry: - %number= alloca i32, align 4 - store i32 500000, i32* %number, align 4 - %number1= alloca i32, align 4 - store i32 100000, i32* %number1, align 4 + store i32 %v, i32* %p1, align 4 + store i32 %v, i32* %p2, align 4 ret i32 0 } - diff --git a/test/CodeGen/Hexagon/expand-condsets-basic.ll b/test/CodeGen/Hexagon/expand-condsets-basic.ll new file mode 100644 index 000000000000..16fe8af47b13 --- /dev/null +++ b/test/CodeGen/Hexagon/expand-condsets-basic.ll @@ -0,0 +1,11 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; CHECK: if{{.*}}add +; CHECK: if{{.*}}sub + +define i32 @foo (i1 %a, i32 %b, i32 %c, i32 %d) nounwind { + %1 = add i32 %b, %d + %2 = sub i32 %c, %d + %3 = select i1 %a, i32 %1, i32 %2 + ret i32 %3 +} + diff --git a/test/CodeGen/Hexagon/expand-condsets-rm-segment.ll b/test/CodeGen/Hexagon/expand-condsets-rm-segment.ll new file mode 100644 index 000000000000..cde7e6a09e1d --- /dev/null +++ b/test/CodeGen/Hexagon/expand-condsets-rm-segment.ll @@ -0,0 +1,131 @@ +; RUN: llc -O2 < %s +; REQUIRES: asserts + +target datalayout = "e-m:e-p:32:32-i1:32-i64:64-a:0-v32:32-n16:32" +target triple = "hexagon-unknown--elf" + +%struct.cpumask = type { [1 x i32] } +%struct.load_weight = type { i32, i32 } + +@sysctl_sched_latency = global i32 6000000, align 4 +@normalized_sysctl_sched_latency = global i32 6000000, align 4 +@sysctl_sched_tunable_scaling = global i8 1, align 1 +@sysctl_sched_min_granularity = global i32 750000, align 4 +@normalized_sysctl_sched_min_granularity = global i32 750000, align 4 +@sysctl_sched_wakeup_granularity = global i32 1000000, align 4 +@normalized_sysctl_sched_wakeup_granularity = global i32 1000000, align 4 +@sysctl_sched_migration_cost = constant i32 500000, align 4 +@sysctl_sched_shares_window = global i32 10000000, align 4 +@sysctl_sched_child_runs_first = common global i32 0, align 4 +@cpu_online_mask = external constant %struct.cpumask* + +; Function Attrs: noinline nounwind +define void @sched_init_granularity() #0 { +entry: + tail call fastcc void @update_sysctl() + ret void +} + +; Function Attrs: noinline nounwind +define internal fastcc void @update_sysctl() #0 { +entry: + %call = tail call i32 
@get_update_sysctl_factor() + %0 = load i32, i32* @normalized_sysctl_sched_min_granularity, align 4, !tbaa !1 + %mul = mul i32 %0, %call + store i32 %mul, i32* @sysctl_sched_min_granularity, align 4, !tbaa !1 + %1 = load i32, i32* @normalized_sysctl_sched_latency, align 4, !tbaa !1 + %mul1 = mul i32 %1, %call + store i32 %mul1, i32* @sysctl_sched_latency, align 4, !tbaa !1 + %2 = load i32, i32* @normalized_sysctl_sched_wakeup_granularity, align 4, !tbaa !1 + %mul2 = mul i32 %2, %call + store i32 %mul2, i32* @sysctl_sched_wakeup_granularity, align 4, !tbaa !1 + ret void +} + +; Function Attrs: noinline nounwind +define i32 @calc_delta_mine(i32 %delta_exec, i32 %weight, %struct.load_weight* nocapture %lw) #0 { +entry: + %cmp = icmp ugt i32 %weight, 1 + %conv = zext i32 %delta_exec to i64 + br i1 %cmp, label %if.then, label %if.end, !prof !5 + +if.then: ; preds = %entry + %conv2 = zext i32 %weight to i64 + %mul = mul i64 %conv2, %conv + br label %if.end + +if.end: ; preds = %entry, %if.then + %tmp.0 = phi i64 [ %mul, %if.then ], [ %conv, %entry ] + %inv_weight = getelementptr inbounds %struct.load_weight, %struct.load_weight* %lw, i32 0, i32 1 + %0 = load i32, i32* %inv_weight, align 4, !tbaa !6 + %tobool4 = icmp eq i32 %0, 0 + br i1 %tobool4, label %if.then5, label %if.end22 + +if.then5: ; preds = %if.end + %weight7 = getelementptr inbounds %struct.load_weight, %struct.load_weight* %lw, i32 0, i32 0 + %1 = load i32, i32* %weight7, align 4, !tbaa !9 + %lnot9 = icmp eq i32 %1, 0 + br i1 %lnot9, label %if.then17, label %if.else19, !prof !10 + +if.then17: ; preds = %if.then5 + store i32 -1, i32* %inv_weight, align 4, !tbaa !6 + br label %if.end22 + +if.else19: ; preds = %if.then5 + %div = udiv i32 -1, %1 + store i32 %div, i32* %inv_weight, align 4, !tbaa !6 + br label %if.end22 + +if.end22: ; preds = %if.end, %if.then17, %if.else19 + %2 = phi i32 [ %0, %if.end ], [ -1, %if.then17 ], [ %div, %if.else19 ] + %cmp23 = icmp ugt i64 %tmp.0, 4294967295 + br i1 %cmp23, label %if.then31, label %if.else37, !prof !10 + +if.then31: ; preds = %if.end22 + %add = add i64 %tmp.0, 32768 + %shr = lshr i64 %add, 16 + %conv33 = zext i32 %2 to i64 + %mul34 = mul i64 %conv33, %shr + %add35 = add i64 %mul34, 32768 + %shr36 = lshr i64 %add35, 16 + br label %if.end43 + +if.else37: ; preds = %if.end22 + %conv39 = zext i32 %2 to i64 + %mul40 = mul i64 %conv39, %tmp.0 + %add41 = add i64 %mul40, 2147483648 + %shr42 = lshr i64 %add41, 32 + br label %if.end43 + +if.end43: ; preds = %if.else37, %if.then31 + %tmp.1 = phi i64 [ %shr36, %if.then31 ], [ %shr42, %if.else37 ] + %cmp49 = icmp ult i64 %tmp.1, 2147483647 + %3 = trunc i64 %tmp.1 to i32 + %conv51 = select i1 %cmp49, i32 %3, i32 2147483647 + ret i32 %conv51 +} + +declare i32 @get_update_sysctl_factor() #0 +declare i32 @__bitmap_weight(i32*, i32) #1 + +attributes #0 = { noinline nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #2 = { nounwind } + +!llvm.ident = !{!0} + +!0 = !{!"Clang 3.1"} +!1 = !{!2, !2, i64 0} +!2 = !{!"int", !3, i64 0} +!3 = !{!"omnipotent char", !4, i64 0} +!4 = !{!"Simple C/C++ TBAA"} +!5 = 
!{!"branch_weights", i32 64, i32 4} +!6 = !{!7, !8, i64 4} +!7 = !{!"load_weight", !8, i64 0, !8, i64 4} +!8 = !{!"long", !3, i64 0} +!9 = !{!7, !8, i64 0} +!10 = !{!"branch_weights", i32 4, i32 64} +!11 = !{!12, !12, i64 0} +!12 = !{!"any pointer", !3, i64 0} +!13 = !{!3, !3, i64 0} +!14 = !{i32 45854, i32 45878} diff --git a/test/CodeGen/Hexagon/expand-condsets-undef.ll b/test/CodeGen/Hexagon/expand-condsets-undef.ll new file mode 100644 index 000000000000..85e72aa22f0a --- /dev/null +++ b/test/CodeGen/Hexagon/expand-condsets-undef.ll @@ -0,0 +1,28 @@ +; RUN: llc -O2 < %s +; REQUIRES: asserts + +target datalayout = "e-m:e-p:32:32-i64:64-a:0-v32:32-n16:32" +target triple = "hexagon" + +; Function Attrs: nounwind optsize ssp +define internal fastcc void @foo() nounwind { +if.else473: + %0 = load i64, i64* undef, align 8 + %sub = sub nsw i64 undef, %0 + %conv476 = sitofp i64 %sub to double + %mul477 = fmul double %conv476, 0x3F50624DE0000000 + br i1 undef, label %cond.true540, label %cond.end548 + +cond.true540: + %1 = fptrunc double %mul477 to float + %2 = fptosi float %1 to i32 + br label %cond.end548 + +cond.end548: + %cond549 = phi i32 [ %2, %cond.true540 ], [ undef, %if.else473 ] + call void @bar(i32 %cond549) nounwind + unreachable +} + +declare void @bar(i32) nounwind + diff --git a/test/CodeGen/Hexagon/extload-combine.ll b/test/CodeGen/Hexagon/extload-combine.ll index b3b8bf07032a..519177fc75fc 100644 --- a/test/CodeGen/Hexagon/extload-combine.ll +++ b/test/CodeGen/Hexagon/extload-combine.ll @@ -19,7 +19,7 @@ define i64 @short_test1() #0 { ; CHECK: combine(#0, [[VAR]]) entry: store i16 0, i16* @a, align 2 - %0 = load i16* @b, align 2 + %0 = load i16, i16* @b, align 2 %conv2 = zext i16 %0 to i64 ret i64 %conv2 } @@ -30,7 +30,7 @@ define i64 @short_test2() #0 { ; CHECK: sxtw([[VAR1]]) entry: store i16 0, i16* @a, align 2 - %0 = load i16* @c, align 2 + %0 = load i16, i16* @c, align 2 %conv2 = sext i16 %0 to i64 ret i64 %conv2 } @@ -41,7 +41,7 @@ define i64 @char_test1() #0 { ; CHECK: combine(#0, [[VAR2]]) entry: store i8 0, i8* @char_a, align 1 - %0 = load i8* @char_b, align 1 + %0 = load i8, i8* @char_b, align 1 %conv2 = zext i8 %0 to i64 ret i64 %conv2 } @@ -52,7 +52,7 @@ define i64 @char_test2() #0 { ; CHECK: sxtw([[VAR3]]) entry: store i8 0, i8* @char_a, align 1 - %0 = load i8* @char_c, align 1 + %0 = load i8, i8* @char_c, align 1 %conv2 = sext i8 %0 to i64 ret i64 %conv2 } @@ -63,7 +63,7 @@ define i64 @int_test1() #0 { ; CHECK: combine(#0, [[VAR4]]) entry: store i32 0, i32* @int_a, align 4 - %0 = load i32* @int_b, align 4 + %0 = load i32, i32* @int_b, align 4 %conv = zext i32 %0 to i64 ret i64 %conv } @@ -74,7 +74,7 @@ define i64 @int_test2() #0 { ; CHECK: sxtw([[VAR5]]) entry: store i32 0, i32* @int_a, align 4 - %0 = load i32* @int_c, align 4 + %0 = load i32, i32* @int_c, align 4 %conv = sext i32 %0 to i64 ret i64 %conv } diff --git a/test/CodeGen/Hexagon/fadd.ll b/test/CodeGen/Hexagon/fadd.ll index b95e1475ff73..6cf0fbbccf73 100644 --- a/test/CodeGen/Hexagon/fadd.ll +++ b/test/CodeGen/Hexagon/fadd.ll @@ -10,8 +10,8 @@ entry: %c = alloca float, align 4 store float 0x402ECCCCC0000000, float* %a, align 4 store float 0x4022333340000000, float* %b, align 4 - %0 = load float* %a, align 4 - %1 = load float* %b, align 4 + %0 = load float, float* %a, align 4 + %1 = load float, float* %b, align 4 %add = fadd float %0, %1 store float %add, float* %c, align 4 ret i32 0 diff --git a/test/CodeGen/Hexagon/fcmp.ll b/test/CodeGen/Hexagon/fcmp.ll index e7b649e2b8c0..5cf3c57b5e9c 100644 --- 
a/test/CodeGen/Hexagon/fcmp.ll +++ b/test/CodeGen/Hexagon/fcmp.ll @@ -8,7 +8,7 @@ entry: %retval = alloca i32, align 4 %y.addr = alloca float, align 4 store float %y, float* %y.addr, align 4 - %0 = load float* %y.addr, align 4 + %0 = load float, float* %y.addr, align 4 %cmp = fcmp ogt float %0, 0x406AD7EFA0000000 br i1 %cmp, label %if.then, label %if.else @@ -21,7 +21,7 @@ if.else: ; preds = %entry br label %return return: ; preds = %if.else, %if.then - %1 = load i32* %retval + %1 = load i32, i32* %retval ret i32 %1 } @@ -31,7 +31,7 @@ entry: %a = alloca float, align 4 store i32 0, i32* %retval store float 0x40012E0A00000000, float* %a, align 4 - %0 = load float* %a, align 4 + %0 = load float, float* %a, align 4 %call = call i32 @foo(float %0) ret i32 %call } diff --git a/test/CodeGen/Hexagon/float.ll b/test/CodeGen/Hexagon/float.ll index bec9f5852e3c..03d1fbf44cb6 100644 --- a/test/CodeGen/Hexagon/float.ll +++ b/test/CodeGen/Hexagon/float.ll @@ -10,13 +10,13 @@ entry: store float* %acc, float** %acc.addr, align 4 store float %num, float* %num.addr, align 4 store float %num2, float* %num2.addr, align 4 - %0 = load float** %acc.addr, align 4 - %1 = load float* %0 - %2 = load float* %num.addr, align 4 + %0 = load float*, float** %acc.addr, align 4 + %1 = load float, float* %0 + %2 = load float, float* %num.addr, align 4 %add = fadd float %1, %2 - %3 = load float* %num2.addr, align 4 + %3 = load float, float* %num2.addr, align 4 %sub = fsub float %add, %3 - %4 = load float** %acc.addr, align 4 + %4 = load float*, float** %acc.addr, align 4 store float %sub, float* %4 ret void } diff --git a/test/CodeGen/Hexagon/floatconvert-ieee-rnd-near.ll b/test/CodeGen/Hexagon/floatconvert-ieee-rnd-near.ll index bec9f5852e3c..03d1fbf44cb6 100644 --- a/test/CodeGen/Hexagon/floatconvert-ieee-rnd-near.ll +++ b/test/CodeGen/Hexagon/floatconvert-ieee-rnd-near.ll @@ -10,13 +10,13 @@ entry: store float* %acc, float** %acc.addr, align 4 store float %num, float* %num.addr, align 4 store float %num2, float* %num2.addr, align 4 - %0 = load float** %acc.addr, align 4 - %1 = load float* %0 - %2 = load float* %num.addr, align 4 + %0 = load float*, float** %acc.addr, align 4 + %1 = load float, float* %0 + %2 = load float, float* %num.addr, align 4 %add = fadd float %1, %2 - %3 = load float* %num2.addr, align 4 + %3 = load float, float* %num2.addr, align 4 %sub = fsub float %add, %3 - %4 = load float** %acc.addr, align 4 + %4 = load float*, float** %acc.addr, align 4 store float %sub, float* %4 ret void } diff --git a/test/CodeGen/Hexagon/fmul.ll b/test/CodeGen/Hexagon/fmul.ll index 4766845b1143..4f55d0bec471 100644 --- a/test/CodeGen/Hexagon/fmul.ll +++ b/test/CodeGen/Hexagon/fmul.ll @@ -11,8 +11,8 @@ entry: %c = alloca float, align 4 store float 0x402ECCCCC0000000, float* %a, align 4 store float 0x4022333340000000, float* %b, align 4 - %0 = load float* %b, align 4 - %1 = load float* %a, align 4 + %0 = load float, float* %b, align 4 + %1 = load float, float* %a, align 4 %mul = fmul float %0, %1 store float %mul, float* %c, align 4 ret i32 0 diff --git a/test/CodeGen/Hexagon/frame.ll b/test/CodeGen/Hexagon/frame.ll index dc87c732d6fe..e87acb8cd796 100644 --- a/test/CodeGen/Hexagon/frame.ll +++ b/test/CodeGen/Hexagon/frame.ll @@ -10,14 +10,14 @@ define i32 @foo() nounwind { entry: %i = alloca i32, align 4 - %0 = load i32* @num, align 4 + %0 = load i32, i32* @num, align 4 store i32 %0, i32* %i, align 4 - %1 = load i32* %i, align 4 - %2 = load i32* @acc, align 4 + %1 = load i32, i32* %i, align 4 + %2 = load i32, i32* @acc, 
align 4 %mul = mul nsw i32 %1, %2 - %3 = load i32* @num2, align 4 + %3 = load i32, i32* @num2, align 4 %add = add nsw i32 %mul, %3 store i32 %add, i32* %i, align 4 - %4 = load i32* %i, align 4 + %4 = load i32, i32* %i, align 4 ret i32 %4 } diff --git a/test/CodeGen/Hexagon/fsub.ll b/test/CodeGen/Hexagon/fsub.ll index 07c866f4c2e2..ca7bdc4d0b38 100644 --- a/test/CodeGen/Hexagon/fsub.ll +++ b/test/CodeGen/Hexagon/fsub.ll @@ -10,8 +10,8 @@ entry: %c = alloca float, align 4 store float 0x402ECCCCC0000000, float* %a, align 4 store float 0x4022333340000000, float* %b, align 4 - %0 = load float* %b, align 4 - %1 = load float* %a, align 4 + %0 = load float, float* %b, align 4 + %1 = load float, float* %a, align 4 %sub = fsub float %0, %1 store float %sub, float* %c, align 4 ret i32 0 diff --git a/test/CodeGen/Hexagon/fusedandshift.ll b/test/CodeGen/Hexagon/fusedandshift.ll index 022b3c673458..59a1e1d84fcc 100644 --- a/test/CodeGen/Hexagon/fusedandshift.ll +++ b/test/CodeGen/Hexagon/fusedandshift.ll @@ -5,7 +5,7 @@ define i32 @main(i16* %a, i16* %b) nounwind { entry: - %0 = load i16* %a, align 2 + %0 = load i16, i16* %a, align 2 %conv1 = sext i16 %0 to i32 %shr1 = ashr i32 %conv1, 3 %and1 = and i32 %shr1, 15 diff --git a/test/CodeGen/Hexagon/gp-plus-offset-load.ll b/test/CodeGen/Hexagon/gp-plus-offset-load.ll index a1b80a65f82a..cd1aacc2318a 100644 --- a/test/CodeGen/Hexagon/gp-plus-offset-load.ll +++ b/test/CodeGen/Hexagon/gp-plus-offset-load.ll @@ -12,7 +12,7 @@ entry: br i1 %cmp, label %if.then, label %if.end if.then: ; preds = %entry - %0 = load i32* getelementptr inbounds (%struct.struc* @foo, i32 0, i32 3), align 4 + %0 = load i32, i32* getelementptr inbounds (%struct.struc, %struct.struc* @foo, i32 0, i32 3), align 4 store i32 %0, i32* %ival, align 4 br label %if.end @@ -27,7 +27,7 @@ entry: br i1 %cmp, label %if.then, label %if.end if.then: ; preds = %entry - %0 = load i8* getelementptr inbounds (%struct.struc* @foo, i32 0, i32 1), align 1 + %0 = load i8, i8* getelementptr inbounds (%struct.struc, %struct.struc* @foo, i32 0, i32 1), align 1 store i8 %0, i8* %ival, align 1 br label %if.end @@ -42,7 +42,7 @@ entry: br i1 %cmp, label %if.then, label %if.end if.then: ; preds = %entry - %0 = load i16* getelementptr inbounds (%struct.struc* @foo, i32 0, i32 2), align 2 + %0 = load i16, i16* getelementptr inbounds (%struct.struc, %struct.struc* @foo, i32 0, i32 2), align 2 store i16 %0, i16* %ival, align 2 br label %if.end diff --git a/test/CodeGen/Hexagon/gp-plus-offset-store.ll b/test/CodeGen/Hexagon/gp-plus-offset-store.ll index c782b30920ea..6b181cabe475 100644 --- a/test/CodeGen/Hexagon/gp-plus-offset-store.ll +++ b/test/CodeGen/Hexagon/gp-plus-offset-store.ll @@ -12,7 +12,7 @@ entry: br i1 %cmp, label %if.then, label %if.end if.then: ; preds = %entry - store i8 %ival, i8* getelementptr inbounds (%struct.struc* @foo, i32 0, i32 1), align 1 + store i8 %ival, i8* getelementptr inbounds (%struct.struc, %struct.struc* @foo, i32 0, i32 1), align 1 br label %if.end if.end: ; preds = %if.then, %entry @@ -26,7 +26,7 @@ entry: br i1 %cmp, label %if.then, label %if.end if.then: ; preds = %entry - store i16 %ival, i16* getelementptr inbounds (%struct.struc* @foo, i32 0, i32 2), align 2 + store i16 %ival, i16* getelementptr inbounds (%struct.struc, %struct.struc* @foo, i32 0, i32 2), align 2 br label %if.end if.end: ; preds = %if.then, %entry diff --git a/test/CodeGen/Hexagon/gp-rel.ll b/test/CodeGen/Hexagon/gp-rel.ll index 561869e8ef35..bb7cb182bf1b 100644 --- a/test/CodeGen/Hexagon/gp-rel.ll +++ 
b/test/CodeGen/Hexagon/gp-rel.ll @@ -10,14 +10,14 @@ entry: ; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(#a) ; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(#b) ; CHECK: if{{ *}}(p{{[0-3]}}) memw(##c){{ *}}={{ *}}r{{[0-9]+}} - %0 = load i32* @a, align 4 - %1 = load i32* @b, align 4 + %0 = load i32, i32* @a, align 4 + %1 = load i32, i32* @b, align 4 %add = add nsw i32 %1, %0 %cmp = icmp eq i32 %0, %1 br i1 %cmp, label %if.then, label %entry.if.end_crit_edge entry.if.end_crit_edge: - %.pre = load i32* @c, align 4 + %.pre = load i32, i32* @c, align 4 br label %if.end if.then: diff --git a/test/CodeGen/Hexagon/hwloop-cleanup.ll b/test/CodeGen/Hexagon/hwloop-cleanup.ll index 6456ebff16d3..c04966a5a4b2 100644 --- a/test/CodeGen/Hexagon/hwloop-cleanup.ll +++ b/test/CodeGen/Hexagon/hwloop-cleanup.ll @@ -1,4 +1,5 @@ -; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s +; RUN: llc -march=hexagon -mcpu=hexagonv4 -no-phi-elim-live-out-early-exit \ +; RUN: < %s | FileCheck %s ; Check that we remove the compare and induction variable instructions ; after generating hardware loops. ; Bug 6685. @@ -20,11 +21,11 @@ for.body: ; preds = %for.body.preheader, %sum.03 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ] %arrayidx.phi = phi i32* [ %arrayidx.inc, %for.body ], [ %b, %for.body.preheader ] %i.02 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ] - %0 = load i32* %arrayidx.phi, align 4 + %0 = load i32, i32* %arrayidx.phi, align 4 %add = add nsw i32 %0, %sum.03 %inc = add nsw i32 %i.02, 1 %exitcond = icmp eq i32 %inc, %n - %arrayidx.inc = getelementptr i32* %arrayidx.phi, i32 1 + %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1 br i1 %exitcond, label %for.end.loopexit, label %for.body for.end.loopexit: @@ -50,11 +51,11 @@ for.body: %sum.02 = phi i32 [ 0, %entry ], [ %add, %for.body ] %arrayidx.phi = phi i32* [ %b, %entry ], [ %arrayidx.inc, %for.body ] %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ] - %0 = load i32* %arrayidx.phi, align 4 + %0 = load i32, i32* %arrayidx.phi, align 4 %add = add nsw i32 %0, %sum.02 %inc = add nsw i32 %i.01, 1 %exitcond = icmp eq i32 %inc, 40 - %arrayidx.inc = getelementptr i32* %arrayidx.phi, i32 1 + %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1 br i1 %exitcond, label %for.end, label %for.body for.end: @@ -76,7 +77,7 @@ for.body: store i32 %i.01, i32* %arrayidx.phi, align 4 %inc = add nsw i32 %i.01, 1 %exitcond = icmp eq i32 %inc, 40 - %arrayidx.inc = getelementptr i32* %arrayidx.phi, i32 1 + %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1 br i1 %exitcond, label %for.end, label %for.body for.end: diff --git a/test/CodeGen/Hexagon/hwloop-const.ll b/test/CodeGen/Hexagon/hwloop-const.ll index 8204ddea3490..d549c1fef8c0 100644 --- a/test/CodeGen/Hexagon/hwloop-const.ll +++ b/test/CodeGen/Hexagon/hwloop-const.ll @@ -14,9 +14,9 @@ entry: ; CHECK: endloop for.body: ; preds = %for.body, %entry %i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds [25000 x i32]* @b, i32 0, i32 %i.02 + %arrayidx = getelementptr inbounds [25000 x i32], [25000 x i32]* @b, i32 0, i32 %i.02 store i32 %i.02, i32* %arrayidx, align 4 - %arrayidx1 = getelementptr inbounds [25000 x i32]* @a, i32 0, i32 %i.02 + %arrayidx1 = getelementptr inbounds [25000 x i32], [25000 x i32]* @a, i32 0, i32 %i.02 store i32 %i.02, i32* %arrayidx1, align 4 %inc = add nsw i32 %i.02, 1 %exitcond = icmp eq i32 %inc, 25000 diff --git a/test/CodeGen/Hexagon/hwloop-crit-edge.ll b/test/CodeGen/Hexagon/hwloop-crit-edge.ll new file mode 100644 index 
000000000000..4de4540c142e --- /dev/null +++ b/test/CodeGen/Hexagon/hwloop-crit-edge.ll @@ -0,0 +1,58 @@ +; RUN: llc -O3 -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s +; +; Generate hardware loop when loop 'latch' block is different +; from the loop 'exiting' block. + +; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}}) +; CHECK: endloop0 + +define void @test(i32* nocapture %pFL, i16 signext %nBS, i16* nocapture readonly %pHT) #0 { +entry: + %0 = load i32, i32* %pFL, align 4 + %1 = tail call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %0, i32 246) + %2 = tail call i64 @llvm.hexagon.S2.asl.r.p(i64 %1, i32 -13) + %3 = tail call i32 @llvm.hexagon.A2.sat(i64 %2) + store i32 %3, i32* %pFL, align 4 + %cmp16 = icmp sgt i16 %nBS, 0 + br i1 %cmp16, label %for.body.lr.ph, label %for.end + +for.body.lr.ph: + %4 = sext i16 %nBS to i32 + br label %for.body + +for.body: + %5 = phi i32 [ %3, %for.body.lr.ph ], [ %.pre, %for.body.for.body_crit_edge ] + %arrayidx3.phi = phi i32* [ %pFL, %for.body.lr.ph ], [ %arrayidx3.inc, %for.body.for.body_crit_edge ] + %arrayidx5.phi = phi i16* [ %pHT, %for.body.lr.ph ], [ %arrayidx5.inc, %for.body.for.body_crit_edge ] + %i.017.pmt = phi i32 [ 1, %for.body.lr.ph ], [ %phitmp, %for.body.for.body_crit_edge ] + %6 = load i16, i16* %arrayidx5.phi, align 2 + %conv6 = sext i16 %6 to i32 + %7 = tail call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %5, i32 %conv6) + %8 = tail call i64 @llvm.hexagon.S2.asl.r.p(i64 %7, i32 -13) + %9 = tail call i32 @llvm.hexagon.A2.sat(i64 %8) + store i32 %9, i32* %arrayidx3.phi, align 4 + %exitcond = icmp eq i32 %i.017.pmt, %4 + %arrayidx3.inc = getelementptr i32, i32* %arrayidx3.phi, i32 1 + br i1 %exitcond, label %for.end.loopexit, label %for.body.for.body_crit_edge + +for.body.for.body_crit_edge: + %arrayidx5.inc = getelementptr i16, i16* %arrayidx5.phi, i32 1 + %.pre = load i32, i32* %arrayidx3.inc, align 4 + %phitmp = add i32 %i.017.pmt, 1 + br label %for.body + +for.end.loopexit: + br label %for.end + +for.end: + ret void +} + +declare i32 @llvm.hexagon.A2.sat(i64) #1 + +declare i64 @llvm.hexagon.S2.asl.r.p(i64, i32) #1 + +declare i64 @llvm.hexagon.M2.dpmpyss.s0(i32, i32) #1 + +attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "ssp-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { nounwind readnone } diff --git a/test/CodeGen/Hexagon/hwloop-dbg.ll b/test/CodeGen/Hexagon/hwloop-dbg.ll index 3c05884f6a7d..66c6662f735a 100644 --- a/test/CodeGen/Hexagon/hwloop-dbg.ll +++ b/test/CodeGen/Hexagon/hwloop-dbg.ll @@ -5,9 +5,9 @@ target triple = "hexagon" define void @foo(i32* nocapture %a, i32* nocapture %b) nounwind { entry: - tail call void @llvm.dbg.value(metadata i32* %a, i64 0, metadata !13, metadata !{!"0x102"}), !dbg !17 - tail call void @llvm.dbg.value(metadata i32* %b, i64 0, metadata !14, metadata !{!"0x102"}), !dbg !18 - tail call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !15, metadata !{!"0x102"}), !dbg !19 + tail call void @llvm.dbg.value(metadata i32* %a, i64 0, metadata !13, metadata !DIExpression()), !dbg !17 + tail call void @llvm.dbg.value(metadata i32* %b, i64 0, metadata !14, metadata !DIExpression()), !dbg !18 + tail call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !15, metadata !DIExpression()), !dbg !19 br label %for.body, !dbg !19 for.body: ; preds = %for.body, %entry @@ -17,14 +17,14 @@ for.body: ; preds = %for.body, %entry %arrayidx.phi = phi i32* [ %a, 
%entry ], [ %arrayidx.inc, %for.body ] %i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ] %b.addr.01 = phi i32* [ %b, %entry ], [ %incdec.ptr, %for.body ] - %incdec.ptr = getelementptr inbounds i32* %b.addr.01, i32 1, !dbg !21 - tail call void @llvm.dbg.value(metadata i32* %incdec.ptr, i64 0, metadata !14, metadata !{!"0x102"}), !dbg !21 - %0 = load i32* %b.addr.01, align 4, !dbg !21 + %incdec.ptr = getelementptr inbounds i32, i32* %b.addr.01, i32 1, !dbg !21 + tail call void @llvm.dbg.value(metadata i32* %incdec.ptr, i64 0, metadata !14, metadata !DIExpression()), !dbg !21 + %0 = load i32, i32* %b.addr.01, align 4, !dbg !21 store i32 %0, i32* %arrayidx.phi, align 4, !dbg !21 %inc = add nsw i32 %i.02, 1, !dbg !26 - tail call void @llvm.dbg.value(metadata i32 %inc, i64 0, metadata !15, metadata !{!"0x102"}), !dbg !26 + tail call void @llvm.dbg.value(metadata i32 %inc, i64 0, metadata !15, metadata !DIExpression()), !dbg !26 %exitcond = icmp eq i32 %inc, 10, !dbg !19 - %arrayidx.inc = getelementptr i32* %arrayidx.phi, i32 1 + %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1 br i1 %exitcond, label %for.end, label %for.body, !dbg !19 for.end: ; preds = %for.body @@ -37,28 +37,28 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon !llvm.dbg.cu = !{!0} !llvm.module.flags = !{!29} -!0 = !{!"0x11\0012\00QuIC LLVM Hexagon Clang version 6.1-pre-unknown, (git://git-hexagon-aus.quicinc.com/llvm/clang-mainline.git e9382867661454cdf44addb39430741578e9765c) (llvm/llvm-mainline.git 36412bb1fcf03ed426d4437b41198bae066675ac)\001\00\000\00\001", !28, !2, !2, !3, !2, null} ; [ DW_TAG_compile_unit ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c] [DW_LANG_C99] +!0 = !DICompileUnit(language: DW_LANG_C99, producer: "QuIC LLVM Hexagon Clang version 6.1-pre-unknown, (git://git-hexagon-aus.quicinc.com/llvm/clang-mainline.git e9382867661454cdf44addb39430741578e9765c) (llvm/llvm-mainline.git 36412bb1fcf03ed426d4437b41198bae066675ac)", isOptimized: true, emissionKind: 1, file: !28, enums: !2, retainedTypes: !2, subprograms: !3, globals: !2) !2 = !{} !3 = !{!5} -!5 = !{!"0x2e\00foo\00foo\00\001\000\001\000\006\00256\001\001", !28, null, !7, null, void (i32*, i32*)* @foo, null, null, !11} ; [ DW_TAG_subprogram ] [line 1] [def] [foo] -!6 = !{!"0x29", !28} ; [ DW_TAG_file_type ] -!7 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !8, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ] +!5 = !DISubprogram(name: "foo", line: 1, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 1, file: !28, scope: null, type: !7, function: void (i32*, i32*)* @foo, variables: !11) +!6 = !DIFile(filename: "hwloop-dbg.c", directory: "/usr2/kparzysz/s.hex/t") +!7 = !DISubroutineType(types: !8) !8 = !{null, !9, !9} -!9 = !{!"0xf\00\000\0032\0032\000\000", null, null, !10} ; [ DW_TAG_pointer_type ] [line 0, size 32, align 32, offset 0] [from int] -!10 = !{!"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed] +!9 = !DIDerivedType(tag: DW_TAG_pointer_type, size: 32, align: 32, baseType: !10) +!10 = !DIBasicType(tag: DW_TAG_base_type, name: "int", size: 32, align: 32, encoding: DW_ATE_signed) !11 = !{!13, !14, !15} -!13 = !{!"0x101\00a\0016777217\000", !5, !6, !9} ; [ DW_TAG_arg_variable ] [a] [line 1] -!14 = !{!"0x101\00b\0033554433\000", !5, !6, !9} ; [ DW_TAG_arg_variable ] [b] [line 1] -!15 = !{!"0x100\00i\002\000", !16, 
!6, !10} ; [ DW_TAG_auto_variable ] [i] [line 2] -!16 = !{!"0xb\001\0026\000", !28, !5} ; [ DW_TAG_lexical_block ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c] -!17 = !MDLocation(line: 1, column: 15, scope: !5) -!18 = !MDLocation(line: 1, column: 23, scope: !5) -!19 = !MDLocation(line: 3, column: 8, scope: !20) -!20 = !{!"0xb\003\003\001", !28, !16} ; [ DW_TAG_lexical_block ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c] -!21 = !MDLocation(line: 4, column: 5, scope: !22) -!22 = !{!"0xb\003\0028\002", !28, !20} ; [ DW_TAG_lexical_block ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c] -!26 = !MDLocation(line: 3, column: 23, scope: !20) -!27 = !MDLocation(line: 6, column: 1, scope: !16) -!28 = !{!"hwloop-dbg.c", !"/usr2/kparzysz/s.hex/t"} -!29 = !{i32 1, !"Debug Info Version", i32 2} +!13 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "a", line: 1, arg: 1, scope: !5, file: !6, type: !9) +!14 = !DILocalVariable(tag: DW_TAG_arg_variable, name: "b", line: 1, arg: 2, scope: !5, file: !6, type: !9) +!15 = !DILocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 2, scope: !16, file: !6, type: !10) +!16 = distinct !DILexicalBlock(line: 1, column: 26, file: !28, scope: !5) +!17 = !DILocation(line: 1, column: 15, scope: !5) +!18 = !DILocation(line: 1, column: 23, scope: !5) +!19 = !DILocation(line: 3, column: 8, scope: !20) +!20 = distinct !DILexicalBlock(line: 3, column: 3, file: !28, scope: !16) +!21 = !DILocation(line: 4, column: 5, scope: !22) +!22 = distinct !DILexicalBlock(line: 3, column: 28, file: !28, scope: !20) +!26 = !DILocation(line: 3, column: 23, scope: !20) +!27 = !DILocation(line: 6, column: 1, scope: !16) +!28 = !DIFile(filename: "hwloop-dbg.c", directory: "/usr2/kparzysz/s.hex/t") +!29 = !{i32 1, !"Debug Info Version", i32 3} !30 = !{i32 0} diff --git a/test/CodeGen/Hexagon/hwloop-le.ll b/test/CodeGen/Hexagon/hwloop-le.ll index 9c8cec7c2a1b..85a1b3db673b 100644 --- a/test/CodeGen/Hexagon/hwloop-le.ll +++ b/test/CodeGen/Hexagon/hwloop-le.ll @@ -14,8 +14,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 28395, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -43,8 +43,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 9073, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -72,8 +72,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 21956, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -101,8 +101,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 16782, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr 
inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -130,8 +130,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 19097, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -159,8 +159,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -188,8 +188,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -217,8 +217,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -246,8 +246,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -275,8 +275,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -304,8 +304,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -333,8 +333,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 
%0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -362,8 +362,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -391,8 +391,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -420,8 +420,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 diff --git a/test/CodeGen/Hexagon/hwloop-loop1.ll b/test/CodeGen/Hexagon/hwloop-loop1.ll new file mode 100644 index 000000000000..8b02736e0374 --- /dev/null +++ b/test/CodeGen/Hexagon/hwloop-loop1.ll @@ -0,0 +1,68 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s +; +; Generate loop1 instruction for double loop sequence. + +; CHECK: loop0(.LBB{{.}}_{{.}}, #100) +; CHECK: endloop0 +; CHECK: loop1(.LBB{{.}}_{{.}}, #100) +; CHECK: loop0(.LBB{{.}}_{{.}}, #100) +; CHECK: endloop0 +; CHECK: endloop1 + +define i32 @main() #0 { +entry: + %array = alloca [100 x i32], align 8 + %doublearray = alloca [100 x [100 x i32]], align 8 + %0 = bitcast [100 x i32]* %array to i8* + call void @llvm.lifetime.start(i64 400, i8* %0) #1 + %1 = bitcast [100 x [100 x i32]]* %doublearray to i8* + call void @llvm.lifetime.start(i64 40000, i8* %1) #1 + %arrayidx1 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %doublearray, i32 0, i32 10, i32 10 + %arrayidx2.gep = getelementptr [100 x i32], [100 x i32]* %array, i32 0, i32 0 + br label %for.body + +for.body: + %2 = phi i32 [ undef, %entry ], [ %.pre, %for.body.for.body_crit_edge ] + %sum.031 = phi i32 [ undef, %entry ], [ %add, %for.body.for.body_crit_edge ] + %arrayidx2.phi = phi i32* [ %arrayidx2.gep, %entry ], [ %arrayidx2.inc, %for.body.for.body_crit_edge ] + %i.030 = phi i32 [ 1, %entry ], [ %phitmp, %for.body.for.body_crit_edge ] + %add = add nsw i32 %2, %sum.031 + %exitcond33 = icmp eq i32 %i.030, 100 + %arrayidx2.inc = getelementptr i32, i32* %arrayidx2.phi, i32 1 + br i1 %exitcond33, label %for.cond7.preheader.preheader, label %for.body.for.body_crit_edge + +for.cond7.preheader.preheader: + br label %for.cond7.preheader + +for.body.for.body_crit_edge: + %.pre = load i32, i32* %arrayidx2.inc, align 4 + %phitmp = add i32 %i.030, 1 + br label %for.body + +for.cond7.preheader: + %i.129 = phi i32 [ %inc16, %for.inc15 ], [ 0, %for.cond7.preheader.preheader ] + br label %for.body9 + +for.body9: + %j.028 = phi i32 [ 0, %for.cond7.preheader ], [ %inc13, %for.body9 ] + %arrayidx11 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %doublearray, i32 0, i32 %i.129, i32 %j.028 + store 
i32 %add, i32* %arrayidx11, align 4 + %inc13 = add nsw i32 %j.028, 1 + %exitcond = icmp eq i32 %inc13, 100 + br i1 %exitcond, label %for.inc15, label %for.body9 + +for.inc15: + %inc16 = add nsw i32 %i.129, 1 + %exitcond32 = icmp eq i32 %inc16, 100 + br i1 %exitcond32, label %for.end17, label %for.cond7.preheader + +for.end17: + %3 = load i32, i32* %arrayidx1, align 8 + call void @llvm.lifetime.end(i64 40000, i8* %1) #1 + call void @llvm.lifetime.end(i64 400, i8* %0) #1 + ret i32 %3 +} + +declare void @llvm.lifetime.start(i64, i8* nocapture) #1 + +declare void @llvm.lifetime.end(i64, i8* nocapture) #1 diff --git a/test/CodeGen/Hexagon/hwloop-lt.ll b/test/CodeGen/Hexagon/hwloop-lt.ll index 7e43733da2a6..7e2ad2a4678e 100644 --- a/test/CodeGen/Hexagon/hwloop-lt.ll +++ b/test/CodeGen/Hexagon/hwloop-lt.ll @@ -1,7 +1,6 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv4 -O3 < %s | FileCheck %s - -; CHECK: test_pos1_ir_slt +; CHECK-LABEL: @test_pos1_ir_slt ; CHECK: loop0 ; a < b define void @test_pos1_ir_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind { @@ -9,13 +8,13 @@ entry: %cmp3 = icmp slt i32 8531, %b br i1 %cmp3, label %for.body.lr.ph, label %for.end -for.body.lr.ph: ; preds = %entry +for.body.lr.ph: br label %for.body -for.body: ; preds = %for.body.lr.ph, %for.body +for.body: %i.04 = phi i32 [ 8531, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -24,13 +23,11 @@ for.body: ; preds = %for.body.lr.ph, %fo %cmp = icmp slt i32 %inc, %b br i1 %cmp, label %for.body, label %for.end -for.end: ; preds = %for.body, %entry +for.end: ret void } - - -; CHECK: test_pos2_ir_slt +; CHECK-LABEL: @test_pos2_ir_slt ; CHECK: loop0 ; a < b define void @test_pos2_ir_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind { @@ -38,13 +35,13 @@ entry: %cmp3 = icmp slt i32 9152, %b br i1 %cmp3, label %for.body.lr.ph, label %for.end -for.body.lr.ph: ; preds = %entry +for.body.lr.ph: br label %for.body -for.body: ; preds = %for.body.lr.ph, %for.body +for.body: %i.04 = phi i32 [ 9152, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -53,13 +50,11 @@ for.body: ; preds = %for.body.lr.ph, %fo %cmp = icmp slt i32 %inc, %b br i1 %cmp, label %for.body, label %for.end -for.end: ; preds = %for.body, %entry +for.end: ret void } - - -; CHECK: test_pos4_ir_slt +; CHECK-LABEL: @test_pos4_ir_slt ; CHECK: loop0 ; a < b define void @test_pos4_ir_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind { @@ -67,13 +62,13 @@ entry: %cmp3 = icmp slt i32 18851, %b br i1 %cmp3, label %for.body.lr.ph, label %for.end -for.body.lr.ph: ; preds = %entry +for.body.lr.ph: br label %for.body -for.body: ; preds = %for.body.lr.ph, %for.body +for.body: %i.04 = phi i32 [ 18851, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -82,13 +77,11 @@ for.body: ; preds = %for.body.lr.ph, %fo %cmp = icmp 
slt i32 %inc, %b br i1 %cmp, label %for.body, label %for.end -for.end: ; preds = %for.body, %entry +for.end: ret void } - - -; CHECK: test_pos8_ir_slt +; CHECK-LABEL: @test_pos8_ir_slt ; CHECK: loop0 ; a < b define void @test_pos8_ir_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind { @@ -96,13 +89,13 @@ entry: %cmp3 = icmp slt i32 25466, %b br i1 %cmp3, label %for.body.lr.ph, label %for.end -for.body.lr.ph: ; preds = %entry +for.body.lr.ph: br label %for.body -for.body: ; preds = %for.body.lr.ph, %for.body +for.body: %i.04 = phi i32 [ 25466, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -111,13 +104,11 @@ for.body: ; preds = %for.body.lr.ph, %fo %cmp = icmp slt i32 %inc, %b br i1 %cmp, label %for.body, label %for.end -for.end: ; preds = %for.body, %entry +for.end: ret void } - - -; CHECK: test_pos16_ir_slt +; CHECK-LABEL: @test_pos16_ir_slt ; CHECK: loop0 ; a < b define void @test_pos16_ir_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind { @@ -125,13 +116,13 @@ entry: %cmp3 = icmp slt i32 9295, %b br i1 %cmp3, label %for.body.lr.ph, label %for.end -for.body.lr.ph: ; preds = %entry +for.body.lr.ph: br label %for.body -for.body: ; preds = %for.body.lr.ph, %for.body +for.body: %i.04 = phi i32 [ 9295, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -140,13 +131,11 @@ for.body: ; preds = %for.body.lr.ph, %fo %cmp = icmp slt i32 %inc, %b br i1 %cmp, label %for.body, label %for.end -for.end: ; preds = %for.body, %entry +for.end: ret void } - - -; CHECK: test_pos1_ri_slt +; CHECK-LABEL: @test_pos1_ri_slt ; CHECK: loop0 ; a < b define void @test_pos1_ri_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind { @@ -154,13 +143,13 @@ entry: %cmp3 = icmp slt i32 %a, 31236 br i1 %cmp3, label %for.body.lr.ph, label %for.end -for.body.lr.ph: ; preds = %entry +for.body.lr.ph: br label %for.body -for.body: ; preds = %for.body.lr.ph, %for.body +for.body: %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -169,13 +158,11 @@ for.body: ; preds = %for.body.lr.ph, %fo %cmp = icmp slt i32 %inc, 31236 br i1 %cmp, label %for.body, label %for.end -for.end: ; preds = %for.body, %entry +for.end: ret void } - - -; CHECK: test_pos2_ri_slt +; CHECK-LABEL: @test_pos2_ri_slt ; CHECK: loop0 ; a < b define void @test_pos2_ri_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind { @@ -183,13 +170,13 @@ entry: %cmp3 = icmp slt i32 %a, 22653 br i1 %cmp3, label %for.body.lr.ph, label %for.end -for.body.lr.ph: ; preds = %entry +for.body.lr.ph: br label %for.body -for.body: ; preds = %for.body.lr.ph, %for.body +for.body: %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* 
%arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -198,13 +185,11 @@ for.body: ; preds = %for.body.lr.ph, %fo %cmp = icmp slt i32 %inc, 22653 br i1 %cmp, label %for.body, label %for.end -for.end: ; preds = %for.body, %entry +for.end: ret void } - - -; CHECK: test_pos4_ri_slt +; CHECK-LABEL: @test_pos4_ri_slt ; CHECK: loop0 ; a < b define void @test_pos4_ri_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind { @@ -212,13 +197,13 @@ entry: %cmp3 = icmp slt i32 %a, 1431 br i1 %cmp3, label %for.body.lr.ph, label %for.end -for.body.lr.ph: ; preds = %entry +for.body.lr.ph: br label %for.body -for.body: ; preds = %for.body.lr.ph, %for.body +for.body: %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -227,13 +212,11 @@ for.body: ; preds = %for.body.lr.ph, %fo %cmp = icmp slt i32 %inc, 1431 br i1 %cmp, label %for.body, label %for.end -for.end: ; preds = %for.body, %entry +for.end: ret void } - - -; CHECK: test_pos8_ri_slt +; CHECK-LABEL: @test_pos8_ri_slt ; CHECK: loop0 ; a < b define void @test_pos8_ri_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind { @@ -241,13 +224,13 @@ entry: %cmp3 = icmp slt i32 %a, 22403 br i1 %cmp3, label %for.body.lr.ph, label %for.end -for.body.lr.ph: ; preds = %entry +for.body.lr.ph: br label %for.body -for.body: ; preds = %for.body.lr.ph, %for.body +for.body: %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -256,13 +239,11 @@ for.body: ; preds = %for.body.lr.ph, %fo %cmp = icmp slt i32 %inc, 22403 br i1 %cmp, label %for.body, label %for.end -for.end: ; preds = %for.body, %entry +for.end: ret void } - - -; CHECK: test_pos16_ri_slt +; CHECK-LABEL: @test_pos16_ri_slt ; CHECK: loop0 ; a < b define void @test_pos16_ri_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind { @@ -270,13 +251,13 @@ entry: %cmp3 = icmp slt i32 %a, 21715 br i1 %cmp3, label %for.body.lr.ph, label %for.end -for.body.lr.ph: ; preds = %entry +for.body.lr.ph: br label %for.body -for.body: ; preds = %for.body.lr.ph, %for.body +for.body: %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -285,13 +266,11 @@ for.body: ; preds = %for.body.lr.ph, %fo %cmp = icmp slt i32 %inc, 21715 br i1 %cmp, label %for.body, label %for.end -for.end: ; preds = %for.body, %entry +for.end: ret void } - - -; CHECK: test_pos1_rr_slt +; CHECK-LABEL: @test_pos1_rr_slt ; CHECK: loop0 ; a < b define void @test_pos1_rr_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind { @@ -299,13 +278,13 @@ entry: %cmp3 = icmp slt i32 %a, %b br i1 %cmp3, label %for.body.lr.ph, label %for.end -for.body.lr.ph: ; preds = %entry +for.body.lr.ph: br label %for.body -for.body: ; preds = %for.body.lr.ph, %for.body +for.body: %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, 
%for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -314,13 +293,11 @@ for.body: ; preds = %for.body.lr.ph, %fo %cmp = icmp slt i32 %inc, %b br i1 %cmp, label %for.body, label %for.end -for.end: ; preds = %for.body, %entry +for.end: ret void } - - -; CHECK: test_pos2_rr_slt +; CHECK-LABEL: @test_pos2_rr_slt ; CHECK: loop0 ; a < b define void @test_pos2_rr_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind { @@ -328,13 +305,13 @@ entry: %cmp3 = icmp slt i32 %a, %b br i1 %cmp3, label %for.body.lr.ph, label %for.end -for.body.lr.ph: ; preds = %entry +for.body.lr.ph: br label %for.body -for.body: ; preds = %for.body.lr.ph, %for.body +for.body: %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -343,13 +320,11 @@ for.body: ; preds = %for.body.lr.ph, %fo %cmp = icmp slt i32 %inc, %b br i1 %cmp, label %for.body, label %for.end -for.end: ; preds = %for.body, %entry +for.end: ret void } - - -; CHECK: test_pos4_rr_slt +; CHECK-LABEL: @test_pos4_rr_slt ; CHECK: loop0 ; a < b define void @test_pos4_rr_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind { @@ -357,13 +332,13 @@ entry: %cmp3 = icmp slt i32 %a, %b br i1 %cmp3, label %for.body.lr.ph, label %for.end -for.body.lr.ph: ; preds = %entry +for.body.lr.ph: br label %for.body -for.body: ; preds = %for.body.lr.ph, %for.body +for.body: %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -372,13 +347,11 @@ for.body: ; preds = %for.body.lr.ph, %fo %cmp = icmp slt i32 %inc, %b br i1 %cmp, label %for.body, label %for.end -for.end: ; preds = %for.body, %entry +for.end: ret void } - - -; CHECK: test_pos8_rr_slt +; CHECK-LABEL: @test_pos8_rr_slt ; CHECK: loop0 ; a < b define void @test_pos8_rr_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind { @@ -386,13 +359,13 @@ entry: %cmp3 = icmp slt i32 %a, %b br i1 %cmp3, label %for.body.lr.ph, label %for.end -for.body.lr.ph: ; preds = %entry +for.body.lr.ph: br label %for.body -for.body: ; preds = %for.body.lr.ph, %for.body +for.body: %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -401,13 +374,11 @@ for.body: ; preds = %for.body.lr.ph, %fo %cmp = icmp slt i32 %inc, %b br i1 %cmp, label %for.body, label %for.end -for.end: ; preds = %for.body, %entry +for.end: ret void } - - -; CHECK: test_pos16_rr_slt +; CHECK-LABEL: @test_pos16_rr_slt ; CHECK: loop0 ; a < b define void @test_pos16_rr_slt(i8* nocapture %p, i32 %a, i32 %b) nounwind { @@ -415,13 +386,13 @@ entry: %cmp3 = icmp slt i32 %a, %b br i1 %cmp3, label %for.body.lr.ph, label %for.end -for.body.lr.ph: 
; preds = %entry +for.body.lr.ph: br label %for.body -for.body: ; preds = %for.body.lr.ph, %for.body +for.body: %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -430,7 +401,7 @@ for.body: ; preds = %for.body.lr.ph, %fo %cmp = icmp slt i32 %inc, %b br i1 %cmp, label %for.body, label %for.end -for.end: ; preds = %for.body, %entry +for.end: ret void } diff --git a/test/CodeGen/Hexagon/hwloop-lt1.ll b/test/CodeGen/Hexagon/hwloop-lt1.ll index cf5874011ee0..16fe728fa7bc 100644 --- a/test/CodeGen/Hexagon/hwloop-lt1.ll +++ b/test/CodeGen/Hexagon/hwloop-lt1.ll @@ -19,10 +19,10 @@ polly.loop_body: ; preds = %entry, %polly.loop_ %p_vector_iv14 = or i32 %polly.loopiv16, 1 %p_vector_iv3 = add i32 %p_vector_iv14, 1 %p_vector_iv415 = or i32 %polly.loopiv16, 3 - %p_arrayidx = getelementptr [400 x i8]* @A, i32 0, i32 %polly.loopiv16 - %p_arrayidx5 = getelementptr [400 x i8]* @A, i32 0, i32 %p_vector_iv14 - %p_arrayidx6 = getelementptr [400 x i8]* @A, i32 0, i32 %p_vector_iv3 - %p_arrayidx7 = getelementptr [400 x i8]* @A, i32 0, i32 %p_vector_iv415 + %p_arrayidx = getelementptr [400 x i8], [400 x i8]* @A, i32 0, i32 %polly.loopiv16 + %p_arrayidx5 = getelementptr [400 x i8], [400 x i8]* @A, i32 0, i32 %p_vector_iv14 + %p_arrayidx6 = getelementptr [400 x i8], [400 x i8]* @A, i32 0, i32 %p_vector_iv3 + %p_arrayidx7 = getelementptr [400 x i8], [400 x i8]* @A, i32 0, i32 %p_vector_iv415 store i8 123, i8* %p_arrayidx, align 1 store i8 123, i8* %p_arrayidx5, align 1 store i8 123, i8* %p_arrayidx6, align 1 diff --git a/test/CodeGen/Hexagon/hwloop-missed.ll b/test/CodeGen/Hexagon/hwloop-missed.ll new file mode 100644 index 000000000000..bcc800652294 --- /dev/null +++ b/test/CodeGen/Hexagon/hwloop-missed.ll @@ -0,0 +1,49 @@ +; RUN: llc -march=hexagon -hexagon-hwloop-preheader < %s | FileCheck %s + +; Generate hardware loops when we also need to add a new preheader. +; We should generate two hardware loops for this test case.
+ +; CHECK: loop0 +; CHECK: endloop0 +; CHECK: loop0 +; CHECK: endloop0 + +@g = external global i32 + +define void @test(i32* nocapture %a, i32* nocapture %b, i32 %n) nounwind { +entry: + %tobool = icmp eq i32 %n, 0 + br i1 %tobool, label %for.body4.preheader, label %for.body.preheader + +for.body.preheader: + br label %for.body + +for.body: + %arrayidx.phi = phi i32* [ %arrayidx.inc, %for.body ], [ %a, %for.body.preheader ] + %i.014 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ] + %0 = load i32, i32* @g, align 4 + store i32 %0, i32* %arrayidx.phi, align 4 + %inc = add nsw i32 %i.014, 1 + %exitcond15 = icmp eq i32 %inc, 3 + %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1 + br i1 %exitcond15, label %for.body4.preheader.loopexit, label %for.body + +for.body4.preheader.loopexit: + br label %for.body4.preheader + +for.body4.preheader: + br label %for.body4 + +for.body4: + %arrayidx5.phi = phi i32* [ %arrayidx5.inc, %for.body4 ], [ %b, %for.body4.preheader ] + %i1.013 = phi i32 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ] + %1 = load i32, i32* @g, align 4 + store i32 %1, i32* %arrayidx5.phi, align 4 + %inc7 = add nsw i32 %i1.013, 1 + %exitcond = icmp eq i32 %inc7, 3 + %arrayidx5.inc = getelementptr i32, i32* %arrayidx5.phi, i32 1 + br i1 %exitcond, label %for.end8, label %for.body4 + +for.end8: + ret void +} diff --git a/test/CodeGen/Hexagon/hwloop-ne.ll b/test/CodeGen/Hexagon/hwloop-ne.ll index bceef2a16955..12ef3b5dd0bc 100644 --- a/test/CodeGen/Hexagon/hwloop-ne.ll +++ b/test/CodeGen/Hexagon/hwloop-ne.ll @@ -14,8 +14,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 32623, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -43,8 +43,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 29554, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -72,8 +72,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 15692, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -101,8 +101,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 10449, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -130,8 +130,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 32087, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + 
%arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -159,8 +159,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -188,8 +188,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -217,8 +217,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -246,8 +246,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -275,8 +275,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -304,8 +304,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -333,8 +333,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -362,8 +362,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, 
align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -391,8 +391,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -420,8 +420,8 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 diff --git a/test/CodeGen/Hexagon/hwloop-ph-deadcode.ll b/test/CodeGen/Hexagon/hwloop-ph-deadcode.ll new file mode 100644 index 000000000000..06e6db420f8f --- /dev/null +++ b/test/CodeGen/Hexagon/hwloop-ph-deadcode.ll @@ -0,0 +1,23 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 -O2 -disable-block-placement=0 < %s | FileCheck %s + +; Test that there is no redundant register assignment in the hardware loop +; preheader. + +; CHECK-NOT: r{{.*}} = #5 + +@g = external global i32 + +define void @foo() #0 { +entry: + br i1 undef, label %if.end38, label %for.body + +for.body: + %loopIdx.051 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + store i32 1, i32* @g, align 4 + %inc = add i32 %loopIdx.051, 1 + %cmp9 = icmp ult i32 %inc, 5 + br i1 %cmp9, label %for.body, label %if.end38 + +if.end38: + ret void +} diff --git a/test/CodeGen/Hexagon/hwloop-pos-ivbump1.ll b/test/CodeGen/Hexagon/hwloop-pos-ivbump1.ll new file mode 100644 index 000000000000..7c5ea031ffae --- /dev/null +++ b/test/CodeGen/Hexagon/hwloop-pos-ivbump1.ll @@ -0,0 +1,45 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s + +; Test that a hardware loop is not generated due to a potential +; underflow.
+ +; CHECK-NOT: loop0 + +define i32 @main() #0 { +entry: + br label %while.cond.outer + +while.cond.outer.loopexit: + %.lcssa = phi i32 [ %0, %for.body.preheader ] + br label %while.cond.outer + +while.cond.outer: + %i.0.ph = phi i32 [ 0, %entry ], [ 3, %while.cond.outer.loopexit ] + %j.0.ph = phi i32 [ 0, %entry ], [ %.lcssa, %while.cond.outer.loopexit ] + %k.0.ph = phi i32 [ 0, %entry ], [ 1, %while.cond.outer.loopexit ] + br label %while.cond + +while.cond: + %i.0 = phi i32 [ %i.0.ph, %while.cond.outer ], [ %inc, %for.body.preheader ] + %j.0 = phi i32 [ %j.0.ph, %while.cond.outer ], [ %0, %for.body.preheader ] + %inc = add nsw i32 %i.0, 1 + %cmp = icmp slt i32 %i.0, 4 + br i1 %cmp, label %for.body.preheader, label %while.end + +for.body.preheader: + %0 = add i32 %j.0, 3 + %cmp5 = icmp eq i32 %inc, 3 + br i1 %cmp5, label %while.cond.outer.loopexit, label %while.cond + +while.end: + %k.0.ph.lcssa = phi i32 [ %k.0.ph, %while.cond ] + %inc.lcssa = phi i32 [ %inc, %while.cond ] + %j.0.lcssa = phi i32 [ %j.0, %while.cond ] + %cmp6 = icmp ne i32 %inc.lcssa, 5 + %cmp7 = icmp ne i32 %j.0.lcssa, 12 + %or.cond = or i1 %cmp6, %cmp7 + %cmp9 = icmp ne i32 %k.0.ph.lcssa, 1 + %or.cond12 = or i1 %or.cond, %cmp9 + %locflg.0 = zext i1 %or.cond12 to i32 + ret i32 %locflg.0 +} diff --git a/test/CodeGen/Hexagon/hwloop-preheader.ll b/test/CodeGen/Hexagon/hwloop-preheader.ll new file mode 100644 index 000000000000..66efd2089fce --- /dev/null +++ b/test/CodeGen/Hexagon/hwloop-preheader.ll @@ -0,0 +1,40 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 -hexagon-hwloop-preheader < %s +; REQUIRES: asserts + +; Test that the preheader is added to the parent loop, otherwise +; we generate an invalid hardware loop. + +; Function Attrs: nounwind readonly +define void @test(i16 signext %n) #0 { +entry: + br i1 undef, label %for.cond4.preheader.preheader.split.us, label %for.end22 + +for.cond4.preheader.preheader.split.us: + %0 = sext i16 %n to i32 + br label %for.body9.preheader.us + +for.body9.us: + %indvars.iv = phi i32 [ %indvars.iv.next.7, %for.body9.us ], [ 0, %for.body9.preheader.us ] + %indvars.iv.next.7 = add i32 %indvars.iv, 8 + %lftr.wideiv.7 = trunc i32 %indvars.iv.next.7 to i16 + %exitcond.7 = icmp slt i16 %lftr.wideiv.7, 0 + br i1 %exitcond.7, label %for.body9.us, label %for.body9.us.ur + +for.body9.preheader.us: + %i.030.us.pmt = phi i32 [ %inc21.us.pmt, %for.end.loopexit.us ], [ 0, %for.cond4.preheader.preheader.split.us ] + br i1 undef, label %for.body9.us, label %for.body9.us.ur + +for.body9.us.ur: + %exitcond.ur.old = icmp eq i16 undef, %n + br i1 %exitcond.ur.old, label %for.end.loopexit.us, label %for.body9.us.ur + +for.end.loopexit.us: + %inc21.us.pmt = add i32 %i.030.us.pmt, 1 + %exitcond33 = icmp eq i32 %inc21.us.pmt, %0 + br i1 %exitcond33, label %for.end22, label %for.body9.preheader.us + +for.end22: + ret void +} + +attributes #0 = { nounwind readonly "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" } diff --git a/test/CodeGen/Hexagon/hwloop-range.ll b/test/CodeGen/Hexagon/hwloop-range.ll new file mode 100644 index 000000000000..5e6fe78d0e0b --- /dev/null +++ b/test/CodeGen/Hexagon/hwloop-range.ll @@ -0,0 +1,36 @@ +; RUN: llc -march=hexagon -hexagon-loop-range=0 < %s | FileCheck %s + +; Test that the loop start address operand uses a constant extender +; if the offset is out of range. 
+ +; CHECK: loop0(##.LBB +; CHECK: endloop0 + +@g = external global i32, align 4 + +define void @test(i32* nocapture %a, i32* nocapture readonly %b, i32 %n) #0 { +entry: + %cmp6 = icmp slt i32 %n, 1 + br i1 %cmp6, label %for.end, label %for.body.preheader + +for.body.preheader: + br label %for.body + +for.body: + %i.07 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ] + %arrayidx = getelementptr inbounds i32, i32* %b, i32 %i.07 + %0 = load i32, i32* %arrayidx, align 4 + %1 = load i32, i32* @g, align 4 + %mul = mul nsw i32 %1, %0 + %arrayidx1 = getelementptr inbounds i32, i32* %a, i32 %i.07 + store i32 %mul, i32* %arrayidx1, align 4 + %inc = add nuw nsw i32 %i.07, 1 + %exitcond = icmp eq i32 %inc, %n + br i1 %exitcond, label %for.end.loopexit, label %for.body + +for.end.loopexit: + br label %for.end + +for.end: + ret void +} diff --git a/test/CodeGen/Hexagon/hwloop-recursion.ll b/test/CodeGen/Hexagon/hwloop-recursion.ll new file mode 100644 index 000000000000..8ab2dc37d021 --- /dev/null +++ b/test/CodeGen/Hexagon/hwloop-recursion.ll @@ -0,0 +1,64 @@ +; RUN: llc -O2 -march=hexagon -mcpu=hexagonv5 < %s +; REQUIRES: asserts +; Check for successful compilation. + +@c = common global i32 0, align 4 +@e = common global i32 0, align 4 +@g = common global i32* null, align 4 +@a = common global i32 0, align 4 +@b = common global i32 0, align 4 +@h = common global i32* null, align 4 +@d = common global i32 0, align 4 +@f = common global i32 0, align 4 + +define i32 @fn1([0 x i32]* nocapture readnone %p1) #0 { +entry: + %0 = load i32*, i32** @h, align 4 + %1 = load i32*, i32** @g, align 4 + %.pre = load i32, i32* @c, align 4 + br label %for.cond + +for.cond: + %2 = phi i32 [ %10, %if.end ], [ %.pre, %entry ] + store i32 %2, i32* @e, align 4 + %tobool5 = icmp eq i32 %2, 0 + br i1 %tobool5, label %for.end, label %for.body.lr.ph + +for.body.lr.ph: + %3 = sub i32 -5, %2 + %4 = urem i32 %3, 5 + %5 = sub i32 %3, %4 + br label %for.body + +for.body: + %add6 = phi i32 [ %2, %for.body.lr.ph ], [ %add, %for.body ] + %6 = load i32, i32* %1, align 4 + store i32 %6, i32* @a, align 4 + %add = add nsw i32 %add6, 5 + %tobool = icmp eq i32 %add, 0 + br i1 %tobool, label %for.cond1.for.end_crit_edge, label %for.body + +for.cond1.for.end_crit_edge: + %7 = add i32 %2, 5 + %8 = add i32 %7, %5 + store i32 %8, i32* @e, align 4 + br label %for.end + +for.end: + %9 = load i32, i32* @b, align 4 + %tobool2 = icmp eq i32 %9, 0 + br i1 %tobool2, label %if.end, label %if.then + +if.then: + store i32 0, i32* %0, align 4 + %.pre7 = load i32, i32* @c, align 4 + br label %if.end + +if.end: + %10 = phi i32 [ %2, %for.end ], [ %.pre7, %if.then ] + store i32 %10, i32* @d, align 4 + %11 = load i32, i32* @f, align 4 + %inc = add nsw i32 %11, 1 + store i32 %inc, i32* @f, align 4 + br label %for.cond +} diff --git a/test/CodeGen/Hexagon/hwloop-wrap.ll b/test/CodeGen/Hexagon/hwloop-wrap.ll new file mode 100644 index 000000000000..e0f6a87fd2e4 --- /dev/null +++ b/test/CodeGen/Hexagon/hwloop-wrap.ll @@ -0,0 +1,22 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s + +; We shouldn't generate a hardware loop in this case because the initial +; value may be zero, which means the endloop instruction will not decrement +; the loop counter, and the loop will execute only once. 
+ +; CHECK-NOT: loop0 + +define void @foo(i32 %count, i32 %v) #0 { +entry: + br label %do.body + +do.body: + %count.addr.0 = phi i32 [ %count, %entry ], [ %dec, %do.body ] + tail call void asm sideeffect "nop", ""() #1 + %dec = add i32 %count.addr.0, -1 + %cmp = icmp eq i32 %dec, 0 + br i1 %cmp, label %do.end, label %do.body + +do.end: + ret void +} diff --git a/test/CodeGen/Hexagon/hwloop-wrap2.ll b/test/CodeGen/Hexagon/hwloop-wrap2.ll new file mode 100644 index 000000000000..50675d6b681b --- /dev/null +++ b/test/CodeGen/Hexagon/hwloop-wrap2.ll @@ -0,0 +1,67 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 -O3 < %s | FileCheck %s + +; Test that we do not generate a hardware loop due to a potential underflow. + +; CHECK-NOT: loop0 + +%struct.3 = type { i8*, i8, i8, i32, i32, i16, i16, i16, i16, i16, i16, i16, %struct.2* } +%struct.2 = type { i16, i16, i16, i16, %struct.1* } +%struct.1 = type { %struct.1*, %struct.0*, i32, i32, i16, [2 x i16], [2 x i16], i16 } +%struct.0 = type { %struct.0*, i32, i32, i32, i32, i32, i32, i16, i16, i16, i8, i8, i8, i8 } + +@pairArray = external global i32** +@carray = external global %struct.3** + +define void @test() #0 { +entry: + %0 = load i32**, i32*** @pairArray, align 4 + %1 = load %struct.3**, %struct.3*** @carray, align 4 + br i1 undef, label %for.end110, label %for.body + +for.body: + %row.0199 = phi i32 [ %inc109, %for.inc108 ], [ 1, %entry ] + %arrayidx = getelementptr inbounds i32*, i32** %0, i32 %row.0199 + %2 = load i32*, i32** %arrayidx, align 4 + br i1 undef, label %for.body48, label %for.inc108 + +for.cond45: + %cmp46 = icmp sgt i32 %dec58, 0 + br i1 %cmp46, label %for.body48, label %for.inc108 + +for.body48: + %i.1190 = phi i32 [ %dec58, %for.cond45 ], [ 0, %for.body ] + %arrayidx50 = getelementptr inbounds i32, i32* %2, i32 %i.1190 + %3 = load i32, i32* %arrayidx50, align 4 + %cmp53 = icmp slt i32 %3, 0 + %dec58 = add nsw i32 %i.1190, -1 + br i1 %cmp53, label %for.end59, label %for.cond45 + +for.end59: + %cmp60 = icmp slt i32 %i.1190, 0 + br i1 %cmp60, label %if.then65, label %for.inc108 + +if.then65: + br label %for.body80 + +for.body80: + %j.1196.in = phi i32 [ %j.1196, %for.body80 ], [ %i.1190, %if.then65 ] + %j.1196 = add nsw i32 %j.1196.in, 1 + %arrayidx81 = getelementptr inbounds i32, i32* %2, i32 %j.1196 + %4 = load i32, i32* %arrayidx81, align 4 + %arrayidx82 = getelementptr inbounds %struct.3*, %struct.3** %1, i32 %4 + %5 = load %struct.3*, %struct.3** %arrayidx82, align 4 + %cxcenter83 = getelementptr inbounds %struct.3, %struct.3* %5, i32 0, i32 3 + store i32 0, i32* %cxcenter83, align 4 + %6 = load i32, i32* %arrayidx81, align 4 + %arrayidx87 = getelementptr inbounds i32, i32* %2, i32 %j.1196.in + store i32 %6, i32* %arrayidx87, align 4 + %exitcond = icmp eq i32 %j.1196, 0 + br i1 %exitcond, label %for.inc108, label %for.body80 + +for.inc108: + %inc109 = add nsw i32 %row.0199, 1 + br i1 undef, label %for.body, label %for.end110 + +for.end110: + ret void +} diff --git a/test/CodeGen/Hexagon/hwloop1.ll b/test/CodeGen/Hexagon/hwloop1.ll new file mode 100644 index 000000000000..97b779cf9628 --- /dev/null +++ b/test/CodeGen/Hexagon/hwloop1.ll @@ -0,0 +1,161 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; Check that we generate hardware loop instructions. + +; Case 1 : Loop with a constant number of iterations. 
+; CHECK-LABEL: @hwloop1 +; CHECK: loop0(.LBB{{.}}_{{.}}, #10) +; CHECK: endloop0 + +@a = common global [10 x i32] zeroinitializer, align 4 +define i32 @hwloop1() nounwind { +entry: + br label %for.body +for.body: + %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ] + %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* @a, i32 0, i32 %i.01 + store i32 %i.01, i32* %arrayidx, align 4 + %inc = add nsw i32 %i.01, 1 + %exitcond = icmp eq i32 %inc, 10 + br i1 %exitcond, label %for.end, label %for.body +for.end: + ret i32 0 +} + +; Case 2 : Loop with a run-time number of iterations. +; CHECK-LABEL: @hwloop2 +; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}}) +; CHECK: endloop0 + +define i32 @hwloop2(i32 %n, i32* nocapture %b) nounwind { +entry: + %cmp1 = icmp sgt i32 %n, 0 + br i1 %cmp1, label %for.body.preheader, label %for.end + +for.body.preheader: + br label %for.body + +for.body: + %a.03 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ] + %i.02 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ] + %arrayidx = getelementptr inbounds i32, i32* %b, i32 %i.02 + %0 = load i32, i32* %arrayidx, align 4 + %add = add nsw i32 %0, %a.03 + %inc = add nsw i32 %i.02, 1 + %exitcond = icmp eq i32 %inc, %n + br i1 %exitcond, label %for.end.loopexit, label %for.body + +for.end.loopexit: + br label %for.end + +for.end: + %a.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.end.loopexit ] + ret i32 %a.0.lcssa +} + +; Case 3 : Induction variable increment more than 1. +; CHECK-LABEL: @hwloop3 +; CHECK: lsr(r{{[0-9]+}}, #2) +; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}}) +; CHECK: endloop0 + +define i32 @hwloop3(i32 %n, i32* nocapture %b) nounwind { +entry: + %cmp1 = icmp sgt i32 %n, 0 + br i1 %cmp1, label %for.body.preheader, label %for.end + +for.body.preheader: + br label %for.body + +for.body: + %a.03 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ] + %i.02 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ] + %arrayidx = getelementptr inbounds i32, i32* %b, i32 %i.02 + %0 = load i32, i32* %arrayidx, align 4 + %add = add nsw i32 %0, %a.03 + %inc = add nsw i32 %i.02, 4 + %exitcond = icmp eq i32 %inc, %n + br i1 %exitcond, label %for.end.loopexit, label %for.body + +for.end.loopexit: + br label %for.end + +for.end: + %a.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.end.loopexit ] + ret i32 %a.0.lcssa +} + +; Case 4 : Loop exit compare uses register instead of immediate value. +; CHECK-LABEL: @hwloop4 +; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}}) +; CHECK: endloop0 + +define i32 @hwloop4(i32 %n, i32* nocapture %b) nounwind { +entry: + %cmp1 = icmp sgt i32 %n, 0 + br i1 %cmp1, label %for.body.preheader, label %for.end + +for.body.preheader: + br label %for.body + +for.body: + %i.02 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ] + %arrayidx = getelementptr inbounds i32, i32* %b, i32 %i.02 + store i32 %i.02, i32* %arrayidx, align 4 + %inc = add nsw i32 %i.02, 1 + %exitcond = icmp eq i32 %inc, %n + br i1 %exitcond, label %for.end.loopexit, label %for.body + +for.end.loopexit: + br label %for.end + +for.end: + ret i32 0 +} + +; Case 5: After LSR, the initial value is 100 and the iv decrements to 0. 
+; CHECK-LABEL: @hwloop5 +; CHECK: loop0(.LBB{{.}}_{{.}}, #100) +; CHECK: endloop0 + +define void @hwloop5(i32* nocapture %a, i32* nocapture %res) nounwind { +entry: + br label %for.body + +for.body: + %i.03 = phi i32 [ 0, %entry ], [ %inc, %for.body ] + %arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.03 + %0 = load i32, i32* %arrayidx, align 4 + %mul = mul nsw i32 %0, %0 + %arrayidx2 = getelementptr inbounds i32, i32* %res, i32 %i.03 + store i32 %mul, i32* %arrayidx2, align 4 + %inc = add nsw i32 %i.03, 1 + %exitcond = icmp eq i32 %inc, 100 + br i1 %exitcond, label %for.end, label %for.body + +for.end: + ret void +} + +; Case 6: Large immediate offset +; CHECK-LABEL: @hwloop6 +; CHECK-NOT: loop0(.LBB{{.}}_{{.}}, #1024) +; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}}) +; CHECK: endloop0 + +define void @hwloop6(i32* nocapture %a, i32* nocapture %res) nounwind { +entry: + br label %for.body + +for.body: + %i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ] + %arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.02 + %0 = load i32, i32* %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds i32, i32* %res, i32 %i.02 + store i32 %0, i32* %arrayidx1, align 4 + %inc = add nsw i32 %i.02, 1 + %exitcond = icmp eq i32 %inc, 1024 + br i1 %exitcond, label %for.end, label %for.body + +for.end: + ret void +} diff --git a/test/CodeGen/Hexagon/hwloop2.ll b/test/CodeGen/Hexagon/hwloop2.ll new file mode 100644 index 000000000000..d411d979904e --- /dev/null +++ b/test/CodeGen/Hexagon/hwloop2.ll @@ -0,0 +1,37 @@ +; RUN: llc -disable-lsr -march=hexagon < %s | FileCheck %s + +; Test for multiple phis with induction variables. + +; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}}) +; CHECK: endloop0 + +define i32 @hwloop4(i32* nocapture %s, i32* nocapture %a, i32 %n) { +entry: + %cmp3 = icmp eq i32 %n, 0 + br i1 %cmp3, label %for.end, label %for.body.lr.ph + +for.body.lr.ph: + %.pre = load i32, i32* %s, align 4 + br label %for.body + +for.body: + %0 = phi i32 [ %.pre, %for.body.lr.ph ], [ %add1, %for.body ] + %j.05 = phi i32 [ 0, %for.body.lr.ph ], [ %add2, %for.body ] + %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ %n, %for.body.lr.ph ] + %lsr.iv1 = phi i32* [ %scevgep, %for.body ], [ %a, %for.body.lr.ph ] + %1 = load i32, i32* %lsr.iv1, align 4 + %add1 = add nsw i32 %0, %1 + store i32 %add1, i32* %s, align 4 + %add2 = add nsw i32 %j.05, 1 + %lsr.iv.next = add i32 %lsr.iv, -1 + %scevgep = getelementptr i32, i32* %lsr.iv1, i32 1 + %cmp = icmp eq i32 %lsr.iv.next, 0 + br i1 %cmp, label %for.end.loopexit, label %for.body + +for.end.loopexit: + br label %for.end + +for.end: + %j.0.lcssa = phi i32 [ 0, %entry ], [ %add2, %for.end.loopexit ] + ret i32 %j.0.lcssa +} diff --git a/test/CodeGen/Hexagon/hwloop3.ll b/test/CodeGen/Hexagon/hwloop3.ll new file mode 100644 index 000000000000..1135e06a0c43 --- /dev/null +++ b/test/CodeGen/Hexagon/hwloop3.ll @@ -0,0 +1,27 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; +; Remove the unconditional jump to following instruction. 
+ +; CHECK: endloop0 +; CHECK-NOT: jump [[L1:.]] +; CHECK-NOT: [[L1]] + +define void @test(i32* nocapture %a, i32 %n) nounwind { +entry: + br label %for.body + +for.body: + %arrayidx.phi = phi i32* [ %a, %entry ], [ %arrayidx.inc, %for.body ] + %i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ] + %0 = load i32, i32* %arrayidx.phi, align 4 + %add = add nsw i32 %0, 1 + store i32 %add, i32* %arrayidx.phi, align 4 + %inc = add nsw i32 %i.02, 1 + %exitcond = icmp eq i32 %inc, 100 + %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1 + br i1 %exitcond, label %for.end, label %for.body + +for.end: + ret void +} + diff --git a/test/CodeGen/Hexagon/hwloop4.ll b/test/CodeGen/Hexagon/hwloop4.ll new file mode 100644 index 000000000000..d159c45e3fb8 --- /dev/null +++ b/test/CodeGen/Hexagon/hwloop4.ll @@ -0,0 +1,76 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s +; +; Remove the unnecessary 'add' instruction used for the hardware loop setup. + +; CHECK: [[OP0:r[0-9]+]] = add([[OP1:r[0-9]+]], #-[[OP2:[0-9]+]] +; CHECK-NOT: add([[OP0]], #[[OP2]]) +; CHECK: lsr([[OP1]], #{{[0-9]+}}) +; CHECK: loop0 + +define void @matrix_mul_matrix(i32 %N, i32* nocapture %C, i16* nocapture readnone %A, i16* nocapture readnone %B) #0 { +entry: + %cmp4 = icmp eq i32 %N, 0 + br i1 %cmp4, label %for.end, label %for.body.preheader + +for.body.preheader: + %maxval = add i32 %N, -7 + %0 = icmp sgt i32 %maxval, 0 + br i1 %0, label %for.body.preheader9, label %for.body.ur.preheader + +for.body.preheader9: + br label %for.body + +for.body: + %arrayidx.phi = phi i32* [ %arrayidx.inc.7, %for.body ], [ %C, %for.body.preheader9 ] + %i.05 = phi i32 [ %inc.7, %for.body ], [ 0, %for.body.preheader9 ] + store i32 %i.05, i32* %arrayidx.phi, align 4 + %inc = add i32 %i.05, 1 + %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1 + store i32 %inc, i32* %arrayidx.inc, align 4 + %inc.1 = add i32 %i.05, 2 + %arrayidx.inc.1 = getelementptr i32, i32* %arrayidx.phi, i32 2 + store i32 %inc.1, i32* %arrayidx.inc.1, align 4 + %inc.2 = add i32 %i.05, 3 + %arrayidx.inc.2 = getelementptr i32, i32* %arrayidx.phi, i32 3 + store i32 %inc.2, i32* %arrayidx.inc.2, align 4 + %inc.3 = add i32 %i.05, 4 + %arrayidx.inc.3 = getelementptr i32, i32* %arrayidx.phi, i32 4 + store i32 %inc.3, i32* %arrayidx.inc.3, align 4 + %inc.4 = add i32 %i.05, 5 + %arrayidx.inc.4 = getelementptr i32, i32* %arrayidx.phi, i32 5 + store i32 %inc.4, i32* %arrayidx.inc.4, align 4 + %inc.5 = add i32 %i.05, 6 + %arrayidx.inc.5 = getelementptr i32, i32* %arrayidx.phi, i32 6 + store i32 %inc.5, i32* %arrayidx.inc.5, align 4 + %inc.6 = add i32 %i.05, 7 + %arrayidx.inc.6 = getelementptr i32, i32* %arrayidx.phi, i32 7 + store i32 %inc.6, i32* %arrayidx.inc.6, align 4 + %inc.7 = add i32 %i.05, 8 + %exitcond.7 = icmp slt i32 %inc.7, %maxval + %arrayidx.inc.7 = getelementptr i32, i32* %arrayidx.phi, i32 8 + br i1 %exitcond.7, label %for.body, label %for.end.loopexit.ur-lcssa + +for.end.loopexit.ur-lcssa: + %1 = icmp eq i32 %inc.7, %N + br i1 %1, label %for.end, label %for.body.ur.preheader + +for.body.ur.preheader: + %arrayidx.phi.ur.ph = phi i32* [ %C, %for.body.preheader ], [ %arrayidx.inc.7, %for.end.loopexit.ur-lcssa ] + %i.05.ur.ph = phi i32 [ 0, %for.body.preheader ], [ %inc.7, %for.end.loopexit.ur-lcssa ] + br label %for.body.ur + +for.body.ur: + %arrayidx.phi.ur = phi i32* [ %arrayidx.inc.ur, %for.body.ur ], [ %arrayidx.phi.ur.ph, %for.body.ur.preheader ] + %i.05.ur = phi i32 [ %inc.ur, %for.body.ur ], [ %i.05.ur.ph, %for.body.ur.preheader ] + store i32 
%i.05.ur, i32* %arrayidx.phi.ur, align 4 + %inc.ur = add i32 %i.05.ur, 1 + %exitcond.ur = icmp eq i32 %inc.ur, %N + %arrayidx.inc.ur = getelementptr i32, i32* %arrayidx.phi.ur, i32 1 + br i1 %exitcond.ur, label %for.end.loopexit, label %for.body.ur + +for.end.loopexit: + br label %for.end + +for.end: + ret void +} diff --git a/test/CodeGen/Hexagon/hwloop5.ll b/test/CodeGen/Hexagon/hwloop5.ll new file mode 100644 index 000000000000..0886b03cc754 --- /dev/null +++ b/test/CodeGen/Hexagon/hwloop5.ll @@ -0,0 +1,93 @@ +; RUN: llc -O3 -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s +; +; Generate hardware loop when unknown trip count loop is vectorized. + +; CHECK: loop0(.LBB{{[0-9]*}}_{{[0-9]*}}, r{{[0-9]+}}) +; CHECK: endloop0 +; CHECK: loop0(.LBB{{[0-9]*}}_{{[0-9]*}}, r{{[0-9]+}}) +; CHECK: endloop0 + +@A = common global [1000 x i32] zeroinitializer, align 8 +@B = common global [1000 x i32] zeroinitializer, align 8 + +define i32 @dotprod2(i32 %count) #0 { +entry.split: + %cmp6 = icmp sgt i32 %count, 0 + br i1 %cmp6, label %polly.cond, label %for.end + +for.end.loopexit: + br label %for.end + +for.end: + %sum.0.lcssa.reg2mem.0.load37 = phi i32 [ 0, %entry.split ], [ %p_add34, %polly.loop_if13 ], [ %p_add, %for.end.loopexit ] + ret i32 %sum.0.lcssa.reg2mem.0.load37 + +polly.cond: + %0 = icmp sgt i32 %count, 1 + br i1 %0, label %polly.loop_if, label %polly.loop_if13 + +polly.loop_exit.loopexit: + br label %polly.loop_exit + +polly.loop_exit: + %1 = phi <2 x i32> [ zeroinitializer, %polly.loop_if ], [ %addp_vec, %polly.loop_exit.loopexit ] + %2 = extractelement <2 x i32> %1, i32 0 + %3 = extractelement <2 x i32> %1, i32 1 + %add_sum = add i32 %2, %3 + br label %polly.loop_if13 + +polly.loop_if: + %4 = add i32 %count, -1 + %leftover_lb = and i32 %4, -2 + %polly.loop_guard = icmp eq i32 %leftover_lb, 0 + br i1 %polly.loop_guard, label %polly.loop_exit, label %polly.loop_preheader + +polly.stmt.for.body: + %addp_vec28 = phi <2 x i32> [ zeroinitializer, %polly.loop_preheader ], [ %addp_vec, %polly.stmt.for.body ] + %scevgep.phi = phi i32* [ getelementptr inbounds ([1000 x i32], [1000 x i32]* @A, i32 0, i32 0), %polly.loop_preheader ], [ %scevgep.inc, %polly.stmt.for.body ] + %scevgep9.phi = phi i32* [ getelementptr inbounds ([1000 x i32], [1000 x i32]* @B, i32 0, i32 0), %polly.loop_preheader ], [ %scevgep9.inc, %polly.stmt.for.body ] + %polly.indvar = phi i32 [ 0, %polly.loop_preheader ], [ %polly.indvar_next, %polly.stmt.for.body ] + %vector_ptr = bitcast i32* %scevgep.phi to <2 x i32>* + %_p_vec_full = load <2 x i32>, <2 x i32>* %vector_ptr, align 8 + %vector_ptr10 = bitcast i32* %scevgep9.phi to <2 x i32>* + %_p_vec_full11 = load <2 x i32>, <2 x i32>* %vector_ptr10, align 8 + %mulp_vec = mul <2 x i32> %_p_vec_full11, %_p_vec_full + %addp_vec = add <2 x i32> %mulp_vec, %addp_vec28 + %polly.indvar_next = add nsw i32 %polly.indvar, 2 + %polly.loop_cond = icmp eq i32 %polly.indvar, %polly.adjust_ub + %scevgep.inc = getelementptr i32, i32* %scevgep.phi, i32 2 + %scevgep9.inc = getelementptr i32, i32* %scevgep9.phi, i32 2 + br i1 %polly.loop_cond, label %polly.loop_exit.loopexit, label %polly.stmt.for.body + +polly.loop_preheader: + %polly.adjust_ub = add i32 %leftover_lb, -2 + br label %polly.stmt.for.body + +polly.loop_if13: + %p_add34 = phi i32 [ 0, %polly.cond ], [ %add_sum, %polly.loop_exit ] + %merge.lb = phi i32 [ 0, %polly.cond ], [ %leftover_lb, %polly.loop_exit ] + %polly.loop_guard17 = icmp slt i32 %merge.lb, %count + br i1 %polly.loop_guard17, label %polly.loop_preheader15, label %for.end 
+ +polly.stmt.for.body22: + %p_add30 = phi i32 [ %p_add34, %polly.loop_preheader15 ], [ %p_add, %polly.stmt.for.body22 ] + %polly.indvar18 = phi i32 [ %merge.lb, %polly.loop_preheader15 ], [ %polly.indvar_next19, %polly.stmt.for.body22 ] + %5 = tail call i32 @llvm.annotation.i32(i32 %polly.indvar18, i8* null, i8* null, i32 0), !polly.loop.smallTripCount !0 + %scevgep23 = getelementptr [1000 x i32], [1000 x i32]* @A, i32 0, i32 %polly.indvar18 + %_p_scalar_ = load i32, i32* %scevgep23, align 4 + %scevgep24 = getelementptr [1000 x i32], [1000 x i32]* @B, i32 0, i32 %polly.indvar18 + %_p_scalar_25 = load i32, i32* %scevgep24, align 4 + %p_mul = mul nsw i32 %_p_scalar_25, %_p_scalar_ + %p_add = add nsw i32 %p_mul, %p_add30 + %polly.indvar_next19 = add nsw i32 %polly.indvar18, 1 + %polly.loop_cond21 = icmp slt i32 %polly.indvar18, %polly.adjust_ub20 + br i1 %polly.loop_cond21, label %polly.stmt.for.body22, label %for.end.loopexit + +polly.loop_preheader15: + %polly.adjust_ub20 = add i32 %count, -1 + br label %polly.stmt.for.body22 +} + +declare i32 @llvm.annotation.i32(i32, i8*, i8*, i32) #1 + +!0 = !{} diff --git a/test/CodeGen/Hexagon/i16_VarArg.ll b/test/CodeGen/Hexagon/i16_VarArg.ll index c5d05a5e6ed8..ba98f6226683 100644 --- a/test/CodeGen/Hexagon/i16_VarArg.ll +++ b/test/CodeGen/Hexagon/i16_VarArg.ll @@ -20,8 +20,8 @@ declare i32 @printf(i8*, ...) define i32 @main() { - %a = load double* @A - %b = load double* @B + %a = load double, double* @A + %b = load double, double* @B %lt_r = fcmp olt double %a, %b %le_r = fcmp ole double %a, %b %gt_r = fcmp ogt double %a, %b @@ -29,12 +29,12 @@ define i32 @main() { %eq_r = fcmp oeq double %a, %b %ne_r = fcmp une double %a, %b %val1 = zext i1 %lt_r to i16 - %lt_s = getelementptr [12 x i8]* @lt_str, i64 0, i64 0 - %le_s = getelementptr [13 x i8]* @le_str, i64 0, i64 0 - %gt_s = getelementptr [12 x i8]* @gt_str, i64 0, i64 0 - %ge_s = getelementptr [13 x i8]* @ge_str, i64 0, i64 0 - %eq_s = getelementptr [13 x i8]* @eq_str, i64 0, i64 0 - %ne_s = getelementptr [13 x i8]* @ne_str, i64 0, i64 0 - call i32 (i8*, ...)* @printf( i8* %lt_s, i16 %val1 ) + %lt_s = getelementptr [12 x i8], [12 x i8]* @lt_str, i64 0, i64 0 + %le_s = getelementptr [13 x i8], [13 x i8]* @le_str, i64 0, i64 0 + %gt_s = getelementptr [12 x i8], [12 x i8]* @gt_str, i64 0, i64 0 + %ge_s = getelementptr [13 x i8], [13 x i8]* @ge_str, i64 0, i64 0 + %eq_s = getelementptr [13 x i8], [13 x i8]* @eq_str, i64 0, i64 0 + %ne_s = getelementptr [13 x i8], [13 x i8]* @ne_str, i64 0, i64 0 + call i32 (i8*, ...) @printf( i8* %lt_s, i16 %val1 ) ret i32 0 } diff --git a/test/CodeGen/Hexagon/i1_VarArg.ll b/test/CodeGen/Hexagon/i1_VarArg.ll index 37f27787c186..1908b3c71f3f 100644 --- a/test/CodeGen/Hexagon/i1_VarArg.ll +++ b/test/CodeGen/Hexagon/i1_VarArg.ll @@ -20,25 +20,25 @@ declare i32 @printf(i8*, ...) 
define i32 @main() { - %a = load double* @A - %b = load double* @B + %a = load double, double* @A + %b = load double, double* @B %lt_r = fcmp olt double %a, %b %le_r = fcmp ole double %a, %b %gt_r = fcmp ogt double %a, %b %ge_r = fcmp oge double %a, %b %eq_r = fcmp oeq double %a, %b %ne_r = fcmp une double %a, %b - %lt_s = getelementptr [12 x i8]* @lt_str, i64 0, i64 0 - %le_s = getelementptr [13 x i8]* @le_str, i64 0, i64 0 - %gt_s = getelementptr [12 x i8]* @gt_str, i64 0, i64 0 - %ge_s = getelementptr [13 x i8]* @ge_str, i64 0, i64 0 - %eq_s = getelementptr [13 x i8]* @eq_str, i64 0, i64 0 - %ne_s = getelementptr [13 x i8]* @ne_str, i64 0, i64 0 - call i32 (i8*, ...)* @printf( i8* %lt_s, i1 %lt_r ) - call i32 (i8*, ...)* @printf( i8* %le_s, i1 %le_r ) - call i32 (i8*, ...)* @printf( i8* %gt_s, i1 %gt_r ) - call i32 (i8*, ...)* @printf( i8* %ge_s, i1 %ge_r ) - call i32 (i8*, ...)* @printf( i8* %eq_s, i1 %eq_r ) - call i32 (i8*, ...)* @printf( i8* %ne_s, i1 %ne_r ) + %lt_s = getelementptr [12 x i8], [12 x i8]* @lt_str, i64 0, i64 0 + %le_s = getelementptr [13 x i8], [13 x i8]* @le_str, i64 0, i64 0 + %gt_s = getelementptr [12 x i8], [12 x i8]* @gt_str, i64 0, i64 0 + %ge_s = getelementptr [13 x i8], [13 x i8]* @ge_str, i64 0, i64 0 + %eq_s = getelementptr [13 x i8], [13 x i8]* @eq_str, i64 0, i64 0 + %ne_s = getelementptr [13 x i8], [13 x i8]* @ne_str, i64 0, i64 0 + call i32 (i8*, ...) @printf( i8* %lt_s, i1 %lt_r ) + call i32 (i8*, ...) @printf( i8* %le_s, i1 %le_r ) + call i32 (i8*, ...) @printf( i8* %gt_s, i1 %gt_r ) + call i32 (i8*, ...) @printf( i8* %ge_s, i1 %ge_r ) + call i32 (i8*, ...) @printf( i8* %eq_s, i1 %eq_r ) + call i32 (i8*, ...) @printf( i8* %ne_s, i1 %ne_r ) ret i32 0 } diff --git a/test/CodeGen/Hexagon/i8_VarArg.ll b/test/CodeGen/Hexagon/i8_VarArg.ll index 6f056ff417af..c40a6a957270 100644 --- a/test/CodeGen/Hexagon/i8_VarArg.ll +++ b/test/CodeGen/Hexagon/i8_VarArg.ll @@ -20,8 +20,8 @@ declare i32 @printf(i8*, ...) define i32 @main() { - %a = load double* @A - %b = load double* @B + %a = load double, double* @A + %b = load double, double* @B %lt_r = fcmp olt double %a, %b %le_r = fcmp ole double %a, %b %gt_r = fcmp ogt double %a, %b @@ -29,12 +29,12 @@ define i32 @main() { %eq_r = fcmp oeq double %a, %b %ne_r = fcmp une double %a, %b %val1 = zext i1 %lt_r to i8 - %lt_s = getelementptr [12 x i8]* @lt_str, i64 0, i64 0 - %le_s = getelementptr [13 x i8]* @le_str, i64 0, i64 0 - %gt_s = getelementptr [12 x i8]* @gt_str, i64 0, i64 0 - %ge_s = getelementptr [13 x i8]* @ge_str, i64 0, i64 0 - %eq_s = getelementptr [13 x i8]* @eq_str, i64 0, i64 0 - %ne_s = getelementptr [13 x i8]* @ne_str, i64 0, i64 0 - call i32 (i8*, ...)* @printf( i8* %lt_s, i8 %val1 ) + %lt_s = getelementptr [12 x i8], [12 x i8]* @lt_str, i64 0, i64 0 + %le_s = getelementptr [13 x i8], [13 x i8]* @le_str, i64 0, i64 0 + %gt_s = getelementptr [12 x i8], [12 x i8]* @gt_str, i64 0, i64 0 + %ge_s = getelementptr [13 x i8], [13 x i8]* @ge_str, i64 0, i64 0 + %eq_s = getelementptr [13 x i8], [13 x i8]* @eq_str, i64 0, i64 0 + %ne_s = getelementptr [13 x i8], [13 x i8]* @ne_str, i64 0, i64 0 + call i32 (i8*, ...) 
@printf( i8* %lt_s, i8 %val1 ) ret i32 0 } diff --git a/test/CodeGen/Hexagon/idxload-with-zero-offset.ll b/test/CodeGen/Hexagon/idxload-with-zero-offset.ll index 729d79f55a6e..f1a9d38f1b1c 100644 --- a/test/CodeGen/Hexagon/idxload-with-zero-offset.ll +++ b/test/CodeGen/Hexagon/idxload-with-zero-offset.ll @@ -1,70 +1,70 @@ -; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s -; Check that we generate load instruction with (base + register offset << 0) +; RUN: llc -march=hexagon < %s | FileCheck %s +; Check that we generate load instruction with (base + register offset << x) ; load word -define i32 @load_w(i32* nocapture %a, i32 %n) nounwind { -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}<<#0) +define i32 @load_w(i32* nocapture %a, i32 %n, i32 %m) nounwind { +; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<{{ *}}#2) entry: - %tmp = shl i32 %n, 4 - %scevgep9 = getelementptr i32* %a, i32 %tmp - %val = load i32* %scevgep9, align 4 + %tmp = add i32 %n, %m + %scevgep9 = getelementptr i32, i32* %a, i32 %tmp + %val = load i32, i32* %scevgep9, align 4 ret i32 %val } ; load unsigned half word -define i16 @load_uh(i16* nocapture %a, i32 %n) nounwind { -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memuh(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}<<#0) +define i16 @load_uh(i16* nocapture %a, i32 %n, i32 %m) nounwind { +; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memuh(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<#1) entry: - %tmp = shl i32 %n, 4 - %scevgep9 = getelementptr i16* %a, i32 %tmp - %val = load i16* %scevgep9, align 2 + %tmp = add i32 %n, %m + %scevgep9 = getelementptr i16, i16* %a, i32 %tmp + %val = load i16, i16* %scevgep9, align 2 ret i16 %val } ; load signed half word -define i32 @load_h(i16* nocapture %a, i32 %n) nounwind { -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memh(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}<<#0) +define i32 @load_h(i16* nocapture %a, i32 %n, i32 %m) nounwind { +; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memh(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<#1) entry: - %tmp = shl i32 %n, 4 - %scevgep9 = getelementptr i16* %a, i32 %tmp - %val = load i16* %scevgep9, align 2 + %tmp = add i32 %n, %m + %scevgep9 = getelementptr i16, i16* %a, i32 %tmp + %val = load i16, i16* %scevgep9, align 2 %conv = sext i16 %val to i32 ret i32 %conv } ; load unsigned byte -define i8 @load_ub(i8* nocapture %a, i32 %n) nounwind { -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memub(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}<<#0) +define i8 @load_ub(i8* nocapture %a, i32 %n, i32 %m) nounwind { +; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memub(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<#0) entry: - %tmp = shl i32 %n, 4 - %scevgep9 = getelementptr i8* %a, i32 %tmp - %val = load i8* %scevgep9, align 1 + %tmp = add i32 %n, %m + %scevgep9 = getelementptr i8, i8* %a, i32 %tmp + %val = load i8, i8* %scevgep9, align 1 ret i8 %val } ; load signed byte -define i32 @foo_2(i8* nocapture %a, i32 %n) nounwind { -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memb(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}<<#0) +define i32 @foo_2(i8* nocapture %a, i32 %n, i32 %m) nounwind { +; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memb(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<{{ *}}#0) entry: - %tmp = shl i32 %n, 4 - %scevgep9 = getelementptr i8* %a, i32 %tmp - %val = load i8* %scevgep9, align 1 + %tmp = add i32 %n, %m + %scevgep9 = getelementptr i8, i8* %a, i32 %tmp + %val = load i8, i8* %scevgep9, align 1 %conv = sext i8 %val to i32 ret i32 %conv } ; load doubleword -define i64 @load_d(i64* nocapture %a, i32 %n) nounwind { -; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ 
*}}memd(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}<<#0) +define i64 @load_d(i64* nocapture %a, i32 %n, i32 %m) nounwind { +; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}memd(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<{{ *}}#3) entry: - %tmp = shl i32 %n, 4 - %scevgep9 = getelementptr i64* %a, i32 %tmp - %val = load i64* %scevgep9, align 8 + %tmp = add i32 %n, %m + %scevgep9 = getelementptr i64, i64* %a, i32 %tmp + %val = load i64, i64* %scevgep9, align 8 ret i64 %val } diff --git a/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll b/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll new file mode 100644 index 000000000000..37f9f4007b67 --- /dev/null +++ b/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll @@ -0,0 +1,202 @@ +; RUN: llc -march=hexagon -O0 < %s | FileCheck %s +; Hexagon Programmer's Reference Manual 11.1.1 ALU32/ALU + +; Add +declare i32 @llvm.hexagon.A2.addi(i32, i32) +define i32 @A2_addi(i32 %a) { + %z = call i32 @llvm.hexagon.A2.addi(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = add(r0, #0) + +declare i32 @llvm.hexagon.A2.add(i32, i32) +define i32 @A2_add(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.add(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = add(r0, r1) + +declare i32 @llvm.hexagon.A2.addsat(i32, i32) +define i32 @A2_addsat(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.addsat(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = add(r0, r1):sat + +; Logical operations +declare i32 @llvm.hexagon.A2.and(i32, i32) +define i32 @A2_and(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.and(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = and(r0, r1) + +declare i32 @llvm.hexagon.A2.or(i32, i32) +define i32 @A2_or(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.or(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = or(r0, r1) + +declare i32 @llvm.hexagon.A2.xor(i32, i32) +define i32 @A2_xor(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.xor(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = xor(r0, r1) + +declare i32 @llvm.hexagon.A4.andn(i32, i32) +define i32 @A4_andn(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A4.andn(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = and(r0, ~r1) + +declare i32 @llvm.hexagon.A4.orn(i32, i32) +define i32 @A4_orn(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A4.orn(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = or(r0, ~r1) + +; Nop +declare void @llvm.hexagon.A2.nop() +define void @A2_nop(i32 %a, i32 %b) { + call void @llvm.hexagon.A2.nop() + ret void +} +; CHECK: nop + +; Subtract +declare i32 @llvm.hexagon.A2.sub(i32, i32) +define i32 @A2_sub(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.sub(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = sub(r0, r1) + +declare i32 @llvm.hexagon.A2.subsat(i32, i32) +define i32 @A2_subsat(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.subsat(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = sub(r0, r1):sat + +; Sign extend +declare i32 @llvm.hexagon.A2.sxtb(i32) +define i32 @A2_sxtb(i32 %a) { + %z = call i32 @llvm.hexagon.A2.sxtb(i32 %a) + ret i32 %z +} +; CHECK: r0 = sxtb(r0) + +declare i32 @llvm.hexagon.A2.sxth(i32) +define i32 @A2_sxth(i32 %a) { + %z = call i32 @llvm.hexagon.A2.sxth(i32 %a) + ret i32 %z +} +; CHECK: r0 = sxth(r0) + +; Transfer immediate +declare i32 @llvm.hexagon.A2.tfril(i32, i32) +define i32 @A2_tfril(i32 %a) { + %z = call i32 @llvm.hexagon.A2.tfril(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0.l = #0 + +declare i32 @llvm.hexagon.A2.tfrih(i32, i32) +define i32 @A2_tfrih(i32 %a) { + %z = call i32 @llvm.hexagon.A2.tfrih(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0.h = #0 + +declare i32 
@llvm.hexagon.A2.tfrsi(i32) +define i32 @A2_tfrsi() { + %z = call i32 @llvm.hexagon.A2.tfrsi(i32 0) + ret i32 %z +} +; CHECK: r0 = #0 + +; Transfer register +declare i32 @llvm.hexagon.A2.tfr(i32) +define i32 @A2_tfr(i32 %a) { + %z = call i32 @llvm.hexagon.A2.tfr(i32 %a) + ret i32 %z +} +; CHECK: r0 = r0 + +; Vector add halfwords +declare i32 @llvm.hexagon.A2.svaddh(i32, i32) +define i32 @A2_svaddh(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.svaddh(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = vaddh(r0, r1) + +declare i32 @llvm.hexagon.A2.svaddhs(i32, i32) +define i32 @A2_svaddhs(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.svaddhs(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = vaddh(r0, r1):sat + +declare i32 @llvm.hexagon.A2.svadduhs(i32, i32) +define i32 @A2_svadduhs(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.svadduhs(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = vadduh(r0, r1):sat + +; Vector average halfwords +declare i32 @llvm.hexagon.A2.svavgh(i32, i32) +define i32 @A2_svavgh(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.svavgh(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = vavgh(r0, r1) + +declare i32 @llvm.hexagon.A2.svavghs(i32, i32) +define i32 @A2_svavghs(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.svavghs(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = vavgh(r0, r1):rnd + +declare i32 @llvm.hexagon.A2.svnavgh(i32, i32) +define i32 @A2_svnavgh(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.svnavgh(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = vnavgh(r0, r1) + +; Vector subtract halfwords +declare i32 @llvm.hexagon.A2.svsubh(i32, i32) +define i32 @A2_svsubh(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.svsubh(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = vsubh(r0, r1) + +declare i32 @llvm.hexagon.A2.svsubhs(i32, i32) +define i32 @A2_svsubhs(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.svsubhs(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = vsubh(r0, r1):sat + +declare i32 @llvm.hexagon.A2.svsubuhs(i32, i32) +define i32 @A2_svsubuhs(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.svsubuhs(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = vsubuh(r0, r1):sat + +; Zero extend +declare i32 @llvm.hexagon.A2.zxth(i32) +define i32 @A2_zxth(i32 %a) { + %z = call i32 @llvm.hexagon.A2.zxth(i32 %a) + ret i32 %z +} +; CHECK: r0 = zxth(r0) diff --git a/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll b/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll new file mode 100644 index 000000000000..a9cc01c5dcb0 --- /dev/null +++ b/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll @@ -0,0 +1,104 @@ +; RUN: llc -march=hexagon -O0 < %s | FileCheck %s +; Hexagon Programmer's Reference Manual 11.1.2 ALU32/PERM + +; Combine words into doubleword +declare i64 @llvm.hexagon.A4.combineri(i32, i32) +define i64 @A4_combineri(i32 %a) { + %z = call i64 @llvm.hexagon.A4.combineri(i32 %a, i32 0) + ret i64 %z +} +; CHECK: = combine(r0, #0) + +declare i64 @llvm.hexagon.A4.combineir(i32, i32) +define i64 @A4_combineir(i32 %a) { + %z = call i64 @llvm.hexagon.A4.combineir(i32 0, i32 %a) + ret i64 %z +} +; CHECK: = combine(#0, r0) + +declare i64 @llvm.hexagon.A2.combineii(i32, i32) +define i64 @A2_combineii() { + %z = call i64 @llvm.hexagon.A2.combineii(i32 0, i32 0) + ret i64 %z +} +; CHECK: r1:0 = combine(#0, #0) + +declare i32 @llvm.hexagon.A2.combine.hh(i32, i32) +define i32 @A2_combine_hh(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.combine.hh(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = combine(r0.h, r1.h) + +declare i32 @llvm.hexagon.A2.combine.hl(i32, i32) +define 
i32 @A2_combine_hl(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.combine.hl(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = combine(r0.h, r1.l) + +declare i32 @llvm.hexagon.A2.combine.lh(i32, i32) +define i32 @A2_combine_lh(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.combine.lh(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = combine(r0.l, r1.h) + +declare i32 @llvm.hexagon.A2.combine.ll(i32, i32) +define i32 @A2_combine_ll(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.combine.ll(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = combine(r0.l, r1.l) + +declare i64 @llvm.hexagon.A2.combinew(i32, i32) +define i64 @A2_combinew(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.A2.combinew(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = combine(r0, r1) + +; Mux +declare i32 @llvm.hexagon.C2.muxri(i32, i32, i32) +define i32 @C2_muxri(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.C2.muxri(i32 %a, i32 0, i32 %b) + ret i32 %z +} +; CHECK: r0 = mux(p0, #0, r1) + +declare i32 @llvm.hexagon.C2.muxir(i32, i32, i32) +define i32 @C2_muxir(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.C2.muxir(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 = mux(p0, r1, #0) + +declare i32 @llvm.hexagon.C2.mux(i32, i32, i32) +define i32 @C2_mux(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.C2.mux(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 = mux(p0, r1, r2) + +; Shift word by 16 +declare i32 @llvm.hexagon.A2.aslh(i32) +define i32 @A2_aslh(i32 %a) { + %z = call i32 @llvm.hexagon.A2.aslh(i32 %a) + ret i32 %z +} +; CHECK: r0 = aslh(r0) + +declare i32 @llvm.hexagon.A2.asrh(i32) +define i32 @A2_asrh(i32 %a) { + %z = call i32 @llvm.hexagon.A2.asrh(i32 %a) + ret i32 %z +} +; CHECK: r0 = asrh(r0) + +; Pack high and low halfwords +declare i64 @llvm.hexagon.S2.packhl(i32, i32) +define i64 @S2_packhl(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.S2.packhl(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = packhl(r0, r1) diff --git a/test/CodeGen/Hexagon/intrinsics/cr.ll b/test/CodeGen/Hexagon/intrinsics/cr.ll new file mode 100644 index 000000000000..9bdcb253fe2f --- /dev/null +++ b/test/CodeGen/Hexagon/intrinsics/cr.ll @@ -0,0 +1,132 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; Hexagon Programmer's Reference Manual 11.2 CR + +; Corner detection acceleration +declare i32 @llvm.hexagon.C4.fastcorner9(i32, i32) +define i32 @C4_fastcorner9(i32 %a, i32 %b) { + %z = call i32@llvm.hexagon.C4.fastcorner9(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = fastcorner9(p0, p1) + +declare i32 @llvm.hexagon.C4.fastcorner9.not(i32, i32) +define i32 @C4_fastcorner9_not(i32 %a, i32 %b) { + %z = call i32@llvm.hexagon.C4.fastcorner9.not(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = !fastcorner9(p0, p1) + +; Logical reductions on predicates +declare i32 @llvm.hexagon.C2.any8(i32) +define i32 @C2_any8(i32 %a) { + %z = call i32@llvm.hexagon.C2.any8(i32 %a) + ret i32 %z +} +; CHECK: p0 = any8(p0) + +declare i32 @llvm.hexagon.C2.all8(i32) +define i32 @C2_all8(i32 %a) { + %z = call i32@llvm.hexagon.C2.all8(i32 %a) + ret i32 %z +} + +; CHECK: p0 = all8(p0) + +; Logical operations on predicates +declare i32 @llvm.hexagon.C2.and(i32, i32) +define i32 @C2_and(i32 %a, i32 %b) { + %z = call i32@llvm.hexagon.C2.and(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = and(p0, p1) + +declare i32 @llvm.hexagon.C4.and.and(i32, i32, i32) +define i32 @C4_and_and(i32 %a, i32 %b, i32 %c) { + %z = call i32@llvm.hexagon.C4.and.and(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: p0 = and(p0, and(p1, p2)) + +declare i32 
@llvm.hexagon.C2.or(i32, i32) +define i32 @C2_or(i32 %a, i32 %b) { + %z = call i32@llvm.hexagon.C2.or(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = or(p0, p1) + +declare i32 @llvm.hexagon.C4.and.or(i32, i32, i32) +define i32 @C4_and_or(i32 %a, i32 %b, i32 %c) { + %z = call i32@llvm.hexagon.C4.and.or(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: p0 = and(p0, or(p1, p2)) + +declare i32 @llvm.hexagon.C2.xor(i32, i32) +define i32 @C2_xor(i32 %a, i32 %b) { + %z = call i32@llvm.hexagon.C2.xor(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = xor(p0, p1) + +declare i32 @llvm.hexagon.C4.or.and(i32, i32, i32) +define i32 @C4_or_and(i32 %a, i32 %b, i32 %c) { + %z = call i32@llvm.hexagon.C4.or.and(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: p0 = or(p0, and(p1, p2)) + +declare i32 @llvm.hexagon.C2.andn(i32, i32) +define i32 @C2_andn(i32 %a, i32 %b) { + %z = call i32@llvm.hexagon.C2.andn(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = and(p0, !p1) + +declare i32 @llvm.hexagon.C4.or.or(i32, i32, i32) +define i32 @C4_or_or(i32 %a, i32 %b, i32 %c) { + %z = call i32@llvm.hexagon.C4.or.or(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: p0 = or(p0, or(p1, p2)) + +declare i32 @llvm.hexagon.C4.and.andn(i32, i32, i32) +define i32 @C4_and_andn(i32 %a, i32 %b, i32 %c) { + %z = call i32@llvm.hexagon.C4.and.andn(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: p0 = and(p0, and(p1, !p2)) + +declare i32 @llvm.hexagon.C4.and.orn(i32, i32, i32) +define i32 @C4_and_orn(i32 %a, i32 %b, i32 %c) { + %z = call i32@llvm.hexagon.C4.and.orn(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: p0 = and(p0, or(p1, !p2)) + +declare i32 @llvm.hexagon.C2.not(i32) +define i32 @C2_not(i32 %a) { + %z = call i32@llvm.hexagon.C2.not(i32 %a) + ret i32 %z +} +; CHECK: p0 = not(p0) + +declare i32 @llvm.hexagon.C4.or.andn(i32, i32, i32) +define i32 @C4_or_andn(i32 %a, i32 %b, i32 %c) { + %z = call i32@llvm.hexagon.C4.or.andn(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: p0 = or(p0, and(p1, !p2)) + +declare i32 @llvm.hexagon.C2.orn(i32, i32) +define i32 @C2_orn(i32 %a, i32 %b) { + %z = call i32@llvm.hexagon.C2.orn(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = or(p0, !p1) + +declare i32 @llvm.hexagon.C4.or.orn(i32, i32, i32) +define i32 @C4_or_orn(i32 %a, i32 %b, i32 %c) { + %z = call i32@llvm.hexagon.C4.or.orn(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: p0 = or(p0, or(p1, !p2)) diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll b/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll new file mode 100644 index 000000000000..4a11112d73a9 --- /dev/null +++ b/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll @@ -0,0 +1,1020 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | FileCheck %s +; Hexagon Programmer's Reference Manual 11.10.1 XTYPE/ALU + +; Absolute value doubleword +declare i64 @llvm.hexagon.A2.absp(i64) +define i64 @A2_absp(i64 %a) { + %z = call i64 @llvm.hexagon.A2.absp(i64 %a) + ret i64 %z +} +; CHECK: r1:0 = abs(r1:0) + +; Absolute value word +declare i32 @llvm.hexagon.A2.abs(i32) +define i32 @A2_abs(i32 %a) { + %z = call i32 @llvm.hexagon.A2.abs(i32 %a) + ret i32 %z +} +; CHECK: r0 = abs(r0) + +declare i32 @llvm.hexagon.A2.abssat(i32) +define i32 @A2_abssat(i32 %a) { + %z = call i32 @llvm.hexagon.A2.abssat(i32 %a) + ret i32 %z +} +; CHECK: r0 = abs(r0):sat + +; Add and accumulate +declare i32 @llvm.hexagon.S4.addaddi(i32, i32, i32) +define i32 @S4_addaddi(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S4.addaddi(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 = add(r0, add(r1, #0)) + +declare i32 
@llvm.hexagon.S4.subaddi(i32, i32, i32) +define i32 @S4_subaddi(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S4.subaddi(i32 %a, i32 0, i32 %b) + ret i32 %z +} +; CHECK: r0 = add(r0, sub(#0, r1)) + +declare i32 @llvm.hexagon.M2.accii(i32, i32, i32) +define i32 @M2_accii(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.accii(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 += add(r1, #0) + +declare i32 @llvm.hexagon.M2.naccii(i32, i32, i32) +define i32 @M2_naccii(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.naccii(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 -= add(r1, #0) + +declare i32 @llvm.hexagon.M2.acci(i32, i32, i32) +define i32 @M2_acci(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.acci(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += add(r1, r2) + +declare i32 @llvm.hexagon.M2.nacci(i32, i32, i32) +define i32 @M2_nacci(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.nacci(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= add(r1, r2) + +; Add doublewords +declare i64 @llvm.hexagon.A2.addp(i64, i64) +define i64 @A2_addp(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.addp(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = add(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.addpsat(i64, i64) +define i64 @A2_addpsat(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.addpsat(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = add(r1:0, r3:2):sat + +; Add halfword +declare i32 @llvm.hexagon.A2.addh.l16.ll(i32, i32) +define i32 @A2_addh_l16_ll(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.addh.l16.ll(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = add(r0.l, r1.l) + +declare i32 @llvm.hexagon.A2.addh.l16.hl(i32, i32) +define i32 @A2_addh_l16_hl(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.addh.l16.hl(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = add(r0.l, r1.h) + +declare i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32, i32) +define i32 @A2_addh_l16_sat.ll(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = add(r0.l, r1.l):sat + +declare i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32, i32) +define i32 @A2_addh_l16_sat.hl(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = add(r0.l, r1.h):sat + +declare i32 @llvm.hexagon.A2.addh.h16.ll(i32, i32) +define i32 @A2_addh_h16_ll(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.addh.h16.ll(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = add(r0.l, r1.l):<<16 + +declare i32 @llvm.hexagon.A2.addh.h16.lh(i32, i32) +define i32 @A2_addh_h16_lh(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.addh.h16.lh(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = add(r0.l, r1.h):<<16 + +declare i32 @llvm.hexagon.A2.addh.h16.hl(i32, i32) +define i32 @A2_addh_h16_hl(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.addh.h16.hl(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = add(r0.h, r1.l):<<16 + +declare i32 @llvm.hexagon.A2.addh.h16.hh(i32, i32) +define i32 @A2_addh_h16_hh(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.addh.h16.hh(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = add(r0.h, r1.h):<<16 + +declare i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32, i32) +define i32 @A2_addh_h16_sat_ll(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = add(r0.l, r1.l):sat:<<16 + +declare i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32, i32) +define i32 @A2_addh_h16_sat_lh(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32 %a, i32 
%b) + ret i32 %z +} +; CHECK: r0 = add(r0.l, r1.h):sat:<<16 + +declare i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32, i32) +define i32 @A2_addh_h16_sat_hl(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = add(r0.h, r1.l):sat:<<16 + +declare i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32, i32) +define i32 @A2_addh_h16_sat_hh(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = add(r0.h, r1.h):sat:<<16 + +; Logical doublewords +declare i64 @llvm.hexagon.A2.notp(i64) +define i64 @A2_notp(i64 %a) { + %z = call i64 @llvm.hexagon.A2.notp(i64 %a) + ret i64 %z +} +; CHECK: r1:0 = not(r1:0) + +declare i64 @llvm.hexagon.A2.andp(i64, i64) +define i64 @A2_andp(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.andp(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = and(r1:0, r3:2) + +declare i64 @llvm.hexagon.A4.andnp(i64, i64) +define i64 @A2_andnp(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A4.andnp(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = and(r1:0, ~r3:2) + +declare i64 @llvm.hexagon.A2.orp(i64, i64) +define i64 @A2_orp(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.orp(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = or(r1:0, r3:2) + +declare i64 @llvm.hexagon.A4.ornp(i64, i64) +define i64 @A2_ornp(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A4.ornp(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = or(r1:0, ~r3:2) + +declare i64 @llvm.hexagon.A2.xorp(i64, i64) +define i64 @A2_xorp(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.xorp(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = xor(r1:0, r3:2) + +; Logical-logical doublewords +declare i64 @llvm.hexagon.M4.xor.xacc(i64, i64, i64) +define i64 @M4_xor_xacc(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.M4.xor.xacc(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 ^= xor(r3:2, r5:4) + +; Logical-logical words +declare i32 @llvm.hexagon.S4.or.andi(i32, i32, i32) +define i32 @S4_or_andi(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S4.or.andi(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 |= and(r1, #0) + +declare i32 @llvm.hexagon.S4.or.andix(i32, i32, i32) +define i32 @S4_or_andix(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S4.or.andix(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r1 = or(r0, and(r1, #0)) + +declare i32 @llvm.hexagon.M4.or.andn(i32, i32, i32) +define i32 @M4_or_andn(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M4.or.andn(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 |= and(r1, ~r2) + +declare i32 @llvm.hexagon.M4.and.andn(i32, i32, i32) +define i32 @M4_and_andn(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M4.and.andn(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 &= and(r1, ~r2) + +declare i32 @llvm.hexagon.M4.xor.andn(i32, i32, i32) +define i32 @M4_xor_andn(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M4.xor.andn(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 ^= and(r1, ~r2) + +declare i32 @llvm.hexagon.M4.and.and(i32, i32, i32) +define i32 @M4_and_and(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M4.and.and(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 &= and(r1, r2) + +declare i32 @llvm.hexagon.M4.and.or(i32, i32, i32) +define i32 @M4_and_or(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M4.and.or(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 &= or(r1, r2) + +declare i32 @llvm.hexagon.M4.and.xor(i32, i32, i32) +define i32 @M4_and_xor(i32 %a, i32 %b, i32 %c) { + %z = call i32 
@llvm.hexagon.M4.and.xor(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 &= xor(r1, r2) + +declare i32 @llvm.hexagon.M4.or.and(i32, i32, i32) +define i32 @M4_or_and(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M4.or.and(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 |= and(r1, r2) + +declare i32 @llvm.hexagon.M4.or.or(i32, i32, i32) +define i32 @M4_or_or(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M4.or.or(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 |= or(r1, r2) + +declare i32 @llvm.hexagon.M4.or.xor(i32, i32, i32) +define i32 @M4_or_xor(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M4.or.xor(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 |= xor(r1, r2) + +declare i32 @llvm.hexagon.M4.xor.and(i32, i32, i32) +define i32 @M4_xor_and(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M4.xor.and(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 ^= and(r1, r2) + +declare i32 @llvm.hexagon.M4.xor.or(i32, i32, i32) +define i32 @M4_xor_or(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M4.xor.or(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 ^= or(r1, r2) + +; Maximum words +declare i32 @llvm.hexagon.A2.max(i32, i32) +define i32 @A2_max(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.max(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = max(r0, r1) + +declare i32 @llvm.hexagon.A2.maxu(i32, i32) +define i32 @A2_maxu(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.maxu(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = maxu(r0, r1) + +; Maximum doublewords +declare i64 @llvm.hexagon.A2.maxp(i64, i64) +define i64 @A2_maxp(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.maxp(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = max(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.maxup(i64, i64) +define i64 @A2_maxup(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.maxup(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = maxu(r1:0, r3:2) + +; Minimum words +declare i32 @llvm.hexagon.A2.min(i32, i32) +define i32 @A2_min(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.min(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = min(r0, r1) + +declare i32 @llvm.hexagon.A2.minu(i32, i32) +define i32 @A2_minu(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.minu(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = minu(r0, r1) + +; Minimum doublewords +declare i64 @llvm.hexagon.A2.minp(i64, i64) +define i64 @A2_minp(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.minp(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = min(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.minup(i64, i64) +define i64 @A2_minup(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.minup(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = minu(r1:0, r3:2) + +; Module wrap +declare i32 @llvm.hexagon.A4.modwrapu(i32, i32) +define i32 @A4_modwrapu(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A4.modwrapu(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = modwrap(r0, r1) + +; Negate +declare i64 @llvm.hexagon.A2.negp(i64) +define i64 @A2_negp(i64 %a) { + %z = call i64 @llvm.hexagon.A2.negp(i64 %a) + ret i64 %z +} +; CHECK: r1:0 = neg(r1:0) + +declare i32 @llvm.hexagon.A2.negsat(i32) +define i32 @A2_negsat(i32 %a) { + %z = call i32 @llvm.hexagon.A2.negsat(i32 %a) + ret i32 %z +} +; CHECK: r0 = neg(r0):sat + +; Round +declare i32 @llvm.hexagon.A2.roundsat(i64) +define i32 @A2_roundsat(i64 %a) { + %z = call i32 @llvm.hexagon.A2.roundsat(i64 %a) + ret i32 %z +} +; CHECK: r0 = round(r1:0):sat + +declare i32 @llvm.hexagon.A4.cround.ri(i32, i32) +define i32 @A4_cround_ri(i32 %a) { + %z = 
call i32 @llvm.hexagon.A4.cround.ri(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = cround(r0, #0) + +declare i32 @llvm.hexagon.A4.round.ri(i32, i32) +define i32 @A4_round_ri(i32 %a) { + %z = call i32 @llvm.hexagon.A4.round.ri(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = round(r0, #0) + +declare i32 @llvm.hexagon.A4.round.ri.sat(i32, i32) +define i32 @A4_round_ri_sat(i32 %a) { + %z = call i32 @llvm.hexagon.A4.round.ri.sat(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = round(r0, #0):sat + +declare i32 @llvm.hexagon.A4.cround.rr(i32, i32) +define i32 @A4_cround_rr(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A4.cround.rr(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = cround(r0, r1) + +declare i32 @llvm.hexagon.A4.round.rr(i32, i32) +define i32 @A4_round_rr(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A4.round.rr(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = round(r0, r1) + +declare i32 @llvm.hexagon.A4.round.rr.sat(i32, i32) +define i32 @A4_round_rr_sat(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A4.round.rr.sat(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = round(r0, r1):sat + +; Subtract doublewords +declare i64 @llvm.hexagon.A2.subp(i64, i64) +define i64 @A2_subp(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.subp(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = sub(r1:0, r3:2) + +; Subtract and accumulate +declare i32 @llvm.hexagon.M2.subacc(i32, i32, i32) +define i32 @M2_subacc(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.subacc(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += sub(r1, r2) + +; Subtract halfwords +declare i32 @llvm.hexagon.A2.subh.l16.ll(i32, i32) +define i32 @A2_subh_l16_ll(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.subh.l16.ll(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = sub(r0.l, r1.l) + +declare i32 @llvm.hexagon.A2.subh.l16.hl(i32, i32) +define i32 @A2_subh_l16_hl(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.subh.l16.hl(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = sub(r0.l, r1.h) + +declare i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32, i32) +define i32 @A2_subh_l16_sat.ll(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = sub(r0.l, r1.l):sat + +declare i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32, i32) +define i32 @A2_subh_l16_sat.hl(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = sub(r0.l, r1.h):sat + +declare i32 @llvm.hexagon.A2.subh.h16.ll(i32, i32) +define i32 @A2_subh_h16_ll(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.subh.h16.ll(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = sub(r0.l, r1.l):<<16 + +declare i32 @llvm.hexagon.A2.subh.h16.lh(i32, i32) +define i32 @A2_subh_h16_lh(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.subh.h16.lh(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = sub(r0.l, r1.h):<<16 + +declare i32 @llvm.hexagon.A2.subh.h16.hl(i32, i32) +define i32 @A2_subh_h16_hl(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.subh.h16.hl(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = sub(r0.h, r1.l):<<16 + +declare i32 @llvm.hexagon.A2.subh.h16.hh(i32, i32) +define i32 @A2_subh_h16_hh(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.subh.h16.hh(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = sub(r0.h, r1.h):<<16 + +declare i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32, i32) +define i32 @A2_subh_h16_sat_ll(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = sub(r0.l, r1.l):sat:<<16 + +declare i32 
@llvm.hexagon.A2.subh.h16.sat.lh(i32, i32) +define i32 @A2_subh_h16_sat_lh(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = sub(r0.l, r1.h):sat:<<16 + +declare i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32, i32) +define i32 @A2_subh_h16_sat_hl(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = sub(r0.h, r1.l):sat:<<16 + +declare i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32, i32) +define i32 @A2_subh_h16_sat_hh(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = sub(r0.h, r1.h):sat:<<16 + +; Sign extend word to doubleword +declare i64 @llvm.hexagon.A2.sxtw(i32) +define i64 @A2_sxtw(i32 %a) { + %z = call i64 @llvm.hexagon.A2.sxtw(i32 %a) + ret i64 %z +} +; CHECK: = sxtw(r0) + +; Vector absolute value halfwords +declare i64 @llvm.hexagon.A2.vabsh(i64) +define i64 @A2_vabsh(i64 %a) { + %z = call i64 @llvm.hexagon.A2.vabsh(i64 %a) + ret i64 %z +} +; CHECK: r1:0 = vabsh(r1:0) + +declare i64 @llvm.hexagon.A2.vabshsat(i64) +define i64 @A2_vabshsat(i64 %a) { + %z = call i64 @llvm.hexagon.A2.vabshsat(i64 %a) + ret i64 %z +} +; CHECK: r1:0 = vabsh(r1:0):sat + +; Vector absolute value words +declare i64 @llvm.hexagon.A2.vabsw(i64) +define i64 @A2_vabsw(i64 %a) { + %z = call i64 @llvm.hexagon.A2.vabsw(i64 %a) + ret i64 %z +} +; CHECK: r1:0 = vabsw(r1:0) + +declare i64 @llvm.hexagon.A2.vabswsat(i64) +define i64 @A2_vabswsat(i64 %a) { + %z = call i64 @llvm.hexagon.A2.vabswsat(i64 %a) + ret i64 %z +} +; CHECK: r1:0 = vabsw(r1:0):sat + +; Vector absolute difference halfwords +declare i64 @llvm.hexagon.M2.vabsdiffh(i64, i64) +define i64 @M2_vabsdiffh(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.vabsdiffh(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vabsdiffh(r1:0, r3:2) + +; Vector absolute difference words +declare i64 @llvm.hexagon.M2.vabsdiffw(i64, i64) +define i64 @M2_vabsdiffw(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.vabsdiffw(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vabsdiffw(r1:0, r3:2) + +; Vector add halfwords +declare i64 @llvm.hexagon.A2.vaddh(i64, i64) +define i64 @A2_vaddh(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vaddh(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vaddh(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.vaddhs(i64, i64) +define i64 @A2_vaddhs(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vaddhs(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vaddh(r1:0, r3:2):sat + +declare i64 @llvm.hexagon.A2.vadduhs(i64, i64) +define i64 @A2_vadduhs(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vadduhs(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vadduh(r1:0, r3:2):sat + +; Vector add halfwords with saturate and pack to unsigned bytes +declare i32 @llvm.hexagon.A5.vaddhubs(i64, i64) +define i32 @A5_vaddhubs(i64 %a, i64 %b) { + %z = call i32 @llvm.hexagon.A5.vaddhubs(i64 %a, i64 %b) + ret i32 %z +} +; CHECK: r0 = vaddhub(r1:0, r3:2):sat + +; Vector reduce add unsigned bytes +declare i64 @llvm.hexagon.A2.vraddub(i64, i64) +define i64 @A2_vraddub(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vraddub(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vraddub(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.vraddub.acc(i64, i64, i64) +define i64 @A2_vraddub_acc(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.A2.vraddub.acc(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 += vraddub(r3:2, r5:4) + +; Vector reduce add halfwords +declare i32 
@llvm.hexagon.M2.vradduh(i64, i64) +define i32 @M2_vradduh(i64 %a, i64 %b) { + %z = call i32 @llvm.hexagon.M2.vradduh(i64 %a, i64 %b) + ret i32 %z +} +; CHECK: r0 = vradduh(r1:0, r3:2) + +declare i32 @llvm.hexagon.M2.vraddh(i64, i64) +define i32 @M2_vraddh(i64 %a, i64 %b) { + %z = call i32 @llvm.hexagon.M2.vraddh(i64 %a, i64 %b) + ret i32 %z +} +; CHECK: r0 = vraddh(r1:0, r3:2) + +; Vector add bytes +declare i64 @llvm.hexagon.A2.vaddub(i64, i64) +define i64 @A2_vaddub(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vaddub(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vaddub(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.vaddubs(i64, i64) +define i64 @A2_vaddubs(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vaddubs(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vaddub(r1:0, r3:2):sat + +; Vector add words +declare i64 @llvm.hexagon.A2.vaddw(i64, i64) +define i64 @A2_vaddw(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vaddw(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vaddw(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.vaddws(i64, i64) +define i64 @A2_vaddws(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vaddws(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vaddw(r1:0, r3:2):sat + +; Vector average halfwords +declare i64 @llvm.hexagon.A2.vavgh(i64, i64) +define i64 @A2_vavgh(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vavgh(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vavgh(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.vavghr(i64, i64) +define i64 @A2_vavghr(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vavghr(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vavgh(r1:0, r3:2):rnd + +declare i64 @llvm.hexagon.A2.vavghcr(i64, i64) +define i64 @A2_vavghcr(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vavghcr(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vavgh(r1:0, r3:2):crnd + +declare i64 @llvm.hexagon.A2.vavguh(i64, i64) +define i64 @A2_vavguh(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vavguh(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vavguh(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.vavguhr(i64, i64) +define i64 @A2_vavguhr(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vavguhr(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vavguh(r1:0, r3:2):rnd + +declare i64 @llvm.hexagon.A2.vnavgh(i64, i64) +define i64 @A2_vnavgh(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vnavgh(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vnavgh(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.vnavghr(i64, i64) +define i64 @A2_vnavghr(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vnavghr(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vnavgh(r1:0, r3:2):rnd + +declare i64 @llvm.hexagon.A2.vnavghcr(i64, i64) +define i64 @A2_vnavghcr(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vnavghcr(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vnavgh(r1:0, r3:2):crnd + +; Vector average unsigned bytes +declare i64 @llvm.hexagon.A2.vavgub(i64, i64) +define i64 @A2_vavgub(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vavgub(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vavgub(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.vavgubr(i64, i64) +define i64 @A2_vavgubr(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vavgubr(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vavgub(r1:0, r3:2):rnd + +; Vector average words +declare i64 @llvm.hexagon.A2.vavgw(i64, i64) +define i64 @A2_vavgw(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vavgw(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vavgw(r1:0, r3:2) + +declare i64 
@llvm.hexagon.A2.vavgwr(i64, i64) +define i64 @A2_vavgwr(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vavgwr(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vavgw(r1:0, r3:2):rnd + +declare i64 @llvm.hexagon.A2.vavgwcr(i64, i64) +define i64 @A2_vavgwcr(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vavgwcr(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vavgw(r1:0, r3:2):crnd + +declare i64 @llvm.hexagon.A2.vavguw(i64, i64) +define i64 @A2_vavguw(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vavguw(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vavguw(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.vavguwr(i64, i64) +define i64 @A2_vavguwr(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vavguwr(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vavguw(r1:0, r3:2):rnd + +declare i64 @llvm.hexagon.A2.vnavgw(i64, i64) +define i64 @A2_vnavgw(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vnavgw(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vnavgw(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.vnavgwr(i64, i64) +define i64 @A2_vnavgwr(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vnavgwr(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vnavgw(r1:0, r3:2):rnd + +declare i64 @llvm.hexagon.A2.vnavgwcr(i64, i64) +define i64 @A2_vnavgwcr(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vnavgwcr(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vnavgw(r1:0, r3:2):crnd + +; Vector conditional negate +declare i64 @llvm.hexagon.S2.vcnegh(i64, i32) +define i64 @S2_vcnegh(i64 %a, i32 %b) { + %z = call i64 @llvm.hexagon.S2.vcnegh(i64 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = vcnegh(r1:0, r2) + +declare i64 @llvm.hexagon.S2.vrcnegh(i64, i64, i32) +define i64 @S2_vrcnegh(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.vrcnegh(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += vrcnegh(r3:2, r4) + +; Vector maximum bytes +declare i64 @llvm.hexagon.A2.vmaxub(i64, i64) +define i64 @A2_vmaxub(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vmaxub(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmaxub(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.vmaxb(i64, i64) +define i64 @A2_vmaxb(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vmaxb(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmaxb(r1:0, r3:2) + +; Vector maximum halfwords +declare i64 @llvm.hexagon.A2.vmaxh(i64, i64) +define i64 @A2_vmaxh(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vmaxh(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmaxh(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.vmaxuh(i64, i64) +define i64 @A2_vmaxuh(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vmaxuh(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmaxuh(r1:0, r3:2) + +; Vector reduce maximum halfwords +declare i64 @llvm.hexagon.A4.vrmaxh(i64, i64, i32) +define i64 @A4_vrmaxh(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.A4.vrmaxh(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 = vrmaxh(r3:2, r4) + +declare i64 @llvm.hexagon.A4.vrmaxuh(i64, i64, i32) +define i64 @A4_vrmaxuh(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.A4.vrmaxuh(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 = vrmaxuh(r3:2, r4) + +; Vector reduce maximum words +declare i64 @llvm.hexagon.A4.vrmaxw(i64, i64, i32) +define i64 @A4_vrmaxw(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.A4.vrmaxw(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 = vrmaxw(r3:2, r4) + +declare i64 @llvm.hexagon.A4.vrmaxuw(i64, i64, i32) +define i64 @A4_vrmaxuw(i64 %a, i64 %b, i32 %c) { + %z = call i64 
@llvm.hexagon.A4.vrmaxuw(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 = vrmaxuw(r3:2, r4) + +; Vector minimum bytes +declare i64 @llvm.hexagon.A2.vminub(i64, i64) +define i64 @A2_vminub(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vminub(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vminub(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.vminb(i64, i64) +define i64 @A2_vminb(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vminb(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vminb(r1:0, r3:2) + +; Vector minimum halfwords +declare i64 @llvm.hexagon.A2.vminh(i64, i64) +define i64 @A2_vminh(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vminh(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vminh(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.vminuh(i64, i64) +define i64 @A2_vminuh(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vminuh(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vminuh(r1:0, r3:2) + +; Vector reduce minimum halfwords +declare i64 @llvm.hexagon.A4.vrminh(i64, i64, i32) +define i64 @A4_vrminh(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.A4.vrminh(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 = vrminh(r3:2, r4) + +declare i64 @llvm.hexagon.A4.vrminuh(i64, i64, i32) +define i64 @A4_vrminuh(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.A4.vrminuh(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 = vrminuh(r3:2, r4) + +; Vector reduce minimum words +declare i64 @llvm.hexagon.A4.vrminw(i64, i64, i32) +define i64 @A4_vrminw(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.A4.vrminw(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 = vrminw(r3:2, r4) + +declare i64 @llvm.hexagon.A4.vrminuw(i64, i64, i32) +define i64 @A4_vrminuw(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.A4.vrminuw(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 = vrminuw(r3:2, r4) + +; Vector sum of absolute differences unsigned bytes +declare i64 @llvm.hexagon.A2.vrsadub(i64, i64) +define i64 @A2_vrsadub(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vrsadub(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vrsadub(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.vrsadub.acc(i64, i64, i64) +define i64 @A2_vrsadub_acc(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.A2.vrsadub.acc(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 += vrsadub(r3:2, r5:4) + +; Vector subtract halfwords +declare i64 @llvm.hexagon.A2.vsubh(i64, i64) +define i64 @A2_vsubh(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vsubh(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vsubh(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.vsubhs(i64, i64) +define i64 @A2_vsubhs(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vsubhs(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vsubh(r1:0, r3:2):sat + +declare i64 @llvm.hexagon.A2.vsubuhs(i64, i64) +define i64 @A2_vsubuhs(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vsubuhs(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vsubuh(r1:0, r3:2):sat + +; Vector subtract bytes +declare i64 @llvm.hexagon.A2.vsubub(i64, i64) +define i64 @A2_vsubub(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vsubub(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vsubub(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.vsububs(i64, i64) +define i64 @A2_vsububs(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vsububs(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vsubub(r1:0, r3:2):sat + +; Vector subtract words +declare i64 @llvm.hexagon.A2.vsubw(i64, i64) +define i64 @A2_vsubw(i64 %a, i64 %b) { + %z = call i64 
@llvm.hexagon.A2.vsubw(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vsubw(r1:0, r3:2) + +declare i64 @llvm.hexagon.A2.vsubws(i64, i64) +define i64 @A2_vsubws(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.A2.vsubws(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vsubw(r1:0, r3:2):sat diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll b/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll new file mode 100644 index 000000000000..8531b2f9334b --- /dev/null +++ b/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll @@ -0,0 +1,329 @@ +; RUN: llc -march=hexagon -O0 < %s | FileCheck %s +; Hexagon Programmer's Reference Manual 11.10.2 XTYPE/BIT + +; Count leading +declare i32 @llvm.hexagon.S2.clbp(i64) +define i32 @S2_clbp(i64 %a) { + %z = call i32 @llvm.hexagon.S2.clbp(i64 %a) + ret i32 %z +} +; CHECK: r0 = clb(r1:0) + +declare i32 @llvm.hexagon.S2.cl0p(i64) +define i32 @S2_cl0p(i64 %a) { + %z = call i32 @llvm.hexagon.S2.cl0p(i64 %a) + ret i32 %z +} +; CHECK: r0 = cl0(r1:0) + +declare i32 @llvm.hexagon.S2.cl1p(i64) +define i32 @S2_cl1p(i64 %a) { + %z = call i32 @llvm.hexagon.S2.cl1p(i64 %a) + ret i32 %z +} +; CHECK: r0 = cl1(r1:0) + +declare i32 @llvm.hexagon.S4.clbpnorm(i64) +define i32 @S4_clbpnorm(i64 %a) { + %z = call i32 @llvm.hexagon.S4.clbpnorm(i64 %a) + ret i32 %z +} +; CHECK: r0 = normamt(r1:0) + +declare i32 @llvm.hexagon.S4.clbpaddi(i64, i32) +define i32 @S4_clbpaddi(i64 %a) { + %z = call i32 @llvm.hexagon.S4.clbpaddi(i64 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = add(clb(r1:0), #0) + +declare i32 @llvm.hexagon.S4.clbaddi(i32, i32) +define i32 @S4_clbaddi(i32 %a) { + %z = call i32 @llvm.hexagon.S4.clbaddi(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = add(clb(r0), #0) + +declare i32 @llvm.hexagon.S2.cl0(i32) +define i32 @S2_cl0(i32 %a) { + %z = call i32 @llvm.hexagon.S2.cl0(i32 %a) + ret i32 %z +} +; CHECK: r0 = cl0(r0) + +declare i32 @llvm.hexagon.S2.cl1(i32) +define i32 @S2_cl1(i32 %a) { + %z = call i32 @llvm.hexagon.S2.cl1(i32 %a) + ret i32 %z +} +; CHECK: r0 = cl1(r0) + +declare i32 @llvm.hexagon.S2.clbnorm(i32) +define i32 @S4_clbnorm(i32 %a) { + %z = call i32 @llvm.hexagon.S2.clbnorm(i32 %a) + ret i32 %z +} +; CHECK: r0 = normamt(r0) + +; Count population +declare i32 @llvm.hexagon.S5.popcountp(i64) +define i32 @S5_popcountp(i64 %a) { + %z = call i32 @llvm.hexagon.S5.popcountp(i64 %a) + ret i32 %z +} +; CHECK: r0 = popcount(r1:0) + +; Count trailing +declare i32 @llvm.hexagon.S2.ct0p(i64) +define i32 @S2_ct0p(i64 %a) { + %z = call i32 @llvm.hexagon.S2.ct0p(i64 %a) + ret i32 %z +} +; CHECK: r0 = ct0(r1:0) + +declare i32 @llvm.hexagon.S2.ct1p(i64) +define i32 @S2_ct1p(i64 %a) { + %z = call i32 @llvm.hexagon.S2.ct1p(i64 %a) + ret i32 %z +} +; CHECK: r0 = ct1(r1:0) + +declare i32 @llvm.hexagon.S2.ct0(i32) +define i32 @S2_ct0(i32 %a) { + %z = call i32 @llvm.hexagon.S2.ct0(i32 %a) + ret i32 %z +} +; CHECK: r0 = ct0(r0) + +declare i32 @llvm.hexagon.S2.ct1(i32) +define i32 @S2_ct1(i32 %a) { + %z = call i32 @llvm.hexagon.S2.ct1(i32 %a) + ret i32 %z +} +; CHECK: r0 = ct1(r0) + +; Extract bitfield +declare i64 @llvm.hexagon.S2.extractup(i64, i32, i32) +define i64 @S2_extractup(i64 %a) { + %z = call i64 @llvm.hexagon.S2.extractup(i64 %a, i32 0, i32 0) + ret i64 %z +} +; CHECK: r1:0 = extractu(r1:0, #0, #0) + +declare i64 @llvm.hexagon.S4.extractp(i64, i32, i32) +define i64 @S2_extractp(i64 %a) { + %z = call i64 @llvm.hexagon.S4.extractp(i64 %a, i32 0, i32 0) + ret i64 %z +} +; CHECK: r1:0 = extract(r1:0, #0, #0) + +declare i32 @llvm.hexagon.S2.extractu(i32, i32, i32) +define i32 @S2_extractu(i32 
%a) { + %z = call i32 @llvm.hexagon.S2.extractu(i32 %a, i32 0, i32 0) + ret i32 %z +} +; CHECK: r0 = extractu(r0, #0, #0) + +declare i32 @llvm.hexagon.S4.extract(i32, i32, i32) +define i32 @S2_extract(i32 %a) { + %z = call i32 @llvm.hexagon.S4.extract(i32 %a, i32 0, i32 0) + ret i32 %z +} +; CHECK: r0 = extract(r0, #0, #0) + +declare i64 @llvm.hexagon.S2.extractup.rp(i64, i64) +define i64 @S2_extractup_rp(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.extractup.rp(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = extractu(r1:0, r3:2) + +declare i64 @llvm.hexagon.S4.extractp.rp(i64, i64) +define i64 @S4_extractp_rp(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S4.extractp.rp(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = extract(r1:0, r3:2) + +declare i32 @llvm.hexagon.S2.extractu.rp(i32, i64) +define i32 @S2_extractu_rp(i32 %a, i64 %b) { + %z = call i32 @llvm.hexagon.S2.extractu.rp(i32 %a, i64 %b) + ret i32 %z +} +; CHECK: r0 = extractu(r0, r3:2) + +declare i32 @llvm.hexagon.S4.extract.rp(i32, i64) +define i32 @S4_extract_rp(i32 %a, i64 %b) { + %z = call i32 @llvm.hexagon.S4.extract.rp(i32 %a, i64 %b) + ret i32 %z +} +; CHECK: r0 = extract(r0, r3:2) + +; Insert bitfield +declare i64 @llvm.hexagon.S2.insertp(i64, i64, i32, i32) +define i64 @S2_insertp(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.insertp(i64 %a, i64 %b, i32 0, i32 0) + ret i64 %z +} +; CHECK: r1:0 = insert(r3:2, #0, #0) + +declare i32 @llvm.hexagon.S2.insert(i32, i32, i32, i32) +define i32 @S2_insert(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.insert(i32 %a, i32 %b, i32 0, i32 0) + ret i32 %z +} +; CHECK: r0 = insert(r1, #0, #0) + +declare i32 @llvm.hexagon.S2.insert.rp(i32, i32, i64) +define i32 @S2_insert_rp(i32 %a, i32 %b, i64 %c) { + %z = call i32 @llvm.hexagon.S2.insert.rp(i32 %a, i32 %b, i64 %c) + ret i32 %z +} +; CHECK: r0 = insert(r1, r3:2) + +declare i64 @llvm.hexagon.S2.insertp.rp(i64, i64, i64) +define i64 @S2_insertp_rp(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.S2.insertp.rp(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 = insert(r3:2, r5:4) + +; Interleave/deinterleave +declare i64 @llvm.hexagon.S2.deinterleave(i64) +define i64 @S2_deinterleave(i64 %a) { + %z = call i64 @llvm.hexagon.S2.deinterleave(i64 %a) + ret i64 %z +} +; CHECK: r1:0 = deinterleave(r1:0) + +declare i64 @llvm.hexagon.S2.interleave(i64) +define i64 @S2_interleave(i64 %a) { + %z = call i64 @llvm.hexagon.S2.interleave(i64 %a) + ret i64 %z +} +; CHECK: r1:0 = interleave(r1:0) + +; Linear feedback-shift operation +declare i64 @llvm.hexagon.S2.lfsp(i64, i64) +define i64 @S2_lfsp(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.lfsp(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = lfs(r1:0, r3:2) + +; Masked parity +declare i32 @llvm.hexagon.S2.parityp(i64, i64) +define i32 @S2_parityp(i64 %a, i64 %b) { + %z = call i32 @llvm.hexagon.S2.parityp(i64 %a, i64 %b) + ret i32 %z +} +; CHECK: r0 = parity(r1:0, r3:2) + +declare i32 @llvm.hexagon.S4.parity(i32, i32) +define i32 @S4_parity(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S4.parity(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = parity(r0, r1) + +; Bit reverse +declare i64 @llvm.hexagon.S2.brevp(i64) +define i64 @S2_brevp(i64 %a) { + %z = call i64 @llvm.hexagon.S2.brevp(i64 %a) + ret i64 %z +} +; CHECK: r1:0 = brev(r1:0) + +declare i32 @llvm.hexagon.S2.brev(i32) +define i32 @S2_brev(i32 %a) { + %z = call i32 @llvm.hexagon.S2.brev(i32 %a) + ret i32 %z +} +; CHECK: r0 = brev(r0) + +; Set/clear/toggle bit +declare i32 @llvm.hexagon.S2.setbit.i(i32, i32) 
+define i32 @S2_setbit_i(i32 %a) { + %z = call i32 @llvm.hexagon.S2.setbit.i(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = setbit(r0, #0) + +declare i32 @llvm.hexagon.S2.clrbit.i(i32, i32) +define i32 @S2_clrbit_i(i32 %a) { + %z = call i32 @llvm.hexagon.S2.clrbit.i(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = clrbit(r0, #0) + +declare i32 @llvm.hexagon.S2.togglebit.i(i32, i32) +define i32 @S2_togglebit_i(i32 %a) { + %z = call i32 @llvm.hexagon.S2.togglebit.i(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = togglebit(r0, #0) + +declare i32 @llvm.hexagon.S2.setbit.r(i32, i32) +define i32 @S2_setbit_r(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.setbit.r(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = setbit(r0, r1) + +declare i32 @llvm.hexagon.S2.clrbit.r(i32, i32) +define i32 @S2_clrbit_r(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.clrbit.r(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = clrbit(r0, r1) + +declare i32 @llvm.hexagon.S2.togglebit.r(i32, i32) +define i32 @S2_togglebit_r(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.togglebit.r(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = togglebit(r0, r1) + +; Split bitfield +declare i64 @llvm.hexagon.A4.bitspliti(i32, i32) +define i64 @A4_bitspliti(i32 %a) { + %z = call i64 @llvm.hexagon.A4.bitspliti(i32 %a, i32 0) + ret i64 %z +} +; CHECK: = bitsplit(r0, #0) + +declare i64 @llvm.hexagon.A4.bitsplit(i32, i32) +define i64 @A4_bitsplit(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.A4.bitsplit(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = bitsplit(r0, r1) + +; Table index +declare i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32, i32, i32, i32) +define i32 @S2_tableidxb_goodsyntax(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) + ret i32 %z +} +; CHECK: r0 = tableidxb(r1, #0, #0) + +declare i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32, i32, i32, i32) +define i32 @S2_tableidxh_goodsyntax(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) + ret i32 %z +} +; CHECK: r0 = tableidxh(r1, #0, #-1) + +declare i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32, i32, i32, i32) +define i32 @S2_tableidxw_goodsyntax(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) + ret i32 %z +} +; CHECK: r0 = tableidxw(r1, #0, #-2) + +declare i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32, i32, i32, i32) +define i32 @S2_tableidxd_goodsyntax(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) + ret i32 %z +} +; CHECK: r0 = tableidxd(r1, #0, #-3) diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll b/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll new file mode 100644 index 000000000000..57b0c5b6db56 --- /dev/null +++ b/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll @@ -0,0 +1,349 @@ +; RUN: llc -march=hexagon -O0 < %s | FileCheck %s +; Hexagon Programmer's Reference Manual 11.10.3 XTYPE/COMPLEX + +; Complex add/sub halfwords +declare i64 @llvm.hexagon.S4.vxaddsubh(i64, i64) +define i64 @S4_vxaddsubh(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S4.vxaddsubh(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vxaddsubh(r1:0, r3:2):sat + +declare i64 @llvm.hexagon.S4.vxsubaddh(i64, i64) +define i64 @S4_vxsubaddh(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S4.vxsubaddh(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vxsubaddh(r1:0, r3:2):sat + +declare i64 @llvm.hexagon.S4.vxaddsubhr(i64, i64) +define i64 @S4_vxaddsubhr(i64 %a, i64 %b) { + %z = 
call i64 @llvm.hexagon.S4.vxaddsubhr(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vxaddsubh(r1:0, r3:2):rnd:>>1:sat + +declare i64 @llvm.hexagon.S4.vxsubaddhr(i64, i64) +define i64 @S4_vxsubaddhr(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S4.vxsubaddhr(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vxsubaddh(r1:0, r3:2):rnd:>>1:sat + +; Complex add/sub words +declare i64 @llvm.hexagon.S4.vxaddsubw(i64, i64) +define i64 @S4_vxaddsubw(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S4.vxaddsubw(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vxaddsubw(r1:0, r3:2):sat + +declare i64 @llvm.hexagon.S4.vxsubaddw(i64, i64) +define i64 @S4_vxsubaddw(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S4.vxsubaddw(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vxsubaddw(r1:0, r3:2):sat + +; Complex multiply +declare i64 @llvm.hexagon.M2.cmpys.s0(i32, i32) +define i64 @M2_cmpys_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.cmpys.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = cmpy(r0, r1):sat + +declare i64 @llvm.hexagon.M2.cmpys.s1(i32, i32) +define i64 @M2_cmpys_s1(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.cmpys.s1(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = cmpy(r0, r1):<<1:sat + +declare i64 @llvm.hexagon.M2.cmpysc.s0(i32, i32) +define i64 @M2_cmpysc_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.cmpysc.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = cmpy(r0, r1*):sat + +declare i64 @llvm.hexagon.M2.cmpysc.s1(i32, i32) +define i64 @M2_cmpysc_s1(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.cmpysc.s1(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = cmpy(r0, r1*):<<1:sat + +declare i64 @llvm.hexagon.M2.cmacs.s0(i64, i32, i32) +define i64 @M2_cmacs_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.cmacs.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += cmpy(r2, r3):sat + +declare i64 @llvm.hexagon.M2.cmacs.s1(i64, i32, i32) +define i64 @M2_cmacs_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.cmacs.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += cmpy(r2, r3):<<1:sat + +declare i64 @llvm.hexagon.M2.cnacs.s0(i64, i32, i32) +define i64 @M2_cnacs_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.cnacs.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= cmpy(r2, r3):sat + +declare i64 @llvm.hexagon.M2.cnacs.s1(i64, i32, i32) +define i64 @M2_cnacs_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.cnacs.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= cmpy(r2, r3):<<1:sat + +declare i64 @llvm.hexagon.M2.cmacsc.s0(i64, i32, i32) +define i64 @M2_cmacsc_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.cmacsc.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += cmpy(r2, r3*):sat + +declare i64 @llvm.hexagon.M2.cmacsc.s1(i64, i32, i32) +define i64 @M2_cmacsc_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.cmacsc.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += cmpy(r2, r3*):<<1:sat + +declare i64 @llvm.hexagon.M2.cnacsc.s0(i64, i32, i32) +define i64 @M2_cnacsc_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.cnacsc.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= cmpy(r2, r3*):sat + +declare i64 @llvm.hexagon.M2.cnacsc.s1(i64, i32, i32) +define i64 @M2_cnacsc_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.cnacsc.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= cmpy(r2, r3*):<<1:sat + +; Complex multiply real or imaginary +declare i64 @llvm.hexagon.M2.cmpyi.s0(i32, i32) 
+define i64 @M2_cmpyi_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.cmpyi.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = cmpyi(r0, r1) + +declare i64 @llvm.hexagon.M2.cmpyr.s0(i32, i32) +define i64 @M2_cmpyr_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.cmpyr.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = cmpyr(r0, r1) + +declare i64 @llvm.hexagon.M2.cmaci.s0(i64, i32, i32) +define i64 @M2_cmaci_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.cmaci.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += cmpyi(r2, r3) + +declare i64 @llvm.hexagon.M2.cmacr.s0(i64, i32, i32) +define i64 @M2_cmacr_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.cmacr.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += cmpyr(r2, r3) + +; Complex multiply with round and pack +declare i32 @llvm.hexagon.M2.cmpyrs.s0(i32, i32) +define i32 @M2_cmpyrs_s0(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.cmpyrs.s0(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = cmpy(r0, r1):rnd:sat + +declare i32 @llvm.hexagon.M2.cmpyrs.s1(i32, i32) +define i32 @M2_cmpyrs_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.cmpyrs.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = cmpy(r0, r1):<<1:rnd:sat + +declare i32 @llvm.hexagon.M2.cmpyrsc.s0(i32, i32) +define i32 @M2_cmpyrsc_s0(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.cmpyrsc.s0(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = cmpy(r0, r1*):rnd:sat + +declare i32 @llvm.hexagon.M2.cmpyrsc.s1(i32, i32) +define i32 @M2_cmpyrsc_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.cmpyrsc.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = cmpy(r0, r1*):<<1:rnd:sat + +; Complex multiply 32x16 +declare i32 @llvm.hexagon.M4.cmpyi.wh(i64, i32) +define i32 @M4_cmpyi_wh(i64 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M4.cmpyi.wh(i64 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = cmpyiwh(r1:0, r2):<<1:rnd:sat + +declare i32 @llvm.hexagon.M4.cmpyi.whc(i64, i32) +define i32 @M4_cmpyi_whc(i64 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M4.cmpyi.whc(i64 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = cmpyiwh(r1:0, r2*):<<1:rnd:sat + +declare i32 @llvm.hexagon.M4.cmpyr.wh(i64, i32) +define i32 @M4_cmpyr_wh(i64 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M4.cmpyr.wh(i64 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = cmpyrwh(r1:0, r2):<<1:rnd:sat + +declare i32 @llvm.hexagon.M4.cmpyr.whc(i64, i32) +define i32 @M4_cmpyr_whc(i64 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M4.cmpyr.whc(i64 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = cmpyrwh(r1:0, r2*):<<1:rnd:sat + +; Vector complex multiply real or imaginary +declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64, i64) +define i64 @M2_vcmpy_s0_sat_r(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vcmpyr(r1:0, r3:2):sat + +declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64, i64) +define i64 @M2_vcmpy_s1_sat_r(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vcmpyr(r1:0, r3:2):<<1:sat + +declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64, i64) +define i64 @M2_vcmpy_s0_sat_i(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vcmpyi(r1:0, r3:2):sat + +declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64, i64) +define i64 @M2_vcmpy_s1_sat_i(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vcmpyi(r1:0, r3:2):<<1:sat + +declare i64 
@llvm.hexagon.M2.vcmac.s0.sat.r(i64, i64, i64) +define i64 @M2_vcmac_s0_sat_r(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 += vcmpyr(r3:2, r5:4):sat + +declare i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64, i64, i64) +define i64 @M2_vcmac_s0_sat_i(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 += vcmpyi(r3:2, r5:4):sat + +; Vector complex conjugate +declare i64 @llvm.hexagon.A2.vconj(i64) +define i64 @A2_vconj(i64 %a) { + %z = call i64 @llvm.hexagon.A2.vconj(i64 %a) + ret i64 %z +} +; CHECK: r1:0 = vconj(r1:0):sat + +; Vector complex rotate +declare i64 @llvm.hexagon.S2.vcrotate(i64, i32) +define i64 @S2_vcrotate(i64 %a, i32 %b) { + %z = call i64 @llvm.hexagon.S2.vcrotate(i64 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = vcrotate(r1:0, r2) + +; Vector reduce complex multiply real or imaginary +declare i64 @llvm.hexagon.M2.vrcmpyi.s0(i64, i64) +define i64 @M2_vrcmpyi_s0(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.vrcmpyi.s0(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vrcmpyi(r1:0, r3:2) + +declare i64 @llvm.hexagon.M2.vrcmpyr.s0(i64, i64) +define i64 @M2_vrcmpyr_s0(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.vrcmpyr.s0(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vrcmpyr(r1:0, r3:2) + +declare i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64, i64) +define i64 @M2_vrcmpyi_s0c(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vrcmpyi(r1:0, r3:2*) + +declare i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64, i64) +define i64 @M2_vrcmpyr_s0c(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vrcmpyr(r1:0, r3:2*) + +declare i64 @llvm.hexagon.M2.vrcmaci.s0(i64, i64, i64) +define i64 @M2_vrcmaci_s0(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.M2.vrcmaci.s0(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 += vrcmpyi(r3:2, r5:4) + +declare i64 @llvm.hexagon.M2.vrcmacr.s0(i64, i64, i64) +define i64 @M2_vrcmacr_s0(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.M2.vrcmacr.s0(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 += vrcmpyr(r3:2, r5:4) + +declare i64 @llvm.hexagon.M2.vrcmaci.s0c(i64, i64, i64) +define i64 @M2_vrcmaci_s0c(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.M2.vrcmaci.s0c(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 += vrcmpyi(r3:2, r5:4*) + +declare i64 @llvm.hexagon.M2.vrcmacr.s0c(i64, i64, i64) +define i64 @M2_vrcmacr_s0c(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.M2.vrcmacr.s0c(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 += vrcmpyr(r3:2, r5:4*) + +; Vector reduce complex rotate +declare i64 @llvm.hexagon.S4.vrcrotate(i64, i32, i32) +define i64 @S4_vrcrotate(i64 %a, i32 %b) { + %z = call i64 @llvm.hexagon.S4.vrcrotate(i64 %a, i32 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 = vrcrotate(r1:0, r2, #0) + +declare i64 @llvm.hexagon.S4.vrcrotate.acc(i64, i64, i32, i32) +define i64 @S4_vrcrotate_acc(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S4.vrcrotate.acc(i64 %a, i64 %b, i32 %c, i32 0) + ret i64 %z +} +; CHECK: r1:0 += vrcrotate(r3:2, r4, #0) diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll b/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll new file mode 100644 index 000000000000..aef8127d668c --- /dev/null +++ b/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll @@ -0,0 +1,388 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < 
%s | FileCheck %s +; Hexagon Programmer's Reference Manual 11.10.4 XTYPE/FP + +; Floating point addition +declare float @llvm.hexagon.F2.sfadd(float, float) +define float @F2_sfadd(float %a, float %b) { + %z = call float @llvm.hexagon.F2.sfadd(float %a, float %b) + ret float %z +} +; CHECK: r0 = sfadd(r0, r1) + +; Classify floating-point value +declare i32 @llvm.hexagon.F2.sfclass(float, i32) +define i32 @F2_sfclass(float %a) { + %z = call i32 @llvm.hexagon.F2.sfclass(float %a, i32 0) + ret i32 %z +} +; CHECK: p0 = sfclass(r0, #0) + +declare i32 @llvm.hexagon.F2.dfclass(double, i32) +define i32 @F2_dfclass(double %a) { + %z = call i32 @llvm.hexagon.F2.dfclass(double %a, i32 0) + ret i32 %z +} +; CHECK: p0 = dfclass(r1:0, #0) + +; Compare floating-point value +declare i32 @llvm.hexagon.F2.sfcmpge(float, float) +define i32 @F2_sfcmpge(float %a, float %b) { + %z = call i32 @llvm.hexagon.F2.sfcmpge(float %a, float %b) + ret i32 %z +} +; CHECK: p0 = sfcmp.ge(r0, r1) + +declare i32 @llvm.hexagon.F2.sfcmpuo(float, float) +define i32 @F2_sfcmpuo(float %a, float %b) { + %z = call i32 @llvm.hexagon.F2.sfcmpuo(float %a, float %b) + ret i32 %z +} +; CHECK: p0 = sfcmp.uo(r0, r1) + +declare i32 @llvm.hexagon.F2.sfcmpeq(float, float) +define i32 @F2_sfcmpeq(float %a, float %b) { + %z = call i32 @llvm.hexagon.F2.sfcmpeq(float %a, float %b) + ret i32 %z +} +; CHECK: p0 = sfcmp.eq(r0, r1) + +declare i32 @llvm.hexagon.F2.sfcmpgt(float, float) +define i32 @F2_sfcmpgt(float %a, float %b) { + %z = call i32 @llvm.hexagon.F2.sfcmpgt(float %a, float %b) + ret i32 %z +} +; CHECK: p0 = sfcmp.gt(r0, r1) + +declare i32 @llvm.hexagon.F2.dfcmpge(double, double) +define i32 @F2_dfcmpge(double %a, double %b) { + %z = call i32 @llvm.hexagon.F2.dfcmpge(double %a, double %b) + ret i32 %z +} +; CHECK: p0 = dfcmp.ge(r1:0, r3:2) + +declare i32 @llvm.hexagon.F2.dfcmpuo(double, double) +define i32 @F2_dfcmpuo(double %a, double %b) { + %z = call i32 @llvm.hexagon.F2.dfcmpuo(double %a, double %b) + ret i32 %z +} +; CHECK: p0 = dfcmp.uo(r1:0, r3:2) + +declare i32 @llvm.hexagon.F2.dfcmpeq(double, double) +define i32 @F2_dfcmpeq(double %a, double %b) { + %z = call i32 @llvm.hexagon.F2.dfcmpeq(double %a, double %b) + ret i32 %z +} +; CHECK: p0 = dfcmp.eq(r1:0, r3:2) + +declare i32 @llvm.hexagon.F2.dfcmpgt(double, double) +define i32 @F2_dfcmpgt(double %a, double %b) { + %z = call i32 @llvm.hexagon.F2.dfcmpgt(double %a, double %b) + ret i32 %z +} +; CHECK: p0 = dfcmp.gt(r1:0, r3:2) + +; Convert floating-point value to other format +declare double @llvm.hexagon.F2.conv.sf2df(float) +define double @F2_conv_sf2df(float %a) { + %z = call double @llvm.hexagon.F2.conv.sf2df(float %a) + ret double %z +} +; CHECK: = convert_sf2df(r0) + +declare float @llvm.hexagon.F2.conv.df2sf(double) +define float @F2_conv_df2sf(double %a) { + %z = call float @llvm.hexagon.F2.conv.df2sf(double %a) + ret float %z +} +; CHECK: r0 = convert_df2sf(r1:0) + +; Convert integer to floating-point value +declare double @llvm.hexagon.F2.conv.ud2df(i64) +define double @F2_conv_ud2df(i64 %a) { + %z = call double @llvm.hexagon.F2.conv.ud2df(i64 %a) + ret double %z +} +; CHECK: r1:0 = convert_ud2df(r1:0) + +declare double @llvm.hexagon.F2.conv.d2df(i64) +define double @F2_conv_d2df(i64 %a) { + %z = call double @llvm.hexagon.F2.conv.d2df(i64 %a) + ret double %z +} +; CHECK: r1:0 = convert_d2df(r1:0) + +declare double @llvm.hexagon.F2.conv.uw2df(i32) +define double @F2_conv_uw2df(i32 %a) { + %z = call double @llvm.hexagon.F2.conv.uw2df(i32 %a) + ret double %z +} +; CHECK: = 
convert_uw2df(r0) + +declare double @llvm.hexagon.F2.conv.w2df(i32) +define double @F2_conv_w2df(i32 %a) { + %z = call double @llvm.hexagon.F2.conv.w2df(i32 %a) + ret double %z +} +; CHECK: = convert_w2df(r0) + +declare float @llvm.hexagon.F2.conv.ud2sf(i64) +define float @F2_conv_ud2sf(i64 %a) { + %z = call float @llvm.hexagon.F2.conv.ud2sf(i64 %a) + ret float %z +} +; CHECK: r0 = convert_ud2sf(r1:0) + +declare float @llvm.hexagon.F2.conv.d2sf(i64) +define float @F2_conv_d2sf(i64 %a) { + %z = call float @llvm.hexagon.F2.conv.d2sf(i64 %a) + ret float %z +} +; CHECK: r0 = convert_d2sf(r1:0) + +declare float @llvm.hexagon.F2.conv.uw2sf(i32) +define float @F2_conv_uw2sf(i32 %a) { + %z = call float @llvm.hexagon.F2.conv.uw2sf(i32 %a) + ret float %z +} +; CHECK: r0 = convert_uw2sf(r0) + +declare float @llvm.hexagon.F2.conv.w2sf(i32) +define float @F2_conv_w2sf(i32 %a) { + %z = call float @llvm.hexagon.F2.conv.w2sf(i32 %a) + ret float %z +} +; CHECK: r0 = convert_w2sf(r0) + +; Convert floating-point value to integer +declare i64 @llvm.hexagon.F2.conv.df2d(double) +define i64 @F2_conv_df2d(double %a) { + %z = call i64 @llvm.hexagon.F2.conv.df2d(double %a) + ret i64 %z +} +; CHECK: r1:0 = convert_df2d(r1:0) + +declare i64 @llvm.hexagon.F2.conv.df2ud(double) +define i64 @F2_conv_df2ud(double %a) { + %z = call i64 @llvm.hexagon.F2.conv.df2ud(double %a) + ret i64 %z +} +; CHECK: r1:0 = convert_df2ud(r1:0) + +declare i64 @llvm.hexagon.F2.conv.df2d.chop(double) +define i64 @F2_conv_df2d_chop(double %a) { + %z = call i64 @llvm.hexagon.F2.conv.df2d.chop(double %a) + ret i64 %z +} +; CHECK: r1:0 = convert_df2d(r1:0):chop + +declare i64 @llvm.hexagon.F2.conv.df2ud.chop(double) +define i64 @F2_conv_df2ud_chop(double %a) { + %z = call i64 @llvm.hexagon.F2.conv.df2ud.chop(double %a) + ret i64 %z +} +; CHECK: r1:0 = convert_df2ud(r1:0):chop + +declare i64 @llvm.hexagon.F2.conv.sf2ud(float) +define i64 @F2_conv_sf2ud(float %a) { + %z = call i64 @llvm.hexagon.F2.conv.sf2ud(float %a) + ret i64 %z +} +; CHECK: = convert_sf2ud(r0) + +declare i64 @llvm.hexagon.F2.conv.sf2d(float) +define i64 @F2_conv_sf2d(float %a) { + %z = call i64 @llvm.hexagon.F2.conv.sf2d(float %a) + ret i64 %z +} +; CHECK: = convert_sf2d(r0) + +declare i64 @llvm.hexagon.F2.conv.sf2d.chop(float) +define i64 @F2_conv_sf2d_chop(float %a) { + %z = call i64 @llvm.hexagon.F2.conv.sf2d.chop(float %a) + ret i64 %z +} +; CHECK: = convert_sf2d(r0):chop + +declare i64 @llvm.hexagon.F2.conv.sf2ud.chop(float) +define i64 @F2_conv_sf2ud_chop(float %a) { + %z = call i64 @llvm.hexagon.F2.conv.sf2ud.chop(float %a) + ret i64 %z +} +; CHECK: = convert_sf2ud(r0):chop + +declare i32 @llvm.hexagon.F2.conv.df2uw(double) +define i32 @F2_conv_df2uw(double %a) { + %z = call i32 @llvm.hexagon.F2.conv.df2uw(double %a) + ret i32 %z +} +; CHECK: r0 = convert_df2uw(r1:0) + +declare i32 @llvm.hexagon.F2.conv.df2w(double) +define i32 @F2_conv_df2w(double %a) { + %z = call i32 @llvm.hexagon.F2.conv.df2w(double %a) + ret i32 %z +} +; CHECK: r0 = convert_df2w(r1:0) + +declare i32 @llvm.hexagon.F2.conv.df2w.chop(double) +define i32 @F2_conv_df2w_chop(double %a) { + %z = call i32 @llvm.hexagon.F2.conv.df2w.chop(double %a) + ret i32 %z +} +; CHECK: r0 = convert_df2w(r1:0):chop + +declare i32 @llvm.hexagon.F2.conv.df2uw.chop(double) +define i32 @F2_conv_df2uw_chop(double %a) { + %z = call i32 @llvm.hexagon.F2.conv.df2uw.chop(double %a) + ret i32 %z +} +; CHECK: r0 = convert_df2uw(r1:0):chop + +declare i32 @llvm.hexagon.F2.conv.sf2uw(float) +define i32 @F2_conv_sf2uw(float %a) { + %z 
= call i32 @llvm.hexagon.F2.conv.sf2uw(float %a) + ret i32 %z +} +; CHECK: r0 = convert_sf2uw(r0) + +declare i32 @llvm.hexagon.F2.conv.sf2uw.chop(float) +define i32 @F2_conv_sf2uw_chop(float %a) { + %z = call i32 @llvm.hexagon.F2.conv.sf2uw.chop(float %a) + ret i32 %z +} +; CHECK: r0 = convert_sf2uw(r0):chop + +declare i32 @llvm.hexagon.F2.conv.sf2w(float) +define i32 @F2_conv_sf2w(float %a) { + %z = call i32 @llvm.hexagon.F2.conv.sf2w(float %a) + ret i32 %z +} +; CHECK: r0 = convert_sf2w(r0) + +declare i32 @llvm.hexagon.F2.conv.sf2w.chop(float) +define i32 @F2_conv_sf2w_chop(float %a) { + %z = call i32 @llvm.hexagon.F2.conv.sf2w.chop(float %a) + ret i32 %z +} +; CHECK: r0 = convert_sf2w(r0):chop + +; Floating point extreme value assistance +declare float @llvm.hexagon.F2.sffixupr(float) +define float @F2_sffixupr(float %a) { + %z = call float @llvm.hexagon.F2.sffixupr(float %a) + ret float %z +} +; CHECK: r0 = sffixupr(r0) + +declare float @llvm.hexagon.F2.sffixupn(float, float) +define float @F2_sffixupn(float %a, float %b) { + %z = call float @llvm.hexagon.F2.sffixupn(float %a, float %b) + ret float %z +} +; CHECK: r0 = sffixupn(r0, r1) + +declare float @llvm.hexagon.F2.sffixupd(float, float) +define float @F2_sffixupd(float %a, float %b) { + %z = call float @llvm.hexagon.F2.sffixupd(float %a, float %b) + ret float %z +} +; CHECK: r0 = sffixupd(r0, r1) + +; Floating point fused multiply-add +declare float @llvm.hexagon.F2.sffma(float, float, float) +define float @F2_sffma(float %a, float %b, float %c) { + %z = call float @llvm.hexagon.F2.sffma(float %a, float %b, float %c) + ret float %z +} +; CHECK: r0 += sfmpy(r1, r2) + +declare float @llvm.hexagon.F2.sffms(float, float, float) +define float @F2_sffms(float %a, float %b, float %c) { + %z = call float @llvm.hexagon.F2.sffms(float %a, float %b, float %c) + ret float %z +} +; CHECK: r0 -= sfmpy(r1, r2) + +; Floating point fused multiply-add with scaling +declare float @llvm.hexagon.F2.sffma.sc(float, float, float, i32) +define float @F2_sffma_sc(float %a, float %b, float %c, i32 %d) { + %z = call float @llvm.hexagon.F2.sffma.sc(float %a, float %b, float %c, i32 %d) + ret float %z +} +; CHECK: r0 += sfmpy(r1, r2, p0):scale + +; Floating point fused multiply-add for library routines +declare float @llvm.hexagon.F2.sffma.lib(float, float, float) +define float @F2_sffma_lib(float %a, float %b, float %c) { + %z = call float @llvm.hexagon.F2.sffma.lib(float %a, float %b, float %c) + ret float %z +} +; CHECK: r0 += sfmpy(r1, r2):lib + +declare float @llvm.hexagon.F2.sffms.lib(float, float, float) +define float @F2_sffms_lib(float %a, float %b, float %c) { + %z = call float @llvm.hexagon.F2.sffms.lib(float %a, float %b, float %c) + ret float %z +} +; CHECK: r0 -= sfmpy(r1, r2):lib + +; Create floating-point constant +declare float @llvm.hexagon.F2.sfimm.p(i32) +define float @F2_sfimm_p() { + %z = call float @llvm.hexagon.F2.sfimm.p(i32 0) + ret float %z +} +; CHECK: r0 = sfmake(#0):pos + +declare float @llvm.hexagon.F2.sfimm.n(i32) +define float @F2_sfimm_n() { + %z = call float @llvm.hexagon.F2.sfimm.n(i32 0) + ret float %z +} +; CHECK: r0 = sfmake(#0):neg + +declare double @llvm.hexagon.F2.dfimm.p(i32) +define double @F2_dfimm_p() { + %z = call double @llvm.hexagon.F2.dfimm.p(i32 0) + ret double %z +} +; CHECK: r1:0 = dfmake(#0):pos + +declare double @llvm.hexagon.F2.dfimm.n(i32) +define double @F2_dfimm_n() { + %z = call double @llvm.hexagon.F2.dfimm.n(i32 0) + ret double %z +} +; CHECK: r1:0 = dfmake(#0):neg + +; Floating point maximum 
+declare float @llvm.hexagon.F2.sfmax(float, float) +define float @F2_sfmax(float %a, float %b) { + %z = call float @llvm.hexagon.F2.sfmax(float %a, float %b) + ret float %z +} +; CHECK: r0 = sfmax(r0, r1) + +; Floating point minimum +declare float @llvm.hexagon.F2.sfmin(float, float) +define float @F2_sfmin(float %a, float %b) { + %z = call float @llvm.hexagon.F2.sfmin(float %a, float %b) + ret float %z +} +; CHECK: r0 = sfmin(r0, r1) + +; Floating point multiply +declare float @llvm.hexagon.F2.sfmpy(float, float) +define float @F2_sfmpy(float %a, float %b) { + %z = call float @llvm.hexagon.F2.sfmpy(float %a, float %b) + ret float %z +} +; CHECK: r0 = sfmpy(r0, r1) + +; Floating point subtraction +declare float @llvm.hexagon.F2.sfsub(float, float) +define float @F2_sfsub(float %a, float %b) { + %z = call float @llvm.hexagon.F2.sfsub(float %a, float %b) + ret float %z +} +; CHECK: r0 = sfsub(r0, r1) diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll b/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll new file mode 100644 index 000000000000..6409e4e10ca2 --- /dev/null +++ b/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll @@ -0,0 +1,1525 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | FileCheck %s +; Hexagon Programmer's Reference Manual 11.10.5 XTYPE/MPY + +; Multiply and use lower result +declare i32 @llvm.hexagon.M4.mpyrr.addi(i32, i32, i32) +define i32 @M4_mpyrr_addi(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M4.mpyrr.addi(i32 0, i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = add(#0, mpyi(r0, r1)) + +declare i32 @llvm.hexagon.M4.mpyri.addi(i32, i32, i32) +define i32 @M4_mpyri_addi(i32 %a) { + %z = call i32 @llvm.hexagon.M4.mpyri.addi(i32 0, i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = add(#0, mpyi(r0, #0)) + +declare i32 @llvm.hexagon.M4.mpyri.addr.u2(i32, i32, i32) +define i32 @M4_mpyri_addr_u2(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M4.mpyri.addr.u2(i32 %a, i32 0, i32 %b) + ret i32 %z +} +; CHECK: r0 = add(r0, mpyi(#0, r1)) + +declare i32 @llvm.hexagon.M4.mpyri.addr(i32, i32, i32) +define i32 @M4_mpyri_addr(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M4.mpyri.addr(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 = add(r0, mpyi(r1, #0)) + +declare i32 @llvm.hexagon.M4.mpyrr.addr(i32, i32, i32) +define i32 @M4_mpyrr_addr(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M4.mpyrr.addr(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r1 = add(r0, mpyi(r1, r2)) + +; Vector multiply word by signed half (32x16) +declare i64 @llvm.hexagon.M2.mmpyl.s0(i64, i64) +define i64 @M2_mmpyl_s0(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.mmpyl.s0(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpyweh(r1:0, r3:2):sat + +declare i64 @llvm.hexagon.M2.mmpyl.s1(i64, i64) +define i64 @M2_mmpyl_s1(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.mmpyl.s1(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpyweh(r1:0, r3:2):<<1:sat + +declare i64 @llvm.hexagon.M2.mmpyh.s0(i64, i64) +define i64 @M2_mmpyh_s0(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.mmpyh.s0(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpywoh(r1:0, r3:2):sat + +declare i64 @llvm.hexagon.M2.mmpyh.s1(i64, i64) +define i64 @M2_mmpyh_s1(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.mmpyh.s1(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpywoh(r1:0, r3:2):<<1:sat + +declare i64 @llvm.hexagon.M2.mmpyl.rs0(i64, i64) +define i64 @M2_mmpyl_rs0(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.mmpyl.rs0(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpyweh(r1:0, r3:2):rnd:sat + 
+declare i64 @llvm.hexagon.M2.mmpyl.rs1(i64, i64) +define i64 @M2_mmpyl_rs1(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.mmpyl.rs1(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpyweh(r1:0, r3:2):<<1:rnd:sat + +declare i64 @llvm.hexagon.M2.mmpyh.rs0(i64, i64) +define i64 @M2_mmpyh_rs0(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.mmpyh.rs0(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpywoh(r1:0, r3:2):rnd:sat + +declare i64 @llvm.hexagon.M2.mmpyh.rs1(i64, i64) +define i64 @M2_mmpyh_rs1(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.mmpyh.rs1(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpywoh(r1:0, r3:2):<<1:rnd:sat + +; Vector multiply word by unsigned half (32x16) +declare i64 @llvm.hexagon.M2.mmpyul.s0(i64, i64) +define i64 @M2_mmpyul_s0(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.mmpyul.s0(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):sat + +declare i64 @llvm.hexagon.M2.mmpyul.s1(i64, i64) +define i64 @M2_mmpyul_s1(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.mmpyul.s1(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):<<1:sat + +declare i64 @llvm.hexagon.M2.mmpyuh.s0(i64, i64) +define i64 @M2_mmpyuh_s0(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.mmpyuh.s0(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpywouh(r1:0, r3:2):sat + +declare i64 @llvm.hexagon.M2.mmpyuh.s1(i64, i64) +define i64 @M2_mmpyuh_s1(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.mmpyuh.s1(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpywouh(r1:0, r3:2):<<1:sat + +declare i64 @llvm.hexagon.M2.mmpyul.rs0(i64, i64) +define i64 @M2_mmpyul_rs0(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.mmpyul.rs0(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):rnd:sat + +declare i64 @llvm.hexagon.M2.mmpyul.rs1(i64, i64) +define i64 @M2_mmpyul_rs1(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.mmpyul.rs1(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):<<1:rnd:sat + +declare i64 @llvm.hexagon.M2.mmpyuh.rs0(i64, i64) +define i64 @M2_mmpyuh_rs0(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.mmpyuh.rs0(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpywouh(r1:0, r3:2):rnd:sat + +declare i64 @llvm.hexagon.M2.mmpyuh.rs1(i64, i64) +define i64 @M2_mmpyuh_rs1(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.mmpyuh.rs1(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpywouh(r1:0, r3:2):<<1:rnd:sat + +; Multiply signed halfwords +declare i64 @llvm.hexagon.M2.mpyd.ll.s0(i32, i32) +define i64 @M2_mpyd_ll_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyd.ll.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpy(r0.l, r1.l) + +declare i64 @llvm.hexagon.M2.mpyd.ll.s1(i32, i32) +define i64 @M2_mpyd_ll_s1(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyd.ll.s1(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpy(r0.l, r1.l):<<1 + +declare i64 @llvm.hexagon.M2.mpyd.lh.s0(i32, i32) +define i64 @M2_mpyd_lh_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyd.lh.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpy(r0.l, r1.h) + +declare i64 @llvm.hexagon.M2.mpyd.lh.s1(i32, i32) +define i64 @M2_mpyd_lh_s1(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyd.lh.s1(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpy(r0.l, r1.h):<<1 + +declare i64 @llvm.hexagon.M2.mpyd.hl.s0(i32, i32) +define i64 @M2_mpyd_hl_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyd.hl.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpy(r0.h, r1.l) + +declare i64 
@llvm.hexagon.M2.mpyd.hl.s1(i32, i32) +define i64 @M2_mpyd_hl_s1(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyd.hl.s1(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpy(r0.h, r1.l):<<1 + +declare i64 @llvm.hexagon.M2.mpyd.hh.s0(i32, i32) +define i64 @M2_mpyd_hh_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyd.hh.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpy(r0.h, r1.h) + +declare i64 @llvm.hexagon.M2.mpyd.hh.s1(i32, i32) +define i64 @M2_mpyd_hh_s1(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyd.hh.s1(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpy(r0.h, r1.h):<<1 + +declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32, i32) +define i64 @M2_mpyd_rnd_ll_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpy(r0.l, r1.l):rnd + +declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32, i32) +define i64 @M2_mpyd_rnd_ll_s1(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpy(r0.l, r1.l):<<1:rnd + +declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32, i32) +define i64 @M2_mpyd_rnd_lh_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpy(r0.l, r1.h):rnd + +declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32, i32) +define i64 @M2_mpyd_rnd_lh_s1(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpy(r0.l, r1.h):<<1:rnd + +declare i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32, i32) +define i64 @M2_mpyd_rnd_hl_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpy(r0.h, r1.l):rnd + +declare i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32, i32) +define i64 @M2_mpyd_rnd_hl_s1(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpy(r0.h, r1.l):<<1:rnd + +declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32, i32) +define i64 @M2_mpyd_rnd_hh_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpy(r0.h, r1.h):rnd + +declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32, i32) +define i64 @M2_mpyd_rnd_hh_s1(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpy(r0.h, r1.h):<<1:rnd + +declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64, i32, i32) +define i64 @M2_mpyd_acc_ll_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += mpy(r2.l, r3.l) + +declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64, i32, i32) +define i64 @M2_mpyd_acc_ll_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += mpy(r2.l, r3.l):<<1 + +declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64, i32, i32) +define i64 @M2_mpyd_acc_lh_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += mpy(r2.l, r3.h) + +declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64, i32, i32) +define i64 @M2_mpyd_acc_lh_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += mpy(r2.l, r3.h):<<1 + +declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s0(i64, i32, i32) +define i64 @M2_mpyd_acc_hl_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 
@llvm.hexagon.M2.mpyd.acc.hl.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += mpy(r2.h, r3.l) + +declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64, i32, i32) +define i64 @M2_mpyd_acc_hl_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += mpy(r2.h, r3.l):<<1 + +declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64, i32, i32) +define i64 @M2_mpyd_acc_hh_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += mpy(r2.h, r3.h) + +declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64, i32, i32) +define i64 @M2_mpyd_acc_hh_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += mpy(r2.h, r3.h):<<1 + +declare i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64, i32, i32) +define i64 @M2_mpyd_nac_ll_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= mpy(r2.l, r3.l) + +declare i64 @llvm.hexagon.M2.mpyd.nac.ll.s1(i64, i32, i32) +define i64 @M2_mpyd_nac_ll_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyd.nac.ll.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= mpy(r2.l, r3.l):<<1 + +declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64, i32, i32) +define i64 @M2_mpyd_nac_lh_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= mpy(r2.l, r3.h) + +declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64, i32, i32) +define i64 @M2_mpyd_nac_lh_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= mpy(r2.l, r3.h):<<1 + +declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64, i32, i32) +define i64 @M2_mpyd_nac_hl_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= mpy(r2.h, r3.l) + +declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64, i32, i32) +define i64 @M2_mpyd_nac_hl_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= mpy(r2.h, r3.l):<<1 + +declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64, i32, i32) +define i64 @M2_mpyd_nac_hh_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= mpy(r2.h, r3.h) + +declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64, i32, i32) +define i64 @M2_mpyd_nac_hh_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= mpy(r2.h, r3.h):<<1 + +declare i32 @llvm.hexagon.M2.mpy.ll.s0(i32, i32) +define i32 @M2_mpy_ll_s0(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.ll.s0(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.l, r1.l) + +declare i32 @llvm.hexagon.M2.mpy.ll.s1(i32, i32) +define i32 @M2_mpy_ll_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.ll.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.l, r1.l):<<1 + +declare i32 @llvm.hexagon.M2.mpy.lh.s0(i32, i32) +define i32 @M2_mpy_lh_s0(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.lh.s0(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.l, r1.h) + +declare i32 @llvm.hexagon.M2.mpy.lh.s1(i32, i32) +define i32 @M2_mpy_lh_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.lh.s1(i32 %a, i32 %b) + ret 
i32 %z +} +; CHECK: r0 = mpy(r0.l, r1.h):<<1 + +declare i32 @llvm.hexagon.M2.mpy.hl.s0(i32, i32) +define i32 @M2_mpy_hl_s0(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.hl.s0(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.h, r1.l) + +declare i32 @llvm.hexagon.M2.mpy.hl.s1(i32, i32) +define i32 @M2_mpy_hl_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.hl.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.h, r1.l):<<1 + +declare i32 @llvm.hexagon.M2.mpy.hh.s0(i32, i32) +define i32 @M2_mpy_hh_s0(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.hh.s0(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.h, r1.h) + +declare i32 @llvm.hexagon.M2.mpy.hh.s1(i32, i32) +define i32 @M2_mpy_hh_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.hh.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.h, r1.h):<<1 + +declare i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32, i32) +define i32 @M2_mpy_sat_ll_s0(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.l, r1.l):sat + +declare i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32, i32) +define i32 @M2_mpy_sat_ll_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.l, r1.l):<<1:sat + +declare i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32, i32) +define i32 @M2_mpy_sat_lh_s0(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.l, r1.h):sat + +declare i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32, i32) +define i32 @M2_mpy_sat_lh_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.l, r1.h):<<1:sat + +declare i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32, i32) +define i32 @M2_mpy_sat_hl_s0(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.h, r1.l):sat + +declare i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32, i32) +define i32 @M2_mpy_sat_hl_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.h, r1.l):<<1:sat + +declare i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32, i32) +define i32 @M2_mpy_sat_hh_s0(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.h, r1.h):sat + +declare i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32, i32) +define i32 @M2_mpy_sat_hh_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.h, r1.h):<<1:sat + +declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32, i32) +define i32 @M2_mpy_sat_rnd_ll_s0(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.l, r1.l):rnd:sat + +declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32, i32) +define i32 @M2_mpy_sat_rnd_ll_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.l, r1.l):<<1:rnd:sat + +declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32, i32) +define i32 @M2_mpy_sat_rnd_lh_s0(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.l, r1.h):rnd:sat + +declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32, i32) +define i32 @M2_mpy_sat_rnd_lh_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.l, 
r1.h):<<1:rnd:sat + +declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32, i32) +define i32 @M2_mpy_sat_rnd_hl_s0(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.h, r1.l):rnd:sat + +declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32, i32) +define i32 @M2_mpy_sat_rnd_hl_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.h, r1.l):<<1:rnd:sat + +declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32, i32) +define i32 @M2_mpy_sat_rnd_hh_s0(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.h, r1.h):rnd:sat + +declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32, i32) +define i32 @M2_mpy_sat_rnd_hh_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0.h, r1.h):<<1:rnd:sat + +declare i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32, i32, i32) +define i32 @M2_mpy_acc_ll_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpy(r1.l, r2.l) + +declare i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32, i32, i32) +define i32 @M2_mpy_acc_ll_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpy(r1.l, r2.l):<<1 + +declare i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32, i32, i32) +define i32 @M2_mpy_acc_lh_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpy(r1.l, r2.h) + +declare i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32, i32, i32) +define i32 @M2_mpy_acc_lh_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpy(r1.l, r2.h):<<1 + +declare i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32, i32, i32) +define i32 @M2_mpy_acc_hl_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpy(r1.h, r2.l) + +declare i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32, i32, i32) +define i32 @M2_mpy_acc_hl_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpy(r1.h, r2.l):<<1 + +declare i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32, i32, i32) +define i32 @M2_mpy_acc_hh_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpy(r1.h, r2.h) + +declare i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32, i32, i32) +define i32 @M2_mpy_acc_hh_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpy(r1.h, r2.h):<<1 + +declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32, i32, i32) +define i32 @M2_mpy_acc_sat_ll_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpy(r1.l, r2.l):sat + +declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32, i32, i32) +define i32 @M2_mpy_acc_sat_ll_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpy(r1.l, r2.l):<<1:sat + +declare i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32, i32, i32) +define i32 @M2_mpy_acc_sat_lh_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32 
%a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpy(r1.l, r2.h):sat + +declare i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32, i32, i32) +define i32 @M2_mpy_acc_sat_lh_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpy(r1.l, r2.h):<<1:sat + +declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32, i32, i32) +define i32 @M2_mpy_acc_sat_hl_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpy(r1.h, r2.l):sat + +declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32, i32, i32) +define i32 @M2_mpy_acc_sat_hl_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpy(r1.h, r2.l):<<1:sat + +declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32, i32, i32) +define i32 @M2_mpy_acc_sat_hh_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpy(r1.h, r2.h):sat + +declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32, i32, i32) +define i32 @M2_mpy_acc_sat_hh_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpy(r1.h, r2.h):<<1:sat + +declare i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32, i32, i32) +define i32 @M2_mpy_nac_ll_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpy(r1.l, r2.l) + +declare i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32, i32, i32) +define i32 @M2_mpy_nac_ll_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpy(r1.l, r2.l):<<1 + +declare i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32, i32, i32) +define i32 @M2_mpy_nac_lh_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpy(r1.l, r2.h) + +declare i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32, i32, i32) +define i32 @M2_mpy_nac_lh_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpy(r1.l, r2.h):<<1 + +declare i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32, i32, i32) +define i32 @M2_mpy_nac_hl_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpy(r1.h, r2.l) + +declare i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32, i32, i32) +define i32 @M2_mpy_nac_hl_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpy(r1.h, r2.l):<<1 + +declare i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32, i32, i32) +define i32 @M2_mpy_nac_hh_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpy(r1.h, r2.h) + +declare i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32, i32, i32) +define i32 @M2_mpy_nac_hh_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpy(r1.h, r2.h):<<1 + +declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32, i32, i32) +define i32 @M2_mpy_nac_sat_ll_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpy(r1.l, r2.l):sat + +declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32, 
i32, i32) +define i32 @M2_mpy_nac_sat_ll_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpy(r1.l, r2.l):<<1:sat + +declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32, i32, i32) +define i32 @M2_mpy_nac_sat_lh_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpy(r1.l, r2.h):sat + +declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32, i32, i32) +define i32 @M2_mpy_nac_sat_lh_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpy(r1.l, r2.h):<<1:sat + +declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32, i32, i32) +define i32 @M2_mpy_nac_sat_hl_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpy(r1.h, r2.l):sat + +declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32, i32, i32) +define i32 @M2_mpy_nac_sat_hl_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpy(r1.h, r2.l):<<1:sat + +declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32, i32, i32) +define i32 @M2_mpy_nac_sat_hh_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpy(r1.h, r2.h):sat + +declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32, i32, i32) +define i32 @M2_mpy_nac_sat_hh_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpy(r1.h, r2.h):<<1:sat + +; Multiply unsigned halfwords +declare i64 @llvm.hexagon.M2.mpyud.ll.s0(i32, i32) +define i64 @M2_mpyud_ll_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyud.ll.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpyu(r0.l, r1.l) + +declare i64 @llvm.hexagon.M2.mpyud.ll.s1(i32, i32) +define i64 @M2_mpyud_ll_s1(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyud.ll.s1(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpyu(r0.l, r1.l):<<1 + +declare i64 @llvm.hexagon.M2.mpyud.lh.s0(i32, i32) +define i64 @M2_mpyud_lh_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyud.lh.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpyu(r0.l, r1.h) + +declare i64 @llvm.hexagon.M2.mpyud.lh.s1(i32, i32) +define i64 @M2_mpyud_lh_s1(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyud.lh.s1(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpyu(r0.l, r1.h):<<1 + +declare i64 @llvm.hexagon.M2.mpyud.hl.s0(i32, i32) +define i64 @M2_mpyud_hl_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyud.hl.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpyu(r0.h, r1.l) + +declare i64 @llvm.hexagon.M2.mpyud.hl.s1(i32, i32) +define i64 @M2_mpyud_hl_s1(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyud.hl.s1(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpyu(r0.h, r1.l):<<1 + +declare i64 @llvm.hexagon.M2.mpyud.hh.s0(i32, i32) +define i64 @M2_mpyud_hh_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyud.hh.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpyu(r0.h, r1.h) + +declare i64 @llvm.hexagon.M2.mpyud.hh.s1(i32, i32) +define i64 @M2_mpyud_hh_s1(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.mpyud.hh.s1(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpyu(r0.h, r1.h):<<1 + +declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64, i32, i32) +define i64 
@M2_mpyud_acc_ll_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += mpyu(r2.l, r3.l) + +declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64, i32, i32) +define i64 @M2_mpyud_acc_ll_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += mpyu(r2.l, r3.l):<<1 + +declare i64 @llvm.hexagon.M2.mpyud.acc.lh.s0(i64, i32, i32) +define i64 @M2_mpyud_acc_lh_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyud.acc.lh.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += mpyu(r2.l, r3.h) + +declare i64 @llvm.hexagon.M2.mpyud.acc.lh.s1(i64, i32, i32) +define i64 @M2_mpyud_acc_lh_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyud.acc.lh.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += mpyu(r2.l, r3.h):<<1 + +declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64, i32, i32) +define i64 @M2_mpyud_acc_hl_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += mpyu(r2.h, r3.l) + +declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64, i32, i32) +define i64 @M2_mpyud_acc_hl_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += mpyu(r2.h, r3.l):<<1 + +declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64, i32, i32) +define i64 @M2_mpyud_acc_hh_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += mpyu(r2.h, r3.h) + +declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64, i32, i32) +define i64 @M2_mpyud_acc_hh_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += mpyu(r2.h, r3.h):<<1 + +declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64, i32, i32) +define i64 @M2_mpyud_nac_ll_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= mpyu(r2.l, r3.l) + +declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64, i32, i32) +define i64 @M2_mpyud_nac_ll_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= mpyu(r2.l, r3.l):<<1 + +declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64, i32, i32) +define i64 @M2_mpyud_nac_lh_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= mpyu(r2.l, r3.h) + +declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64, i32, i32) +define i64 @M2_mpyud_nac_lh_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= mpyu(r2.l, r3.h):<<1 + +declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64, i32, i32) +define i64 @M2_mpyud_nac_hl_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= mpyu(r2.h, r3.l) + +declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64, i32, i32) +define i64 @M2_mpyud_nac_hl_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= mpyu(r2.h, r3.l):<<1 + +declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s0(i64, i32, i32) +define i64 @M2_mpyud_nac_hh_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 
@llvm.hexagon.M2.mpyud.nac.hh.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= mpyu(r2.h, r3.h) + +declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64, i32, i32) +define i64 @M2_mpyud_nac_hh_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= mpyu(r2.h, r3.h):<<1 + +declare i32 @llvm.hexagon.M2.mpyu.ll.s0(i32, i32) +define i32 @M2_mpyu_ll_s0(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpyu.ll.s0(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpyu(r0.l, r1.l) + +declare i32 @llvm.hexagon.M2.mpyu.ll.s1(i32, i32) +define i32 @M2_mpyu_ll_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpyu.ll.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpyu(r0.l, r1.l):<<1 + +declare i32 @llvm.hexagon.M2.mpyu.lh.s0(i32, i32) +define i32 @M2_mpyu_lh_s0(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpyu.lh.s0(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpyu(r0.l, r1.h) + +declare i32 @llvm.hexagon.M2.mpyu.lh.s1(i32, i32) +define i32 @M2_mpyu_lh_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpyu.lh.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpyu(r0.l, r1.h):<<1 + +declare i32 @llvm.hexagon.M2.mpyu.hl.s0(i32, i32) +define i32 @M2_mpyu_hl_s0(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpyu.hl.s0(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpyu(r0.h, r1.l) + +declare i32 @llvm.hexagon.M2.mpyu.hl.s1(i32, i32) +define i32 @M2_mpyu_hl_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpyu.hl.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpyu(r0.h, r1.l):<<1 + +declare i32 @llvm.hexagon.M2.mpyu.hh.s0(i32, i32) +define i32 @M2_mpyu_hh_s0(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpyu.hh.s0(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpyu(r0.h, r1.h) + +declare i32 @llvm.hexagon.M2.mpyu.hh.s1(i32, i32) +define i32 @M2_mpyu_hh_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpyu.hh.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpyu(r0.h, r1.h):<<1 + +declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32, i32, i32) +define i32 @M2_mpyu_acc_ll_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpyu(r1.l, r2.l) + +declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32, i32, i32) +define i32 @M2_mpyu_acc_ll_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpyu(r1.l, r2.l):<<1 + +declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32, i32, i32) +define i32 @M2_mpyu_acc_lh_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpyu(r1.l, r2.h) + +declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32, i32, i32) +define i32 @M2_mpyu_acc_lh_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpyu(r1.l, r2.h):<<1 + +declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32, i32, i32) +define i32 @M2_mpyu_acc_hl_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpyu(r1.h, r2.l) + +declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32, i32, i32) +define i32 @M2_mpyu_acc_hl_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpyu(r1.h, r2.l):<<1 + +declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32, i32, i32) +define i32 
@M2_mpyu_acc_hh_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpyu(r1.h, r2.h) + +declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32, i32, i32) +define i32 @M2_mpyu_acc_hh_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpyu(r1.h, r2.h):<<1 + +declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32, i32, i32) +define i32 @M2_mpyu_nac_ll_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpyu(r1.l, r2.l) + +declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32, i32, i32) +define i32 @M2_mpyu_nac_ll_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpyu(r1.l, r2.l):<<1 + +declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32, i32, i32) +define i32 @M2_mpyu_nac_lh_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpyu(r1.l, r2.h) + +declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32, i32, i32) +define i32 @M2_mpyu_nac_lh_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpyu(r1.l, r2.h):<<1 + +declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32, i32, i32) +define i32 @M2_mpyu_nac_hl_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpyu(r1.h, r2.l) + +declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32, i32, i32) +define i32 @M2_mpyu_nac_hl_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpyu(r1.h, r2.l):<<1 + +declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32, i32, i32) +define i32 @M2_mpyu_nac_hh_s0(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpyu(r1.h, r2.h) + +declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32, i32, i32) +define i32 @M2_mpyu_nac_hh_s1(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpyu(r1.h, r2.h):<<1 + +; Polynomial multiply words +declare i64 @llvm.hexagon.M4.pmpyw(i32, i32) +define i64 @M4_pmpyw(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M4.pmpyw(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = pmpyw(r0, r1) + +declare i64 @llvm.hexagon.M4.pmpyw.acc(i64, i32, i32) +define i64 @M4_pmpyw_acc(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M4.pmpyw.acc(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 ^= pmpyw(r2, r3) + +; Vector reduce multiply word by signed half +declare i64 @llvm.hexagon.M4.vrmpyoh.s0(i64, i64) +define i64 @M4_vrmpyoh_s0(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M4.vrmpyoh.s0(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vrmpywoh(r1:0, r3:2) + +declare i64 @llvm.hexagon.M4.vrmpyoh.s1(i64, i64) +define i64 @M4_vrmpyoh_s1(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M4.vrmpyoh.s1(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vrmpywoh(r1:0, r3:2):<<1 + +declare i64 @llvm.hexagon.M4.vrmpyeh.s0(i64, i64) +define i64 @M4_vrmpyeh_s0(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M4.vrmpyeh.s0(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vrmpyweh(r1:0, r3:2) + +declare i64 @llvm.hexagon.M4.vrmpyeh.s1(i64, i64) +define i64 
@M4_vrmpyeh_s1(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M4.vrmpyeh.s1(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vrmpyweh(r1:0, r3:2):<<1 + +declare i64 @llvm.hexagon.M4.vrmpyoh.acc.s0(i64, i64, i64) +define i64 @M4_vrmpyoh_acc_s0(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.M4.vrmpyoh.acc.s0(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 += vrmpywoh(r3:2, r5:4) + +declare i64 @llvm.hexagon.M4.vrmpyoh.acc.s1(i64, i64, i64) +define i64 @M4_vrmpyoh_acc_s1(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.M4.vrmpyoh.acc.s1(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 += vrmpywoh(r3:2, r5:4):<<1 + +declare i64 @llvm.hexagon.M4.vrmpyeh.acc.s0(i64, i64, i64) +define i64 @M4_vrmpyeh_acc_s0(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.M4.vrmpyeh.acc.s0(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 += vrmpyweh(r3:2, r5:4) + +declare i64 @llvm.hexagon.M4.vrmpyeh.acc.s1(i64, i64, i64) +define i64 @M4_vrmpyeh_acc_s1(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.M4.vrmpyeh.acc.s1(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 += vrmpyweh(r3:2, r5:4):<<1 + +; Multiply and use upper result +declare i32 @llvm.hexagon.M2.dpmpyss.rnd.s0(i32, i32) +define i32 @M2_dpmpyss_rnd_s0(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.dpmpyss.rnd.s0(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0, r1):rnd + +declare i32 @llvm.hexagon.M2.mpyu.up(i32, i32) +define i32 @M2_mpyu_up(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpyu.up(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpyu(r0, r1) + +declare i32 @llvm.hexagon.M2.mpysu.up(i32, i32) +define i32 @M2_mpysu_up(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpysu.up(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpysu(r0, r1) + +declare i32 @llvm.hexagon.M2.hmmpyh.s1(i32, i32) +define i32 @M2_hmmpyh_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.hmmpyh.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0, r1.h):<<1:sat + +declare i32 @llvm.hexagon.M2.hmmpyl.s1(i32, i32) +define i32 @M2_hmmpyl_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.hmmpyl.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0, r1.l):<<1:sat + +declare i32 @llvm.hexagon.M2.hmmpyh.rs1(i32, i32) +define i32 @M2_hmmpyh_rs1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.hmmpyh.rs1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0, r1.h):<<1:rnd:sat + +declare i32 @llvm.hexagon.M2.mpy.up.s1.sat(i32, i32) +define i32 @M2_mpy_up_s1_sat(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.up.s1.sat(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0, r1):<<1:sat + +declare i32 @llvm.hexagon.M2.hmmpyl.rs1(i32, i32) +define i32 @M2_hmmpyl_rs1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.hmmpyl.rs1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0, r1.l):<<1:rnd:sat + +declare i32 @llvm.hexagon.M2.mpy.up(i32, i32) +define i32 @M2_mpy_up(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.up(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0, r1) + +declare i32 @llvm.hexagon.M2.mpy.up.s1(i32, i32) +define i32 @M2_mpy_up_s1(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.M2.mpy.up.s1(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = mpy(r0, r1):<<1 + +declare i32 @llvm.hexagon.M4.mac.up.s1.sat(i32, i32, i32) +define i32 @M4_mac_up_s1_sat(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M4.mac.up.s1.sat(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += mpy(r1, r2):<<1:sat + +declare i32 @llvm.hexagon.M4.nac.up.s1.sat(i32, i32, i32) +define i32 
@M4_nac_up_s1_sat(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.M4.nac.up.s1.sat(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= mpy(r1, r2):<<1:sat + +; Multiply and use full result +declare i64 @llvm.hexagon.M2.dpmpyss.s0(i32, i32) +define i64 @M2_dpmpyss_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpy(r0, r1) + +declare i64 @llvm.hexagon.M2.dpmpyuu.s0(i32, i32) +define i64 @M2_dpmpyuu_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.dpmpyuu.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = mpyu(r0, r1) + +declare i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64, i32, i32) +define i64 @M2_dpmpyss_acc_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += mpy(r2, r3) + +declare i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64, i32, i32) +define i64 @M2_dpmpyss_nac_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= mpy(r2, r3) + +declare i64 @llvm.hexagon.M2.dpmpyuu.acc.s0(i64, i32, i32) +define i64 @M2_dpmpyuu_acc_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.dpmpyuu.acc.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += mpyu(r2, r3) + +declare i64 @llvm.hexagon.M2.dpmpyuu.nac.s0(i64, i32, i32) +define i64 @M2_dpmpyuu_nac_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.dpmpyuu.nac.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= mpyu(r2, r3) + +; Vector dual multiply +declare i64 @llvm.hexagon.M2.vdmpys.s0(i64, i64) +define i64 @M2_vdmpys_s0(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.vdmpys.s0(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vdmpy(r1:0, r3:2):sat + +declare i64 @llvm.hexagon.M2.vdmpys.s1(i64, i64) +define i64 @M2_vdmpys_s1(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.vdmpys.s1(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vdmpy(r1:0, r3:2):<<1:sat + +; Vector reduce multiply bytes +declare i64 @llvm.hexagon.M5.vrmpybuu(i64, i64) +define i64 @M5_vrmpybuu(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M5.vrmpybuu(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vrmpybu(r1:0, r3:2) + +declare i64 @llvm.hexagon.M5.vrmpybsu(i64, i64) +define i64 @M5_vrmpybsu(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M5.vrmpybsu(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vrmpybsu(r1:0, r3:2) + +declare i64 @llvm.hexagon.M5.vrmacbuu(i64, i64, i64) +define i64 @M5_vrmacbuu(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.M5.vrmacbuu(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 += vrmpybu(r3:2, r5:4) + +declare i64 @llvm.hexagon.M5.vrmacbsu(i64, i64, i64) +define i64 @M5_vrmacbsu(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.M5.vrmacbsu(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 += vrmpybsu(r3:2, r5:4) + +; Vector dual multiply signed by unsigned bytes +declare i64 @llvm.hexagon.M5.vdmpybsu(i64, i64) +define i64 @M5_vdmpybsu(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M5.vdmpybsu(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vdmpybsu(r1:0, r3:2):sat + +declare i64 @llvm.hexagon.M5.vdmacbsu(i64, i64, i64) +define i64 @M5_vdmacbsu(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.M5.vdmacbsu(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 += vdmpybsu(r3:2, r5:4):sat + +; Vector multiply even halfwords +declare i64 @llvm.hexagon.M2.vmpy2es.s0(i64, i64) +define i64 @M2_vmpy2es_s0(i64 %a, i64 %b) { + %z = call i64 
@llvm.hexagon.M2.vmpy2es.s0(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpyeh(r1:0, r3:2):sat + +declare i64 @llvm.hexagon.M2.vmpy2es.s1(i64, i64) +define i64 @M2_vmpy2es_s1(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.vmpy2es.s1(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpyeh(r1:0, r3:2):<<1:sat + +declare i64 @llvm.hexagon.M2.vmac2es(i64, i64, i64) +define i64 @M2_vmac2es(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.M2.vmac2es(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 += vmpyeh(r3:2, r5:4) + +declare i64 @llvm.hexagon.M2.vmac2es.s0(i64, i64, i64) +define i64 @M2_vmac2es_s0(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.M2.vmac2es.s0(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 += vmpyeh(r3:2, r5:4):sat + +declare i64 @llvm.hexagon.M2.vmac2es.s1(i64, i64, i64) +define i64 @M2_vmac2es_s1(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.M2.vmac2es.s1(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 += vmpyeh(r3:2, r5:4):<<1:sat + +; Vector multiply halfwords +declare i64 @llvm.hexagon.M2.vmpy2s.s0(i32, i32) +define i64 @M2_vmpy2s_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.vmpy2s.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpyh(r0, r1):sat + +declare i64 @llvm.hexagon.M2.vmpy2s.s1(i32, i32) +define i64 @M2_vmpy2s_s1(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.vmpy2s.s1(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpyh(r0, r1):<<1:sat + +declare i64 @llvm.hexagon.M2.vmac2(i64, i32, i32) +define i64 @M2_vmac2(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.vmac2(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += vmpyh(r2, r3) + +declare i64 @llvm.hexagon.M2.vmac2s.s0(i64, i32, i32) +define i64 @M2_vmac2s_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.vmac2s.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += vmpyh(r2, r3):sat + +declare i64 @llvm.hexagon.M2.vmac2s.s1(i64, i32, i32) +define i64 @M2_vmac2s_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.vmac2s.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += vmpyh(r2, r3):<<1:sat + +; Vector multiply halfwords signed by unsigned +declare i64 @llvm.hexagon.M2.vmpy2su.s0(i32, i32) +define i64 @M2_vmpy2su_s0(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.vmpy2su.s0(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpyhsu(r0, r1):sat + +declare i64 @llvm.hexagon.M2.vmpy2su.s1(i32, i32) +define i64 @M2_vmpy2su_s1(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M2.vmpy2su.s1(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpyhsu(r0, r1):<<1:sat + +declare i64 @llvm.hexagon.M2.vmac2su.s0(i64, i32, i32) +define i64 @M2_vmac2su_s0(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.vmac2su.s0(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += vmpyhsu(r2, r3):sat + +declare i64 @llvm.hexagon.M2.vmac2su.s1(i64, i32, i32) +define i64 @M2_vmac2su_s1(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M2.vmac2su.s1(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += vmpyhsu(r2, r3):<<1:sat + +; Vector reduce multiply halfwords +declare i64 @llvm.hexagon.M2.vrmpy.s0(i64, i64) +define i64 @M2_vrmpy_s0(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.M2.vrmpy.s0(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vrmpyh(r1:0, r3:2) + +declare i64 @llvm.hexagon.M2.vrmac.s0(i64, i64, i64) +define i64 @M2_vrmac_s0(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.M2.vrmac.s0(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 += vrmpyh(r3:2, r5:4) + +; 
Vector multiply bytes +declare i64 @llvm.hexagon.M5.vmpybsu(i32, i32) +define i64 @M2_vmpybsu(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M5.vmpybsu(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpybsu(r0, r1) + +declare i64 @llvm.hexagon.M5.vmpybuu(i32, i32) +define i64 @M2_vmpybuu(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M5.vmpybuu(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = vmpybu(r0, r1) + +declare i64 @llvm.hexagon.M5.vmacbuu(i64, i32, i32) +define i64 @M2_vmacbuu(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M5.vmacbuu(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += vmpybu(r2, r3) + +declare i64 @llvm.hexagon.M5.vmacbsu(i64, i32, i32) +define i64 @M2_vmacbsu(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M5.vmacbsu(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += vmpybsu(r2, r3) + +; Vector polynomial multiply halfwords +declare i64 @llvm.hexagon.M4.vpmpyh(i32, i32) +define i64 @M4_vpmpyh(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.M4.vpmpyh(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = vpmpyh(r0, r1) + +declare i64 @llvm.hexagon.M4.vpmpyh.acc(i64, i32, i32) +define i64 @M4_vpmpyh_acc(i64 %a, i32 %b, i32 %c) { + %z = call i64 @llvm.hexagon.M4.vpmpyh.acc(i64 %a, i32 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 ^= vpmpyh(r2, r3) diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll b/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll new file mode 100644 index 000000000000..0b761323e31e --- /dev/null +++ b/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll @@ -0,0 +1,252 @@ +; RUN: llc -march=hexagon -O0 < %s | FileCheck %s +; Hexagon Programmer's Reference Manual 11.10.6 XTYPE/PERM + +; Saturate +declare i32 @llvm.hexagon.A2.sat(i64) +define i32 @A2_sat(i64 %a) { + %z = call i32 @llvm.hexagon.A2.sat(i64 %a) + ret i32 %z +} +; CHECK: r0 = sat(r1:0) + +declare i32 @llvm.hexagon.A2.sath(i32) +define i32 @A2_sath(i32 %a) { + %z = call i32 @llvm.hexagon.A2.sath(i32 %a) + ret i32 %z +} +; CHECK: r0 = sath(r0) + +declare i32 @llvm.hexagon.A2.satuh(i32) +define i32 @A2_satuh(i32 %a) { + %z = call i32 @llvm.hexagon.A2.satuh(i32 %a) + ret i32 %z +} +; CHECK: r0 = satuh(r0) + +declare i32 @llvm.hexagon.A2.satub(i32) +define i32 @A2_satub(i32 %a) { + %z = call i32 @llvm.hexagon.A2.satub(i32 %a) + ret i32 %z +} +; CHECK: r0 = satub(r0) + +declare i32 @llvm.hexagon.A2.satb(i32) +define i32 @A2_satb(i32 %a) { + %z = call i32 @llvm.hexagon.A2.satb(i32 %a) + ret i32 %z +} +; CHECK: r0 = satb(r0) + +; Swizzle bytes +declare i32 @llvm.hexagon.A2.swiz(i32) +define i32 @A2_swiz(i32 %a) { + %z = call i32 @llvm.hexagon.A2.swiz(i32 %a) + ret i32 %z +} +; CHECK: r0 = swiz(r0) + +; Vector round and pack +declare i32 @llvm.hexagon.S2.vrndpackwh(i64) +define i32 @S2_vrndpackwh(i64 %a) { + %z = call i32 @llvm.hexagon.S2.vrndpackwh(i64 %a) + ret i32 %z +} +; CHECK: r0 = vrndwh(r1:0) + +declare i32 @llvm.hexagon.S2.vrndpackwhs(i64) +define i32 @S2_vrndpackwhs(i64 %a) { + %z = call i32 @llvm.hexagon.S2.vrndpackwhs(i64 %a) + ret i32 %z +} +; CHECK: r0 = vrndwh(r1:0):sat + +; Vector saturate and pack +declare i32 @llvm.hexagon.S2.vsathub(i64) +define i32 @S2_vsathub(i64 %a) { + %z = call i32 @llvm.hexagon.S2.vsathub(i64 %a) + ret i32 %z +} +; CHECK: r0 = vsathub(r1:0) + +declare i32 @llvm.hexagon.S2.vsatwh(i64) +define i32 @S2_vsatwh(i64 %a) { + %z = call i32 @llvm.hexagon.S2.vsatwh(i64 %a) + ret i32 %z +} +; CHECK: r0 = vsatwh(r1:0) + +declare i32 @llvm.hexagon.S2.vsatwuh(i64) +define i32 @S2_vsatwuh(i64 %a) { + %z = call i32 @llvm.hexagon.S2.vsatwuh(i64 %a) + 
ret i32 %z +} +; CHECK: r0 = vsatwuh(r1:0) + +declare i32 @llvm.hexagon.S2.vsathb(i64) +define i32 @S2_vsathb(i64 %a) { + %z = call i32 @llvm.hexagon.S2.vsathb(i64 %a) + ret i32 %z +} +; CHECK: r0 = vsathb(r1:0) + +declare i32 @llvm.hexagon.S2.svsathb(i32) +define i32 @S2_svsathb(i32 %a) { + %z = call i32 @llvm.hexagon.S2.svsathb(i32 %a) + ret i32 %z +} +; CHECK: r0 = vsathb(r0) + +declare i32 @llvm.hexagon.S2.svsathub(i32) +define i32 @S2_svsathub(i32 %a) { + %z = call i32 @llvm.hexagon.S2.svsathub(i32 %a) + ret i32 %z +} +; CHECK: r0 = vsathub(r0) + +; Vector saturate without pack +declare i64 @llvm.hexagon.S2.vsathub.nopack(i64) +define i64 @S2_vsathub_nopack(i64 %a) { + %z = call i64 @llvm.hexagon.S2.vsathub.nopack(i64 %a) + ret i64 %z +} +; CHECK: r1:0 = vsathub(r1:0) + +declare i64 @llvm.hexagon.S2.vsatwuh.nopack(i64) +define i64 @S2_vsatwuh_nopack(i64 %a) { + %z = call i64 @llvm.hexagon.S2.vsatwuh.nopack(i64 %a) + ret i64 %z +} +; CHECK: r1:0 = vsatwuh(r1:0) + +declare i64 @llvm.hexagon.S2.vsatwh.nopack(i64) +define i64 @S2_vsatwh_nopack(i64 %a) { + %z = call i64 @llvm.hexagon.S2.vsatwh.nopack(i64 %a) + ret i64 %z +} +; CHECK: r1:0 = vsatwh(r1:0) + +declare i64 @llvm.hexagon.S2.vsathb.nopack(i64) +define i64 @S2_vsathb_nopack(i64 %a) { + %z = call i64 @llvm.hexagon.S2.vsathb.nopack(i64 %a) + ret i64 %z +} +; CHECK: r1:0 = vsathb(r1:0) + +; Vector shuffle +declare i64 @llvm.hexagon.S2.shuffeb(i64, i64) +define i64 @S2_shuffeb(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.shuffeb(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = shuffeb(r1:0, r3:2) + +declare i64 @llvm.hexagon.S2.shuffob(i64, i64) +define i64 @S2_shuffob(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.shuffob(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = shuffob(r1:0, r3:2) + +declare i64 @llvm.hexagon.S2.shuffeh(i64, i64) +define i64 @S2_shuffeh(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.shuffeh(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = shuffeh(r1:0, r3:2) + +declare i64 @llvm.hexagon.S2.shuffoh(i64, i64) +define i64 @S2_shuffoh(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.shuffoh(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = shuffoh(r1:0, r3:2) + +; Vector splat bytes +declare i32 @llvm.hexagon.S2.vsplatrb(i32) +define i32 @S2_vsplatrb(i32 %a) { + %z = call i32 @llvm.hexagon.S2.vsplatrb(i32 %a) + ret i32 %z +} +; CHECK: r0 = vsplatb(r0) + +; Vector splat halfwords +declare i64 @llvm.hexagon.S2.vsplatrh(i32) +define i64 @S2_vsplatrh(i32 %a) { + %z = call i64 @llvm.hexagon.S2.vsplatrh(i32 %a) + ret i64 %z +} +; CHECK: = vsplath(r0) + +; Vector splice +declare i64 @llvm.hexagon.S2.vspliceib(i64, i64, i32) +define i64 @S2_vspliceib(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.vspliceib(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 = vspliceb(r1:0, r3:2, #0) + +declare i64 @llvm.hexagon.S2.vsplicerb(i64, i64, i32) +define i64 @S2_vsplicerb(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.vsplicerb(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 = vspliceb(r1:0, r3:2, p0) + +; Vector sign extend +declare i64 @llvm.hexagon.S2.vsxtbh(i32) +define i64 @S2_vsxtbh(i32 %a) { + %z = call i64 @llvm.hexagon.S2.vsxtbh(i32 %a) + ret i64 %z +} +; CHECK: = vsxtbh(r0) + +declare i64 @llvm.hexagon.S2.vsxthw(i32) +define i64 @S2_vsxthw(i32 %a) { + %z = call i64 @llvm.hexagon.S2.vsxthw(i32 %a) + ret i64 %z +} +; CHECK: = vsxthw(r0) + +; Vector truncate +declare i32 @llvm.hexagon.S2.vtrunohb(i64) +define i32 @S2_vtrunohb(i64 %a) { + %z = call i32 @llvm.hexagon.S2.vtrunohb(i64 
%a) + ret i32 %z +} +; CHECK: r0 = vtrunohb(r1:0) + +declare i32 @llvm.hexagon.S2.vtrunehb(i64) +define i32 @S2_vtrunehb(i64 %a) { + %z = call i32 @llvm.hexagon.S2.vtrunehb(i64 %a) + ret i32 %z +} +; CHECK: r0 = vtrunehb(r1:0) + +declare i64 @llvm.hexagon.S2.vtrunowh(i64, i64) +define i64 @S2_vtrunowh(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.vtrunowh(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vtrunowh(r1:0, r3:2) + +declare i64 @llvm.hexagon.S2.vtrunewh(i64, i64) +define i64 @S2_vtrunewh(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.vtrunewh(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = vtrunewh(r1:0, r3:2) + +; Vector zero extend +declare i64 @llvm.hexagon.S2.vzxtbh(i32) +define i64 @S2_vzxtbh(i32 %a) { + %z = call i64 @llvm.hexagon.S2.vzxtbh(i32 %a) + ret i64 %z +} +; CHECK: = vzxtbh(r0) + +declare i64 @llvm.hexagon.S2.vzxthw(i32) +define i64 @S2_vzxthw(i32 %a) { + %z = call i64 @llvm.hexagon.S2.vzxthw(i32 %a) + ret i64 %z +} +; CHECK: = vzxthw(r0) diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll b/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll new file mode 100644 index 000000000000..96e63d8d7790 --- /dev/null +++ b/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll @@ -0,0 +1,351 @@ +; RUN: llc -march=hexagon -O0 < %s | FileCheck %s +; Hexagon Programmer's Reference Manual 11.10.7 XTYPE/PRED + +; Compare byte +declare i32 @llvm.hexagon.A4.cmpbgt(i32, i32) +define i32 @A4_cmpbgt(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A4.cmpbgt(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = cmpb.gt(r0, r1) + +declare i32 @llvm.hexagon.A4.cmpbeq(i32, i32) +define i32 @A4_cmpbeq(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A4.cmpbeq(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = cmpb.eq(r0, r1) + +declare i32 @llvm.hexagon.A4.cmpbgtu(i32, i32) +define i32 @A4_cmpbgtu(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A4.cmpbgtu(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = cmpb.gtu(r0, r1) + +declare i32 @llvm.hexagon.A4.cmpbgti(i32, i32) +define i32 @A4_cmpbgti(i32 %a) { + %z = call i32 @llvm.hexagon.A4.cmpbgti(i32 %a, i32 0) + ret i32 %z +} +; CHECK: p0 = cmpb.gt(r0, #0) + +declare i32 @llvm.hexagon.A4.cmpbeqi(i32, i32) +define i32 @A4_cmpbeqi(i32 %a) { + %z = call i32 @llvm.hexagon.A4.cmpbeqi(i32 %a, i32 0) + ret i32 %z +} +; CHECK: p0 = cmpb.eq(r0, #0) + +declare i32 @llvm.hexagon.A4.cmpbgtui(i32, i32) +define i32 @A4_cmpbgtui(i32 %a) { + %z = call i32 @llvm.hexagon.A4.cmpbgtui(i32 %a, i32 0) + ret i32 %z +} +; CHECK: p0 = cmpb.gtu(r0, #0) + +; Compare half +declare i32 @llvm.hexagon.A4.cmphgt(i32, i32) +define i32 @A4_cmphgt(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A4.cmphgt(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = cmph.gt(r0, r1) + +declare i32 @llvm.hexagon.A4.cmpheq(i32, i32) +define i32 @A4_cmpheq(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A4.cmpheq(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = cmph.eq(r0, r1) + +declare i32 @llvm.hexagon.A4.cmphgtu(i32, i32) +define i32 @A4_cmphgtu(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A4.cmphgtu(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = cmph.gtu(r0, r1) + +declare i32 @llvm.hexagon.A4.cmphgti(i32, i32) +define i32 @A4_cmphgti(i32 %a) { + %z = call i32 @llvm.hexagon.A4.cmphgti(i32 %a, i32 0) + ret i32 %z +} +; CHECK: p0 = cmph.gt(r0, #0) + +declare i32 @llvm.hexagon.A4.cmpheqi(i32, i32) +define i32 @A4_cmpheqi(i32 %a) { + %z = call i32 @llvm.hexagon.A4.cmpheqi(i32 %a, i32 0) + ret i32 %z +} +; CHECK: p0 = cmph.eq(r0, #0) + +declare i32 @llvm.hexagon.A4.cmphgtui(i32, i32) +define i32 
@A4_cmphgtui(i32 %a) { + %z = call i32 @llvm.hexagon.A4.cmphgtui(i32 %a, i32 0) + ret i32 %z +} +; CHECK: p0 = cmph.gtu(r0, #0) + +; Compare doublewords +declare i32 @llvm.hexagon.C2.cmpgtp(i64, i64) +define i32 @C2_cmpgtp(i64 %a, i64 %b) { + %z = call i32 @llvm.hexagon.C2.cmpgtp(i64 %a, i64 %b) + ret i32 %z +} +; CHECK: p0 = cmp.gt(r1:0, r3:2) + +declare i32 @llvm.hexagon.C2.cmpeqp(i64, i64) +define i32 @C2_cmpeqp(i64 %a, i64 %b) { + %z = call i32 @llvm.hexagon.C2.cmpeqp(i64 %a, i64 %b) + ret i32 %z +} +; CHECK: p0 = cmp.eq(r1:0, r3:2) + +declare i32 @llvm.hexagon.C2.cmpgtup(i64, i64) +define i32 @C2_cmpgtup(i64 %a, i64 %b) { + %z = call i32 @llvm.hexagon.C2.cmpgtup(i64 %a, i64 %b) + ret i32 %z +} +; CHECK: p0 = cmp.gtu(r1:0, r3:2) + +; Compare bitmask +declare i32 @llvm.hexagon.C2.bitsclri(i32, i32) +define i32 @C2_bitsclri(i32 %a) { + %z = call i32 @llvm.hexagon.C2.bitsclri(i32 %a, i32 0) + ret i32 %z +} +; CHECK: p0 = bitsclr(r0, #0) + +declare i32 @llvm.hexagon.C4.nbitsclri(i32, i32) +define i32 @C4_nbitsclri(i32 %a) { + %z = call i32 @llvm.hexagon.C4.nbitsclri(i32 %a, i32 0) + ret i32 %z +} +; CHECK: p0 = !bitsclr(r0, #0) + +declare i32 @llvm.hexagon.C2.bitsset(i32, i32) +define i32 @C2_bitsset(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.C2.bitsset(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = bitsset(r0, r1) + +declare i32 @llvm.hexagon.C4.nbitsset(i32, i32) +define i32 @C4_nbitsset(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.C4.nbitsset(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = !bitsset(r0, r1) + +declare i32 @llvm.hexagon.C2.bitsclr(i32, i32) +define i32 @C2_bitsclr(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.C2.bitsclr(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = bitsclr(r0, r1) + +declare i32 @llvm.hexagon.C4.nbitsclr(i32, i32) +define i32 @C4_nbitsclr(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.C4.nbitsclr(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = !bitsclr(r0, r1) + +; Mask generate from predicate +declare i64 @llvm.hexagon.C2.mask(i32) +define i64 @C2_mask(i32 %a) { + %z = call i64 @llvm.hexagon.C2.mask(i32 %a) + ret i64 %z +} +; CHECK: = mask(p0) + +; Check for TLB match +declare i32 @llvm.hexagon.A4.tlbmatch(i64, i32) +define i32 @A4_tlbmatch(i64 %a, i32 %b) { + %z = call i32 @llvm.hexagon.A4.tlbmatch(i64 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = tlbmatch(r1:0, r2) + +; Test bit +declare i32 @llvm.hexagon.S2.tstbit.i(i32, i32) +define i32 @S2_tstbit_i(i32 %a) { + %z = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a, i32 0) + ret i32 %z +} +; CHECK: p0 = tstbit(r0, #0) + +declare i32 @llvm.hexagon.S4.ntstbit.i(i32, i32) +define i32 @S4_ntstbit_i(i32 %a) { + %z = call i32 @llvm.hexagon.S4.ntstbit.i(i32 %a, i32 0) + ret i32 %z +} +; CHECK: p0 = !tstbit(r0, #0) + +declare i32 @llvm.hexagon.S2.tstbit.r(i32, i32) +define i32 @S2_tstbit_r(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.tstbit.r(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = tstbit(r0, r1) + +declare i32 @llvm.hexagon.S4.ntstbit.r(i32, i32) +define i32 @S4_ntstbit_r(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S4.ntstbit.r(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: p0 = !tstbit(r0, r1) + +; Vector compare halfwords +declare i32 @llvm.hexagon.A2.vcmpheq(i64, i64) +define i32 @A2_vcmpheq(i64 %a, i64 %b) { + %z = call i32 @llvm.hexagon.A2.vcmpheq(i64 %a, i64 %b) + ret i32 %z +} +; CHECK: p0 = vcmph.eq(r1:0, r3:2) + +declare i32 @llvm.hexagon.A2.vcmphgt(i64, i64) +define i32 @A2_vcmphgt(i64 %a, i64 %b) { + %z = call i32 @llvm.hexagon.A2.vcmphgt(i64 %a, i64 %b) + ret i32 %z +} +; CHECK: p0 = 
vcmph.gt(r1:0, r3:2) + +declare i32 @llvm.hexagon.A2.vcmphgtu(i64, i64) +define i32 @A2_vcmphgtu(i64 %a, i64 %b) { + %z = call i32 @llvm.hexagon.A2.vcmphgtu(i64 %a, i64 %b) + ret i32 %z +} +; CHECK: p0 = vcmph.gtu(r1:0, r3:2) + +declare i32 @llvm.hexagon.A4.vcmpheqi(i64, i32) +define i32 @A4_vcmpheqi(i64 %a) { + %z = call i32 @llvm.hexagon.A4.vcmpheqi(i64 %a, i32 0) + ret i32 %z +} +; CHECK: p0 = vcmph.eq(r1:0, #0) + +declare i32 @llvm.hexagon.A4.vcmphgti(i64, i32) +define i32 @A4_vcmphgti(i64 %a) { + %z = call i32 @llvm.hexagon.A4.vcmphgti(i64 %a, i32 0) + ret i32 %z +} +; CHECK: p0 = vcmph.gt(r1:0, #0) + +declare i32 @llvm.hexagon.A4.vcmphgtui(i64, i32) +define i32 @A4_vcmphgtui(i64 %a) { + %z = call i32 @llvm.hexagon.A4.vcmphgtui(i64 %a, i32 0) + ret i32 %z +} +; CHECK: p0 = vcmph.gtu(r1:0, #0) + +; Vector compare bytes for any match +declare i32 @llvm.hexagon.A4.vcmpbeq.any(i64, i64) +define i32 @A4_vcmpbeq_any(i64 %a, i64 %b) { + %z = call i32 @llvm.hexagon.A4.vcmpbeq.any(i64 %a, i64 %b) + ret i32 %z +} +; CHECK: p0 = any8(vcmpb.eq(r1:0, r3:2)) + +; Vector compare bytes +declare i32 @llvm.hexagon.A2.vcmpbeq(i64, i64) +define i32 @A2_vcmpbeq(i64 %a, i64 %b) { + %z = call i32 @llvm.hexagon.A2.vcmpbeq(i64 %a, i64 %b) + ret i32 %z +} +; CHECK: p0 = vcmpb.eq(r1:0, r3:2) + +declare i32 @llvm.hexagon.A2.vcmpbgtu(i64, i64) +define i32 @A2_vcmpbgtu(i64 %a, i64 %b) { + %z = call i32 @llvm.hexagon.A2.vcmpbgtu(i64 %a, i64 %b) + ret i32 %z +} +; CHECK: p0 = vcmpb.gtu(r1:0, r3:2) + +declare i32 @llvm.hexagon.A4.vcmpbgt(i64, i64) +define i32 @A4_vcmpbgt(i64 %a, i64 %b) { + %z = call i32 @llvm.hexagon.A4.vcmpbgt(i64 %a, i64 %b) + ret i32 %z +} +; CHECK: p0 = vcmpb.gt(r1:0, r3:2) + +declare i32 @llvm.hexagon.A4.vcmpbeqi(i64, i32) +define i32 @A4_vcmpbeqi(i64 %a) { + %z = call i32 @llvm.hexagon.A4.vcmpbeqi(i64 %a, i32 0) + ret i32 %z +} +; CHECK: p0 = vcmpb.eq(r1:0, #0) + +declare i32 @llvm.hexagon.A4.vcmpbgti(i64, i32) +define i32 @A4_vcmpbgti(i64 %a) { + %z = call i32 @llvm.hexagon.A4.vcmpbgti(i64 %a, i32 0) + ret i32 %z +} +; CHECK: p0 = vcmpb.gt(r1:0, #0) + +declare i32 @llvm.hexagon.A4.vcmpbgtui(i64, i32) +define i32 @A4_vcmpbgtui(i64 %a) { + %z = call i32 @llvm.hexagon.A4.vcmpbgtui(i64 %a, i32 0) + ret i32 %z +} +; CHECK: p0 = vcmpb.gtu(r1:0, #0) + +; Vector compare words +declare i32 @llvm.hexagon.A2.vcmpweq(i64, i64) +define i32 @A2_vcmpweq(i64 %a, i64 %b) { + %z = call i32 @llvm.hexagon.A2.vcmpweq(i64 %a, i64 %b) + ret i32 %z +} +; CHECK: p0 = vcmpw.eq(r1:0, r3:2) + +declare i32 @llvm.hexagon.A2.vcmpwgt(i64, i64) +define i32 @A2_vcmpwgt(i64 %a, i64 %b) { + %z = call i32 @llvm.hexagon.A2.vcmpwgt(i64 %a, i64 %b) + ret i32 %z +} +; CHECK: p0 = vcmpw.gt(r1:0, r3:2) + +declare i32 @llvm.hexagon.A2.vcmpwgtu(i64, i64) +define i32 @A2_vcmpwgtu(i64 %a, i64 %b) { + %z = call i32 @llvm.hexagon.A2.vcmpwgtu(i64 %a, i64 %b) + ret i32 %z +} +; CHECK: p0 = vcmpw.gtu(r1:0, r3:2) + +declare i32 @llvm.hexagon.A4.vcmpweqi(i64, i32) +define i32 @A4_vcmpweqi(i64 %a) { + %z = call i32 @llvm.hexagon.A4.vcmpweqi(i64 %a, i32 0) + ret i32 %z +} +; CHECK: p0 = vcmpw.eq(r1:0, #0) + +declare i32 @llvm.hexagon.A4.vcmpwgti(i64, i32) +define i32 @A4_vcmpwgti(i64 %a) { + %z = call i32 @llvm.hexagon.A4.vcmpwgti(i64 %a, i32 0) + ret i32 %z +} +; CHECK: p0 = vcmpw.gt(r1:0, #0) + +declare i32 @llvm.hexagon.A4.vcmpwgtui(i64, i32) +define i32 @A4_vcmpwgtui(i64 %a) { + %z = call i32 @llvm.hexagon.A4.vcmpwgtui(i64 %a, i32 0) + ret i32 %z +} +; CHECK: p0 = vcmpw.gtu(r1:0, #0) + +; Viterbi pack even and odd predicate bitsclr +declare 
i32 @llvm.hexagon.C2.vitpack(i32, i32) +define i32 @C2_vitpack(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.C2.vitpack(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = vitpack(p1, p0) + +; Vector mux +declare i64 @llvm.hexagon.C2.vmux(i32, i64, i64) +define i64 @C2_vmux(i32 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.C2.vmux(i32 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: = vmux(p0, r3:2, r5:4) diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll b/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll new file mode 100644 index 000000000000..c84999bf94fd --- /dev/null +++ b/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll @@ -0,0 +1,723 @@ +; RUN: llc -march=hexagon -O0 < %s | FileCheck %s +; Hexagon Programmer's Reference Manual 11.10.8 XTYPE/SHIFT + +; Shift by immediate +declare i64 @llvm.hexagon.S2.asr.i.p(i64, i32) +define i64 @S2_asr_i_p(i64 %a) { + %z = call i64 @llvm.hexagon.S2.asr.i.p(i64 %a, i32 0) + ret i64 %z +} +; CHECK: r1:0 = asr(r1:0, #0) + +declare i64 @llvm.hexagon.S2.lsr.i.p(i64, i32) +define i64 @S2_lsr_i_p(i64 %a) { + %z = call i64 @llvm.hexagon.S2.lsr.i.p(i64 %a, i32 0) + ret i64 %z +} +; CHECK: r1:0 = lsr(r1:0, #0) + +declare i64 @llvm.hexagon.S2.asl.i.p(i64, i32) +define i64 @S2_asl_i_p(i64 %a) { + %z = call i64 @llvm.hexagon.S2.asl.i.p(i64 %a, i32 0) + ret i64 %z +} +; CHECK: r1:0 = asl(r1:0, #0) + +declare i32 @llvm.hexagon.S2.asr.i.r(i32, i32) +define i32 @S2_asr_i_r(i32 %a) { + %z = call i32 @llvm.hexagon.S2.asr.i.r(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = asr(r0, #0) + +declare i32 @llvm.hexagon.S2.lsr.i.r(i32, i32) +define i32 @S2_lsr_i_r(i32 %a) { + %z = call i32 @llvm.hexagon.S2.lsr.i.r(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = lsr(r0, #0) + +declare i32 @llvm.hexagon.S2.asl.i.r(i32, i32) +define i32 @S2_asl_i_r(i32 %a) { + %z = call i32 @llvm.hexagon.S2.asl.i.r(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = asl(r0, #0) + +; Shift by immediate and accumulate +declare i64 @llvm.hexagon.S2.asr.i.p.nac(i64, i64, i32) +define i64 @S2_asr_i_p_nac(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.asr.i.p.nac(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 -= asr(r3:2, #0) + +declare i64 @llvm.hexagon.S2.lsr.i.p.nac(i64, i64, i32) +define i64 @S2_lsr_i_p_nac(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.lsr.i.p.nac(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 -= lsr(r3:2, #0) + +declare i64 @llvm.hexagon.S2.asl.i.p.nac(i64, i64, i32) +define i64 @S2_asl_i_p_nac(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.asl.i.p.nac(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 -= asl(r3:2, #0) + +declare i64 @llvm.hexagon.S2.asr.i.p.acc(i64, i64, i32) +define i64 @S2_asr_i_p_acc(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.asr.i.p.acc(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 += asr(r3:2, #0) + +declare i64 @llvm.hexagon.S2.lsr.i.p.acc(i64, i64, i32) +define i64 @S2_lsr_i_p_acc(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.lsr.i.p.acc(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 += lsr(r3:2, #0) + +declare i64 @llvm.hexagon.S2.asl.i.p.acc(i64, i64, i32) +define i64 @S2_asl_i_p_acc(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.asl.i.p.acc(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 += asl(r3:2, #0) + +declare i32 @llvm.hexagon.S2.asr.i.r.nac(i32, i32, i32) +define i32 @S2_asr_i_r_nac(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asr.i.r.nac(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 -= asr(r1, #0) + +declare i32 @llvm.hexagon.S2.lsr.i.r.nac(i32, i32, i32) +define i32 
@S2_lsr_i_r_nac(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.lsr.i.r.nac(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 -= lsr(r1, #0) + +declare i32 @llvm.hexagon.S2.asl.i.r.nac(i32, i32, i32) +define i32 @S2_asl_i_r_nac(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asl.i.r.nac(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 -= asl(r1, #0) + +declare i32 @llvm.hexagon.S2.asr.i.r.acc(i32, i32, i32) +define i32 @S2_asr_i_r_acc(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asr.i.r.acc(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 += asr(r1, #0) + +declare i32 @llvm.hexagon.S2.lsr.i.r.acc(i32, i32, i32) +define i32 @S2_lsr_i_r_acc(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.lsr.i.r.acc(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 += lsr(r1, #0) + +declare i32 @llvm.hexagon.S2.asl.i.r.acc(i32, i32, i32) +define i32 @S2_asl_i_r_acc(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asl.i.r.acc(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 += asl(r1, #0) + +; Shift by immediate and add +declare i32 @llvm.hexagon.S4.addi.asl.ri(i32, i32, i32) +define i32 @S4_addi_asl_ri(i32 %a) { + %z = call i32 @llvm.hexagon.S4.addi.asl.ri(i32 0, i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = add(#0, asl(r0, #0)) + +declare i32 @llvm.hexagon.S4.subi.asl.ri(i32, i32, i32) +define i32 @S4_subi_asl_ri(i32 %a) { + %z = call i32 @llvm.hexagon.S4.subi.asl.ri(i32 0, i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = sub(#0, asl(r0, #0)) + +declare i32 @llvm.hexagon.S4.addi.lsr.ri(i32, i32, i32) +define i32 @S4_addi_lsr_ri(i32 %a) { + %z = call i32 @llvm.hexagon.S4.addi.lsr.ri(i32 0, i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = add(#0, lsr(r0, #0)) + +declare i32 @llvm.hexagon.S4.subi.lsr.ri(i32, i32, i32) +define i32 @S4_subi_lsr_ri(i32 %a) { + %z = call i32 @llvm.hexagon.S4.subi.lsr.ri(i32 0, i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = sub(#0, lsr(r0, #0)) + +declare i32 @llvm.hexagon.S2.addasl.rrri(i32, i32, i32) +define i32 @S2_addasl_rrri(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.addasl.rrri(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 = addasl(r0, r1, #0) + +; Shift by immediate and logical +declare i64 @llvm.hexagon.S2.asr.i.p.and(i64, i64, i32) +define i64 @S2_asr_i_p_and(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.asr.i.p.and(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 &= asr(r3:2, #0) + +declare i64 @llvm.hexagon.S2.lsr.i.p.and(i64, i64, i32) +define i64 @S2_lsr_i_p_and(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.lsr.i.p.and(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 &= lsr(r3:2, #0) + +declare i64 @llvm.hexagon.S2.asl.i.p.and(i64, i64, i32) +define i64 @S2_asl_i_p_and(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.asl.i.p.and(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 &= asl(r3:2, #0) + +declare i64 @llvm.hexagon.S2.asr.i.p.or(i64, i64, i32) +define i64 @S2_asr_i_p_or(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.asr.i.p.or(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 |= asr(r3:2, #0) + +declare i64 @llvm.hexagon.S2.lsr.i.p.or(i64, i64, i32) +define i64 @S2_lsr_i_p_or(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.lsr.i.p.or(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 |= lsr(r3:2, #0) + +declare i64 @llvm.hexagon.S2.asl.i.p.or(i64, i64, i32) +define i64 @S2_asl_i_p_or(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.asl.i.p.or(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 |= asl(r3:2, #0) + +declare i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64, i64, i32) +define i64 
@S2_lsr_i_p_xacc(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 ^= lsr(r3:2, #0) + +declare i64 @llvm.hexagon.S2.asl.i.p.xacc(i64, i64, i32) +define i64 @S2_asl_i_p_xacc(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.asl.i.p.xacc(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 ^= asl(r3:2, #0) + +declare i32 @llvm.hexagon.S2.asr.i.r.and(i32, i32, i32) +define i32 @S2_asr_i_r_and(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asr.i.r.and(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 &= asr(r1, #0) + +declare i32 @llvm.hexagon.S2.lsr.i.r.and(i32, i32, i32) +define i32 @S2_lsr_i_r_and(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.lsr.i.r.and(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 &= lsr(r1, #0) + +declare i32 @llvm.hexagon.S2.asl.i.r.and(i32, i32, i32) +define i32 @S2_asl_i_r_and(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asl.i.r.and(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 &= asl(r1, #0) + +declare i32 @llvm.hexagon.S2.asr.i.r.or(i32, i32, i32) +define i32 @S2_asr_i_r_or(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asr.i.r.or(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 |= asr(r1, #0) + +declare i32 @llvm.hexagon.S2.lsr.i.r.or(i32, i32, i32) +define i32 @S2_lsr_i_r_or(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.lsr.i.r.or(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 |= lsr(r1, #0) + +declare i32 @llvm.hexagon.S2.asl.i.r.or(i32, i32, i32) +define i32 @S2_asl_i_r_or(i32%a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asl.i.r.or(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 |= asl(r1, #0) + +declare i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32, i32, i32) +define i32 @S2_lsr_i_r_xacc(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32%a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 ^= lsr(r1, #0) + +declare i32 @llvm.hexagon.S2.asl.i.r.xacc(i32, i32, i32) +define i32 @S2_asl_i_r_xacc(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asl.i.r.xacc(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 ^= asl(r1, #0) + +declare i32 @llvm.hexagon.S4.andi.asl.ri(i32, i32, i32) +define i32 @S4_andi_asl_ri(i32 %a) { + %z = call i32 @llvm.hexagon.S4.andi.asl.ri(i32 0, i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = and(#0, asl(r0, #0)) + +declare i32 @llvm.hexagon.S4.ori.asl.ri(i32, i32, i32) +define i32 @S4_ori_asl_ri(i32 %a) { + %z = call i32 @llvm.hexagon.S4.ori.asl.ri(i32 0, i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = or(#0, asl(r0, #0)) + +declare i32 @llvm.hexagon.S4.andi.lsr.ri(i32, i32, i32) +define i32 @S4_andi_lsr_ri(i32 %a) { + %z = call i32 @llvm.hexagon.S4.andi.lsr.ri(i32 0, i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = and(#0, lsr(r0, #0)) + +declare i32 @llvm.hexagon.S4.ori.lsr.ri(i32, i32, i32) +define i32 @S4_ori_lsr_ri(i32 %a) { + %z = call i32 @llvm.hexagon.S4.ori.lsr.ri(i32 0, i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = or(#0, lsr(r0, #0)) + +; Shift right by immediate with rounding +declare i64 @llvm.hexagon.S2.asr.i.p.rnd(i64, i32) +define i64 @S2_asr_i_p_rnd(i64 %a) { + %z = call i64 @llvm.hexagon.S2.asr.i.p.rnd(i64 %a, i32 0) + ret i64 %z +} +; CHECK: r1:0 = asr(r1:0, #0):rnd + +declare i32 @llvm.hexagon.S2.asr.i.r.rnd(i32, i32) +define i32 @S2_asr_i_r_rnd(i32 %a) { + %z = call i32 @llvm.hexagon.S2.asr.i.r.rnd(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = asr(r0, #0):rnd + +; Shift left by immediate with saturation +declare i32 @llvm.hexagon.S2.asl.i.r.sat(i32, i32) +define i32 @S2_asl_i_r_sat(i32 %a) { + %z = call 
i32 @llvm.hexagon.S2.asl.i.r.sat(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = asl(r0, #0):sat + +; Shift by register +declare i64 @llvm.hexagon.S2.asr.r.p(i64, i32) +define i64 @S2_asr_r_p(i64 %a, i32 %b) { + %z = call i64 @llvm.hexagon.S2.asr.r.p(i64 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = asr(r1:0, r2) + +declare i64 @llvm.hexagon.S2.lsr.r.p(i64, i32) +define i64 @S2_lsr_r_p(i64 %a, i32 %b) { + %z = call i64 @llvm.hexagon.S2.lsr.r.p(i64 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = lsr(r1:0, r2) + +declare i64 @llvm.hexagon.S2.asl.r.p(i64, i32) +define i64 @S2_asl_r_p(i64 %a, i32 %b) { + %z = call i64 @llvm.hexagon.S2.asl.r.p(i64 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = asl(r1:0, r2) + +declare i64 @llvm.hexagon.S2.lsl.r.p(i64, i32) +define i64 @S2_lsl_r_p(i64 %a, i32 %b) { + %z = call i64 @llvm.hexagon.S2.lsl.r.p(i64 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = lsl(r1:0, r2) + +declare i32 @llvm.hexagon.S2.asr.r.r(i32, i32) +define i32 @S2_asr_r_r(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asr.r.r(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = asr(r0, r1) + +declare i32 @llvm.hexagon.S2.lsr.r.r(i32, i32) +define i32 @S2_lsr_r_r(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.lsr.r.r(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = lsr(r0, r1) + +declare i32 @llvm.hexagon.S2.asl.r.r(i32, i32) +define i32 @S2_asl_r_r(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asl.r.r(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = asl(r0, r1) + +declare i32 @llvm.hexagon.S2.lsl.r.r(i32, i32) +define i32 @S2_lsl_r_r(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.lsl.r.r(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = lsl(r0, r1) + +declare i32 @llvm.hexagon.S4.lsli(i32, i32) +define i32 @S4_lsli(i32 %a) { + %z = call i32 @llvm.hexagon.S4.lsli(i32 0, i32 %a) + ret i32 %z +} +; CHECK: r0 = lsl(#0, r0) + +; Shift by register and accumulate +declare i64 @llvm.hexagon.S2.asr.r.p.nac(i64, i64, i32) +define i64 @S2_asr_r_p_nac(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.asr.r.p.nac(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= asr(r3:2, r4) + +declare i64 @llvm.hexagon.S2.lsr.r.p.nac(i64, i64, i32) +define i64 @S2_lsr_r_p_nac(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.lsr.r.p.nac(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= lsr(r3:2, r4) + +declare i64 @llvm.hexagon.S2.asl.r.p.nac(i64, i64, i32) +define i64 @S2_asl_r_p_nac(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.asl.r.p.nac(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= asl(r3:2, r4) + +declare i64 @llvm.hexagon.S2.lsl.r.p.nac(i64, i64, i32) +define i64 @S2_lsl_r_p_nac(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.lsl.r.p.nac(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= lsl(r3:2, r4) + +declare i64 @llvm.hexagon.S2.asr.r.p.acc(i64, i64, i32) +define i64 @S2_asr_r_p_acc(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.asr.r.p.acc(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += asr(r3:2, r4) + +declare i64 @llvm.hexagon.S2.lsr.r.p.acc(i64, i64, i32) +define i64 @S2_lsr_r_p_acc(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.lsr.r.p.acc(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += lsr(r3:2, r4) + +declare i64 @llvm.hexagon.S2.asl.r.p.acc(i64, i64, i32) +define i64 @S2_asl_r_p_acc(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.asl.r.p.acc(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += asl(r3:2, r4) + +declare i64 @llvm.hexagon.S2.lsl.r.p.acc(i64, i64, i32) +define 
i64 @S2_lsl_r_p_acc(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.lsl.r.p.acc(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += lsl(r3:2, r4) + +declare i32 @llvm.hexagon.S2.asr.r.r.nac(i32, i32, i32) +define i32 @S2_asr_r_r_nac(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.asr.r.r.nac(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= asr(r1, r2) + +declare i32 @llvm.hexagon.S2.lsr.r.r.nac(i32, i32, i32) +define i32 @S2_lsr_r_r_nac(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.lsr.r.r.nac(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= lsr(r1, r2) + +declare i32 @llvm.hexagon.S2.asl.r.r.nac(i32, i32, i32) +define i32 @S2_asl_r_r_nac(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.asl.r.r.nac(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= asl(r1, r2) + +declare i32 @llvm.hexagon.S2.lsl.r.r.nac(i32, i32, i32) +define i32 @S2_lsl_r_r_nac(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.lsl.r.r.nac(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= lsl(r1, r2) + +declare i32 @llvm.hexagon.S2.asr.r.r.acc(i32, i32, i32) +define i32 @S2_asr_r_r_acc(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.asr.r.r.acc(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += asr(r1, r2) + +declare i32 @llvm.hexagon.S2.lsr.r.r.acc(i32, i32, i32) +define i32 @S2_lsr_r_r_acc(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.lsr.r.r.acc(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += lsr(r1, r2) + +declare i32 @llvm.hexagon.S2.asl.r.r.acc(i32, i32, i32) +define i32 @S2_asl_r_r_acc(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.asl.r.r.acc(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += asl(r1, r2) + +declare i32 @llvm.hexagon.S2.lsl.r.r.acc(i32, i32, i32) +define i32 @S2_lsl_r_r_acc(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.lsl.r.r.acc(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += lsl(r1, r2) + +; Shift by register and logical +declare i64 @llvm.hexagon.S2.asr.r.p.or(i64, i64, i32) +define i64 @S2_asr_r_p_or(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.asr.r.p.or(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 |= asr(r3:2, r4) + +declare i64 @llvm.hexagon.S2.lsr.r.p.or(i64, i64, i32) +define i64 @S2_lsr_r_p_or(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.lsr.r.p.or(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 |= lsr(r3:2, r4) + +declare i64 @llvm.hexagon.S2.asl.r.p.or(i64, i64, i32) +define i64 @S2_asl_r_p_or(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.asl.r.p.or(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 |= asl(r3:2, r4) + +declare i64 @llvm.hexagon.S2.lsl.r.p.or(i64, i64, i32) +define i64 @S2_lsl_r_p_or(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.lsl.r.p.or(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 |= lsl(r3:2, r4) + +declare i64 @llvm.hexagon.S2.asr.r.p.and(i64, i64, i32) +define i64 @S2_asr_r_p_and(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.asr.r.p.and(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 &= asr(r3:2, r4) + +declare i64 @llvm.hexagon.S2.lsr.r.p.and(i64, i64, i32) +define i64 @S2_lsr_r_p_and(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.lsr.r.p.and(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 &= lsr(r3:2, r4) + +declare i64 @llvm.hexagon.S2.asl.r.p.and(i64, i64, i32) +define i64 @S2_asl_r_p_and(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.asl.r.p.and(i64 %a, i64 %b, i32 %c) 
+ ret i64 %z +} +; CHECK: r1:0 &= asl(r3:2, r4) + +declare i64 @llvm.hexagon.S2.lsl.r.p.and(i64, i64, i32) +define i64 @S2_lsl_r_p_and(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.lsl.r.p.and(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 &= lsl(r3:2, r4) + +declare i32 @llvm.hexagon.S2.asr.r.r.or(i32, i32, i32) +define i32 @S2_asr_r_r_or(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.asr.r.r.or(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 |= asr(r1, r2) + +declare i32 @llvm.hexagon.S2.lsr.r.r.or(i32, i32, i32) +define i32 @S2_lsr_r_r_or(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.lsr.r.r.or(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 |= lsr(r1, r2) + +declare i32 @llvm.hexagon.S2.asl.r.r.or(i32, i32, i32) +define i32 @S2_asl_r_r_or(i32%a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.asl.r.r.or(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 |= asl(r1, r2) + +declare i32 @llvm.hexagon.S2.lsl.r.r.or(i32, i32, i32) +define i32 @S2_lsl_r_r_or(i32%a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.lsl.r.r.or(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 |= lsl(r1, r2) + +declare i32 @llvm.hexagon.S2.asr.r.r.and(i32, i32, i32) +define i32 @S2_asr_r_r_and(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.asr.r.r.and(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 &= asr(r1, r2) + +declare i32 @llvm.hexagon.S2.lsr.r.r.and(i32, i32, i32) +define i32 @S2_lsr_r_r_and(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.lsr.r.r.and(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 &= lsr(r1, r2) + +declare i32 @llvm.hexagon.S2.asl.r.r.and(i32, i32, i32) +define i32 @S2_asl_r_r_and(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.asl.r.r.and(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 &= asl(r1, r2) + +declare i32 @llvm.hexagon.S2.lsl.r.r.and(i32, i32, i32) +define i32 @S2_lsl_r_r_and(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.lsl.r.r.and(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 &= lsl(r1, r2) + +; Shift by register with saturation +declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32) +define i32 @S2_asr_r_r_sat(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = asr(r0, r1):sat + +declare i32 @llvm.hexagon.S2.asl.r.r.sat(i32, i32) +define i32 @S2_asl_r_r_sat(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = asl(r0, r1):sat + +; Vector shift halfwords by immediate +declare i64 @llvm.hexagon.S2.asr.i.vh(i64, i32) +define i64 @S2_asr_i_vh(i64 %a) { + %z = call i64 @llvm.hexagon.S2.asr.i.vh(i64 %a, i32 0) + ret i64 %z +} +; CHECK: r1:0 = vasrh(r1:0, #0) + +declare i64 @llvm.hexagon.S2.lsr.i.vh(i64, i32) +define i64 @S2_lsr_i_vh(i64 %a) { + %z = call i64 @llvm.hexagon.S2.lsr.i.vh(i64 %a, i32 0) + ret i64 %z +} +; CHECK: r1:0 = vlsrh(r1:0, #0) + +declare i64 @llvm.hexagon.S2.asl.i.vh(i64, i32) +define i64 @S2_asl_i_vh(i64 %a) { + %z = call i64 @llvm.hexagon.S2.asl.i.vh(i64 %a, i32 0) + ret i64 %z +} +; CHECK: r1:0 = vaslh(r1:0, #0) + +; Vector shift halfwords by register +declare i64 @llvm.hexagon.S2.asr.r.vh(i64, i32) +define i64 @S2_asr_r_vh(i64 %a, i32 %b) { + %z = call i64 @llvm.hexagon.S2.asr.r.vh(i64 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = vasrh(r1:0, r2) + +declare i64 @llvm.hexagon.S2.lsr.r.vh(i64, i32) +define i64 @S2_lsr_r_vh(i64 %a, i32 %b) { + %z = call i64 @llvm.hexagon.S2.lsr.r.vh(i64 %a, i32 %b) + ret i64 %z +} +; 
CHECK: r1:0 = vlsrh(r1:0, r2) + +declare i64 @llvm.hexagon.S2.asl.r.vh(i64, i32) +define i64 @S2_asl_r_vh(i64 %a, i32 %b) { + %z = call i64 @llvm.hexagon.S2.asl.r.vh(i64 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = vaslh(r1:0, r2) + +declare i64 @llvm.hexagon.S2.lsl.r.vh(i64, i32) +define i64 @S2_lsl_r_vh(i64 %a, i32 %b) { + %z = call i64 @llvm.hexagon.S2.lsl.r.vh(i64 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = vlslh(r1:0, r2) + +; Vector shift words by immediate +declare i64 @llvm.hexagon.S2.asr.i.vw(i64, i32) +define i64 @S2_asr_i_vw(i64 %a) { + %z = call i64 @llvm.hexagon.S2.asr.i.vw(i64 %a, i32 0) + ret i64 %z +} +; CHECK: r1:0 = vasrw(r1:0, #0) + +declare i64 @llvm.hexagon.S2.lsr.i.vw(i64, i32) +define i64 @S2_lsr_i_vw(i64 %a) { + %z = call i64 @llvm.hexagon.S2.lsr.i.vw(i64 %a, i32 0) + ret i64 %z +} +; CHECK: r1:0 = vlsrw(r1:0, #0) + +declare i64 @llvm.hexagon.S2.asl.i.vw(i64, i32) +define i64 @S2_asl_i_vw(i64 %a) { + %z = call i64 @llvm.hexagon.S2.asl.i.vw(i64 %a, i32 0) + ret i64 %z +} +; CHECK: r1:0 = vaslw(r1:0, #0) + +; Vector shift words by with truncate and pack +declare i32 @llvm.hexagon.S2.asr.i.svw.trun(i64, i32) +define i32 @S2_asr_i_svw_trun(i64 %a) { + %z = call i32 @llvm.hexagon.S2.asr.i.svw.trun(i64 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = vasrw(r1:0, #0) + +declare i32 @llvm.hexagon.S2.asr.r.svw.trun(i64, i32) +define i32 @S2_asr_r_svw_trun(i64 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asr.r.svw.trun(i64 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = vasrw(r1:0, r2) diff --git a/test/CodeGen/Hexagon/macint.ll b/test/CodeGen/Hexagon/macint.ll index b3b9d0ee7a01..514ba5b91308 100644 --- a/test/CodeGen/Hexagon/macint.ll +++ b/test/CodeGen/Hexagon/macint.ll @@ -1,11 +1,11 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s ; Check that we generate integer multiply accumulate. -; CHECK: r{{[0-9]+}} += mpyi(r{{[0-9]+}}, r{{[0-9]+}}) +; CHECK: r{{[0-9]+}} {{\+|\-}}= mpyi(r{{[0-9]+}}, define i32 @main(i32* %a, i32* %b) nounwind { entry: - %0 = load i32* %a, align 4 + %0 = load i32, i32* %a, align 4 %div = udiv i32 %0, 10000 %rem = urem i32 %div, 10 store i32 %rem, i32* %b, align 4 diff --git a/test/CodeGen/Hexagon/mem-fi-add.ll b/test/CodeGen/Hexagon/mem-fi-add.ll new file mode 100644 index 000000000000..a46029fdb5ec --- /dev/null +++ b/test/CodeGen/Hexagon/mem-fi-add.ll @@ -0,0 +1,29 @@ +; RUN: llc -O2 < %s | FileCheck %s +; Look for four stores directly via r29. 
+; CHECK: memd(r29 +; CHECK: memd(r29 +; CHECK: memd(r29 +; CHECK: memd(r29 + +target datalayout = "e-m:e-p:32:32-i1:32-i64:64-a:0-v32:32-n16:32" +target triple = "hexagon" + +; Function Attrs: nounwind +define void @foo() #0 { +entry: + %t = alloca [4 x [2 x i32]], align 8 + %0 = bitcast [4 x [2 x i32]]* %t to i8* + call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 32, i32 8, i1 false) + %arraydecay = getelementptr inbounds [4 x [2 x i32]], [4 x [2 x i32]]* %t, i32 0, i32 0 + call void @bar([2 x i32]* %arraydecay) #1 + ret void +} + +; Function Attrs: nounwind +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) #1 + +declare void @bar([2 x i32]*) #2 + +attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { nounwind } +attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } diff --git a/test/CodeGen/Hexagon/memops.ll b/test/CodeGen/Hexagon/memops.ll index fca1a73811a9..e4a8bf7c95e9 100644 --- a/test/CodeGen/Hexagon/memops.ll +++ b/test/CodeGen/Hexagon/memops.ll @@ -4,7 +4,7 @@ define void @memop_unsigned_char_add5(i8* nocapture %p) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 5 %conv1 = trunc i32 %add to i8 @@ -16,7 +16,7 @@ define void @memop_unsigned_char_add(i8* nocapture %p, i8 zeroext %x) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} %conv = zext i8 %x to i32 - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %conv1 = zext i8 %0 to i32 %add = add nsw i32 %conv1, %conv %conv2 = trunc i32 %add to i8 @@ -28,7 +28,7 @@ define void @memop_unsigned_char_sub(i8* nocapture %p, i8 zeroext %x) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} %conv = zext i8 %x to i32 - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %conv1 = zext i8 %0 to i32 %sub = sub nsw i32 %conv1, %conv %conv2 = trunc i32 %sub to i8 @@ -39,7 +39,7 @@ entry: define void @memop_unsigned_char_or(i8* nocapture %p, i8 zeroext %x) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %or3 = or i8 %0, %x store i8 %or3, i8* %p, align 1 ret void @@ -48,7 +48,7 @@ entry: define void @memop_unsigned_char_and(i8* nocapture %p, i8 zeroext %x) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %and3 = and i8 %0, %x store i8 %and3, i8* %p, align 1 ret void @@ -57,7 +57,7 @@ entry: define void @memop_unsigned_char_clrbit(i8* nocapture %p) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %conv = zext i8 %0 to i32 %and = and i32 %conv, 223 %conv1 = trunc i32 %and to i8 @@ -68,7 +68,7 @@ entry: define void @memop_unsigned_char_setbit(i8* nocapture %p) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, 
align 1 %conv = zext i8 %0 to i32 %or = or i32 %conv, 128 %conv1 = trunc i32 %or to i8 @@ -79,8 +79,8 @@ entry: define void @memop_unsigned_char_add5_index(i8* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 - %add.ptr = getelementptr inbounds i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i + %0 = load i8, i8* %add.ptr, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 5 %conv1 = trunc i32 %add to i8 @@ -92,8 +92,8 @@ define void @memop_unsigned_char_add_index(i8* nocapture %p, i32 %i, i8 zeroext entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} %conv = zext i8 %x to i32 - %add.ptr = getelementptr inbounds i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i + %0 = load i8, i8* %add.ptr, align 1 %conv1 = zext i8 %0 to i32 %add = add nsw i32 %conv1, %conv %conv2 = trunc i32 %add to i8 @@ -105,8 +105,8 @@ define void @memop_unsigned_char_sub_index(i8* nocapture %p, i32 %i, i8 zeroext entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} %conv = zext i8 %x to i32 - %add.ptr = getelementptr inbounds i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i + %0 = load i8, i8* %add.ptr, align 1 %conv1 = zext i8 %0 to i32 %sub = sub nsw i32 %conv1, %conv %conv2 = trunc i32 %sub to i8 @@ -117,8 +117,8 @@ entry: define void @memop_unsigned_char_or_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i + %0 = load i8, i8* %add.ptr, align 1 %or3 = or i8 %0, %x store i8 %or3, i8* %add.ptr, align 1 ret void @@ -127,8 +127,8 @@ entry: define void @memop_unsigned_char_and_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i + %0 = load i8, i8* %add.ptr, align 1 %and3 = and i8 %0, %x store i8 %and3, i8* %add.ptr, align 1 ret void @@ -137,8 +137,8 @@ entry: define void @memop_unsigned_char_clrbit_index(i8* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) - %add.ptr = getelementptr inbounds i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i + %0 = load i8, i8* %add.ptr, align 1 %conv = zext i8 %0 to i32 %and = and i32 %conv, 223 %conv1 = trunc i32 %and to i8 @@ -149,8 +149,8 @@ entry: define void @memop_unsigned_char_setbit_index(i8* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) - %add.ptr = getelementptr inbounds i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i + %0 = load i8, i8* %add.ptr, align 1 %conv = zext i8 %0 to i32 %or = or i32 %conv, 128 %conv1 = trunc i32 %or to i8 @@ -161,8 +161,8 @@ entry: define void @memop_unsigned_char_add5_index5(i8* nocapture %p) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}#5 - %add.ptr = getelementptr inbounds i8* %p, i32 5 - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 + %0 = 
load i8, i8* %add.ptr, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 5 %conv1 = trunc i32 %add to i8 @@ -174,8 +174,8 @@ define void @memop_unsigned_char_add_index5(i8* nocapture %p, i8 zeroext %x) nou entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}r{{[0-9]+}} %conv = zext i8 %x to i32 - %add.ptr = getelementptr inbounds i8* %p, i32 5 - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 + %0 = load i8, i8* %add.ptr, align 1 %conv1 = zext i8 %0 to i32 %add = add nsw i32 %conv1, %conv %conv2 = trunc i32 %add to i8 @@ -187,8 +187,8 @@ define void @memop_unsigned_char_sub_index5(i8* nocapture %p, i8 zeroext %x) nou entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}-={{ *}}r{{[0-9]+}} %conv = zext i8 %x to i32 - %add.ptr = getelementptr inbounds i8* %p, i32 5 - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 + %0 = load i8, i8* %add.ptr, align 1 %conv1 = zext i8 %0 to i32 %sub = sub nsw i32 %conv1, %conv %conv2 = trunc i32 %sub to i8 @@ -199,8 +199,8 @@ entry: define void @memop_unsigned_char_or_index5(i8* nocapture %p, i8 zeroext %x) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}|={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i8* %p, i32 5 - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 + %0 = load i8, i8* %add.ptr, align 1 %or3 = or i8 %0, %x store i8 %or3, i8* %add.ptr, align 1 ret void @@ -209,8 +209,8 @@ entry: define void @memop_unsigned_char_and_index5(i8* nocapture %p, i8 zeroext %x) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}&={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i8* %p, i32 5 - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 + %0 = load i8, i8* %add.ptr, align 1 %and3 = and i8 %0, %x store i8 %and3, i8* %add.ptr, align 1 ret void @@ -219,8 +219,8 @@ entry: define void @memop_unsigned_char_clrbit_index5(i8* nocapture %p) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) - %add.ptr = getelementptr inbounds i8* %p, i32 5 - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 + %0 = load i8, i8* %add.ptr, align 1 %conv = zext i8 %0 to i32 %and = and i32 %conv, 223 %conv1 = trunc i32 %and to i8 @@ -231,8 +231,8 @@ entry: define void @memop_unsigned_char_setbit_index5(i8* nocapture %p) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) - %add.ptr = getelementptr inbounds i8* %p, i32 5 - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 + %0 = load i8, i8* %add.ptr, align 1 %conv = zext i8 %0 to i32 %or = or i32 %conv, 128 %conv1 = trunc i32 %or to i8 @@ -243,7 +243,7 @@ entry: define void @memop_signed_char_add5(i8* nocapture %p) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %conv2 = zext i8 %0 to i32 %add = add nsw i32 %conv2, 5 %conv1 = trunc i32 %add to i8 @@ -255,7 +255,7 @@ define void @memop_signed_char_add(i8* nocapture %p, i8 signext %x) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} %conv4 = zext i8 %x to i32 - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %conv13 = zext i8 %0 to i32 %add = add nsw i32 %conv13, %conv4 %conv2 = trunc i32 %add to i8 @@ -267,7 +267,7 @@ define void @memop_signed_char_sub(i8* nocapture %p, i8 signext %x) nounwind { 
entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} %conv4 = zext i8 %x to i32 - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %conv13 = zext i8 %0 to i32 %sub = sub nsw i32 %conv13, %conv4 %conv2 = trunc i32 %sub to i8 @@ -278,7 +278,7 @@ entry: define void @memop_signed_char_or(i8* nocapture %p, i8 signext %x) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %or3 = or i8 %0, %x store i8 %or3, i8* %p, align 1 ret void @@ -287,7 +287,7 @@ entry: define void @memop_signed_char_and(i8* nocapture %p, i8 signext %x) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %and3 = and i8 %0, %x store i8 %and3, i8* %p, align 1 ret void @@ -296,7 +296,7 @@ entry: define void @memop_signed_char_clrbit(i8* nocapture %p) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %conv2 = zext i8 %0 to i32 %and = and i32 %conv2, 223 %conv1 = trunc i32 %and to i8 @@ -307,7 +307,7 @@ entry: define void @memop_signed_char_setbit(i8* nocapture %p) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %conv2 = zext i8 %0 to i32 %or = or i32 %conv2, 128 %conv1 = trunc i32 %or to i8 @@ -318,8 +318,8 @@ entry: define void @memop_signed_char_add5_index(i8* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 - %add.ptr = getelementptr inbounds i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i + %0 = load i8, i8* %add.ptr, align 1 %conv2 = zext i8 %0 to i32 %add = add nsw i32 %conv2, 5 %conv1 = trunc i32 %add to i8 @@ -331,8 +331,8 @@ define void @memop_signed_char_add_index(i8* nocapture %p, i32 %i, i8 signext %x entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} %conv4 = zext i8 %x to i32 - %add.ptr = getelementptr inbounds i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i + %0 = load i8, i8* %add.ptr, align 1 %conv13 = zext i8 %0 to i32 %add = add nsw i32 %conv13, %conv4 %conv2 = trunc i32 %add to i8 @@ -344,8 +344,8 @@ define void @memop_signed_char_sub_index(i8* nocapture %p, i32 %i, i8 signext %x entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} %conv4 = zext i8 %x to i32 - %add.ptr = getelementptr inbounds i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i + %0 = load i8, i8* %add.ptr, align 1 %conv13 = zext i8 %0 to i32 %sub = sub nsw i32 %conv13, %conv4 %conv2 = trunc i32 %sub to i8 @@ -356,8 +356,8 @@ entry: define void @memop_signed_char_or_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i + %0 = load i8, i8* %add.ptr, align 1 %or3 = or i8 %0, %x store i8 %or3, i8* %add.ptr, align 1 ret void @@ -366,8 +366,8 @@ entry: define void @memop_signed_char_and_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i8* %p, 
i32 %i - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i + %0 = load i8, i8* %add.ptr, align 1 %and3 = and i8 %0, %x store i8 %and3, i8* %add.ptr, align 1 ret void @@ -376,8 +376,8 @@ entry: define void @memop_signed_char_clrbit_index(i8* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) - %add.ptr = getelementptr inbounds i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i + %0 = load i8, i8* %add.ptr, align 1 %conv2 = zext i8 %0 to i32 %and = and i32 %conv2, 223 %conv1 = trunc i32 %and to i8 @@ -388,8 +388,8 @@ entry: define void @memop_signed_char_setbit_index(i8* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) - %add.ptr = getelementptr inbounds i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i + %0 = load i8, i8* %add.ptr, align 1 %conv2 = zext i8 %0 to i32 %or = or i32 %conv2, 128 %conv1 = trunc i32 %or to i8 @@ -400,8 +400,8 @@ entry: define void @memop_signed_char_add5_index5(i8* nocapture %p) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}#5 - %add.ptr = getelementptr inbounds i8* %p, i32 5 - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 + %0 = load i8, i8* %add.ptr, align 1 %conv2 = zext i8 %0 to i32 %add = add nsw i32 %conv2, 5 %conv1 = trunc i32 %add to i8 @@ -413,8 +413,8 @@ define void @memop_signed_char_add_index5(i8* nocapture %p, i8 signext %x) nounw entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}r{{[0-9]+}} %conv4 = zext i8 %x to i32 - %add.ptr = getelementptr inbounds i8* %p, i32 5 - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 + %0 = load i8, i8* %add.ptr, align 1 %conv13 = zext i8 %0 to i32 %add = add nsw i32 %conv13, %conv4 %conv2 = trunc i32 %add to i8 @@ -426,8 +426,8 @@ define void @memop_signed_char_sub_index5(i8* nocapture %p, i8 signext %x) nounw entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}-={{ *}}r{{[0-9]+}} %conv4 = zext i8 %x to i32 - %add.ptr = getelementptr inbounds i8* %p, i32 5 - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 + %0 = load i8, i8* %add.ptr, align 1 %conv13 = zext i8 %0 to i32 %sub = sub nsw i32 %conv13, %conv4 %conv2 = trunc i32 %sub to i8 @@ -438,8 +438,8 @@ entry: define void @memop_signed_char_or_index5(i8* nocapture %p, i8 signext %x) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}|={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i8* %p, i32 5 - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 + %0 = load i8, i8* %add.ptr, align 1 %or3 = or i8 %0, %x store i8 %or3, i8* %add.ptr, align 1 ret void @@ -448,8 +448,8 @@ entry: define void @memop_signed_char_and_index5(i8* nocapture %p, i8 signext %x) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}&={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i8* %p, i32 5 - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 + %0 = load i8, i8* %add.ptr, align 1 %and3 = and i8 %0, %x store i8 %and3, i8* %add.ptr, align 1 ret void @@ -458,8 +458,8 @@ entry: define void @memop_signed_char_clrbit_index5(i8* nocapture %p) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) - %add.ptr = getelementptr 
inbounds i8* %p, i32 5 - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 + %0 = load i8, i8* %add.ptr, align 1 %conv2 = zext i8 %0 to i32 %and = and i32 %conv2, 223 %conv1 = trunc i32 %and to i8 @@ -470,8 +470,8 @@ entry: define void @memop_signed_char_setbit_index5(i8* nocapture %p) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) - %add.ptr = getelementptr inbounds i8* %p, i32 5 - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 + %0 = load i8, i8* %add.ptr, align 1 %conv2 = zext i8 %0 to i32 %or = or i32 %conv2, 128 %conv1 = trunc i32 %or to i8 @@ -482,7 +482,7 @@ entry: define void @memop_unsigned_short_add5(i16* nocapture %p) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 - %0 = load i16* %p, align 2 + %0 = load i16, i16* %p, align 2 %conv = zext i16 %0 to i32 %add = add nsw i32 %conv, 5 %conv1 = trunc i32 %add to i16 @@ -494,7 +494,7 @@ define void @memop_unsigned_short_add(i16* nocapture %p, i16 zeroext %x) nounwin entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} %conv = zext i16 %x to i32 - %0 = load i16* %p, align 2 + %0 = load i16, i16* %p, align 2 %conv1 = zext i16 %0 to i32 %add = add nsw i32 %conv1, %conv %conv2 = trunc i32 %add to i16 @@ -506,7 +506,7 @@ define void @memop_unsigned_short_sub(i16* nocapture %p, i16 zeroext %x) nounwin entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} %conv = zext i16 %x to i32 - %0 = load i16* %p, align 2 + %0 = load i16, i16* %p, align 2 %conv1 = zext i16 %0 to i32 %sub = sub nsw i32 %conv1, %conv %conv2 = trunc i32 %sub to i16 @@ -517,7 +517,7 @@ entry: define void @memop_unsigned_short_or(i16* nocapture %p, i16 zeroext %x) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} - %0 = load i16* %p, align 2 + %0 = load i16, i16* %p, align 2 %or3 = or i16 %0, %x store i16 %or3, i16* %p, align 2 ret void @@ -526,7 +526,7 @@ entry: define void @memop_unsigned_short_and(i16* nocapture %p, i16 zeroext %x) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} - %0 = load i16* %p, align 2 + %0 = load i16, i16* %p, align 2 %and3 = and i16 %0, %x store i16 %and3, i16* %p, align 2 ret void @@ -535,7 +535,7 @@ entry: define void @memop_unsigned_short_clrbit(i16* nocapture %p) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) - %0 = load i16* %p, align 2 + %0 = load i16, i16* %p, align 2 %conv = zext i16 %0 to i32 %and = and i32 %conv, 65503 %conv1 = trunc i32 %and to i16 @@ -546,7 +546,7 @@ entry: define void @memop_unsigned_short_setbit(i16* nocapture %p) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) - %0 = load i16* %p, align 2 + %0 = load i16, i16* %p, align 2 %conv = zext i16 %0 to i32 %or = or i32 %conv, 128 %conv1 = trunc i32 %or to i16 @@ -557,8 +557,8 @@ entry: define void @memop_unsigned_short_add5_index(i16* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 - %add.ptr = getelementptr inbounds i16* %p, i32 %i - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i + %0 = load i16, i16* %add.ptr, align 2 %conv = zext i16 %0 to i32 %add = add nsw i32 %conv, 5 %conv1 = trunc i32 %add to i16 @@ -570,8 +570,8 @@ define void @memop_unsigned_short_add_index(i16* nocapture %p, i32 %i, i16 zeroe entry: ; CHECK: 
memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} %conv = zext i16 %x to i32 - %add.ptr = getelementptr inbounds i16* %p, i32 %i - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i + %0 = load i16, i16* %add.ptr, align 2 %conv1 = zext i16 %0 to i32 %add = add nsw i32 %conv1, %conv %conv2 = trunc i32 %add to i16 @@ -583,8 +583,8 @@ define void @memop_unsigned_short_sub_index(i16* nocapture %p, i32 %i, i16 zeroe entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} %conv = zext i16 %x to i32 - %add.ptr = getelementptr inbounds i16* %p, i32 %i - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i + %0 = load i16, i16* %add.ptr, align 2 %conv1 = zext i16 %0 to i32 %sub = sub nsw i32 %conv1, %conv %conv2 = trunc i32 %sub to i16 @@ -595,8 +595,8 @@ entry: define void @memop_unsigned_short_or_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i16* %p, i32 %i - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i + %0 = load i16, i16* %add.ptr, align 2 %or3 = or i16 %0, %x store i16 %or3, i16* %add.ptr, align 2 ret void @@ -605,8 +605,8 @@ entry: define void @memop_unsigned_short_and_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i16* %p, i32 %i - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i + %0 = load i16, i16* %add.ptr, align 2 %and3 = and i16 %0, %x store i16 %and3, i16* %add.ptr, align 2 ret void @@ -615,8 +615,8 @@ entry: define void @memop_unsigned_short_clrbit_index(i16* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) - %add.ptr = getelementptr inbounds i16* %p, i32 %i - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i + %0 = load i16, i16* %add.ptr, align 2 %conv = zext i16 %0 to i32 %and = and i32 %conv, 65503 %conv1 = trunc i32 %and to i16 @@ -627,8 +627,8 @@ entry: define void @memop_unsigned_short_setbit_index(i16* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) - %add.ptr = getelementptr inbounds i16* %p, i32 %i - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i + %0 = load i16, i16* %add.ptr, align 2 %conv = zext i16 %0 to i32 %or = or i32 %conv, 128 %conv1 = trunc i32 %or to i16 @@ -639,8 +639,8 @@ entry: define void @memop_unsigned_short_add5_index5(i16* nocapture %p) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}#5 - %add.ptr = getelementptr inbounds i16* %p, i32 5 - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 + %0 = load i16, i16* %add.ptr, align 2 %conv = zext i16 %0 to i32 %add = add nsw i32 %conv, 5 %conv1 = trunc i32 %add to i16 @@ -652,8 +652,8 @@ define void @memop_unsigned_short_add_index5(i16* nocapture %p, i16 zeroext %x) entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}r{{[0-9]+}} %conv = zext i16 %x to i32 - %add.ptr = getelementptr inbounds i16* %p, i32 5 - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 + %0 = load i16, i16* %add.ptr, align 2 %conv1 = zext i16 %0 to i32 %add = add nsw i32 %conv1, %conv 
%conv2 = trunc i32 %add to i16 @@ -665,8 +665,8 @@ define void @memop_unsigned_short_sub_index5(i16* nocapture %p, i16 zeroext %x) entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}r{{[0-9]+}} %conv = zext i16 %x to i32 - %add.ptr = getelementptr inbounds i16* %p, i32 5 - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 + %0 = load i16, i16* %add.ptr, align 2 %conv1 = zext i16 %0 to i32 %sub = sub nsw i32 %conv1, %conv %conv2 = trunc i32 %sub to i16 @@ -677,8 +677,8 @@ entry: define void @memop_unsigned_short_or_index5(i16* nocapture %p, i16 zeroext %x) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}|={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i16* %p, i32 5 - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 + %0 = load i16, i16* %add.ptr, align 2 %or3 = or i16 %0, %x store i16 %or3, i16* %add.ptr, align 2 ret void @@ -687,8 +687,8 @@ entry: define void @memop_unsigned_short_and_index5(i16* nocapture %p, i16 zeroext %x) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}&={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i16* %p, i32 5 - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 + %0 = load i16, i16* %add.ptr, align 2 %and3 = and i16 %0, %x store i16 %and3, i16* %add.ptr, align 2 ret void @@ -697,8 +697,8 @@ entry: define void @memop_unsigned_short_clrbit_index5(i16* nocapture %p) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) - %add.ptr = getelementptr inbounds i16* %p, i32 5 - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 + %0 = load i16, i16* %add.ptr, align 2 %conv = zext i16 %0 to i32 %and = and i32 %conv, 65503 %conv1 = trunc i32 %and to i16 @@ -709,8 +709,8 @@ entry: define void @memop_unsigned_short_setbit_index5(i16* nocapture %p) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) - %add.ptr = getelementptr inbounds i16* %p, i32 5 - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 + %0 = load i16, i16* %add.ptr, align 2 %conv = zext i16 %0 to i32 %or = or i32 %conv, 128 %conv1 = trunc i32 %or to i16 @@ -721,7 +721,7 @@ entry: define void @memop_signed_short_add5(i16* nocapture %p) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 - %0 = load i16* %p, align 2 + %0 = load i16, i16* %p, align 2 %conv2 = zext i16 %0 to i32 %add = add nsw i32 %conv2, 5 %conv1 = trunc i32 %add to i16 @@ -733,7 +733,7 @@ define void @memop_signed_short_add(i16* nocapture %p, i16 signext %x) nounwind entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} %conv4 = zext i16 %x to i32 - %0 = load i16* %p, align 2 + %0 = load i16, i16* %p, align 2 %conv13 = zext i16 %0 to i32 %add = add nsw i32 %conv13, %conv4 %conv2 = trunc i32 %add to i16 @@ -745,7 +745,7 @@ define void @memop_signed_short_sub(i16* nocapture %p, i16 signext %x) nounwind entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} %conv4 = zext i16 %x to i32 - %0 = load i16* %p, align 2 + %0 = load i16, i16* %p, align 2 %conv13 = zext i16 %0 to i32 %sub = sub nsw i32 %conv13, %conv4 %conv2 = trunc i32 %sub to i16 @@ -756,7 +756,7 @@ entry: define void @memop_signed_short_or(i16* nocapture %p, i16 signext %x) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} - %0 = load i16* %p, align 2 + %0 
= load i16, i16* %p, align 2 %or3 = or i16 %0, %x store i16 %or3, i16* %p, align 2 ret void @@ -765,7 +765,7 @@ entry: define void @memop_signed_short_and(i16* nocapture %p, i16 signext %x) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} - %0 = load i16* %p, align 2 + %0 = load i16, i16* %p, align 2 %and3 = and i16 %0, %x store i16 %and3, i16* %p, align 2 ret void @@ -774,7 +774,7 @@ entry: define void @memop_signed_short_clrbit(i16* nocapture %p) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) - %0 = load i16* %p, align 2 + %0 = load i16, i16* %p, align 2 %conv2 = zext i16 %0 to i32 %and = and i32 %conv2, 65503 %conv1 = trunc i32 %and to i16 @@ -785,7 +785,7 @@ entry: define void @memop_signed_short_setbit(i16* nocapture %p) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) - %0 = load i16* %p, align 2 + %0 = load i16, i16* %p, align 2 %conv2 = zext i16 %0 to i32 %or = or i32 %conv2, 128 %conv1 = trunc i32 %or to i16 @@ -796,8 +796,8 @@ entry: define void @memop_signed_short_add5_index(i16* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 - %add.ptr = getelementptr inbounds i16* %p, i32 %i - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i + %0 = load i16, i16* %add.ptr, align 2 %conv2 = zext i16 %0 to i32 %add = add nsw i32 %conv2, 5 %conv1 = trunc i32 %add to i16 @@ -809,8 +809,8 @@ define void @memop_signed_short_add_index(i16* nocapture %p, i32 %i, i16 signext entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} %conv4 = zext i16 %x to i32 - %add.ptr = getelementptr inbounds i16* %p, i32 %i - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i + %0 = load i16, i16* %add.ptr, align 2 %conv13 = zext i16 %0 to i32 %add = add nsw i32 %conv13, %conv4 %conv2 = trunc i32 %add to i16 @@ -822,8 +822,8 @@ define void @memop_signed_short_sub_index(i16* nocapture %p, i32 %i, i16 signext entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} %conv4 = zext i16 %x to i32 - %add.ptr = getelementptr inbounds i16* %p, i32 %i - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i + %0 = load i16, i16* %add.ptr, align 2 %conv13 = zext i16 %0 to i32 %sub = sub nsw i32 %conv13, %conv4 %conv2 = trunc i32 %sub to i16 @@ -834,8 +834,8 @@ entry: define void @memop_signed_short_or_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i16* %p, i32 %i - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i + %0 = load i16, i16* %add.ptr, align 2 %or3 = or i16 %0, %x store i16 %or3, i16* %add.ptr, align 2 ret void @@ -844,8 +844,8 @@ entry: define void @memop_signed_short_and_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i16* %p, i32 %i - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i + %0 = load i16, i16* %add.ptr, align 2 %and3 = and i16 %0, %x store i16 %and3, i16* %add.ptr, align 2 ret void @@ -854,8 +854,8 @@ entry: define void @memop_signed_short_clrbit_index(i16* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ 
*}}#5{{ *}}) - %add.ptr = getelementptr inbounds i16* %p, i32 %i - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i + %0 = load i16, i16* %add.ptr, align 2 %conv2 = zext i16 %0 to i32 %and = and i32 %conv2, 65503 %conv1 = trunc i32 %and to i16 @@ -866,8 +866,8 @@ entry: define void @memop_signed_short_setbit_index(i16* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) - %add.ptr = getelementptr inbounds i16* %p, i32 %i - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i + %0 = load i16, i16* %add.ptr, align 2 %conv2 = zext i16 %0 to i32 %or = or i32 %conv2, 128 %conv1 = trunc i32 %or to i16 @@ -878,8 +878,8 @@ entry: define void @memop_signed_short_add5_index5(i16* nocapture %p) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}#5 - %add.ptr = getelementptr inbounds i16* %p, i32 5 - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 + %0 = load i16, i16* %add.ptr, align 2 %conv2 = zext i16 %0 to i32 %add = add nsw i32 %conv2, 5 %conv1 = trunc i32 %add to i16 @@ -891,8 +891,8 @@ define void @memop_signed_short_add_index5(i16* nocapture %p, i16 signext %x) no entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}r{{[0-9]+}} %conv4 = zext i16 %x to i32 - %add.ptr = getelementptr inbounds i16* %p, i32 5 - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 + %0 = load i16, i16* %add.ptr, align 2 %conv13 = zext i16 %0 to i32 %add = add nsw i32 %conv13, %conv4 %conv2 = trunc i32 %add to i16 @@ -904,8 +904,8 @@ define void @memop_signed_short_sub_index5(i16* nocapture %p, i16 signext %x) no entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}r{{[0-9]+}} %conv4 = zext i16 %x to i32 - %add.ptr = getelementptr inbounds i16* %p, i32 5 - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 + %0 = load i16, i16* %add.ptr, align 2 %conv13 = zext i16 %0 to i32 %sub = sub nsw i32 %conv13, %conv4 %conv2 = trunc i32 %sub to i16 @@ -916,8 +916,8 @@ entry: define void @memop_signed_short_or_index5(i16* nocapture %p, i16 signext %x) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}|={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i16* %p, i32 5 - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 + %0 = load i16, i16* %add.ptr, align 2 %or3 = or i16 %0, %x store i16 %or3, i16* %add.ptr, align 2 ret void @@ -926,8 +926,8 @@ entry: define void @memop_signed_short_and_index5(i16* nocapture %p, i16 signext %x) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}&={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i16* %p, i32 5 - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 + %0 = load i16, i16* %add.ptr, align 2 %and3 = and i16 %0, %x store i16 %and3, i16* %add.ptr, align 2 ret void @@ -936,8 +936,8 @@ entry: define void @memop_signed_short_clrbit_index5(i16* nocapture %p) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) - %add.ptr = getelementptr inbounds i16* %p, i32 5 - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 + %0 = load i16, i16* %add.ptr, align 2 %conv2 = zext i16 %0 to i32 %and = and i32 %conv2, 65503 %conv1 = trunc i32 %and to i16 @@ -948,8 +948,8 @@ entry: define void 
@memop_signed_short_setbit_index5(i16* nocapture %p) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) - %add.ptr = getelementptr inbounds i16* %p, i32 5 - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 5 + %0 = load i16, i16* %add.ptr, align 2 %conv2 = zext i16 %0 to i32 %or = or i32 %conv2, 128 %conv1 = trunc i32 %or to i16 @@ -960,7 +960,7 @@ entry: define void @memop_signed_int_add5(i32* nocapture %p) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 - %0 = load i32* %p, align 4 + %0 = load i32, i32* %p, align 4 %add = add i32 %0, 5 store i32 %add, i32* %p, align 4 ret void @@ -969,7 +969,7 @@ entry: define void @memop_signed_int_add(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} - %0 = load i32* %p, align 4 + %0 = load i32, i32* %p, align 4 %add = add i32 %0, %x store i32 %add, i32* %p, align 4 ret void @@ -978,7 +978,7 @@ entry: define void @memop_signed_int_sub(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} - %0 = load i32* %p, align 4 + %0 = load i32, i32* %p, align 4 %sub = sub i32 %0, %x store i32 %sub, i32* %p, align 4 ret void @@ -987,7 +987,7 @@ entry: define void @memop_signed_int_or(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} - %0 = load i32* %p, align 4 + %0 = load i32, i32* %p, align 4 %or = or i32 %0, %x store i32 %or, i32* %p, align 4 ret void @@ -996,7 +996,7 @@ entry: define void @memop_signed_int_and(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} - %0 = load i32* %p, align 4 + %0 = load i32, i32* %p, align 4 %and = and i32 %0, %x store i32 %and, i32* %p, align 4 ret void @@ -1005,7 +1005,7 @@ entry: define void @memop_signed_int_clrbit(i32* nocapture %p) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) - %0 = load i32* %p, align 4 + %0 = load i32, i32* %p, align 4 %and = and i32 %0, -33 store i32 %and, i32* %p, align 4 ret void @@ -1014,7 +1014,7 @@ entry: define void @memop_signed_int_setbit(i32* nocapture %p) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) - %0 = load i32* %p, align 4 + %0 = load i32, i32* %p, align 4 %or = or i32 %0, 128 store i32 %or, i32* %p, align 4 ret void @@ -1023,8 +1023,8 @@ entry: define void @memop_signed_int_add5_index(i32* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 - %add.ptr = getelementptr inbounds i32* %p, i32 %i - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i + %0 = load i32, i32* %add.ptr, align 4 %add = add i32 %0, 5 store i32 %add, i32* %add.ptr, align 4 ret void @@ -1033,8 +1033,8 @@ entry: define void @memop_signed_int_add_index(i32* nocapture %p, i32 %i, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i32* %p, i32 %i - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i + %0 = load i32, i32* %add.ptr, align 4 %add = add i32 %0, %x store i32 %add, i32* %add.ptr, align 4 ret void @@ -1043,8 +1043,8 @@ entry: define void @memop_signed_int_sub_index(i32* nocapture %p, i32 %i, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ 
*}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i32* %p, i32 %i - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i + %0 = load i32, i32* %add.ptr, align 4 %sub = sub i32 %0, %x store i32 %sub, i32* %add.ptr, align 4 ret void @@ -1053,8 +1053,8 @@ entry: define void @memop_signed_int_or_index(i32* nocapture %p, i32 %i, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i32* %p, i32 %i - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i + %0 = load i32, i32* %add.ptr, align 4 %or = or i32 %0, %x store i32 %or, i32* %add.ptr, align 4 ret void @@ -1063,8 +1063,8 @@ entry: define void @memop_signed_int_and_index(i32* nocapture %p, i32 %i, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i32* %p, i32 %i - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i + %0 = load i32, i32* %add.ptr, align 4 %and = and i32 %0, %x store i32 %and, i32* %add.ptr, align 4 ret void @@ -1073,8 +1073,8 @@ entry: define void @memop_signed_int_clrbit_index(i32* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) - %add.ptr = getelementptr inbounds i32* %p, i32 %i - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i + %0 = load i32, i32* %add.ptr, align 4 %and = and i32 %0, -33 store i32 %and, i32* %add.ptr, align 4 ret void @@ -1083,8 +1083,8 @@ entry: define void @memop_signed_int_setbit_index(i32* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) - %add.ptr = getelementptr inbounds i32* %p, i32 %i - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i + %0 = load i32, i32* %add.ptr, align 4 %or = or i32 %0, 128 store i32 %or, i32* %add.ptr, align 4 ret void @@ -1093,8 +1093,8 @@ entry: define void @memop_signed_int_add5_index5(i32* nocapture %p) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}#5 - %add.ptr = getelementptr inbounds i32* %p, i32 5 - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 + %0 = load i32, i32* %add.ptr, align 4 %add = add i32 %0, 5 store i32 %add, i32* %add.ptr, align 4 ret void @@ -1103,8 +1103,8 @@ entry: define void @memop_signed_int_add_index5(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i32* %p, i32 5 - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 + %0 = load i32, i32* %add.ptr, align 4 %add = add i32 %0, %x store i32 %add, i32* %add.ptr, align 4 ret void @@ -1113,8 +1113,8 @@ entry: define void @memop_signed_int_sub_index5(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i32* %p, i32 5 - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 + %0 = load i32, i32* %add.ptr, align 4 %sub = sub i32 %0, %x store i32 %sub, i32* %add.ptr, align 4 ret void @@ -1123,8 +1123,8 @@ entry: define void @memop_signed_int_or_index5(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}|={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds 
i32* %p, i32 5 - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 + %0 = load i32, i32* %add.ptr, align 4 %or = or i32 %0, %x store i32 %or, i32* %add.ptr, align 4 ret void @@ -1133,8 +1133,8 @@ entry: define void @memop_signed_int_and_index5(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}&={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i32* %p, i32 5 - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 + %0 = load i32, i32* %add.ptr, align 4 %and = and i32 %0, %x store i32 %and, i32* %add.ptr, align 4 ret void @@ -1143,8 +1143,8 @@ entry: define void @memop_signed_int_clrbit_index5(i32* nocapture %p) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) - %add.ptr = getelementptr inbounds i32* %p, i32 5 - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 + %0 = load i32, i32* %add.ptr, align 4 %and = and i32 %0, -33 store i32 %and, i32* %add.ptr, align 4 ret void @@ -1153,8 +1153,8 @@ entry: define void @memop_signed_int_setbit_index5(i32* nocapture %p) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) - %add.ptr = getelementptr inbounds i32* %p, i32 5 - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 + %0 = load i32, i32* %add.ptr, align 4 %or = or i32 %0, 128 store i32 %or, i32* %add.ptr, align 4 ret void @@ -1163,7 +1163,7 @@ entry: define void @memop_unsigned_int_add5(i32* nocapture %p) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 - %0 = load i32* %p, align 4 + %0 = load i32, i32* %p, align 4 %add = add nsw i32 %0, 5 store i32 %add, i32* %p, align 4 ret void @@ -1172,7 +1172,7 @@ entry: define void @memop_unsigned_int_add(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} - %0 = load i32* %p, align 4 + %0 = load i32, i32* %p, align 4 %add = add nsw i32 %0, %x store i32 %add, i32* %p, align 4 ret void @@ -1181,7 +1181,7 @@ entry: define void @memop_unsigned_int_sub(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} - %0 = load i32* %p, align 4 + %0 = load i32, i32* %p, align 4 %sub = sub nsw i32 %0, %x store i32 %sub, i32* %p, align 4 ret void @@ -1190,7 +1190,7 @@ entry: define void @memop_unsigned_int_or(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} - %0 = load i32* %p, align 4 + %0 = load i32, i32* %p, align 4 %or = or i32 %0, %x store i32 %or, i32* %p, align 4 ret void @@ -1199,7 +1199,7 @@ entry: define void @memop_unsigned_int_and(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} - %0 = load i32* %p, align 4 + %0 = load i32, i32* %p, align 4 %and = and i32 %0, %x store i32 %and, i32* %p, align 4 ret void @@ -1208,7 +1208,7 @@ entry: define void @memop_unsigned_int_clrbit(i32* nocapture %p) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) - %0 = load i32* %p, align 4 + %0 = load i32, i32* %p, align 4 %and = and i32 %0, -33 store i32 %and, i32* %p, align 4 ret void @@ -1217,7 +1217,7 @@ entry: define void @memop_unsigned_int_setbit(i32* nocapture %p) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) - %0 = load i32* %p, 
align 4 + %0 = load i32, i32* %p, align 4 %or = or i32 %0, 128 store i32 %or, i32* %p, align 4 ret void @@ -1226,8 +1226,8 @@ entry: define void @memop_unsigned_int_add5_index(i32* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 - %add.ptr = getelementptr inbounds i32* %p, i32 %i - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i + %0 = load i32, i32* %add.ptr, align 4 %add = add nsw i32 %0, 5 store i32 %add, i32* %add.ptr, align 4 ret void @@ -1236,8 +1236,8 @@ entry: define void @memop_unsigned_int_add_index(i32* nocapture %p, i32 %i, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i32* %p, i32 %i - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i + %0 = load i32, i32* %add.ptr, align 4 %add = add nsw i32 %0, %x store i32 %add, i32* %add.ptr, align 4 ret void @@ -1246,8 +1246,8 @@ entry: define void @memop_unsigned_int_sub_index(i32* nocapture %p, i32 %i, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i32* %p, i32 %i - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i + %0 = load i32, i32* %add.ptr, align 4 %sub = sub nsw i32 %0, %x store i32 %sub, i32* %add.ptr, align 4 ret void @@ -1256,8 +1256,8 @@ entry: define void @memop_unsigned_int_or_index(i32* nocapture %p, i32 %i, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i32* %p, i32 %i - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i + %0 = load i32, i32* %add.ptr, align 4 %or = or i32 %0, %x store i32 %or, i32* %add.ptr, align 4 ret void @@ -1266,8 +1266,8 @@ entry: define void @memop_unsigned_int_and_index(i32* nocapture %p, i32 %i, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i32* %p, i32 %i - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i + %0 = load i32, i32* %add.ptr, align 4 %and = and i32 %0, %x store i32 %and, i32* %add.ptr, align 4 ret void @@ -1276,8 +1276,8 @@ entry: define void @memop_unsigned_int_clrbit_index(i32* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) - %add.ptr = getelementptr inbounds i32* %p, i32 %i - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i + %0 = load i32, i32* %add.ptr, align 4 %and = and i32 %0, -33 store i32 %and, i32* %add.ptr, align 4 ret void @@ -1286,8 +1286,8 @@ entry: define void @memop_unsigned_int_setbit_index(i32* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) - %add.ptr = getelementptr inbounds i32* %p, i32 %i - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i + %0 = load i32, i32* %add.ptr, align 4 %or = or i32 %0, 128 store i32 %or, i32* %add.ptr, align 4 ret void @@ -1296,8 +1296,8 @@ entry: define void @memop_unsigned_int_add5_index5(i32* nocapture %p) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}#5 - %add.ptr = getelementptr inbounds i32* %p, i32 5 - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 + %0 = load i32, i32* 
%add.ptr, align 4 %add = add nsw i32 %0, 5 store i32 %add, i32* %add.ptr, align 4 ret void @@ -1306,8 +1306,8 @@ entry: define void @memop_unsigned_int_add_index5(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i32* %p, i32 5 - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 + %0 = load i32, i32* %add.ptr, align 4 %add = add nsw i32 %0, %x store i32 %add, i32* %add.ptr, align 4 ret void @@ -1316,8 +1316,8 @@ entry: define void @memop_unsigned_int_sub_index5(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i32* %p, i32 5 - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 + %0 = load i32, i32* %add.ptr, align 4 %sub = sub nsw i32 %0, %x store i32 %sub, i32* %add.ptr, align 4 ret void @@ -1326,8 +1326,8 @@ entry: define void @memop_unsigned_int_or_index5(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}|={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i32* %p, i32 5 - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 + %0 = load i32, i32* %add.ptr, align 4 %or = or i32 %0, %x store i32 %or, i32* %add.ptr, align 4 ret void @@ -1336,8 +1336,8 @@ entry: define void @memop_unsigned_int_and_index5(i32* nocapture %p, i32 %x) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}&={{ *}}r{{[0-9]+}} - %add.ptr = getelementptr inbounds i32* %p, i32 5 - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 + %0 = load i32, i32* %add.ptr, align 4 %and = and i32 %0, %x store i32 %and, i32* %add.ptr, align 4 ret void @@ -1346,8 +1346,8 @@ entry: define void @memop_unsigned_int_clrbit_index5(i32* nocapture %p) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) - %add.ptr = getelementptr inbounds i32* %p, i32 5 - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 + %0 = load i32, i32* %add.ptr, align 4 %and = and i32 %0, -33 store i32 %and, i32* %add.ptr, align 4 ret void @@ -1356,8 +1356,8 @@ entry: define void @memop_unsigned_int_setbit_index5(i32* nocapture %p) nounwind { entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) - %add.ptr = getelementptr inbounds i32* %p, i32 5 - %0 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %p, i32 5 + %0 = load i32, i32* %add.ptr, align 4 %or = or i32 %0, 128 store i32 %or, i32* %add.ptr, align 4 ret void diff --git a/test/CodeGen/Hexagon/memops1.ll b/test/CodeGen/Hexagon/memops1.ll index 2babdc848ddc..37e885b6e0cb 100644 --- a/test/CodeGen/Hexagon/memops1.ll +++ b/test/CodeGen/Hexagon/memops1.ll @@ -7,9 +7,9 @@ entry: ; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#40){{ *}}-={{ *}}#1 %p.addr = alloca i32*, align 4 store i32* %p, i32** %p.addr, align 4 - %0 = load i32** %p.addr, align 4 - %add.ptr = getelementptr inbounds i32* %0, i32 10 - %1 = load i32* %add.ptr, align 4 + %0 = load i32*, i32** %p.addr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %0, i32 10 + %1 = load i32, i32* %add.ptr, align 4 %sub = sub nsw i32 %1, 1 store i32 %sub, i32* %add.ptr, align 4 ret void @@ -22,11 +22,11 @@ entry: %i.addr = alloca i32, align 4 store i32* %p, i32** %p.addr, align 4 store i32 %i, i32* %i.addr, align 4 - %0 = load i32** 
%p.addr, align 4 - %1 = load i32* %i.addr, align 4 - %add.ptr = getelementptr inbounds i32* %0, i32 %1 - %add.ptr1 = getelementptr inbounds i32* %add.ptr, i32 10 - %2 = load i32* %add.ptr1, align 4 + %0 = load i32*, i32** %p.addr, align 4 + %1 = load i32, i32* %i.addr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %0, i32 %1 + %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 10 + %2 = load i32, i32* %add.ptr1, align 4 %sub = sub nsw i32 %2, 1 store i32 %sub, i32* %add.ptr1, align 4 ret void diff --git a/test/CodeGen/Hexagon/memops2.ll b/test/CodeGen/Hexagon/memops2.ll index d6d1a50bcefa..f9f8a2478119 100644 --- a/test/CodeGen/Hexagon/memops2.ll +++ b/test/CodeGen/Hexagon/memops2.ll @@ -5,8 +5,8 @@ define void @f(i16* nocapture %p) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}#1 - %add.ptr = getelementptr inbounds i16* %p, i32 10 - %0 = load i16* %add.ptr, align 2 + %add.ptr = getelementptr inbounds i16, i16* %p, i32 10 + %0 = load i16, i16* %add.ptr, align 2 %conv2 = zext i16 %0 to i32 %sub = add nsw i32 %conv2, 65535 %conv1 = trunc i32 %sub to i16 @@ -18,8 +18,8 @@ define void @g(i16* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}#1 %add.ptr.sum = add i32 %i, 10 - %add.ptr1 = getelementptr inbounds i16* %p, i32 %add.ptr.sum - %0 = load i16* %add.ptr1, align 2 + %add.ptr1 = getelementptr inbounds i16, i16* %p, i32 %add.ptr.sum + %0 = load i16, i16* %add.ptr1, align 2 %conv3 = zext i16 %0 to i32 %sub = add nsw i32 %conv3, 65535 %conv2 = trunc i32 %sub to i16 diff --git a/test/CodeGen/Hexagon/memops3.ll b/test/CodeGen/Hexagon/memops3.ll index d9e4e8f53709..6cd7fdc48617 100644 --- a/test/CodeGen/Hexagon/memops3.ll +++ b/test/CodeGen/Hexagon/memops3.ll @@ -5,8 +5,8 @@ define void @f(i8* nocapture %p) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}#1 - %add.ptr = getelementptr inbounds i8* %p, i32 10 - %0 = load i8* %add.ptr, align 1 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 10 + %0 = load i8, i8* %add.ptr, align 1 %conv = zext i8 %0 to i32 %sub = add nsw i32 %conv, 255 %conv1 = trunc i32 %sub to i8 @@ -18,8 +18,8 @@ define void @g(i8* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}#1 %add.ptr.sum = add i32 %i, 10 - %add.ptr1 = getelementptr inbounds i8* %p, i32 %add.ptr.sum - %0 = load i8* %add.ptr1, align 1 + %add.ptr1 = getelementptr inbounds i8, i8* %p, i32 %add.ptr.sum + %0 = load i8, i8* %add.ptr1, align 1 %conv = zext i8 %0 to i32 %sub = add nsw i32 %conv, 255 %conv2 = trunc i32 %sub to i8 diff --git a/test/CodeGen/Hexagon/misaligned-access.ll b/test/CodeGen/Hexagon/misaligned-access.ll index 4dafb44cc3ef..f4b0cb9cb1e3 100644 --- a/test/CodeGen/Hexagon/misaligned-access.ll +++ b/test/CodeGen/Hexagon/misaligned-access.ll @@ -7,10 +7,10 @@ declare i32 @_hi(i64) #1 define i32 @CSDRSEARCH_executeSearchManager() #0 { entry: %temp = alloca i32, align 4 - %0 = load i32* @temp1, align 4 + %0 = load i32, i32* @temp1, align 4 store i32 %0, i32* %temp, align 4 %1 = bitcast i32* %temp to i64* - %2 = load i64* %1, align 8 + %2 = load i64, i64* %1, align 8 %call = call i32 @_hi(i64 %2) ret i32 %call } diff --git a/test/CodeGen/Hexagon/mpy.ll b/test/CodeGen/Hexagon/mpy.ll index d5c5ae345352..3ecf7d46ccb0 100644 --- a/test/CodeGen/Hexagon/mpy.ll +++ b/test/CodeGen/Hexagon/mpy.ll @@ -9,10 +9,10 @@ entry: store i32 %acc, i32* %acc.addr, align 4 store i32 %num, i32* %num.addr, align 4 store i32 %num2, i32* %num2.addr, align 4 
- %0 = load i32* %num.addr, align 4 - %1 = load i32* %acc.addr, align 4 + %0 = load i32, i32* %num.addr, align 4 + %1 = load i32, i32* %acc.addr, align 4 %mul = mul nsw i32 %0, %1 - %2 = load i32* %num2.addr, align 4 + %2 = load i32, i32* %num2.addr, align 4 %add = add nsw i32 %mul, %2 store i32 %add, i32* %num.addr, align 4 ret void diff --git a/test/CodeGen/Hexagon/newvaluejump.ll b/test/CodeGen/Hexagon/newvaluejump.ll index 9c7ca55cb8f6..3e1ee179573a 100644 --- a/test/CodeGen/Hexagon/newvaluejump.ll +++ b/test/CodeGen/Hexagon/newvaluejump.ll @@ -9,10 +9,10 @@ entry: ; CHECK: if (cmp.eq(r{{[0-9]+}}.new, #0)) jump{{.}} %addr1 = alloca i32, align 4 %addr2 = alloca i32, align 4 - %0 = load i32* @i, align 4 + %0 = load i32, i32* @i, align 4 store i32 %0, i32* %addr1, align 4 call void @bar(i32 1, i32 2) - %1 = load i32* @j, align 4 + %1 = load i32, i32* @j, align 4 %tobool = icmp ne i32 %1, 0 br i1 %tobool, label %if.then, label %if.else diff --git a/test/CodeGen/Hexagon/newvaluejump2.ll b/test/CodeGen/Hexagon/newvaluejump2.ll index 3d50ea5422c7..a812a7d96659 100644 --- a/test/CodeGen/Hexagon/newvaluejump2.ll +++ b/test/CodeGen/Hexagon/newvaluejump2.ll @@ -1,17 +1,16 @@ -; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s +; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hexagon-misched < %s \ +; RUN: | FileCheck %s ; Check that we generate new value jump, both registers, with one ; of the registers as new. -@Reg = common global i8 0, align 1 +@Reg = common global i32 0, align 4 define i32 @main() nounwind { entry: -; CHECK: if (cmp.gt(r{{[0-9]+}}.new, r{{[0-9]+}})) jump:{{[t|nt]}} .LBB{{[0-9]+}}_{{[0-9]+}} - %Reg2 = alloca i8, align 1 - %0 = load i8* %Reg2, align 1 - %conv0 = zext i8 %0 to i32 - %1 = load i8* @Reg, align 1 - %conv1 = zext i8 %1 to i32 - %tobool = icmp sle i32 %conv0, %conv1 +; CHECK: if (cmp.gt(r{{[0-9]+}}, r{{[0-9]+}}.new)) jump:{{[t|nt]}} .LBB{{[0-9]+}}_{{[0-9]+}} + %Reg2 = alloca i32, align 4 + %0 = load i32, i32* %Reg2, align 4 + %1 = load i32, i32* @Reg, align 4 + %tobool = icmp sle i32 %0, %1 br i1 %tobool, label %if.then, label %if.else if.then: diff --git a/test/CodeGen/Hexagon/newvaluestore.ll b/test/CodeGen/Hexagon/newvaluestore.ll index 93cf3479ab5e..13cbba2d08e1 100644 --- a/test/CodeGen/Hexagon/newvaluestore.ll +++ b/test/CodeGen/Hexagon/newvaluestore.ll @@ -11,11 +11,11 @@ entry: %number1 = alloca i32, align 4 %number2 = alloca i32, align 4 %number3 = alloca i32, align 4 - %0 = load i32 * @i, align 4 + %0 = load i32 , i32 * @i, align 4 store i32 %0, i32* %number1, align 4 - %1 = load i32 * @j, align 4 + %1 = load i32 , i32 * @j, align 4 store i32 %1, i32* %number2, align 4 - %2 = load i32 * @k, align 4 + %2 = load i32 , i32 * @k, align 4 store i32 %2, i32* %number3, align 4 ret i32 %0 } diff --git a/test/CodeGen/Hexagon/opt-fabs.ll b/test/CodeGen/Hexagon/opt-fabs.ll index 31b56fd6e982..da657e4b1b8f 100644 --- a/test/CodeGen/Hexagon/opt-fabs.ll +++ b/test/CodeGen/Hexagon/opt-fabs.ll @@ -7,7 +7,7 @@ define float @my_fabsf(float %x) nounwind { entry: %x.addr = alloca float, align 4 store float %x, float* %x.addr, align 4 - %0 = load float* %x.addr, align 4 + %0 = load float, float* %x.addr, align 4 %call = call float @fabsf(float %0) readnone ret float %call } diff --git a/test/CodeGen/Hexagon/opt-fneg.ll b/test/CodeGen/Hexagon/opt-fneg.ll index 479b4b64069a..978957865863 100644 --- a/test/CodeGen/Hexagon/opt-fneg.ll +++ b/test/CodeGen/Hexagon/opt-fneg.ll @@ -6,7 +6,7 @@ entry: ; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #31) %x.addr = alloca float, 
align 4 store float %x, float* %x.addr, align 4 - %0 = load float* %x.addr, align 4 + %0 = load float, float* %x.addr, align 4 %sub = fsub float -0.000000e+00, %0 ret float %sub } diff --git a/test/CodeGen/Hexagon/postinc-load.ll b/test/CodeGen/Hexagon/postinc-load.ll index 855a347d74f5..a9d987981d65 100644 --- a/test/CodeGen/Hexagon/postinc-load.ll +++ b/test/CodeGen/Hexagon/postinc-load.ll @@ -12,13 +12,13 @@ for.body: %arrayidx.phi = phi i32* [ %a, %entry ], [ %arrayidx.inc, %for.body ] %arrayidx1.phi = phi i16* [ %b, %entry ], [ %arrayidx1.inc, %for.body ] %sum.03 = phi i32 [ 0, %entry ], [ %add2, %for.body ] - %0 = load i32* %arrayidx.phi, align 4 - %1 = load i16* %arrayidx1.phi, align 2 + %0 = load i32, i32* %arrayidx.phi, align 4 + %1 = load i16, i16* %arrayidx1.phi, align 2 %conv = sext i16 %1 to i32 %add = add i32 %0, %sum.03 %add2 = add i32 %add, %conv - %arrayidx.inc = getelementptr i32* %arrayidx.phi, i32 1 - %arrayidx1.inc = getelementptr i16* %arrayidx1.phi, i32 1 + %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1 + %arrayidx1.inc = getelementptr i16, i16* %arrayidx1.phi, i32 1 %lsr.iv.next = add i32 %lsr.iv, -1 %exitcond = icmp eq i32 %lsr.iv.next, 0 br i1 %exitcond, label %for.end, label %for.body diff --git a/test/CodeGen/Hexagon/postinc-store.ll b/test/CodeGen/Hexagon/postinc-store.ll index 99a3a58ad39c..6315ca14a952 100644 --- a/test/CodeGen/Hexagon/postinc-store.ll +++ b/test/CodeGen/Hexagon/postinc-store.ll @@ -11,15 +11,15 @@ for.body: ; preds = %for.body, %entry %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ 10, %entry ] %arrayidx.phi = phi i32* [ %a, %entry ], [ %arrayidx.inc, %for.body ] %arrayidx1.phi = phi i16* [ %b, %entry ], [ %arrayidx1.inc, %for.body ] - %0 = load i32* %arrayidx.phi, align 4 - %1 = load i16* %arrayidx1.phi, align 2 + %0 = load i32, i32* %arrayidx.phi, align 4 + %1 = load i16, i16* %arrayidx1.phi, align 2 %conv = sext i16 %1 to i32 %factor = mul i32 %0, 2 %add3 = add i32 %factor, %conv store i32 %add3, i32* %arrayidx.phi, align 4 - %arrayidx.inc = getelementptr i32* %arrayidx.phi, i32 1 - %arrayidx1.inc = getelementptr i16* %arrayidx1.phi, i32 1 + %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1 + %arrayidx1.inc = getelementptr i16, i16* %arrayidx1.phi, i32 1 %lsr.iv.next = add i32 %lsr.iv, -1 %exitcond = icmp eq i32 %lsr.iv.next, 0 br i1 %exitcond, label %for.end, label %for.body diff --git a/test/CodeGen/Hexagon/pred-absolute-store.ll b/test/CodeGen/Hexagon/pred-absolute-store.ll index 64635b176daf..3e5e98270d53 100644 --- a/test/CodeGen/Hexagon/pred-absolute-store.ll +++ b/test/CodeGen/Hexagon/pred-absolute-store.ll @@ -1,8 +1,7 @@ -; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s +; RUN: llc -march=hexagon < %s | FileCheck %s ; Check that we are able to predicate instructions with abosolute ; addressing mode. 
- -; CHECK: if{{ *}}(p{{[0-3]+}}.new){{ *}}memw(##gvar){{ *}}={{ *}}r{{[0-9]+}} +; CHECK: if ({{!*}}p{{[0-2]}}.new) memw(##gvar) = r{{[0-9]+}} @gvar = external global i32 define i32 @test2(i32 %a, i32 %b) nounwind { diff --git a/test/CodeGen/Hexagon/pred-gp.ll b/test/CodeGen/Hexagon/pred-gp.ll index 299bd8679dad..3868e098007f 100644 --- a/test/CodeGen/Hexagon/pred-gp.ll +++ b/test/CodeGen/Hexagon/pred-gp.ll @@ -14,11 +14,11 @@ entry: br i1 %cmp, label %if.then, label %entry.if.end_crit_edge entry.if.end_crit_edge: - %.pre = load i32* @c, align 4 + %.pre = load i32, i32* @c, align 4 br label %if.end if.then: - %0 = load i32* @d, align 4 + %0 = load i32, i32* @d, align 4 store i32 %0, i32* @c, align 4 br label %if.end diff --git a/test/CodeGen/Hexagon/pred-instrs.ll b/test/CodeGen/Hexagon/pred-instrs.ll index 800073e49b03..e0a75f13dfa8 100644 --- a/test/CodeGen/Hexagon/pred-instrs.ll +++ b/test/CodeGen/Hexagon/pred-instrs.ll @@ -25,6 +25,6 @@ if.else: ; preds = %entry if.end: ; preds = %if.else, %if.then %storemerge = phi i32 [ %and, %if.else ], [ %shl, %if.then ] store i32 %storemerge, i32* @a, align 4 - %0 = load i32* @d, align 4 + %0 = load i32, i32* @d, align 4 ret i32 %0 } diff --git a/test/CodeGen/Hexagon/remove-endloop.ll b/test/CodeGen/Hexagon/remove-endloop.ll new file mode 100644 index 000000000000..73e1ad02cd80 --- /dev/null +++ b/test/CodeGen/Hexagon/remove-endloop.ll @@ -0,0 +1,56 @@ +; RUN: llc -march=hexagon -O2 < %s | FileCheck %s + +define void @foo(i32 %n, i32* nocapture %A, i32* nocapture %B) nounwind optsize { +entry: + %cmp = icmp sgt i32 %n, 100 + br i1 %cmp, label %for.body.preheader, label %for.cond4.preheader + +; CHECK: endloop0 +; CHECK: endloop0 +; CHECK-NOT: endloop0 + +for.body.preheader: + br label %for.body + +for.cond4.preheader: + %cmp113 = icmp sgt i32 %n, 0 + br i1 %cmp113, label %for.body7.preheader, label %if.end + +for.body7.preheader: + br label %for.body7 + +for.body: + %arrayidx.phi = phi i32* [ %arrayidx.inc, %for.body ], [ %B, %for.body.preheader ] + %arrayidx3.phi = phi i32* [ %arrayidx3.inc, %for.body ], [ %A, %for.body.preheader ] + %i.014 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ] + %0 = load i32, i32* %arrayidx.phi, align 4 + %sub = add nsw i32 %0, -1 + store i32 %sub, i32* %arrayidx3.phi, align 4 + %inc = add nsw i32 %i.014, 1 + %exitcond = icmp eq i32 %inc, %n + %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1 + %arrayidx3.inc = getelementptr i32, i32* %arrayidx3.phi, i32 1 + br i1 %exitcond, label %if.end.loopexit, label %for.body + +for.body7: + %arrayidx8.phi = phi i32* [ %arrayidx8.inc, %for.body7 ], [ %B, %for.body7.preheader ] + %arrayidx9.phi = phi i32* [ %arrayidx9.inc, %for.body7 ], [ %A, %for.body7.preheader ] + %i.117 = phi i32 [ %inc11, %for.body7 ], [ 0, %for.body7.preheader ] + %1 = load i32, i32* %arrayidx8.phi, align 4 + %add = add nsw i32 %1, 1 + store i32 %add, i32* %arrayidx9.phi, align 4 + %inc11 = add nsw i32 %i.117, 1 + %exitcond18 = icmp eq i32 %inc11, %n + %arrayidx8.inc = getelementptr i32, i32* %arrayidx8.phi, i32 1 + %arrayidx9.inc = getelementptr i32, i32* %arrayidx9.phi, i32 1 + br i1 %exitcond18, label %if.end.loopexit21, label %for.body7 + +if.end.loopexit: + br label %if.end + +if.end.loopexit21: + br label %if.end + +if.end: + ret void +} diff --git a/test/CodeGen/Hexagon/remove_lsr.ll b/test/CodeGen/Hexagon/remove_lsr.ll index 3128dbb8b21b..3b85c486348d 100644 --- a/test/CodeGen/Hexagon/remove_lsr.ll +++ b/test/CodeGen/Hexagon/remove_lsr.ll @@ -21,11 +21,11 @@ define void 
@foo(%union.vect64* nocapture %sss_extracted_bit_rx_data_ptr, i8* nocapture %scr_s_even_code_ptr, i8* nocapture %scr_s_odd_code_ptr) nounwind { entry: - %scevgep = getelementptr %union.vect64* %sss_extracted_bit_rx_data_ptr, i32 1 - %scevgep28 = getelementptr %union.vect32* %s_odd, i32 1 - %scevgep32 = getelementptr %union.vect32* %s_even, i32 1 - %scevgep36 = getelementptr i8* %scr_s_odd_code_ptr, i32 1 - %scevgep39 = getelementptr i8* %scr_s_even_code_ptr, i32 1 + %scevgep = getelementptr %union.vect64, %union.vect64* %sss_extracted_bit_rx_data_ptr, i32 1 + %scevgep28 = getelementptr %union.vect32, %union.vect32* %s_odd, i32 1 + %scevgep32 = getelementptr %union.vect32, %union.vect32* %s_even, i32 1 + %scevgep36 = getelementptr i8, i8* %scr_s_odd_code_ptr, i32 1 + %scevgep39 = getelementptr i8, i8* %scr_s_even_code_ptr, i32 1 br label %for.body for.body: ; preds = %for.body, %entry @@ -54,16 +54,16 @@ for.body: ; preds = %for.body, %entry %7 = trunc i64 %6 to i32 %8 = tail call i32 @llvm.hexagon.C2.mux(i32 %conv8, i32 %5, i32 %7) store i32 %8, i32* %lsr.iv2931, align 4 - %srcval = load i64* %lsr.iv27, align 8 - %9 = load i8* %lsr.iv40, align 1 - %10 = load i8* %lsr.iv37, align 1 + %srcval = load i64, i64* %lsr.iv27, align 8 + %9 = load i8, i8* %lsr.iv40, align 1 + %10 = load i8, i8* %lsr.iv37, align 1 %lftr.wideiv = trunc i32 %lsr.iv42 to i8 %exitcond = icmp eq i8 %lftr.wideiv, 32 - %scevgep26 = getelementptr %union.vect64* %lsr.iv, i32 1 - %scevgep30 = getelementptr %union.vect32* %lsr.iv29, i32 1 - %scevgep34 = getelementptr %union.vect32* %lsr.iv33, i32 1 - %scevgep38 = getelementptr i8* %lsr.iv37, i32 1 - %scevgep41 = getelementptr i8* %lsr.iv40, i32 1 + %scevgep26 = getelementptr %union.vect64, %union.vect64* %lsr.iv, i32 1 + %scevgep30 = getelementptr %union.vect32, %union.vect32* %lsr.iv29, i32 1 + %scevgep34 = getelementptr %union.vect32, %union.vect32* %lsr.iv33, i32 1 + %scevgep38 = getelementptr i8, i8* %lsr.iv37, i32 1 + %scevgep41 = getelementptr i8, i8* %lsr.iv40, i32 1 %lsr.iv.next = add i32 %lsr.iv42, 1 br i1 %exitcond, label %for.end, label %for.body diff --git a/test/CodeGen/Hexagon/shrink-frame-basic.ll b/test/CodeGen/Hexagon/shrink-frame-basic.ll new file mode 100644 index 000000000000..50b37885eda4 --- /dev/null +++ b/test/CodeGen/Hexagon/shrink-frame-basic.ll @@ -0,0 +1,36 @@ +; RUN: llc < %s | FileCheck %s +; Check for allocframe in a non-entry block LBB0_n. +; CHECK: LBB0_{{[0-9]+}}: +; CHECK: allocframe +; Deallocframe may be in a different block, but must follow. +; CHECK: deallocframe + +target datalayout = "e-m:e-p:32:32-i1:32-i64:64-a:0-v32:32-n16:32" +target triple = "hexagon" + +; Function Attrs: nounwind +define i32 @foo(i32 %n, i32* %p) #0 { +entry: + %cmp = icmp eq i32* %p, null + br i1 %cmp, label %if.end, label %if.then + +if.then: ; preds = %entry + %0 = load i32, i32* %p, align 4 + %inc = add nsw i32 %0, 1 + store i32 %inc, i32* %p, align 4 + br label %return + +if.end: ; preds = %entry + %call = tail call i32 bitcast (i32 (...)* @bar to i32 (i32)*)(i32 %n) #0 + %add = add nsw i32 %call, 1 + br label %return + +return: ; preds = %if.end, %if.then + %retval.0 = phi i32 [ %0, %if.then ], [ %add, %if.end ] + ret i32 %retval.0 +} + +declare i32 @bar(...) 
#0 + +attributes #0 = { nounwind } + diff --git a/test/CodeGen/Hexagon/stack-align1.ll b/test/CodeGen/Hexagon/stack-align1.ll new file mode 100644 index 000000000000..4efa70f59854 --- /dev/null +++ b/test/CodeGen/Hexagon/stack-align1.ll @@ -0,0 +1,21 @@ +; RUN: llc -O0 -march=hexagon < %s | FileCheck %s +; CHECK: and(r29, #-32) +; CHECK-DAG: add(r29, #0) +; CHECK-DAG: add(r29, #28) + +target triple = "hexagon-unknown-unknown" + +; Function Attrs: nounwind uwtable +define void @foo() #0 { +entry: + %x = alloca i32, align 4 + %y = alloca i32, align 32 + %0 = bitcast i32* %x to i8* + %1 = bitcast i32* %y to i8* + call void @bar(i8* %0, i8* %1) + ret void +} + +declare void @bar(i8*, i8*) #0 + +attributes #0 = { nounwind } diff --git a/test/CodeGen/Hexagon/stack-align2.ll b/test/CodeGen/Hexagon/stack-align2.ll new file mode 100644 index 000000000000..1bbd57820325 --- /dev/null +++ b/test/CodeGen/Hexagon/stack-align2.ll @@ -0,0 +1,27 @@ +; RUN: llc -O0 -march=hexagon < %s | FileCheck %s +; CHECK: and(r29, #-128) +; CHECK-DAG: add(r29, #0) +; CHECK-DAG: add(r29, #64) +; CHECK-DAG: add(r29, #96) +; CHECK-DAG: add(r29, #124) + +target triple = "hexagon-unknown-unknown" + +; Function Attrs: nounwind uwtable +define void @foo() #0 { +entry: + %x = alloca i32, align 4 + %y = alloca i32, align 32 + %z = alloca i32, align 64 + %w = alloca i32, align 128 + %0 = bitcast i32* %x to i8* + %1 = bitcast i32* %y to i8* + %2 = bitcast i32* %z to i8* + %3 = bitcast i32* %w to i8* + call void @bar(i8* %0, i8* %1, i8* %2, i8* %3) + ret void +} + +declare void @bar(i8*, i8*, i8*, i8*) #0 + +attributes #0 = { nounwind } diff --git a/test/CodeGen/Hexagon/stack-alloca1.ll b/test/CodeGen/Hexagon/stack-alloca1.ll new file mode 100644 index 000000000000..00e9e051aebb --- /dev/null +++ b/test/CodeGen/Hexagon/stack-alloca1.ll @@ -0,0 +1,18 @@ +; RUN: llc -O0 -march=hexagon < %s | FileCheck %s +; CHECK: sub(r29, r[[REG:[0-9]+]]) +; CHECK: r29 = r[[REG]] + +target triple = "hexagon-unknown-unknown" + +; Function Attrs: nounwind uwtable +define void @foo(i32 %n) #0 { +entry: + %x = alloca i32, i32 %n + %0 = bitcast i32* %x to i8* + call void @bar(i8* %0) + ret void +} + +declare void @bar(i8*) #0 + +attributes #0 = { nounwind } diff --git a/test/CodeGen/Hexagon/stack-alloca2.ll b/test/CodeGen/Hexagon/stack-alloca2.ll new file mode 100644 index 000000000000..ad5e13166aa2 --- /dev/null +++ b/test/CodeGen/Hexagon/stack-alloca2.ll @@ -0,0 +1,23 @@ +; RUN: llc -O0 -march=hexagon < %s | FileCheck %s +; CHECK-DAG: r[[AP:[0-9]+]] = and(r30, #-32) +; CHECK-DAG: r1 = add(r[[AP]], #-32) + +; CHECK-DAG: sub(r29, r[[SP:[0-9]+]]) +; CHECK-DAG: r29 = r[[SP]] + +target triple = "hexagon-unknown-unknown" + +; Function Attrs: nounwind uwtable +define void @foo(i32 %n) #0 { +entry: + %x = alloca i32, i32 %n + %y = alloca i32, align 32 + %0 = bitcast i32* %x to i8* + %1 = bitcast i32* %y to i8* + call void @bar(i8* %0, i8* %1) + ret void +} + +declare void @bar(i8*, i8* %y) #0 + +attributes #0 = { nounwind } diff --git a/test/CodeGen/Hexagon/static.ll b/test/CodeGen/Hexagon/static.ll index 683a4c21bcb8..760b8b559725 100644 --- a/test/CodeGen/Hexagon/static.ll +++ b/test/CodeGen/Hexagon/static.ll @@ -10,10 +10,10 @@ define void @foo() nounwind { entry: - %0 = load i32* @num, align 4 - %1 = load i32* @acc, align 4 + %0 = load i32, i32* @num, align 4 + %1 = load i32, i32* @acc, align 4 %mul = mul nsw i32 %0, %1 - %2 = load i32* @val, align 4 + %2 = load i32, i32* @val, align 4 %add = add nsw i32 %mul, %2 store i32 %add, i32* @num, align 4 ret void 
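; --- Editorial note, not part of this commit ---
; Each file above is an llvm-lit test: lit runs the "; RUN:" comment with %s
; replaced by the test's own path, so any of these changes can be re-checked
; by hand, e.g. roughly "llc -march=hexagon < static.ll | FileCheck static.ll"
; (using whatever flags that file's actual RUN line specifies).  A minimal
; test of the same shape, with hypothetical names and checks:
; RUN: llc -march=hexagon < %s | FileCheck %s
; CHECK-LABEL: sketch_add:
; CHECK: add(r{{[0-9]+}}, r{{[0-9]+}})
define i32 @sketch_add(i32 %a, i32 %b) nounwind {
entry:
  %sum = add nsw i32 %a, %b
  ret i32 %sum
}
; --- end of editorial note ---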
diff --git a/test/CodeGen/Hexagon/struct_args.ll b/test/CodeGen/Hexagon/struct_args.ll index f91300b5067e..95b76c7999d4 100644 --- a/test/CodeGen/Hexagon/struct_args.ll +++ b/test/CodeGen/Hexagon/struct_args.ll @@ -8,7 +8,7 @@ define void @foo() nounwind { entry: - %0 = load i64* bitcast (%struct.small* @s1 to i64*), align 1 + %0 = load i64, i64* bitcast (%struct.small* @s1 to i64*), align 1 call void @bar(i64 %0) ret void } diff --git a/test/CodeGen/Hexagon/struct_args_large.ll b/test/CodeGen/Hexagon/struct_args_large.ll index db87d9e81db1..1438d73eacf7 100644 --- a/test/CodeGen/Hexagon/struct_args_large.ll +++ b/test/CodeGen/Hexagon/struct_args_large.ll @@ -1,4 +1,5 @@ -; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s +; XFAIL: +; RUN: llc -march=hexagon < %s | FileCheck %s ; CHECK: r[[T0:[0-9]+]] = CONST32(#s2) ; CHECK: memw(r29+#0) = r{{.}} ; CHECK: memw(r29+#8) = r{{.}} diff --git a/test/CodeGen/Hexagon/sube.ll b/test/CodeGen/Hexagon/sube.ll index 735ac9eb82e4..873f52b2d5df 100644 --- a/test/CodeGen/Hexagon/sube.ll +++ b/test/CodeGen/Hexagon/sube.ll @@ -1,7 +1,7 @@ -; RUN: llc -march=hexagon < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-expand-condsets=0 < %s | FileCheck %s -; CHECK: r{{[0-9]+:[0-9]+}} = #0 ; CHECK: r{{[0-9]+:[0-9]+}} = #1 +; CHECK: r{{[0-9]+:[0-9]+}} = #0 ; CHECK: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}}) ; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}) ; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}) diff --git a/test/CodeGen/Hexagon/tail-call-mem-intrinsics.ll b/test/CodeGen/Hexagon/tail-call-mem-intrinsics.ll new file mode 100644 index 000000000000..90fb75e5be06 --- /dev/null +++ b/test/CodeGen/Hexagon/tail-call-mem-intrinsics.ll @@ -0,0 +1,31 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s + +; CHECK-LABEL: tail_memcpy: +; CHECK: jump memcpy +define void @tail_memcpy(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 { +entry: + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false) + ret void +} + +; CHECK-LABEL: tail_memmove: +; CHECK: jump memmove +define void @tail_memmove(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 { +entry: + tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false) + ret void +} + +; CHECK-LABEL: tail_memset: +; CHECK: jump memset +define void @tail_memset(i8* nocapture %p, i8 %c, i32 %n) #0 { +entry: + tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i32 1, i1 false) + ret void +} + +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #0 +declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #0 +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) #0 + +attributes #0 = { nounwind } diff --git a/test/CodeGen/Hexagon/tfr-to-combine.ll b/test/CodeGen/Hexagon/tfr-to-combine.ll index e3057cd1611d..a257acfeb49b 100644 --- a/test/CodeGen/Hexagon/tfr-to-combine.ll +++ b/test/CodeGen/Hexagon/tfr-to-combine.ll @@ -20,14 +20,14 @@ define i64 @test2() #0 { ; CHECK: combine(#0, r{{[0-9]+}}) entry: store i16 0, i16* @a, align 2 - %0 = load i16* @c, align 2 + %0 = load i16, i16* @c, align 2 %conv2 = zext i16 %0 to i64 ret i64 %conv2 } ; Function Attrs: nounwind define i64 @test4() #0 { -; CHECK: combine(#0, ##100) +; CHECK: combine(#0, #100) entry: store i16 100, i16* @b, align 2 store i16 0, i16* @a, align 2 diff --git a/test/CodeGen/Hexagon/union-1.ll b/test/CodeGen/Hexagon/union-1.ll index 
fe79f9510fe8..1d93797db858 100644 --- a/test/CodeGen/Hexagon/union-1.ll +++ b/test/CodeGen/Hexagon/union-1.ll @@ -2,13 +2,15 @@ ; CHECK: word ; CHECK-NOT: combine(#0 ; CHECK: jump bar +; XFAIL: * +; Disable this test temporarily. define void @word(i32* nocapture %a) nounwind { entry: - %0 = load i32* %a, align 4 + %0 = load i32, i32* %a, align 4 %1 = zext i32 %0 to i64 - %add.ptr = getelementptr inbounds i32* %a, i32 1 - %2 = load i32* %add.ptr, align 4 + %add.ptr = getelementptr inbounds i32, i32* %a, i32 1 + %2 = load i32, i32* %add.ptr, align 4 %3 = zext i32 %2 to i64 %4 = shl nuw i64 %3, 32 %ins = or i64 %4, %1 diff --git a/test/CodeGen/Hexagon/vaddh.ll b/test/CodeGen/Hexagon/vaddh.ll index 01d20410978e..88194b750ad5 100644 --- a/test/CodeGen/Hexagon/vaddh.ll +++ b/test/CodeGen/Hexagon/vaddh.ll @@ -6,8 +6,8 @@ define void @foo() nounwind { entry: - %0 = load i32* @j, align 4 - %1 = load i32* @k, align 4 + %0 = load i32, i32* @j, align 4 + %1 = load i32, i32* @k, align 4 %2 = call i32 @llvm.hexagon.A2.svaddh(i32 %0, i32 %1) store i32 %2, i32* @k, align 4 ret void diff --git a/test/CodeGen/Hexagon/validate-offset.ll b/test/CodeGen/Hexagon/validate-offset.ll index 9e7d0aa07832..8de006c80b11 100644 --- a/test/CodeGen/Hexagon/validate-offset.ll +++ b/test/CodeGen/Hexagon/validate-offset.ll @@ -11,26 +11,26 @@ entry: %b.addr = alloca i32, align 4 store i32 %a, i32* %a.addr, align 4 store i32 %b, i32* %b.addr, align 4 - %0 = load i32* %a.addr, align 4 - %1 = load i32* %b.addr, align 4 + %0 = load i32, i32* %a.addr, align 4 + %1 = load i32, i32* %b.addr, align 4 %cmp = icmp sgt i32 %0, %1 br i1 %cmp, label %if.then, label %if.else if.then: - %2 = load i32* %a.addr, align 4 - %3 = load i32* %b.addr, align 4 + %2 = load i32, i32* %a.addr, align 4 + %3 = load i32, i32* %b.addr, align 4 %add = add nsw i32 %2, %3 store i32 %add, i32* %retval br label %return if.else: - %4 = load i32* %a.addr, align 4 - %5 = load i32* %b.addr, align 4 + %4 = load i32, i32* %a.addr, align 4 + %5 = load i32, i32* %b.addr, align 4 %sub = sub nsw i32 %4, %5 store i32 %sub, i32* %retval br label %return return: - %6 = load i32* %retval + %6 = load i32, i32* %retval ret i32 %6 } diff --git a/test/CodeGen/Hexagon/vect/vect-anyextend.ll b/test/CodeGen/Hexagon/vect/vect-anyextend.ll new file mode 100644 index 000000000000..fe5fe84fc37d --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-anyextend.ll @@ -0,0 +1,15 @@ +; RUN: llc -march=hexagon < %s +; Used to fail with "Cannot select: 0x17300f0: v2i32 = any_extend" + +; ModuleID = 'bugpoint-reduced-simplified.bc' +target datalayout = +"e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-a0:0-n32" +target triple = "hexagon-unknown-linux-gnu" + +define void @foo() nounwind { +entry: + %_p_vec_full48 = load <4 x i8>, <4 x i8>* undef, align 8 + %0 = zext <4 x i8> %_p_vec_full48 to <4 x i32> + store <4 x i32> %0, <4 x i32>* undef, align 8 + unreachable +} diff --git a/test/CodeGen/Hexagon/vect/vect-apint-truncate.ll b/test/CodeGen/Hexagon/vect/vect-apint-truncate.ll new file mode 100644 index 000000000000..eb94ddfe2961 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-apint-truncate.ll @@ -0,0 +1,27 @@ +; RUN: llc -march=hexagon < %s +; Used to fail with "Invalid APInt Truncate request". +; Used to fail with "Cannot select: 0x596010: v2i32 = sign_extend_inreg". 
+ +; ModuleID = 'bugpoint-reduced-simplified.bc' +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-a0:0-n32" +target triple = "hexagon-unknown-linux-gnu" + +define void @foo() nounwind { +entry: + br label %polly.loop_header + +polly.loop_after: ; preds = %polly.loop_header + unreachable + +polly.loop_header: ; preds = %polly.loop_body, %entry + %0 = icmp sle i32 undef, 63 + br i1 %0, label %polly.loop_body, label %polly.loop_after + +polly.loop_body: ; preds = %polly.loop_header + %_p_vec_full = load <4 x i8>, <4 x i8>* undef, align 8 + %1 = sext <4 x i8> %_p_vec_full to <4 x i32> + %p_vec = mul <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3> + %mulp_vec = add <4 x i32> %p_vec, <i32 21, i32 21, i32 21, i32 21> + store <4 x i32> %mulp_vec, <4 x i32>* undef, align 8 + br label %polly.loop_header +} diff --git a/test/CodeGen/Hexagon/vect/vect-bad-bitcast.ll b/test/CodeGen/Hexagon/vect/vect-bad-bitcast.ll new file mode 100644 index 000000000000..1672a789a26d --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-bad-bitcast.ll @@ -0,0 +1,61 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s +; REQUIRES: asserts +; Check for successful compilation. + +target datalayout = "e-m:e-p:32:32-i1:32-i64:64-a:0-v32:32-n16:32" +target triple = "hexagon" + +@input_buf = internal unnamed_addr constant [256 x i16] [i16 0, i16 0, i16 0, i16 1280, i16 2560, i16 4864, i16 7168, i16 9472, i16 11776, i16 12672, i16 13568, i16 14080, i16 15360, i16 15360, i16 15360, i16 15360, i16 15360, i16 15104, i16 14848, i16 14592, i16 14336, i16 14080, i16 14080, i16 13952, i16 13824, i16 13696, i16 13568, i16 13440, i16 13312, i16 13184, i16 13056, i16 12928, i16 12800, i16 12800, i16 12800, i16 12800, i16 12800, i16 12672, i16 12544, i16 12544, i16 12544, i16 12544, i16 12672, i16 12800, i16 12800, i16 12928, i16 13056, i16 13184, i16 13312, i16 13440, i16 13568, i16 13696, i16 13824, i16 14208, i16 14592, i16 14976, i16 15104, i16 15360, i16 15616, i16 15872, i16 16128, i16 16512, i16 16896, i16 17152, i16 17408, i16 17536, i16 17664, i16 17792, i16 17920, i16 18304, i16 18688, i16 19072, i16 19456, i16 19712, i16 19968, i16 20224, i16 20480, i16 20608, i16 20864, i16 20992, i16 21248, i16 21248, i16 21248, i16 21248, i16 21248, i16 21248, i16 21376, i16 21504, i16 21760, i16 21760, i16 21632, i16 21504, i16 21504, i16 21632, i16 21632, i16 21504, i16 21504, i16 21376, i16 21248, i16 21120, i16 20992, i16 20992, i16 20864, i16 20736, i16 20736, i16 20736, i16 20480, i16 20352, i16 20224, i16 20224, i16 20224, i16 20224, i16 20352, i16 20352, i16 20480, i16 20352, i16 20352, i16 20352, i16 20352, i16 20224, i16 20224, i16 20224, i16 20096, i16 20096, i16 19968, i16 19840, i16 19712, i16 19584, i16 19456, i16 19584, i16 19584, i16 19456, i16 19456, i16 19328, i16 19328, i16 19456, i16 19456, i16 19328, i16 19328, i16 19200, i16 19200, i16 19200, i16 19072, i16 19072, i16 18944, i16 18816, i16 18688, i16 18560, i16 18432, i16 18304, i16 18304, i16 18176, i16 18176, i16 18176, i16 18304, i16 18304, i16 18432, i16 18560, i16 18432, i16 18176, i16 17920, i16 17920, i16 17792, i16 17792, i16 17664, i16 17664, i16 17536, i16 17536, i16 17408, i16 17408, i16 17280, i16 17280, i16 17280, i16 17152, i16 17152, i16 17152, i16 17152, i16 17024, i16 17024, i16 16896, i16 16896, i16 16896, i16 16768, i16 16768, i16 16640, i16 16640, i16 16512, i16 16512, i16 16384, i16 16256, i16 16128, i16 16000, i16 15872, i16 15744, i16 15616, i16 15488, i16 15360, i16 15488, i16 15360, i16 15232, i16 15360, i16 
15232, i16 15104, i16 14976, i16 14336, i16 14336, i16 14592, i16 14464, i16 13824, i16 13824, i16 13568, i16 13568, i16 13440, i16 13312, i16 13184, i16 13056, i16 13056, i16 13056, i16 12928, i16 12800, i16 12672, i16 12672, i16 12544, i16 12416, i16 12288, i16 12160, i16 11904, i16 11776, i16 11571, i16 11520, i16 11392, i16 11136, i16 10905, i16 10752, i16 10624, i16 10444, i16 10240, i16 9984, i16 9728, i16 9472, i16 9216, i16 8960, i16 8704, i16 8448, i16 8192, i16 7936, i16 7680, i16 7424, i16 7168, i16 6400, i16 5632, i16 4864, i16 3584, i16 1536, i16 0, i16 0], align 8 + +; Function Attrs: nounwind +define i32 @t_run_test() #0 { +entry: + %WaterLeveldB_out = alloca i16, align 2 + br label %polly.stmt.for.body + +for.body8: ; preds = %for.body8, %polly.loop_exit.loopexit + %i.120 = phi i32 [ 0, %polly.loop_exit.loopexit ], [ %inc11.24, %for.body8 ] + %call = call i32 bitcast (i32 (...)* @fxpBitAllocation to i32 (i32, i32, i32, i32, i16*, i32, i32, i32)*)(i32 0, i32 0, i32 256, i32 %conv9, i16* %WaterLeveldB_out, i32 0, i32 1920, i32 %i.120) #2 + %inc11.24 = add i32 %i.120, 25 + %exitcond.24 = icmp eq i32 %inc11.24, 500 + br i1 %exitcond.24, label %for.end12, label %for.body8 + +for.end12: ; preds = %for.body8 + ret i32 0 + +polly.loop_exit.loopexit: ; preds = %polly.stmt.for.body + %WaterLeveldB.1p_vsel.lcssa = phi <4 x i16> [ %WaterLeveldB.1p_vsel, %polly.stmt.for.body ] + %_low_half = shufflevector <4 x i16> %WaterLeveldB.1p_vsel.lcssa, <4 x i16> undef, <2 x i32> <i32 0, i32 1> + %_high_half = shufflevector <4 x i16> %WaterLeveldB.1p_vsel.lcssa, <4 x i16> undef, <2 x i32> <i32 2, i32 3> + %0 = icmp sgt <2 x i16> %_low_half, %_high_half + %1 = select <2 x i1> %0, <2 x i16> %_low_half, <2 x i16> %_high_half + %2 = extractelement <2 x i16> %1, i32 0 + %3 = extractelement <2 x i16> %1, i32 1 + %4 = icmp sgt i16 %2, %3 + %5 = select i1 %4, i16 %2, i16 %3 + %conv9 = sext i16 %5 to i32 + br label %for.body8 + +polly.stmt.for.body: ; preds = %entry, %polly.stmt.for.body + %WaterLeveldB.1p_vsel35 = phi <4 x i16> [ <i16 -32768, i16 -32768, i16 -32768, i16 -32768>, %entry ], [ %WaterLeveldB.1p_vsel, %polly.stmt.for.body ] + %scevgep.phi = phi i16* [ getelementptr inbounds ([256 x i16], [256 x i16]* @input_buf, i32 0, i32 0), %entry ], [ %scevgep.inc, %polly.stmt.for.body ] + %polly.indvar = phi i32 [ 0, %entry ], [ %polly.indvar_next, %polly.stmt.for.body ] + %vector_ptr = bitcast i16* %scevgep.phi to <4 x i16>* + %_p_vec_full = load <4 x i16>, <4 x i16>* %vector_ptr, align 8 + %cmp2p_vicmp = icmp sgt <4 x i16> %_p_vec_full, %WaterLeveldB.1p_vsel35 + %WaterLeveldB.1p_vsel = select <4 x i1> %cmp2p_vicmp, <4 x i16> %_p_vec_full, <4 x i16> %WaterLeveldB.1p_vsel35 + %polly.indvar_next = add nsw i32 %polly.indvar, 4 + %polly.loop_cond = icmp slt i32 %polly.indvar, 252 + %scevgep.inc = getelementptr i16, i16* %scevgep.phi, i32 4 + br i1 %polly.loop_cond, label %polly.stmt.for.body, label %polly.loop_exit.loopexit +} + +declare i32 @fxpBitAllocation(...) 
#1 + +attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #2 = { nounwind } + +!llvm.ident = !{!0} + +!0 = !{!"QuIC LLVM Hexagon Clang version 3.1"} diff --git a/test/CodeGen/Hexagon/vect/vect-bitcast-1.ll b/test/CodeGen/Hexagon/vect/vect-bitcast-1.ll new file mode 100644 index 000000000000..b834744d9b12 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-bitcast-1.ll @@ -0,0 +1,68 @@ +; RUN: llc -march=hexagon < %s +; REQUIRES: asserts +; Used to fail with: Assertion `VT.getSizeInBits() == Operand.getValueType().getSizeInBits() && "Cannot BITCAST between types of different sizes!"' failed. + +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-a0:0-n32" +target triple = "hexagon-unknown-linux-gnu" + +define void @foo() nounwind { +entry: + br label %while.body + +while.body: ; preds = %if.then155, %if.then12, %entry + %cmp.i = icmp eq i8* undef, null + br i1 %cmp.i, label %lab_ci.exit, label %if.end.i + +if.end.i: ; preds = %while.body + unreachable + +lab_ci.exit: ; preds = %while.body + br i1 false, label %if.then, label %if.else + +if.then: ; preds = %lab_ci.exit + unreachable + +if.else: ; preds = %lab_ci.exit + br i1 undef, label %if.then12, label %if.else17 + +if.then12: ; preds = %if.else + br label %while.body + +if.else17: ; preds = %if.else + br i1 false, label %if.then22, label %if.else35 + +if.then22: ; preds = %if.else17 + unreachable + +if.else35: ; preds = %if.else17 + br i1 false, label %if.then40, label %if.else83 + +if.then40: ; preds = %if.else35 + unreachable + +if.else83: ; preds = %if.else35 + br i1 false, label %if.then88, label %if.else150 + +if.then88: ; preds = %if.else83 + unreachable + +if.else150: ; preds = %if.else83 + %cmp154 = icmp eq i32 undef, 0 + br i1 %cmp154, label %if.then155, label %if.else208 + +if.then155: ; preds = %if.else150 + %call191 = call i32 @strtol() nounwind + %conv192 = trunc i32 %call191 to i16 + %_p_splat_one = insertelement <1 x i16> undef, i16 %conv192, i32 0 + %_p_splat = shufflevector <1 x i16> %_p_splat_one, <1 x i16> undef, <2 x i32> zeroinitializer + %0 = sext <2 x i16> %_p_splat to <2 x i32> + %mul198p_vec = shl <2 x i32> %0, <i32 2, i32 2> + %1 = extractelement <2 x i32> %mul198p_vec, i32 0 + store i32 %1, i32* null, align 4 + br label %while.body + +if.else208: ; preds = %if.else150 + unreachable +} + +declare i32 @strtol() nounwind diff --git a/test/CodeGen/Hexagon/vect/vect-bitcast.ll b/test/CodeGen/Hexagon/vect/vect-bitcast.ll new file mode 100644 index 000000000000..2d6b0b827397 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-bitcast.ll @@ -0,0 +1,56 @@ +; RUN: llc -march=hexagon < %s +; REQUIRES: asserts +; Used to fail with "Cannot BITCAST between types of different sizes!" 
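; --- Editorial sketch, not part of this commit ---
; bitcast is only defined between types of the same bit width, e.g. <2 x i16>
; and i32 (both 32 bits); the assertion quoted above fired when legalization
; tried to form a bitcast between differently sized types.  A legal example,
; with a hypothetical name:
define i32 @sketch_bitcast(<2 x i16> %v) nounwind {
entry:
  %w = bitcast <2 x i16> %v to i32
  ret i32 %w
}
; --- end of editorial sketch ---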
+ +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32" +target triple = "hexagon" + +define void @foo() nounwind { +entry: + br label %while.body + +while.body: ; preds = %if.then155, %if.then12, %if.then, %entry + br i1 undef, label %if.then, label %if.else + +if.then: ; preds = %while.body + br label %while.body + +if.else: ; preds = %while.body + br i1 undef, label %if.then12, label %if.else17 + +if.then12: ; preds = %if.else + br label %while.body + +if.else17: ; preds = %if.else + br i1 false, label %if.then22, label %if.else35 + +if.then22: ; preds = %if.else17 + unreachable + +if.else35: ; preds = %if.else17 + br i1 false, label %if.then40, label %if.else83 + +if.then40: ; preds = %if.else35 + unreachable + +if.else83: ; preds = %if.else35 + br i1 false, label %if.then88, label %if.else150 + +if.then88: ; preds = %if.else83 + unreachable + +if.else150: ; preds = %if.else83 + %cmp154 = icmp eq i32 undef, 0 + br i1 %cmp154, label %if.then155, label %if.else208 + +if.then155: ; preds = %if.else150 + %_p_splat.1 = shufflevector <1 x i16> zeroinitializer, <1 x i16> undef, <2 x i32> zeroinitializer + %0 = sext <2 x i16> %_p_splat.1 to <2 x i32> + %mul198p_vec.1 = mul <2 x i32> %0, <i32 4, i32 4> + %1 = extractelement <2 x i32> %mul198p_vec.1, i32 0 + store i32 %1, i32* undef, align 4 + br label %while.body + +if.else208: ; preds = %if.else150 + unreachable +} diff --git a/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll b/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll new file mode 100644 index 000000000000..f5ee5d001510 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll @@ -0,0 +1,29 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s +; This one should generate a combine with two immediates. 
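; --- Editorial note, not part of the checked-in test ---
; combine(#7, #7) builds a 64-bit register pair whose two 32-bit halves are
; both 7, i.e. the <2 x i32> splat that each half of the <4 x i32> multiply
; below needs once the vector is split for 64-bit registers.  A hypothetical
; reduced form of that multiply:
define <2 x i32> @sketch_mul7(<2 x i32> %a) nounwind {
entry:
  %m = mul <2 x i32> %a, <i32 7, i32 7>
  ret <2 x i32> %m
}
; --- end of editorial note ---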
+; CHECK: combine(#7, #7) +@B = common global [400 x i32] zeroinitializer, align 8 +@A = common global [400 x i32] zeroinitializer, align 8 +@C = common global [400 x i32] zeroinitializer, align 8 + +define void @run() nounwind { +entry: + br label %polly.loop_body + +polly.loop_after: ; preds = %polly.loop_body + ret void + +polly.loop_body: ; preds = %entry, %polly.loop_body + %polly.loopiv23 = phi i32 [ 0, %entry ], [ %polly.next_loopiv, %polly.loop_body ] + %polly.next_loopiv = add nsw i32 %polly.loopiv23, 4 + %p_arrayidx1 = getelementptr [400 x i32], [400 x i32]* @A, i32 0, i32 %polly.loopiv23 + %p_arrayidx = getelementptr [400 x i32], [400 x i32]* @B, i32 0, i32 %polly.loopiv23 + %vector_ptr = bitcast i32* %p_arrayidx to <4 x i32>* + %_p_vec_full = load <4 x i32>, <4 x i32>* %vector_ptr, align 8 + %mulp_vec = mul <4 x i32> %_p_vec_full, <i32 7, i32 7, i32 7, i32 7> + %vector_ptr12 = bitcast i32* %p_arrayidx1 to <4 x i32>* + %_p_vec_full13 = load <4 x i32>, <4 x i32>* %vector_ptr12, align 8 + %addp_vec = add <4 x i32> %_p_vec_full13, %mulp_vec + store <4 x i32> %addp_vec, <4 x i32>* %vector_ptr12, align 8 + %0 = icmp slt i32 %polly.next_loopiv, 400 + br i1 %0, label %polly.loop_body, label %polly.loop_after +} diff --git a/test/CodeGen/Hexagon/vect/vect-cst-v4i8.ll b/test/CodeGen/Hexagon/vect/vect-cst-v4i8.ll new file mode 100644 index 000000000000..de3e14e2e91c --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-cst-v4i8.ll @@ -0,0 +1,30 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s +; Make sure we can build the constant vector <1, 2, 3, 4> +; CHECK-DAG: ##B +; CHECK-DAG: ##A +@B = common global [400 x i8] zeroinitializer, align 8 +@A = common global [400 x i8] zeroinitializer, align 8 +@C = common global [400 x i8] zeroinitializer, align 8 + +define void @run() nounwind { +entry: + br label %polly.loop_body + +polly.loop_after: ; preds = %polly.loop_body + ret void + +polly.loop_body: ; preds = %entry, %polly.loop_body + %polly.loopiv25 = phi i32 [ 0, %entry ], [ %polly.next_loopiv, %polly.loop_body ] + %polly.next_loopiv = add i32 %polly.loopiv25, 4 + %p_arrayidx1 = getelementptr [400 x i8], [400 x i8]* @A, i32 0, i32 %polly.loopiv25 + %p_arrayidx = getelementptr [400 x i8], [400 x i8]* @B, i32 0, i32 %polly.loopiv25 + %vector_ptr = bitcast i8* %p_arrayidx to <4 x i8>* + %_p_vec_full = load <4 x i8>, <4 x i8>* %vector_ptr, align 8 + %mulp_vec = mul <4 x i8> %_p_vec_full, <i8 1, i8 2, i8 3, i8 4> + %vector_ptr14 = bitcast i8* %p_arrayidx1 to <4 x i8>* + %_p_vec_full15 = load <4 x i8>, <4 x i8>* %vector_ptr14, align 8 + %addp_vec = add <4 x i8> %_p_vec_full15, %mulp_vec + store <4 x i8> %addp_vec, <4 x i8>* %vector_ptr14, align 8 + %0 = icmp slt i32 %polly.next_loopiv, 400 + br i1 %0, label %polly.loop_body, label %polly.loop_after +} diff --git a/test/CodeGen/Hexagon/vect/vect-cst.ll b/test/CodeGen/Hexagon/vect/vect-cst.ll new file mode 100644 index 000000000000..370fa5c7539e --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-cst.ll @@ -0,0 +1,29 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; Make sure we can build the constant vector <7, 7, 7, 7> +; CHECK: vaddub +@B = common global [400 x i8] zeroinitializer, align 8 +@A = common global [400 x i8] zeroinitializer, align 8 +@C = common global [400 x i8] zeroinitializer, align 8 + +define void @run() nounwind { +entry: + br label %polly.loop_body + +polly.loop_after: ; preds = %polly.loop_body + ret void + +polly.loop_body: ; preds = %entry, %polly.loop_body + %polly.loopiv25 = phi i32 [ 0, %entry ], [ 
%polly.next_loopiv, %polly.loop_body ] + %polly.next_loopiv = add i32 %polly.loopiv25, 4 + %p_arrayidx1 = getelementptr [400 x i8], [400 x i8]* @A, i32 0, i32 %polly.loopiv25 + %p_arrayidx = getelementptr [400 x i8], [400 x i8]* @B, i32 0, i32 %polly.loopiv25 + %vector_ptr = bitcast i8* %p_arrayidx to <4 x i8>* + %_p_vec_full = load <4 x i8>, <4 x i8>* %vector_ptr, align 8 + %mulp_vec = mul <4 x i8> %_p_vec_full, <i8 7, i8 7, i8 7, i8 7> + %vector_ptr14 = bitcast i8* %p_arrayidx1 to <4 x i8>* + %_p_vec_full15 = load <4 x i8>, <4 x i8>* %vector_ptr14, align 8 + %addp_vec = add <4 x i8> %_p_vec_full15, %mulp_vec + store <4 x i8> %addp_vec, <4 x i8>* %vector_ptr14, align 8 + %0 = icmp slt i32 %polly.next_loopiv, 400 + br i1 %0, label %polly.loop_body, label %polly.loop_after +} diff --git a/test/CodeGen/Hexagon/vect/vect-extract.ll b/test/CodeGen/Hexagon/vect/vect-extract.ll new file mode 100644 index 000000000000..75dc6850f181 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-extract.ll @@ -0,0 +1,96 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s + +; Check that we do not generate extract. +; CHECK-NOT: extractu +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32" +target triple = "hexagon" + +define void @foo(i32 %N, i32* nocapture %C, i16* nocapture %A, i16 signext %val) #0 { +entry: + %cmp14 = icmp eq i32 %N, 0 + br i1 %cmp14, label %for.end11, label %for.cond1.preheader.single_entry.preheader + +for.cond1.preheader.single_entry.preheader: ; preds = %entry + %0 = add i32 %N, -1 + %leftover_lb = and i32 %0, -2 + %p_conv4 = sext i16 %val to i32 + br label %for.cond1.preheader.single_entry + +for.cond1.preheader.single_entry: ; preds = %for.inc9, %for.cond1.preheader.single_entry.preheader + %indvar = phi i32 [ %indvar.next, %for.inc9 ], [ 0, %for.cond1.preheader.single_entry.preheader ] + %1 = mul i32 %indvar, %N + %.not = icmp slt i32 %N, 2 + %.not41 = icmp slt i32 %leftover_lb, 1 + %brmerge = or i1 %.not, %.not41 + %.mux = select i1 %.not, i32 0, i32 %leftover_lb + br i1 %brmerge, label %polly.loop_header26.preheader, label %polly.loop_body.lr.ph + +for.inc9.loopexit: ; preds = %polly.stmt.for.body331 + br label %for.inc9 + +for.inc9: ; preds = %for.inc9.loopexit, %polly.loop_header26.preheader + %indvar.next = add i32 %indvar, 1 + %exitcond40 = icmp eq i32 %indvar.next, %N + br i1 %exitcond40, label %for.end11.loopexit, label %for.cond1.preheader.single_entry + +for.end11.loopexit: ; preds = %for.inc9 + br label %for.end11 + +for.end11: ; preds = %for.end11.loopexit, %entry + ret void + +polly.loop_body.lr.ph: ; preds = %for.cond1.preheader.single_entry + %2 = call i64 @llvm.hexagon.A2.combinew(i32 %1, i32 %1) + %3 = bitcast i64 %2 to <2 x i32> + %4 = extractelement <2 x i32> %3, i32 0 + %5 = call i64 @llvm.hexagon.A2.combinew(i32 %p_conv4, i32 %p_conv4) + %6 = bitcast i64 %5 to <2 x i32> + %p_arrayidx8.gep = getelementptr i32, i32* %C, i32 %4 + %p_arrayidx.gep = getelementptr i16, i16* %A, i32 %4 + br label %polly.loop_body + +polly.loop_body: ; preds = %polly.loop_body.lr.ph, %polly.loop_body + %p_arrayidx8.phi = phi i32* [ %p_arrayidx8.gep, %polly.loop_body.lr.ph ], [ %p_arrayidx8.inc, %polly.loop_body ] + %p_arrayidx.phi = phi i16* [ %p_arrayidx.gep, %polly.loop_body.lr.ph ], [ %p_arrayidx.inc, %polly.loop_body ] + %polly.loopiv38 = phi i32 [ 0, %polly.loop_body.lr.ph ], [ %polly.next_loopiv, %polly.loop_body ] + %polly.next_loopiv = add nsw i32 %polly.loopiv38, 2 + %vector_ptr = bitcast 
i16* %p_arrayidx.phi to <2 x i16>* + %_p_vec_full = load <2 x i16>, <2 x i16>* %vector_ptr, align 2 + %7 = sext <2 x i16> %_p_vec_full to <2 x i32> + %mul5p_vec = mul <2 x i32> %7, %6 + %vector_ptr21 = bitcast i32* %p_arrayidx8.phi to <2 x i32>* + store <2 x i32> %mul5p_vec, <2 x i32>* %vector_ptr21, align 4 + %8 = icmp slt i32 %polly.next_loopiv, %leftover_lb + %p_arrayidx8.inc = getelementptr i32, i32* %p_arrayidx8.phi, i32 2 + %p_arrayidx.inc = getelementptr i16, i16* %p_arrayidx.phi, i32 2 + br i1 %8, label %polly.loop_body, label %polly.loop_header26.preheader.loopexit + +polly.loop_header26.preheader.loopexit: ; preds = %polly.loop_body + br label %polly.loop_header26.preheader + +polly.loop_header26.preheader: ; preds = %polly.loop_header26.preheader.loopexit, %for.cond1.preheader.single_entry + %polly.loopiv29.ph = phi i32 [ %.mux, %for.cond1.preheader.single_entry ], [ %leftover_lb, %polly.loop_header26.preheader.loopexit ] + %9 = icmp slt i32 %polly.loopiv29.ph, %N + br i1 %9, label %polly.stmt.for.body331.preheader, label %for.inc9 + +polly.stmt.for.body331.preheader: ; preds = %polly.loop_header26.preheader + br label %polly.stmt.for.body331 + +polly.stmt.for.body331: ; preds = %polly.stmt.for.body331.preheader, %polly.stmt.for.body331 + %polly.loopiv2939 = phi i32 [ %polly.next_loopiv30, %polly.stmt.for.body331 ], [ %polly.loopiv29.ph, %polly.stmt.for.body331.preheader ] + %polly.next_loopiv30 = add nsw i32 %polly.loopiv2939, 1 + %p_32 = add i32 %polly.loopiv2939, %1 + %p_arrayidx833 = getelementptr i32, i32* %C, i32 %p_32 + %p_arrayidx34 = getelementptr i16, i16* %A, i32 %p_32 + %_p_scalar_ = load i16, i16* %p_arrayidx34, align 2 + %p_conv = sext i16 %_p_scalar_ to i32 + %p_mul5 = mul nsw i32 %p_conv, %p_conv4 + store i32 %p_mul5, i32* %p_arrayidx833, align 4 + %exitcond = icmp eq i32 %polly.next_loopiv30, %N + br i1 %exitcond, label %for.inc9.loopexit, label %polly.stmt.for.body331 +} + +declare i64 @llvm.hexagon.A2.combinew(i32, i32) #1 + +attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { nounwind readnone } diff --git a/test/CodeGen/Hexagon/vect/vect-fma.ll b/test/CodeGen/Hexagon/vect/vect-fma.ll new file mode 100644 index 000000000000..c35e0159df70 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-fma.ll @@ -0,0 +1,26 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s +; REQUIRES: asserts +; Used to fail with "SplitVectorResult #0: 0x16cbe60: v4f64 = fma" + +; ModuleID = 'bugpoint-reduced-simplified.bc' +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-a0:0-n32" +target triple = "hexagon-unknown-linux-gnu" + +define void @run() nounwind { +entry: + br label %polly.loop_header + +polly.loop_after: ; preds = %polly.loop_header + ret void + +polly.loop_header: ; preds = %polly.loop_body, %entry + %0 = icmp sle i32 undef, 399 + br i1 %0, label %polly.loop_body, label %polly.loop_after + +polly.loop_body: ; preds = %polly.loop_header + %_p_vec_full = load <4 x double>, <4 x double>* undef, align 8 + %mulp_vec = fmul <4 x double> %_p_vec_full, <double 7.000000e+00, double 7.000000e+00, double 7.000000e+00, double 7.000000e+00> + %addp_vec = fadd <4 x double> undef, %mulp_vec + store <4 x double> %addp_vec, <4 x double>* undef, align 8 + br label %polly.loop_header +} diff --git a/test/CodeGen/Hexagon/vect/vect-illegal-type.ll 
b/test/CodeGen/Hexagon/vect/vect-illegal-type.ll new file mode 100644 index 000000000000..3d3bf88b64d3 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-illegal-type.ll @@ -0,0 +1,50 @@ +; RUN: llc -march=hexagon < %s +; REQUIRES: asserts +; Used to fail with "Unexpected illegal type!" +; Used to fail with "Cannot select: ch = store x,x,x,<ST4[undef](align=8), trunc to v4i8>" + +; ModuleID = 'bugpoint-reduced-simplified.bc' +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-a0:0-n32" +target triple = "hexagon-unknown-linux-gnu" + +define void @foo() nounwind { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + br i1 undef, label %for.end, label %for.body + +for.end: ; preds = %for.body + br label %for.body71 + +for.body71: ; preds = %for.body71, %for.end + br i1 undef, label %for.end96, label %for.body71 + +for.end96: ; preds = %for.body71 + switch i32 undef, label %sw.epilog [ + i32 1, label %for.cond375.preheader + i32 8, label %for.cond591 + ] + +for.cond375.preheader: ; preds = %for.end96 + br label %polly.loop_header228 + +for.cond591: ; preds = %for.end96 + br label %for.body664 + +for.body664: ; preds = %for.body664, %for.cond591 + br i1 undef, label %for.end670, label %for.body664 + +for.end670: ; preds = %for.body664 + br label %sw.epilog + +sw.epilog: ; preds = %for.end670, %for.end96 + ret void + +polly.loop_header228: ; preds = %polly.loop_header228, %for.cond375.preheader + %_p_splat_one = load <1 x i16>, <1 x i16>* undef, align 8 + %_p_splat = shufflevector <1 x i16> %_p_splat_one, <1 x i16> %_p_splat_one, <4 x i32> zeroinitializer + %0 = trunc <4 x i16> %_p_splat to <4 x i8> + store <4 x i8> %0, <4 x i8>* undef, align 8 + br label %polly.loop_header228 +} diff --git a/test/CodeGen/Hexagon/vect/vect-insert-extract-elt.ll b/test/CodeGen/Hexagon/vect/vect-insert-extract-elt.ll new file mode 100644 index 000000000000..baf0cd748f7f --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-insert-extract-elt.ll @@ -0,0 +1,71 @@ +; RUN: llc -march=hexagon < %s +; Used to fail with an infinite recursion in the insn selection. 
+target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32" +target triple = "hexagon-unknown-linux-gnu" + +%struct.elt = type { [2 x [4 x %struct.block]] } +%struct.block = type { [2 x i16] } + +define void @foo(%struct.elt* noalias nocapture %p0, %struct.elt* noalias nocapture %p1) nounwind { +entry: + %arrayidx1 = getelementptr inbounds %struct.elt, %struct.elt* %p1, i32 0, i32 0, i32 0, i32 3 + %arrayidx4 = getelementptr inbounds %struct.elt, %struct.elt* %p1, i32 0, i32 0, i32 0, i32 2 + %arrayidx7 = getelementptr inbounds %struct.elt, %struct.elt* %p0, i32 0, i32 0, i32 0, i32 3 + %0 = bitcast %struct.block* %arrayidx7 to i32* + %1 = bitcast %struct.block* %arrayidx4 to i32* + %2 = load i32, i32* %0, align 4 + store i32 %2, i32* %1, align 4 + %3 = bitcast %struct.block* %arrayidx1 to i32* + store i32 %2, i32* %3, align 4 + %arrayidx10 = getelementptr inbounds %struct.elt, %struct.elt* %p1, i32 0, i32 0, i32 0, i32 1 + %arrayidx16 = getelementptr inbounds %struct.elt, %struct.elt* %p0, i32 0, i32 0, i32 0, i32 2 + %4 = bitcast %struct.block* %arrayidx16 to i32* + %5 = bitcast %struct.elt* %p1 to i32* + %6 = load i32, i32* %4, align 4 + store i32 %6, i32* %5, align 4 + %7 = bitcast %struct.block* %arrayidx10 to i32* + store i32 %6, i32* %7, align 4 + %p_arrayidx26 = getelementptr %struct.elt, %struct.elt* %p0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1 + %p_arrayidx2632 = getelementptr %struct.elt, %struct.elt* %p0, i32 0, i32 0, i32 0, i32 1, i32 0, i32 1 + %p_arrayidx2633 = getelementptr %struct.elt, %struct.elt* %p0, i32 0, i32 0, i32 0, i32 2, i32 0, i32 1 + %p_arrayidx2634 = getelementptr %struct.elt, %struct.elt* %p0, i32 0, i32 0, i32 0, i32 3, i32 0, i32 1 + %p_arrayidx20 = getelementptr %struct.elt, %struct.elt* %p1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1 + %p_arrayidx2035 = getelementptr %struct.elt, %struct.elt* %p1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 1 + %p_arrayidx2036 = getelementptr %struct.elt, %struct.elt* %p1, i32 0, i32 0, i32 0, i32 2, i32 0, i32 1 + %p_arrayidx2037 = getelementptr %struct.elt, %struct.elt* %p1, i32 0, i32 0, i32 0, i32 3, i32 0, i32 1 + %8 = lshr i32 %6, 16 + %9 = trunc i32 %8 to i16 + %_p_vec_ = insertelement <4 x i16> undef, i16 %9, i32 0 + %_p_vec_39 = insertelement <4 x i16> %_p_vec_, i16 %9, i32 1 + %10 = lshr i32 %2, 16 + %11 = trunc i32 %10 to i16 + %_p_vec_41 = insertelement <4 x i16> %_p_vec_39, i16 %11, i32 2 + %_p_vec_43 = insertelement <4 x i16> %_p_vec_41, i16 %11, i32 3 + %shlp_vec = shl <4 x i16> %_p_vec_43, <i16 1, i16 1, i16 1, i16 1> + %12 = extractelement <4 x i16> %shlp_vec, i32 0 + store i16 %12, i16* %p_arrayidx20, align 2 + %13 = extractelement <4 x i16> %shlp_vec, i32 1 + store i16 %13, i16* %p_arrayidx2035, align 2 + %14 = extractelement <4 x i16> %shlp_vec, i32 2 + store i16 %14, i16* %p_arrayidx2036, align 2 + %15 = extractelement <4 x i16> %shlp_vec, i32 3 + store i16 %15, i16* %p_arrayidx2037, align 2 + %_p_scalar_44 = load i16, i16* %p_arrayidx26, align 2 + %_p_vec_45 = insertelement <4 x i16> undef, i16 %_p_scalar_44, i32 0 + %_p_scalar_46 = load i16, i16* %p_arrayidx2632, align 2 + %_p_vec_47 = insertelement <4 x i16> %_p_vec_45, i16 %_p_scalar_46, i32 1 + %_p_scalar_48 = load i16, i16* %p_arrayidx2633, align 2 + %_p_vec_49 = insertelement <4 x i16> %_p_vec_47, i16 %_p_scalar_48, i32 2 + %_p_scalar_50 = load i16, i16* %p_arrayidx2634, align 2 + %_p_vec_51 = insertelement <4 x i16> %_p_vec_49, i16 %_p_scalar_50, i32 3 + %shl28p_vec = shl <4 x i16> 
%_p_vec_51, <i16 1, i16 1, i16 1, i16 1> + %16 = extractelement <4 x i16> %shl28p_vec, i32 0 + store i16 %16, i16* %p_arrayidx26, align 2 + %17 = extractelement <4 x i16> %shl28p_vec, i32 1 + store i16 %17, i16* %p_arrayidx2632, align 2 + %18 = extractelement <4 x i16> %shl28p_vec, i32 2 + store i16 %18, i16* %p_arrayidx2633, align 2 + %19 = extractelement <4 x i16> %shl28p_vec, i32 3 + store i16 %19, i16* %p_arrayidx2634, align 2 + ret void +} diff --git a/test/CodeGen/Hexagon/vect/vect-load-1.ll b/test/CodeGen/Hexagon/vect/vect-load-1.ll new file mode 100644 index 000000000000..fbaf61d545da --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-load-1.ll @@ -0,0 +1,26 @@ +; RUN: llc -march=hexagon < %s +; Used to fail with "Cannot select: v2i32,ch = load 0x16c5890, 0x16f76e0, 0x16f76e0<LD2[undef](align=8), sext from v2i8>", 0x16c5890, 0x16f76e0, 0x16f76e0<LD2[undef](align=8), sext from v2i8>" + +; ModuleID = 'bugpoint-reduced-simplified.bc' +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-a0:0-n32" +target triple = "hexagon-unknown-linux-gnu" + +define void @foo() nounwind { +entry: + br label %polly.loop_header + +polly.loop_after: ; preds = %polly.loop_header + unreachable + +polly.loop_header: ; preds = %polly.loop_body, %entry + %0 = icmp sle i32 undef, 63 + br i1 %0, label %polly.loop_body, label %polly.loop_after + +polly.loop_body: ; preds = %polly.loop_header + %_p_vec_full = load <2 x i8>, <2 x i8>* undef, align 8 + %1 = sext <2 x i8> %_p_vec_full to <2 x i32> + %p_vec = mul <2 x i32> %1, <i32 3, i32 3> + %mulp_vec = add <2 x i32> %p_vec, <i32 21, i32 21> + store <2 x i32> %mulp_vec, <2 x i32>* undef, align 8 + br label %polly.loop_header +} diff --git a/test/CodeGen/Hexagon/vect/vect-load.ll b/test/CodeGen/Hexagon/vect/vect-load.ll new file mode 100644 index 000000000000..6bdcc6d3de61 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-load.ll @@ -0,0 +1,76 @@ +; RUN: llc -march=hexagon < %s +; Used to fail with "Cannot select: 0x16cf370: v2i16,ch = load" + +; ModuleID = 'bugpoint-reduced-simplified.bc' +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-a0:0-n32" +target triple = "hexagon-unknown-linux-gnu" + +%struct.ext_hdrs.10.65.142.274.307.318.329.681.692.703.714.725.736.758.791.802.846.857.868.879.890.901.945.956.958 = type { i8, i8, i8, i8, i8, i8, i16, i32, [8 x %struct.hcdc_ext_vec.9.64.141.273.306.317.328.680.691.702.713.724.735.757.790.801.845.856.867.878.889.900.944.955.957] } +%struct.hcdc_ext_vec.9.64.141.273.306.317.328.680.691.702.713.724.735.757.790.801.845.856.867.878.889.900.944.955.957 = type { i8, i8, i16 } + +define void @foo(%struct.ext_hdrs.10.65.142.274.307.318.329.681.692.703.714.725.736.758.791.802.846.857.868.879.890.901.945.956.958* %hc_ext_info) nounwind { +entry: + br i1 undef, label %if.end, label %if.then + +if.then: ; preds = %entry + unreachable + +if.end: ; preds = %entry + br i1 undef, label %if.end5, label %if.then3 + +if.then3: ; preds = %if.end + br label %if.end5 + +if.end5: ; preds = %if.then3, %if.end + %add.ptr = getelementptr inbounds %struct.ext_hdrs.10.65.142.274.307.318.329.681.692.703.714.725.736.758.791.802.846.857.868.879.890.901.945.956.958, %struct.ext_hdrs.10.65.142.274.307.318.329.681.692.703.714.725.736.758.791.802.846.857.868.879.890.901.945.956.958* %hc_ext_info, i32 0, i32 8, i32 0 + %add.ptr22 = getelementptr inbounds %struct.ext_hdrs.10.65.142.274.307.318.329.681.692.703.714.725.736.758.791.802.846.857.868.879.890.901.945.956.958, 
%struct.ext_hdrs.10.65.142.274.307.318.329.681.692.703.714.725.736.758.791.802.846.857.868.879.890.901.945.956.958* null, i32 0, i32 8, i32 undef + br label %while.cond + +while.cond: ; preds = %if.end419, %if.end5 + %gre_chksum.0 = phi <2 x i8> [ undef, %if.end5 ], [ %gre_chksum.2, %if.end419 ] + %cmp23 = icmp ult %struct.hcdc_ext_vec.9.64.141.273.306.317.328.680.691.702.713.724.735.757.790.801.845.856.867.878.889.900.944.955.957* null, %add.ptr + %cmp25 = icmp ult %struct.hcdc_ext_vec.9.64.141.273.306.317.328.680.691.702.713.724.735.757.790.801.845.856.867.878.889.900.944.955.957* null, %add.ptr22 + %sel1 = and i1 %cmp23, %cmp25 + br i1 %sel1, label %while.body, label %while.end422 + +while.body: ; preds = %while.cond + switch i8 undef, label %if.end419 [ + i8 5, label %if.then70 + i8 3, label %if.then70 + i8 2, label %if.then70 + i8 1, label %if.then70 + i8 0, label %if.then70 + i8 4, label %if.then93 + i8 6, label %if.then195 + ] + +if.then70: ; preds = %while.body, %while.body, %while.body, %while.body, %while.body + unreachable + +if.then93: ; preds = %while.body + unreachable + +if.then195: ; preds = %while.body + br i1 undef, label %if.end274, label %if.then202 + +if.then202: ; preds = %if.then195 + br label %while.body222 + +while.body222: ; preds = %while.body222, %if.then202 + br i1 undef, label %if.end240, label %while.body222 + +if.end240: ; preds = %while.body222 + %_p_vec_full100 = load <2 x i8>, <2 x i8>* undef, align 8 + br label %if.end274 + +if.end274: ; preds = %if.end240, %if.then195 + %gre_chksum.1 = phi <2 x i8> [ %gre_chksum.0, %if.then195 ], [ %_p_vec_full100, %if.end240 ] + br label %if.end419 + +if.end419: ; preds = %if.end274, %while.body + %gre_chksum.2 = phi <2 x i8> [ %gre_chksum.0, %while.body ], [ %gre_chksum.1, %if.end274 ] + br label %while.cond + +while.end422: ; preds = %while.cond + ret void +} diff --git a/test/CodeGen/Hexagon/vect/vect-loadv4i16.ll b/test/CodeGen/Hexagon/vect/vect-loadv4i16.ll new file mode 100644 index 000000000000..16591ef68536 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-loadv4i16.ll @@ -0,0 +1,73 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s + +; Check that store is post-incremented. 
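; --- Editorial note, not part of the checked-in test ---
; Post-increment addressing folds the pointer update into the access itself,
; e.g. "r1 = memh(r0++#2)" loads a halfword and advances r0 by 2 in one
; instruction.  A strided walk over i16 data, as in the hypothetical loop
; below, is the kind of IR that gives the backend a chance to form it:
define i32 @sketch_postinc(i16* %p, i32 %n) nounwind {
entry:
  br label %loop

loop:
  %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
  %q = phi i16* [ %p, %entry ], [ %q.next, %loop ]
  %acc = phi i32 [ 0, %entry ], [ %acc.next, %loop ]
  %v = load i16, i16* %q, align 2
  %ext = sext i16 %v to i32
  %acc.next = add i32 %acc, %ext
  %q.next = getelementptr i16, i16* %q, i32 1
  %i.next = add i32 %i, 1
  %done = icmp eq i32 %i.next, %n
  br i1 %done, label %exit, label %loop

exit:
  ret i32 %acc.next
}
; --- end of editorial note ---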
+; CHECK: memuh(r{{[0-9]+}} + {{ *}}#6{{ *}}) +; CHECK: combine(r{{[0-9]+}}{{ *}},{{ *}}r{{[0-9]+}}{{ *}}) +; CHECK: vaddh + +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32" +target triple = "hexagon" + +define void @matrix_add_const(i32 %N, i16* nocapture %A, i16 signext %val) #0 { +entry: + %cmp5 = icmp eq i32 %N, 0 + br i1 %cmp5, label %for.end, label %polly.cond + +for.end.loopexit: ; preds = %polly.stmt.for.body29 + br label %for.end + +for.end: ; preds = %for.end.loopexit, %polly.loop_header24.preheader, %entry + ret void + +polly.cond: ; preds = %entry + %0 = icmp sgt i32 %N, 3 + br i1 %0, label %polly.then, label %polly.loop_header24.preheader + +polly.then: ; preds = %polly.cond + %1 = add i32 %N, -1 + %leftover_lb = and i32 %1, -4 + %2 = icmp sgt i32 %leftover_lb, 0 + br i1 %2, label %polly.loop_body.lr.ph, label %polly.loop_header24.preheader + +polly.loop_body.lr.ph: ; preds = %polly.then + %3 = insertelement <4 x i16> undef, i16 %val, i32 0 + %4 = insertelement <4 x i16> %3, i16 %val, i32 1 + %5 = insertelement <4 x i16> %4, i16 %val, i32 2 + %6 = insertelement <4 x i16> %5, i16 %val, i32 3 + br label %polly.loop_body + +polly.loop_header24.preheader.loopexit: ; preds = %polly.loop_body + br label %polly.loop_header24.preheader + +polly.loop_header24.preheader: ; preds = %polly.loop_header24.preheader.loopexit, %polly.then, %polly.cond + %polly.loopiv27.ph = phi i32 [ 0, %polly.cond ], [ %leftover_lb, %polly.then ], [ %leftover_lb, %polly.loop_header24.preheader.loopexit ] + %7 = icmp slt i32 %polly.loopiv27.ph, %N + br i1 %7, label %polly.stmt.for.body29.preheader, label %for.end + +polly.stmt.for.body29.preheader: ; preds = %polly.loop_header24.preheader + br label %polly.stmt.for.body29 + +polly.loop_body: ; preds = %polly.loop_body.lr.ph, %polly.loop_body + %p_arrayidx.phi = phi i16* [ %A, %polly.loop_body.lr.ph ], [ %p_arrayidx.inc, %polly.loop_body ] + %polly.loopiv34 = phi i32 [ 0, %polly.loop_body.lr.ph ], [ %polly.next_loopiv, %polly.loop_body ] + %polly.next_loopiv = add nsw i32 %polly.loopiv34, 4 + %vector_ptr = bitcast i16* %p_arrayidx.phi to <4 x i16>* + %_p_vec_full = load <4 x i16>, <4 x i16>* %vector_ptr, align 2 + %addp_vec = add <4 x i16> %_p_vec_full, %6 + store <4 x i16> %addp_vec, <4 x i16>* %vector_ptr, align 2 + %8 = icmp slt i32 %polly.next_loopiv, %leftover_lb + %p_arrayidx.inc = getelementptr i16, i16* %p_arrayidx.phi, i32 4 + br i1 %8, label %polly.loop_body, label %polly.loop_header24.preheader.loopexit + +polly.stmt.for.body29: ; preds = %polly.stmt.for.body29.preheader, %polly.stmt.for.body29 + %polly.loopiv2733 = phi i32 [ %polly.next_loopiv28, %polly.stmt.for.body29 ], [ %polly.loopiv27.ph, %polly.stmt.for.body29.preheader ] + %polly.next_loopiv28 = add nsw i32 %polly.loopiv2733, 1 + %p_arrayidx30 = getelementptr i16, i16* %A, i32 %polly.loopiv2733 + %_p_scalar_ = load i16, i16* %p_arrayidx30, align 2 + %p_add = add i16 %_p_scalar_, %val + store i16 %p_add, i16* %p_arrayidx30, align 2 + %exitcond = icmp eq i32 %polly.next_loopiv28, %N + br i1 %exitcond, label %for.end.loopexit, label %polly.stmt.for.body29 +} + +attributes #0 = { nounwind "fp-contract-model"="standard" "no-frame-pointer-elim-non-leaf" "realign-stack" "relocation-model"="static" "ssp-buffers-size"="8" } diff --git a/test/CodeGen/Hexagon/vect/vect-mul-v2i16.ll b/test/CodeGen/Hexagon/vect/vect-mul-v2i16.ll new file mode 100644 index 000000000000..f1a80115cb61 --- /dev/null +++ 
b/test/CodeGen/Hexagon/vect/vect-mul-v2i16.ll @@ -0,0 +1,9 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; CHECK: vmpyh +; CHECK: vtrunewh + +define <2 x i16> @t_i2x16(<2 x i16> %a, <2 x i16> %b) nounwind { +entry: + %0 = mul <2 x i16> %a, %b + ret <2 x i16> %0 +} diff --git a/test/CodeGen/Hexagon/vect/vect-mul-v2i32.ll b/test/CodeGen/Hexagon/vect/vect-mul-v2i32.ll new file mode 100644 index 000000000000..1d439dd37e14 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-mul-v2i32.ll @@ -0,0 +1,9 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; CHECK: mpyi +; CHECK: mpyi + +define <2 x i32> @t_i2x32(<2 x i32> %a, <2 x i32> %b) nounwind { +entry: + %0 = mul <2 x i32> %a, %b + ret <2 x i32> %0 +} diff --git a/test/CodeGen/Hexagon/vect/vect-mul-v4i16.ll b/test/CodeGen/Hexagon/vect/vect-mul-v4i16.ll new file mode 100644 index 000000000000..a50d7f8adc17 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-mul-v4i16.ll @@ -0,0 +1,10 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; CHECK: vmpyh +; CHECK: vmpyh +; CHECK: vtrunewh + +define <4 x i16> @t_i4x16(<4 x i16> %a, <4 x i16> %b) nounwind { +entry: + %0 = mul <4 x i16> %a, %b + ret <4 x i16> %0 +} diff --git a/test/CodeGen/Hexagon/vect/vect-mul-v4i8.ll b/test/CodeGen/Hexagon/vect/vect-mul-v4i8.ll new file mode 100644 index 000000000000..d60d01460785 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-mul-v4i8.ll @@ -0,0 +1,9 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s +; CHECK: vmpybsu +; CHECK: vtrunehb + +define <4 x i8> @t_i4x8(<4 x i8> %a, <4 x i8> %b) nounwind { +entry: + %0 = mul <4 x i8> %a, %b + ret <4 x i8> %0 +} diff --git a/test/CodeGen/Hexagon/vect/vect-mul-v8i8.ll b/test/CodeGen/Hexagon/vect/vect-mul-v8i8.ll new file mode 100644 index 000000000000..a84cd00234ea --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-mul-v8i8.ll @@ -0,0 +1,9 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s +; CHECK: vmpybsu +; CHECK: vmpybsu + +define <8 x i8> @t_i8x8(<8 x i8> %a, <8 x i8> %b) nounwind { +entry: + %0 = mul <8 x i8> %a, %b + ret <8 x i8> %0 +} diff --git a/test/CodeGen/Hexagon/vect/vect-no-tfrs-1.ll b/test/CodeGen/Hexagon/vect/vect-no-tfrs-1.ll new file mode 100644 index 000000000000..550b0f81d33a --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-no-tfrs-1.ll @@ -0,0 +1,8 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; CHECK-NOT: r1:0 = r1:0 + +define <4 x i16> @t_i4x16(<4 x i16> %a, <4 x i16> %b) nounwind { +entry: + %0 = mul <4 x i16> %a, %b + ret <4 x i16> %0 +} diff --git a/test/CodeGen/Hexagon/vect/vect-no-tfrs.ll b/test/CodeGen/Hexagon/vect/vect-no-tfrs.ll new file mode 100644 index 000000000000..9081f18b3c27 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-no-tfrs.ll @@ -0,0 +1,8 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; CHECK-NOT: r1:0 = combine(r1, r0) + +define <4 x i8> @t_i4x8(<4 x i8> %a, <4 x i8> %b) nounwind { +entry: + %0 = mul <4 x i8> %a, %b + ret <4 x i8> %0 +} diff --git a/test/CodeGen/Hexagon/vect/vect-packhl.ll b/test/CodeGen/Hexagon/vect/vect-packhl.ll new file mode 100644 index 000000000000..dfdb019b677c --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-packhl.ll @@ -0,0 +1,10 @@ +; Extracted from test/CodeGen/Generic/vector-casts.ll: used to loop indefinitely. 
+; RUN: llc -march=hexagon < %s | FileCheck %s +; CHECK: packhl + +define void @a(<2 x double>* %p, <2 x i8>* %q) { + %t = load <2 x double>, <2 x double>* %p + %r = fptosi <2 x double> %t to <2 x i8> + store <2 x i8> %r, <2 x i8>* %q + ret void +} diff --git a/test/CodeGen/Hexagon/vect/vect-shift-imm.ll b/test/CodeGen/Hexagon/vect/vect-shift-imm.ll new file mode 100644 index 000000000000..4861181d4125 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-shift-imm.ll @@ -0,0 +1,41 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s --check-prefix=CHECK-ASLW +; RUN: llc -march=hexagon < %s | FileCheck %s --check-prefix=CHECK-ASRW +; RUN: llc -march=hexagon < %s | FileCheck %s --check-prefix=CHECK-LSRW +; RUN: llc -march=hexagon < %s | FileCheck %s --check-prefix=CHECK-ASLH +; RUN: llc -march=hexagon < %s | FileCheck %s --check-prefix=CHECK-ASRH +; RUN: llc -march=hexagon < %s | FileCheck %s --check-prefix=CHECK-LSRH +; +; Make sure that the instructions with immediate operands are generated. +; CHECK-ASLW: vaslw({{.*}}, #9) +; CHECK-ASRW: vasrw({{.*}}, #8) +; CHECK-LSRW: vlsrw({{.*}}, #7) +; CHECK-ASLH: vaslh({{.*}}, #6) +; CHECK-ASRH: vasrh({{.*}}, #5) +; CHECK-LSRH: vlsrh({{.*}}, #4) + +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32" +target triple = "hexagon" + +define i64 @foo(i64 %x) nounwind readnone { +entry: + %0 = tail call i64 @llvm.hexagon.S2.asl.i.vw(i64 %x, i32 9) + %1 = tail call i64 @llvm.hexagon.S2.asr.i.vw(i64 %x, i32 8) + %2 = tail call i64 @llvm.hexagon.S2.lsr.i.vw(i64 %x, i32 7) + %3 = tail call i64 @llvm.hexagon.S2.asl.i.vh(i64 %x, i32 6) + %4 = tail call i64 @llvm.hexagon.S2.asr.i.vh(i64 %x, i32 5) + %5 = tail call i64 @llvm.hexagon.S2.lsr.i.vh(i64 %x, i32 4) + %add = add i64 %1, %0 + %add1 = add i64 %add, %2 + %add2 = add i64 %add1, %3 + %add3 = add i64 %add2, %4 + %add4 = add i64 %add3, %5 + ret i64 %add4 +} + +declare i64 @llvm.hexagon.S2.asl.i.vw(i64, i32) nounwind readnone +declare i64 @llvm.hexagon.S2.asr.i.vw(i64, i32) nounwind readnone +declare i64 @llvm.hexagon.S2.lsr.i.vw(i64, i32) nounwind readnone +declare i64 @llvm.hexagon.S2.asl.i.vh(i64, i32) nounwind readnone +declare i64 @llvm.hexagon.S2.asr.i.vh(i64, i32) nounwind readnone +declare i64 @llvm.hexagon.S2.lsr.i.vh(i64, i32) nounwind readnone + diff --git a/test/CodeGen/Hexagon/vect/vect-shuffle.ll b/test/CodeGen/Hexagon/vect/vect-shuffle.ll new file mode 100644 index 000000000000..9d80df2e0887 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-shuffle.ll @@ -0,0 +1,47 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s + +; Check that store is post-incremented. 
+; CHECK-NOT: extractu +; CHECK-NOT: insert +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32" +target triple = "hexagon" + +define i32 @foo(i16* noalias nocapture %src, i16* noalias nocapture %dstImg, i32 %width, i32 %idx, i32 %flush) #0 { +entry: + %0 = tail call i64 @llvm.hexagon.A2.combinew(i32 %flush, i32 %flush) + %1 = bitcast i64 %0 to <2 x i32> + br label %polly.loop_body + +polly.loop_after: ; preds = %polly.loop_body + ret i32 0 + +polly.loop_body: ; preds = %entry, %polly.loop_body + %p_arrayidx35.phi = phi i16* [ %dstImg, %entry ], [ %p_arrayidx35.inc, %polly.loop_body ] + %p_arrayidx.phi = phi i16* [ %src, %entry ], [ %p_arrayidx.inc, %polly.loop_body ] + %polly.loopiv56 = phi i32 [ 0, %entry ], [ %polly.next_loopiv, %polly.loop_body ] + %polly.next_loopiv = add nsw i32 %polly.loopiv56, 4 + %vector_ptr = bitcast i16* %p_arrayidx.phi to <4 x i16>* + %_p_vec_full = load <4 x i16>, <4 x i16>* %vector_ptr, align 2 + %_high_half = shufflevector <4 x i16> %_p_vec_full, <4 x i16> undef, <2 x i32> <i32 2, i32 3> + %_low_half = shufflevector <4 x i16> %_p_vec_full, <4 x i16> undef, <2 x i32> <i32 0, i32 1> + %2 = zext <2 x i16> %_low_half to <2 x i32> + %3 = zext <2 x i16> %_high_half to <2 x i32> + %add33p_vec = add <2 x i32> %2, %1 + %add33p_vec48 = add <2 x i32> %3, %1 + %4 = trunc <2 x i32> %add33p_vec to <2 x i16> + %5 = trunc <2 x i32> %add33p_vec48 to <2 x i16> + %_combined_vec = shufflevector <2 x i16> %4, <2 x i16> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %vector_ptr49 = bitcast i16* %p_arrayidx35.phi to <4 x i16>* + store <4 x i16> %_combined_vec, <4 x i16>* %vector_ptr49, align 2 + %6 = icmp slt i32 %polly.next_loopiv, 1024 + %p_arrayidx35.inc = getelementptr i16, i16* %p_arrayidx35.phi, i32 4 + %p_arrayidx.inc = getelementptr i16, i16* %p_arrayidx.phi, i32 4 + br i1 %6, label %polly.loop_body, label %polly.loop_after +} + +declare i64 @llvm.hexagon.A2.combinew(i32, i32) #1 + +attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { nounwind readnone } + + diff --git a/test/CodeGen/Hexagon/vect/vect-splat.ll b/test/CodeGen/Hexagon/vect/vect-splat.ll new file mode 100644 index 000000000000..3613dbf6fdd1 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-splat.ll @@ -0,0 +1,16 @@ +; Extracted from test/CodeGen/Generic/vector.ll: used to loop indefinitely. 
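; --- Editorial note, not part of the checked-in test ---
; Splatting one 32-bit scalar across a 64-bit vector half is what the combine
; checked in this test corresponds to (a register pair built with
; combine(r, r)).  The same splat written with shufflevector, using
; hypothetical names:
define <2 x i32> @sketch_splat(i32 %x) nounwind {
entry:
  %ins = insertelement <2 x i32> undef, i32 %x, i32 0
  %splat = shufflevector <2 x i32> %ins, <2 x i32> undef, <2 x i32> zeroinitializer
  ret <2 x i32> %splat
}
; --- end of editorial note ---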
+; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s +; CHECK: combine + +%i4 = type <4 x i32> + +define void @splat_i4(%i4* %P, %i4* %Q, i32 %X) { + %tmp = insertelement %i4 undef, i32 %X, i32 0 ; <%i4> [#uses=1] + %tmp2 = insertelement %i4 %tmp, i32 %X, i32 1 ; <%i4> [#uses=1] + %tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2 ; <%i4> [#uses=1] + %tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3 ; <%i4> [#uses=1] + %q = load %i4, %i4* %Q ; <%i4> [#uses=1] + %R = add %i4 %q, %tmp6 ; <%i4> [#uses=1] + store %i4 %R, %i4* %P + ret void +} diff --git a/test/CodeGen/Hexagon/vect/vect-store-v2i16.ll b/test/CodeGen/Hexagon/vect/vect-store-v2i16.ll new file mode 100644 index 000000000000..1de3058e68a6 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-store-v2i16.ll @@ -0,0 +1,51 @@ +; RUN: llc -march=hexagon < %s +; Used to fail with: "Cannot select: 0x3bab680: ch = store <ST4[%lsr.iv522525], trunc to v2i16> +; ModuleID = 'bugpoint-reduced-simplified.bc' +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-a0:0-n32" +target triple = "hexagon-unknown-linux-gnu" + +define void @foobar() nounwind { +entry: + br label %for.cond7.preheader.single_entry.i + +for.cond7.preheader.single_entry.i: ; preds = %for.cond7.preheader.single_entry.i, %entry + %exitcond72.i = icmp eq i32 undef, 64 + br i1 %exitcond72.i, label %foo_32.exit, label %for.cond7.preheader.single_entry.i + +foo_32.exit: ; preds = %for.cond7.preheader.single_entry.i + br label %for.body.i428 + +for.body.i428: ; preds = %for.body.i428, %foo_32.exit + br i1 undef, label %foo_12.exit, label %for.body.i428 + +foo_12.exit: ; preds = %for.body.i428 + br label %for.body.i.i + +for.body.i.i: ; preds = %for.body.i.i, %foo_12.exit + br i1 undef, label %foo_14.exit, label %for.body.i.i + +foo_14.exit: ; preds = %for.body.i.i + br label %for.body + +for.body: ; preds = %for.body, %foo_14.exit + br i1 undef, label %for.end, label %for.body + +for.end: ; preds = %for.body + %storemerge294 = select i1 undef, i32 32767, i32 undef + %_p_splat_one386 = insertelement <1 x i32> undef, i32 %storemerge294, i32 0 + %_p_splat387 = shufflevector <1 x i32> %_p_splat_one386, <1 x i32> undef, <2 x i32> zeroinitializer + br label %polly.loop_body377 + +polly.loop_after378: ; preds = %polly.loop_body377 + unreachable + +polly.loop_body377: ; preds = %polly.loop_body377, %for.end + %_p_vec_full384 = load <2 x i16>, <2 x i16>* undef, align 4 + %0 = sext <2 x i16> %_p_vec_full384 to <2 x i32> + %mulp_vec = mul <2 x i32> %0, %_p_splat387 + %shr100293p_vec = lshr <2 x i32> %mulp_vec, <i32 15, i32 15> + %1 = trunc <2 x i32> %shr100293p_vec to <2 x i16> + store <2 x i16> %1, <2 x i16>* undef, align 4 + br i1 undef, label %polly.loop_body377, label %polly.loop_after378 +} + diff --git a/test/CodeGen/Hexagon/vect/vect-truncate.ll b/test/CodeGen/Hexagon/vect/vect-truncate.ll new file mode 100644 index 000000000000..fd75bbd58e36 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-truncate.ll @@ -0,0 +1,42 @@ +; RUN: llc -march=hexagon < %s +; Used to fail with "Cannot select: 0x16cb7f0: v2i16 = truncate" + +; ModuleID = 'bugpoint-reduced-simplified.bc' +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-a0:0-n32" +target triple = "hexagon-unknown-linux-gnu" + +define void @Autocorr() nounwind { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + br i1 undef, label %polly.loop_header43, label %for.body + +do.cond: ; preds = %polly.loop_header + unreachable + +do.end: ; 
preds = %polly.loop_after45 + ret void + +polly.loop_header: ; preds = %polly.loop_after45, %polly.loop_body + %0 = icmp sle i32 undef, 239 + br i1 %0, label %polly.loop_body, label %do.cond + +polly.loop_body: ; preds = %polly.loop_header + %p_25 = call i32 @llvm.hexagon.SI.to.SXTHI.asrh(i32 undef) + %1 = insertelement <4 x i32> undef, i32 %p_25, i32 3 + %2 = trunc <4 x i32> %1 to <4 x i16> + store <4 x i16> %2, <4 x i16>* undef, align 8 + br label %polly.loop_header + +polly.loop_after45: ; preds = %polly.loop_header43 + br i1 undef, label %polly.loop_header, label %do.end + +polly.loop_header43: ; preds = %polly.loop_body44, %for.body + br i1 undef, label %polly.loop_body44, label %polly.loop_after45 + +polly.loop_body44: ; preds = %polly.loop_header43 + br label %polly.loop_header43 +} + +declare i32 @llvm.hexagon.SI.to.SXTHI.asrh(i32) nounwind readnone diff --git a/test/CodeGen/Hexagon/vect/vect-vaddb-1.ll b/test/CodeGen/Hexagon/vect/vect-vaddb-1.ll new file mode 100644 index 000000000000..e646f8efdd5e --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-vaddb-1.ll @@ -0,0 +1,8 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; CHECK: vaddub + +define <4 x i8> @t_i4x8(<4 x i8> %a, <4 x i8> %b) nounwind { +entry: + %0 = add <4 x i8> %a, %b + ret <4 x i8> %0 +} diff --git a/test/CodeGen/Hexagon/vect/vect-vaddb.ll b/test/CodeGen/Hexagon/vect/vect-vaddb.ll new file mode 100644 index 000000000000..459546991903 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-vaddb.ll @@ -0,0 +1,8 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; CHECK: vaddub + +define <8 x i8> @t_i8x8(<8 x i8> %a, <8 x i8> %b) nounwind { +entry: + %0 = add <8 x i8> %a, %b + ret <8 x i8> %0 +} diff --git a/test/CodeGen/Hexagon/vect/vect-vaddh-1.ll b/test/CodeGen/Hexagon/vect/vect-vaddh-1.ll new file mode 100644 index 000000000000..1b43d4fb6cc8 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-vaddh-1.ll @@ -0,0 +1,8 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; CHECK: vaddh + +define <4 x i16> @t_i4x16(<4 x i16> %a, <4 x i16> %b) nounwind { +entry: + %0 = add <4 x i16> %a, %b + ret <4 x i16> %0 +} diff --git a/test/CodeGen/Hexagon/vect/vect-vaddh.ll b/test/CodeGen/Hexagon/vect/vect-vaddh.ll new file mode 100644 index 000000000000..32bf3cadacdc --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-vaddh.ll @@ -0,0 +1,8 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; CHECK: vaddh + +define <2 x i16> @t_i2x16(<2 x i16> %a, <2 x i16> %b) nounwind { +entry: + %0 = add <2 x i16> %a, %b + ret <2 x i16> %0 +} diff --git a/test/CodeGen/Hexagon/vect/vect-vaddw.ll b/test/CodeGen/Hexagon/vect/vect-vaddw.ll new file mode 100644 index 000000000000..a8401345ab26 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-vaddw.ll @@ -0,0 +1,8 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; CHECK: vaddw + +define <2 x i32> @t_i2x32(<2 x i32> %a, <2 x i32> %b) nounwind { +entry: + %0 = add <2 x i32> %a, %b + ret <2 x i32> %0 +} diff --git a/test/CodeGen/Hexagon/vect/vect-vaslw.ll b/test/CodeGen/Hexagon/vect/vect-vaslw.ll new file mode 100644 index 000000000000..c662b0bd3de2 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-vaslw.ll @@ -0,0 +1,33 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; CHECK: vaslw + +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32" +target triple = "hexagon-unknown-linux-gnu" + +define void @foo(i16* nocapture %v) nounwind { +entry: + %p_arrayidx = getelementptr i16, i16* %v, i32 4 + %vector_ptr = bitcast i16* %p_arrayidx 
to <4 x i16>* + %_p_vec_full = load <4 x i16>, <4 x i16>* %vector_ptr, align 2 + %_high_half = shufflevector <4 x i16> %_p_vec_full, <4 x i16> undef, <2 x i32> <i32 2, i32 3> + %_low_half = shufflevector <4 x i16> %_p_vec_full, <4 x i16> undef, <2 x i32> <i32 0, i32 1> + %0 = sext <2 x i16> %_low_half to <2 x i32> + %1 = sext <2 x i16> %_high_half to <2 x i32> + %shr6p_vec = shl <2 x i32> %0, <i32 2, i32 2> + %shr6p_vec19 = shl <2 x i32> %1, <i32 2, i32 2> + %addp_vec = add <2 x i32> %shr6p_vec, <i32 34, i32 34> + %addp_vec20 = add <2 x i32> %shr6p_vec19, <i32 34, i32 34> + %vector_ptr21 = bitcast i16* %v to <4 x i16>* + %_p_vec_full22 = load <4 x i16>, <4 x i16>* %vector_ptr21, align 2 + %_high_half23 = shufflevector <4 x i16> %_p_vec_full22, <4 x i16> undef, <2 x i32> <i32 2, i32 3> + %_low_half24 = shufflevector <4 x i16> %_p_vec_full22, <4 x i16> undef, <2 x i32> <i32 0, i32 1> + %2 = zext <2 x i16> %_low_half24 to <2 x i32> + %3 = zext <2 x i16> %_high_half23 to <2 x i32> + %add3p_vec = add <2 x i32> %addp_vec, %2 + %add3p_vec25 = add <2 x i32> %addp_vec20, %3 + %4 = trunc <2 x i32> %add3p_vec to <2 x i16> + %5 = trunc <2 x i32> %add3p_vec25 to <2 x i16> + %_combined_vec = shufflevector <2 x i16> %4, <2 x i16> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + store <4 x i16> %_combined_vec, <4 x i16>* %vector_ptr21, align 2 + ret void +} diff --git a/test/CodeGen/Hexagon/vect/vect-vshifts.ll b/test/CodeGen/Hexagon/vect/vect-vshifts.ll new file mode 100644 index 000000000000..49ff812601ae --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-vshifts.ll @@ -0,0 +1,279 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s + +; Check that store is post-incremented. +; CHECK: r{{[0-9]+:[0-9]+}} = vasrw(r{{[0-9]+:[0-9]+}}, r{{[0-9]+}}) +; CHECK: r{{[0-9]+:[0-9]+}} = vaslw(r{{[0-9]+:[0-9]+}}, r{{[0-9]+}}) +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32" +target triple = "hexagon" + +define void @foo(i32* nocapture %buf, i32* nocapture %dest, i32 %offset, i32 %oddBlock, i32 %gb) #0 { +entry: + %0 = load i32, i32* %buf, align 4, !tbaa !0 + %shr = ashr i32 %0, %gb + store i32 %shr, i32* %buf, align 4, !tbaa !0 + %not.tobool = icmp eq i32 %oddBlock, 0 + %1 = sub i32 %offset, %oddBlock + %2 = zext i1 %not.tobool to i32 + %3 = and i32 %1, 7 + %4 = add i32 %2, %3 + %5 = add i32 %4, 8 + %p_sub8 = sub nsw i32 31, %gb + %6 = insertelement <2 x i32> undef, i32 %p_sub8, i32 0 + %7 = insertelement <2 x i32> %6, i32 %p_sub8, i32 1 + %8 = bitcast <2 x i32> %7 to i64 + %9 = tail call i64 @llvm.hexagon.S2.asl.i.vw(i64 %8, i32 1) + %10 = bitcast i64 %9 to <2 x i32> + %11 = tail call i64 @llvm.hexagon.A2.combinew(i32 -1, i32 -1) + %12 = bitcast i64 %11 to <2 x i32> + %sub12p_vec = add <2 x i32> %10, %12 + %p_22 = add i32 %4, 64 + %p_d.018 = getelementptr i32, i32* %dest, i32 %4 + %p_d.01823 = getelementptr i32, i32* %dest, i32 %p_22 + %p_25 = add i32 %4, 72 + %p_arrayidx14 = getelementptr i32, i32* %dest, i32 %5 + %p_arrayidx1426 = getelementptr i32, i32* %dest, i32 %p_25 + %_p_scalar_ = load i32, i32* %p_d.018, align 4 + %_p_vec_ = insertelement <2 x i32> undef, i32 %_p_scalar_, i32 0 + %_p_scalar_27 = load i32, i32* %p_d.01823, align 4 + %_p_vec_28 = insertelement <2 x i32> %_p_vec_, i32 %_p_scalar_27, i32 1 + %13 = bitcast <2 x i32> %_p_vec_28 to i64 + %14 = tail call i64 @llvm.hexagon.S2.asr.i.vw(i64 %13, i32 31) + %15 = bitcast i64 %14 to <2 x i32> + %shr9p_vec = ashr <2 x i32> %_p_vec_28, %7 + %xorp_vec = xor <2 x i32> %15, 
%sub12p_vec + %16 = bitcast <2 x i32> %shr9p_vec to i64 + %17 = tail call i32 @llvm.hexagon.A2.vcmpweq(i64 %14, i64 %16) + %18 = bitcast <2 x i32> %xorp_vec to i64 + %19 = tail call i64 @llvm.hexagon.C2.vmux(i32 %17, i64 %13, i64 %18) + %20 = tail call i64 @llvm.hexagon.S2.asl.r.vw(i64 %19, i32 %gb) + %21 = bitcast i64 %20 to <2 x i32> + %22 = extractelement <2 x i32> %21, i32 0 + store i32 %22, i32* %p_arrayidx14, align 4 + %23 = extractelement <2 x i32> %21, i32 1 + store i32 %23, i32* %p_arrayidx1426, align 4 + store i32 %22, i32* %p_d.018, align 4 + store i32 %23, i32* %p_d.01823, align 4 + %p_21.1 = add i32 %4, 128 + %p_22.1 = add i32 %4, 192 + %p_d.018.1 = getelementptr i32, i32* %dest, i32 %p_21.1 + %p_d.01823.1 = getelementptr i32, i32* %dest, i32 %p_22.1 + %p_24.1 = add i32 %4, 136 + %p_25.1 = add i32 %4, 200 + %p_arrayidx14.1 = getelementptr i32, i32* %dest, i32 %p_24.1 + %p_arrayidx1426.1 = getelementptr i32, i32* %dest, i32 %p_25.1 + %_p_scalar_.1 = load i32, i32* %p_d.018.1, align 4 + %_p_vec_.1 = insertelement <2 x i32> undef, i32 %_p_scalar_.1, i32 0 + %_p_scalar_27.1 = load i32, i32* %p_d.01823.1, align 4 + %_p_vec_28.1 = insertelement <2 x i32> %_p_vec_.1, i32 %_p_scalar_27.1, i32 1 + %24 = bitcast <2 x i32> %_p_vec_28.1 to i64 + %25 = tail call i64 @llvm.hexagon.S2.asr.i.vw(i64 %24, i32 31) + %26 = bitcast i64 %25 to <2 x i32> + %shr9p_vec.1 = ashr <2 x i32> %_p_vec_28.1, %7 + %xorp_vec.1 = xor <2 x i32> %26, %sub12p_vec + %27 = bitcast <2 x i32> %shr9p_vec.1 to i64 + %28 = tail call i32 @llvm.hexagon.A2.vcmpweq(i64 %25, i64 %27) + %29 = bitcast <2 x i32> %xorp_vec.1 to i64 + %30 = tail call i64 @llvm.hexagon.C2.vmux(i32 %28, i64 %24, i64 %29) + %31 = tail call i64 @llvm.hexagon.S2.asl.r.vw(i64 %30, i32 %gb) + %32 = bitcast i64 %31 to <2 x i32> + %33 = extractelement <2 x i32> %32, i32 0 + store i32 %33, i32* %p_arrayidx14.1, align 4 + %34 = extractelement <2 x i32> %32, i32 1 + store i32 %34, i32* %p_arrayidx1426.1, align 4 + store i32 %33, i32* %p_d.018.1, align 4 + store i32 %34, i32* %p_d.01823.1, align 4 + %p_21.2 = add i32 %4, 256 + %p_22.2 = add i32 %4, 320 + %p_d.018.2 = getelementptr i32, i32* %dest, i32 %p_21.2 + %p_d.01823.2 = getelementptr i32, i32* %dest, i32 %p_22.2 + %p_24.2 = add i32 %4, 264 + %p_25.2 = add i32 %4, 328 + %p_arrayidx14.2 = getelementptr i32, i32* %dest, i32 %p_24.2 + %p_arrayidx1426.2 = getelementptr i32, i32* %dest, i32 %p_25.2 + %_p_scalar_.2 = load i32, i32* %p_d.018.2, align 4 + %_p_vec_.2 = insertelement <2 x i32> undef, i32 %_p_scalar_.2, i32 0 + %_p_scalar_27.2 = load i32, i32* %p_d.01823.2, align 4 + %_p_vec_28.2 = insertelement <2 x i32> %_p_vec_.2, i32 %_p_scalar_27.2, i32 1 + %35 = bitcast <2 x i32> %_p_vec_28.2 to i64 + %36 = tail call i64 @llvm.hexagon.S2.asr.i.vw(i64 %35, i32 31) + %37 = bitcast i64 %36 to <2 x i32> + %shr9p_vec.2 = ashr <2 x i32> %_p_vec_28.2, %7 + %xorp_vec.2 = xor <2 x i32> %37, %sub12p_vec + %38 = bitcast <2 x i32> %shr9p_vec.2 to i64 + %39 = tail call i32 @llvm.hexagon.A2.vcmpweq(i64 %36, i64 %38) + %40 = bitcast <2 x i32> %xorp_vec.2 to i64 + %41 = tail call i64 @llvm.hexagon.C2.vmux(i32 %39, i64 %35, i64 %40) + %42 = tail call i64 @llvm.hexagon.S2.asl.r.vw(i64 %41, i32 %gb) + %43 = bitcast i64 %42 to <2 x i32> + %44 = extractelement <2 x i32> %43, i32 0 + store i32 %44, i32* %p_arrayidx14.2, align 4 + %45 = extractelement <2 x i32> %43, i32 1 + store i32 %45, i32* %p_arrayidx1426.2, align 4 + store i32 %44, i32* %p_d.018.2, align 4 + store i32 %45, i32* %p_d.01823.2, align 4 + %p_21.3 = add i32 %4, 384 
+ %p_22.3 = add i32 %4, 448 + %p_d.018.3 = getelementptr i32, i32* %dest, i32 %p_21.3 + %p_d.01823.3 = getelementptr i32, i32* %dest, i32 %p_22.3 + %p_24.3 = add i32 %4, 392 + %p_25.3 = add i32 %4, 456 + %p_arrayidx14.3 = getelementptr i32, i32* %dest, i32 %p_24.3 + %p_arrayidx1426.3 = getelementptr i32, i32* %dest, i32 %p_25.3 + %_p_scalar_.3 = load i32, i32* %p_d.018.3, align 4 + %_p_vec_.3 = insertelement <2 x i32> undef, i32 %_p_scalar_.3, i32 0 + %_p_scalar_27.3 = load i32, i32* %p_d.01823.3, align 4 + %_p_vec_28.3 = insertelement <2 x i32> %_p_vec_.3, i32 %_p_scalar_27.3, i32 1 + %46 = bitcast <2 x i32> %_p_vec_28.3 to i64 + %47 = tail call i64 @llvm.hexagon.S2.asr.i.vw(i64 %46, i32 31) + %48 = bitcast i64 %47 to <2 x i32> + %shr9p_vec.3 = ashr <2 x i32> %_p_vec_28.3, %7 + %xorp_vec.3 = xor <2 x i32> %48, %sub12p_vec + %49 = bitcast <2 x i32> %shr9p_vec.3 to i64 + %50 = tail call i32 @llvm.hexagon.A2.vcmpweq(i64 %47, i64 %49) + %51 = bitcast <2 x i32> %xorp_vec.3 to i64 + %52 = tail call i64 @llvm.hexagon.C2.vmux(i32 %50, i64 %46, i64 %51) + %53 = tail call i64 @llvm.hexagon.S2.asl.r.vw(i64 %52, i32 %gb) + %54 = bitcast i64 %53 to <2 x i32> + %55 = extractelement <2 x i32> %54, i32 0 + store i32 %55, i32* %p_arrayidx14.3, align 4 + %56 = extractelement <2 x i32> %54, i32 1 + store i32 %56, i32* %p_arrayidx1426.3, align 4 + store i32 %55, i32* %p_d.018.3, align 4 + store i32 %56, i32* %p_d.01823.3, align 4 + %p_21.4 = add i32 %4, 512 + %p_22.4 = add i32 %4, 576 + %p_d.018.4 = getelementptr i32, i32* %dest, i32 %p_21.4 + %p_d.01823.4 = getelementptr i32, i32* %dest, i32 %p_22.4 + %p_24.4 = add i32 %4, 520 + %p_25.4 = add i32 %4, 584 + %p_arrayidx14.4 = getelementptr i32, i32* %dest, i32 %p_24.4 + %p_arrayidx1426.4 = getelementptr i32, i32* %dest, i32 %p_25.4 + %_p_scalar_.4 = load i32, i32* %p_d.018.4, align 4 + %_p_vec_.4 = insertelement <2 x i32> undef, i32 %_p_scalar_.4, i32 0 + %_p_scalar_27.4 = load i32, i32* %p_d.01823.4, align 4 + %_p_vec_28.4 = insertelement <2 x i32> %_p_vec_.4, i32 %_p_scalar_27.4, i32 1 + %57 = bitcast <2 x i32> %_p_vec_28.4 to i64 + %58 = tail call i64 @llvm.hexagon.S2.asr.i.vw(i64 %57, i32 31) + %59 = bitcast i64 %58 to <2 x i32> + %shr9p_vec.4 = ashr <2 x i32> %_p_vec_28.4, %7 + %xorp_vec.4 = xor <2 x i32> %59, %sub12p_vec + %60 = bitcast <2 x i32> %shr9p_vec.4 to i64 + %61 = tail call i32 @llvm.hexagon.A2.vcmpweq(i64 %58, i64 %60) + %62 = bitcast <2 x i32> %xorp_vec.4 to i64 + %63 = tail call i64 @llvm.hexagon.C2.vmux(i32 %61, i64 %57, i64 %62) + %64 = tail call i64 @llvm.hexagon.S2.asl.r.vw(i64 %63, i32 %gb) + %65 = bitcast i64 %64 to <2 x i32> + %66 = extractelement <2 x i32> %65, i32 0 + store i32 %66, i32* %p_arrayidx14.4, align 4 + %67 = extractelement <2 x i32> %65, i32 1 + store i32 %67, i32* %p_arrayidx1426.4, align 4 + store i32 %66, i32* %p_d.018.4, align 4 + store i32 %67, i32* %p_d.01823.4, align 4 + %p_21.5 = add i32 %4, 640 + %p_22.5 = add i32 %4, 704 + %p_d.018.5 = getelementptr i32, i32* %dest, i32 %p_21.5 + %p_d.01823.5 = getelementptr i32, i32* %dest, i32 %p_22.5 + %p_24.5 = add i32 %4, 648 + %p_25.5 = add i32 %4, 712 + %p_arrayidx14.5 = getelementptr i32, i32* %dest, i32 %p_24.5 + %p_arrayidx1426.5 = getelementptr i32, i32* %dest, i32 %p_25.5 + %_p_scalar_.5 = load i32, i32* %p_d.018.5, align 4 + %_p_vec_.5 = insertelement <2 x i32> undef, i32 %_p_scalar_.5, i32 0 + %_p_scalar_27.5 = load i32, i32* %p_d.01823.5, align 4 + %_p_vec_28.5 = insertelement <2 x i32> %_p_vec_.5, i32 %_p_scalar_27.5, i32 1 + %68 = bitcast <2 x i32> 
%_p_vec_28.5 to i64 + %69 = tail call i64 @llvm.hexagon.S2.asr.i.vw(i64 %68, i32 31) + %70 = bitcast i64 %69 to <2 x i32> + %shr9p_vec.5 = ashr <2 x i32> %_p_vec_28.5, %7 + %xorp_vec.5 = xor <2 x i32> %70, %sub12p_vec + %71 = bitcast <2 x i32> %shr9p_vec.5 to i64 + %72 = tail call i32 @llvm.hexagon.A2.vcmpweq(i64 %69, i64 %71) + %73 = bitcast <2 x i32> %xorp_vec.5 to i64 + %74 = tail call i64 @llvm.hexagon.C2.vmux(i32 %72, i64 %68, i64 %73) + %75 = tail call i64 @llvm.hexagon.S2.asl.r.vw(i64 %74, i32 %gb) + %76 = bitcast i64 %75 to <2 x i32> + %77 = extractelement <2 x i32> %76, i32 0 + store i32 %77, i32* %p_arrayidx14.5, align 4 + %78 = extractelement <2 x i32> %76, i32 1 + store i32 %78, i32* %p_arrayidx1426.5, align 4 + store i32 %77, i32* %p_d.018.5, align 4 + store i32 %78, i32* %p_d.01823.5, align 4 + %p_21.6 = add i32 %4, 768 + %p_22.6 = add i32 %4, 832 + %p_d.018.6 = getelementptr i32, i32* %dest, i32 %p_21.6 + %p_d.01823.6 = getelementptr i32, i32* %dest, i32 %p_22.6 + %p_24.6 = add i32 %4, 776 + %p_25.6 = add i32 %4, 840 + %p_arrayidx14.6 = getelementptr i32, i32* %dest, i32 %p_24.6 + %p_arrayidx1426.6 = getelementptr i32, i32* %dest, i32 %p_25.6 + %_p_scalar_.6 = load i32, i32* %p_d.018.6, align 4 + %_p_vec_.6 = insertelement <2 x i32> undef, i32 %_p_scalar_.6, i32 0 + %_p_scalar_27.6 = load i32, i32* %p_d.01823.6, align 4 + %_p_vec_28.6 = insertelement <2 x i32> %_p_vec_.6, i32 %_p_scalar_27.6, i32 1 + %79 = bitcast <2 x i32> %_p_vec_28.6 to i64 + %80 = tail call i64 @llvm.hexagon.S2.asr.i.vw(i64 %79, i32 31) + %81 = bitcast i64 %80 to <2 x i32> + %shr9p_vec.6 = ashr <2 x i32> %_p_vec_28.6, %7 + %xorp_vec.6 = xor <2 x i32> %81, %sub12p_vec + %82 = bitcast <2 x i32> %shr9p_vec.6 to i64 + %83 = tail call i32 @llvm.hexagon.A2.vcmpweq(i64 %80, i64 %82) + %84 = bitcast <2 x i32> %xorp_vec.6 to i64 + %85 = tail call i64 @llvm.hexagon.C2.vmux(i32 %83, i64 %79, i64 %84) + %86 = tail call i64 @llvm.hexagon.S2.asl.r.vw(i64 %85, i32 %gb) + %87 = bitcast i64 %86 to <2 x i32> + %88 = extractelement <2 x i32> %87, i32 0 + store i32 %88, i32* %p_arrayidx14.6, align 4 + %89 = extractelement <2 x i32> %87, i32 1 + store i32 %89, i32* %p_arrayidx1426.6, align 4 + store i32 %88, i32* %p_d.018.6, align 4 + store i32 %89, i32* %p_d.01823.6, align 4 + %p_21.7 = add i32 %4, 896 + %p_22.7 = add i32 %4, 960 + %p_d.018.7 = getelementptr i32, i32* %dest, i32 %p_21.7 + %p_d.01823.7 = getelementptr i32, i32* %dest, i32 %p_22.7 + %p_24.7 = add i32 %4, 904 + %p_25.7 = add i32 %4, 968 + %p_arrayidx14.7 = getelementptr i32, i32* %dest, i32 %p_24.7 + %p_arrayidx1426.7 = getelementptr i32, i32* %dest, i32 %p_25.7 + %_p_scalar_.7 = load i32, i32* %p_d.018.7, align 4 + %_p_vec_.7 = insertelement <2 x i32> undef, i32 %_p_scalar_.7, i32 0 + %_p_scalar_27.7 = load i32, i32* %p_d.01823.7, align 4 + %_p_vec_28.7 = insertelement <2 x i32> %_p_vec_.7, i32 %_p_scalar_27.7, i32 1 + %90 = bitcast <2 x i32> %_p_vec_28.7 to i64 + %91 = tail call i64 @llvm.hexagon.S2.asr.i.vw(i64 %90, i32 31) + %92 = bitcast i64 %91 to <2 x i32> + %shr9p_vec.7 = ashr <2 x i32> %_p_vec_28.7, %7 + %xorp_vec.7 = xor <2 x i32> %92, %sub12p_vec + %93 = bitcast <2 x i32> %shr9p_vec.7 to i64 + %94 = tail call i32 @llvm.hexagon.A2.vcmpweq(i64 %91, i64 %93) + %95 = bitcast <2 x i32> %xorp_vec.7 to i64 + %96 = tail call i64 @llvm.hexagon.C2.vmux(i32 %94, i64 %90, i64 %95) + %97 = tail call i64 @llvm.hexagon.S2.asl.r.vw(i64 %96, i32 %gb) + %98 = bitcast i64 %97 to <2 x i32> + %99 = extractelement <2 x i32> %98, i32 0 + store i32 %99, i32* 
%p_arrayidx14.7, align 4 + %100 = extractelement <2 x i32> %98, i32 1 + store i32 %100, i32* %p_arrayidx1426.7, align 4 + store i32 %99, i32* %p_d.018.7, align 4 + store i32 %100, i32* %p_d.01823.7, align 4 + ret void +} + +declare i64 @llvm.hexagon.S2.asr.i.vw(i64, i32) #1 + +declare i64 @llvm.hexagon.S2.asl.i.vw(i64, i32) #1 + +declare i64 @llvm.hexagon.A2.combinew(i32, i32) #1 + +declare i32 @llvm.hexagon.A2.vcmpweq(i64, i64) #1 + +declare i64 @llvm.hexagon.C2.vmux(i32, i64, i64) #1 + +declare i64 @llvm.hexagon.S2.asl.r.vw(i64, i32) #1 + +attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { nounwind readnone } + +!0 = !{!"int", !1} +!1 = !{!"omnipotent char", !2} +!2 = !{!"Simple C/C++ TBAA"} diff --git a/test/CodeGen/Hexagon/vect/vect-vsplatb.ll b/test/CodeGen/Hexagon/vect/vect-vsplatb.ll new file mode 100644 index 000000000000..6996dd144eba --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-vsplatb.ll @@ -0,0 +1,29 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; Make sure we build the constant vector <7, 7, 7, 7> with a vsplatb. +; CHECK: vsplatb +@B = common global [400 x i8] zeroinitializer, align 8 +@A = common global [400 x i8] zeroinitializer, align 8 +@C = common global [400 x i8] zeroinitializer, align 8 + +define void @run() nounwind { +entry: + br label %polly.loop_body + +polly.loop_after: ; preds = %polly.loop_body + ret void + +polly.loop_body: ; preds = %entry, %polly.loop_body + %polly.loopiv25 = phi i32 [ 0, %entry ], [ %polly.next_loopiv, %polly.loop_body ] + %polly.next_loopiv = add i32 %polly.loopiv25, 4 + %p_arrayidx1 = getelementptr [400 x i8], [400 x i8]* @A, i32 0, i32 %polly.loopiv25 + %p_arrayidx = getelementptr [400 x i8], [400 x i8]* @B, i32 0, i32 %polly.loopiv25 + %vector_ptr = bitcast i8* %p_arrayidx to <4 x i8>* + %_p_vec_full = load <4 x i8>, <4 x i8>* %vector_ptr, align 8 + %mulp_vec = mul <4 x i8> %_p_vec_full, <i8 7, i8 7, i8 7, i8 7> + %vector_ptr14 = bitcast i8* %p_arrayidx1 to <4 x i8>* + %_p_vec_full15 = load <4 x i8>, <4 x i8>* %vector_ptr14, align 8 + %addp_vec = add <4 x i8> %_p_vec_full15, %mulp_vec + store <4 x i8> %addp_vec, <4 x i8>* %vector_ptr14, align 8 + %0 = icmp slt i32 %polly.next_loopiv, 400 + br i1 %0, label %polly.loop_body, label %polly.loop_after +} diff --git a/test/CodeGen/Hexagon/vect/vect-vsplath.ll b/test/CodeGen/Hexagon/vect/vect-vsplath.ll new file mode 100644 index 000000000000..f5207109773e --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-vsplath.ll @@ -0,0 +1,29 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; Make sure we build the constant vector <7, 7, 7, 7> with a vsplath. 
+; CHECK: vsplath +@B = common global [400 x i16] zeroinitializer, align 8 +@A = common global [400 x i16] zeroinitializer, align 8 +@C = common global [400 x i16] zeroinitializer, align 8 + +define void @run() nounwind { +entry: + br label %polly.loop_body + +polly.loop_after: ; preds = %polly.loop_body + ret void + +polly.loop_body: ; preds = %entry, %polly.loop_body + %polly.loopiv26 = phi i32 [ 0, %entry ], [ %polly.next_loopiv, %polly.loop_body ] + %polly.next_loopiv = add nsw i32 %polly.loopiv26, 4 + %p_arrayidx1 = getelementptr [400 x i16], [400 x i16]* @A, i32 0, i32 %polly.loopiv26 + %p_arrayidx = getelementptr [400 x i16], [400 x i16]* @B, i32 0, i32 %polly.loopiv26 + %vector_ptr = bitcast i16* %p_arrayidx to <4 x i16>* + %_p_vec_full = load <4 x i16>, <4 x i16>* %vector_ptr, align 8 + %mulp_vec = mul <4 x i16> %_p_vec_full, <i16 7, i16 7, i16 7, i16 7> + %vector_ptr15 = bitcast i16* %p_arrayidx1 to <4 x i16>* + %_p_vec_full16 = load <4 x i16>, <4 x i16>* %vector_ptr15, align 8 + %addp_vec = add <4 x i16> %_p_vec_full16, %mulp_vec + store <4 x i16> %addp_vec, <4 x i16>* %vector_ptr15, align 8 + %0 = icmp slt i32 %polly.next_loopiv, 400 + br i1 %0, label %polly.loop_body, label %polly.loop_after +} diff --git a/test/CodeGen/Hexagon/vect/vect-vsubb-1.ll b/test/CodeGen/Hexagon/vect/vect-vsubb-1.ll new file mode 100644 index 000000000000..8ac76a0bf13c --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-vsubb-1.ll @@ -0,0 +1,8 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; CHECK: vsubub + +define <4 x i8> @t_i4x8(<4 x i8> %a, <4 x i8> %b) nounwind { +entry: + %0 = sub <4 x i8> %a, %b + ret <4 x i8> %0 +} diff --git a/test/CodeGen/Hexagon/vect/vect-vsubb.ll b/test/CodeGen/Hexagon/vect/vect-vsubb.ll new file mode 100644 index 000000000000..73cfc74074ad --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-vsubb.ll @@ -0,0 +1,8 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; CHECK: vsubub + +define <8 x i8> @t_i8x8(<8 x i8> %a, <8 x i8> %b) nounwind { +entry: + %0 = sub <8 x i8> %a, %b + ret <8 x i8> %0 +} diff --git a/test/CodeGen/Hexagon/vect/vect-vsubh-1.ll b/test/CodeGen/Hexagon/vect/vect-vsubh-1.ll new file mode 100644 index 000000000000..c1f87bf090d6 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-vsubh-1.ll @@ -0,0 +1,8 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; CHECK: vsubh + +define <4 x i16> @t_i4x16(<4 x i16> %a, <4 x i16> %b) nounwind { +entry: + %0 = sub <4 x i16> %a, %b + ret <4 x i16> %0 +} diff --git a/test/CodeGen/Hexagon/vect/vect-vsubh.ll b/test/CodeGen/Hexagon/vect/vect-vsubh.ll new file mode 100644 index 000000000000..cc7e595644d2 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-vsubh.ll @@ -0,0 +1,8 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; CHECK: vsubh + +define <2 x i16> @t_i2x16(<2 x i16> %a, <2 x i16> %b) nounwind { +entry: + %0 = sub <2 x i16> %a, %b + ret <2 x i16> %0 +} diff --git a/test/CodeGen/Hexagon/vect/vect-vsubw.ll b/test/CodeGen/Hexagon/vect/vect-vsubw.ll new file mode 100644 index 000000000000..ba326a33109b --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-vsubw.ll @@ -0,0 +1,8 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; CHECK: vsubw + +define <2 x i32> @t_i2x32(<2 x i32> %a, <2 x i32> %b) nounwind { +entry: + %0 = sub <2 x i32> %a, %b + ret <2 x i32> %0 +} diff --git a/test/CodeGen/Hexagon/vect/vect-xor.ll b/test/CodeGen/Hexagon/vect/vect-xor.ll new file mode 100644 index 000000000000..961185581128 --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-xor.ll @@ -0,0 +1,38 @@ +; RUN: llc -march=hexagon 
-mcpu=hexagonv5 < %s | FileCheck %s + +; Check that the parsing succeeded. +; CHECK: r{{[0-9]+:[0-9]+}} = xor(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}}) +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32" +target triple = "hexagon" + +@window_size = global i32 65536, align 4 +@prev = external global [0 x i16], align 8 +@block_start = common global i32 0, align 4 +@prev_length = common global i32 0, align 4 +@strstart = common global i32 0, align 4 +@match_start = common global i32 0, align 4 +@max_chain_length = common global i32 0, align 4 +@good_match = common global i32 0, align 4 + +define void @fill_window() #0 { +entry: + br label %polly.loop_body + +polly.loop_after: ; preds = %polly.loop_body + ret void + +polly.loop_body: ; preds = %entry, %polly.loop_body + %polly.loopiv36 = phi i32 [ 0, %entry ], [ %polly.next_loopiv, %polly.loop_body ] + %polly.next_loopiv = add nsw i32 %polly.loopiv36, 4 + %p_arrayidx4 = getelementptr [0 x i16], [0 x i16]* @prev, i32 0, i32 %polly.loopiv36 + %vector_ptr = bitcast i16* %p_arrayidx4 to <4 x i16>* + %_p_vec_full = load <4 x i16>, <4 x i16>* %vector_ptr, align 2 + %cmp1p_vicmp = icmp slt <4 x i16> %_p_vec_full, zeroinitializer + %subp_vec = xor <4 x i16> %_p_vec_full, <i16 -32768, i16 -32768, i16 -32768, i16 -32768> + %sel1p_vsel = select <4 x i1> %cmp1p_vicmp, <4 x i16> %subp_vec, <4 x i16> zeroinitializer + store <4 x i16> %sel1p_vsel, <4 x i16>* %vector_ptr, align 2 + %0 = icmp slt i32 %polly.next_loopiv, 32768 + br i1 %0, label %polly.loop_body, label %polly.loop_after +} + +attributes #0 = { nounwind "fp-contract-model"="standard" "no-frame-pointer-elim-non-leaf" "realign-stack" "relocation-model"="static" "ssp-buffers-size"="8" } diff --git a/test/CodeGen/Hexagon/vect/vect-zeroextend.ll b/test/CodeGen/Hexagon/vect/vect-zeroextend.ll new file mode 100644 index 000000000000..3d0b7946f77a --- /dev/null +++ b/test/CodeGen/Hexagon/vect/vect-zeroextend.ll @@ -0,0 +1,23 @@ +; RUN: llc -march=hexagon < %s +; Used to fail with "Cannot select: 0x16cb2d0: v4i16 = zero_extend" + +; ModuleID = 'bugpoint-reduced-simplified.bc' +target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-a0:0-n32" +target triple = "hexagon-unknown-linux-gnu" + +define void @foo() nounwind { +entry: + br i1 undef, label %for.cond30.preheader.lr.ph, label %for.end425 + +for.cond30.preheader.lr.ph: ; preds = %entry + br label %for.cond37.preheader + +for.cond37.preheader: ; preds = %for.cond37.preheader, %for.cond30.preheader.lr.ph + %_p_vec_full = load <3 x i8>, <3 x i8>* undef, align 8 + %0 = zext <3 x i8> %_p_vec_full to <3 x i16> + store <3 x i16> %0, <3 x i16>* undef, align 8 + br label %for.cond37.preheader + +for.end425: ; preds = %entry + ret void +} diff --git a/test/CodeGen/Hexagon/zextloadi1.ll b/test/CodeGen/Hexagon/zextloadi1.ll index b58d9332695d..9ce7bea9fce6 100644 --- a/test/CodeGen/Hexagon/zextloadi1.ll +++ b/test/CodeGen/Hexagon/zextloadi1.ll @@ -13,13 +13,13 @@ @i129_s = external global i129 define void @i129_ls() nounwind { - %tmp = load i129* @i129_l + %tmp = load i129, i129* @i129_l store i129 %tmp, i129* @i129_s ret void } define void @i65_ls() nounwind { - %tmp = load i65* @i65_l + %tmp = load i65, i65* @i65_l store i65 %tmp, i65* @i65_s ret void } |