| author | Dimitry Andric <dim@FreeBSD.org> | 2017-04-16 16:01:22 +0000 |
|---|---|---|
| committer | Dimitry Andric <dim@FreeBSD.org> | 2017-04-16 16:01:22 +0000 |
| commit | 71d5a2540a98c81f5bcaeb48805e0e2881f530ef | |
| tree | 5343938942df402b49ec7300a1c25a2d4ccd5821 /test/CodeGen/Hexagon | |
| parent | 31bbf64f3a4974a2d6c8b3b27ad2f519caf74057 | |
Vendor import of llvm trunk r300422 (tag: vendor/llvm/llvm-trunk-r300422)
Notes:
svn path=/vendor/llvm/dist/; revision=317017
svn path=/vendor/llvm/llvm-trunk-r300422/; revision=317018; tag=vendor/llvm/llvm-trunk-r300422
Diffstat (limited to 'test/CodeGen/Hexagon')
144 files changed, 3125 insertions, 1075 deletions
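Most of the churn in the test updates below is mechanical: after this import, the Hexagon instruction printer emits operand lists without a space after the comma (for example `add(r1:0,r3:2)` instead of `add(r1:0, r3:2)`), so existing FileCheck patterns are tightened to match. As a minimal sketch of the kind of pattern being updated (a hypothetical test in the same style as the ones below, not part of this import):

```llvm
; RUN: llc -march=hexagon < %s | FileCheck %s

; Before this import the pattern would have been written with a space
; after the comma, e.g. add(r{{[0-9]+}}, r{{[0-9]+}}); the new
; assembler output has none, so the CHECK line must match exactly.
; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}},r{{[0-9]+}})
define i32 @add_example(i32 %a, i32 %b) nounwind {
entry:
  %sum = add i32 %a, %b
  ret i32 %sum
}
```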
diff --git a/test/CodeGen/Hexagon/BranchPredict.ll b/test/CodeGen/Hexagon/BranchPredict.ll
index 17d169974e5a..40791c981483 100644
--- a/test/CodeGen/Hexagon/BranchPredict.ll
+++ b/test/CodeGen/Hexagon/BranchPredict.ll
@@ -9,7 +9,7 @@
 @j = external global i32
 
 define i32 @foo(i32 %a) nounwind {
-; CHECK: if{{ *}}(!p{{[0-3]}}.new) jump:nt
+; CHECK: if (!p{{[0-3]}}.new) jump:nt
 entry:
   %tobool = icmp eq i32 %a, 0
   br i1 %tobool, label %if.else, label %if.then, !prof !0
@@ -31,7 +31,7 @@ return:                                 ; preds = %if.else, %if.then
 declare i32 @foobar(...)
 
 define i32 @bar(i32 %a) nounwind {
-; CHECK: if{{ *}}(p{{[0-3]}}.new) jump:nt
+; CHECK: if (p{{[0-3]}}.new) jump:nt
 entry:
   %tobool = icmp eq i32 %a, 0
   br i1 %tobool, label %if.else, label %if.then, !prof !1
@@ -51,7 +51,7 @@ return:                                 ; preds = %if.else, %if.then
 }
 
 define i32 @foo_bar(i32 %a, i16 signext %b) nounwind {
-; CHECK: if{{ *}}(!cmp.eq(r{{[0-9]*}}.new, #0)) jump:nt
+; CHECK: if (!cmp.eq(r{{[0-9]*}}.new,#0)) jump:nt
 entry:
   %0 = load i32, i32* @j, align 4
   %tobool = icmp eq i32 %0, 0
diff --git a/test/CodeGen/Hexagon/adde.ll b/test/CodeGen/Hexagon/adde.ll
index 43ddb4307ef2..12913eea7e81 100644
--- a/test/CodeGen/Hexagon/adde.ll
+++ b/test/CodeGen/Hexagon/adde.ll
@@ -1,34 +1,27 @@
-; RUN: llc -march=hexagon -disable-hsdr -hexagon-expand-condsets=0 -hexagon-bit=0 -disable-post-ra < %s | FileCheck %s
+; RUN: llc -march=hexagon -hexagon-expand-condsets=0 < %s | FileCheck %s
 
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0, #1)
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0, #0)
-; CHECK: r{{[0-9]+:[0-9]+}} = add(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
-; CHECK: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
-; CHECK: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = add(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
+; CHECK-DAG: r{{[0-9]+:[0-9]+}} = add(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK-DAG: r{{[0-9]+:[0-9]+}} = add(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK-DAG: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK-DAG: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK-DAG: r{{[0-9]+}} = mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}})
+; CHECK-DAG: r{{[0-9]+}} = mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}})
 
-
-define void @check_adde_addc (i64 %AL, i64 %AH, i64 %BL, i64 %BH, i64* %RL, i64* %RH) {
-entry:
-  %tmp1 = zext i64 %AL to i128
-  %tmp23 = zext i64 %AH to i128
-  %tmp4 = shl i128 %tmp23, 64
-  %tmp5 = or i128 %tmp4, %tmp1
-  %tmp67 = zext i64 %BL to i128
-  %tmp89 = zext i64 %BH to i128
-  %tmp11 = shl i128 %tmp89, 64
-  %tmp12 = or i128 %tmp11, %tmp67
-  %tmp15 = add i128 %tmp12, %tmp5
-  %tmp1617 = trunc i128 %tmp15 to i64
-  store i64 %tmp1617, i64* %RL
-  %tmp21 = lshr i128 %tmp15, 64
-  %tmp2122 = trunc i128 %tmp21 to i64
-  store i64 %tmp2122, i64* %RH
-  ret void
+define void @check_adde_addc(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64* %a4, i64* %a5) {
+b6:
+  %v7 = zext i64 %a0 to i128
+  %v8 = zext i64 %a1 to i128
+  %v9 = shl i128 %v8, 64
+  %v10 = or i128 %v7, %v9
+  %v11 = zext i64 %a2 to i128
+  %v12 = zext i64 %a3 to i128
+  %v13 = shl i128 %v12, 64
+  %v14 = or i128 %v11, %v13
+  %v15 = add i128 %v10, %v14
+  %v16 = lshr i128 %v15, 64
+  %v17 = trunc i128 %v15 to i64
+  %v18 = trunc i128 %v16 to i64
+  store i64 %v17, i64* %a4
+  store i64 %v18, i64* %a5
+  ret void
 }
diff --git a/test/CodeGen/Hexagon/addh-sext-trunc.ll b/test/CodeGen/Hexagon/addh-sext-trunc.ll
index 7f219944436b..ec5dc611105d 100644
--- a/test/CodeGen/Hexagon/addh-sext-trunc.ll
+++ b/test/CodeGen/Hexagon/addh-sext-trunc.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}}, r{{[0-9]+}}.{{H|h}})
+; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}},r{{[0-9]+}}.{{H|h}})
 
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon-unknown-none"
diff --git a/test/CodeGen/Hexagon/addh-shifted.ll b/test/CodeGen/Hexagon/addh-shifted.ll
index eb263521b42f..697a5c5c69bf 100644
--- a/test/CodeGen/Hexagon/addh-shifted.ll
+++ b/test/CodeGen/Hexagon/addh-shifted.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}}, r{{[0-9]+}}.{{L|l}}):<<16
+; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}},r{{[0-9]+}}.{{L|l}}):<<16
 
 define i64 @test_cast(i64 %arg0, i16 zeroext %arg1, i16 zeroext %arg2) nounwind readnone {
 entry:
diff --git a/test/CodeGen/Hexagon/addh.ll b/test/CodeGen/Hexagon/addh.ll
index c2b536c4669a..8217d6753cb3 100644
--- a/test/CodeGen/Hexagon/addh.ll
+++ b/test/CodeGen/Hexagon/addh.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}}, r{{[0-9]+}}.{{L|l}})
+; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}},r{{[0-9]+}}.{{L|l}})
 
 define i64 @test_cast(i64 %arg0, i16 zeroext %arg1, i16 zeroext %arg2) nounwind readnone {
 entry:
diff --git a/test/CodeGen/Hexagon/alu64.ll b/test/CodeGen/Hexagon/alu64.ll
index f986f1359374..453b40a6ee83 100644
--- a/test/CodeGen/Hexagon/alu64.ll
+++ b/test/CodeGen/Hexagon/alu64.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
 
 ; CHECK-LABEL: @test00
-; CHECK: = cmp.eq(r1:0, r3:2)
+; CHECK: = cmp.eq(r1:0,r3:2)
 define i32 @test00(i64 %Rs, i64 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.C2.cmpeqp(i64 %Rs, i64 %Rt)
@@ -9,7 +9,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test01
-; CHECK: = cmp.gt(r1:0, r3:2)
+; CHECK: = cmp.gt(r1:0,r3:2)
 define i32 @test01(i64 %Rs, i64 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.C2.cmpgtp(i64 %Rs, i64 %Rt)
@@ -17,7 +17,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test02
-; CHECK: = cmp.gtu(r1:0, r3:2)
+; CHECK: = cmp.gtu(r1:0,r3:2)
 define i32 @test02(i64 %Rs, i64 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.C2.cmpgtup(i64 %Rs, i64 %Rt)
@@ -25,7 +25,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test10
-; CHECK: = cmp.eq(r0, r1)
+; CHECK: = cmp.eq(r0,r1)
 define i32 @test10(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A4.rcmpeq(i32 %Rs, i32 %Rt)
@@ -33,7 +33,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test11
-; CHECK: = !cmp.eq(r0, r1)
+; CHECK: = !cmp.eq(r0,r1)
 define i32 @test11(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A4.rcmpneq(i32 %Rs, i32 %Rt)
@@ -41,7 +41,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test12
-; CHECK: = cmp.eq(r0, #23)
+; CHECK: = cmp.eq(r0,#23)
 define i32 @test12(i32 %Rs) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A4.rcmpeqi(i32 %Rs, i32 23)
@@ -49,7 +49,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test13
-; CHECK: = !cmp.eq(r0, #47)
+; CHECK: = !cmp.eq(r0,#47)
 define i32 @test13(i32 %Rs) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A4.rcmpneqi(i32 %Rs, i32 47)
@@ -57,7 +57,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test20
-; CHECK: = cmpb.eq(r0, r1)
+; CHECK: = cmpb.eq(r0,r1)
 define i32 @test20(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A4.cmpbeq(i32 %Rs, i32 %Rt)
@@ -65,7 +65,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test21
-; CHECK: = cmpb.gt(r0, r1)
+; CHECK: = cmpb.gt(r0,r1)
 define i32 @test21(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A4.cmpbgt(i32 %Rs, i32 %Rt)
@@ -73,7 +73,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test22
-; CHECK: = cmpb.gtu(r0, r1)
+; CHECK: = cmpb.gtu(r0,r1)
 define i32 @test22(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A4.cmpbgtu(i32 %Rs, i32 %Rt)
@@ -81,7 +81,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test23
-; CHECK: = cmpb.eq(r0, #56)
+; CHECK: = cmpb.eq(r0,#56)
 define i32 @test23(i32 %Rs) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A4.cmpbeqi(i32 %Rs, i32 56)
@@ -89,7 +89,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test24
-; CHECK: = cmpb.gt(r0, #29)
+; CHECK: = cmpb.gt(r0,#29)
 define i32 @test24(i32 %Rs) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A4.cmpbgti(i32 %Rs, i32 29)
@@ -97,7 +97,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test25
-; CHECK: = cmpb.gtu(r0, #111)
+; CHECK: = cmpb.gtu(r0,#111)
 define i32 @test25(i32 %Rs) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A4.cmpbgtui(i32 %Rs, i32 111)
@@ -105,7 +105,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test30
-; CHECK: = cmph.eq(r0, r1)
+; CHECK: = cmph.eq(r0,r1)
 define i32 @test30(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A4.cmpheq(i32 %Rs, i32 %Rt)
@@ -113,7 +113,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test31
-; CHECK: = cmph.gt(r0, r1)
+; CHECK: = cmph.gt(r0,r1)
 define i32 @test31(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A4.cmphgt(i32 %Rs, i32 %Rt)
@@ -121,7 +121,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test32
-; CHECK: = cmph.gtu(r0, r1)
+; CHECK: = cmph.gtu(r0,r1)
 define i32 @test32(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A4.cmphgtu(i32 %Rs, i32 %Rt)
@@ -129,7 +129,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test33
-; CHECK: = cmph.eq(r0, #-123)
+; CHECK: = cmph.eq(r0,#-123)
 define i32 @test33(i32 %Rs) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A4.cmpheqi(i32 %Rs, i32 -123)
@@ -137,7 +137,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test34
-; CHECK: = cmph.gt(r0, #-3)
+; CHECK: = cmph.gt(r0,#-3)
 define i32 @test34(i32 %Rs) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A4.cmphgti(i32 %Rs, i32 -3)
@@ -145,7 +145,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test35
-; CHECK: = cmph.gtu(r0, #13)
+; CHECK: = cmph.gtu(r0,#13)
 define i32 @test35(i32 %Rs) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A4.cmphgtui(i32 %Rs, i32 13)
@@ -153,7 +153,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test40
-; CHECK: = vmux(p0, r3:2, r5:4)
+; CHECK: = vmux(p0,r3:2,r5:4)
 define i64 @test40(i32 %Pu, i64 %Rs, i64 %Rt) #0 {
 entry:
   %0 = tail call i64 @llvm.hexagon.C2.vmux(i32 %Pu, i64 %Rs, i64 %Rt)
@@ -161,7 +161,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test41
-; CHECK: = any8(vcmpb.eq(r1:0, r3:2))
+; CHECK: = any8(vcmpb.eq(r1:0,r3:2))
 define i32 @test41(i64 %Rs, i64 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A4.vcmpbeq.any(i64 %Rs, i64 %Rt)
@@ -169,7 +169,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test50
-; CHECK: = add(r1:0, r3:2)
+; CHECK: = add(r1:0,r3:2)
 define i64 @test50(i64 %Rs, i64 %Rt) #0 {
 entry:
   %0 = tail call i64 @llvm.hexagon.A2.addp(i64 %Rs, i64 %Rt)
@@ -177,7 +177,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test51
-; CHECK: = add(r1:0, r3:2):sat
+; CHECK: = add(r1:0,r3:2):sat
 define i64 @test51(i64 %Rs, i64 %Rt) #0 {
 entry:
   %0 = tail call i64 @llvm.hexagon.A2.addpsat(i64 %Rs, i64 %Rt)
@@ -185,7 +185,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test52
-; CHECK: = sub(r1:0, r3:2)
+; CHECK: = sub(r1:0,r3:2)
 define i64 @test52(i64 %Rs, i64 %Rt) #0 {
 entry:
   %0 = tail call i64 @llvm.hexagon.A2.subp(i64 %Rs, i64 %Rt)
@@ -193,7 +193,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test53
-; CHECK: = add(r1:0, r3:2):raw:
+; CHECK: = add(r1:0,r3:2):raw:
 define i64 @test53(i32 %Rs, i64 %Rt) #0 {
 entry:
   %0 = tail call i64 @llvm.hexagon.A2.addsp(i32 %Rs, i64 %Rt)
@@ -201,7 +201,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test54
-; CHECK: = and(r1:0, r3:2)
+; CHECK: = and(r1:0,r3:2)
 define i64 @test54(i64 %Rs, i64 %Rt) #0 {
 entry:
   %0 = tail call i64 @llvm.hexagon.A2.andp(i64 %Rs, i64 %Rt)
@@ -209,7 +209,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test55
-; CHECK: = or(r1:0, r3:2)
+; CHECK: = or(r1:0,r3:2)
 define i64 @test55(i64 %Rs, i64 %Rt) #0 {
 entry:
   %0 = tail call i64 @llvm.hexagon.A2.orp(i64 %Rs, i64 %Rt)
@@ -217,7 +217,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test56
-; CHECK: = xor(r1:0, r3:2)
+; CHECK: = xor(r1:0,r3:2)
 define i64 @test56(i64 %Rs, i64 %Rt) #0 {
 entry:
   %0 = tail call i64 @llvm.hexagon.A2.xorp(i64 %Rs, i64 %Rt)
@@ -225,7 +225,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test57
-; CHECK: = and(r1:0, ~r3:2)
+; CHECK: = and(r1:0,~r3:2)
 define i64 @test57(i64 %Rs, i64 %Rt) #0 {
 entry:
   %0 = tail call i64 @llvm.hexagon.A4.andnp(i64 %Rs, i64 %Rt)
@@ -233,7 +233,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test58
-; CHECK: = or(r1:0, ~r3:2)
+; CHECK: = or(r1:0,~r3:2)
 define i64 @test58(i64 %Rs, i64 %Rt) #0 {
 entry:
   %0 = tail call i64 @llvm.hexagon.A4.ornp(i64 %Rs, i64 %Rt)
@@ -241,7 +241,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test60
-; CHECK: = add(r0.l, r1.l)
+; CHECK: = add(r0.l,r1.l)
 define i32 @test60(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.addh.l16.ll(i32 %Rs, i32 %Rt)
@@ -249,7 +249,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test61
-; CHECK: = add(r0.l, r1.h)
+; CHECK: = add(r0.l,r1.h)
 define i32 @test61(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.addh.l16.hl(i32 %Rs, i32 %Rt)
@@ -257,7 +257,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test62
-; CHECK: = add(r0.l, r1.l):sat
+; CHECK: = add(r0.l,r1.l):sat
 define i32 @test62(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32 %Rs, i32 %Rt)
@@ -265,7 +265,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test63
-; CHECK: = add(r0.l, r1.h):sat
+; CHECK: = add(r0.l,r1.h):sat
 define i32 @test63(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32 %Rs, i32 %Rt)
@@ -273,7 +273,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test64
-; CHECK: = add(r0.l, r1.l):<<16
+; CHECK: = add(r0.l,r1.l):<<16
 define i32 @test64(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.addh.h16.ll(i32 %Rs, i32 %Rt)
@@ -281,7 +281,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test65
-; CHECK: = add(r0.l, r1.h):<<16
+; CHECK: = add(r0.l,r1.h):<<16
 define i32 @test65(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.addh.h16.lh(i32 %Rs, i32 %Rt)
@@ -289,7 +289,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test66
-; CHECK: = add(r0.h, r1.l):<<16
+; CHECK: = add(r0.h,r1.l):<<16
 define i32 @test66(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.addh.h16.hl(i32 %Rs, i32 %Rt)
@@ -297,7 +297,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test67
-; CHECK: = add(r0.h, r1.h):<<16
+; CHECK: = add(r0.h,r1.h):<<16
 define i32 @test67(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.addh.h16.hh(i32 %Rs, i32 %Rt)
@@ -305,7 +305,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test68
-; CHECK: = add(r0.l, r1.l):sat:<<16
+; CHECK: = add(r0.l,r1.l):sat:<<16
 define i32 @test68(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32 %Rs, i32 %Rt)
@@ -313,7 +313,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test69
-; CHECK: = add(r0.l, r1.h):sat:<<16
+; CHECK: = add(r0.l,r1.h):sat:<<16
 define i32 @test69(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32 %Rs, i32 %Rt)
@@ -321,7 +321,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test6A
-; CHECK: = add(r0.h, r1.l):sat:<<16
+; CHECK: = add(r0.h,r1.l):sat:<<16
 define i32 @test6A(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32 %Rs, i32 %Rt)
@@ -329,7 +329,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test6B
-; CHECK: = add(r0.h, r1.h):sat:<<16
+; CHECK: = add(r0.h,r1.h):sat:<<16
 define i32 @test6B(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32 %Rs, i32 %Rt)
@@ -337,7 +337,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test70
-; CHECK: = sub(r0.l, r1.l)
+; CHECK: = sub(r0.l,r1.l)
 define i32 @test70(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.subh.l16.ll(i32 %Rs, i32 %Rt)
@@ -345,7 +345,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test71
-; CHECK: = sub(r0.l, r1.h)
+; CHECK: = sub(r0.l,r1.h)
 define i32 @test71(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.subh.l16.hl(i32 %Rs, i32 %Rt)
@@ -353,7 +353,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test72
-; CHECK: = sub(r0.l, r1.l):sat
+; CHECK: = sub(r0.l,r1.l):sat
 define i32 @test72(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %Rs, i32 %Rt)
@@ -361,7 +361,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test73
-; CHECK: = sub(r0.l, r1.h):sat
+; CHECK: = sub(r0.l,r1.h):sat
 define i32 @test73(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32 %Rs, i32 %Rt)
@@ -369,7 +369,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test74
-; CHECK: = sub(r0.l, r1.l):<<16
+; CHECK: = sub(r0.l,r1.l):<<16
 define i32 @test74(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.subh.h16.ll(i32 %Rs, i32 %Rt)
@@ -377,7 +377,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test75
-; CHECK: = sub(r0.l, r1.h):<<16
+; CHECK: = sub(r0.l,r1.h):<<16
 define i32 @test75(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.subh.h16.lh(i32 %Rs, i32 %Rt)
@@ -385,7 +385,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test76
-; CHECK: = sub(r0.h, r1.l):<<16
+; CHECK: = sub(r0.h,r1.l):<<16
 define i32 @test76(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.subh.h16.hl(i32 %Rs, i32 %Rt)
@@ -393,7 +393,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test77
-; CHECK: = sub(r0.h, r1.h):<<16
+; CHECK: = sub(r0.h,r1.h):<<16
 define i32 @test77(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.subh.h16.hh(i32 %Rs, i32 %Rt)
@@ -401,7 +401,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test78
-; CHECK: = sub(r0.l, r1.l):sat:<<16
+; CHECK: = sub(r0.l,r1.l):sat:<<16
 define i32 @test78(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32 %Rs, i32 %Rt)
@@ -409,7 +409,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test79
-; CHECK: = sub(r0.l, r1.h):sat:<<16
+; CHECK: = sub(r0.l,r1.h):sat:<<16
 define i32 @test79(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32 %Rs, i32 %Rt)
@@ -417,7 +417,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test7A
-; CHECK: = sub(r0.h, r1.l):sat:<<16
+; CHECK: = sub(r0.h,r1.l):sat:<<16
 define i32 @test7A(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32 %Rs, i32 %Rt)
@@ -425,7 +425,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test7B
-; CHECK: = sub(r0.h, r1.h):sat:<<16
+; CHECK: = sub(r0.h,r1.h):sat:<<16
 define i32 @test7B(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32 %Rs, i32 %Rt)
@@ -433,7 +433,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test90
-; CHECK: = and(#1, asl(r0, #2))
+; CHECK: = and(#1,asl(r0,#2))
 define i32 @test90(i32 %Rs) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.S4.andi.asl.ri(i32 1, i32 %Rs, i32 2)
@@ -441,7 +441,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test91
-; CHECK: = or(#1, asl(r0, #2))
+; CHECK: = or(#1,asl(r0,#2))
 define i32 @test91(i32 %Rs) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.S4.ori.asl.ri(i32 1, i32 %Rs, i32 2)
@@ -449,7 +449,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test92
-; CHECK: = add(#1, asl(r0, #2))
+; CHECK: = add(#1,asl(r0,#2))
 define i32 @test92(i32 %Rs) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.S4.addi.asl.ri(i32 1, i32 %Rs, i32 2)
@@ -457,7 +457,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test93
-; CHECK: = sub(#1, asl(r0, #2))
+; CHECK: = sub(#1,asl(r0,#2))
 define i32 @test93(i32 %Rs) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.S4.subi.asl.ri(i32 1, i32 %Rs, i32 2)
@@ -465,7 +465,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test94
-; CHECK: = and(#1, lsr(r0, #2))
+; CHECK: = and(#1,lsr(r0,#2))
 define i32 @test94(i32 %Rs) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.S4.andi.lsr.ri(i32 1, i32 %Rs, i32 2)
@@ -473,7 +473,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test95
-; CHECK: = or(#1, lsr(r0, #2))
+; CHECK: = or(#1,lsr(r0,#2))
 define i32 @test95(i32 %Rs) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.S4.ori.lsr.ri(i32 1, i32 %Rs, i32 2)
@@ -481,7 +481,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test96
-; CHECK: = add(#1, lsr(r0, #2))
+; CHECK: = add(#1,lsr(r0,#2))
 define i32 @test96(i32 %Rs) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.S4.addi.lsr.ri(i32 1, i32 %Rs, i32 2)
@@ -489,7 +489,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test97
-; CHECK: = sub(#1, lsr(r0, #2))
+; CHECK: = sub(#1,lsr(r0,#2))
 define i32 @test97(i32 %Rs) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.S4.subi.lsr.ri(i32 1, i32 %Rs, i32 2)
@@ -497,7 +497,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test100
-; CHECK: = bitsplit(r0, r1)
+; CHECK: = bitsplit(r0,r1)
 define i64 @test100(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i64 @llvm.hexagon.A4.bitsplit(i32 %Rs, i32 %Rt)
@@ -505,7 +505,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test101
-; CHECK: = modwrap(r0, r1)
+; CHECK: = modwrap(r0,r1)
 define i32 @test101(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.A4.modwrapu(i32 %Rs, i32 %Rt)
@@ -513,7 +513,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test102
-; CHECK: = parity(r1:0, r3:2)
+; CHECK: = parity(r1:0,r3:2)
 define i32 @test102(i64 %Rs, i64 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.S2.parityp(i64 %Rs, i64 %Rt)
@@ -521,7 +521,7 @@ entry:
 }
 
 ; CHECK-LABEL: @test103
-; CHECK: = parity(r0, r1)
+; CHECK: = parity(r0,r1)
 define i32 @test103(i32 %Rs, i32 %Rt) #0 {
 entry:
   %0 = tail call i32 @llvm.hexagon.S4.parity(i32 %Rs, i32 %Rt)
diff --git a/test/CodeGen/Hexagon/args.ll b/test/CodeGen/Hexagon/args.ll
index 3bfb8b159556..a1c7bc3230dd 100644
--- a/test/CodeGen/Hexagon/args.ll
+++ b/test/CodeGen/Hexagon/args.ll
@@ -1,8 +1,8 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: r5:4 = combine(#6, #5)
-; CHECK: r3:2 = combine(#4, #3)
-; CHECK: r1:0 = combine(#2, #1)
-; CHECK: memw(r29+#0)=#7
+; CHECK: r5:4 = combine(#6,#5)
+; CHECK: r3:2 = combine(#4,#3)
+; CHECK: r1:0 = combine(#2,#1)
+; CHECK: memw(r29+#0) = #7
 
 
 define void @foo() nounwind {
diff --git a/test/CodeGen/Hexagon/avoid-predspill-calleesaved.ll b/test/CodeGen/Hexagon/avoid-predspill-calleesaved.ll
index 561013b174dd..906a877b91e5 100644
--- a/test/CodeGen/Hexagon/avoid-predspill-calleesaved.ll
+++ b/test/CodeGen/Hexagon/avoid-predspill-calleesaved.ll
@@ -7,7 +7,6 @@
 ; without adding an extra spill of that register.
 ;
 ; CHECK: PredSpill:
-; CHECK: memd(r29{{.*}}) = r17:16
 ; CHECK-DAG: r{{[0-9]+}} = p0
 ; CHECK-DAG: p0 = r{{[0-9]+}}
 ; CHECK-NOT: = memw(r29
diff --git a/test/CodeGen/Hexagon/bit-bitsplit-at.ll b/test/CodeGen/Hexagon/bit-bitsplit-at.ll
new file mode 100644
index 000000000000..87d535fd0f22
--- /dev/null
+++ b/test/CodeGen/Hexagon/bit-bitsplit-at.ll
@@ -0,0 +1,33 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; REQUIRES: asserts
+
+; This testcase used to crash due to putting the bitsplit instruction in a
+; wrong place.
+; CHECK: bitsplit
+
+target triple = "hexagon"
+
+define hidden fastcc i32 @fred(i32 %a0) unnamed_addr #0 {
+b1:
+  %v2 = lshr i32 %a0, 16
+  %v3 = trunc i32 %v2 to i8
+  br i1 undef, label %b6, label %b4
+
+b4:                                               ; preds = %b1
+  %v5 = and i32 %a0, 65535
+  br i1 undef, label %b8, label %b9
+
+b6:                                               ; preds = %b1
+  %v7 = and i32 %a0, 65535
+  br label %b9
+
+b8:                                               ; preds = %b4
+  store i8 %v3, i8* undef, align 2
+  unreachable
+
+b9:                                               ; preds = %b6, %b4
+  %v10 = phi i32 [ %v7, %b6 ], [ %v5, %b4 ]
+  ret i32 %v10
+}
+
+attributes #0 = { nounwind optsize "target-cpu"="hexagonv60" "target-features"="-hvx-double,-long-calls" }
diff --git a/test/CodeGen/Hexagon/bit-bitsplit-src.ll b/test/CodeGen/Hexagon/bit-bitsplit-src.ll
new file mode 100644
index 000000000000..2d1c71c709f4
--- /dev/null
+++ b/test/CodeGen/Hexagon/bit-bitsplit-src.ll
@@ -0,0 +1,35 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; REQUIRES: asserts
+
+; This used to crash. Check for some sane output.
+; CHECK: call printf
+
+target triple = "hexagon"
+
+@g0 = external local_unnamed_addr global [4 x i64], align 8
+@g1 = external hidden unnamed_addr constant [29 x i8], align 1
+@g2 = external hidden unnamed_addr constant [29 x i8], align 1
+
+define void @fred() local_unnamed_addr #0 {
+b0:
+  %v1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g0, i32 0, i32 0), align 8
+  %v2 = trunc i64 %v1 to i32
+  %v3 = lshr i64 %v1, 16
+  %v4 = trunc i64 %v3 to i32
+  %v5 = and i32 %v4, 255
+  %v6 = add nuw nsw i32 0, %v5
+  %v7 = add nuw nsw i32 %v6, 0
+  %v8 = zext i32 %v7 to i64
+  %v9 = and i32 %v2, 65535
+  %v10 = and i32 %v4, 65535
+  %v11 = add nuw nsw i32 %v10, %v9
+  %v12 = zext i32 %v11 to i64
+  tail call void (i8*, ...) @printf(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @g1, i32 0, i32 0), i64 %v8) #0
+  tail call void (i8*, ...) @printf(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @g2, i32 0, i32 0), i64 %v12) #0
+  ret void
+}
+
+; Function Attrs: nounwind
+declare void @printf(i8* nocapture readonly, ...) local_unnamed_addr #0
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double,-long-calls" }
diff --git a/test/CodeGen/Hexagon/bit-bitsplit.ll b/test/CodeGen/Hexagon/bit-bitsplit.ll
new file mode 100644
index 000000000000..4ae2e4e66508
--- /dev/null
+++ b/test/CodeGen/Hexagon/bit-bitsplit.ll
@@ -0,0 +1,17 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: bitsplit(r{{[0-9]+}},#5)
+
+target triple = "hexagon"
+
+define i32 @fred(i32 %a, i32* nocapture readonly %b) local_unnamed_addr #0 {
+entry:
+  %and = and i32 %a, 31
+  %shr = lshr i32 %a, 5
+  %arrayidx = getelementptr inbounds i32, i32* %b, i32 %shr
+  %0 = load i32, i32* %arrayidx, align 4
+  %shr1 = lshr i32 %0, %and
+  %and2 = and i32 %shr1, 1
+  ret i32 %and2
+}
+
+attributes #0 = { norecurse nounwind readonly "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double" }
diff --git a/test/CodeGen/Hexagon/bit-eval.ll b/test/CodeGen/Hexagon/bit-eval.ll
index 1d2be5bfc19d..5b0111dfcd10 100644
--- a/test/CodeGen/Hexagon/bit-eval.ll
+++ b/test/CodeGen/Hexagon/bit-eval.ll
@@ -20,7 +20,7 @@ entry:
 }
 
 ; CHECK-LABEL: test3:
-; CHECK: r1:0 = combine(#0, #1)
+; CHECK: r1:0 = combine(#0,#1)
 define i64 @test3() #0 {
 entry:
   %0 = tail call i64 @llvm.hexagon.S4.extractp(i64 -1, i32 63, i32 63)
diff --git a/test/CodeGen/Hexagon/bit-ext-sat.ll b/test/CodeGen/Hexagon/bit-ext-sat.ll
new file mode 100644
index 000000000000..47c49c2364b7
--- /dev/null
+++ b/test/CodeGen/Hexagon/bit-ext-sat.ll
@@ -0,0 +1,57 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+target triple = "hexagon"
+
+; CHECK-LABEL: xh_sh
+; CHECK: sath
+; CHECK-NOT: sxth
+define i32 @xh_sh(i32 %x) local_unnamed_addr #0 {
+entry:
+  %0 = tail call i32 @llvm.hexagon.A2.sath(i32 %x)
+  %1 = tail call i32 @llvm.hexagon.A2.sxth(i32 %0)
+  ret i32 %1
+}
+
+; CHECK-LABEL: xb_sb
+; CHECK: satb
+; CHECK-NOT: sxtb
+define i32 @xb_sb(i32 %x) local_unnamed_addr #0 {
+entry:
+  %0 = tail call i32 @llvm.hexagon.A2.satb(i32 %x)
+  %1 = tail call i32 @llvm.hexagon.A2.sxtb(i32 %0)
+  ret i32 %1
+}
+
+; CHECK-LABEL: xuh_suh
+; CHECK: satuh
+; CHECK-NOT: zxth
+define i32 @xuh_suh(i32 %x) local_unnamed_addr #0 {
+entry:
+  %0 = tail call i32 @llvm.hexagon.A2.satuh(i32 %x)
+  %1 = tail call i32 @llvm.hexagon.A2.zxth(i32 %0)
+  ret i32 %1
+}
+
+; CHECK-LABEL: xub_sub
+; CHECK: satub
+; CHECK-NOT: zxtb
+define i32 @xub_sub(i32 %x) local_unnamed_addr #0 {
+entry:
+  %0 = tail call i32 @llvm.hexagon.A2.satub(i32 %x)
+  %1 = tail call i32 @llvm.hexagon.A2.zxtb(i32 %0)
+  ret i32 %1
+}
+
+
+declare i32 @llvm.hexagon.A2.sxtb(i32) #1
+declare i32 @llvm.hexagon.A2.sxth(i32) #1
+declare i32 @llvm.hexagon.A2.zxtb(i32) #1
+declare i32 @llvm.hexagon.A2.zxth(i32) #1
+
+declare i32 @llvm.hexagon.A2.satb(i32) #1
+declare i32 @llvm.hexagon.A2.sath(i32) #1
+declare i32 @llvm.hexagon.A2.satub(i32) #1
+declare i32 @llvm.hexagon.A2.satuh(i32) #1
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double,-long-calls" }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/Hexagon/bit-extract-off.ll b/test/CodeGen/Hexagon/bit-extract-off.ll
new file mode 100644
index 000000000000..183435ab7b23
--- /dev/null
+++ b/test/CodeGen/Hexagon/bit-extract-off.ll
@@ -0,0 +1,23 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: extractu(r1,#31,#0)
+
+; In the IR this was an extract of 31 bits starting at position 32 in r1:0.
+; When mapping it to an extract from r1, the offset was not reset to 0, and
+; we had "extractu(r1,#31,#32)".
+
+target triple = "hexagon"
+
+define hidden i32 @fred([101 x double]* %a0, i32 %a1, i32* %a2, i32* %a3) #0 {
+b4:
+  br label %b5
+
+b5:                                               ; preds = %b5, %b4
+  %v6 = call double @fabs(double undef) #1
+  store double %v6, double* undef, align 8
+  br label %b5
+}
+
+declare double @fabs(double) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double,-long-calls" }
+attributes #1 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double,-long-calls" }
diff --git a/test/CodeGen/Hexagon/bit-extract.ll b/test/CodeGen/Hexagon/bit-extract.ll
new file mode 100644
index 000000000000..ad7d05d2c235
--- /dev/null
+++ b/test/CodeGen/Hexagon/bit-extract.ll
@@ -0,0 +1,75 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+target triple = "hexagon"
+
+; CHECK-LABEL: ua
+; CHECK: extractu(r0,#26,#0)
+define i32 @ua(i32 %x) local_unnamed_addr #0 {
+entry:
+  %shl = and i32 %x, 67108863
+  ret i32 %shl
+}
+
+; CHECK-LABEL: ub
+; CHECK: extractu(r0,#16,#4)
+define i32 @ub(i32 %x) local_unnamed_addr #0 {
+entry:
+  %0 = lshr i32 %x, 4
+  %shr = and i32 %0, 65535
+  ret i32 %shr
+}
+
+; CHECK-LABEL: uc
+; CHECK: extractu(r0,#24,#0)
+define i32 @uc(i32 %x) local_unnamed_addr #0 {
+entry:
+  %shl = and i32 %x, 16777215
+  ret i32 %shl
+}
+
+; CHECK-LABEL: ud
+; CHECK: extractu(r0,#16,#8)
+define i32 @ud(i32 %x) local_unnamed_addr #0 {
+entry:
+  %bf.lshr = lshr i32 %x, 8
+  %bf.clear = and i32 %bf.lshr, 65535
+  ret i32 %bf.clear
+}
+
+; CHECK-LABEL: sa
+; CHECK: extract(r0,#26,#0)
+define i32 @sa(i32 %x) local_unnamed_addr #0 {
+entry:
+  %shl = shl i32 %x, 6
+  %shr = ashr exact i32 %shl, 6
+  ret i32 %shr
+}
+
+; CHECK-LABEL: sb
+; CHECK: extract(r0,#16,#4)
+define i32 @sb(i32 %x) local_unnamed_addr #0 {
+entry:
+  %shl = shl i32 %x, 12
+  %shr = ashr i32 %shl, 16
+  ret i32 %shr
+}
+
+; CHECK-LABEL: sc
+; CHECK: extract(r0,#24,#0)
+define i32 @sc(i32 %x) local_unnamed_addr #0 {
+entry:
+  %shl = shl i32 %x, 8
+  %shr = ashr exact i32 %shl, 8
+  ret i32 %shr
+}
+
+; CHECK-LABEL: sd
+; CHECK: extract(r0,#16,#8)
+define i32 @sd(i32 %x) local_unnamed_addr #0 {
+entry:
+  %bf.shl = shl i32 %x, 8
+  %bf.ashr = ashr i32 %bf.shl, 16
+  ret i32 %bf.ashr
+}
+
+attributes #0 = { noinline norecurse nounwind readnone "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double,-long-calls" }
diff --git a/test/CodeGen/Hexagon/bit-has.ll b/test/CodeGen/Hexagon/bit-has.ll
new file mode 100644
index 000000000000..9022de391868
--- /dev/null
+++ b/test/CodeGen/Hexagon/bit-has.ll
@@ -0,0 +1,64 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; REQUIRES: asserts
+
+; This used to crash. Check for some sane output.
+; CHECK: sath
+
+target triple = "hexagon"
+
+define void @fred() local_unnamed_addr #0 {
+b0:
+  %v1 = load i32, i32* undef, align 4
+  %v2 = tail call i32 @llvm.hexagon.A2.sath(i32 undef)
+  %v3 = and i32 %v1, 603979776
+  %v4 = trunc i32 %v3 to i30
+  switch i30 %v4, label %b22 [
+    i30 -536870912, label %b5
+    i30 -469762048, label %b6
+  ]
+
+b5:                                               ; preds = %b0
+  unreachable
+
+b6:                                               ; preds = %b0
+  %v7 = load i32, i32* undef, align 4
+  %v8 = sub nsw i32 65536, %v7
+  %v9 = load i32, i32* undef, align 4
+  %v10 = mul nsw i32 %v9, %v9
+  %v11 = zext i32 %v10 to i64
+  %v12 = mul nsw i32 %v2, %v8
+  %v13 = sext i32 %v12 to i64
+  %v14 = mul nsw i64 %v13, %v11
+  %v15 = trunc i64 %v14 to i32
+  %v16 = and i32 %v15, 2147483647
+  store i32 %v16, i32* undef, align 4
+  %v17 = lshr i64 %v14, 31
+  %v18 = trunc i64 %v17 to i32
+  store i32 %v18, i32* undef, align 4
+  br label %b19
+
+b19:                                              ; preds = %b6
+  br i1 undef, label %b20, label %b21
+
+b20:                                              ; preds = %b19
+  unreachable
+
+b21:                                              ; preds = %b19
+  br label %b23
+
+b22:                                              ; preds = %b0
+  unreachable
+
+b23:                                              ; preds = %b21
+  %v24 = load i32, i32* undef, align 4
+  %v25 = shl i32 %v24, 1
+  %v26 = and i32 %v25, 65534
+  %v27 = or i32 %v26, 0
+  store i32 %v27, i32* undef, align 4
+  ret void
+}
+
+declare i32 @llvm.hexagon.A2.sath(i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv5" "target-features"="-hvx,-hvx-double,-long-calls" }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/Hexagon/bit-phi.ll b/test/CodeGen/Hexagon/bit-phi.ll
index 86b18d8bf256..7abfba079bb0 100644
--- a/test/CodeGen/Hexagon/bit-phi.ll
+++ b/test/CodeGen/Hexagon/bit-phi.ll
@@ -1,4 +1,5 @@
 ; RUN: llc -march=hexagon < %s
+; RUN: llc -march=hexagon -disable-hcp < %s
 ; REQUIRES: asserts
 
 target datalayout = "e-m:e-p:32:32-i1:32-i64:64-a:0-v32:32-n16:32"
diff --git a/test/CodeGen/Hexagon/bit-rie.ll b/test/CodeGen/Hexagon/bit-rie.ll
index 6bd0558f580c..302382a1ade4 100644
--- a/test/CodeGen/Hexagon/bit-rie.ll
+++ b/test/CodeGen/Hexagon/bit-rie.ll
@@ -187,8 +187,8 @@ declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32) #2
 declare i32 @llvm.hexagon.S2.clbnorm(i32) #2
 declare i32 @llvm.hexagon.S2.lsr.r.r(i32, i32) #2
 declare i64 @llvm.hexagon.M2.mpyd.ll.s1(i32, i32) #2
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
 
 attributes #0 = { norecurse nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,-hvx-double" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { argmemonly nounwind }
diff --git a/test/CodeGen/Hexagon/bit-skip-byval.ll b/test/CodeGen/Hexagon/bit-skip-byval.ll
index d6c1aad94007..9ee4014ae346 100644
--- a/test/CodeGen/Hexagon/bit-skip-byval.ll
+++ b/test/CodeGen/Hexagon/bit-skip-byval.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 ;
 ; Either and or zxtb.
-; CHECK: r0 = and(r1, #255)
+; CHECK: r0 = and(r1,#255)
 
 %struct.t0 = type { i32 }
diff --git a/test/CodeGen/Hexagon/bit-validate-reg.ll b/test/CodeGen/Hexagon/bit-validate-reg.ll
index 16d4a5e4484d..42eed97786cd 100644
--- a/test/CodeGen/Hexagon/bit-validate-reg.ll
+++ b/test/CodeGen/Hexagon/bit-validate-reg.ll
@@ -1,10 +1,13 @@
-; RUN: llc -march=hexagon < %s | FileCheck %s
+; RUN: llc -march=hexagon -hexbit-extract=0 < %s | FileCheck %s
 
 ; Make sure we don't generate zxtb to transfer a predicate register into
 ; a general purpose register.
 
 ; CHECK: r0 = p0
 ; CHECK-NOT: zxtb(p
+; CHECK-NOT: and(p
+; CHECK-NOT: extract(p
+; CHECK-NOT: extractu(p
 
 target triple = "hexagon"
diff --git a/test/CodeGen/Hexagon/bitmanip.ll b/test/CodeGen/Hexagon/bitmanip.ll
new file mode 100644
index 000000000000..2044a2fdd083
--- /dev/null
+++ b/test/CodeGen/Hexagon/bitmanip.ll
@@ -0,0 +1,135 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; CHECK-LABEL: popcount_16
+; CHECK: zxth
+; CHECK: popcount
+define i16 @popcount_16(i16 %p) #0 {
+  %t = call i16 @llvm.ctpop.i16(i16 %p) #0
+  ret i16 %t
+}
+
+; CHECK-LABEL: popcount_32
+; CHECK: popcount
+define i32 @popcount_32(i32 %p) #0 {
+  %t = call i32 @llvm.ctpop.i32(i32 %p) #0
+  ret i32 %t
+}
+
+; CHECK-LABEL: popcount_64
+; CHECK: popcount
+define i64 @popcount_64(i64 %p) #0 {
+  %t = call i64 @llvm.ctpop.i64(i64 %p) #0
+  ret i64 %t
+}
+
+; CHECK-LABEL: ctlz_16
+; CHECK: [[REG0:r[0-9]+]] = zxth
+; CHECK: [[REG1:r[0-9]+]] = cl0([[REG0]])
+; CHECK: add([[REG1]],#-16)
+define i16 @ctlz_16(i16 %p) #0 {
+  %t = call i16 @llvm.ctlz.i16(i16 %p, i1 true) #0
+  ret i16 %t
+}
+
+; CHECK-LABEL: ctlz_32
+; CHECK: cl0
+define i32 @ctlz_32(i32 %p) #0 {
+  %t = call i32 @llvm.ctlz.i32(i32 %p, i1 true) #0
+  ret i32 %t
+}
+
+; CHECK-LABEL: ctlz_64
+; CHECK: cl0
+define i64 @ctlz_64(i64 %p) #0 {
+  %t = call i64 @llvm.ctlz.i64(i64 %p, i1 true) #0
+  ret i64 %t
+}
+
+; CHECK-LABEL: cttz_16
+; CHECK: ct0
+define i16 @cttz_16(i16 %p) #0 {
+  %t = call i16 @llvm.cttz.i16(i16 %p, i1 true) #0
+  ret i16 %t
+}
+
+; CHECK-LABEL: cttz_32
+; CHECK: ct0
+define i32 @cttz_32(i32 %p) #0 {
+  %t = call i32 @llvm.cttz.i32(i32 %p, i1 true) #0
+  ret i32 %t
+}
+
+; CHECK-LABEL: cttz_64
+; CHECK: ct0
+define i64 @cttz_64(i64 %p) #0 {
+  %t = call i64 @llvm.cttz.i64(i64 %p, i1 true) #0
+  ret i64 %t
+}
+
+; CHECK-LABEL: brev_16
+; CHECK: [[REG:r[0-9]+]] = brev
+; CHECK: lsr([[REG]],#16)
+define i16 @brev_16(i16 %p) #0 {
+  %t = call i16 @llvm.bitreverse.i16(i16 %p) #0
+  ret i16 %t
+}
+
+; CHECK-LABEL: brev_32
+; CHECK: brev
+define i32 @brev_32(i32 %p) #0 {
+  %t = call i32 @llvm.bitreverse.i32(i32 %p) #0
+  ret i32 %t
+}
+
+; CHECK-LABEL: brev_64
+; CHECK: brev
+define i64 @brev_64(i64 %p) #0 {
+  %t = call i64 @llvm.bitreverse.i64(i64 %p) #0
+  ret i64 %t
+}
+
+; CHECK-LABEL: bswap_16
+; CHECK: [[REG:r[0-9]+]] = swiz
+; CHECK: lsr([[REG]],#16)
+define i16 @bswap_16(i16 %p) #0 {
+  %t = call i16 @llvm.bswap.i16(i16 %p) #0
+  ret i16 %t
+}
+
+; CHECK-LABEL: bswap_32
+; CHECK: swiz
+define i32 @bswap_32(i32 %p) #0 {
+  %t = call i32 @llvm.bswap.i32(i32 %p) #0
+  ret i32 %t
+}
+
+; CHECK-LABEL: bswap_64
+; CHECK: swiz
+; CHECK: swiz
+; CHECK: combine
+define i64 @bswap_64(i64 %p) #0 {
+  %t = call i64 @llvm.bswap.i64(i64 %p) #0
+  ret i64 %t
+}
+
+declare i16 @llvm.ctpop.i16(i16) #0
+declare i32 @llvm.ctpop.i32(i32) #0
+declare i64 @llvm.ctpop.i64(i64) #0
+
+declare i16 @llvm.ctlz.i16(i16, i1) #0
+declare i32 @llvm.ctlz.i32(i32, i1) #0
+declare i64 @llvm.ctlz.i64(i64, i1) #0
+
+declare i16 @llvm.cttz.i16(i16, i1) #0
+declare i32 @llvm.cttz.i32(i32, i1) #0
+declare i64 @llvm.cttz.i64(i64, i1) #0
+
+declare i16 @llvm.bitreverse.i16(i16) #0
+declare i32 @llvm.bitreverse.i32(i32) #0
+declare i64 @llvm.bitreverse.i64(i64) #0
+
+declare i16 @llvm.bswap.i16(i16) #0
+declare i32 @llvm.bswap.i32(i32) #0
+declare i64 @llvm.bswap.i64(i64) #0
+
+attributes #0 = { nounwind readnone }
diff --git a/test/CodeGen/Hexagon/block-addr.ll b/test/CodeGen/Hexagon/block-addr.ll
index c0db2cef545e..5af3a69f8aab 100644
--- a/test/CodeGen/Hexagon/block-addr.ll
+++ b/test/CodeGen/Hexagon/block-addr.ll
@@ -2,7 +2,7 @@
 
 ; CHECK: .LJTI
 ; CHECK-DAG: r[[REG:[0-9]+]] = memw(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+<<#[0-9]+}})
-; CHECK-DAG: jumpr:nt r[[REG]]
+; CHECK-DAG: jumpr r[[REG]]
 
 define void @main() #0 {
 entry:
diff --git a/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll b/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
index a56680bd4399..e09f79866215 100644
--- a/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
+++ b/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
@@ -3,7 +3,7 @@
 ; Check that the testcase compiles successfully. Expect that if-conversion
 ; took place.
 ; CHECK-LABEL: fred:
-; CHECK: if (!p0) r1 = memw(r0 + #0)
+; CHECK: if (!p0) r1 = memw(r0+#0)
 
 target triple = "hexagon"
diff --git a/test/CodeGen/Hexagon/brev_ld.ll b/test/CodeGen/Hexagon/brev_ld.ll
index a2914296ec41..861da32b981b 100644
--- a/test/CodeGen/Hexagon/brev_ld.ll
+++ b/test/CodeGen/Hexagon/brev_ld.ll
@@ -29,7 +29,7 @@ entry:
   %1 = bitcast i64* %inputLR to i8*
   %sub = sub i32 13, %shr1
   %shl = shl i32 1, %sub
-; CHECK: = memd(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: = memd(r{{[0-9]*}}++m{{[0-1]}}:brev)
   %2 = call i8* @llvm.hexagon.brev.ldd(i8* %0, i8* %1, i32 %shl)
   %3 = bitcast i8* %1 to i64*
   %4 = load i64, i64* %3, align 8, !tbaa !0
@@ -49,7 +49,7 @@ entry:
   %1 = bitcast i32* %inputLR to i8*
   %sub = sub i32 14, %shr1
   %shl = shl i32 1, %sub
-; CHECK: = memw(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: = memw(r{{[0-9]*}}++m{{[0-1]}}:brev)
   %2 = call i8* @llvm.hexagon.brev.ldw(i8* %0, i8* %1, i32 %shl)
   %3 = bitcast i8* %1 to i32*
   %4 = load i32, i32* %3, align 4, !tbaa !2
@@ -69,7 +69,7 @@ entry:
   %1 = bitcast i16* %inputLR to i8*
   %sub = sub i32 15, %shr1
   %shl = shl i32 1, %sub
-; CHECK: = memh(r{{[0-9]*}} ++ m0:brev)
+; CHECK: = memh(r{{[0-9]*}}++m0:brev)
   %2 = call i8* @llvm.hexagon.brev.ldh(i8* %0, i8* %1, i32 %shl)
   %3 = bitcast i8* %1 to i16*
   %4 = load i16, i16* %3, align 2, !tbaa !3
@@ -89,7 +89,7 @@ entry:
   %1 = bitcast i16* %inputLR to i8*
   %sub = sub i32 15, %shr1
   %shl = shl i32 1, %sub
-; CHECK: = memuh(r{{[0-9]*}} ++ m0:brev)
+; CHECK: = memuh(r{{[0-9]*}}++m0:brev)
   %2 = call i8* @llvm.hexagon.brev.lduh(i8* %0, i8* %1, i32 %shl)
   %3 = bitcast i8* %1 to i16*
   %4 = load i16, i16* %3, align 2, !tbaa !3
@@ -108,7 +108,7 @@ entry:
   %0 = bitcast i16* %arrayidx to i8*
   %sub = sub nsw i32 16, %shr1
   %shl = shl i32 1, %sub
-; CHECK: = memub(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: = memub(r{{[0-9]*}}++m{{[0-1]}}:brev)
   %1 = call i8* @llvm.hexagon.brev.ldub(i8* %0, i8* %inputLR, i32 %shl)
   %2 = load i8, i8* %inputLR, align 1, !tbaa !0
   ret i8 %2
@@ -126,7 +126,7 @@ entry:
   %0 = bitcast i16* %arrayidx to i8*
   %sub = sub nsw i32 16, %shr1
   %shl = shl i32 1, %sub
-; CHECK: = memb(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: = memb(r{{[0-9]*}}++m{{[0-1]}}:brev)
   %1 = call i8* @llvm.hexagon.brev.ldb(i8* %0, i8* %inputLR, i32 %shl)
   %2 = load i8, i8* %inputLR, align 1, !tbaa !0
   ret i8 %2
diff --git a/test/CodeGen/Hexagon/brev_st.ll b/test/CodeGen/Hexagon/brev_st.ll
index 6c55681a683b..cee5f52e3e40 100644
--- a/test/CodeGen/Hexagon/brev_st.ll
+++ b/test/CodeGen/Hexagon/brev_st.ll
@@ -26,7 +26,7 @@ entry:
   %0 = bitcast i16* %arrayidx to i8*
   %sub = sub i32 13, %shr2
   %shl = shl i32 1, %sub
-; CHECK: memd(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: memd(r{{[0-9]*}}++m{{[0-1]}}:brev)
   %1 = tail call i8* @llvm.hexagon.brev.std(i8* %0, i64 undef, i32 %shl)
   ret i64 0
 }
@@ -42,7 +42,7 @@ entry:
   %0 = bitcast i16* %arrayidx to i8*
   %sub = sub i32 14, %shr1
   %shl = shl i32 1, %sub
-; CHECK: memw(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: memw(r{{[0-9]*}}++m{{[0-1]}}:brev)
   %1 = tail call i8* @llvm.hexagon.brev.stw(i8* %0, i32 undef, i32 %shl)
   ret i32 0
 }
@@ -58,7 +58,7 @@ entry:
   %0 = bitcast i16* %arrayidx to i8*
   %sub = sub i32 15, %shr2
   %shl = shl i32 1, %sub
-; CHECK: memh(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: memh(r{{[0-9]*}}++m{{[0-1]}}:brev)
   %1 = tail call i8* @llvm.hexagon.brev.sth(i8* %0, i32 0, i32 %shl)
   ret i16 0
 }
@@ -74,7 +74,7 @@ entry:
   %0 = bitcast i16* %arrayidx to i8*
   %sub = sub i32 15, %shr2
   %shl = shl i32 1, %sub
-; CHECK: memh(r{{[0-9]*}} ++ m{{[0-1]}}:brev){{ *}}={{ *}}r{{[0-9]*}}.h
+; CHECK: memh(r{{[0-9]*}}++m{{[0-1]}}:brev) = r{{[0-9]*}}.h
   %1 = tail call i8* @llvm.hexagon.brev.sthhi(i8* %0, i32 0, i32 %shl)
   ret i16 0
 }
@@ -89,7 +89,7 @@ entry:
   %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
   %0 = bitcast i16* %arrayidx to i8*
   %sub = sub nsw i32 16, %shr2
-; CHECK: memb(r{{[0-9]*}} ++ m{{[0-1]}}:brev)
+; CHECK: memb(r{{[0-9]*}}++m{{[0-1]}}:brev)
   %shl = shl i32 1, %sub
   %1 = tail call i8* @llvm.hexagon.brev.stb(i8* %0, i32 0, i32 %shl)
   ret i8 0
diff --git a/test/CodeGen/Hexagon/builtin-expect.ll b/test/CodeGen/Hexagon/builtin-expect.ll
new file mode 100644
index 000000000000..9945da1782b2
--- /dev/null
+++ b/test/CodeGen/Hexagon/builtin-expect.ll
@@ -0,0 +1,44 @@
+; RUN: llc -march=hexagon -disable-block-placement < %s | FileCheck %s
+
+; Check that the branch to the block b10 is marked as taken (i.e. ":t").
+; CHECK-LABEL: foo
+; CHECK: if ({{.*}}) jump:t .LBB0_[[LAB:[0-9]+]]
+; CHECK: [[LAB]]:
+; CHECK: add({{.*}},#65)
+
+target triple = "hexagon"
+
+define i32 @foo(i32 %a0) local_unnamed_addr #0 {
+b1:
+  %v2 = icmp eq i32 %a0, 0
+  br i1 %v2, label %b3, label %b10, !prof !0
+
+b3:                                               ; preds = %b1
+  br label %b4
+
+b4:                                               ; preds = %b4, %b3
+  %v5 = phi i32 [ %v6, %b4 ], [ 0, %b3 ]
+  %v6 = add nuw nsw i32 %v5, 1
+  %v7 = mul nuw nsw i32 %v5, 67
+  %v8 = tail call i32 @bar(i32 %v7) #0
+  %v9 = icmp eq i32 %v6, 10
+  br i1 %v9, label %b13, label %b4
+
+b10:                                              ; preds = %b1
+  %v11 = add nsw i32 %a0, 65
+  %v12 = tail call i32 @bar(i32 %v11) #0
+  br label %b14
+
+b13:                                              ; preds = %b4
+  br label %b14
+
+b14:                                              ; preds = %b13, %b10
+  %v15 = phi i32 [ %v12, %b10 ], [ 0, %b13 ]
+  ret i32 %v15
+}
+
+declare i32 @bar(i32) local_unnamed_addr #0
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,-hvx-double,-long-calls" }
+
+!0 = !{!"branch_weights", i32 1, i32 2000}
diff --git a/test/CodeGen/Hexagon/cext-valid-packet1.ll b/test/CodeGen/Hexagon/cext-valid-packet1.ll
index 36abc59f5e3e..b0aa3c16f862 100644
--- a/test/CodeGen/Hexagon/cext-valid-packet1.ll
+++ b/test/CodeGen/Hexagon/cext-valid-packet1.ll
@@ -3,8 +3,8 @@
 ; Check that the packetizer generates valid packets with constant
 ; extended instructions.
 
 ; CHECK: {
-; CHECK-NEXT: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}, ##{{[0-9]+}})
-; CHECK-NEXT: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}, ##{{[0-9]+}})
+; CHECK-NEXT: r{{[0-9]+}} = add(r{{[0-9]+}},##{{[0-9]+}})
+; CHECK-NEXT: r{{[0-9]+}} = add(r{{[0-9]+}},##{{[0-9]+}})
 ; CHECK-NEXT: }
 
 define i32 @check-packet1(i32 %a, i32 %b, i32 %c) nounwind readnone {
diff --git a/test/CodeGen/Hexagon/circ_ld.ll b/test/CodeGen/Hexagon/circ_ld.ll
index ffa5f2cd2220..a9b367e9c4ee 100644
--- a/test/CodeGen/Hexagon/circ_ld.ll
+++ b/test/CodeGen/Hexagon/circ_ld.ll
@@ -26,7 +26,7 @@ entry:
   %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
   %0 = bitcast i16* %arrayidx to i8*
   %or = or i32 %shr1, 33554432
-; CHECK: = memb(r{{[0-9]*.}}++{{.}}#-1:circ(m{{[0-1]}}))
+; CHECK: = memb(r{{[0-9]*}}++#-1:circ(m{{[0-1]}}))
   %1 = call i8* @llvm.hexagon.circ.ldb(i8* %0, i8* %inputLR, i32 %or, i32 -1)
   %2 = load i8, i8* %inputLR, align 1, !tbaa !0
   ret i8 %2
@@ -45,7 +45,7 @@ entry:
   %1 = bitcast i64* %inputLR to i8*
   %shl = shl nuw nsw i32 %shr1, 3
   %or = or i32 %shl, 83886080
-; CHECK: = memd(r{{[0-9]*.}}++{{.}}#-8:circ(m{{[0-1]}}))
+; CHECK: = memd(r{{[0-9]*}}++#-8:circ(m{{[0-1]}}))
   %2 = call i8* @llvm.hexagon.circ.ldd(i8* %0, i8* %1, i32 %or, i32 -8)
   %3 = bitcast i8* %1 to i64*
   %4 = load i64, i64* %3, align 8, !tbaa !0
@@ -64,7 +64,7 @@ entry:
   %0 = bitcast i16* %arrayidx to i8*
   %1 = bitcast i16* %inputLR to i8*
   %or = or i32 %shr1, 50331648
-; CHECK: = memh(r{{[0-9]*.}}++{{.}}#-2:circ(m{{[0-1]}}))
+; CHECK: = memh(r{{[0-9]*}}++#-2:circ(m{{[0-1]}}))
   %2 = call i8* @llvm.hexagon.circ.ldh(i8* %0, i8* %1, i32 %or, i32 -2)
   %3 = bitcast i8* %1 to i16*
   %4 = load i16, i16* %3, align 2, !tbaa !2
@@ -82,7 +82,7 @@ entry:
   %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
   %0 = bitcast i16* %arrayidx to i8*
   %or = or i32 %shr1, 33554432
-; CHECK: = memub(r{{[0-9]*.}}++{{.}}#-1:circ(m{{[0-1]}}))
+; CHECK: = memub(r{{[0-9]*}}++#-1:circ(m{{[0-1]}}))
   %1 = call i8* @llvm.hexagon.circ.ldub(i8* %0, i8* %inputLR, i32 %or, i32 -1)
   %2 = load i8, i8* %inputLR, align 1, !tbaa !0
   ret i8 %2
@@ -100,7 +100,7 @@ entry:
   %0 = bitcast i16* %arrayidx to i8*
   %1 = bitcast i16* %inputLR to i8*
   %or = or i32 %shr1, 50331648
-; CHECK: = memuh(r{{[0-9]*.}}++{{.}}#-2:circ(m{{[0-1]}}))
+; CHECK: = memuh(r{{[0-9]*}}++#-2:circ(m{{[0-1]}}))
   %2 = call i8* @llvm.hexagon.circ.lduh(i8* %0, i8* %1, i32 %or, i32 -2)
   %3 = bitcast i8* %1 to i16*
   %4 = load i16, i16* %3, align 2, !tbaa !2
@@ -120,7 +120,7 @@ entry:
   %1 = bitcast i32* %inputLR to i8*
   %shl = shl nuw nsw i32 %shr1, 2
   %or = or i32 %shl, 67108864
-; CHECK: = memw(r{{[0-9]*.}}++{{.}}#-4:circ(m{{[0-1]}}))
+; CHECK: = memw(r{{[0-9]*}}++#-4:circ(m{{[0-1]}}))
   %2 = call i8* @llvm.hexagon.circ.ldw(i8* %0, i8* %1, i32 %or, i32 -4)
   %3 = bitcast i8* %1 to i32*
   %4 = load i32, i32* %3, align 4, !tbaa !3
diff --git a/test/CodeGen/Hexagon/circ_ldw.ll b/test/CodeGen/Hexagon/circ_ldw.ll
index 4511a9cf69da..abfb0886c686 100644
--- a/test/CodeGen/Hexagon/circ_ldw.ll
+++ b/test/CodeGen/Hexagon/circ_ldw.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
-; CHECK: r{{[0-9]*}} = memw(r{{[0-9]*.}}++{{.}}#-4:circ(m0))
+; CHECK: r{{[0-9]*}} = memw(r{{[0-9]*}}++#-4:circ(m0))
 
 %union.vect64 = type { i64 }
diff --git a/test/CodeGen/Hexagon/circ_st.ll b/test/CodeGen/Hexagon/circ_st.ll
index 4b54afbc611d..c8fa256ad48a 100644
--- a/test/CodeGen/Hexagon/circ_st.ll
+++ b/test/CodeGen/Hexagon/circ_st.ll
@@ -23,7 +23,7 @@ entry:
   %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
   %0 = bitcast i16* %arrayidx to i8*
   %or = or i32 %shr2, 33554432
-; CHECK: memb(r{{[0-9]*}}{{.}}++{{.}}#-1:circ(m{{[0-1]}}))
+; CHECK: memb(r{{[0-9]*}}++#-1:circ(m{{[0-1]}}))
   %1 = tail call i8* @llvm.hexagon.circ.stb(i8* %0, i32 0, i32 %or, i32 -1)
   ret i8 0
 }
@@ -39,7 +39,7 @@ entry:
   %0 = bitcast i16* %arrayidx to i8*
   %shl = shl nuw nsw i32 %shr1, 3
   %or = or i32 %shl, 83886080
-; CHECK: memd(r{{[0-9]*}}{{.}}++{{.}}#-8:circ(m{{[0-1]}}))
+; CHECK: memd(r{{[0-9]*}}++#-8:circ(m{{[0-1]}}))
   %1 = tail call i8* @llvm.hexagon.circ.std(i8* %0, i64 undef, i32 %or, i32 -8)
   ret i64 0
 }
@@ -54,7 +54,7 @@ entry:
   %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
   %0 = bitcast i16* %arrayidx to i8*
   %or = or i32 %shr2, 50331648
-; CHECK: memh(r{{[0-9]*}}{{.}}++{{.}}#-2:circ(m{{[0-1]}}))
+; CHECK: memh(r{{[0-9]*}}++#-2:circ(m{{[0-1]}}))
   %1 = tail call i8* @llvm.hexagon.circ.sth(i8* %0, i32 0, i32 %or, i32 -2)
   ret i16 0
 }
@@ -69,7 +69,7 @@ entry:
   %arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
   %0 = bitcast i16* %arrayidx to i8*
   %or = or i32 %shr2, 50331648
-; CHECK: memh(r{{[0-9]*}}{{.}}++{{.}}#-2:circ(m{{[0-1]}})){{ *}}={{ *}}r{{[0-9]*}}.h
+; CHECK: memh(r{{[0-9]*}}++#-2:circ(m{{[0-1]}})) = r{{[0-9]*}}.h
   %1 = tail call i8* @llvm.hexagon.circ.sthhi(i8* %0, i32 0, i32 %or, i32 -2)
   ret i16 0
 }
@@ -85,7 +85,7 @@ entry:
   %0 = bitcast i16* %arrayidx to i8*
   %shl = shl nuw nsw i32 %shr1, 2
   %or = or i32 %shl, 67108864
-; CHECK: memw(r{{[0-9]*}}{{.}}++{{.}}#-4:circ(m{{[0-1]}}))
+; CHECK: memw(r{{[0-9]*}}++#-4:circ(m{{[0-1]}}))
   %1 = tail call i8* @llvm.hexagon.circ.stw(i8* %0, i32 undef, i32 %or, i32 -4)
   ret i32 0
 }
diff --git a/test/CodeGen/Hexagon/clr_set_toggle.ll b/test/CodeGen/Hexagon/clr_set_toggle.ll
index 19e3ed0cf897..4e9838316522 100644
--- a/test/CodeGen/Hexagon/clr_set_toggle.ll
+++ b/test/CodeGen/Hexagon/clr_set_toggle.ll
@@ -4,7 +4,7 @@
 define i32 @my_clrbit(i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: my_clrbit
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#31)
   %x.addr = alloca i32, align 4
   store i32 %x, i32* %x.addr, align 4
   %0 = load i32, i32* %x.addr, align 4
@@ -15,7 +15,7 @@ entry:
 define i64 @my_clrbit2(i64 %x) nounwind {
 entry:
 ; CHECK-LABEL: my_clrbit2
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#31)
   %x.addr = alloca i64, align 8
   store i64 %x, i64* %x.addr, align 8
   %0 = load i64, i64* %x.addr, align 8
@@ -26,7 +26,7 @@ entry:
 define i64 @my_clrbit3(i64 %x) nounwind {
 entry:
 ; CHECK-LABEL: my_clrbit3
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#31)
   %x.addr = alloca i64, align 8
   store i64 %x, i64* %x.addr, align 8
   %0 = load i64, i64* %x.addr, align 8
@@ -37,7 +37,7 @@ entry:
 define i32 @my_clrbit4(i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: my_clrbit4
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #13)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#13)
   %x.addr = alloca i32, align 4
   store i32 %x, i32* %x.addr, align 4
   %0 = load i32, i32* %x.addr, align 4
@@ -48,7 +48,7 @@ entry:
 define i64 @my_clrbit5(i64 %x) nounwind {
 entry:
 ; CHECK-LABEL: my_clrbit5
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #13)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#13)
   %x.addr = alloca i64, align 8
   store i64 %x, i64* %x.addr, align 8
   %0 = load i64, i64* %x.addr, align 8
@@ -59,7 +59,7 @@ entry:
 define i64 @my_clrbit6(i64 %x) nounwind {
 entry:
 ; CHECK-LABEL: my_clrbit6
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #27)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#27)
   %x.addr = alloca i64, align 8
   store i64 %x, i64* %x.addr, align 8
   %0 = load i64, i64* %x.addr, align 8
@@ -70,7 +70,7 @@ entry:
 define zeroext i16 @my_setbit(i16 zeroext %crc) nounwind {
 entry:
 ; CHECK-LABEL: my_setbit
-; CHECK: memh(r{{[0-9]+}}+#{{[0-9]+}}){{ *}}={{ *}}setbit(#15)
+; CHECK: memh(r{{[0-9]+}}+#{{[0-9]+}}) = setbit(#15)
   %crc.addr = alloca i16, align 2
   store i16 %crc, i16* %crc.addr, align 2
   %0 = load i16, i16* %crc.addr, align 2
@@ -85,7 +85,7 @@ entry:
 define i32 @my_setbit2(i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: my_setbit2
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #15)
+; CHECK: r{{[0-9]+}} = setbit(r{{[0-9]+}},#15)
   %x.addr = alloca i32, align 4
   store i32 %x, i32* %x.addr, align 4
   %0 = load i32, i32* %x.addr, align 4
@@ -96,7 +96,7 @@ entry:
 define i64 @my_setbit3(i64 %x) nounwind {
 entry:
 ; CHECK-LABEL: my_setbit3
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #15)
+; CHECK: r{{[0-9]+}} = setbit(r{{[0-9]+}},#15)
   %x.addr = alloca i64, align 8
   store i64 %x, i64* %x.addr, align 8
   %0 = load i64, i64* %x.addr, align 8
@@ -107,7 +107,7 @@ entry:
 define i32 @my_setbit4(i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: my_setbit4
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = setbit(r{{[0-9]+}},#31)
   %x.addr = alloca i32, align 4
   store i32 %x, i32* %x.addr, align 4
   %0 = load i32, i32* %x.addr, align 4
@@ -118,7 +118,7 @@ entry:
 define i64 @my_setbit5(i64 %x) nounwind {
 entry:
 ; CHECK-LABEL: my_setbit5
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}setbit(r{{[0-9]+}}, #13)
+; CHECK: r{{[0-9]+}} = setbit(r{{[0-9]+}},#13)
   %x.addr = alloca i64, align 8
   store i64 %x, i64* %x.addr, align 8
   %0 = load i64, i64* %x.addr, align 8
@@ -129,7 +129,7 @@ entry:
 define zeroext i16 @my_togglebit(i16 zeroext %crc) nounwind {
 entry:
 ; CHECK-LABEL: my_togglebit
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #15)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#15)
   %crc.addr = alloca i16, align 2
   store i16 %crc, i16* %crc.addr, align 2
   %0 = load i16, i16* %crc.addr, align 2
@@ -144,7 +144,7 @@ entry:
 define i32 @my_togglebit2(i32 %x) nounwind {
 entry:
 ; CHECK-LABEL: my_togglebit2
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #15)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#15)
   %x.addr = alloca i32, align 4
   store i32 %x, i32* %x.addr, align 4
   %0 = load i32, i32* %x.addr, align 4
@@ -155,7 +155,7 @@ entry:
 define i64 @my_togglebit3(i64 %x) nounwind {
 entry:
 ; CHECK-LABEL: my_togglebit3
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #15)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#15)
   %x.addr = alloca i64, align 8
   store i64 %x, i64* %x.addr, align 8
   %0 = load i64, i64* %x.addr, align 8
@@ -166,7 +166,7 @@ entry:
 define i64 @my_togglebit4(i64 %x) nounwind {
 entry:
 ; CHECK-LABEL: my_togglebit4
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #20)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#20)
   %x.addr = alloca i64, align 8
   store i64 %x, i64* %x.addr, align 8
   %0 = load i64, i64* %x.addr, align 8
diff --git a/test/CodeGen/Hexagon/cmp.ll b/test/CodeGen/Hexagon/cmp.ll
index c274a787249a..a0bb90de1c27 100644
--- a/test/CodeGen/Hexagon/cmp.ll
+++ b/test/CodeGen/Hexagon/cmp.ll
@@ -9,7 +9,7 @@ entry:
   %1 = call i32 @llvm.hexagon.C2.cmpeq(i32 %0, i32 1)
   ret i32 %1
 }
-; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}}, r{{[0-9]}})
+; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}},r{{[0-9]}})
 
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.hexagon.C2.cmpeq(i32, i32) #1
@@ -23,7 +23,7 @@ entry:
   %1 = call i32 @llvm.hexagon.C2.cmpgt(i32 %0, i32 2)
   ret i32 %1
 }
-; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, r{{[0-9]}})
+; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}},r{{[0-9]}})
 
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.hexagon.C2.cmpgt(i32, i32) #1
@@ -37,7 +37,7 @@ entry:
   %1 = call i32 @llvm.hexagon.C2.cmpgtu(i32 %0, i32 3)
   ret i32 %1
 }
-; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, r{{[0-9]}})
+; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}},r{{[0-9]}})
 
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.hexagon.C2.cmpgtu(i32, i32) #1
@@ -51,7 +51,7 @@ entry:
   %1 = call i32 @llvm.hexagon.C2.cmplt(i32 %0, i32 4)
   ret i32 %1
 }
-; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, r{{[0-9]}})
+; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}},r{{[0-9]}})
 
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.hexagon.C2.cmplt(i32, i32) #1
@@ -65,7 +65,7 @@ entry:
   %1 = call i32 @llvm.hexagon.C2.cmpltu(i32 %0, i32 5)
   ret i32 %1
 }
-; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, r{{[0-9]}})
+; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}},r{{[0-9]}})
 
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.hexagon.C2.cmpltu(i32, i32) #1
@@ -79,7 +79,7 @@ entry:
   %1 = call i32 @llvm.hexagon.C2.cmpeqi(i32 %0, i32 10)
   ret i32 %1
 }
-; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}}, {{.*}}#10)
+; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}},#10)
 
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.hexagon.C2.cmpeqi(i32, i32) #1
@@ -93,7 +93,7 @@ entry:
   %1 = call i32 @llvm.hexagon.C2.cmpgti(i32 %0, i32 20)
   ret i32 %1
 }
-; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, {{.*}}#20)
+; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}},#20)
 
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.hexagon.C2.cmpgti(i32, i32) #1
@@ -107,7 +107,7 @@ entry:
   %1 = call i32 @llvm.hexagon.C2.cmpgtui(i32 %0, i32 40)
   ret i32 %1
 }
-; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, {{.*}}#40)
+; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}},#40)
 
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.hexagon.C2.cmpgtui(i32, i32) #1
@@ -121,7 +121,7 @@ entry:
   %1 = call i32 @llvm.hexagon.C2.cmpgei(i32 %0, i32 3)
   ret i32 %1
 }
-; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, {{.*}}#2)
+; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}},#2)
 
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.hexagon.C2.cmpgei(i32, i32) #1
@@ -135,7 +135,7 @@ entry:
   %1 = call i32 @llvm.hexagon.C2.cmpgeui(i32 %0, i32 3)
   ret i32 %1
 }
-; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, {{.*}}#2)
+; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}},#2)
 
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.hexagon.C2.cmpgeui(i32, i32) #1
@@ -149,7 +149,7 @@ entry:
   %1 = call i32 @llvm.hexagon.C2.cmpgeui(i32 %0, i32 0)
   ret i32 %1
 }
-; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}}, r{{[0-9]}})
+; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}},r{{[0-9]}})
 
 attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Hexagon/combine.ll b/test/CodeGen/Hexagon/combine.ll
index 04a080fdf425..5b71b3665667 100644
--- a/test/CodeGen/Hexagon/combine.ll
+++ b/test/CodeGen/Hexagon/combine.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hsdr -hexagon-bit=0 < %s | FileCheck %s
-; CHECK: combine(r{{[0-9]+}}, r{{[0-9]+}})
+; CHECK: combine(r{{[0-9]+}},r{{[0-9]+}})
 
 @j = external global i32
 @k = external global i64
diff --git a/test/CodeGen/Hexagon/compound.ll b/test/CodeGen/Hexagon/compound.ll
index f8d36b8b77d9..a3bd52f97194 100644
--- a/test/CodeGen/Hexagon/compound.ll
+++ b/test/CodeGen/Hexagon/compound.ll
b/test/CodeGen/Hexagon/compound.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -filetype=obj -o - %s | llvm-objdump -d - | FileCheck %s +; RUN: llc -march=hexagon -filetype=obj -ifcvt-limit=0 -o - %s | llvm-objdump -d - | FileCheck %s ; CHECK: p0 = cmp.gt(r0,#-1); if (!p0.new) jump:nt @@ -14,4 +14,4 @@ ret void y: call void @b() ret void -}
\ No newline at end of file +} diff --git a/test/CodeGen/Hexagon/constp-combine-neg.ll b/test/CodeGen/Hexagon/constp-combine-neg.ll index 18f0e81076af..089d9f6a9984 100644 --- a/test/CodeGen/Hexagon/constp-combine-neg.ll +++ b/test/CodeGen/Hexagon/constp-combine-neg.ll @@ -19,9 +19,9 @@ entry: ; The instructions seem to be in a different order in the .s file than ; the corresponding values in the .ll file, so just run the test three ; times and each time test for a different instruction. -; CHECK-TEST1: combine(#-2, #3) -; CHECK-TEST2: combine(#6, #-4) -; CHECK-TEST3: combine(#-10, #-8) +; CHECK-TEST1: combine(#-2,#3) +; CHECK-TEST2: combine(#6,#-4) +; CHECK-TEST3: combine(#-10,#-8) attributes #0 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" } diff --git a/test/CodeGen/Hexagon/convert-to-dot-old.ll b/test/CodeGen/Hexagon/convert-to-dot-old.ll new file mode 100644 index 000000000000..b793fa0c22cd --- /dev/null +++ b/test/CodeGen/Hexagon/convert-to-dot-old.ll @@ -0,0 +1,110 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv55 -filetype=obj -o /dev/null < %s +; REQUIRES: asserts +; There should be no output (nothing on stderr). + +; Due to a bug in converting a dot-new branch into a dot-old one, opcodes +; with branch prediction bits were selected even if the architecture did +; not support them. On V55-, the dot-old branch opcodes are J2_jumpt and +; J2_jumpf (and a pair of J2_jumpr*), whereas J2_jumptpt could have been +; a result of the conversion to dot-old. This would fail a verification +; check in the MC code emitter, so make sure it does not happen. + +target triple = "hexagon" + +define void @fred(i16* nocapture %a0, i16* nocapture %a1, i16* nocapture %a2, i16 signext %a3, i16* %a4, i16 signext %a5, i16 signext %a6, i16 signext %a7, i32 %a8, i16 signext %a9, i16 signext %a10) local_unnamed_addr #0 { +b11: + %v12 = sext i16 %a5 to i32 + %v13 = tail call i32 @llvm.hexagon.A2.sxth(i32 %v12) + %v14 = tail call i32 @llvm.hexagon.A2.sxth(i32 2) + %v15 = tail call i32 @llvm.hexagon.A2.sxth(i32 undef) + %v16 = tail call i32 @llvm.hexagon.A2.sath(i32 undef) + %v17 = tail call i32 @llvm.hexagon.A2.sxth(i32 %v16) + %v18 = tail call i32 @llvm.hexagon.A2.aslh(i32 undef) + %v19 = tail call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v18, i32 %v14) + %v20 = tail call i32 @llvm.hexagon.A2.asrh(i32 %v19) + %v21 = tail call i32 @llvm.hexagon.A2.sxth(i32 %v20) + %v22 = tail call i32 @llvm.hexagon.A2.sub(i32 %v17, i32 %v21) + %v23 = tail call i32 @llvm.hexagon.A2.sath(i32 %v22) + %v24 = select i1 undef, i32 undef, i32 %v23 + %v25 = tail call i32 @llvm.hexagon.A2.sxth(i32 %v24) + %v26 = tail call i32 @llvm.hexagon.A2.sub(i32 %v13, i32 %v25) + %v27 = tail call i32 @llvm.hexagon.A2.sath(i32 %v26) + %v28 = tail call i32 @llvm.hexagon.A2.sxth(i32 %v27) + %v29 = tail call i32 @llvm.hexagon.A2.sub(i32 %v28, i32 %v14) + %v30 = tail call i32 @llvm.hexagon.A2.sath(i32 %v29) + %v31 = shl i32 %v30, 16 + %v32 = icmp sgt i32 undef, %v31 + %v33 = select i1 %v32, i32 %v30, i32 undef + %v34 = trunc i32 %v33 to i16 + %v35 = trunc i32 %v24 to i16 + call void @foo(i16* nonnull undef, i32* nonnull undef, i16* %a4, i16 signext %v35, i16 signext %v34, i16 signext 2) #4 + %v36 = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v18, i32 undef) + %v37 = call i32 @llvm.hexagon.A2.asrh(i32 %v36) + %v38 = call i32 @llvm.hexagon.A2.sub(i32 %v13, i32 undef) + %v39 = call i32 
@llvm.hexagon.A2.sath(i32 %v38) + %v40 = call i32 @llvm.hexagon.A2.sxth(i32 %v39) + %v41 = call i32 @llvm.hexagon.A2.sub(i32 %v40, i32 %v14) + %v42 = call i32 @llvm.hexagon.A2.sath(i32 %v41) + %v43 = select i1 undef, i32 %v42, i32 %v37 + %v44 = trunc i32 %v43 to i16 + call void @foo(i16* nonnull undef, i32* nonnull undef, i16* %a4, i16 signext undef, i16 signext %v44, i16 signext 2) #4 + %v45 = call i32 @llvm.hexagon.A2.sath(i32 undef) + %v46 = select i1 undef, i32 undef, i32 %v45 + %v47 = trunc i32 %v46 to i16 + call void @foo(i16* nonnull undef, i32* nonnull undef, i16* %a4, i16 signext %v47, i16 signext undef, i16 signext 2) #4 + %v48 = call i32 @llvm.hexagon.A2.sub(i32 undef, i32 %v15) + %v49 = call i32 @llvm.hexagon.A2.sath(i32 %v48) + %v50 = trunc i32 %v49 to i16 + store i16 %v50, i16* undef, align 2 + store i16 %a3, i16* %a0, align 2 + %v51 = sext i16 %a10 to i32 + %v52 = call i32 @llvm.hexagon.A2.sxth(i32 %v51) + %v53 = call i32 @llvm.hexagon.A2.add(i32 undef, i32 %v52) + %v54 = call i32 @llvm.hexagon.A2.sath(i32 %v53) + %v55 = trunc i32 %v54 to i16 + store i16 %v55, i16* %a1, align 2 + store i16 %a7, i16* %a2, align 2 + %v56 = sext i16 %a9 to i32 + %v57 = call i32 @llvm.hexagon.A2.sxth(i32 %v56) + br i1 undef, label %b58, label %b62 + +b58: ; preds = %b11 + %v59 = call i32 @llvm.hexagon.A2.add(i32 %v57, i32 %v52) + %v60 = call i32 @llvm.hexagon.A2.sath(i32 %v59) + %v61 = trunc i32 %v60 to i16 + store i16 %v61, i16* %a1, align 2 + br label %b63 + +b62: ; preds = %b11 + br label %b63 + +b63: ; preds = %b62, %b58 + %v64 = phi i16 [ undef, %b58 ], [ %a9, %b62 ] + %v65 = icmp slt i16 undef, %v64 + br i1 %v65, label %b66, label %b67 + +b66: ; preds = %b63 + br i1 undef, label %b67, label %b68 + +b67: ; preds = %b66, %b63 + store i16 0, i16* %a2, align 2 + br label %b68 + +b68: ; preds = %b67, %b66 + ret void +} + +declare i32 @llvm.hexagon.A2.sath(i32) #2 +declare i32 @llvm.hexagon.A2.add(i32, i32) #2 +declare i32 @llvm.hexagon.A2.sxth(i32) #2 +declare i32 @llvm.hexagon.A2.sub(i32, i32) #2 +declare i32 @llvm.hexagon.A2.asrh(i32) #2 +declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32) #2 +declare i32 @llvm.hexagon.A2.aslh(i32) #2 +declare void @foo(i16*, i32*, i16*, i16 signext, i16 signext, i16 signext) local_unnamed_addr #3 + +attributes #0 = { nounwind optsize "target-cpu"="hexagonv55" "target-features"="-hvx,-hvx-double,-long-calls" } +attributes #1 = { argmemonly nounwind } +attributes #2 = { nounwind readnone } +attributes #3 = { optsize "target-cpu"="hexagonv55" "target-features"="-hvx,-hvx-double,-long-calls" } +attributes #4 = { nounwind optsize } diff --git a/test/CodeGen/Hexagon/ctlz-cttz-ctpop.ll b/test/CodeGen/Hexagon/ctlz-cttz-ctpop.ll deleted file mode 100644 index b8f483298f8c..000000000000 --- a/test/CodeGen/Hexagon/ctlz-cttz-ctpop.ll +++ /dev/null @@ -1,36 +0,0 @@ -; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s - -; CHECK-DAG: ct0({{r[0-9]*:[0-9]*}}) -; CHECK-DAG: cl0({{r[0-9]*:[0-9]*}}) -; CHECK-DAG: ct0({{r[0-9]*}}) -; CHECK-DAG: cl0({{r[0-9]*}}) -; CHECK-DAG: r{{[0-9]+}} += lsr(r{{[0-9]+}}, #4) - -define i32 @foo(i64 %a, i32 %b) nounwind { -entry: - %tmp0 = tail call i64 @llvm.ctlz.i64( i64 %a, i1 true ) - %tmp1 = tail call i64 @llvm.cttz.i64( i64 %a, i1 true ) - %tmp2 = tail call i32 @llvm.ctlz.i32( i32 %b, i1 true ) - %tmp3 = tail call i32 @llvm.cttz.i32( i32 %b, i1 true ) - %tmp4 = tail call i64 @llvm.ctpop.i64( i64 %a ) - %tmp5 = tail call i32 @llvm.ctpop.i32( i32 %b ) - - - %tmp6 = trunc i64 %tmp0 to i32 - %tmp7 = trunc i64 %tmp1 to i32 - %tmp8 = 
trunc i64 %tmp4 to i32 - %tmp9 = add i32 %tmp6, %tmp7 - %tmp10 = add i32 %tmp9, %tmp8 - %tmp11 = add i32 %tmp10, %tmp2 - %tmp12 = add i32 %tmp11, %tmp3 - %tmp13 = add i32 %tmp12, %tmp5 - - ret i32 %tmp13 -} - -declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone -declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone -declare i64 @llvm.cttz.i64(i64, i1) nounwind readnone -declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone -declare i64 @llvm.ctpop.i64(i64) nounwind readnone -declare i32 @llvm.ctpop.i32(i32) nounwind readnone diff --git a/test/CodeGen/Hexagon/dead-store-stack.ll b/test/CodeGen/Hexagon/dead-store-stack.ll index 93d324baad9e..0d8124e76b90 100644 --- a/test/CodeGen/Hexagon/dead-store-stack.ll +++ b/test/CodeGen/Hexagon/dead-store-stack.ll @@ -1,6 +1,6 @@ ; RUN: llc -O2 -march=hexagon < %s | FileCheck %s ; CHECK: ParseFunc: -; CHECK: r[[ARG0:[0-9]+]] = memuh(r[[ARG1:[0-9]+]] + #[[OFFSET:[0-9]+]]) +; CHECK: r[[ARG0:[0-9]+]] = memuh(r[[ARG1:[0-9]+]]+#[[OFFSET:[0-9]+]]) ; CHECK: memw(r[[ARG1]]+#[[OFFSET]]) = r[[ARG0]] @.str.3 = external unnamed_addr constant [8 x i8], align 1 diff --git a/test/CodeGen/Hexagon/early-if-merge-loop.ll b/test/CodeGen/Hexagon/early-if-merge-loop.ll new file mode 100644 index 000000000000..f45058f029dd --- /dev/null +++ b/test/CodeGen/Hexagon/early-if-merge-loop.ll @@ -0,0 +1,91 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; Make sure that the loop in the end has only one basic block. + +; CHECK-LABEL: fred +; Rely on the comments, make sure the one for the loop header is present. +; CHECK: %loop +; CHECK-NOT: %should_merge + +target triple = "hexagon" + +define i32 @fred(i32 %a0, i64* nocapture readonly %a1) #0 { +b2: + %v3 = bitcast i64* %a1 to i32* + %v4 = getelementptr inbounds i32, i32* %v3, i32 1 + %v5 = zext i32 %a0 to i64 + br label %loop + +loop: ; preds = %should_merge, %b2 + %v7 = phi i32 [ 0, %b2 ], [ %v49, %should_merge ] + %v8 = phi i32 [ 0, %b2 ], [ %v42, %should_merge ] + %v9 = phi i32* [ %v4, %b2 ], [ %v53, %should_merge ] + %v10 = phi i32 [ 0, %b2 ], [ %v30, %should_merge ] + %v11 = phi i32* [ %v3, %b2 ], [ %v51, %should_merge ] + %v12 = phi i32 [ 0, %b2 ], [ %v23, %should_merge ] + %v13 = phi i32 [ 2, %b2 ], [ %v54, %should_merge ] + %v14 = load i32, i32* %v11, align 4, !tbaa !0 + %v15 = load i32, i32* %v9, align 4, !tbaa !0 + %v16 = icmp ult i32 %v13, 30 + %v17 = zext i32 %v12 to i64 + %v18 = shl nuw i64 %v17, 32 + %v19 = zext i32 %v14 to i64 + %v20 = or i64 %v18, %v19 + %v21 = tail call i64 @llvm.hexagon.A2.addp(i64 %v20, i64 %v5) + %v22 = lshr i64 %v21, 32 + %v23 = trunc i64 %v22 to i32 + %v24 = zext i32 %v10 to i64 + %v25 = shl nuw i64 %v24, 32 + %v26 = zext i32 %v15 to i64 + %v27 = or i64 %v25, %v26 + %v28 = tail call i64 @llvm.hexagon.A2.addp(i64 %v27, i64 %v5) + %v29 = lshr i64 %v28, 32 + %v30 = trunc i64 %v29 to i32 + %v31 = getelementptr inbounds i32, i32* %v3, i32 %v13 + %v32 = load i32, i32* %v31, align 4, !tbaa !0 + %v33 = or i32 %v13, 1 + %v34 = getelementptr inbounds i32, i32* %v3, i32 %v33 + %v35 = load i32, i32* %v34, align 4, !tbaa !0 + %v36 = zext i32 %v8 to i64 + %v37 = shl nuw i64 %v36, 32 + %v38 = zext i32 %v32 to i64 + %v39 = or i64 %v37, %v38 + %v40 = tail call i64 @llvm.hexagon.A2.subp(i64 %v39, i64 %v5) + %v41 = lshr i64 %v40, 32 + %v42 = trunc i64 %v41 to i32 + %v43 = zext i32 %v7 to i64 + %v44 = shl nuw i64 %v43, 32 + %v45 = zext i32 %v35 to i64 + %v46 = or i64 %v44, %v45 + %v47 = tail call i64 @llvm.hexagon.A2.subp(i64 %v46, i64 %v5) + %v48 = lshr i64 %v47, 32 + %v49 = trunc i64 %v48 to i32 + br i1 
%v16, label %should_merge, label %exit + +should_merge: ; preds = %loop + %v50 = add nuw nsw i32 %v13, 2 + %v51 = getelementptr inbounds i32, i32* %v3, i32 %v50 + %v52 = add nuw nsw i32 %v13, 3 + %v53 = getelementptr inbounds i32, i32* %v3, i32 %v52 + %v54 = add nuw nsw i32 %v13, 4 + br label %loop + +exit: ; preds = %loop + %v57 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v42, i32 %v23) + %v58 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v49, i32 %v30) + %v59 = tail call i64 @llvm.hexagon.A2.addp(i64 %v57, i64 %v58) + %v60 = lshr i64 %v59, 32 + %v61 = trunc i64 %v60 to i32 + ret i32 %v61 +} + +declare i64 @llvm.hexagon.A2.addp(i64, i64) #1 +declare i64 @llvm.hexagon.A2.subp(i64, i64) #1 +declare i64 @llvm.hexagon.A2.combinew(i32, i32) #1 + +attributes #0 = { nounwind readonly "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double,-long-calls" } +attributes #1 = { nounwind readnone } + +!0 = !{!1, !1, i64 0} +!1 = !{!"long", !2, i64 0} +!2 = !{!"omnipotent char", !3, i64 0} +!3 = !{!"Simple C/C++ TBAA"} diff --git a/test/CodeGen/Hexagon/early-if-phi-i1.ll b/test/CodeGen/Hexagon/early-if-phi-i1.ll index 1649d51269ee..f4af62d6b10e 100644 --- a/test/CodeGen/Hexagon/early-if-phi-i1.ll +++ b/test/CodeGen/Hexagon/early-if-phi-i1.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s +; RUN: llc -march=hexagon < %s ; REQUIRES: asserts ; Check that the early if-conversion does not predicate block1 (where the ; join block has a phi node of type i1). diff --git a/test/CodeGen/Hexagon/early-if-vecpred.ll b/test/CodeGen/Hexagon/early-if-vecpred.ll new file mode 100644 index 000000000000..ca119e1d1dec --- /dev/null +++ b/test/CodeGen/Hexagon/early-if-vecpred.ll @@ -0,0 +1,37 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; REQUIRES: asserts + +; Hexagon early if-conversion used to crash on this testcase due to not +; recognizing vector predicate registers. + +target triple = "hexagon" + +; Check that the early if-conversion has not happened. 
+ +; CHECK-LABEL: fred +; CHECK: q{{[0-3]}} = not +; CHECK: LBB +; CHECK: if (q{{[0-3]}}) vmem +define void @fred(i32 %a0) #0 { +b1: + %v2 = tail call <1024 x i1> @llvm.hexagon.V6.pred.scalar2.128B(i32 %a0) #2 + br i1 undef, label %b3, label %b5 + +b3: ; preds = %b1 + %v4 = tail call <1024 x i1> @llvm.hexagon.V6.pred.not.128B(<1024 x i1> %v2) #2 + br label %b5 + +b5: ; preds = %b3, %b1 + %v6 = phi <1024 x i1> [ %v4, %b3 ], [ %v2, %b1 ] + %v7 = bitcast <1024 x i1> %v6 to <32 x i32> + tail call void asm sideeffect "if ($0) vmem($1) = $2;", "q,r,v,~{memory}"(<32 x i32> %v7, <32 x i32>* undef, <32 x i32> undef) #2 + ret void +} + +declare <1024 x i1> @llvm.hexagon.V6.pred.scalar2.128B(i32) #1 +declare <1024 x i1> @llvm.hexagon.V6.pred.not.128B(<1024 x i1>) #1 + +attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-double" } +attributes #1 = { nounwind readnone } +attributes #2 = { nounwind } + diff --git a/test/CodeGen/Hexagon/eh_return.ll b/test/CodeGen/Hexagon/eh_return.ll index 67649a07afc7..1596ade24c82 100644 --- a/test/CodeGen/Hexagon/eh_return.ll +++ b/test/CodeGen/Hexagon/eh_return.ll @@ -4,7 +4,7 @@ ; CHECK: deallocframe ; CHECK-NEXT: } ; CHECK-NEXT: { -; CHECK-NEXT: r29 = add(r29, r28) +; CHECK-NEXT: r29 = add(r29,r28) ; CHECK-NEXT: } ; CHECK-NEXT: { ; CHECK-NEXT: jumpr r31 diff --git a/test/CodeGen/Hexagon/eliminate-pred-spill.ll b/test/CodeGen/Hexagon/eliminate-pred-spill.ll index 6fb0a3e2658d..b3a4a2f42524 100644 --- a/test/CodeGen/Hexagon/eliminate-pred-spill.ll +++ b/test/CodeGen/Hexagon/eliminate-pred-spill.ll @@ -1,5 +1,4 @@ -; RUN: llc -march=hexagon -mcpu=hexagonv60 -enable-hexagon-hvx-double \ -; RUN: -hexagon-bit=0 < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-bit=0 < %s | FileCheck %s ; This spill should be eliminated. ; CHECK-NOT: vmem(r29+#6) @@ -140,5 +139,5 @@ declare <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32>, <32 x i32>, i32) declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #1 -attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-double" } attributes #1 = { nounwind readnone } diff --git a/test/CodeGen/Hexagon/expand-condsets-dead-bad.ll b/test/CodeGen/Hexagon/expand-condsets-dead-bad.ll new file mode 100644 index 000000000000..ce7f5e0ce12f --- /dev/null +++ b/test/CodeGen/Hexagon/expand-condsets-dead-bad.ll @@ -0,0 +1,54 @@ +; RUN: llc -march=hexagon -verify-machineinstrs < %s | FileCheck %s +; REQUIRES: asserts + +; Check for some output other than crashing. 
+; CHECK: bitsset + +target triple = "hexagon" + +; Function Attrs: nounwind +define void @fred() local_unnamed_addr #0 { +b0: + %v1 = load i32, i32* undef, align 4 + %v2 = and i32 %v1, 603979776 + %v3 = trunc i32 %v2 to i30 + switch i30 %v3, label %b23 [ + i30 -536870912, label %b4 + i30 -469762048, label %b5 + ] + +b4: ; preds = %b0 + unreachable + +b5: ; preds = %b0 + %v6 = load i32, i32* undef, align 4 + br i1 undef, label %b7, label %b8 + +b7: ; preds = %b5 + br label %b9 + +b8: ; preds = %b5 + br label %b9 + +b9: ; preds = %b8, %b7 + %v10 = load i32, i32* undef, align 4 + %v11 = load i32, i32* undef, align 4 + %v12 = mul nsw i32 %v11, %v10 + %v13 = ashr i32 %v12, 13 + %v14 = mul nsw i32 %v13, %v13 + %v15 = zext i32 %v14 to i64 + %v16 = mul nsw i32 %v6, %v6 + %v17 = zext i32 %v16 to i64 + %v18 = lshr i64 %v17, 5 + %v19 = select i1 undef, i64 %v18, i64 %v17 + %v20 = mul nuw nsw i64 %v19, %v15 + %v21 = trunc i64 %v20 to i32 + %v22 = and i32 %v21, 2147483647 + store i32 %v22, i32* undef, align 4 + unreachable + +b23: ; preds = %b0 + ret void +} + +attributes #0 = { nounwind "target-cpu"="hexagonv5" "target-features"="-hvx,-hvx-double,-long-calls" } diff --git a/test/CodeGen/Hexagon/expand-condsets-dead-pred.ll b/test/CodeGen/Hexagon/expand-condsets-dead-pred.ll new file mode 100644 index 000000000000..ecec83625e1c --- /dev/null +++ b/test/CodeGen/Hexagon/expand-condsets-dead-pred.ll @@ -0,0 +1,45 @@ +; RUN: llc -march=hexagon -verify-machineinstrs < %s | FileCheck %s +; REQUIRES: asserts + +; Check for some output (as opposed to a crash). +; CHECK: loop0 + +target triple = "hexagon" + +@x = external local_unnamed_addr global [80 x i32], align 8 + +; Function Attrs: nounwind +define void @fred() local_unnamed_addr #0 { +b0: + br label %b1 + +b1: ; preds = %b20, %b0 + br label %b2 + +b2: ; preds = %b2, %b1 + %v3 = phi i32 [ 0, %b1 ], [ %v17, %b2 ] + %v4 = phi i32 [ 0, %b1 ], [ %v16, %b2 ] + %v5 = phi i32 [ undef, %b1 ], [ %v18, %b2 ] + %v6 = load i32, i32* undef, align 8 + %v7 = icmp sgt i32 %v6, undef + %v8 = select i1 %v7, i32 %v3, i32 %v4 + %v9 = select i1 undef, i32 0, i32 %v8 + %v10 = select i1 undef, i32 undef, i32 %v9 + %v11 = select i1 undef, i32 0, i32 %v10 + %v12 = icmp sgt i32 undef, 0 + %v13 = select i1 %v12, i32 undef, i32 %v11 + %v14 = select i1 false, i32 undef, i32 %v13 + %v15 = select i1 false, i32 undef, i32 %v14 + %v16 = select i1 false, i32 undef, i32 %v15 + %v17 = add nsw i32 %v3, 8 + %v18 = add i32 %v5, -8 + %v19 = icmp eq i32 %v18, 0 + br i1 %v19, label %b20, label %b2 + +b20: ; preds = %b2 + %v21 = getelementptr inbounds [80 x i32], [80 x i32]* @x, i32 0, i32 %v16 + store i32 -2000, i32* %v21, align 4 + br label %b1 +} + +attributes #0 = { nounwind "target-cpu"="hexagonv55" "target-features"="-hvx,-hvx-double,-long-calls" } diff --git a/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir b/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir index 983035e228cc..f3d105f75da2 100644 --- a/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir +++ b/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir @@ -1,4 +1,4 @@ -# RUN: llc -march=hexagon -run-pass expand-condsets -o - 2>&1 %s -verify-machineinstrs -debug-only=expand-condsets | FileCheck %s +# RUN: llc -march=hexagon -run-pass expand-condsets -o - %s -verify-machineinstrs -debug-only=expand-condsets 2>&1 | FileCheck %s # REQUIRES: asserts # Check that coalesced registers are removed from live intervals. 
diff --git a/test/CodeGen/Hexagon/expand-vstorerw-undef2.ll b/test/CodeGen/Hexagon/expand-vstorerw-undef2.ll new file mode 100644 index 000000000000..4f2bb86f0842 --- /dev/null +++ b/test/CodeGen/Hexagon/expand-vstorerw-undef2.ll @@ -0,0 +1,216 @@ +; RUN: llc -march=hexagon < %s +; REQUIRES: asserts + +; Dead defs may still appear live in LivePhysRegs, leading to an expansion +; of a double-vector store that uses an undefined source register. + +target triple = "hexagon-unknown--elf" + +declare noalias i8* @halide_malloc() local_unnamed_addr #0 +declare void @halide_free() local_unnamed_addr #0 + +declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #1 +declare <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32>) #1 +declare <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32) #1 +declare <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32>, <32 x i32>) #1 +declare <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32>, <32 x i32>) #1 +declare <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32>, <32 x i32>, i32) #1 +declare <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32>, <32 x i32>, i32) #1 +declare <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32>, <32 x i32>, i32) #1 +declare <32 x i32> @llvm.hexagon.V6.vavghrnd.128B(<32 x i32>, <32 x i32>) #1 +declare <32 x i32> @llvm.hexagon.V6.vlsrw.128B(<32 x i32>, i32) #1 +declare <32 x i32> @llvm.hexagon.V6.vpackeh.128B(<32 x i32>, <32 x i32>) #1 +declare <32 x i32> @llvm.hexagon.V6.vshufoh.128B(<32 x i32>, <32 x i32>) #1 +declare <32 x i32> @llvm.hexagon.V6.vsubhsat.128B(<32 x i32>, <32 x i32>) #1 +declare <64 x i32> @llvm.hexagon.V6.vaddhw.128B(<32 x i32>, <32 x i32>) #1 +declare <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32>, <64 x i32>) #1 +declare <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32>, <32 x i32>) #1 +declare <64 x i32> @llvm.hexagon.V6.vmpyuh.128B(<32 x i32>, i32) #1 +declare <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32>, <32 x i32>, i32) #1 +declare <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32>, <32 x i32>, i32) #1 + +define hidden void @fred() #0 { +b0: + br i1 undef, label %b1, label %b2 + +b1: ; preds = %b0 + ret void + +b2: ; preds = %b0 + %v3 = tail call i8* @halide_malloc() + %v4 = bitcast i8* %v3 to i16* + %v5 = tail call i8* @halide_malloc() + %v6 = bitcast i8* %v5 to i16* + %v7 = tail call i8* @halide_malloc() + %v8 = bitcast i8* %v7 to i16* + %v9 = tail call i8* @halide_malloc() + %v10 = bitcast i8* %v9 to i16* + br label %b11 + +b11: ; preds = %b11, %b2 + br i1 undef, label %b12, label %b11 + +b12: ; preds = %b11 + br i1 undef, label %b16, label %b13 + +b13: ; preds = %b12 + %v14 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> zeroinitializer) #2 + %v15 = tail call <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32> undef, <32 x i32> %v14, i32 1) #2 + br i1 undef, label %b19, label %b17 + +b16: ; preds = %b12 + unreachable + +b17: ; preds = %b13 + %v18 = tail call <32 x i32> @llvm.hexagon.V6.vavghrnd.128B(<32 x i32> %v15, <32 x i32> undef) #2 + br label %b19 + +b19: ; preds = %b17, %b13 + %v20 = phi <32 x i32> [ %v18, %b17 ], [ %v15, %b13 ] + %v21 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> zeroinitializer, <32 x i32> %v20) #2 + %v22 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> %v21, <32 x i32> undef, i32 -2) + %v23 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v22) + store <32 x i32> %v23, <32 x i32>* undef, align 128 + tail call void @halide_free() #3 + br label %b24 + +b24: ; preds = %b33, %b19 + %v25 = load <32 x i32>, <32 x i32>* 
undef, align 128 + %v26 = fptoui float undef to i16 + %v27 = tail call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 -2147450880) #2 + %v28 = xor i16 %v26, -1 + %v29 = zext i16 %v28 to i32 + %v30 = or i32 0, %v29 + %v31 = tail call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 1) #2 + %v32 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %v31, <32 x i32> %v31) + br label %b34 + +b33: ; preds = %b34 + br label %b24 + +b34: ; preds = %b34, %b24 + %v35 = phi <32 x i32> [ %v45, %b34 ], [ undef, %b24 ] + %v36 = phi <32 x i32> [ undef, %b34 ], [ %v25, %b24 ] + %v37 = phi <32 x i32> [ %v46, %b34 ], [ undef, %b24 ] + %v38 = phi i32 [ %v145, %b34 ], [ 0, %b24 ] + %v39 = load <32 x i32>, <32 x i32>* undef, align 128 + %v40 = add nsw i32 %v38, undef + %v41 = shl nsw i32 %v40, 6 + %v42 = add nsw i32 %v41, 64 + %v43 = getelementptr inbounds i16, i16* %v6, i32 %v42 + %v44 = bitcast i16* %v43 to <32 x i32>* + %v45 = load <32 x i32>, <32 x i32>* %v44, align 128 + %v46 = load <32 x i32>, <32 x i32>* undef, align 128 + %v47 = load <32 x i32>, <32 x i32>* null, align 128 + %v48 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> undef, <32 x i32> undef, i32 2) + %v49 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v45, <32 x i32> %v35, i32 24) + %v50 = tail call <32 x i32> @llvm.hexagon.V6.vsubhsat.128B(<32 x i32> %v48, <32 x i32> %v49) #2 + %v51 = tail call <64 x i32> @llvm.hexagon.V6.vaddhw.128B(<32 x i32> undef, <32 x i32> %v50) #2 + %v52 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v39, <32 x i32> %v47, i32 50) + %v53 = tail call <32 x i32> @llvm.hexagon.V6.vpackeh.128B(<32 x i32> %v52, <32 x i32> undef) + %v54 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v53, <32 x i32> %v27) #2 + %v55 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> undef, <32 x i32> %v54, i32 undef) #2 + %v56 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v55, <64 x i32> zeroinitializer) #2 + %v57 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v56) + %v58 = tail call <32 x i32> @llvm.hexagon.V6.vlsrw.128B(<32 x i32> %v57, i32 16) #2 + %v59 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v56) + %v60 = tail call <32 x i32> @llvm.hexagon.V6.vlsrw.128B(<32 x i32> %v59, i32 16) #2 + %v61 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %v60, <32 x i32> %v58) + %v62 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v61, <64 x i32> %v55) #2 + %v63 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v62, <64 x i32> zeroinitializer) #2 + %v64 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v63) #2 + %v65 = tail call <32 x i32> @llvm.hexagon.V6.vshufoh.128B(<32 x i32> %v64, <32 x i32> undef) #2 + %v66 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v65, <32 x i32> %v27) #2 + %v67 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %v66, <32 x i32> undef) #2 + %v68 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> zeroinitializer, <32 x i32> %v27) #2 + %v69 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.128B(<32 x i32> %v68, i32 %v30) #2 + %v70 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v47, <32 x i32> undef, i32 52) + %v71 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v39, <32 x i32> %v47, i32 52) + %v72 = tail call <32 x i32> @llvm.hexagon.V6.vpackeh.128B(<32 x i32> %v71, <32 x i32> %v70) + %v73 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v72, 
<32 x i32> %v27) #2 + %v74 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %v69, <32 x i32> %v73, i32 undef) #2 + %v75 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v74, <64 x i32> zeroinitializer) #2 + %v76 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v75) + %v77 = tail call <32 x i32> @llvm.hexagon.V6.vlsrw.128B(<32 x i32> %v76, i32 16) #2 + %v78 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> undef, <32 x i32> %v77) + %v79 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v78, <64 x i32> %v74) #2 + %v80 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v79, <64 x i32> zeroinitializer) #2 + %v81 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v80) #2 + %v82 = tail call <32 x i32> @llvm.hexagon.V6.vshufoh.128B(<32 x i32> %v81, <32 x i32> undef) #2 + %v83 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v82, <32 x i32> %v27) #2 + %v84 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v51, <64 x i32> %v32) #2 + %v85 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v84) #2 + %v86 = tail call <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32> undef, <32 x i32> %v85, i32 1) #2 + %v87 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %v83, <32 x i32> %v86) #2 + %v88 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> %v87, <32 x i32> %v67, i32 -2) + %v89 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v88) + %v90 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v88) + %v91 = getelementptr inbounds i16, i16* %v10, i32 undef + %v92 = bitcast i16* %v91 to <32 x i32>* + store <32 x i32> %v90, <32 x i32>* %v92, align 128 + %v93 = getelementptr inbounds i16, i16* %v10, i32 undef + %v94 = bitcast i16* %v93 to <32 x i32>* + store <32 x i32> %v89, <32 x i32>* %v94, align 128 + %v95 = getelementptr inbounds i16, i16* %v4, i32 undef + %v96 = bitcast i16* %v95 to <32 x i32>* + %v97 = load <32 x i32>, <32 x i32>* %v96, align 128 + %v98 = getelementptr inbounds i16, i16* %v8, i32 undef + %v99 = bitcast i16* %v98 to <32 x i32>* + %v100 = load <32 x i32>, <32 x i32>* %v99, align 128 + %v101 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> undef, <32 x i32> %v36, i32 22) + %v102 = tail call <32 x i32> @llvm.hexagon.V6.vsubhsat.128B(<32 x i32> %v100, <32 x i32> %v101) #2 + %v103 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> undef, <32 x i32> %v102) #2 + %v104 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v97, <32 x i32> %v37, i32 48) + %v105 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v46, <32 x i32> %v97, i32 48) + %v106 = tail call <32 x i32> @llvm.hexagon.V6.vpackeh.128B(<32 x i32> %v105, <32 x i32> %v104) + %v107 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> undef, <64 x i32> %v32) #2 + %v108 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v107) #2 + %v109 = tail call <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32> undef, <32 x i32> %v108, i32 1) #2 + %v110 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %v106, <32 x i32> %v109) #2 + %v111 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> %v110, <32 x i32> %v103, i32 -2) + %v112 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v111) + %v113 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v111) + %v114 = getelementptr inbounds i16, i16* %v10, i32 undef + %v115 = bitcast i16* %v114 to <32 x 
i32>* + store <32 x i32> %v113, <32 x i32>* %v115, align 128 + %v116 = getelementptr inbounds i16, i16* %v10, i32 undef + %v117 = bitcast i16* %v116 to <32 x i32>* + store <32 x i32> %v112, <32 x i32>* %v117, align 128 + %v118 = getelementptr inbounds i16, i16* %v4, i32 undef + %v119 = bitcast i16* %v118 to <32 x i32>* + %v120 = load <32 x i32>, <32 x i32>* %v119, align 128 + %v121 = getelementptr inbounds i16, i16* %v6, i32 undef + %v122 = bitcast i16* %v121 to <32 x i32>* + %v123 = load <32 x i32>, <32 x i32>* %v122, align 128 + %v124 = getelementptr inbounds i16, i16* %v6, i32 0 + %v125 = bitcast i16* %v124 to <32 x i32>* + %v126 = load <32 x i32>, <32 x i32>* %v125, align 128 + %v127 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v126, <32 x i32> %v123, i32 22) + %v128 = tail call <32 x i32> @llvm.hexagon.V6.vsubhsat.128B(<32 x i32> undef, <32 x i32> %v127) #2 + %v129 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v126, <32 x i32> %v123, i32 24) + %v130 = tail call <32 x i32> @llvm.hexagon.V6.vsubhsat.128B(<32 x i32> undef, <32 x i32> %v129) #2 + %v131 = tail call <64 x i32> @llvm.hexagon.V6.vaddhw.128B(<32 x i32> %v128, <32 x i32> %v130) #2 + %v132 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v120, <32 x i32> undef, i32 46) + %v133 = tail call <32 x i32> @llvm.hexagon.V6.vpackeh.128B(<32 x i32> undef, <32 x i32> %v132) + %v134 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %v133, <32 x i32> %v128) #2 + %v135 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v120, <32 x i32> undef, i32 48) + %v136 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> undef, <32 x i32> %v120, i32 48) + %v137 = tail call <32 x i32> @llvm.hexagon.V6.vpackeh.128B(<32 x i32> %v136, <32 x i32> %v135) + %v138 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v131, <64 x i32> %v32) #2 + %v139 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v138) #2 + %v140 = tail call <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32> %v139, <32 x i32> undef, i32 1) #2 + %v141 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %v137, <32 x i32> %v140) #2 + %v142 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> %v141, <32 x i32> %v134, i32 -2) + %v143 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v142) + %v144 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v142) + store <32 x i32> %v144, <32 x i32>* undef, align 128 + store <32 x i32> %v143, <32 x i32>* undef, align 128 + %v145 = add nuw nsw i32 %v38, 1 + %v146 = icmp eq i32 %v38, undef + br i1 %v146, label %b33, label %b34 +} + +attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx-double" } +attributes #1 = { nounwind readnone } +attributes #2 = { nounwind } +attributes #3 = { nobuiltin nounwind } diff --git a/test/CodeGen/Hexagon/extload-combine.ll b/test/CodeGen/Hexagon/extload-combine.ll index c492343d7915..c7a386a664ba 100644 --- a/test/CodeGen/Hexagon/extload-combine.ll +++ b/test/CodeGen/Hexagon/extload-combine.ll @@ -15,8 +15,8 @@ ; Function Attrs: nounwind define i64 @short_test1() #0 { -; CHECK: [[VAR:r[0-9]+]]{{ *}}={{ *}}memuh(## -; CHECK: combine(#0, [[VAR]]) +; CHECK: [[VAR:r[0-9]+]] = memuh(## +; CHECK: combine(#0,[[VAR]]) entry: store i16 0, i16* @a, align 2 %0 = load i16, i16* @b, align 2 @@ -26,7 +26,7 @@ entry: ; Function Attrs: nounwind define i64 @short_test2() #0 { -; CHECK: [[VAR1:r[0-9]+]]{{ *}}={{ *}}memh(## +; CHECK: [[VAR1:r[0-9]+]] = memh(## ; 
CHECK: sxtw([[VAR1]]) entry: store i16 0, i16* @a, align 2 %0 = load i16, i16* @b, align 2 @@ -37,8 +37,8 @@ entry: ; Function Attrs: nounwind define i64 @char_test1() #0 { -; CHECK: [[VAR2:r[0-9]+]]{{ *}}={{ *}}memub(## -; CHECK: combine(#0, [[VAR2]]) +; CHECK: [[VAR2:r[0-9]+]] = memub(## +; CHECK: combine(#0,[[VAR2]]) entry: store i8 0, i8* @char_a, align 1 %0 = load i8, i8* @char_b, align 1 @@ -48,7 +48,7 @@ entry: ; Function Attrs: nounwind define i64 @char_test2() #0 { -; CHECK: [[VAR3:r[0-9]+]]{{ *}}={{ *}}memb(## +; CHECK: [[VAR3:r[0-9]+]] = memb(## ; CHECK: sxtw([[VAR3]]) entry: store i8 0, i8* @char_a, align 1 @@ -59,8 +59,8 @@ entry: ; Function Attrs: nounwind define i64 @int_test1() #0 { -; CHECK: [[VAR4:r[0-9]+]]{{ *}}={{ *}}memw(## -; CHECK: combine(#0, [[VAR4]]) +; CHECK: [[VAR4:r[0-9]+]] = memw(## +; CHECK: combine(#0,[[VAR4]]) entry: store i32 0, i32* @int_a, align 4 %0 = load i32, i32* @int_b, align 4 @@ -70,7 +70,7 @@ entry: ; Function Attrs: nounwind define i64 @int_test2() #0 { -; CHECK: [[VAR5:r[0-9]+]]{{ *}}={{ *}}memw(## +; CHECK: [[VAR5:r[0-9]+]] = memw(## ; CHECK: sxtw([[VAR5]]) entry: store i32 0, i32* @int_a, align 4 diff --git a/test/CodeGen/Hexagon/extract-basic.ll b/test/CodeGen/Hexagon/extract-basic.ll index c75125cedd35..ad118dea0ab6 100644 --- a/test/CodeGen/Hexagon/extract-basic.ll +++ b/test/CodeGen/Hexagon/extract-basic.ll @@ -1,8 +1,8 @@ ; RUN: llc -O2 -march=hexagon < %s | FileCheck %s -; CHECK-DAG: extractu(r{{[0-9]*}}, #3, #4) -; CHECK-DAG: extractu(r{{[0-9]*}}, #8, #7) -; CHECK-DAG: extractu(r{{[0-9]*}}, #8, #16) +; CHECK-DAG: extractu(r{{[0-9]*}},#3,#4) +; CHECK-DAG: extractu(r{{[0-9]*}},#8,#7) +; CHECK-DAG: extractu(r{{[0-9]*}},#8,#16) ; C source: ; typedef struct { diff --git a/test/CodeGen/Hexagon/fadd.ll b/test/CodeGen/Hexagon/fadd.ll index 6cf0fbbccf73..0418c1724f5b 100644 --- a/test/CodeGen/Hexagon/fadd.ll +++ b/test/CodeGen/Hexagon/fadd.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s ; Check that we generate sp floating point add in V5. -; CHECK: r{{[0-9]+}} = sfadd(r{{[0-9]+}}, r{{[0-9]+}}) +; CHECK: r{{[0-9]+}} = sfadd(r{{[0-9]+}},r{{[0-9]+}}) define i32 @main() nounwind { entry: diff --git a/test/CodeGen/Hexagon/find-loop-instr.ll b/test/CodeGen/Hexagon/find-loop-instr.ll new file mode 100644 index 000000000000..1234baf17f52 --- /dev/null +++ b/test/CodeGen/Hexagon/find-loop-instr.ll @@ -0,0 +1,79 @@ +; RUN: llc -march=hexagon < %s +; REQUIRES: asserts + +; This code causes multiple endloop instructions to be generated for the +; same loop. The findLoopInstr call for one endloop would encounter +; the other endloop, and return null in response. This resulted in a crash. +; +; Check that with the fix we are able to compile this code successfully.
+ +target triple = "hexagon" + +; Function Attrs: norecurse +define void @fred() local_unnamed_addr #0 align 2 { +b0: + br label %b7 + +b1: ; preds = %b9 + br i1 undef, label %b4, label %b2 + +b2: ; preds = %b1 + %v3 = sub i32 undef, undef + br label %b4 + +b4: ; preds = %b2, %b1 + %v5 = phi i32 [ undef, %b1 ], [ %v3, %b2 ] + br i1 undef, label %b14, label %b6 + +b6: ; preds = %b4 + br label %b10 + +b7: ; preds = %b0 + br i1 undef, label %b9, label %b8 + +b8: ; preds = %b7 + unreachable + +b9: ; preds = %b7 + br label %b1 + +b10: ; preds = %b21, %b6 + %v11 = phi i32 [ %v22, %b21 ], [ %v5, %b6 ] + br i1 undef, label %b21, label %b12 + +b12: ; preds = %b10 + br label %b15 + +b13: ; preds = %b21 + br label %b14 + +b14: ; preds = %b13, %b4 + ret void + +b15: ; preds = %b12 + br i1 undef, label %b16, label %b17 + +b16: ; preds = %b15 + store i32 0, i32* undef, align 4 + br label %b21 + +b17: ; preds = %b15 + br label %b18 + +b18: ; preds = %b17 + br i1 undef, label %b19, label %b20 + +b19: ; preds = %b18 + br label %b21 + +b20: ; preds = %b18 + store i32 0, i32* undef, align 4 + br label %b21 + +b21: ; preds = %b20, %b19, %b16, %b10 + %v22 = add i32 %v11, -8 + %v23 = icmp eq i32 %v22, 0 + br i1 %v23, label %b13, label %b10 +} + +attributes #0 = { norecurse "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double,-long-calls" } diff --git a/test/CodeGen/Hexagon/float-amode.ll b/test/CodeGen/Hexagon/float-amode.ll index 9804f48349f8..d770582ecab9 100644 --- a/test/CodeGen/Hexagon/float-amode.ll +++ b/test/CodeGen/Hexagon/float-amode.ll @@ -12,9 +12,9 @@ @a = common global float 0.000000e+00, align 4 ; CHECK-LABEL: test1 -; CHECK: [[REG11:(r[0-9]+)]]{{ *}}={{ *}}memw(r{{[0-9]+}} + r{{[0-9]+}}<<#2) +; CHECK: [[REG11:(r[0-9]+)]] = memw(r{{[0-9]+}}+r{{[0-9]+}}<<#2) ; CHECK: [[REG12:(r[0-9]+)]] += sfmpy({{.*}}[[REG11]] -; CHECK: memw(r{{[0-9]+}} + r{{[0-9]+}}<<#2) = [[REG12]].new +; CHECK: memw(r{{[0-9]+}}+r{{[0-9]+}}<<#2) = [[REG12]].new ; Function Attrs: norecurse nounwind define void @test1(%struct.matrix_params* nocapture readonly %params, i32 %col1) { @@ -35,7 +35,7 @@ entry: } ; CHECK-LABEL: test2 -; CHECK: [[REG21:(r[0-9]+)]]{{ *}}={{ *}}memw(##globB+92) +; CHECK: [[REG21:(r[0-9]+)]] = memw(##globB+92) ; CHECK: [[REG22:(r[0-9]+)]] = sfadd({{.*}}[[REG21]] ; CHECK: memw(##globA+84) = [[REG22]] @@ -54,9 +54,9 @@ entry: } ; CHECK-LABEL: test3 -; CHECK: [[REG31:(r[0-9]+)]]{{ *}}={{ *}}memw(#b) +; CHECK: [[REG31:(r[0-9]+)]] = memw(gp+#b) ; CHECK: [[REG32:(r[0-9]+)]] = sfadd({{.*}}[[REG31]] -; CHECK: memw(#a) = [[REG32]] +; CHECK: memw(gp+#a) = [[REG32]] ; Function Attrs: norecurse nounwind define void @test3(%struct.matrix_params* nocapture readonly %params, i32 %col1) { @@ -73,9 +73,9 @@ entry: } ; CHECK-LABEL: test4 -; CHECK: [[REG41:(r[0-9]+)]]{{ *}}={{ *}}memw(r0<<#2 + ##globB+52) +; CHECK: [[REG41:(r[0-9]+)]] = memw(r0<<#2+##globB+52) ; CHECK: [[REG42:(r[0-9]+)]] = sfadd({{.*}}[[REG41]] -; CHECK: memw(r0<<#2 + ##globA+60) = [[REG42]] +; CHECK: memw(r0<<#2+##globA+60) = [[REG42]] ; Function Attrs: noinline norecurse nounwind define void @test4(i32 %col1) { entry: diff --git a/test/CodeGen/Hexagon/fmul.ll b/test/CodeGen/Hexagon/fmul.ll index 4f55d0bec471..552f98ec7a53 100644 --- a/test/CodeGen/Hexagon/fmul.ll +++ b/test/CodeGen/Hexagon/fmul.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s ; Check that we generate single precision floating point multiply in V5. 
-; CHECK: r{{[0-9]+}} = sfmpy(r{{[0-9]+}}, r{{[0-9]+}}) +; CHECK: r{{[0-9]+}} = sfmpy(r{{[0-9]+}},r{{[0-9]+}}) define i32 @main() nounwind { diff --git a/test/CodeGen/Hexagon/fsel.ll b/test/CodeGen/Hexagon/fsel.ll index 247249da50b1..a2f0b4a47f10 100644 --- a/test/CodeGen/Hexagon/fsel.ll +++ b/test/CodeGen/Hexagon/fsel.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon -O0 < %s | FileCheck %s ; CHECK-LABEL: danny: -; CHECK: mux(p0, r1, ##1065353216) +; CHECK: mux(p0,r1,##1065353216) define float @danny(i32 %x, float %f) #0 { %t = icmp sgt i32 %x, 0 @@ -10,7 +10,7 @@ define float @danny(i32 %x, float %f) #0 { } ; CHECK-LABEL: sammy: -; CHECK: mux(p0, ##1069547520, r1) +; CHECK: mux(p0,##1069547520,r1) define float @sammy(i32 %x, float %f) #0 { %t = icmp sgt i32 %x, 0 diff --git a/test/CodeGen/Hexagon/fsub.ll b/test/CodeGen/Hexagon/fsub.ll index ca7bdc4d0b38..d7b0e2f65b33 100644 --- a/test/CodeGen/Hexagon/fsub.ll +++ b/test/CodeGen/Hexagon/fsub.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s ; Check that we generate sp floating point subtract in V5. -; CHECK: r{{[0-9]+}} = sfsub(r{{[0-9]+}}, r{{[0-9]+}}) +; CHECK: r{{[0-9]+}} = sfsub(r{{[0-9]+}},r{{[0-9]+}}) define i32 @main() nounwind { entry: diff --git a/test/CodeGen/Hexagon/fusedandshift.ll b/test/CodeGen/Hexagon/fusedandshift.ll index 414574aec401..9abd366e6916 100644 --- a/test/CodeGen/Hexagon/fusedandshift.ll +++ b/test/CodeGen/Hexagon/fusedandshift.ll @@ -1,8 +1,8 @@ -; RUN: llc -march=hexagon -hexagon-extract=0 < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-extract=0 -hexbit-extract=0 < %s | FileCheck %s ; Check that we generate fused logical and with shift instruction. ; Disable "extract" generation, since it may eliminate the and/lsr. -; CHECK: r{{[0-9]+}} = and(#15, lsr(r{{[0-9]+}}, #{{[0-9]+}}) +; CHECK: r{{[0-9]+}} = and(#15,lsr(r{{[0-9]+}},#{{[0-9]+}}) define i32 @main(i16* %a, i16* %b) nounwind { entry: diff --git a/test/CodeGen/Hexagon/gp-rel.ll b/test/CodeGen/Hexagon/gp-rel.ll index bb7cb182bf1b..00f57797b6f1 100644 --- a/test/CodeGen/Hexagon/gp-rel.ll +++ b/test/CodeGen/Hexagon/gp-rel.ll @@ -7,8 +7,8 @@ define i32 @foo(i32 %p) #0 { entry: -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(#a) -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(#b) +; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(gp+#a) +; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(gp+#b) ; CHECK: if{{ *}}(p{{[0-3]}}) memw(##c){{ *}}={{ *}}r{{[0-9]+}} %0 = load i32, i32* @a, align 4 %1 = load i32, i32* @b, align 4 diff --git a/test/CodeGen/Hexagon/hwloop-cleanup.ll b/test/CodeGen/Hexagon/hwloop-cleanup.ll index c04966a5a4b2..56a6fedf81ef 100644 --- a/test/CodeGen/Hexagon/hwloop-cleanup.ll +++ b/test/CodeGen/Hexagon/hwloop-cleanup.ll @@ -5,7 +5,7 @@ ; Bug 6685. ; CHECK: loop0 -; CHECK-NOT: r{{[0-9]+}}{{.}}={{.}}add(r{{[0-9]+}},{{.}}#-1) +; CHECK-NOT: r{{[0-9]+}} = add(r{{[0-9]+}},#-1) ; CHECK-NOT: cmp.eq ; CHECK: endloop0 @@ -39,7 +39,7 @@ for.end: ; This test checks that the initial loop count value is removed. ; CHECK-NOT: ={{.}}#40 ; CHECK: loop0 -; CHECK-NOT: r{{[0-9]+}}{{.}}={{.}}add(r{{[0-9]+}},{{.}}#-1) +; CHECK-NOT: r{{[0-9]+}} = add(r{{[0-9]+}},#-1) ; CHECK-NOT: cmp.eq ; CHECK: endloop0 @@ -64,7 +64,7 @@ for.end: ; This test checks that we don't remove the induction variable since it's used.
; CHECK: loop0 -; CHECK: r{{[0-9]+}}{{.}}={{.}}add(r{{[0-9]+}},{{.}}#1) +; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}},#1) ; CHECK-NOT: cmp.eq ; CHECK: endloop0 define i32 @test3(i32* nocapture %b) nounwind { diff --git a/test/CodeGen/Hexagon/hwloop-loop1.ll b/test/CodeGen/Hexagon/hwloop-loop1.ll index 238d34e7ea15..427efdc2c111 100644 --- a/test/CodeGen/Hexagon/hwloop-loop1.ll +++ b/test/CodeGen/Hexagon/hwloop-loop1.ll @@ -2,8 +2,8 @@ ; ; Generate loop1 instruction for double loop sequence. -; CHECK: loop1(.LBB{{.}}_{{.}}, #100) -; CHECK: loop0(.LBB{{.}}_{{.}}, #100) +; CHECK: loop1(.LBB{{.}}_{{.}},#100) +; CHECK: loop0(.LBB{{.}}_{{.}},#100) ; CHECK: endloop0 ; CHECK: endloop1 @@ -12,9 +12,9 @@ entry: %array = alloca [100 x i32], align 8 %doublearray = alloca [100 x [100 x i32]], align 8 %0 = bitcast [100 x i32]* %array to i8* - call void @llvm.lifetime.start(i64 400, i8* %0) #1 + call void @llvm.lifetime.start.p0i8(i64 400, i8* %0) #1 %1 = bitcast [100 x [100 x i32]]* %doublearray to i8* - call void @llvm.lifetime.start(i64 40000, i8* %1) #1 + call void @llvm.lifetime.start.p0i8(i64 40000, i8* %1) #1 %arrayidx1 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %doublearray, i32 0, i32 10, i32 10 %arrayidx2.gep = getelementptr [100 x i32], [100 x i32]* %array, i32 0, i32 0 br label %for.body @@ -56,11 +56,11 @@ for.inc15: for.end17: %3 = load i32, i32* %arrayidx1, align 8 - call void @llvm.lifetime.end(i64 40000, i8* %1) #1 - call void @llvm.lifetime.end(i64 400, i8* %0) #1 + call void @llvm.lifetime.end.p0i8(i64 40000, i8* %1) #1 + call void @llvm.lifetime.end.p0i8(i64 400, i8* %0) #1 ret i32 %3 } -declare void @llvm.lifetime.start(i64, i8* nocapture) #1 +declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1 -declare void @llvm.lifetime.end(i64, i8* nocapture) #1 +declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1 diff --git a/test/CodeGen/Hexagon/hwloop1.ll b/test/CodeGen/Hexagon/hwloop1.ll index 68af3b34eeeb..7a805d951b95 100644 --- a/test/CodeGen/Hexagon/hwloop1.ll +++ b/test/CodeGen/Hexagon/hwloop1.ll @@ -3,7 +3,7 @@ ; Case 1 : Loop with a constant number of iterations. ; CHECK-LABEL: @hwloop1 -; CHECK: loop0(.LBB{{.}}_{{.}}, #10) +; CHECK: loop0(.LBB{{.}}_{{.}},#10) ; CHECK: endloop0 @a = common global [10 x i32] zeroinitializer, align 4 @@ -23,7 +23,7 @@ for.end: ; Case 2 : Loop with a run-time number of iterations. ; CHECK-LABEL: @hwloop2 -; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}}) +; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}}) ; CHECK: endloop0 define i32 @hwloop2(i32 %n, i32* nocapture %b) nounwind { @@ -54,8 +54,8 @@ for.end: ; Case 3 : Induction variable increment more than 1. ; CHECK-LABEL: @hwloop3 -; CHECK: lsr(r{{[0-9]+}}, #2) -; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}}) +; CHECK: lsr(r{{[0-9]+}},#2) +; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}}) ; CHECK: endloop0 define i32 @hwloop3(i32 %n, i32* nocapture %b) nounwind { @@ -86,7 +86,7 @@ for.end: ; Case 4 : Loop exit compare uses register instead of immediate value. ; CHECK-LABEL: @hwloop4 -; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}}) +; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}}) ; CHECK: endloop0 define i32 @hwloop4(i32 %n, i32* nocapture %b) nounwind { @@ -114,7 +114,7 @@ for.end: ; Case 5: After LSR, the initial value is 100 and the iv decrements to 0. 
; CHECK-LABEL: @hwloop5 -; CHECK: loop0(.LBB{{.}}_{{.}}, #100) +; CHECK: loop0(.LBB{{.}}_{{.}},#100) ; CHECK: endloop0 define void @hwloop5(i32* nocapture %a, i32* nocapture %res) nounwind { @@ -138,8 +138,8 @@ for.end: ; Case 6: Large immediate offset ; CHECK-LABEL: @hwloop6 -; CHECK-NOT: loop0(.LBB{{.}}_{{.}}, #1024) -; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}}) +; CHECK-NOT: loop0(.LBB{{.}}_{{.}},#1024) +; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}}) ; CHECK: endloop0 define void @hwloop6(i32* nocapture %a, i32* nocapture %res) nounwind { diff --git a/test/CodeGen/Hexagon/hwloop2.ll b/test/CodeGen/Hexagon/hwloop2.ll index d411d979904e..ba3de1f1a2af 100644 --- a/test/CodeGen/Hexagon/hwloop2.ll +++ b/test/CodeGen/Hexagon/hwloop2.ll @@ -2,7 +2,7 @@ ; Test for multiple phis with induction variables. -; CHECK: loop0(.LBB{{.}}_{{.}}, r{{[0-9]+}}) +; CHECK: loop0(.LBB{{.}}_{{.}},r{{[0-9]+}}) ; CHECK: endloop0 define i32 @hwloop4(i32* nocapture %s, i32* nocapture %a, i32 %n) { diff --git a/test/CodeGen/Hexagon/hwloop4.ll b/test/CodeGen/Hexagon/hwloop4.ll index d159c45e3fb8..b8cea4c77720 100644 --- a/test/CodeGen/Hexagon/hwloop4.ll +++ b/test/CodeGen/Hexagon/hwloop4.ll @@ -2,9 +2,9 @@ ; ; Remove the unnecessary 'add' instruction used for the hardware loop setup. -; CHECK: [[OP0:r[0-9]+]] = add([[OP1:r[0-9]+]], #-[[OP2:[0-9]+]] -; CHECK-NOT: add([[OP0]], #[[OP2]]) -; CHECK: lsr([[OP1]], #{{[0-9]+}}) +; CHECK: [[OP0:r[0-9]+]] = add([[OP1:r[0-9]+]],#-[[OP2:[0-9]+]] +; CHECK-NOT: add([[OP0]],#[[OP2]]) +; CHECK: lsr([[OP1]],#{{[0-9]+}}) ; CHECK: loop0 define void @matrix_mul_matrix(i32 %N, i32* nocapture %C, i16* nocapture readnone %A, i16* nocapture readnone %B) #0 { diff --git a/test/CodeGen/Hexagon/hwloop5.ll b/test/CodeGen/Hexagon/hwloop5.ll index 0886b03cc754..f4990dabebb9 100644 --- a/test/CodeGen/Hexagon/hwloop5.ll +++ b/test/CodeGen/Hexagon/hwloop5.ll @@ -2,9 +2,9 @@ ; ; Generate hardware loop when unknown trip count loop is vectorized. -; CHECK: loop0(.LBB{{[0-9]*}}_{{[0-9]*}}, r{{[0-9]+}}) +; CHECK: loop0(.LBB{{[0-9]*}}_{{[0-9]*}},r{{[0-9]+}}) ; CHECK: endloop0 -; CHECK: loop0(.LBB{{[0-9]*}}_{{[0-9]*}}, r{{[0-9]+}}) +; CHECK: loop0(.LBB{{[0-9]*}}_{{[0-9]*}},r{{[0-9]+}}) ; CHECK: endloop0 @A = common global [1000 x i32] zeroinitializer, align 8 diff --git a/test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll b/test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll index 68a5dc16ecff..91b9aaa9cb4e 100644 --- a/test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll +++ b/test/CodeGen/Hexagon/ifcvt-diamond-bug-2016-08-26.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -o - %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-eif=0 < %s | FileCheck %s target triple = "hexagon" %struct.0 = type { i16, i16 } @@ -15,7 +15,7 @@ entry: br i1 %cmp199, label %if.then200, label %if.else201 ; CHECK-DAG: [[R4:r[0-9]+]] = #4 -; CHECK: p0 = cmp.eq(r0, #0) +; CHECK: p0 = cmp.eq(r0,#0) ; CHECK: if (!p0.new) [[R3:r[0-9]+]] = #3 ; CHECK-DAG: if (!p0) memh(##t) = [[R3]] ; CHECK-DAG: if (p0) memh(##t) = [[R4]] diff --git a/test/CodeGen/Hexagon/ifcvt-simple-bprob.ll b/test/CodeGen/Hexagon/ifcvt-simple-bprob.ll new file mode 100644 index 000000000000..2d48d30dd7d8 --- /dev/null +++ b/test/CodeGen/Hexagon/ifcvt-simple-bprob.ll @@ -0,0 +1,36 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s + +; Check that branch probabilities are set correctly after performing the +; simple variant of if-conversion. The converted block has a branch that +; is not analyzable.
+ +target triple = "hexagon" + +declare void @foo() + +; CHECK-LABEL: danny +; CHECK: if (p0.new) jump:nt foo +define void @danny(i32 %x) { + %t0 = icmp sgt i32 %x, 0 + br i1 %t0, label %tail, label %exit, !prof !0 +tail: + tail call void @foo(); + ret void +exit: + ret void +} + +; CHECK-LABEL: sammy +; CHECK: if (!p0.new) jump:t foo +define void @sammy(i32 %x) { + %t0 = icmp sgt i32 %x, 0 + br i1 %t0, label %exit, label %tail, !prof !0 +tail: + tail call void @foo(); + ret void +exit: + ret void +} + +!0 = !{!"branch_weights", i32 1, i32 2000} + diff --git a/test/CodeGen/Hexagon/inline-asm-vecpred128.ll b/test/CodeGen/Hexagon/inline-asm-vecpred128.ll new file mode 100644 index 000000000000..234f5a0b7926 --- /dev/null +++ b/test/CodeGen/Hexagon/inline-asm-vecpred128.ll @@ -0,0 +1,15 @@ +; RUN: llc -march=hexagon < %s | FileCheck %s +; REQUIRES: asserts + +; Make sure we can handle the 'q' constraint in the 128-byte mode. + +target triple = "hexagon" + +; CHECK-LABEL: fred +; CHECK: if (q{{[0-3]}}) vmem +define void @fred() #0 { + tail call void asm sideeffect "if ($0) vmem($1) = $2;", "q,r,v,~{memory}"(<32 x i32> undef, <32 x i32>* undef, <32 x i32> undef) #0 + ret void +} + +attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-double" } diff --git a/test/CodeGen/Hexagon/insert-basic.ll b/test/CodeGen/Hexagon/insert-basic.ll index e941c063d9ed..14ee735abd79 100644 --- a/test/CodeGen/Hexagon/insert-basic.ll +++ b/test/CodeGen/Hexagon/insert-basic.ll @@ -1,8 +1,8 @@ ; RUN: llc -O2 -march=hexagon < %s | FileCheck %s -; CHECK-DAG: insert(r{{[0-9]*}}, #17, #0) -; CHECK-DAG: insert(r{{[0-9]*}}, #18, #0) -; CHECK-DAG: insert(r{{[0-9]*}}, #22, #0) -; CHECK-DAG: insert(r{{[0-9]*}}, #12, #0) +; CHECK-DAG: insert(r{{[0-9]*}},#17,#0) +; CHECK-DAG: insert(r{{[0-9]*}},#18,#0) +; CHECK-DAG: insert(r{{[0-9]*}},#22,#0) +; CHECK-DAG: insert(r{{[0-9]*}},#12,#0) ; C source: ; typedef struct { diff --git a/test/CodeGen/Hexagon/insert4.ll b/test/CodeGen/Hexagon/insert4.ll index c4d575dd4060..3bc8e9e57982 100644 --- a/test/CodeGen/Hexagon/insert4.ll +++ b/test/CodeGen/Hexagon/insert4.ll @@ -1,8 +1,8 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s ; ; Check that we no longer generate 4 inserts. 
-; CHECK: combine(r{{[0-9]+}}.l, r{{[0-9]+}}.l)
-; CHECK: combine(r{{[0-9]+}}.l, r{{[0-9]+}}.l)
+; CHECK: combine(r{{[0-9]+}}.l,r{{[0-9]+}}.l)
+; CHECK: combine(r{{[0-9]+}}.l,r{{[0-9]+}}.l)
 ; CHECK-NOT: insert

 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
diff --git a/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll b/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
index fcf80b08181e..abdd4cba7c5c 100644
--- a/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
+++ b/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
@@ -10,21 +10,21 @@ define i32 @A2_addi(i32 %a) {
   %z = call i32 @llvm.hexagon.A2.addi(i32 %a, i32 0)
   ret i32 %z
 }
-; CHECK: = add({{.*}}, #0)
+; CHECK: = add({{.*}},#0)

 declare i32 @llvm.hexagon.A2.add(i32, i32)
 define i32 @A2_add(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.add(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = add({{.*}}, {{.*}})
+; CHECK: = add({{.*}},{{.*}})

 declare i32 @llvm.hexagon.A2.addsat(i32, i32)
 define i32 @A2_addsat(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.addsat(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = add({{.*}}, {{.*}}):sat
+; CHECK: = add({{.*}},{{.*}}):sat

 ; Logical operations
 declare i32 @llvm.hexagon.A2.and(i32, i32)
@@ -32,35 +32,35 @@ define i32 @A2_and(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.and(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = and({{.*}}, {{.*}})
+; CHECK: = and({{.*}},{{.*}})

 declare i32 @llvm.hexagon.A2.or(i32, i32)
 define i32 @A2_or(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.or(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = or({{.*}}, {{.*}})
+; CHECK: = or({{.*}},{{.*}})

 declare i32 @llvm.hexagon.A2.xor(i32, i32)
 define i32 @A2_xor(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.xor(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = xor({{.*}}, {{.*}})
+; CHECK: = xor({{.*}},{{.*}})

 declare i32 @llvm.hexagon.A4.andn(i32, i32)
 define i32 @A4_andn(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A4.andn(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = and({{.*}}, ~{{.*}})
+; CHECK: = and({{.*}},~{{.*}})

 declare i32 @llvm.hexagon.A4.orn(i32, i32)
 define i32 @A4_orn(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A4.orn(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = or({{.*}}, ~{{.*}})
+; CHECK: = or({{.*}},~{{.*}})

 ; Subtract
 declare i32 @llvm.hexagon.A2.sub(i32, i32)
@@ -68,14 +68,14 @@ define i32 @A2_sub(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.sub(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = sub({{.*}}, {{.*}})
+; CHECK: = sub({{.*}},{{.*}})

 declare i32 @llvm.hexagon.A2.subsat(i32, i32)
 define i32 @A2_subsat(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.subsat(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = sub({{.*}}, {{.*}}):sat
+; CHECK: = sub({{.*}},{{.*}}):sat

 ; Sign extend
 declare i32 @llvm.hexagon.A2.sxtb(i32)
@@ -128,21 +128,21 @@ define i32 @A2_svaddh(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.svaddh(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = vaddh({{.*}}, {{.*}})
+; CHECK: = vaddh({{.*}},{{.*}})

 declare i32 @llvm.hexagon.A2.svaddhs(i32, i32)
 define i32 @A2_svaddhs(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.svaddhs(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = vaddh({{.*}}, {{.*}}):sat
+; CHECK: = vaddh({{.*}},{{.*}}):sat

 declare i32 @llvm.hexagon.A2.svadduhs(i32, i32)
 define i32 @A2_svadduhs(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.svadduhs(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = vadduh({{.*}}, {{.*}}):sat
+; CHECK: = vadduh({{.*}},{{.*}}):sat

 ; Vector average halfwords
 declare i32 @llvm.hexagon.A2.svavgh(i32, i32)
@@ -150,21 +150,21 @@ define i32 @A2_svavgh(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.svavgh(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = vavgh({{.*}}, {{.*}})
+; CHECK: = vavgh({{.*}},{{.*}})

 declare i32 @llvm.hexagon.A2.svavghs(i32, i32)
 define i32 @A2_svavghs(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.svavghs(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = vavgh({{.*}}, {{.*}}):rnd
+; CHECK: = vavgh({{.*}},{{.*}}):rnd

 declare i32 @llvm.hexagon.A2.svnavgh(i32, i32)
 define i32 @A2_svnavgh(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.svnavgh(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = vnavgh({{.*}}, {{.*}})
+; CHECK: = vnavgh({{.*}},{{.*}})

 ; Vector subtract halfwords
 declare i32 @llvm.hexagon.A2.svsubh(i32, i32)
@@ -172,21 +172,21 @@ define i32 @A2_svsubh(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.svsubh(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = vsubh({{.*}}, {{.*}})
+; CHECK: = vsubh({{.*}},{{.*}})

 declare i32 @llvm.hexagon.A2.svsubhs(i32, i32)
 define i32 @A2_svsubhs(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.svsubhs(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = vsubh({{.*}}, {{.*}}):sat
+; CHECK: = vsubh({{.*}},{{.*}}):sat

 declare i32 @llvm.hexagon.A2.svsubuhs(i32, i32)
 define i32 @A2_svsubuhs(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.svsubuhs(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = vsubuh({{.*}}, {{.*}}):sat
+; CHECK: = vsubuh({{.*}},{{.*}}):sat

 ; Zero extend
 declare i32 @llvm.hexagon.A2.zxth(i32)
diff --git a/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll b/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll
index c9fb0afe0781..554dac4563d1 100644
--- a/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll
+++ b/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll
@@ -10,56 +10,56 @@ define i64 @A4_combineri(i32 %a) {
   %z = call i64 @llvm.hexagon.A4.combineri(i32 %a, i32 0)
   ret i64 %z
 }
-; CHECK: = combine({{.*}}, #0)
+; CHECK: = combine({{.*}},#0)

 declare i64 @llvm.hexagon.A4.combineir(i32, i32)
 define i64 @A4_combineir(i32 %a) {
   %z = call i64 @llvm.hexagon.A4.combineir(i32 0, i32 %a)
   ret i64 %z
 }
-; CHECK: = combine(#0, {{.*}})
+; CHECK: = combine(#0,{{.*}})

 declare i64 @llvm.hexagon.A2.combineii(i32, i32)
 define i64 @A2_combineii() {
   %z = call i64 @llvm.hexagon.A2.combineii(i32 0, i32 0)
   ret i64 %z
 }
-; CHECK: = combine(#0, #0)
+; CHECK: = combine(#0,#0)

 declare i32 @llvm.hexagon.A2.combine.hh(i32, i32)
 define i32 @A2_combine_hh(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.combine.hh(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = combine({{.*}}, {{.*}})
+; CHECK: = combine({{.*}},{{.*}})

 declare i32 @llvm.hexagon.A2.combine.hl(i32, i32)
 define i32 @A2_combine_hl(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.combine.hl(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = combine({{.*}}, {{.*}})
+; CHECK: = combine({{.*}},{{.*}})

 declare i32 @llvm.hexagon.A2.combine.lh(i32, i32)
 define i32 @A2_combine_lh(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.combine.lh(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = combine({{.*}}, {{.*}})
+; CHECK: = combine({{.*}},{{.*}})

 declare i32 @llvm.hexagon.A2.combine.ll(i32, i32)
 define i32 @A2_combine_ll(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.combine.ll(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = combine({{.*}}, {{.*}})
+; CHECK: = combine({{.*}},{{.*}})

 declare i64 @llvm.hexagon.A2.combinew(i32, i32)
 define i64 @A2_combinew(i32 %a, i32 %b) {
   %z = call i64 @llvm.hexagon.A2.combinew(i32 %a, i32 %b)
   ret i64 %z
 }
-; CHECK: = combine({{.*}}, {{.*}})
+; CHECK: = combine({{.*}},{{.*}})

 ; Mux
 declare i32 @llvm.hexagon.C2.muxri(i32, i32, i32)
@@ -67,21 +67,21 @@ define i32 @C2_muxri(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.C2.muxri(i32 %a, i32 0, i32 %b)
   ret i32 %z
 }
-; CHECK: = mux({{.*}}, #0, {{.*}})
+; CHECK: = mux({{.*}},#0,{{.*}})

 declare i32 @llvm.hexagon.C2.muxir(i32, i32, i32)
 define i32 @C2_muxir(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.C2.muxir(i32 %a, i32 %b, i32 0)
   ret i32 %z
 }
-; CHECK: = mux({{.*}}, {{.*}}, #0)
+; CHECK: = mux({{.*}},{{.*}},#0)

 declare i32 @llvm.hexagon.C2.mux(i32, i32, i32)
 define i32 @C2_mux(i32 %a, i32 %b, i32 %c) {
   %z = call i32 @llvm.hexagon.C2.mux(i32 %a, i32 %b, i32 %c)
   ret i32 %z
 }
-; CHECK: = mux({{.*}}, {{.*}}, {{.*}})
+; CHECK: = mux({{.*}},{{.*}},{{.*}})

 ; Shift word by 16
 declare i32 @llvm.hexagon.A2.aslh(i32)
@@ -104,4 +104,4 @@ define i64 @S2_packhl(i32 %a, i32 %b) {
   %z = call i64 @llvm.hexagon.S2.packhl(i32 %a, i32 %b)
   ret i64 %z
 }
-; CHECK: = packhl({{.*}}, {{.*}})
+; CHECK: = packhl({{.*}},{{.*}})
diff --git a/test/CodeGen/Hexagon/intrinsics/byte-store-double.ll b/test/CodeGen/Hexagon/intrinsics/byte-store-double.ll
new file mode 100644
index 000000000000..2a54bfef0ad7
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/byte-store-double.ll
@@ -0,0 +1,41 @@
+; RUN: llc -mattr=+hvx-double -march=hexagon -O2 < %s | FileCheck %s
+
+; CHECK-LABEL: V6_vmaskedstoreq_128B
+; CHECK: if (q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
+
+; CHECK-LABEL: V6_vmaskedstorenq_128B
+; CHECK: if (!q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
+
+; CHECK-LABEL: V6_vmaskedstorentq_128B
+; CHECK: if (q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
+
+; CHECK-LABEL: V6_vmaskedstorentnq_128B
+; CHECK: if (!q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
+
+declare void @llvm.hexagon.V6.vmaskedstoreq.128B(<1024 x i1>, i8*, <32 x i32>)
+define void @V6_vmaskedstoreq_128B( <32 x i32> %a, i8* %b, <32 x i32> %c) {
+  %1 = bitcast <32 x i32> %a to <1024 x i1>
+  call void @llvm.hexagon.V6.vmaskedstoreq.128B(<1024 x i1> %1, i8* %b, <32 x i32> %c)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vmaskedstorenq.128B(<1024 x i1>, i8*, <32 x i32>)
+define void @V6_vmaskedstorenq_128B( <32 x i32> %a, i8* %b, <32 x i32> %c) {
+  %1 = bitcast <32 x i32> %a to <1024 x i1>
+  call void @llvm.hexagon.V6.vmaskedstorenq.128B(<1024 x i1> %1, i8* %b, <32 x i32> %c)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vmaskedstorentq.128B(<1024 x i1>, i8*, <32 x i32>)
+define void @V6_vmaskedstorentq_128B( <32 x i32> %a, i8* %b, <32 x i32> %c) {
+  %1 = bitcast <32 x i32> %a to <1024 x i1>
+  call void @llvm.hexagon.V6.vmaskedstorentq.128B(<1024 x i1> %1, i8* %b, <32 x i32> %c)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vmaskedstorentnq.128B(<1024 x i1>, i8*, <32 x i32>)
+define void @V6_vmaskedstorentnq_128B( <32 x i32> %a, i8* %b, <32 x i32> %c) {
+  %1 = bitcast <32 x i32> %a to <1024 x i1>
+  call void @llvm.hexagon.V6.vmaskedstorentnq.128B(<1024 x i1> %1, i8* %b, <32 x i32> %c)
+  ret void
+}
diff --git a/test/CodeGen/Hexagon/intrinsics/byte-store.ll b/test/CodeGen/Hexagon/intrinsics/byte-store.ll
new file mode 100644
index 000000000000..208c15fec980
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/byte-store.ll
@@ -0,0 +1,41 @@
+; RUN: llc -mattr=+hvx -march=hexagon -O2 < %s | FileCheck %s
+
+; CHECK-LABEL: V6_vmaskedstoreq
+; CHECK: if (q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
+
+; CHECK-LABEL: V6_vmaskedstorenq
+; CHECK: if (!q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
+
+; CHECK-LABEL: V6_vmaskedstorentq
+; CHECK: if (q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
+
+; CHECK-LABEL: V6_vmaskedstorentnq
+; CHECK: if (!q{{[0-3]+}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
+
+declare void @llvm.hexagon.V6.vmaskedstoreq(<512 x i1>, i8*, <16 x i32>)
+define void @V6_vmaskedstoreq( <16 x i32> %a, i8* %b, <16 x i32> %c) {
+  %1 = bitcast <16 x i32> %a to <512 x i1>
+  call void @llvm.hexagon.V6.vmaskedstoreq(<512 x i1> %1, i8* %b, <16 x i32> %c)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vmaskedstorenq(<512 x i1>, i8*, <16 x i32>)
+define void @V6_vmaskedstorenq( <16 x i32> %a, i8* %b, <16 x i32> %c) {
+  %1 = bitcast <16 x i32> %a to <512 x i1>
+  call void @llvm.hexagon.V6.vmaskedstorenq(<512 x i1> %1, i8* %b, <16 x i32> %c)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vmaskedstorentq(<512 x i1>, i8*, <16 x i32>)
+define void @V6_vmaskedstorentq( <16 x i32> %a, i8* %b, <16 x i32> %c) {
+  %1 = bitcast <16 x i32> %a to <512 x i1>
+  call void @llvm.hexagon.V6.vmaskedstorentq(<512 x i1> %1, i8* %b, <16 x i32> %c)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vmaskedstorentnq(<512 x i1>, i8*, <16 x i32>)
+define void @V6_vmaskedstorentnq( <16 x i32> %a, i8* %b, <16 x i32> %c) {
+  %1 = bitcast <16 x i32> %a to <512 x i1>
+  call void @llvm.hexagon.V6.vmaskedstorentnq(<512 x i1> %1, i8* %b, <16 x i32> %c)
+  ret void
+}
diff --git a/test/CodeGen/Hexagon/intrinsics/cr.ll b/test/CodeGen/Hexagon/intrinsics/cr.ll
index f308ef8e5664..4c0fcb3707c1 100644
--- a/test/CodeGen/Hexagon/intrinsics/cr.ll
+++ b/test/CodeGen/Hexagon/intrinsics/cr.ll
@@ -10,14 +10,14 @@ define i32 @C4_fastcorner9(i32 %a, i32 %b) {
   %z = call i32@llvm.hexagon.C4.fastcorner9(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = fastcorner9({{.*}}, {{.*}})
+; CHECK: = fastcorner9({{.*}},{{.*}})

 declare i32 @llvm.hexagon.C4.fastcorner9.not(i32, i32)
 define i32 @C4_fastcorner9_not(i32 %a, i32 %b) {
   %z = call i32@llvm.hexagon.C4.fastcorner9.not(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = !fastcorner9({{.*}}, {{.*}})
+; CHECK: = !fastcorner9({{.*}},{{.*}})

 ; Logical reductions on predicates
 declare i32 @llvm.hexagon.C2.any8(i32)
@@ -41,70 +41,70 @@ define i32 @C2_and(i32 %a, i32 %b) {
   %z = call i32@llvm.hexagon.C2.and(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = and({{.*}}, {{.*}})
+; CHECK: = and({{.*}},{{.*}})

 declare i32 @llvm.hexagon.C4.and.and(i32, i32, i32)
 define i32 @C4_and_and(i32 %a, i32 %b, i32 %c) {
   %z = call i32@llvm.hexagon.C4.and.and(i32 %a, i32 %b, i32 %c)
   ret i32 %z
 }
-; CHECK: = and({{.*}}, and({{.*}}, {{.*}}))
+; CHECK: = and({{.*}},and({{.*}},{{.*}}))

 declare i32 @llvm.hexagon.C2.or(i32, i32)
 define i32 @C2_or(i32 %a, i32 %b) {
   %z = call i32@llvm.hexagon.C2.or(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = or({{.*}}, {{.*}})
+; CHECK: = or({{.*}},{{.*}})

 declare i32 @llvm.hexagon.C4.and.or(i32, i32, i32)
 define i32 @C4_and_or(i32 %a, i32 %b, i32 %c) {
   %z = call i32@llvm.hexagon.C4.and.or(i32 %a, i32 %b, i32 %c)
   ret i32 %z
 }
-; CHECK: = and({{.*}}, or({{.*}}, {{.*}}))
+; CHECK: = and({{.*}},or({{.*}},{{.*}}))

 declare i32 @llvm.hexagon.C2.xor(i32, i32)
 define i32 @C2_xor(i32 %a, i32 %b) {
   %z = call i32@llvm.hexagon.C2.xor(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = xor({{.*}}, {{.*}})
+; CHECK: = xor({{.*}},{{.*}})

 declare i32 @llvm.hexagon.C4.or.and(i32, i32, i32)
 define i32 @C4_or_and(i32 %a, i32 %b, i32 %c) {
   %z = call i32@llvm.hexagon.C4.or.and(i32 %a, i32 %b, i32 %c)
   ret i32 %z
 }
-; CHECK: = or({{.*}}, and({{.*}}, {{.*}}))
+; CHECK: = or({{.*}},and({{.*}},{{.*}}))

 declare i32 @llvm.hexagon.C2.andn(i32, i32)
 define i32 @C2_andn(i32 %a, i32 %b) {
   %z = call i32@llvm.hexagon.C2.andn(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: = and({{.*}}, !{{.*}})
+; CHECK: = and({{.*}},!{{.*}})
declare i32 @llvm.hexagon.C4.or.or(i32, i32, i32) define i32 @C4_or_or(i32 %a, i32 %b, i32 %c) { %z = call i32@llvm.hexagon.C4.or.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: = or({{.*}}, or({{.*}}, {{.*}})) +; CHECK: = or({{.*}},or({{.*}},{{.*}})) declare i32 @llvm.hexagon.C4.and.andn(i32, i32, i32) define i32 @C4_and_andn(i32 %a, i32 %b, i32 %c) { %z = call i32@llvm.hexagon.C4.and.andn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: = and({{.*}}, and({{.*}}, !{{.*}})) +; CHECK: = and({{.*}},and({{.*}},!{{.*}})) declare i32 @llvm.hexagon.C4.and.orn(i32, i32, i32) define i32 @C4_and_orn(i32 %a, i32 %b, i32 %c) { %z = call i32@llvm.hexagon.C4.and.orn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: = and({{.*}}, or({{.*}}, !{{.*}})) +; CHECK: = and({{.*}},or({{.*}},!{{.*}})) declare i32 @llvm.hexagon.C2.not(i32) define i32 @C2_not(i32 %a) { @@ -118,18 +118,18 @@ define i32 @C4_or_andn(i32 %a, i32 %b, i32 %c) { %z = call i32@llvm.hexagon.C4.or.andn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: = or({{.*}}, and({{.*}}, !{{.*}})) +; CHECK: = or({{.*}},and({{.*}},!{{.*}})) declare i32 @llvm.hexagon.C2.orn(i32, i32) define i32 @C2_orn(i32 %a, i32 %b) { %z = call i32@llvm.hexagon.C2.orn(i32 %a, i32 %b) ret i32 %z } -; CHECK: = or({{.*}}, !{{.*}}) +; CHECK: = or({{.*}},!{{.*}}) declare i32 @llvm.hexagon.C4.or.orn(i32, i32, i32) define i32 @C4_or_orn(i32 %a, i32 %b, i32 %c) { %z = call i32@llvm.hexagon.C4.or.orn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: = or({{.*}}, or({{.*}}, !{{.*}})) +; CHECK: = or({{.*}},or({{.*}},!{{.*}})) diff --git a/test/CodeGen/Hexagon/intrinsics/system_user.ll b/test/CodeGen/Hexagon/intrinsics/system_user.ll index dad4effb0a14..ac4c53e221d0 100644 --- a/test/CodeGen/Hexagon/intrinsics/system_user.ll +++ b/test/CodeGen/Hexagon/intrinsics/system_user.ll @@ -10,4 +10,4 @@ define void @prefetch(i8* %a) { call void @llvm.hexagon.prefetch(i8* %a) ret void } -; CHECK: dcfetch({{.*}} + #0) +; CHECK: dcfetch({{.*}}+#0) diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll b/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll index c5c23c22bde9..4d630c62005b 100644 --- a/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll +++ b/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll @@ -34,42 +34,42 @@ define i32 @S4_addaddi(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S4.addaddi(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: = add({{.*}}, add({{.*}}, #0)) +; CHECK: = add({{.*}},add({{.*}},#0)) declare i32 @llvm.hexagon.S4.subaddi(i32, i32, i32) define i32 @S4_subaddi(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S4.subaddi(i32 %a, i32 0, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}, sub(#0, {{.*}})) +; CHECK: = add({{.*}},sub(#0,{{.*}})) declare i32 @llvm.hexagon.M2.accii(i32, i32, i32) define i32 @M2_accii(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.accii(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: += add({{.*}}, #0) +; CHECK: += add({{.*}},#0) declare i32 @llvm.hexagon.M2.naccii(i32, i32, i32) define i32 @M2_naccii(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.naccii(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: -= add({{.*}}, #0) +; CHECK: -= add({{.*}},#0) declare i32 @llvm.hexagon.M2.acci(i32, i32, i32) define i32 @M2_acci(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M2.acci(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += add({{.*}}, {{.*}}) +; CHECK: += add({{.*}},{{.*}}) declare i32 @llvm.hexagon.M2.nacci(i32, i32, i32) define i32 @M2_nacci(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M2.nacci(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= add({{.*}}, 
{{.*}}) +; CHECK: -= add({{.*}},{{.*}}) ; Add doublewords declare i64 @llvm.hexagon.A2.addp(i64, i64) @@ -77,14 +77,14 @@ define i64 @A2_addp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.addp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = add({{.*}}, {{.*}}) +; CHECK: = add({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.addpsat(i64, i64) define i64 @A2_addpsat(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.addpsat(i64 %a, i64 %b) ret i64 %z } -; CHECK: = add({{.*}}, {{.*}}):sat +; CHECK: = add({{.*}},{{.*}}):sat ; Add halfword declare i32 @llvm.hexagon.A2.addh.l16.ll(i32, i32) @@ -92,84 +92,84 @@ define i32 @A2_addh_l16_ll(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.l16.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.l, {{.*}}.l) +; CHECK: = add({{.*}}.l,{{.*}}.l) declare i32 @llvm.hexagon.A2.addh.l16.hl(i32, i32) define i32 @A2_addh_l16_hl(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.l16.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.l, {{.*}}.h) +; CHECK: = add({{.*}}.l,{{.*}}.h) declare i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32, i32) define i32 @A2_addh_l16_sat.ll(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.l, {{.*}}.l):sat +; CHECK: = add({{.*}}.l,{{.*}}.l):sat declare i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32, i32) define i32 @A2_addh_l16_sat.hl(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.l, {{.*}}.h):sat +; CHECK: = add({{.*}}.l,{{.*}}.h):sat declare i32 @llvm.hexagon.A2.addh.h16.ll(i32, i32) define i32 @A2_addh_h16_ll(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.h16.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.l, {{.*}}.l):<<16 +; CHECK: = add({{.*}}.l,{{.*}}.l):<<16 declare i32 @llvm.hexagon.A2.addh.h16.lh(i32, i32) define i32 @A2_addh_h16_lh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.h16.lh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.l, {{.*}}.h):<<16 +; CHECK: = add({{.*}}.l,{{.*}}.h):<<16 declare i32 @llvm.hexagon.A2.addh.h16.hl(i32, i32) define i32 @A2_addh_h16_hl(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.h16.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.h, {{.*}}.l):<<16 +; CHECK: = add({{.*}}.h,{{.*}}.l):<<16 declare i32 @llvm.hexagon.A2.addh.h16.hh(i32, i32) define i32 @A2_addh_h16_hh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.h16.hh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.h, {{.*}}.h):<<16 +; CHECK: = add({{.*}}.h,{{.*}}.h):<<16 declare i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32, i32) define i32 @A2_addh_h16_sat_ll(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.l, {{.*}}.l):sat:<<16 +; CHECK: = add({{.*}}.l,{{.*}}.l):sat:<<16 declare i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32, i32) define i32 @A2_addh_h16_sat_lh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.l, {{.*}}.h):sat:<<16 +; CHECK: = add({{.*}}.l,{{.*}}.h):sat:<<16 declare i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32, i32) define i32 @A2_addh_h16_sat_hl(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.h, {{.*}}.l):sat:<<16 +; CHECK: = add({{.*}}.h,{{.*}}.l):sat:<<16 declare i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32, i32) define i32 @A2_addh_h16_sat_hh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = add({{.*}}.h, 
{{.*}}.h):sat:<<16 +; CHECK: = add({{.*}}.h,{{.*}}.h):sat:<<16 ; Logical doublewords declare i64 @llvm.hexagon.A2.notp(i64) @@ -184,35 +184,35 @@ define i64 @A2_andp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.andp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = and({{.*}}, {{.*}}) +; CHECK: = and({{.*}},{{.*}}) declare i64 @llvm.hexagon.A4.andnp(i64, i64) define i64 @A2_andnp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A4.andnp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = and({{.*}}, ~{{.*}}) +; CHECK: = and({{.*}},~{{.*}}) declare i64 @llvm.hexagon.A2.orp(i64, i64) define i64 @A2_orp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.orp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = or({{.*}}, {{.*}}) +; CHECK: = or({{.*}},{{.*}}) declare i64 @llvm.hexagon.A4.ornp(i64, i64) define i64 @A2_ornp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A4.ornp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = or({{.*}}, ~{{.*}}) +; CHECK: = or({{.*}},~{{.*}}) declare i64 @llvm.hexagon.A2.xorp(i64, i64) define i64 @A2_xorp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.xorp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = xor({{.*}}, {{.*}}) +; CHECK: = xor({{.*}},{{.*}}) ; Logical-logical doublewords declare i64 @llvm.hexagon.M4.xor.xacc(i64, i64, i64) @@ -220,7 +220,7 @@ define i64 @M4_xor_xacc(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.M4.xor.xacc(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: ^= xor({{.*}}, {{.*}}) +; CHECK: ^= xor({{.*}},{{.*}}) ; Logical-logical words declare i32 @llvm.hexagon.S4.or.andi(i32, i32, i32) @@ -228,91 +228,91 @@ define i32 @S4_or_andi(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S4.or.andi(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: |= and({{.*}}, #0) +; CHECK: |= and({{.*}},#0) declare i32 @llvm.hexagon.S4.or.andix(i32, i32, i32) define i32 @S4_or_andix(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S4.or.andix(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: = or({{.*}}, and({{.*}}, #0)) +; CHECK: = or({{.*}},and({{.*}},#0)) declare i32 @llvm.hexagon.M4.or.andn(i32, i32, i32) define i32 @M4_or_andn(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.or.andn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: |= and({{.*}}, ~{{.*}}) +; CHECK: |= and({{.*}},~{{.*}}) declare i32 @llvm.hexagon.M4.and.andn(i32, i32, i32) define i32 @M4_and_andn(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.and.andn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: &= and({{.*}}, ~{{.*}}) +; CHECK: &= and({{.*}},~{{.*}}) declare i32 @llvm.hexagon.M4.xor.andn(i32, i32, i32) define i32 @M4_xor_andn(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.xor.andn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: ^= and({{.*}}, ~{{.*}}) +; CHECK: ^= and({{.*}},~{{.*}}) declare i32 @llvm.hexagon.M4.and.and(i32, i32, i32) define i32 @M4_and_and(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.and.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: &= and({{.*}}, {{.*}}) +; CHECK: &= and({{.*}},{{.*}}) declare i32 @llvm.hexagon.M4.and.or(i32, i32, i32) define i32 @M4_and_or(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.and.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: &= or({{.*}}, {{.*}}) +; CHECK: &= or({{.*}},{{.*}}) declare i32 @llvm.hexagon.M4.and.xor(i32, i32, i32) define i32 @M4_and_xor(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.and.xor(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: &= xor({{.*}}, {{.*}}) +; CHECK: &= xor({{.*}},{{.*}}) declare i32 @llvm.hexagon.M4.or.and(i32, i32, i32) define i32 @M4_or_and(i32 %a, i32 %b, i32 %c) { %z = call i32 
@llvm.hexagon.M4.or.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: |= and({{.*}}, {{.*}}) +; CHECK: |= and({{.*}},{{.*}}) declare i32 @llvm.hexagon.M4.or.or(i32, i32, i32) define i32 @M4_or_or(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.or.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: |= or({{.*}}, {{.*}}) +; CHECK: |= or({{.*}},{{.*}}) declare i32 @llvm.hexagon.M4.or.xor(i32, i32, i32) define i32 @M4_or_xor(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.or.xor(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: |= xor({{.*}}, {{.*}}) +; CHECK: |= xor({{.*}},{{.*}}) declare i32 @llvm.hexagon.M4.xor.and(i32, i32, i32) define i32 @M4_xor_and(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.xor.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: ^= and({{.*}}, {{.*}}) +; CHECK: ^= and({{.*}},{{.*}}) declare i32 @llvm.hexagon.M4.xor.or(i32, i32, i32) define i32 @M4_xor_or(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.xor.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: ^= or({{.*}}, {{.*}}) +; CHECK: ^= or({{.*}},{{.*}}) ; Maximum words declare i32 @llvm.hexagon.A2.max(i32, i32) @@ -320,14 +320,14 @@ define i32 @A2_max(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.max(i32 %a, i32 %b) ret i32 %z } -; CHECK: = max({{.*}}, {{.*}}) +; CHECK: = max({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.maxu(i32, i32) define i32 @A2_maxu(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.maxu(i32 %a, i32 %b) ret i32 %z } -; CHECK: = maxu({{.*}}, {{.*}}) +; CHECK: = maxu({{.*}},{{.*}}) ; Maximum doublewords declare i64 @llvm.hexagon.A2.maxp(i64, i64) @@ -335,14 +335,14 @@ define i64 @A2_maxp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.maxp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = max({{.*}}, {{.*}}) +; CHECK: = max({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.maxup(i64, i64) define i64 @A2_maxup(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.maxup(i64 %a, i64 %b) ret i64 %z } -; CHECK: = maxu({{.*}}, {{.*}}) +; CHECK: = maxu({{.*}},{{.*}}) ; Minimum words declare i32 @llvm.hexagon.A2.min(i32, i32) @@ -350,14 +350,14 @@ define i32 @A2_min(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.min(i32 %a, i32 %b) ret i32 %z } -; CHECK: = min({{.*}}, {{.*}}) +; CHECK: = min({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.minu(i32, i32) define i32 @A2_minu(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.minu(i32 %a, i32 %b) ret i32 %z } -; CHECK: = minu({{.*}}, {{.*}}) +; CHECK: = minu({{.*}},{{.*}}) ; Minimum doublewords declare i64 @llvm.hexagon.A2.minp(i64, i64) @@ -365,14 +365,14 @@ define i64 @A2_minp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.minp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = min({{.*}}, {{.*}}) +; CHECK: = min({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.minup(i64, i64) define i64 @A2_minup(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.minup(i64 %a, i64 %b) ret i64 %z } -; CHECK: = minu({{.*}}, {{.*}}) +; CHECK: = minu({{.*}},{{.*}}) ; Module wrap declare i32 @llvm.hexagon.A4.modwrapu(i32, i32) @@ -380,7 +380,7 @@ define i32 @A4_modwrapu(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A4.modwrapu(i32 %a, i32 %b) ret i32 %z } -; CHECK: = modwrap({{.*}}, {{.*}}) +; CHECK: = modwrap({{.*}},{{.*}}) ; Negate declare i64 @llvm.hexagon.A2.negp(i64) @@ -410,42 +410,42 @@ define i32 @A4_cround_ri(i32 %a) { %z = call i32 @llvm.hexagon.A4.cround.ri(i32 %a, i32 0) ret i32 %z } -; CHECK: = cround({{.*}}, #0) +; CHECK: = cround({{.*}},#0) declare i32 @llvm.hexagon.A4.round.ri(i32, i32) define i32 @A4_round_ri(i32 %a) { %z = call i32 @llvm.hexagon.A4.round.ri(i32 
%a, i32 0) ret i32 %z } -; CHECK: = round({{.*}}, #0) +; CHECK: = round({{.*}},#0) declare i32 @llvm.hexagon.A4.round.ri.sat(i32, i32) define i32 @A4_round_ri_sat(i32 %a) { %z = call i32 @llvm.hexagon.A4.round.ri.sat(i32 %a, i32 0) ret i32 %z } -; CHECK: = round({{.*}}, #0):sat +; CHECK: = round({{.*}},#0):sat declare i32 @llvm.hexagon.A4.cround.rr(i32, i32) define i32 @A4_cround_rr(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A4.cround.rr(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cround({{.*}}, {{.*}}) +; CHECK: = cround({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.round.rr(i32, i32) define i32 @A4_round_rr(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A4.round.rr(i32 %a, i32 %b) ret i32 %z } -; CHECK: = round({{.*}}, {{.*}}) +; CHECK: = round({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.round.rr.sat(i32, i32) define i32 @A4_round_rr_sat(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A4.round.rr.sat(i32 %a, i32 %b) ret i32 %z } -; CHECK: = round({{.*}}, {{.*}}):sat +; CHECK: = round({{.*}},{{.*}}):sat ; Subtract doublewords declare i64 @llvm.hexagon.A2.subp(i64, i64) @@ -453,7 +453,7 @@ define i64 @A2_subp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.subp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = sub({{.*}}, {{.*}}) +; CHECK: = sub({{.*}},{{.*}}) ; Subtract and accumulate declare i32 @llvm.hexagon.M2.subacc(i32, i32, i32) @@ -461,7 +461,7 @@ define i32 @M2_subacc(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M2.subacc(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += sub({{.*}}, {{.*}}) +; CHECK: += sub({{.*}},{{.*}}) ; Subtract halfwords declare i32 @llvm.hexagon.A2.subh.l16.ll(i32, i32) @@ -469,84 +469,84 @@ define i32 @A2_subh_l16_ll(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.l16.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.l, {{.*}}.l) +; CHECK: = sub({{.*}}.l,{{.*}}.l) declare i32 @llvm.hexagon.A2.subh.l16.hl(i32, i32) define i32 @A2_subh_l16_hl(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.l16.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.l, {{.*}}.h) +; CHECK: = sub({{.*}}.l,{{.*}}.h) declare i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32, i32) define i32 @A2_subh_l16_sat.ll(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.l, {{.*}}.l):sat +; CHECK: = sub({{.*}}.l,{{.*}}.l):sat declare i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32, i32) define i32 @A2_subh_l16_sat.hl(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.l, {{.*}}.h):sat +; CHECK: = sub({{.*}}.l,{{.*}}.h):sat declare i32 @llvm.hexagon.A2.subh.h16.ll(i32, i32) define i32 @A2_subh_h16_ll(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.h16.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.l, {{.*}}.l):<<16 +; CHECK: = sub({{.*}}.l,{{.*}}.l):<<16 declare i32 @llvm.hexagon.A2.subh.h16.lh(i32, i32) define i32 @A2_subh_h16_lh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.h16.lh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.l, {{.*}}.h):<<16 +; CHECK: = sub({{.*}}.l,{{.*}}.h):<<16 declare i32 @llvm.hexagon.A2.subh.h16.hl(i32, i32) define i32 @A2_subh_h16_hl(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.h16.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.h, {{.*}}.l):<<16 +; CHECK: = sub({{.*}}.h,{{.*}}.l):<<16 declare i32 @llvm.hexagon.A2.subh.h16.hh(i32, i32) define i32 @A2_subh_h16_hh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.h16.hh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.h, {{.*}}.h):<<16 +; 
CHECK: = sub({{.*}}.h,{{.*}}.h):<<16 declare i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32, i32) define i32 @A2_subh_h16_sat_ll(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.l, {{.*}}.l):sat:<<16 +; CHECK: = sub({{.*}}.l,{{.*}}.l):sat:<<16 declare i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32, i32) define i32 @A2_subh_h16_sat_lh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.l, {{.*}}.h):sat:<<16 +; CHECK: = sub({{.*}}.l,{{.*}}.h):sat:<<16 declare i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32, i32) define i32 @A2_subh_h16_sat_hl(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.h, {{.*}}.l):sat:<<16 +; CHECK: = sub({{.*}}.h,{{.*}}.l):sat:<<16 declare i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32, i32) define i32 @A2_subh_h16_sat_hh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32 %a, i32 %b) ret i32 %z } -; CHECK: = sub({{.*}}.h, {{.*}}.h):sat:<<16 +; CHECK: = sub({{.*}}.h,{{.*}}.h):sat:<<16 ; Sign extend word to doubleword declare i64 @llvm.hexagon.A2.sxtw(i32) @@ -592,7 +592,7 @@ define i64 @M2_vabsdiffh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vabsdiffh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vabsdiffh({{.*}}, {{.*}}) +; CHECK: = vabsdiffh({{.*}},{{.*}}) ; Vector absolute difference words declare i64 @llvm.hexagon.M2.vabsdiffw(i64, i64) @@ -600,7 +600,7 @@ define i64 @M2_vabsdiffw(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vabsdiffw(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vabsdiffw({{.*}}, {{.*}}) +; CHECK: = vabsdiffw({{.*}},{{.*}}) ; Vector add halfwords declare i64 @llvm.hexagon.A2.vaddh(i64, i64) @@ -608,21 +608,21 @@ define i64 @A2_vaddh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vaddh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vaddh({{.*}}, {{.*}}) +; CHECK: = vaddh({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vaddhs(i64, i64) define i64 @A2_vaddhs(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vaddhs(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vaddh({{.*}}, {{.*}}):sat +; CHECK: = vaddh({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.A2.vadduhs(i64, i64) define i64 @A2_vadduhs(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vadduhs(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vadduh({{.*}}, {{.*}}):sat +; CHECK: = vadduh({{.*}},{{.*}}):sat ; Vector add halfwords with saturate and pack to unsigned bytes declare i32 @llvm.hexagon.A5.vaddhubs(i64, i64) @@ -630,7 +630,7 @@ define i32 @A5_vaddhubs(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.A5.vaddhubs(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vaddhub({{.*}}, {{.*}}):sat +; CHECK: = vaddhub({{.*}},{{.*}}):sat ; Vector reduce add unsigned bytes declare i64 @llvm.hexagon.A2.vraddub(i64, i64) @@ -638,14 +638,14 @@ define i64 @A2_vraddub(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vraddub(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vraddub({{.*}}, {{.*}}) +; CHECK: = vraddub({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vraddub.acc(i64, i64, i64) define i64 @A2_vraddub_acc(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.A2.vraddub.acc(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vraddub({{.*}}, {{.*}}) +; CHECK: += vraddub({{.*}},{{.*}}) ; Vector reduce add halfwords declare i32 @llvm.hexagon.M2.vradduh(i64, i64) @@ -653,14 +653,14 @@ define i32 @M2_vradduh(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.M2.vradduh(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vradduh({{.*}}, {{.*}}) +; CHECK: = vradduh({{.*}},{{.*}}) 
declare i32 @llvm.hexagon.M2.vraddh(i64, i64) define i32 @M2_vraddh(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.M2.vraddh(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vraddh({{.*}}, {{.*}}) +; CHECK: = vraddh({{.*}},{{.*}}) ; Vector add bytes declare i64 @llvm.hexagon.A2.vaddub(i64, i64) @@ -668,14 +668,14 @@ define i64 @A2_vaddub(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vaddub(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vaddub({{.*}}, {{.*}}) +; CHECK: = vaddub({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vaddubs(i64, i64) define i64 @A2_vaddubs(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vaddubs(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vaddub({{.*}}, {{.*}}):sat +; CHECK: = vaddub({{.*}},{{.*}}):sat ; Vector add words declare i64 @llvm.hexagon.A2.vaddw(i64, i64) @@ -683,14 +683,14 @@ define i64 @A2_vaddw(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vaddw(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vaddw({{.*}}, {{.*}}) +; CHECK: = vaddw({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vaddws(i64, i64) define i64 @A2_vaddws(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vaddws(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vaddw({{.*}}, {{.*}}):sat +; CHECK: = vaddw({{.*}},{{.*}}):sat ; Vector average halfwords declare i64 @llvm.hexagon.A2.vavgh(i64, i64) @@ -698,56 +698,56 @@ define i64 @A2_vavgh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavgh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavgh({{.*}}, {{.*}}) +; CHECK: = vavgh({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vavghr(i64, i64) define i64 @A2_vavghr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavghr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavgh({{.*}}, {{.*}}):rnd +; CHECK: = vavgh({{.*}},{{.*}}):rnd declare i64 @llvm.hexagon.A2.vavghcr(i64, i64) define i64 @A2_vavghcr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavghcr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavgh({{.*}}, {{.*}}):crnd +; CHECK: = vavgh({{.*}},{{.*}}):crnd declare i64 @llvm.hexagon.A2.vavguh(i64, i64) define i64 @A2_vavguh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavguh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavguh({{.*}}, {{.*}}) +; CHECK: = vavguh({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vavguhr(i64, i64) define i64 @A2_vavguhr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavguhr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavguh({{.*}}, {{.*}}):rnd +; CHECK: = vavguh({{.*}},{{.*}}):rnd declare i64 @llvm.hexagon.A2.vnavgh(i64, i64) define i64 @A2_vnavgh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vnavgh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vnavgh({{.*}}, {{.*}}) +; CHECK: = vnavgh({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vnavghr(i64, i64) define i64 @A2_vnavghr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vnavghr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vnavgh({{.*}}, {{.*}}):rnd +; CHECK: = vnavgh({{.*}},{{.*}}):rnd declare i64 @llvm.hexagon.A2.vnavghcr(i64, i64) define i64 @A2_vnavghcr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vnavghcr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vnavgh({{.*}}, {{.*}}):crnd +; CHECK: = vnavgh({{.*}},{{.*}}):crnd ; Vector average unsigned bytes declare i64 @llvm.hexagon.A2.vavgub(i64, i64) @@ -755,14 +755,14 @@ define i64 @A2_vavgub(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavgub(i64 %a, i64 %b) ret i64 %z } -; CHECK: vavgub({{.*}}, {{.*}}) +; CHECK: vavgub({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vavgubr(i64, i64) define i64 @A2_vavgubr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavgubr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavgub({{.*}}, {{.*}}):rnd +; 
CHECK: = vavgub({{.*}},{{.*}}):rnd ; Vector average words declare i64 @llvm.hexagon.A2.vavgw(i64, i64) @@ -770,56 +770,56 @@ define i64 @A2_vavgw(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavgw(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavgw({{.*}}, {{.*}}) +; CHECK: = vavgw({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vavgwr(i64, i64) define i64 @A2_vavgwr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavgwr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavgw({{.*}}, {{.*}}):rnd +; CHECK: = vavgw({{.*}},{{.*}}):rnd declare i64 @llvm.hexagon.A2.vavgwcr(i64, i64) define i64 @A2_vavgwcr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavgwcr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavgw({{.*}}, {{.*}}):crnd +; CHECK: = vavgw({{.*}},{{.*}}):crnd declare i64 @llvm.hexagon.A2.vavguw(i64, i64) define i64 @A2_vavguw(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavguw(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavguw({{.*}}, {{.*}}) +; CHECK: = vavguw({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vavguwr(i64, i64) define i64 @A2_vavguwr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavguwr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vavguw({{.*}}, {{.*}}):rnd +; CHECK: = vavguw({{.*}},{{.*}}):rnd declare i64 @llvm.hexagon.A2.vnavgw(i64, i64) define i64 @A2_vnavgw(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vnavgw(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vnavgw({{.*}}, {{.*}}) +; CHECK: = vnavgw({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vnavgwr(i64, i64) define i64 @A2_vnavgwr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vnavgwr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vnavgw({{.*}}, {{.*}}):rnd +; CHECK: = vnavgw({{.*}},{{.*}}):rnd declare i64 @llvm.hexagon.A2.vnavgwcr(i64, i64) define i64 @A2_vnavgwcr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vnavgwcr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vnavgw({{.*}}, {{.*}}):crnd +; CHECK: = vnavgw({{.*}},{{.*}}):crnd ; Vector conditional negate declare i64 @llvm.hexagon.S2.vcnegh(i64, i32) @@ -827,14 +827,14 @@ define i64 @S2_vcnegh(i64 %a, i32 %b) { %z = call i64 @llvm.hexagon.S2.vcnegh(i64 %a, i32 %b) ret i64 %z } -; CHECK: = vcnegh({{.*}}, {{.*}}) +; CHECK: = vcnegh({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.vrcnegh(i64, i64, i32) define i64 @S2_vrcnegh(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.vrcnegh(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: += vrcnegh({{.*}}, {{.*}}) +; CHECK: += vrcnegh({{.*}},{{.*}}) ; Vector maximum bytes declare i64 @llvm.hexagon.A2.vmaxub(i64, i64) @@ -842,14 +842,14 @@ define i64 @A2_vmaxub(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vmaxub(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmaxub({{.*}}, {{.*}}) +; CHECK: = vmaxub({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vmaxb(i64, i64) define i64 @A2_vmaxb(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vmaxb(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmaxb({{.*}}, {{.*}}) +; CHECK: = vmaxb({{.*}},{{.*}}) ; Vector maximum halfwords declare i64 @llvm.hexagon.A2.vmaxh(i64, i64) @@ -857,14 +857,14 @@ define i64 @A2_vmaxh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vmaxh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmaxh({{.*}}, {{.*}}) +; CHECK: = vmaxh({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vmaxuh(i64, i64) define i64 @A2_vmaxuh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vmaxuh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vmaxuh({{.*}}, {{.*}}) +; CHECK: = vmaxuh({{.*}},{{.*}}) ; Vector reduce maximum halfwords declare i64 @llvm.hexagon.A4.vrmaxh(i64, i64, i32) @@ -872,14 +872,14 @@ define i64 @A4_vrmaxh(i64 %a, i64 %b, i32 %c) { 
%z = call i64 @llvm.hexagon.A4.vrmaxh(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: = vrmaxh({{.*}}, {{.*}}) +; CHECK: = vrmaxh({{.*}},{{.*}}) declare i64 @llvm.hexagon.A4.vrmaxuh(i64, i64, i32) define i64 @A4_vrmaxuh(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.A4.vrmaxuh(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: = vrmaxuh({{.*}}, {{.*}}) +; CHECK: = vrmaxuh({{.*}},{{.*}}) ; Vector reduce maximum words declare i64 @llvm.hexagon.A4.vrmaxw(i64, i64, i32) @@ -887,14 +887,14 @@ define i64 @A4_vrmaxw(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.A4.vrmaxw(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: = vrmaxw({{.*}}, {{.*}}) +; CHECK: = vrmaxw({{.*}},{{.*}}) declare i64 @llvm.hexagon.A4.vrmaxuw(i64, i64, i32) define i64 @A4_vrmaxuw(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.A4.vrmaxuw(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: vrmaxuw({{.*}}, {{.*}}) +; CHECK: vrmaxuw({{.*}},{{.*}}) ; Vector minimum bytes declare i64 @llvm.hexagon.A2.vminub(i64, i64) @@ -902,14 +902,14 @@ define i64 @A2_vminub(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vminub(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vminub({{.*}}, {{.*}}) +; CHECK: = vminub({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vminb(i64, i64) define i64 @A2_vminb(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vminb(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vminb({{.*}}, {{.*}}) +; CHECK: = vminb({{.*}},{{.*}}) ; Vector minimum halfwords declare i64 @llvm.hexagon.A2.vminh(i64, i64) @@ -917,14 +917,14 @@ define i64 @A2_vminh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vminh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vminh({{.*}}, {{.*}}) +; CHECK: = vminh({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vminuh(i64, i64) define i64 @A2_vminuh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vminuh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vminuh({{.*}}, {{.*}}) +; CHECK: = vminuh({{.*}},{{.*}}) ; Vector reduce minimum halfwords declare i64 @llvm.hexagon.A4.vrminh(i64, i64, i32) @@ -932,14 +932,14 @@ define i64 @A4_vrminh(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.A4.vrminh(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: = vrminh({{.*}}, {{.*}}) +; CHECK: = vrminh({{.*}},{{.*}}) declare i64 @llvm.hexagon.A4.vrminuh(i64, i64, i32) define i64 @A4_vrminuh(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.A4.vrminuh(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: = vrminuh({{.*}}, {{.*}}) +; CHECK: = vrminuh({{.*}},{{.*}}) ; Vector reduce minimum words declare i64 @llvm.hexagon.A4.vrminw(i64, i64, i32) @@ -947,14 +947,14 @@ define i64 @A4_vrminw(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.A4.vrminw(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: = vrminw({{.*}}, {{.*}}) +; CHECK: = vrminw({{.*}},{{.*}}) declare i64 @llvm.hexagon.A4.vrminuw(i64, i64, i32) define i64 @A4_vrminuw(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.A4.vrminuw(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: = vrminuw({{.*}}, {{.*}}) +; CHECK: = vrminuw({{.*}},{{.*}}) ; Vector sum of absolute differences unsigned bytes declare i64 @llvm.hexagon.A2.vrsadub(i64, i64) @@ -962,14 +962,14 @@ define i64 @A2_vrsadub(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vrsadub(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vrsadub({{.*}}, {{.*}}) +; CHECK: = vrsadub({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vrsadub.acc(i64, i64, i64) define i64 @A2_vrsadub_acc(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.A2.vrsadub.acc(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vrsadub({{.*}}, {{.*}}) +; CHECK: += 
vrsadub({{.*}},{{.*}}) ; Vector subtract halfwords declare i64 @llvm.hexagon.A2.vsubh(i64, i64) @@ -977,21 +977,21 @@ define i64 @A2_vsubh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vsubh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vsubh({{.*}}, {{.*}}) +; CHECK: = vsubh({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vsubhs(i64, i64) define i64 @A2_vsubhs(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vsubhs(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vsubh({{.*}}, {{.*}}):sat +; CHECK: = vsubh({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.A2.vsubuhs(i64, i64) define i64 @A2_vsubuhs(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vsubuhs(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vsubuh({{.*}}, {{.*}}):sat +; CHECK: = vsubuh({{.*}},{{.*}}):sat ; Vector subtract bytes declare i64 @llvm.hexagon.A2.vsubub(i64, i64) @@ -999,14 +999,14 @@ define i64 @A2_vsubub(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vsubub(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vsubub({{.*}}, {{.*}}) +; CHECK: = vsubub({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vsububs(i64, i64) define i64 @A2_vsububs(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vsububs(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vsubub({{.*}}, {{.*}}):sat +; CHECK: = vsubub({{.*}},{{.*}}):sat ; Vector subtract words declare i64 @llvm.hexagon.A2.vsubw(i64, i64) @@ -1014,11 +1014,11 @@ define i64 @A2_vsubw(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vsubw(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vsubw({{.*}}, {{.*}}) +; CHECK: = vsubw({{.*}},{{.*}}) declare i64 @llvm.hexagon.A2.vsubws(i64, i64) define i64 @A2_vsubws(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vsubws(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vsubw({{.*}}, {{.*}}):sat +; CHECK: = vsubw({{.*}},{{.*}}):sat diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll b/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll index e8f83d01820a..ec7613e3ef2a 100644 --- a/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll +++ b/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll @@ -38,14 +38,14 @@ define i32 @S4_clbpaddi(i64 %a) { %z = call i32 @llvm.hexagon.S4.clbpaddi(i64 %a, i32 0) ret i32 %z } -; CHECK: = add(clb({{.*}}), #0) +; CHECK: = add(clb({{.*}}),#0) declare i32 @llvm.hexagon.S4.clbaddi(i32, i32) define i32 @S4_clbaddi(i32 %a) { %z = call i32 @llvm.hexagon.S4.clbaddi(i32 %a, i32 0) ret i32 %z } -; CHECK: = add(clb({{.*}}), #0) +; CHECK: = add(clb({{.*}}),#0) declare i32 @llvm.hexagon.S2.cl0(i32) define i32 @S2_cl0(i32 %a) { @@ -111,56 +111,56 @@ define i64 @S2_extractup(i64 %a) { %z = call i64 @llvm.hexagon.S2.extractup(i64 %a, i32 0, i32 0) ret i64 %z } -; CHECK: = extractu({{.*}}, #0, #0) +; CHECK: = extractu({{.*}},#0,#0) declare i64 @llvm.hexagon.S4.extractp(i64, i32, i32) define i64 @S2_extractp(i64 %a) { %z = call i64 @llvm.hexagon.S4.extractp(i64 %a, i32 0, i32 0) ret i64 %z } -; CHECK: = extract({{.*}}, #0, #0) +; CHECK: = extract({{.*}},#0,#0) declare i32 @llvm.hexagon.S2.extractu(i32, i32, i32) define i32 @S2_extractu(i32 %a) { %z = call i32 @llvm.hexagon.S2.extractu(i32 %a, i32 0, i32 0) ret i32 %z } -; CHECK: = extractu({{.*}}, #0, #0) +; CHECK: = extractu({{.*}},#0,#0) declare i32 @llvm.hexagon.S4.extract(i32, i32, i32) define i32 @S2_extract(i32 %a) { %z = call i32 @llvm.hexagon.S4.extract(i32 %a, i32 0, i32 0) ret i32 %z } -; CHECK: = extract({{.*}}, #0, #0) +; CHECK: = extract({{.*}},#0,#0) declare i64 @llvm.hexagon.S2.extractup.rp(i64, i64) define i64 @S2_extractup_rp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.extractup.rp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = 
extractu({{.*}}, {{.*}}) +; CHECK: = extractu({{.*}},{{.*}}) declare i64 @llvm.hexagon.S4.extractp.rp(i64, i64) define i64 @S4_extractp_rp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S4.extractp.rp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = extract({{.*}}, {{.*}}) +; CHECK: = extract({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.extractu.rp(i32, i64) define i32 @S2_extractu_rp(i32 %a, i64 %b) { %z = call i32 @llvm.hexagon.S2.extractu.rp(i32 %a, i64 %b) ret i32 %z } -; CHECK: = extractu({{.*}}, {{.*}}) +; CHECK: = extractu({{.*}},{{.*}}) declare i32 @llvm.hexagon.S4.extract.rp(i32, i64) define i32 @S4_extract_rp(i32 %a, i64 %b) { %z = call i32 @llvm.hexagon.S4.extract.rp(i32 %a, i64 %b) ret i32 %z } -; CHECK: = extract({{.*}}, {{.*}}) +; CHECK: = extract({{.*}},{{.*}}) ; Insert bitfield declare i64 @llvm.hexagon.S2.insertp(i64, i64, i32, i32) @@ -168,28 +168,28 @@ define i64 @S2_insertp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.insertp(i64 %a, i64 %b, i32 0, i32 0) ret i64 %z } -; CHECK: = insert({{.*}}, #0, #0) +; CHECK: = insert({{.*}},#0,#0) declare i32 @llvm.hexagon.S2.insert(i32, i32, i32, i32) define i32 @S2_insert(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.insert(i32 %a, i32 %b, i32 0, i32 0) ret i32 %z } -; CHECK: = insert({{.*}}, #0, #0) +; CHECK: = insert({{.*}},#0,#0) declare i32 @llvm.hexagon.S2.insert.rp(i32, i32, i64) define i32 @S2_insert_rp(i32 %a, i32 %b, i64 %c) { %z = call i32 @llvm.hexagon.S2.insert.rp(i32 %a, i32 %b, i64 %c) ret i32 %z } -; CHECK: = insert({{.*}}, {{.*}}) +; CHECK: = insert({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.insertp.rp(i64, i64, i64) define i64 @S2_insertp_rp(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.S2.insertp.rp(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: = insert({{.*}}, r5:4) +; CHECK: = insert({{.*}},r5:4) ; Interleave/deinterleave declare i64 @llvm.hexagon.S2.deinterleave(i64) @@ -212,7 +212,7 @@ define i64 @S2_lfsp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.lfsp(i64 %a, i64 %b) ret i64 %z } -; CHECK: = lfs({{.*}}, {{.*}}) +; CHECK: = lfs({{.*}},{{.*}}) ; Masked parity declare i32 @llvm.hexagon.S2.parityp(i64, i64) @@ -220,14 +220,14 @@ define i32 @S2_parityp(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.S2.parityp(i64 %a, i64 %b) ret i32 %z } -; CHECK: = parity({{.*}}, {{.*}}) +; CHECK: = parity({{.*}},{{.*}}) declare i32 @llvm.hexagon.S4.parity(i32, i32) define i32 @S4_parity(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S4.parity(i32 %a, i32 %b) ret i32 %z } -; CHECK: = parity({{.*}}, {{.*}}) +; CHECK: = parity({{.*}},{{.*}}) ; Bit reverse declare i64 @llvm.hexagon.S2.brevp(i64) @@ -250,42 +250,42 @@ define i32 @S2_setbit_i(i32 %a) { %z = call i32 @llvm.hexagon.S2.setbit.i(i32 %a, i32 0) ret i32 %z } -; CHECK: = setbit({{.*}}, #0) +; CHECK: = setbit({{.*}},#0) declare i32 @llvm.hexagon.S2.clrbit.i(i32, i32) define i32 @S2_clrbit_i(i32 %a) { %z = call i32 @llvm.hexagon.S2.clrbit.i(i32 %a, i32 0) ret i32 %z } -; CHECK: = clrbit({{.*}}, #0) +; CHECK: = clrbit({{.*}},#0) declare i32 @llvm.hexagon.S2.togglebit.i(i32, i32) define i32 @S2_togglebit_i(i32 %a) { %z = call i32 @llvm.hexagon.S2.togglebit.i(i32 %a, i32 0) ret i32 %z } -; CHECK: = togglebit({{.*}}, #0) +; CHECK: = togglebit({{.*}},#0) declare i32 @llvm.hexagon.S2.setbit.r(i32, i32) define i32 @S2_setbit_r(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.setbit.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: = setbit({{.*}}, {{.*}}) +; CHECK: = setbit({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.clrbit.r(i32, i32) define i32 @S2_clrbit_r(i32 %a, i32 %b) { %z 
= call i32 @llvm.hexagon.S2.clrbit.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: = clrbit({{.*}}, {{.*}}) +; CHECK: = clrbit({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.togglebit.r(i32, i32) define i32 @S2_togglebit_r(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.togglebit.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: = togglebit({{.*}}, {{.*}}) +; CHECK: = togglebit({{.*}},{{.*}}) ; Split bitfield declare i64 @llvm.hexagon.A4.bitspliti(i32, i32) @@ -293,14 +293,14 @@ define i64 @A4_bitspliti(i32 %a) { %z = call i64 @llvm.hexagon.A4.bitspliti(i32 %a, i32 0) ret i64 %z } -; CHECK: = bitsplit({{.*}}, #0) +; CHECK: = bitsplit({{.*}},#0) declare i64 @llvm.hexagon.A4.bitsplit(i32, i32) define i64 @A4_bitsplit(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.A4.bitsplit(i32 %a, i32 %b) ret i64 %z } -; CHECK: = bitsplit({{.*}}, {{.*}}) +; CHECK: = bitsplit({{.*}},{{.*}}) ; Table index declare i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32, i32, i32, i32) @@ -308,25 +308,25 @@ define i32 @S2_tableidxb_goodsyntax(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) ret i32 %z } -; CHECK: = tableidxb({{.*}}, #0, #0) +; CHECK: = tableidxb({{.*}},#0,#0) declare i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32, i32, i32, i32) define i32 @S2_tableidxh_goodsyntax(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) ret i32 %z } -; CHECK: = tableidxh({{.*}}, #0, #-1) +; CHECK: = tableidxh({{.*}},#0,#-1) declare i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32, i32, i32, i32) define i32 @S2_tableidxw_goodsyntax(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) ret i32 %z } -; CHECK: = tableidxw({{.*}}, #0, #-2) +; CHECK: = tableidxw({{.*}},#0,#-2) declare i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32, i32, i32, i32) define i32 @S2_tableidxd_goodsyntax(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) ret i32 %z } -; CHECK: = tableidxd({{.*}}, #0, #-3) +; CHECK: = tableidxd({{.*}},#0,#-3) diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll b/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll index 0087883573ec..254b928aa982 100644 --- a/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll +++ b/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll @@ -10,28 +10,28 @@ define i64 @S4_vxaddsubh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S4.vxaddsubh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vxaddsubh({{.*}}, {{.*}}):sat +; CHECK: = vxaddsubh({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.S4.vxsubaddh(i64, i64) define i64 @S4_vxsubaddh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S4.vxsubaddh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vxsubaddh({{.*}}, {{.*}}):sat +; CHECK: = vxsubaddh({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.S4.vxaddsubhr(i64, i64) define i64 @S4_vxaddsubhr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S4.vxaddsubhr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vxaddsubh({{.*}}, {{.*}}):rnd:>>1:sat +; CHECK: = vxaddsubh({{.*}},{{.*}}):rnd:>>1:sat declare i64 @llvm.hexagon.S4.vxsubaddhr(i64, i64) define i64 @S4_vxsubaddhr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S4.vxsubaddhr(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vxsubaddh({{.*}}, {{.*}}):rnd:>>1:sat +; CHECK: = vxsubaddh({{.*}},{{.*}}):rnd:>>1:sat ; Complex add/sub words declare i64 @llvm.hexagon.S4.vxaddsubw(i64, i64) @@ -39,14 +39,14 @@ define i64 @S4_vxaddsubw(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S4.vxaddsubw(i64 %a, i64 %b) ret i64 %z } -; CHECK: = 
vxaddsubw({{.*}}, {{.*}}):sat +; CHECK: = vxaddsubw({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.S4.vxsubaddw(i64, i64) define i64 @S4_vxsubaddw(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S4.vxsubaddw(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vxsubaddw({{.*}}, {{.*}}):sat +; CHECK: = vxsubaddw({{.*}},{{.*}}):sat ; Complex multiply declare i64 @llvm.hexagon.M2.cmpys.s0(i32, i32) @@ -54,84 +54,84 @@ define i64 @M2_cmpys_s0(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.cmpys.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = cmpy({{.*}}, {{.*}}):sat +; CHECK: = cmpy({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.cmpys.s1(i32, i32) define i64 @M2_cmpys_s1(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.cmpys.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: = cmpy({{.*}}, {{.*}}):<<1:sat +; CHECK: = cmpy({{.*}},{{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.cmpysc.s0(i32, i32) define i64 @M2_cmpysc_s0(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.cmpysc.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = cmpy({{.*}}, {{.*}}*):sat +; CHECK: = cmpy({{.*}},{{.*}}*):sat declare i64 @llvm.hexagon.M2.cmpysc.s1(i32, i32) define i64 @M2_cmpysc_s1(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.cmpysc.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: = cmpy({{.*}}, {{.*}}*):<<1:sat +; CHECK: = cmpy({{.*}},{{.*}}*):<<1:sat declare i64 @llvm.hexagon.M2.cmacs.s0(i64, i32, i32) define i64 @M2_cmacs_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cmacs.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += cmpy({{.*}}, {{.*}}):sat +; CHECK: += cmpy({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.cmacs.s1(i64, i32, i32) define i64 @M2_cmacs_s1(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cmacs.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += cmpy({{.*}}, {{.*}}):<<1:sat +; CHECK: += cmpy({{.*}},{{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.cnacs.s0(i64, i32, i32) define i64 @M2_cnacs_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cnacs.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= cmpy({{.*}}, {{.*}}):sat +; CHECK: -= cmpy({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.cnacs.s1(i64, i32, i32) define i64 @M2_cnacs_s1(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cnacs.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= cmpy({{.*}}, {{.*}}):<<1:sat +; CHECK: -= cmpy({{.*}},{{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.cmacsc.s0(i64, i32, i32) define i64 @M2_cmacsc_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cmacsc.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += cmpy({{.*}}, {{.*}}*):sat +; CHECK: += cmpy({{.*}},{{.*}}*):sat declare i64 @llvm.hexagon.M2.cmacsc.s1(i64, i32, i32) define i64 @M2_cmacsc_s1(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cmacsc.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += cmpy({{.*}}, {{.*}}*):<<1:sat +; CHECK: += cmpy({{.*}},{{.*}}*):<<1:sat declare i64 @llvm.hexagon.M2.cnacsc.s0(i64, i32, i32) define i64 @M2_cnacsc_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cnacsc.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= cmpy({{.*}}, {{.*}}*):sat +; CHECK: -= cmpy({{.*}},{{.*}}*):sat declare i64 @llvm.hexagon.M2.cnacsc.s1(i64, i32, i32) define i64 @M2_cnacsc_s1(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cnacsc.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: -= cmpy({{.*}}, {{.*}}*):<<1:sat +; CHECK: -= cmpy({{.*}},{{.*}}*):<<1:sat ; Complex multiply real or imaginary declare i64 @llvm.hexagon.M2.cmpyi.s0(i32, i32) @@ -139,28 +139,28 @@ define i64 @M2_cmpyi_s0(i32 %a, i32 %b) { %z = 
call i64 @llvm.hexagon.M2.cmpyi.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = cmpyi({{.*}}, {{.*}}) +; CHECK: = cmpyi({{.*}},{{.*}}) declare i64 @llvm.hexagon.M2.cmpyr.s0(i32, i32) define i64 @M2_cmpyr_s0(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.cmpyr.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = cmpyr({{.*}}, {{.*}}) +; CHECK: = cmpyr({{.*}},{{.*}}) declare i64 @llvm.hexagon.M2.cmaci.s0(i64, i32, i32) define i64 @M2_cmaci_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cmaci.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += cmpyi({{.*}}, {{.*}}) +; CHECK: += cmpyi({{.*}},{{.*}}) declare i64 @llvm.hexagon.M2.cmacr.s0(i64, i32, i32) define i64 @M2_cmacr_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cmacr.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += cmpyr({{.*}}, {{.*}}) +; CHECK: += cmpyr({{.*}},{{.*}}) ; Complex multiply with round and pack declare i32 @llvm.hexagon.M2.cmpyrs.s0(i32, i32) @@ -168,28 +168,28 @@ define i32 @M2_cmpyrs_s0(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.cmpyrs.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmpy({{.*}}, {{.*}}):rnd:sat +; CHECK: = cmpy({{.*}},{{.*}}):rnd:sat declare i32 @llvm.hexagon.M2.cmpyrs.s1(i32, i32) define i32 @M2_cmpyrs_s1(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.cmpyrs.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmpy({{.*}}, {{.*}}):<<1:rnd:sat +; CHECK: = cmpy({{.*}},{{.*}}):<<1:rnd:sat declare i32 @llvm.hexagon.M2.cmpyrsc.s0(i32, i32) define i32 @M2_cmpyrsc_s0(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.cmpyrsc.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmpy({{.*}}, {{.*}}*):rnd:sat +; CHECK: = cmpy({{.*}},{{.*}}*):rnd:sat declare i32 @llvm.hexagon.M2.cmpyrsc.s1(i32, i32) define i32 @M2_cmpyrsc_s1(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.cmpyrsc.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmpy({{.*}}, {{.*}}*):<<1:rnd:sat +; CHECK: = cmpy({{.*}},{{.*}}*):<<1:rnd:sat ; Complex multiply 32x16 declare i32 @llvm.hexagon.M4.cmpyi.wh(i64, i32) @@ -197,28 +197,28 @@ define i32 @M4_cmpyi_wh(i64 %a, i32 %b) { %z = call i32 @llvm.hexagon.M4.cmpyi.wh(i64 %a, i32 %b) ret i32 %z } -; CHECK: = cmpyiwh({{.*}}, {{.*}}):<<1:rnd:sat +; CHECK: = cmpyiwh({{.*}},{{.*}}):<<1:rnd:sat declare i32 @llvm.hexagon.M4.cmpyi.whc(i64, i32) define i32 @M4_cmpyi_whc(i64 %a, i32 %b) { %z = call i32 @llvm.hexagon.M4.cmpyi.whc(i64 %a, i32 %b) ret i32 %z } -; CHECK: = cmpyiwh({{.*}}, {{.*}}*):<<1:rnd:sat +; CHECK: = cmpyiwh({{.*}},{{.*}}*):<<1:rnd:sat declare i32 @llvm.hexagon.M4.cmpyr.wh(i64, i32) define i32 @M4_cmpyr_wh(i64 %a, i32 %b) { %z = call i32 @llvm.hexagon.M4.cmpyr.wh(i64 %a, i32 %b) ret i32 %z } -; CHECK: = cmpyrwh({{.*}}, {{.*}}):<<1:rnd:sat +; CHECK: = cmpyrwh({{.*}},{{.*}}):<<1:rnd:sat declare i32 @llvm.hexagon.M4.cmpyr.whc(i64, i32) define i32 @M4_cmpyr_whc(i64 %a, i32 %b) { %z = call i32 @llvm.hexagon.M4.cmpyr.whc(i64 %a, i32 %b) ret i32 %z } -; CHECK: = cmpyrwh({{.*}}, {{.*}}*):<<1:rnd:sat +; CHECK: = cmpyrwh({{.*}},{{.*}}*):<<1:rnd:sat ; Vector complex multiply real or imaginary declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64, i64) @@ -226,42 +226,42 @@ define i64 @M2_vcmpy_s0_sat_r(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vcmpyr({{.*}}, {{.*}}):sat +; CHECK: = vcmpyr({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64, i64) define i64 @M2_vcmpy_s1_sat_r(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vcmpyr({{.*}}, {{.*}}):<<1:sat +; CHECK: = 
vcmpyr({{.*}},{{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64, i64) define i64 @M2_vcmpy_s0_sat_i(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vcmpyi({{.*}}, {{.*}}):sat +; CHECK: = vcmpyi({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64, i64) define i64 @M2_vcmpy_s1_sat_i(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vcmpyi({{.*}}, {{.*}}):<<1:sat +; CHECK: = vcmpyi({{.*}},{{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64, i64, i64) define i64 @M2_vcmac_s0_sat_r(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vcmpyr({{.*}}, r5:4):sat +; CHECK: += vcmpyr({{.*}},r5:4):sat declare i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64, i64, i64) define i64 @M2_vcmac_s0_sat_i(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vcmpyi({{.*}}, r5:4):sat +; CHECK: += vcmpyi({{.*}},r5:4):sat ; Vector complex conjugate declare i64 @llvm.hexagon.A2.vconj(i64) @@ -277,7 +277,7 @@ define i64 @S2_vcrotate(i64 %a, i32 %b) { %z = call i64 @llvm.hexagon.S2.vcrotate(i64 %a, i32 %b) ret i64 %z } -; CHECK: = vcrotate({{.*}}, {{.*}}) +; CHECK: = vcrotate({{.*}},{{.*}}) ; Vector reduce complex multiply real or imaginary declare i64 @llvm.hexagon.M2.vrcmpyi.s0(i64, i64) @@ -285,56 +285,56 @@ define i64 @M2_vrcmpyi_s0(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vrcmpyi.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vrcmpyi({{.*}}, {{.*}}) +; CHECK: = vrcmpyi({{.*}},{{.*}}) declare i64 @llvm.hexagon.M2.vrcmpyr.s0(i64, i64) define i64 @M2_vrcmpyr_s0(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vrcmpyr.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vrcmpyr({{.*}}, {{.*}}) +; CHECK: = vrcmpyr({{.*}},{{.*}}) declare i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64, i64) define i64 @M2_vrcmpyi_s0c(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vrcmpyi({{.*}}, {{.*}}*) +; CHECK: = vrcmpyi({{.*}},{{.*}}*) declare i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64, i64) define i64 @M2_vrcmpyr_s0c(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vrcmpyr({{.*}}, {{.*}}*) +; CHECK: = vrcmpyr({{.*}},{{.*}}*) declare i64 @llvm.hexagon.M2.vrcmaci.s0(i64, i64, i64) define i64 @M2_vrcmaci_s0(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.M2.vrcmaci.s0(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vrcmpyi({{.*}}, r5:4) +; CHECK: += vrcmpyi({{.*}},r5:4) declare i64 @llvm.hexagon.M2.vrcmacr.s0(i64, i64, i64) define i64 @M2_vrcmacr_s0(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.M2.vrcmacr.s0(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vrcmpyr({{.*}}, r5:4) +; CHECK: += vrcmpyr({{.*}},r5:4) declare i64 @llvm.hexagon.M2.vrcmaci.s0c(i64, i64, i64) define i64 @M2_vrcmaci_s0c(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.M2.vrcmaci.s0c(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vrcmpyi({{.*}}, r5:4*) +; CHECK: += vrcmpyi({{.*}},r5:4*) declare i64 @llvm.hexagon.M2.vrcmacr.s0c(i64, i64, i64) define i64 @M2_vrcmacr_s0c(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.M2.vrcmacr.s0c(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vrcmpyr({{.*}}, r5:4*) +; CHECK: += vrcmpyr({{.*}},r5:4*) ; Vector reduce complex rotate declare i64 @llvm.hexagon.S4.vrcrotate(i64, i32, i32) @@ -342,11 +342,11 @@ define i64 
%z = call i64 @llvm.hexagon.S4.vrcrotate(i64 %a, i32 %b, i32 0)
ret i64 %z
}
-; CHECK: = vrcrotate({{.*}}, {{.*}}, #0)
+; CHECK: = vrcrotate({{.*}},{{.*}},#0)

declare i64 @llvm.hexagon.S4.vrcrotate.acc(i64, i64, i32, i32)
define i64 @S4_vrcrotate_acc(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S4.vrcrotate.acc(i64 %a, i64 %b, i32 %c, i32 0)
ret i64 %z
}
-; CHECK: += vrcrotate({{.*}}, {{.*}}, #0)
+; CHECK: += vrcrotate({{.*}},{{.*}},#0)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll b/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll
index 598d0a83206d..ee56e9051621 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll
@@ -11,7 +11,7 @@ define float @F2_sfadd(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfadd(float %a, float %b)
ret float %z
}
-; CHECK: = sfadd({{.*}}, {{.*}})
+; CHECK: = sfadd({{.*}},{{.*}})

; Classify floating-point value
declare i32 @llvm.hexagon.F2.sfclass(float, i32)
@@ -19,14 +19,14 @@ define i32 @F2_sfclass(float %a) {
%z = call i32 @llvm.hexagon.F2.sfclass(float %a, i32 0)
ret i32 %z
}
-; CHECK: = sfclass({{.*}}, #0)
+; CHECK: = sfclass({{.*}},#0)

declare i32 @llvm.hexagon.F2.dfclass(double, i32)
define i32 @F2_dfclass(double %a) {
%z = call i32 @llvm.hexagon.F2.dfclass(double %a, i32 0)
ret i32 %z
}
-; CHECK: = dfclass({{.*}}, #0)
+; CHECK: = dfclass({{.*}},#0)

; Compare floating-point value
declare i32 @llvm.hexagon.F2.sfcmpge(float, float)
@@ -34,56 +34,56 @@ define i32 @F2_sfcmpge(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpge(float %a, float %b)
ret i32 %z
}
-; CHECK: = sfcmp.ge({{.*}}, {{.*}})
+; CHECK: = sfcmp.ge({{.*}},{{.*}})

declare i32 @llvm.hexagon.F2.sfcmpuo(float, float)
define i32 @F2_sfcmpuo(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpuo(float %a, float %b)
ret i32 %z
}
-; CHECK: = sfcmp.uo({{.*}}, {{.*}})
+; CHECK: = sfcmp.uo({{.*}},{{.*}})

declare i32 @llvm.hexagon.F2.sfcmpeq(float, float)
define i32 @F2_sfcmpeq(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpeq(float %a, float %b)
ret i32 %z
}
-; CHECK: = sfcmp.eq({{.*}}, {{.*}})
+; CHECK: = sfcmp.eq({{.*}},{{.*}})

declare i32 @llvm.hexagon.F2.sfcmpgt(float, float)
define i32 @F2_sfcmpgt(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpgt(float %a, float %b)
ret i32 %z
}
-; CHECK: = sfcmp.gt({{.*}}, {{.*}})
+; CHECK: = sfcmp.gt({{.*}},{{.*}})

declare i32 @llvm.hexagon.F2.dfcmpge(double, double)
define i32 @F2_dfcmpge(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpge(double %a, double %b)
ret i32 %z
}
-; CHECK: = dfcmp.ge({{.*}}, {{.*}})
+; CHECK: = dfcmp.ge({{.*}},{{.*}})

declare i32 @llvm.hexagon.F2.dfcmpuo(double, double)
define i32 @F2_dfcmpuo(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpuo(double %a, double %b)
ret i32 %z
}
-; CHECK: = dfcmp.uo({{.*}}, {{.*}})
+; CHECK: = dfcmp.uo({{.*}},{{.*}})

declare i32 @llvm.hexagon.F2.dfcmpeq(double, double)
define i32 @F2_dfcmpeq(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpeq(double %a, double %b)
ret i32 %z
}
-; CHECK: = dfcmp.eq({{.*}}, {{.*}})
+; CHECK: = dfcmp.eq({{.*}},{{.*}})

declare i32 @llvm.hexagon.F2.dfcmpgt(double, double)
define i32 @F2_dfcmpgt(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpgt(double %a, double %b)
ret i32 %z
}
-; CHECK: = dfcmp.gt({{.*}}, {{.*}})
+; CHECK: = dfcmp.gt({{.*}},{{.*}})

; Convert floating-point value to other format
declare double @llvm.hexagon.F2.conv.sf2df(float)
@@ -283,14 +283,14 @@ define float @F2_sffixupn(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sffixupn(float %a, float %b)
ret float %z
}
-; CHECK: = sffixupn({{.*}}, {{.*}})
+; CHECK: = sffixupn({{.*}},{{.*}})

declare float @llvm.hexagon.F2.sffixupd(float, float)
define float @F2_sffixupd(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sffixupd(float %a, float %b)
ret float %z
}
-; CHECK: = sffixupd({{.*}}, {{.*}})
+; CHECK: = sffixupd({{.*}},{{.*}})

; Floating point fused multiply-add
declare float @llvm.hexagon.F2.sffma(float, float, float)
@@ -298,14 +298,14 @@ define float @F2_sffma(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffma(float %a, float %b, float %c)
ret float %z
}
-; CHECK: += sfmpy({{.*}}, {{.*}})
+; CHECK: += sfmpy({{.*}},{{.*}})

declare float @llvm.hexagon.F2.sffms(float, float, float)
define float @F2_sffms(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffms(float %a, float %b, float %c)
ret float %z
}
-; CHECK: -= sfmpy({{.*}}, {{.*}})
+; CHECK: -= sfmpy({{.*}},{{.*}})

; Floating point fused multiply-add with scaling
declare float @llvm.hexagon.F2.sffma.sc(float, float, float, i32)
@@ -313,7 +313,7 @@ define float @F2_sffma_sc(float %a, float %b, float %c, i32 %d) {
%z = call float @llvm.hexagon.F2.sffma.sc(float %a, float %b, float %c, i32 %d)
ret float %z
}
-; CHECK: += sfmpy({{.*}}, {{.*}}, {{.*}}):scale
+; CHECK: += sfmpy({{.*}},{{.*}},{{.*}}):scale

; Floating point fused multiply-add for library routines
declare float @llvm.hexagon.F2.sffma.lib(float, float, float)
@@ -321,14 +321,14 @@ define float @F2_sffma_lib(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffma.lib(float %a, float %b, float %c)
ret float %z
}
-; CHECK: += sfmpy({{.*}}, {{.*}}):lib
+; CHECK: += sfmpy({{.*}},{{.*}}):lib

declare float @llvm.hexagon.F2.sffms.lib(float, float, float)
define float @F2_sffms_lib(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffms.lib(float %a, float %b, float %c)
ret float %z
}
-; CHECK: -= sfmpy({{.*}}, {{.*}}):lib
+; CHECK: -= sfmpy({{.*}},{{.*}}):lib

; Create floating-point constant
declare float @llvm.hexagon.F2.sfimm.p(i32)
@@ -365,7 +365,7 @@ define float @F2_sfmax(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfmax(float %a, float %b)
ret float %z
}
-; CHECK: = sfmax({{.*}}, {{.*}})
+; CHECK: = sfmax({{.*}},{{.*}})

; Floating point minimum
declare float @llvm.hexagon.F2.sfmin(float, float)
@@ -373,7 +373,7 @@ define float @F2_sfmin(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfmin(float %a, float %b)
ret float %z
}
-; CHECK: = sfmin({{.*}}, {{.*}})
+; CHECK: = sfmin({{.*}},{{.*}})

; Floating point multiply
declare float @llvm.hexagon.F2.sfmpy(float, float)
@@ -381,7 +381,7 @@ define float @F2_sfmpy(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfmpy(float %a, float %b)
ret float %z
}
-; CHECK: = sfmpy({{.*}}, {{.*}})
+; CHECK: = sfmpy({{.*}},{{.*}})

; Floating point subtraction
declare float @llvm.hexagon.F2.sfsub(float, float)
@@ -389,4 +389,4 @@ define float @F2_sfsub(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfsub(float %a, float %b)
ret float %z
}
-; CHECK: = sfsub({{.*}}, {{.*}})
+; CHECK: = sfsub({{.*}},{{.*}})
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll b/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll
index a1490499fbf6..4da4a8a6393f 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll
@@ -11,35 +11,35 @@ define i32 @M4_mpyrr_addi(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.mpyrr.addi(i32 0, i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = add(#0, mpyi({{.*}}, {{.*}}))
+; CHECK: = add(#0,mpyi({{.*}},{{.*}}))

declare i32 @llvm.hexagon.M4.mpyri.addi(i32, i32, i32)
define i32 @M4_mpyri_addi(i32 %a) {
%z = call i32 @llvm.hexagon.M4.mpyri.addi(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: = add(#0, mpyi({{.*}}, #0))
+; CHECK: = add(#0,mpyi({{.*}},#0))

declare i32 @llvm.hexagon.M4.mpyri.addr.u2(i32, i32, i32)
define i32 @M4_mpyri_addr_u2(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.mpyri.addr.u2(i32 %a, i32 0, i32 %b)
ret i32 %z
}
-; CHECK: = add({{.*}}, mpyi(#0, {{.*}}))
+; CHECK: = add({{.*}},mpyi(#0,{{.*}}))

declare i32 @llvm.hexagon.M4.mpyri.addr(i32, i32, i32)
define i32 @M4_mpyri_addr(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.mpyri.addr(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: = add({{.*}}, mpyi({{.*}}, #0))
+; CHECK: = add({{.*}},mpyi({{.*}},#0))

declare i32 @llvm.hexagon.M4.mpyrr.addr(i32, i32, i32)
define i32 @M4_mpyrr_addr(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.mpyrr.addr(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: = add({{.*}}, mpyi({{.*}}, {{.*}}))
+; CHECK: = add({{.*}},mpyi({{.*}},{{.*}}))

; Vector multiply word by signed half (32x16)
declare i64 @llvm.hexagon.M2.mmpyl.s0(i64, i64)
@@ -47,56 +47,56 @@ define i64 @M2_mmpyl_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyl.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweh({{.*}}, {{.*}}):sat
+; CHECK: = vmpyweh({{.*}},{{.*}}):sat

declare i64 @llvm.hexagon.M2.mmpyl.s1(i64, i64)
define i64 @M2_mmpyl_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyl.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweh({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpyweh({{.*}},{{.*}}):<<1:sat

declare i64 @llvm.hexagon.M2.mmpyh.s0(i64, i64)
define i64 @M2_mmpyh_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyh.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywoh({{.*}}, {{.*}}):sat
+; CHECK: = vmpywoh({{.*}},{{.*}}):sat

declare i64 @llvm.hexagon.M2.mmpyh.s1(i64, i64)
define i64 @M2_mmpyh_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyh.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywoh({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpywoh({{.*}},{{.*}}):<<1:sat

declare i64 @llvm.hexagon.M2.mmpyl.rs0(i64, i64)
define i64 @M2_mmpyl_rs0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyl.rs0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweh({{.*}}, {{.*}}):rnd:sat
+; CHECK: = vmpyweh({{.*}},{{.*}}):rnd:sat

declare i64 @llvm.hexagon.M2.mmpyl.rs1(i64, i64)
define i64 @M2_mmpyl_rs1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyl.rs1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweh({{.*}}, {{.*}}):<<1:rnd:sat
+; CHECK: = vmpyweh({{.*}},{{.*}}):<<1:rnd:sat

declare i64 @llvm.hexagon.M2.mmpyh.rs0(i64, i64)
define i64 @M2_mmpyh_rs0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyh.rs0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywoh({{.*}}, {{.*}}):rnd:sat
+; CHECK: = vmpywoh({{.*}},{{.*}}):rnd:sat

declare i64 @llvm.hexagon.M2.mmpyh.rs1(i64, i64)
define i64 @M2_mmpyh_rs1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyh.rs1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywoh({{.*}}, {{.*}}):<<1:rnd:sat
+; CHECK: = vmpywoh({{.*}},{{.*}}):<<1:rnd:sat

; Vector multiply word by unsigned half (32x16)
declare i64 @llvm.hexagon.M2.mmpyul.s0(i64, i64)
@@ -104,56 +104,56 @@ define i64 @M2_mmpyul_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyul.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweuh({{.*}}, {{.*}}):sat
+; CHECK: = vmpyweuh({{.*}},{{.*}}):sat
declare i64 @llvm.hexagon.M2.mmpyul.s1(i64, i64)
define i64 @M2_mmpyul_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyul.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweuh({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpyweuh({{.*}},{{.*}}):<<1:sat

declare i64 @llvm.hexagon.M2.mmpyuh.s0(i64, i64)
define i64 @M2_mmpyuh_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyuh.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywouh({{.*}}, {{.*}}):sat
+; CHECK: = vmpywouh({{.*}},{{.*}}):sat

declare i64 @llvm.hexagon.M2.mmpyuh.s1(i64, i64)
define i64 @M2_mmpyuh_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyuh.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywouh({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpywouh({{.*}},{{.*}}):<<1:sat

declare i64 @llvm.hexagon.M2.mmpyul.rs0(i64, i64)
define i64 @M2_mmpyul_rs0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyul.rs0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweuh({{.*}}, {{.*}}):rnd:sat
+; CHECK: = vmpyweuh({{.*}},{{.*}}):rnd:sat

declare i64 @llvm.hexagon.M2.mmpyul.rs1(i64, i64)
define i64 @M2_mmpyul_rs1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyul.rs1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyweuh({{.*}}, {{.*}}):<<1:rnd:sat
+; CHECK: = vmpyweuh({{.*}},{{.*}}):<<1:rnd:sat

declare i64 @llvm.hexagon.M2.mmpyuh.rs0(i64, i64)
define i64 @M2_mmpyuh_rs0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyuh.rs0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywouh({{.*}}, {{.*}}):rnd:sat
+; CHECK: = vmpywouh({{.*}},{{.*}}):rnd:sat

declare i64 @llvm.hexagon.M2.mmpyuh.rs1(i64, i64)
define i64 @M2_mmpyuh_rs1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyuh.rs1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpywouh({{.*}}, {{.*}}):<<1:rnd:sat
+; CHECK: = vmpywouh({{.*}},{{.*}}):<<1:rnd:sat

; Multiply signed halfwords
declare i64 @llvm.hexagon.M2.mpyd.ll.s0(i32, i32)
@@ -161,616 +161,616 @@ define i64 @M2_mpyd_ll_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.ll.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l)
+; CHECK: = mpy({{.*}}.l,{{.*}}.l)

declare i64 @llvm.hexagon.M2.mpyd.ll.s1(i32, i32)
define i64 @M2_mpyd_ll_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.ll.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):<<1

declare i64 @llvm.hexagon.M2.mpyd.lh.s0(i32, i32)
define i64 @M2_mpyd_lh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.lh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h)
+; CHECK: = mpy({{.*}}.l,{{.*}}.h)

declare i64 @llvm.hexagon.M2.mpyd.lh.s1(i32, i32)
define i64 @M2_mpyd_lh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.lh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):<<1

declare i64 @llvm.hexagon.M2.mpyd.hl.s0(i32, i32)
define i64 @M2_mpyd_hl_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.hl.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l)
+; CHECK: = mpy({{.*}}.h,{{.*}}.l)

declare i64 @llvm.hexagon.M2.mpyd.hl.s1(i32, i32)
define i64 @M2_mpyd_hl_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.hl.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):<<1

declare i64 @llvm.hexagon.M2.mpyd.hh.s0(i32, i32)
define i64 @M2_mpyd_hh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.hh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h)
+; CHECK: = mpy({{.*}}.h,{{.*}}.h)

declare i64 @llvm.hexagon.M2.mpyd.hh.s1(i32, i32)
define i64 @M2_mpyd_hh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.hh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):<<1

declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32, i32)
define i64 @M2_mpyd_rnd_ll_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):rnd
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):rnd

declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32, i32)
define i64 @M2_mpyd_rnd_ll_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1:rnd
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):<<1:rnd

declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32, i32)
define i64 @M2_mpyd_rnd_lh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):rnd
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):rnd

declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32, i32)
define i64 @M2_mpyd_rnd_lh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1:rnd
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):<<1:rnd

declare i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32, i32)
define i64 @M2_mpyd_rnd_hl_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):rnd
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):rnd

declare i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32, i32)
define i64 @M2_mpyd_rnd_hl_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1:rnd
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):<<1:rnd

declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32, i32)
define i64 @M2_mpyd_rnd_hh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):rnd
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):rnd

declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32, i32)
define i64 @M2_mpyd_rnd_hh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1:rnd
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):<<1:rnd

declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64, i32, i32)
define i64 @M2_mpyd_acc_ll_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.l)
+; CHECK: += mpy({{.*}}.l,{{.*}}.l)

declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64, i32, i32)
define i64 @M2_mpyd_acc_ll_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.l):<<1
+; CHECK: += mpy({{.*}}.l,{{.*}}.l):<<1

declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64, i32, i32)
define i64 @M2_mpyd_acc_lh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.h)
+; CHECK: += mpy({{.*}}.l,{{.*}}.h)

declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64, i32, i32)
define i64 @M2_mpyd_acc_lh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.h):<<1
+; CHECK: += mpy({{.*}}.l,{{.*}}.h):<<1

declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s0(i64, i32, i32)
define i64 @M2_mpyd_acc_hl_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.hl.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.l)
+; CHECK: += mpy({{.*}}.h,{{.*}}.l)

declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64, i32, i32)
define i64 @M2_mpyd_acc_hl_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.l):<<1
+; CHECK: += mpy({{.*}}.h,{{.*}}.l):<<1

declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64, i32, i32)
define i64 @M2_mpyd_acc_hh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.h)
+; CHECK: += mpy({{.*}}.h,{{.*}}.h)

declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64, i32, i32)
define i64 @M2_mpyd_acc_hh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.h):<<1
+; CHECK: += mpy({{.*}}.h,{{.*}}.h):<<1

declare i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64, i32, i32)
define i64 @M2_mpyd_nac_ll_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.l)
+; CHECK: -= mpy({{.*}}.l,{{.*}}.l)

declare i64 @llvm.hexagon.M2.mpyd.nac.ll.s1(i64, i32, i32)
define i64 @M2_mpyd_nac_ll_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.ll.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.l):<<1
+; CHECK: -= mpy({{.*}}.l,{{.*}}.l):<<1

declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64, i32, i32)
define i64 @M2_mpyd_nac_lh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.h)
+; CHECK: -= mpy({{.*}}.l,{{.*}}.h)

declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64, i32, i32)
define i64 @M2_mpyd_nac_lh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.h):<<1
+; CHECK: -= mpy({{.*}}.l,{{.*}}.h):<<1

declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64, i32, i32)
define i64 @M2_mpyd_nac_hl_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.l)
+; CHECK: -= mpy({{.*}}.h,{{.*}}.l)

declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64, i32, i32)
define i64 @M2_mpyd_nac_hl_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.l):<<1
+; CHECK: -= mpy({{.*}}.h,{{.*}}.l):<<1

declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64, i32, i32)
define i64 @M2_mpyd_nac_hh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.h)
+; CHECK: -= mpy({{.*}}.h,{{.*}}.h)

declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64, i32, i32)
define i64 @M2_mpyd_nac_hh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.h):<<1
+; CHECK: -= mpy({{.*}}.h,{{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpy.ll.s0(i32, i32)
define i32 @M2_mpy_ll_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.ll.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l)
+; CHECK: = mpy({{.*}}.l,{{.*}}.l)

declare i32 @llvm.hexagon.M2.mpy.ll.s1(i32, i32)
define i32 @M2_mpy_ll_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.ll.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):<<1

declare i32 @llvm.hexagon.M2.mpy.lh.s0(i32, i32)
define i32 @M2_mpy_lh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.lh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h)
+; CHECK: = mpy({{.*}}.l,{{.*}}.h)

declare i32 @llvm.hexagon.M2.mpy.lh.s1(i32, i32)
define i32 @M2_mpy_lh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.lh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpy.hl.s0(i32, i32)
define i32 @M2_mpy_hl_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.hl.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l)
+; CHECK: = mpy({{.*}}.h,{{.*}}.l)

declare i32 @llvm.hexagon.M2.mpy.hl.s1(i32, i32)
define i32 @M2_mpy_hl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.hl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):<<1

declare i32 @llvm.hexagon.M2.mpy.hh.s0(i32, i32)
define i32 @M2_mpy_hh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.hh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h)
+; CHECK: = mpy({{.*}}.h,{{.*}}.h)

declare i32 @llvm.hexagon.M2.mpy.hh.s1(i32, i32)
define i32 @M2_mpy_hh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.hh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32, i32)
define i32 @M2_mpy_sat_ll_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):sat

declare i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32, i32)
define i32 @M2_mpy_sat_ll_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1:sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):<<1:sat

declare i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32, i32)
define i32 @M2_mpy_sat_lh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):sat

declare i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32, i32)
define i32 @M2_mpy_sat_lh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1:sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):<<1:sat

declare i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32, i32)
define i32 @M2_mpy_sat_hl_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):sat

declare i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32, i32)
define i32 @M2_mpy_sat_hl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1:sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):<<1:sat

declare i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32, i32)
define i32 @M2_mpy_sat_hh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):sat

declare i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32, i32)
define i32 @M2_mpy_sat_hh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1:sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):<<1:sat

declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32, i32)
define i32 @M2_mpy_sat_rnd_ll_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):rnd:sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):rnd:sat

declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32, i32)
define i32 @M2_mpy_sat_rnd_ll_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1:rnd:sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.l):<<1:rnd:sat

declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32, i32)
define i32 @M2_mpy_sat_rnd_lh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):rnd:sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):rnd:sat

declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32, i32)
define i32 @M2_mpy_sat_rnd_lh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1:rnd:sat
+; CHECK: = mpy({{.*}}.l,{{.*}}.h):<<1:rnd:sat

declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32, i32)
define i32 @M2_mpy_sat_rnd_hl_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):rnd:sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):rnd:sat

declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32, i32)
define i32 @M2_mpy_sat_rnd_hl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1:rnd:sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.l):<<1:rnd:sat

declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32, i32)
define i32 @M2_mpy_sat_rnd_hh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):rnd:sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):rnd:sat

declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32, i32)
define i32 @M2_mpy_sat_rnd_hh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1:rnd:sat
+; CHECK: = mpy({{.*}}.h,{{.*}}.h):<<1:rnd:sat

declare i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32, i32, i32)
define i32 @M2_mpy_acc_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.l)
+; CHECK: += mpy({{.*}}.l,{{.*}}.l)

declare i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32, i32, i32)
define i32 @M2_mpy_acc_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.l):<<1
+; CHECK: += mpy({{.*}}.l,{{.*}}.l):<<1

declare i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32, i32, i32)
define i32 @M2_mpy_acc_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.h)
+; CHECK: += mpy({{.*}}.l,{{.*}}.h)

declare i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32, i32, i32)
define i32 @M2_mpy_acc_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.h):<<1
+; CHECK: += mpy({{.*}}.l,{{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32, i32, i32)
define i32 @M2_mpy_acc_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.l)
+; CHECK: += mpy({{.*}}.h,{{.*}}.l)
declare i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32, i32, i32)
define i32 @M2_mpy_acc_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.l):<<1
+; CHECK: += mpy({{.*}}.h,{{.*}}.l):<<1

declare i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32, i32, i32)
define i32 @M2_mpy_acc_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.h)
+; CHECK: += mpy({{.*}}.h,{{.*}}.h)

declare i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32, i32, i32)
define i32 @M2_mpy_acc_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.h):<<1
+; CHECK: += mpy({{.*}}.h,{{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32, i32, i32)
define i32 @M2_mpy_acc_sat_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.l):sat
+; CHECK: += mpy({{.*}}.l,{{.*}}.l):sat

declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32, i32, i32)
define i32 @M2_mpy_acc_sat_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.l):<<1:sat
+; CHECK: += mpy({{.*}}.l,{{.*}}.l):<<1:sat

declare i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32, i32, i32)
define i32 @M2_mpy_acc_sat_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.h):sat
+; CHECK: += mpy({{.*}}.l,{{.*}}.h):sat

declare i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32, i32, i32)
define i32 @M2_mpy_acc_sat_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.l, {{.*}}.h):<<1:sat
+; CHECK: += mpy({{.*}}.l,{{.*}}.h):<<1:sat

declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.l):sat
+; CHECK: += mpy({{.*}}.h,{{.*}}.l):sat

declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.l):<<1:sat
+; CHECK: += mpy({{.*}}.h,{{.*}}.l):<<1:sat

declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.h):sat
+; CHECK: += mpy({{.*}}.h,{{.*}}.h):sat

declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}.h, {{.*}}.h):<<1:sat
+; CHECK: += mpy({{.*}}.h,{{.*}}.h):<<1:sat

declare i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32, i32, i32)
define i32 @M2_mpy_nac_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.l)
+; CHECK: -= mpy({{.*}}.l,{{.*}}.l)

declare i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32, i32, i32)
define i32 @M2_mpy_nac_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.l):<<1
+; CHECK: -= mpy({{.*}}.l,{{.*}}.l):<<1

declare i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.h)
+; CHECK: -= mpy({{.*}}.l,{{.*}}.h)

declare i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.h):<<1
+; CHECK: -= mpy({{.*}}.l,{{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32, i32, i32)
define i32 @M2_mpy_nac_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.l)
+; CHECK: -= mpy({{.*}}.h,{{.*}}.l)

declare i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32, i32, i32)
define i32 @M2_mpy_nac_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.l):<<1
+; CHECK: -= mpy({{.*}}.h,{{.*}}.l):<<1

declare i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.h)
+; CHECK: -= mpy({{.*}}.h,{{.*}}.h)

declare i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.h):<<1
+; CHECK: -= mpy({{.*}}.h,{{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.l):sat
+; CHECK: -= mpy({{.*}}.l,{{.*}}.l):sat

declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.l):<<1:sat
+; CHECK: -= mpy({{.*}}.l,{{.*}}.l):<<1:sat

declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.h):sat
+; CHECK: -= mpy({{.*}}.l,{{.*}}.h):sat

declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.l, {{.*}}.h):<<1:sat
+; CHECK: -= mpy({{.*}}.l,{{.*}}.h):<<1:sat

declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.l):sat
+; CHECK: -= mpy({{.*}}.h,{{.*}}.l):sat

declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.l):<<1:sat
+; CHECK: -= mpy({{.*}}.h,{{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.h):sat
+; CHECK: -= mpy({{.*}}.h,{{.*}}.h):sat

declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}.h, {{.*}}.h):<<1:sat
+; CHECK: -= mpy({{.*}}.h,{{.*}}.h):<<1:sat

; Multiply unsigned halfwords
declare i64 @llvm.hexagon.M2.mpyud.ll.s0(i32, i32)
@@ -778,336 +778,336 @@ define i64 @M2_mpyud_ll_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.ll.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.l)
+; CHECK: = mpyu({{.*}}.l,{{.*}}.l)

declare i64 @llvm.hexagon.M2.mpyud.ll.s1(i32, i32)
define i64 @M2_mpyud_ll_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.ll.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.l):<<1
+; CHECK: = mpyu({{.*}}.l,{{.*}}.l):<<1

declare i64 @llvm.hexagon.M2.mpyud.lh.s0(i32, i32)
define i64 @M2_mpyud_lh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.lh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.h)
+; CHECK: = mpyu({{.*}}.l,{{.*}}.h)

declare i64 @llvm.hexagon.M2.mpyud.lh.s1(i32, i32)
define i64 @M2_mpyud_lh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.lh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.h):<<1
+; CHECK: = mpyu({{.*}}.l,{{.*}}.h):<<1

declare i64 @llvm.hexagon.M2.mpyud.hl.s0(i32, i32)
define i64 @M2_mpyud_hl_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.hl.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.l)
+; CHECK: = mpyu({{.*}}.h,{{.*}}.l)

declare i64 @llvm.hexagon.M2.mpyud.hl.s1(i32, i32)
define i64 @M2_mpyud_hl_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.hl.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.l):<<1
+; CHECK: = mpyu({{.*}}.h,{{.*}}.l):<<1

declare i64 @llvm.hexagon.M2.mpyud.hh.s0(i32, i32)
define i64 @M2_mpyud_hh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.hh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.h)
+; CHECK: = mpyu({{.*}}.h,{{.*}}.h)

declare i64 @llvm.hexagon.M2.mpyud.hh.s1(i32, i32)
define i64 @M2_mpyud_hh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.hh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.h):<<1
+; CHECK: = mpyu({{.*}}.h,{{.*}}.h):<<1

declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_ll_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.l)
+; CHECK: += mpyu({{.*}}.l,{{.*}}.l)

declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_ll_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.l):<<1
+; CHECK: += mpyu({{.*}}.l,{{.*}}.l):<<1

declare i64 @llvm.hexagon.M2.mpyud.acc.lh.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_lh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.lh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.h)
+; CHECK: += mpyu({{.*}}.l,{{.*}}.h)

declare i64 @llvm.hexagon.M2.mpyud.acc.lh.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_lh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.lh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.h):<<1
+; CHECK: += mpyu({{.*}}.l,{{.*}}.h):<<1

declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_hl_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.l)
+; CHECK: += mpyu({{.*}}.h,{{.*}}.l)

declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_hl_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.l):<<1
+; CHECK: += mpyu({{.*}}.h,{{.*}}.l):<<1

declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_hh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.h)
+; CHECK: += mpyu({{.*}}.h,{{.*}}.h)

declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_hh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.h):<<1
+; CHECK: += mpyu({{.*}}.h,{{.*}}.h):<<1

declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_ll_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.l)
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.l)

declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_ll_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.l):<<1
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.l):<<1

declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_lh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.h)
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.h)

declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_lh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.h):<<1
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.h):<<1

declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_hl_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.l)
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.l)

declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_hl_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.l):<<1
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.l):<<1

declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_hh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.hh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.h)
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.h)

declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_hh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.h):<<1
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpyu.ll.s0(i32, i32)
define i32 @M2_mpyu_ll_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.ll.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.l)
+; CHECK: = mpyu({{.*}}.l,{{.*}}.l)

declare i32 @llvm.hexagon.M2.mpyu.ll.s1(i32, i32)
define i32 @M2_mpyu_ll_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.ll.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.l):<<1
+; CHECK: = mpyu({{.*}}.l,{{.*}}.l):<<1

declare i32 @llvm.hexagon.M2.mpyu.lh.s0(i32, i32)
define i32 @M2_mpyu_lh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.lh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.h)
+; CHECK: = mpyu({{.*}}.l,{{.*}}.h)

declare i32 @llvm.hexagon.M2.mpyu.lh.s1(i32, i32)
define i32 @M2_mpyu_lh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.lh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.l, {{.*}}.h):<<1
+; CHECK: = mpyu({{.*}}.l,{{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpyu.hl.s0(i32, i32)
define i32 @M2_mpyu_hl_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.hl.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.l)
+; CHECK: = mpyu({{.*}}.h,{{.*}}.l)

declare i32 @llvm.hexagon.M2.mpyu.hl.s1(i32, i32)
define i32 @M2_mpyu_hl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.hl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.l):<<1
+; CHECK: = mpyu({{.*}}.h,{{.*}}.l):<<1

declare i32 @llvm.hexagon.M2.mpyu.hh.s0(i32, i32)
define i32 @M2_mpyu_hh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.hh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.h)
+; CHECK: = mpyu({{.*}}.h,{{.*}}.h)

declare i32 @llvm.hexagon.M2.mpyu.hh.s1(i32, i32)
define i32 @M2_mpyu_hh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.hh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}.h, {{.*}}.h):<<1
+; CHECK: = mpyu({{.*}}.h,{{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.l)
+; CHECK: += mpyu({{.*}}.l,{{.*}}.l)

declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.l):<<1
+; CHECK: += mpyu({{.*}}.l,{{.*}}.l):<<1

declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.h)
+; CHECK: += mpyu({{.*}}.l,{{.*}}.h)

declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.l, {{.*}}.h):<<1
+; CHECK: += mpyu({{.*}}.l,{{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.l)
+; CHECK: += mpyu({{.*}}.h,{{.*}}.l)

declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.l):<<1
+; CHECK: += mpyu({{.*}}.h,{{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.h)
+; CHECK: += mpyu({{.*}}.h,{{.*}}.h)

declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpyu({{.*}}.h, {{.*}}.h):<<1
+; CHECK: += mpyu({{.*}}.h,{{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.l)
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.l)

declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.l):<<1
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.l):<<1

declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.h)
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.h)

declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.l, {{.*}}.h):<<1
+; CHECK: -= mpyu({{.*}}.l,{{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.l)
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.l)

declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.l):<<1
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.l):<<1

declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.h)
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.h)

declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpyu({{.*}}.h, {{.*}}.h):<<1
+; CHECK: -= mpyu({{.*}}.h,{{.*}}.h):<<1

; Polynomial multiply words
declare i64 @llvm.hexagon.M4.pmpyw(i32, i32)
@@ -1115,14 +1115,14 @@ define i64 @M4_pmpyw(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M4.pmpyw(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = pmpyw({{.*}}, {{.*}})
+; CHECK: = pmpyw({{.*}},{{.*}})

declare i64 @llvm.hexagon.M4.pmpyw.acc(i64, i32, i32)
define i64 @M4_pmpyw_acc(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M4.pmpyw.acc(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: ^= pmpyw({{.*}}, {{.*}})
+; CHECK: ^= pmpyw({{.*}},{{.*}})

; Vector reduce multiply word by signed half
declare i64 @llvm.hexagon.M4.vrmpyoh.s0(i64, i64)
@@ -1130,56 +1130,56 @@ define i64 @M4_vrmpyoh_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M4.vrmpyoh.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpywoh({{.*}}, {{.*}})
+; CHECK: = vrmpywoh({{.*}},{{.*}})

declare i64 @llvm.hexagon.M4.vrmpyoh.s1(i64, i64)
define i64 @M4_vrmpyoh_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M4.vrmpyoh.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpywoh({{.*}}, {{.*}}):<<1
+; CHECK: = vrmpywoh({{.*}},{{.*}}):<<1

declare i64 @llvm.hexagon.M4.vrmpyeh.s0(i64, i64)
define i64 @M4_vrmpyeh_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M4.vrmpyeh.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpyweh({{.*}}, {{.*}})
+; CHECK: = vrmpyweh({{.*}},{{.*}})

declare i64 @llvm.hexagon.M4.vrmpyeh.s1(i64, i64)
define i64 @M4_vrmpyeh_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M4.vrmpyeh.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpyweh({{.*}}, {{.*}}):<<1
+; CHECK: = vrmpyweh({{.*}},{{.*}}):<<1

declare i64 @llvm.hexagon.M4.vrmpyoh.acc.s0(i64, i64, i64)
define i64 @M4_vrmpyoh_acc_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M4.vrmpyoh.acc.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpywoh({{.*}}, r5:4)
+; CHECK: += vrmpywoh({{.*}},r5:4)

declare i64 @llvm.hexagon.M4.vrmpyoh.acc.s1(i64, i64, i64)
define i64 @M4_vrmpyoh_acc_s1(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M4.vrmpyoh.acc.s1(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpywoh({{.*}}, r5:4):<<1
+; CHECK: += vrmpywoh({{.*}},r5:4):<<1

declare i64 @llvm.hexagon.M4.vrmpyeh.acc.s0(i64, i64, i64)
define i64 @M4_vrmpyeh_acc_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M4.vrmpyeh.acc.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpyweh({{.*}}, r5:4)
+; CHECK: += vrmpyweh({{.*}},r5:4)

declare i64 @llvm.hexagon.M4.vrmpyeh.acc.s1(i64, i64, i64)
define i64 @M4_vrmpyeh_acc_s1(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M4.vrmpyeh.acc.s1(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpyweh({{.*}}, r5:4):<<1
+; CHECK: += vrmpyweh({{.*}},r5:4):<<1

; Multiply and use upper result
declare i32 @llvm.hexagon.M2.dpmpyss.rnd.s0(i32, i32)
@@ -1187,84 +1187,84 @@ define i32 @M2_dpmpyss_rnd_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.dpmpyss.rnd.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}):rnd
+; CHECK: = mpy({{.*}},{{.*}}):rnd

declare i32 @llvm.hexagon.M2.mpyu.up(i32, i32)
define i32 @M2_mpyu_up(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.up(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpyu({{.*}}, {{.*}})
+; CHECK: = mpyu({{.*}},{{.*}})

declare i32 @llvm.hexagon.M2.mpysu.up(i32, i32)
define i32 @M2_mpysu_up(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpysu.up(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpysu({{.*}}, {{.*}})
+; CHECK: = mpysu({{.*}},{{.*}})

declare i32 @llvm.hexagon.M2.hmmpyh.s1(i32, i32)
define i32 @M2_hmmpyh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.hmmpyh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}.h):<<1:sat
+; CHECK: = mpy({{.*}},{{.*}}.h):<<1:sat

declare i32 @llvm.hexagon.M2.hmmpyl.s1(i32, i32)
define i32 @M2_hmmpyl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.hmmpyl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}.l):<<1:sat
+; CHECK: = mpy({{.*}},{{.*}}.l):<<1:sat

declare i32 @llvm.hexagon.M2.hmmpyh.rs1(i32, i32)
define i32 @M2_hmmpyh_rs1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.hmmpyh.rs1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}.h):<<1:rnd:sat
+; CHECK: = mpy({{.*}},{{.*}}.h):<<1:rnd:sat

declare i32 @llvm.hexagon.M2.mpy.up.s1.sat(i32, i32)
define i32 @M2_mpy_up_s1_sat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.up.s1.sat(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}):<<1:sat
+; CHECK: = mpy({{.*}},{{.*}}):<<1:sat

declare i32 @llvm.hexagon.M2.hmmpyl.rs1(i32, i32)
define i32 @M2_hmmpyl_rs1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.hmmpyl.rs1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}.l):<<1:rnd:sat
+; CHECK: = mpy({{.*}},{{.*}}.l):<<1:rnd:sat

declare i32 @llvm.hexagon.M2.mpy.up(i32, i32)
define i32 @M2_mpy_up(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.up(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}})
+; CHECK: = mpy({{.*}},{{.*}})

declare i32 @llvm.hexagon.M2.mpy.up.s1(i32, i32)
define i32 @M2_mpy_up_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.up.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: = mpy({{.*}}, {{.*}}):<<1
+; CHECK: = mpy({{.*}},{{.*}}):<<1

declare i32 @llvm.hexagon.M4.mac.up.s1.sat(i32, i32, i32)
define i32 @M4_mac_up_s1_sat(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.mac.up.s1.sat(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: += mpy({{.*}}, {{.*}}):<<1:sat
+; CHECK: += mpy({{.*}},{{.*}}):<<1:sat

declare i32 @llvm.hexagon.M4.nac.up.s1.sat(i32, i32, i32)
define i32 @M4_nac_up_s1_sat(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.nac.up.s1.sat(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: -= mpy({{.*}}, {{.*}}):<<1:sat
+; CHECK: -= mpy({{.*}},{{.*}}):<<1:sat

; Multiply and use full result
declare i64 @llvm.hexagon.M2.dpmpyss.s0(i32, i32)
@@ -1272,42 +1272,42 @@ define i64 @M2_dpmpyss_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpy({{.*}}, {{.*}})
+; CHECK: = mpy({{.*}},{{.*}})

declare i64 @llvm.hexagon.M2.dpmpyuu.s0(i32, i32)
define i64 @M2_dpmpyuu_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.dpmpyuu.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = mpyu({{.*}}, {{.*}})
+; CHECK: = mpyu({{.*}},{{.*}})

declare i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64, i32, i32)
define i64 @M2_dpmpyss_acc_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpy({{.*}}, {{.*}})
+; CHECK: += mpy({{.*}},{{.*}})

declare i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64, i32, i32)
define i64 @M2_dpmpyss_nac_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpy({{.*}}, {{.*}})
+; CHECK: -= mpy({{.*}},{{.*}})

declare i64 @llvm.hexagon.M2.dpmpyuu.acc.s0(i64, i32, i32)
define i64 @M2_dpmpyuu_acc_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.dpmpyuu.acc.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += mpyu({{.*}}, {{.*}})
+; CHECK: += mpyu({{.*}},{{.*}})

declare i64 @llvm.hexagon.M2.dpmpyuu.nac.s0(i64, i32, i32)
define i64 @M2_dpmpyuu_nac_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.dpmpyuu.nac.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: -= mpyu({{.*}}, {{.*}})
+; CHECK: -= mpyu({{.*}},{{.*}})

; Vector dual multiply
declare i64 @llvm.hexagon.M2.vdmpys.s0(i64, i64)
@@ -1315,14 +1315,14 @@ define i64 @M2_vdmpys_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vdmpys.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vdmpy({{.*}}, {{.*}}):sat
+; CHECK: = vdmpy({{.*}},{{.*}}):sat

declare i64 @llvm.hexagon.M2.vdmpys.s1(i64, i64)
define i64 @M2_vdmpys_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vdmpys.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vdmpy({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vdmpy({{.*}},{{.*}}):<<1:sat

; Vector reduce multiply bytes
declare i64 @llvm.hexagon.M5.vrmpybuu(i64, i64)
@@ -1330,28 +1330,28 @@ define i64 @M5_vrmpybuu(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M5.vrmpybuu(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpybu({{.*}}, {{.*}})
+; CHECK: = vrmpybu({{.*}},{{.*}})

declare i64 @llvm.hexagon.M5.vrmpybsu(i64, i64)
define i64 @M5_vrmpybsu(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M5.vrmpybsu(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vrmpybsu({{.*}}, {{.*}})
+; CHECK: = vrmpybsu({{.*}},{{.*}})

declare i64 @llvm.hexagon.M5.vrmacbuu(i64, i64, i64)
define i64 @M5_vrmacbuu(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M5.vrmacbuu(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpybu({{.*}}, r5:4)
+; CHECK: += vrmpybu({{.*}},r5:4)

declare i64 @llvm.hexagon.M5.vrmacbsu(i64, i64, i64)
define i64 @M5_vrmacbsu(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M5.vrmacbsu(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vrmpybsu({{.*}}, r5:4)
+; CHECK: += vrmpybsu({{.*}},r5:4)

; Vector dual multiply signed by unsigned bytes
declare i64 @llvm.hexagon.M5.vdmpybsu(i64, i64)
@@ -1359,14 +1359,14 @@ define i64 @M5_vdmpybsu(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M5.vdmpybsu(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vdmpybsu({{.*}}, {{.*}}):sat
+; CHECK: = vdmpybsu({{.*}},{{.*}}):sat

declare i64 @llvm.hexagon.M5.vdmacbsu(i64, i64, i64)
define i64 @M5_vdmacbsu(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M5.vdmacbsu(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vdmpybsu({{.*}}, r5:4):sat
+; CHECK: += vdmpybsu({{.*}},r5:4):sat

; Vector multiply even halfwords
declare i64 @llvm.hexagon.M2.vmpy2es.s0(i64, i64)
@@ -1374,35 +1374,35 @@ define i64 @M2_vmpy2es_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2es.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyeh({{.*}}, {{.*}}):sat
+; CHECK: = vmpyeh({{.*}},{{.*}}):sat

declare i64 @llvm.hexagon.M2.vmpy2es.s1(i64, i64)
define i64 @M2_vmpy2es_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2es.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: = vmpyeh({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpyeh({{.*}},{{.*}}):<<1:sat

declare i64 @llvm.hexagon.M2.vmac2es(i64, i64, i64)
define i64 @M2_vmac2es(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2es(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vmpyeh({{.*}}, r5:4)
+; CHECK: += vmpyeh({{.*}},r5:4)

declare i64 @llvm.hexagon.M2.vmac2es.s0(i64, i64, i64)
define i64 @M2_vmac2es_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2es.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vmpyeh({{.*}}, r5:4):sat
+; CHECK: += vmpyeh({{.*}},r5:4):sat

declare i64 @llvm.hexagon.M2.vmac2es.s1(i64, i64, i64)
define i64 @M2_vmac2es_s1(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2es.s1(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: += vmpyeh({{.*}}, r5:4):<<1:sat
+; CHECK: += vmpyeh({{.*}},r5:4):<<1:sat

; Vector multiply halfwords
declare i64 @llvm.hexagon.M2.vmpy2s.s0(i32, i32)
@@ -1410,35 +1410,35 @@ define i64 @M2_vmpy2s_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2s.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vmpyh({{.*}}, {{.*}}):sat
+; CHECK: = vmpyh({{.*}},{{.*}}):sat

declare i64 @llvm.hexagon.M2.vmpy2s.s1(i32, i32)
define i64 @M2_vmpy2s_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2s.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: = vmpyh({{.*}}, {{.*}}):<<1:sat
+; CHECK: = vmpyh({{.*}},{{.*}}):<<1:sat

declare i64 @llvm.hexagon.M2.vmac2(i64, i32, i32)
define i64 @M2_vmac2(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: += vmpyh({{.*}}, {{.*}})
+; CHECK: += vmpyh({{.*}},{{.*}})
CHECK: += vmpyh({{.*}},{{.*}}) declare i64 @llvm.hexagon.M2.vmac2s.s0(i64, i32, i32) define i64 @M2_vmac2s_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.vmac2s.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += vmpyh({{.*}}, {{.*}}):sat +; CHECK: += vmpyh({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.vmac2s.s1(i64, i32, i32) define i64 @M2_vmac2s_s1(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.vmac2s.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += vmpyh({{.*}}, {{.*}}):<<1:sat +; CHECK: += vmpyh({{.*}},{{.*}}):<<1:sat ; Vector multiply halfwords signed by unsigned declare i64 @llvm.hexagon.M2.vmpy2su.s0(i32, i32) @@ -1446,28 +1446,28 @@ define i64 @M2_vmpy2su_s0(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.vmpy2su.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: = vmpyhsu({{.*}}, {{.*}}):sat +; CHECK: = vmpyhsu({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.vmpy2su.s1(i32, i32) define i64 @M2_vmpy2su_s1(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.vmpy2su.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: = vmpyhsu({{.*}}, {{.*}}):<<1:sat +; CHECK: = vmpyhsu({{.*}},{{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.vmac2su.s0(i64, i32, i32) define i64 @M2_vmac2su_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.vmac2su.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += vmpyhsu({{.*}}, {{.*}}):sat +; CHECK: += vmpyhsu({{.*}},{{.*}}):sat declare i64 @llvm.hexagon.M2.vmac2su.s1(i64, i32, i32) define i64 @M2_vmac2su_s1(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.vmac2su.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += vmpyhsu({{.*}}, {{.*}}):<<1:sat +; CHECK: += vmpyhsu({{.*}},{{.*}}):<<1:sat ; Vector reduce multiply halfwords declare i64 @llvm.hexagon.M2.vrmpy.s0(i64, i64) @@ -1475,14 +1475,14 @@ define i64 @M2_vrmpy_s0(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vrmpy.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vrmpyh({{.*}}, {{.*}}) +; CHECK: = vrmpyh({{.*}},{{.*}}) declare i64 @llvm.hexagon.M2.vrmac.s0(i64, i64, i64) define i64 @M2_vrmac_s0(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.M2.vrmac.s0(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: += vrmpyh({{.*}}, r5:4) +; CHECK: += vrmpyh({{.*}},r5:4) ; Vector multiply bytes declare i64 @llvm.hexagon.M5.vmpybsu(i32, i32) @@ -1490,28 +1490,28 @@ define i64 @M2_vmpybsu(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M5.vmpybsu(i32 %a, i32 %b) ret i64 %z } -; CHECK: = vmpybsu({{.*}}, {{.*}}) +; CHECK: = vmpybsu({{.*}},{{.*}}) declare i64 @llvm.hexagon.M5.vmpybuu(i32, i32) define i64 @M2_vmpybuu(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M5.vmpybuu(i32 %a, i32 %b) ret i64 %z } -; CHECK: = vmpybu({{.*}}, {{.*}}) +; CHECK: = vmpybu({{.*}},{{.*}}) declare i64 @llvm.hexagon.M5.vmacbuu(i64, i32, i32) define i64 @M2_vmacbuu(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M5.vmacbuu(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += vmpybu({{.*}}, {{.*}}) +; CHECK: += vmpybu({{.*}},{{.*}}) declare i64 @llvm.hexagon.M5.vmacbsu(i64, i32, i32) define i64 @M2_vmacbsu(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M5.vmacbsu(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: += vmpybsu({{.*}}, {{.*}}) +; CHECK: += vmpybsu({{.*}},{{.*}}) ; Vector polynomial multiply halfwords declare i64 @llvm.hexagon.M4.vpmpyh(i32, i32) @@ -1519,11 +1519,11 @@ define i64 @M4_vpmpyh(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M4.vpmpyh(i32 %a, i32 %b) ret i64 %z } -; CHECK: = vpmpyh({{.*}}, {{.*}}) +; CHECK: = vpmpyh({{.*}},{{.*}}) declare i64 @llvm.hexagon.M4.vpmpyh.acc(i64, i32, i32) define i64 
@M4_vpmpyh_acc(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M4.vpmpyh.acc(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: ^= vpmpyh({{.*}}, {{.*}}) +; CHECK: ^= vpmpyh({{.*}},{{.*}}) diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll b/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll index 3e044e3838de..9260790e33a6 100644 --- a/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll +++ b/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll @@ -141,28 +141,28 @@ define i64 @S2_shuffeb(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.shuffeb(i64 %a, i64 %b) ret i64 %z } -; CHECK: = shuffeb({{.*}}, {{.*}}) +; CHECK: = shuffeb({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.shuffob(i64, i64) define i64 @S2_shuffob(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.shuffob(i64 %a, i64 %b) ret i64 %z } -; CHECK: = shuffob({{.*}}, {{.*}}) +; CHECK: = shuffob({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.shuffeh(i64, i64) define i64 @S2_shuffeh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.shuffeh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = shuffeh({{.*}}, {{.*}}) +; CHECK: = shuffeh({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.shuffoh(i64, i64) define i64 @S2_shuffoh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.shuffoh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = shuffoh({{.*}}, {{.*}}) +; CHECK: = shuffoh({{.*}},{{.*}}) ; Vector splat bytes declare i32 @llvm.hexagon.S2.vsplatrb(i32) @@ -186,14 +186,14 @@ define i64 @S2_vspliceib(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.vspliceib(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: = vspliceb({{.*}}, {{.*}}, #0) +; CHECK: = vspliceb({{.*}},{{.*}},#0) declare i64 @llvm.hexagon.S2.vsplicerb(i64, i64, i32) define i64 @S2_vsplicerb(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.vsplicerb(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: = vspliceb({{.*}}, {{.*}}, {{.*}}) +; CHECK: = vspliceb({{.*}},{{.*}},{{.*}}) ; Vector sign extend declare i64 @llvm.hexagon.S2.vsxtbh(i32) @@ -230,14 +230,14 @@ define i64 @S2_vtrunowh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.vtrunowh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vtrunowh({{.*}}, {{.*}}) +; CHECK: = vtrunowh({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.vtrunewh(i64, i64) define i64 @S2_vtrunewh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.vtrunewh(i64 %a, i64 %b) ret i64 %z } -; CHECK: = vtrunewh({{.*}}, {{.*}}) +; CHECK: = vtrunewh({{.*}},{{.*}}) ; Vector zero extend declare i64 @llvm.hexagon.S2.vzxtbh(i32) diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll b/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll index f06339b9a85a..506dc88d3c1a 100644 --- a/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll +++ b/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll @@ -10,42 +10,42 @@ define i32 @A4_cmpbgt(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A4.cmpbgt(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmpb.gt({{.*}}, {{.*}}) +; CHECK: = cmpb.gt({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.cmpbeq(i32, i32) define i32 @A4_cmpbeq(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A4.cmpbeq(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmpb.eq({{.*}}, {{.*}}) +; CHECK: = cmpb.eq({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.cmpbgtu(i32, i32) define i32 @A4_cmpbgtu(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A4.cmpbgtu(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmpb.gtu({{.*}}, {{.*}}) +; CHECK: = cmpb.gtu({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.cmpbgti(i32, i32) define i32 @A4_cmpbgti(i32 %a) { %z = call i32 @llvm.hexagon.A4.cmpbgti(i32 %a, i32 0) ret i32 %z } -; CHECK: = cmpb.gt({{.*}}, #0) +; CHECK: = 
cmpb.gt({{.*}},#0) declare i32 @llvm.hexagon.A4.cmpbeqi(i32, i32) define i32 @A4_cmpbeqi(i32 %a) { %z = call i32 @llvm.hexagon.A4.cmpbeqi(i32 %a, i32 0) ret i32 %z } -; CHECK: = cmpb.eq({{.*}}, #0) +; CHECK: = cmpb.eq({{.*}},#0) declare i32 @llvm.hexagon.A4.cmpbgtui(i32, i32) define i32 @A4_cmpbgtui(i32 %a) { %z = call i32 @llvm.hexagon.A4.cmpbgtui(i32 %a, i32 0) ret i32 %z } -; CHECK: = cmpb.gtu({{.*}}, #0) +; CHECK: = cmpb.gtu({{.*}},#0) ; Compare half declare i32 @llvm.hexagon.A4.cmphgt(i32, i32) @@ -53,42 +53,42 @@ define i32 @A4_cmphgt(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A4.cmphgt(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmph.gt({{.*}}, {{.*}}) +; CHECK: = cmph.gt({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.cmpheq(i32, i32) define i32 @A4_cmpheq(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A4.cmpheq(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmph.eq({{.*}}, {{.*}}) +; CHECK: = cmph.eq({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.cmphgtu(i32, i32) define i32 @A4_cmphgtu(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A4.cmphgtu(i32 %a, i32 %b) ret i32 %z } -; CHECK: = cmph.gtu({{.*}}, {{.*}}) +; CHECK: = cmph.gtu({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.cmphgti(i32, i32) define i32 @A4_cmphgti(i32 %a) { %z = call i32 @llvm.hexagon.A4.cmphgti(i32 %a, i32 0) ret i32 %z } -; CHECK: = cmph.gt({{.*}}, #0) +; CHECK: = cmph.gt({{.*}},#0) declare i32 @llvm.hexagon.A4.cmpheqi(i32, i32) define i32 @A4_cmpheqi(i32 %a) { %z = call i32 @llvm.hexagon.A4.cmpheqi(i32 %a, i32 0) ret i32 %z } -; CHECK: = cmph.eq({{.*}}, #0) +; CHECK: = cmph.eq({{.*}},#0) declare i32 @llvm.hexagon.A4.cmphgtui(i32, i32) define i32 @A4_cmphgtui(i32 %a) { %z = call i32 @llvm.hexagon.A4.cmphgtui(i32 %a, i32 0) ret i32 %z } -; CHECK: = cmph.gtu({{.*}}, #0) +; CHECK: = cmph.gtu({{.*}},#0) ; Compare doublewords declare i32 @llvm.hexagon.C2.cmpgtp(i64, i64) @@ -96,21 +96,21 @@ define i32 @C2_cmpgtp(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.C2.cmpgtp(i64 %a, i64 %b) ret i32 %z } -; CHECK: = cmp.gt({{.*}}, {{.*}}) +; CHECK: = cmp.gt({{.*}},{{.*}}) declare i32 @llvm.hexagon.C2.cmpeqp(i64, i64) define i32 @C2_cmpeqp(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.C2.cmpeqp(i64 %a, i64 %b) ret i32 %z } -; CHECK: = cmp.eq({{.*}}, {{.*}}) +; CHECK: = cmp.eq({{.*}},{{.*}}) declare i32 @llvm.hexagon.C2.cmpgtup(i64, i64) define i32 @C2_cmpgtup(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.C2.cmpgtup(i64 %a, i64 %b) ret i32 %z } -; CHECK: = cmp.gtu({{.*}}, {{.*}}) +; CHECK: = cmp.gtu({{.*}},{{.*}}) ; Compare bitmask declare i32 @llvm.hexagon.C2.bitsclri(i32, i32) @@ -118,42 +118,42 @@ define i32 @C2_bitsclri(i32 %a) { %z = call i32 @llvm.hexagon.C2.bitsclri(i32 %a, i32 0) ret i32 %z } -; CHECK: = bitsclr({{.*}}, #0) +; CHECK: = bitsclr({{.*}},#0) declare i32 @llvm.hexagon.C4.nbitsclri(i32, i32) define i32 @C4_nbitsclri(i32 %a) { %z = call i32 @llvm.hexagon.C4.nbitsclri(i32 %a, i32 0) ret i32 %z } -; CHECK: = !bitsclr({{.*}}, #0) +; CHECK: = !bitsclr({{.*}},#0) declare i32 @llvm.hexagon.C2.bitsset(i32, i32) define i32 @C2_bitsset(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.C2.bitsset(i32 %a, i32 %b) ret i32 %z } -; CHECK: = bitsset({{.*}}, {{.*}}) +; CHECK: = bitsset({{.*}},{{.*}}) declare i32 @llvm.hexagon.C4.nbitsset(i32, i32) define i32 @C4_nbitsset(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.C4.nbitsset(i32 %a, i32 %b) ret i32 %z } -; CHECK: = !bitsset({{.*}}, {{.*}}) +; CHECK: = !bitsset({{.*}},{{.*}}) declare i32 @llvm.hexagon.C2.bitsclr(i32, i32) define i32 @C2_bitsclr(i32 %a, i32 %b) { %z = call i32 
@llvm.hexagon.C2.bitsclr(i32 %a, i32 %b) ret i32 %z } -; CHECK: = bitsclr({{.*}}, {{.*}}) +; CHECK: = bitsclr({{.*}},{{.*}}) declare i32 @llvm.hexagon.C4.nbitsclr(i32, i32) define i32 @C4_nbitsclr(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.C4.nbitsclr(i32 %a, i32 %b) ret i32 %z } -; CHECK: = !bitsclr({{.*}}, {{.*}}) +; CHECK: = !bitsclr({{.*}},{{.*}}) ; Mask generate from predicate declare i64 @llvm.hexagon.C2.mask(i32) @@ -169,7 +169,7 @@ define i32 @A4_tlbmatch(i64 %a, i32 %b) { %z = call i32 @llvm.hexagon.A4.tlbmatch(i64 %a, i32 %b) ret i32 %z } -; CHECK: = tlbmatch({{.*}}, {{.*}}) +; CHECK: = tlbmatch({{.*}},{{.*}}) ; Test bit declare i32 @llvm.hexagon.S2.tstbit.i(i32, i32) @@ -177,28 +177,28 @@ define i32 @S2_tstbit_i(i32 %a) { %z = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a, i32 0) ret i32 %z } -; CHECK: = tstbit({{.*}}, #0) +; CHECK: = tstbit({{.*}},#0) declare i32 @llvm.hexagon.S4.ntstbit.i(i32, i32) define i32 @S4_ntstbit_i(i32 %a) { %z = call i32 @llvm.hexagon.S4.ntstbit.i(i32 %a, i32 0) ret i32 %z } -; CHECK: = !tstbit({{.*}}, #0) +; CHECK: = !tstbit({{.*}},#0) declare i32 @llvm.hexagon.S2.tstbit.r(i32, i32) define i32 @S2_tstbit_r(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.tstbit.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: = tstbit({{.*}}, {{.*}}) +; CHECK: = tstbit({{.*}},{{.*}}) declare i32 @llvm.hexagon.S4.ntstbit.r(i32, i32) define i32 @S4_ntstbit_r(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S4.ntstbit.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: = !tstbit({{.*}}, {{.*}}) +; CHECK: = !tstbit({{.*}},{{.*}}) ; Vector compare halfwords declare i32 @llvm.hexagon.A2.vcmpheq(i64, i64) @@ -206,42 +206,42 @@ define i32 @A2_vcmpheq(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.A2.vcmpheq(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vcmph.eq({{.*}}, {{.*}}) +; CHECK: = vcmph.eq({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.vcmphgt(i64, i64) define i32 @A2_vcmphgt(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.A2.vcmphgt(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vcmph.gt({{.*}}, {{.*}}) +; CHECK: = vcmph.gt({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.vcmphgtu(i64, i64) define i32 @A2_vcmphgtu(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.A2.vcmphgtu(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vcmph.gtu({{.*}}, {{.*}}) +; CHECK: = vcmph.gtu({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.vcmpheqi(i64, i32) define i32 @A4_vcmpheqi(i64 %a) { %z = call i32 @llvm.hexagon.A4.vcmpheqi(i64 %a, i32 0) ret i32 %z } -; CHECK: = vcmph.eq({{.*}}, #0) +; CHECK: = vcmph.eq({{.*}},#0) declare i32 @llvm.hexagon.A4.vcmphgti(i64, i32) define i32 @A4_vcmphgti(i64 %a) { %z = call i32 @llvm.hexagon.A4.vcmphgti(i64 %a, i32 0) ret i32 %z } -; CHECK: = vcmph.gt({{.*}}, #0) +; CHECK: = vcmph.gt({{.*}},#0) declare i32 @llvm.hexagon.A4.vcmphgtui(i64, i32) define i32 @A4_vcmphgtui(i64 %a) { %z = call i32 @llvm.hexagon.A4.vcmphgtui(i64 %a, i32 0) ret i32 %z } -; CHECK: = vcmph.gtu({{.*}}, #0) +; CHECK: = vcmph.gtu({{.*}},#0) ; Vector compare bytes for any match declare i32 @llvm.hexagon.A4.vcmpbeq.any(i64, i64) @@ -249,7 +249,7 @@ define i32 @A4_vcmpbeq_any(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.A4.vcmpbeq.any(i64 %a, i64 %b) ret i32 %z } -; CHECK: = any8(vcmpb.eq({{.*}}, {{.*}})) +; CHECK: = any8(vcmpb.eq({{.*}},{{.*}})) ; Vector compare bytes declare i32 @llvm.hexagon.A2.vcmpbeq(i64, i64) @@ -257,42 +257,42 @@ define i32 @A2_vcmpbeq(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.A2.vcmpbeq(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vcmpb.eq({{.*}}, {{.*}}) +; CHECK: = vcmpb.eq({{.*}},{{.*}}) declare i32 
@llvm.hexagon.A2.vcmpbgtu(i64, i64) define i32 @A2_vcmpbgtu(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.A2.vcmpbgtu(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vcmpb.gtu({{.*}}, {{.*}}) +; CHECK: = vcmpb.gtu({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.vcmpbgt(i64, i64) define i32 @A4_vcmpbgt(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.A4.vcmpbgt(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vcmpb.gt({{.*}}, {{.*}}) +; CHECK: = vcmpb.gt({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.vcmpbeqi(i64, i32) define i32 @A4_vcmpbeqi(i64 %a) { %z = call i32 @llvm.hexagon.A4.vcmpbeqi(i64 %a, i32 0) ret i32 %z } -; CHECK: = vcmpb.eq({{.*}}, #0) +; CHECK: = vcmpb.eq({{.*}},#0) declare i32 @llvm.hexagon.A4.vcmpbgti(i64, i32) define i32 @A4_vcmpbgti(i64 %a) { %z = call i32 @llvm.hexagon.A4.vcmpbgti(i64 %a, i32 0) ret i32 %z } -; CHECK: = vcmpb.gt({{.*}}, #0) +; CHECK: = vcmpb.gt({{.*}},#0) declare i32 @llvm.hexagon.A4.vcmpbgtui(i64, i32) define i32 @A4_vcmpbgtui(i64 %a) { %z = call i32 @llvm.hexagon.A4.vcmpbgtui(i64 %a, i32 0) ret i32 %z } -; CHECK: = vcmpb.gtu({{.*}}, #0) +; CHECK: = vcmpb.gtu({{.*}},#0) ; Vector compare words declare i32 @llvm.hexagon.A2.vcmpweq(i64, i64) @@ -300,42 +300,42 @@ define i32 @A2_vcmpweq(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.A2.vcmpweq(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vcmpw.eq({{.*}}, {{.*}}) +; CHECK: = vcmpw.eq({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.vcmpwgt(i64, i64) define i32 @A2_vcmpwgt(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.A2.vcmpwgt(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vcmpw.gt({{.*}}, {{.*}}) +; CHECK: = vcmpw.gt({{.*}},{{.*}}) declare i32 @llvm.hexagon.A2.vcmpwgtu(i64, i64) define i32 @A2_vcmpwgtu(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.A2.vcmpwgtu(i64 %a, i64 %b) ret i32 %z } -; CHECK: = vcmpw.gtu({{.*}}, {{.*}}) +; CHECK: = vcmpw.gtu({{.*}},{{.*}}) declare i32 @llvm.hexagon.A4.vcmpweqi(i64, i32) define i32 @A4_vcmpweqi(i64 %a) { %z = call i32 @llvm.hexagon.A4.vcmpweqi(i64 %a, i32 0) ret i32 %z } -; CHECK: = vcmpw.eq({{.*}}, #0) +; CHECK: = vcmpw.eq({{.*}},#0) declare i32 @llvm.hexagon.A4.vcmpwgti(i64, i32) define i32 @A4_vcmpwgti(i64 %a) { %z = call i32 @llvm.hexagon.A4.vcmpwgti(i64 %a, i32 0) ret i32 %z } -; CHECK: = vcmpw.gt({{.*}}, #0) +; CHECK: = vcmpw.gt({{.*}},#0) declare i32 @llvm.hexagon.A4.vcmpwgtui(i64, i32) define i32 @A4_vcmpwgtui(i64 %a) { %z = call i32 @llvm.hexagon.A4.vcmpwgtui(i64 %a, i32 0) ret i32 %z } -; CHECK: = vcmpw.gtu({{.*}}, #0) +; CHECK: = vcmpw.gtu({{.*}},#0) ; Viterbi pack even and odd predicate bits declare i32 @llvm.hexagon.C2.vitpack(i32, i32) @@ -343,7 +343,7 @@ define i32 @C2_vitpack(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.C2.vitpack(i32 %a, i32 %b) ret i32 %z } -; CHECK: = vitpack({{.*}}, {{.*}}) +; CHECK: = vitpack({{.*}},{{.*}}) ; Vector mux declare i64 @llvm.hexagon.C2.vmux(i32, i64, i64) @@ -351,4 +351,4 @@ define i64 @C2_vmux(i32 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.C2.vmux(i32 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: = vmux({{.*}}, {{.*}}, {{.*}}) +; CHECK: = vmux({{.*}},{{.*}},{{.*}}) diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll b/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll index 1a65f44c1954..8809baf3551b 100644 --- a/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll +++ b/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll @@ -10,42 +10,42 @@ define i64 @S2_asr_i_p(i64 %a) { %z = call i64 @llvm.hexagon.S2.asr.i.p(i64 %a, i32 0) ret i64 %z } -; CHECK: = asr({{.*}}, #0) +; CHECK: = asr({{.*}},#0) declare i64 @llvm.hexagon.S2.lsr.i.p(i64, i32) define i64
@S2_lsr_i_p(i64 %a) { %z = call i64 @llvm.hexagon.S2.lsr.i.p(i64 %a, i32 0) ret i64 %z } -; CHECK: = lsr({{.*}}, #0) +; CHECK: = lsr({{.*}},#0) declare i64 @llvm.hexagon.S2.asl.i.p(i64, i32) define i64 @S2_asl_i_p(i64 %a) { %z = call i64 @llvm.hexagon.S2.asl.i.p(i64 %a, i32 0) ret i64 %z } -; CHECK: = asl({{.*}}, #0) +; CHECK: = asl({{.*}},#0) declare i32 @llvm.hexagon.S2.asr.i.r(i32, i32) define i32 @S2_asr_i_r(i32 %a) { %z = call i32 @llvm.hexagon.S2.asr.i.r(i32 %a, i32 0) ret i32 %z } -; CHECK: = asr({{.*}}, #0) +; CHECK: = asr({{.*}},#0) declare i32 @llvm.hexagon.S2.lsr.i.r(i32, i32) define i32 @S2_lsr_i_r(i32 %a) { %z = call i32 @llvm.hexagon.S2.lsr.i.r(i32 %a, i32 0) ret i32 %z } -; CHECK: = lsr({{.*}}, #0) +; CHECK: = lsr({{.*}},#0) declare i32 @llvm.hexagon.S2.asl.i.r(i32, i32) define i32 @S2_asl_i_r(i32 %a) { %z = call i32 @llvm.hexagon.S2.asl.i.r(i32 %a, i32 0) ret i32 %z } -; CHECK: = asl({{.*}}, #0) +; CHECK: = asl({{.*}},#0) ; Shift by immediate and accumulate declare i64 @llvm.hexagon.S2.asr.i.p.nac(i64, i64, i32) @@ -53,84 +53,84 @@ define i64 @S2_asr_i_p_nac(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.asr.i.p.nac(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: -= asr({{.*}}, #0) +; CHECK: -= asr({{.*}},#0) declare i64 @llvm.hexagon.S2.lsr.i.p.nac(i64, i64, i32) define i64 @S2_lsr_i_p_nac(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.lsr.i.p.nac(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: -= lsr({{.*}}, #0) +; CHECK: -= lsr({{.*}},#0) declare i64 @llvm.hexagon.S2.asl.i.p.nac(i64, i64, i32) define i64 @S2_asl_i_p_nac(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.asl.i.p.nac(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: -= asl({{.*}}, #0) +; CHECK: -= asl({{.*}},#0) declare i64 @llvm.hexagon.S2.asr.i.p.acc(i64, i64, i32) define i64 @S2_asr_i_p_acc(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.asr.i.p.acc(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: += asr({{.*}}, #0) +; CHECK: += asr({{.*}},#0) declare i64 @llvm.hexagon.S2.lsr.i.p.acc(i64, i64, i32) define i64 @S2_lsr_i_p_acc(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.lsr.i.p.acc(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: += lsr({{.*}}, #0) +; CHECK: += lsr({{.*}},#0) declare i64 @llvm.hexagon.S2.asl.i.p.acc(i64, i64, i32) define i64 @S2_asl_i_p_acc(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.asl.i.p.acc(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: += asl({{.*}}, #0) +; CHECK: += asl({{.*}},#0) declare i32 @llvm.hexagon.S2.asr.i.r.nac(i32, i32, i32) define i32 @S2_asr_i_r_nac(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.asr.i.r.nac(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: -= asr({{.*}}, #0) +; CHECK: -= asr({{.*}},#0) declare i32 @llvm.hexagon.S2.lsr.i.r.nac(i32, i32, i32) define i32 @S2_lsr_i_r_nac(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.lsr.i.r.nac(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: -= lsr({{.*}}, #0) +; CHECK: -= lsr({{.*}},#0) declare i32 @llvm.hexagon.S2.asl.i.r.nac(i32, i32, i32) define i32 @S2_asl_i_r_nac(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.asl.i.r.nac(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: -= asl({{.*}}, #0) +; CHECK: -= asl({{.*}},#0) declare i32 @llvm.hexagon.S2.asr.i.r.acc(i32, i32, i32) define i32 @S2_asr_i_r_acc(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.asr.i.r.acc(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: += asr({{.*}}, #0) +; CHECK: += asr({{.*}},#0) declare i32 @llvm.hexagon.S2.lsr.i.r.acc(i32, i32, i32) define i32 @S2_lsr_i_r_acc(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.lsr.i.r.acc(i32 %a, i32 %b, i32 0) ret i32 
%z } -; CHECK: += lsr({{.*}}, #0) +; CHECK: += lsr({{.*}},#0) declare i32 @llvm.hexagon.S2.asl.i.r.acc(i32, i32, i32) define i32 @S2_asl_i_r_acc(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.asl.i.r.acc(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: += asl({{.*}}, #0) +; CHECK: += asl({{.*}},#0) ; Shift by immediate and add declare i32 @llvm.hexagon.S4.addi.asl.ri(i32, i32, i32) @@ -138,35 +138,35 @@ define i32 @S4_addi_asl_ri(i32 %a) { %z = call i32 @llvm.hexagon.S4.addi.asl.ri(i32 0, i32 %a, i32 0) ret i32 %z } -; CHECK: = add(#0, asl({{.*}}, #0)) +; CHECK: = add(#0,asl({{.*}},#0)) declare i32 @llvm.hexagon.S4.subi.asl.ri(i32, i32, i32) define i32 @S4_subi_asl_ri(i32 %a) { %z = call i32 @llvm.hexagon.S4.subi.asl.ri(i32 0, i32 %a, i32 0) ret i32 %z } -; CHECK: = sub(#0, asl({{.*}}, #0)) +; CHECK: = sub(#0,asl({{.*}},#0)) declare i32 @llvm.hexagon.S4.addi.lsr.ri(i32, i32, i32) define i32 @S4_addi_lsr_ri(i32 %a) { %z = call i32 @llvm.hexagon.S4.addi.lsr.ri(i32 0, i32 %a, i32 0) ret i32 %z } -; CHECK: = add(#0, lsr({{.*}}, #0)) +; CHECK: = add(#0,lsr({{.*}},#0)) declare i32 @llvm.hexagon.S4.subi.lsr.ri(i32, i32, i32) define i32 @S4_subi_lsr_ri(i32 %a) { %z = call i32 @llvm.hexagon.S4.subi.lsr.ri(i32 0, i32 %a, i32 0) ret i32 %z } -; CHECK: = sub(#0, lsr({{.*}}, #0)) +; CHECK: = sub(#0,lsr({{.*}},#0)) declare i32 @llvm.hexagon.S2.addasl.rrri(i32, i32, i32) define i32 @S2_addasl_rrri(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.addasl.rrri(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: = addasl({{.*}}, {{.*}}, #0) +; CHECK: = addasl({{.*}},{{.*}},#0) ; Shift by immediate and logical declare i64 @llvm.hexagon.S2.asr.i.p.and(i64, i64, i32) @@ -174,140 +174,140 @@ define i64 @S2_asr_i_p_and(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.asr.i.p.and(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: &= asr({{.*}}, #0) +; CHECK: &= asr({{.*}},#0) declare i64 @llvm.hexagon.S2.lsr.i.p.and(i64, i64, i32) define i64 @S2_lsr_i_p_and(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.lsr.i.p.and(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: {{.*}} &= lsr({{.*}}, #0) +; CHECK: {{.*}} &= lsr({{.*}},#0) declare i64 @llvm.hexagon.S2.asl.i.p.and(i64, i64, i32) define i64 @S2_asl_i_p_and(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.asl.i.p.and(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: &= asl({{.*}}, #0) +; CHECK: &= asl({{.*}},#0) declare i64 @llvm.hexagon.S2.asr.i.p.or(i64, i64, i32) define i64 @S2_asr_i_p_or(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.asr.i.p.or(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: |= asr({{.*}}, #0) +; CHECK: |= asr({{.*}},#0) declare i64 @llvm.hexagon.S2.lsr.i.p.or(i64, i64, i32) define i64 @S2_lsr_i_p_or(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.lsr.i.p.or(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: |= lsr({{.*}}, #0) +; CHECK: |= lsr({{.*}},#0) declare i64 @llvm.hexagon.S2.asl.i.p.or(i64, i64, i32) define i64 @S2_asl_i_p_or(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.asl.i.p.or(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: |= asl({{.*}}, #0) +; CHECK: |= asl({{.*}},#0) declare i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64, i64, i32) define i64 @S2_lsr_i_p_xacc(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: ^= lsr({{.*}}, #0) +; CHECK: ^= lsr({{.*}},#0) declare i64 @llvm.hexagon.S2.asl.i.p.xacc(i64, i64, i32) define i64 @S2_asl_i_p_xacc(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.asl.i.p.xacc(i64 %a, i64 %b, i32 0) ret i64 %z } -; CHECK: ^= asl({{.*}}, #0) +; CHECK: ^= asl({{.*}},#0) declare i32 
@llvm.hexagon.S2.asr.i.r.and(i32, i32, i32) define i32 @S2_asr_i_r_and(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.asr.i.r.and(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: &= asr({{.*}}, #0) +; CHECK: &= asr({{.*}},#0) declare i32 @llvm.hexagon.S2.lsr.i.r.and(i32, i32, i32) define i32 @S2_lsr_i_r_and(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.lsr.i.r.and(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: &= lsr({{.*}}, #0) +; CHECK: &= lsr({{.*}},#0) declare i32 @llvm.hexagon.S2.asl.i.r.and(i32, i32, i32) define i32 @S2_asl_i_r_and(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.asl.i.r.and(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: &= asl({{.*}}, #0) +; CHECK: &= asl({{.*}},#0) declare i32 @llvm.hexagon.S2.asr.i.r.or(i32, i32, i32) define i32 @S2_asr_i_r_or(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.asr.i.r.or(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: |= asr({{.*}}, #0) +; CHECK: |= asr({{.*}},#0) declare i32 @llvm.hexagon.S2.lsr.i.r.or(i32, i32, i32) define i32 @S2_lsr_i_r_or(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.lsr.i.r.or(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: |= lsr({{.*}}, #0) +; CHECK: |= lsr({{.*}},#0) declare i32 @llvm.hexagon.S2.asl.i.r.or(i32, i32, i32) define i32 @S2_asl_i_r_or(i32%a, i32 %b) { %z = call i32 @llvm.hexagon.S2.asl.i.r.or(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: |= asl({{.*}}, #0) +; CHECK: |= asl({{.*}},#0) declare i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32, i32, i32) define i32 @S2_lsr_i_r_xacc(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32%a, i32 %b, i32 0) ret i32 %z } -; CHECK: ^= lsr({{.*}}, #0) +; CHECK: ^= lsr({{.*}},#0) declare i32 @llvm.hexagon.S2.asl.i.r.xacc(i32, i32, i32) define i32 @S2_asl_i_r_xacc(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.asl.i.r.xacc(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: ^= asl({{.*}}, #0) +; CHECK: ^= asl({{.*}},#0) declare i32 @llvm.hexagon.S4.andi.asl.ri(i32, i32, i32) define i32 @S4_andi_asl_ri(i32 %a) { %z = call i32 @llvm.hexagon.S4.andi.asl.ri(i32 0, i32 %a, i32 0) ret i32 %z } -; CHECK: = and(#0, asl({{.*}}, #0)) +; CHECK: = and(#0,asl({{.*}},#0)) declare i32 @llvm.hexagon.S4.ori.asl.ri(i32, i32, i32) define i32 @S4_ori_asl_ri(i32 %a) { %z = call i32 @llvm.hexagon.S4.ori.asl.ri(i32 0, i32 %a, i32 0) ret i32 %z } -; CHECK: = or(#0, asl({{.*}}, #0)) +; CHECK: = or(#0,asl({{.*}},#0)) declare i32 @llvm.hexagon.S4.andi.lsr.ri(i32, i32, i32) define i32 @S4_andi_lsr_ri(i32 %a) { %z = call i32 @llvm.hexagon.S4.andi.lsr.ri(i32 0, i32 %a, i32 0) ret i32 %z } -; CHECK: = and(#0, lsr({{.*}}, #0)) +; CHECK: = and(#0,lsr({{.*}},#0)) declare i32 @llvm.hexagon.S4.ori.lsr.ri(i32, i32, i32) define i32 @S4_ori_lsr_ri(i32 %a) { %z = call i32 @llvm.hexagon.S4.ori.lsr.ri(i32 0, i32 %a, i32 0) ret i32 %z } -; CHECK: = or(#0, lsr({{.*}}, #0)) +; CHECK: = or(#0,lsr({{.*}},#0)) ; Shift right by immediate with rounding declare i64 @llvm.hexagon.S2.asr.i.p.rnd(i64, i32) @@ -315,14 +315,14 @@ define i64 @S2_asr_i_p_rnd(i64 %a) { %z = call i64 @llvm.hexagon.S2.asr.i.p.rnd(i64 %a, i32 0) ret i64 %z } -; CHECK: = asr({{.*}}, #0):rnd +; CHECK: = asr({{.*}},#0):rnd declare i32 @llvm.hexagon.S2.asr.i.r.rnd(i32, i32) define i32 @S2_asr_i_r_rnd(i32 %a) { %z = call i32 @llvm.hexagon.S2.asr.i.r.rnd(i32 %a, i32 0) ret i32 %z } -; CHECK: = asr({{.*}}, #0):rnd +; CHECK: = asr({{.*}},#0):rnd ; Shift left by immediate with saturation declare i32 @llvm.hexagon.S2.asl.i.r.sat(i32, i32) @@ -330,7 +330,7 @@ define i32 @S2_asl_i_r_sat(i32 %a) { %z = call i32 @llvm.hexagon.S2.asl.i.r.sat(i32 %a, i32 
0) ret i32 %z } -; CHECK: = asl({{.*}}, #0):sat +; CHECK: = asl({{.*}},#0):sat ; Shift by register declare i64 @llvm.hexagon.S2.asr.r.p(i64, i32) @@ -338,63 +338,63 @@ define i64 @S2_asr_r_p(i64 %a, i32 %b) { %z = call i64 @llvm.hexagon.S2.asr.r.p(i64 %a, i32 %b) ret i64 %z } -; CHECK: = asr({{.*}}, {{.*}}) +; CHECK: = asr({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.lsr.r.p(i64, i32) define i64 @S2_lsr_r_p(i64 %a, i32 %b) { %z = call i64 @llvm.hexagon.S2.lsr.r.p(i64 %a, i32 %b) ret i64 %z } -; CHECK: = lsr({{.*}}, {{.*}}) +; CHECK: = lsr({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.asl.r.p(i64, i32) define i64 @S2_asl_r_p(i64 %a, i32 %b) { %z = call i64 @llvm.hexagon.S2.asl.r.p(i64 %a, i32 %b) ret i64 %z } -; CHECK: = asl({{.*}}, {{.*}}) +; CHECK: = asl({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.lsl.r.p(i64, i32) define i64 @S2_lsl_r_p(i64 %a, i32 %b) { %z = call i64 @llvm.hexagon.S2.lsl.r.p(i64 %a, i32 %b) ret i64 %z } -; CHECK: = lsl({{.*}}, {{.*}}) +; CHECK: = lsl({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.asr.r.r(i32, i32) define i32 @S2_asr_r_r(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.asr.r.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: = asr({{.*}}, {{.*}}) +; CHECK: = asr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsr.r.r(i32, i32) define i32 @S2_lsr_r_r(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.lsr.r.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: = lsr({{.*}}, {{.*}}) +; CHECK: = lsr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.asl.r.r(i32, i32) define i32 @S2_asl_r_r(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.asl.r.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: = asl({{.*}}, {{.*}}) +; CHECK: = asl({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsl.r.r(i32, i32) define i32 @S2_lsl_r_r(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.lsl.r.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: = lsl({{.*}}, {{.*}}) +; CHECK: = lsl({{.*}},{{.*}}) declare i32 @llvm.hexagon.S4.lsli(i32, i32) define i32 @S4_lsli(i32 %a) { %z = call i32 @llvm.hexagon.S4.lsli(i32 0, i32 %a) ret i32 %z } -; CHECK: = lsl(#0, {{.*}}) +; CHECK: = lsl(#0,{{.*}}) ; Shift by register and accumulate declare i64 @llvm.hexagon.S2.asr.r.p.nac(i64, i64, i32) @@ -402,112 +402,112 @@ define i64 @S2_asr_r_p_nac(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.asr.r.p.nac(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: -= asr({{.*}}, r4) +; CHECK: -= asr({{.*}},r4) declare i64 @llvm.hexagon.S2.lsr.r.p.nac(i64, i64, i32) define i64 @S2_lsr_r_p_nac(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.lsr.r.p.nac(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: -= lsr({{.*}}, r4) +; CHECK: -= lsr({{.*}},r4) declare i64 @llvm.hexagon.S2.asl.r.p.nac(i64, i64, i32) define i64 @S2_asl_r_p_nac(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.asl.r.p.nac(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: -= asl({{.*}}, r4) +; CHECK: -= asl({{.*}},r4) declare i64 @llvm.hexagon.S2.lsl.r.p.nac(i64, i64, i32) define i64 @S2_lsl_r_p_nac(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.lsl.r.p.nac(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: -= lsl({{.*}}, r4) +; CHECK: -= lsl({{.*}},r4) declare i64 @llvm.hexagon.S2.asr.r.p.acc(i64, i64, i32) define i64 @S2_asr_r_p_acc(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.asr.r.p.acc(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: += asr({{.*}}, r4) +; CHECK: += asr({{.*}},r4) declare i64 @llvm.hexagon.S2.lsr.r.p.acc(i64, i64, i32) define i64 @S2_lsr_r_p_acc(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.lsr.r.p.acc(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: 
+= lsr({{.*}}, r4) +; CHECK: += lsr({{.*}},r4) declare i64 @llvm.hexagon.S2.asl.r.p.acc(i64, i64, i32) define i64 @S2_asl_r_p_acc(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.asl.r.p.acc(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: += asl({{.*}}, r4) +; CHECK: += asl({{.*}},r4) declare i64 @llvm.hexagon.S2.lsl.r.p.acc(i64, i64, i32) define i64 @S2_lsl_r_p_acc(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.lsl.r.p.acc(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: += lsl({{.*}}, r4) +; CHECK: += lsl({{.*}},r4) declare i32 @llvm.hexagon.S2.asr.r.r.nac(i32, i32, i32) define i32 @S2_asr_r_r_nac(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.asr.r.r.nac(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= asr({{.*}}, {{.*}}) +; CHECK: -= asr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsr.r.r.nac(i32, i32, i32) define i32 @S2_lsr_r_r_nac(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.lsr.r.r.nac(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= lsr({{.*}}, {{.*}}) +; CHECK: -= lsr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.asl.r.r.nac(i32, i32, i32) define i32 @S2_asl_r_r_nac(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.asl.r.r.nac(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= asl({{.*}}, {{.*}}) +; CHECK: -= asl({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsl.r.r.nac(i32, i32, i32) define i32 @S2_lsl_r_r_nac(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.lsl.r.r.nac(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: -= lsl({{.*}}, {{.*}}) +; CHECK: -= lsl({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.asr.r.r.acc(i32, i32, i32) define i32 @S2_asr_r_r_acc(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.asr.r.r.acc(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += asr({{.*}}, {{.*}}) +; CHECK: += asr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsr.r.r.acc(i32, i32, i32) define i32 @S2_lsr_r_r_acc(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.lsr.r.r.acc(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += lsr({{.*}}, {{.*}}) +; CHECK: += lsr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.asl.r.r.acc(i32, i32, i32) define i32 @S2_asl_r_r_acc(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.asl.r.r.acc(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += asl({{.*}}, {{.*}}) +; CHECK: += asl({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsl.r.r.acc(i32, i32, i32) define i32 @S2_lsl_r_r_acc(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.lsl.r.r.acc(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: += lsl({{.*}}, {{.*}}) +; CHECK: += lsl({{.*}},{{.*}}) ; Shift by register and logical declare i64 @llvm.hexagon.S2.asr.r.p.or(i64, i64, i32) @@ -515,112 +515,112 @@ define i64 @S2_asr_r_p_or(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.asr.r.p.or(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: |= asr({{.*}}, r4) +; CHECK: |= asr({{.*}},r4) declare i64 @llvm.hexagon.S2.lsr.r.p.or(i64, i64, i32) define i64 @S2_lsr_r_p_or(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.lsr.r.p.or(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: |= lsr({{.*}}, r4) +; CHECK: |= lsr({{.*}},r4) declare i64 @llvm.hexagon.S2.asl.r.p.or(i64, i64, i32) define i64 @S2_asl_r_p_or(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.asl.r.p.or(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: |= asl({{.*}}, r4) +; CHECK: |= asl({{.*}},r4) declare i64 @llvm.hexagon.S2.lsl.r.p.or(i64, i64, i32) define i64 @S2_lsl_r_p_or(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.lsl.r.p.or(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: |= 
lsl({{.*}}, r4) +; CHECK: |= lsl({{.*}},r4) declare i64 @llvm.hexagon.S2.asr.r.p.and(i64, i64, i32) define i64 @S2_asr_r_p_and(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.asr.r.p.and(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: &= asr({{.*}}, r4) +; CHECK: &= asr({{.*}},r4) declare i64 @llvm.hexagon.S2.lsr.r.p.and(i64, i64, i32) define i64 @S2_lsr_r_p_and(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.lsr.r.p.and(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: &= lsr({{.*}}, r4) +; CHECK: &= lsr({{.*}},r4) declare i64 @llvm.hexagon.S2.asl.r.p.and(i64, i64, i32) define i64 @S2_asl_r_p_and(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.asl.r.p.and(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: &= asl({{.*}}, r4) +; CHECK: &= asl({{.*}},r4) declare i64 @llvm.hexagon.S2.lsl.r.p.and(i64, i64, i32) define i64 @S2_lsl_r_p_and(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.lsl.r.p.and(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: &= lsl({{.*}}, r4) +; CHECK: &= lsl({{.*}},r4) declare i32 @llvm.hexagon.S2.asr.r.r.or(i32, i32, i32) define i32 @S2_asr_r_r_or(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.asr.r.r.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: |= asr({{.*}}, {{.*}}) +; CHECK: |= asr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsr.r.r.or(i32, i32, i32) define i32 @S2_lsr_r_r_or(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.lsr.r.r.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: |= lsr({{.*}}, {{.*}}) +; CHECK: |= lsr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.asl.r.r.or(i32, i32, i32) define i32 @S2_asl_r_r_or(i32%a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.asl.r.r.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: |= asl({{.*}}, {{.*}}) +; CHECK: |= asl({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsl.r.r.or(i32, i32, i32) define i32 @S2_lsl_r_r_or(i32%a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.lsl.r.r.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: |= lsl({{.*}}, {{.*}}) +; CHECK: |= lsl({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.asr.r.r.and(i32, i32, i32) define i32 @S2_asr_r_r_and(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.asr.r.r.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: &= asr({{.*}}, {{.*}}) +; CHECK: &= asr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsr.r.r.and(i32, i32, i32) define i32 @S2_lsr_r_r_and(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.lsr.r.r.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: &= lsr({{.*}}, {{.*}}) +; CHECK: &= lsr({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.asl.r.r.and(i32, i32, i32) define i32 @S2_asl_r_r_and(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.asl.r.r.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: &= asl({{.*}}, {{.*}}) +; CHECK: &= asl({{.*}},{{.*}}) declare i32 @llvm.hexagon.S2.lsl.r.r.and(i32, i32, i32) define i32 @S2_lsl_r_r_and(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.lsl.r.r.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: &= lsl({{.*}}, {{.*}}) +; CHECK: &= lsl({{.*}},{{.*}}) ; Shift by register with saturation declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32) @@ -628,14 +628,14 @@ define i32 @S2_asr_r_r_sat(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %a, i32 %b) ret i32 %z } -; CHECK: = asr({{.*}}, {{.*}}):sat +; CHECK: = asr({{.*}},{{.*}}):sat declare i32 @llvm.hexagon.S2.asl.r.r.sat(i32, i32) define i32 @S2_asl_r_r_sat(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %a, i32 %b) ret i32 %z } -; CHECK: = asl({{.*}}, {{.*}}):sat +; 
CHECK: = asl({{.*}},{{.*}}):sat ; Vector shift halfwords by immediate declare i64 @llvm.hexagon.S2.asr.i.vh(i64, i32) @@ -643,21 +643,21 @@ define i64 @S2_asr_i_vh(i64 %a) { %z = call i64 @llvm.hexagon.S2.asr.i.vh(i64 %a, i32 0) ret i64 %z } -; CHECK: = vasrh({{.*}}, #0) +; CHECK: = vasrh({{.*}},#0) declare i64 @llvm.hexagon.S2.lsr.i.vh(i64, i32) define i64 @S2_lsr_i_vh(i64 %a) { %z = call i64 @llvm.hexagon.S2.lsr.i.vh(i64 %a, i32 0) ret i64 %z } -; CHECK: = vlsrh({{.*}}, #0) +; CHECK: = vlsrh({{.*}},#0) declare i64 @llvm.hexagon.S2.asl.i.vh(i64, i32) define i64 @S2_asl_i_vh(i64 %a) { %z = call i64 @llvm.hexagon.S2.asl.i.vh(i64 %a, i32 0) ret i64 %z } -; CHECK: = vaslh({{.*}}, #0) +; CHECK: = vaslh({{.*}},#0) ; Vector shift halfwords by register declare i64 @llvm.hexagon.S2.asr.r.vh(i64, i32) @@ -665,28 +665,28 @@ define i64 @S2_asr_r_vh(i64 %a, i32 %b) { %z = call i64 @llvm.hexagon.S2.asr.r.vh(i64 %a, i32 %b) ret i64 %z } -; CHECK: = vasrh({{.*}}, {{.*}}) +; CHECK: = vasrh({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.lsr.r.vh(i64, i32) define i64 @S2_lsr_r_vh(i64 %a, i32 %b) { %z = call i64 @llvm.hexagon.S2.lsr.r.vh(i64 %a, i32 %b) ret i64 %z } -; CHECK: = vlsrh({{.*}}, {{.*}}) +; CHECK: = vlsrh({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.asl.r.vh(i64, i32) define i64 @S2_asl_r_vh(i64 %a, i32 %b) { %z = call i64 @llvm.hexagon.S2.asl.r.vh(i64 %a, i32 %b) ret i64 %z } -; CHECK: = vaslh({{.*}}, {{.*}}) +; CHECK: = vaslh({{.*}},{{.*}}) declare i64 @llvm.hexagon.S2.lsl.r.vh(i64, i32) define i64 @S2_lsl_r_vh(i64 %a, i32 %b) { %z = call i64 @llvm.hexagon.S2.lsl.r.vh(i64 %a, i32 %b) ret i64 %z } -; CHECK: = vlslh({{.*}}, {{.*}}) +; CHECK: = vlslh({{.*}},{{.*}}) ; Vector shift words by immediate declare i64 @llvm.hexagon.S2.asr.i.vw(i64, i32) @@ -694,21 +694,21 @@ define i64 @S2_asr_i_vw(i64 %a) { %z = call i64 @llvm.hexagon.S2.asr.i.vw(i64 %a, i32 0) ret i64 %z } -; CHECK: = vasrw({{.*}}, #0) +; CHECK: = vasrw({{.*}},#0) declare i64 @llvm.hexagon.S2.lsr.i.vw(i64, i32) define i64 @S2_lsr_i_vw(i64 %a) { %z = call i64 @llvm.hexagon.S2.lsr.i.vw(i64 %a, i32 0) ret i64 %z } -; CHECK: = vlsrw({{.*}}, #0) +; CHECK: = vlsrw({{.*}},#0) declare i64 @llvm.hexagon.S2.asl.i.vw(i64, i32) define i64 @S2_asl_i_vw(i64 %a) { %z = call i64 @llvm.hexagon.S2.asl.i.vw(i64 %a, i32 0) ret i64 %z } -; CHECK: = vaslw({{.*}}, #0) +; CHECK: = vaslw({{.*}},#0) ; Vector shift words with truncate and pack declare i32 @llvm.hexagon.S2.asr.i.svw.trun(i64, i32) @@ -716,11 +716,11 @@ define i32 @S2_asr_i_svw_trun(i64 %a) { %z = call i32 @llvm.hexagon.S2.asr.i.svw.trun(i64 %a, i32 0) ret i32 %z } -; CHECK: = vasrw({{.*}}, #0) +; CHECK: = vasrw({{.*}},#0) declare i32 @llvm.hexagon.S2.asr.r.svw.trun(i64, i32) define i32 @S2_asr_r_svw_trun(i64 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.asr.r.svw.trun(i64 %a, i32 %b) ret i32 %z } -; CHECK: = vasrw({{.*}}, {{.*}}) +; CHECK: = vasrw({{.*}},{{.*}}) diff --git a/test/CodeGen/Hexagon/isel-exti1.ll b/test/CodeGen/Hexagon/isel-exti1.ll new file mode 100644 index 000000000000..b49986628e4e --- /dev/null +++ b/test/CodeGen/Hexagon/isel-exti1.ll @@ -0,0 +1,22 @@ +; RUN: llc -O0 -march=hexagon < %s | FileCheck %s + +; CHECK-LABEL: sexti1 +; CHECK: r[[REG:[0-9]+]] = mux(p{{[0-3]}},#-1,#0) +; CHECK: combine(r[[REG]],r[[REG]]) +define i64 @sexti1(i64 %a0, i64 %a1) { +entry: + %t0 = icmp ult i64 %a0, %a1 + %t1 = sext i1 %t0 to i64 + ret i64 %t1 +} + +; CHECK-LABEL: zexti1 +; CHECK: r[[REG:[0-9]+]] = mux(p{{[0-3]}},#1,#0) +; CHECK: combine(#0,r[[REG]]) +define i64 @zexti1(i64 %a0, i64 %a1) {
+entry: + %t0 = icmp ult i64 %a0, %a1 + %t1 = zext i1 %t0 to i64 + ret i64 %t1 +} + diff --git a/test/CodeGen/Hexagon/isel-i1arg-crash.ll b/test/CodeGen/Hexagon/isel-i1arg-crash.ll new file mode 100644 index 000000000000..7e8bd9e93b27 --- /dev/null +++ b/test/CodeGen/Hexagon/isel-i1arg-crash.ll @@ -0,0 +1,6 @@ +; RUN: llc -march=hexagon -debug-only=isel < %s +; REQUIRES: asserts + +define void @g(i1 %cond) { + ret void +} diff --git a/test/CodeGen/Hexagon/isel-op-zext-i1.ll b/test/CodeGen/Hexagon/isel-op-zext-i1.ll new file mode 100644 index 000000000000..d77d0929e21f --- /dev/null +++ b/test/CodeGen/Hexagon/isel-op-zext-i1.ll @@ -0,0 +1,13 @@ +; RUN: llc -march=hexagon -hexagon-expand-condsets=0 < %s | FileCheck %s + +; In the IR, the i1 value is zero-extended first, then passed to add. +; Check that in the final code, the mux happens after the add. +; CHECK: [[REG1:r[0-9]+]] = add([[REG0:r[0-9]+]],#1) +; CHECK: r{{[0-9]+}} = mux(p{{[0-3]}},[[REG1]],[[REG0]]) + +define i32 @foo(i32 %a, i32 %b) { + %v0 = icmp eq i32 %a, %b + %v1 = zext i1 %v0 to i32 + %v2 = add i32 %v1, %a + ret i32 %v2 +} diff --git a/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll b/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll new file mode 100644 index 000000000000..db850950fd53 --- /dev/null +++ b/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll @@ -0,0 +1,36 @@ +; Check for recognizing the "memmove" idiom. +; RUN: opt -basicaa -hexagon-loop-idiom -S -mtriple hexagon-unknown-elf < %s \ +; RUN: | FileCheck %s +; CHECK: call void @llvm.memmove + +; Function Attrs: norecurse nounwind +define void @foo(i32* nocapture %A, i32* nocapture readonly %B, i32 %n) #0 { +entry: + %cmp1 = icmp sgt i32 %n, 0 + br i1 %cmp1, label %for.body.preheader, label %for.end + +for.body.preheader: ; preds = %entry + %arrayidx.gep = getelementptr i32, i32* %B, i32 0 + %arrayidx1.gep = getelementptr i32, i32* %A, i32 0 + br label %for.body + +for.body: ; preds = %for.body.preheader, %for.body + %arrayidx.phi = phi i32* [ %arrayidx.gep, %for.body.preheader ], [ %arrayidx.inc, %for.body ] + %arrayidx1.phi = phi i32* [ %arrayidx1.gep, %for.body.preheader ], [ %arrayidx1.inc, %for.body ] + %i.02 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ] + %0 = load i32, i32* %arrayidx.phi, align 4 + store i32 %0, i32* %arrayidx1.phi, align 4 + %inc = add nuw nsw i32 %i.02, 1 + %exitcond = icmp ne i32 %inc, %n + %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1 + %arrayidx1.inc = getelementptr i32, i32* %arrayidx1.phi, i32 1 + br i1 %exitcond, label %for.body, label %for.end.loopexit + +for.end.loopexit: ; preds = %for.body + br label %for.end + +for.end: ; preds = %for.end.loopexit, %entry + ret void +} + +attributes #0 = { nounwind } diff --git a/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll b/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll new file mode 100644 index 000000000000..b9747a887a59 --- /dev/null +++ b/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll @@ -0,0 +1,36 @@ +; RUN: opt -basicaa -hexagon-loop-idiom -S -mtriple hexagon-unknown-elf < %s \ +; RUN: | FileCheck %s + +define void @PR14241(i32* %s, i64 %size) #0 { +; Ensure that we don't form a memcpy for strided loops. Briefly, when we taught +; LoopIdiom about memmove and strided loops, this got miscompiled into a memcpy +; instead of a memmove. If we get the memmove transform back, this will catch +; regressions. 
+; +; CHECK-LABEL: @PR14241( + +entry: + %end.idx = add i64 %size, -1 + %end.ptr = getelementptr inbounds i32, i32* %s, i64 %end.idx + br label %while.body +; CHECK-NOT: memcpy +; CHECK: memmove + +while.body: + %phi.ptr = phi i32* [ %s, %entry ], [ %next.ptr, %while.body ] + %src.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 1 + %val = load i32, i32* %src.ptr, align 4 +; CHECK: load + %dst.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 0 + store i32 %val, i32* %dst.ptr, align 4 +; CHECK: store + %next.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 1 + %cmp = icmp eq i32* %next.ptr, %end.ptr + br i1 %cmp, label %exit, label %while.body + +exit: + ret void +; CHECK: ret void +} + +attributes #0 = { nounwind } diff --git a/test/CodeGen/Hexagon/loop-idiom/lcssa.ll b/test/CodeGen/Hexagon/loop-idiom/lcssa.ll new file mode 100644 index 000000000000..48632fde1368 --- /dev/null +++ b/test/CodeGen/Hexagon/loop-idiom/lcssa.ll @@ -0,0 +1,46 @@ +; RUN: opt -hexagon-loop-idiom -loop-deletion -gvn -S < %s +; REQUIRES: asserts + +; This tests that the HexagonLoopIdiom pass does not mark LCSSA information +; as preserved. The pass calls SimplifyInstruction in a couple of places, +; which can invalidate LCSSA. Specifically, the uses of an LCSSA phi variable +; are replaced by the incoming value. + +define hidden void @test() local_unnamed_addr #0 { +entry: + br label %if.then63 + +if.then63: + br i1 undef, label %do.body311, label %if.end375 + +do.body311: + br i1 undef, label %do.end318, label %do.body311 + +do.end318: + br i1 undef, label %if.end322, label %if.end375 + +if.end322: + %sub325 = sub i32 undef, undef + br i1 undef, label %do.end329, label %do.body311 + +do.end329: + %sub325.lcssa = phi i32 [ %sub325, %if.end322 ] + br label %do.body330 + +do.body330: + %row_width.7 = phi i32 [ %sub325.lcssa, %do.end329 ], [ %dec334, %do.body330 ] + %sp.5 = phi i8* [ undef, %do.end329 ], [ %incdec.ptr331, %do.body330 ] + %dp.addr.5 = phi i8* [ undef, %do.end329 ], [ %incdec.ptr332, %do.body330 ] + %0 = load i8, i8* %sp.5, align 1 + store i8 %0, i8* %dp.addr.5, align 1 + %incdec.ptr332 = getelementptr inbounds i8, i8* %dp.addr.5, i32 1 + %incdec.ptr331 = getelementptr inbounds i8, i8* %sp.5, i32 1 + %dec334 = add i32 %row_width.7, -1 + %cmp335 = icmp eq i32 %dec334, 0 + br i1 %cmp335, label %if.end375, label %do.body330 + +if.end375: + ret void +} + +attributes #0 = { nounwind } diff --git a/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll b/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll new file mode 100644 index 000000000000..591683291982 --- /dev/null +++ b/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll @@ -0,0 +1,24 @@ +; RUN: opt -basicaa -hexagon-loop-idiom -mtriple hexagon-unknown-elf < %s +; REQUIRES: asserts + +target triple = "hexagon" + +; Function Attrs: nounwind +define void @fred(i8 zeroext %L) #0 { +entry: + br i1 undef, label %if.end53, label %while.body37 + +while.body37: ; preds = %while.body37, %entry + %i.121 = phi i32 [ %inc46, %while.body37 ], [ 0, %entry ] + %shl = shl i32 1, %i.121 + %and39 = and i32 %shl, undef + %tobool40 = icmp eq i32 %and39, 0 + %inc46 = add nuw nsw i32 %i.121, 1 + %storemerge = select i1 %tobool40, i8 %L, i8 0 + br i1 undef, label %while.body37, label %if.end53 + +if.end53: ; preds = %while.body37, %entry + ret void +} + +attributes #0 = { nounwind } diff --git a/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll b/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll new file mode 100644 index 000000000000..f738282c0f1b --- /dev/null +++
b/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll @@ -0,0 +1,83 @@ +; RUN: opt -march=hexagon -hexagon-loop-idiom -S < %s | FileCheck %s +; CHECK-LABEL: define void @fred + +; Check that this test does not crash. + +target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048" +target triple = "hexagon" + +%struct.0 = type { [120 x i16], [80 x i16], [80 x i16], [80 x i16], [80 x i16], [80 x i16], [40 x i16], [40 x i16], [40 x i16], [40 x i16], [40 x i16], [40 x i16] } + +define void @fred(%struct.0* %demod_state) local_unnamed_addr #0 { +entry: + br label %for.body309 + +for.body309: ; preds = %for.body309, %entry + %max_diff.0300 = phi i16 [ %max_diff.1, %for.body309 ], [ 0, %entry ] + %arrayidx322.phi = phi i16* [ undef, %entry ], [ %arrayidx322.inc, %for.body309 ] + %arrayidx331.phi = phi i16* [ undef, %entry ], [ %arrayidx331.inc, %for.body309 ] + %lag.4299.apmt = phi i32 [ %inc376.apmt, %for.body309 ], [ 0, %entry ] + %0 = load i16, i16* %arrayidx322.phi, align 2 + %conv323 = sext i16 %0 to i32 + %sub324 = sub nsw i32 0, %conv323 + %ispos258 = icmp sgt i32 %sub324, -1 + %1 = select i1 %ispos258, i32 %sub324, i32 0 + %add326 = add nsw i32 %1, 0 + %2 = load i16, i16* %arrayidx331.phi, align 2 + %conv332 = sext i16 %2 to i32 + %sub333 = sub nsw i32 0, %conv332 + %ispos260 = icmp sgt i32 %sub333, -1 + %3 = select i1 %ispos260, i32 %sub333, i32 undef + %sub342 = sub nsw i32 0, %conv323 + %ispos262 = icmp sgt i32 %sub342, -1 + %4 = select i1 %ispos262, i32 %sub342, i32 undef + %sub351 = sub nsw i32 0, %conv332 + %ispos264 = icmp sgt i32 %sub351, -1 + %5 = select i1 %ispos264, i32 %sub351, i32 0 + %sub360 = sub nsw i32 %conv323, %conv332 + %ispos266 = icmp sgt i32 %sub360, -1 + %6 = select i1 %ispos266, i32 %sub360, i32 0 + %add335 = add nsw i32 %add326, %4 + %add344 = add nsw i32 %add335, %3 + %add353 = add i32 %add344, %5 + %add362 = add i32 %add353, %6 + %div363 = sdiv i32 %add362, 6 + %conv364 = trunc i32 %div363 to i16 + %sext268 = shl i32 %div363, 16 + %conv369 = ashr exact i32 %sext268, 16 + %conv370 = sext i16 %max_diff.0300 to i32 + %cmp371 = icmp sgt i32 %conv369, %conv370 + %max_diff.1 = select i1 %cmp371, i16 %conv364, i16 %max_diff.0300 + %inc376.apmt = add nuw nsw i32 %lag.4299.apmt, 1 + %exitcond331 = icmp ne i32 %inc376.apmt, 40 + %arrayidx322.inc = getelementptr i16, i16* %arrayidx322.phi, i32 1 + %arrayidx331.inc = getelementptr i16, i16* %arrayidx331.phi, i32 1 + br i1 %exitcond331, label %for.body309, label %for.end377 + +for.end377: ; preds = %for.body309 + %max_diff.1.lcssa = phi i16 [ %max_diff.1, %for.body309 ] + %cmp407 = icmp sgt i16 %max_diff.1.lcssa, 4 + br label %for.body405 + +for.body405: ; preds = %if.end437, %for.end377 + %arrayidx412 = getelementptr inbounds %struct.0, %struct.0* %demod_state, i32 0, i32 11, i32 undef + br i1 %cmp407, label %if.then409, label %if.end437 + +if.then409: ; preds = %for.body405 + %arrayidx416 = getelementptr inbounds [40 x i16], [40 x i16]* null, i32 0, i32 undef + %7 = load i16, i16* %arrayidx416, align 2 + %conv417 = sext i16 %7 to i32 + %shl = shl i32 %conv417, 4 + %mul419 = mul nsw i32 %shl, 655 + %add420 = add nsw i32 %mul419, 0 + br label %if.end437 + +if.end437: ; preds = %if.then409, %for.body405 + %mul431.sink = phi i32 [ %add420, %if.then409 ], [ undef, %for.body405 ] + %shr432257 = lshr i32 %mul431.sink, 15 + %conv433 = trunc i32 %shr432257 to i16 + store i16 %conv433, i16* %arrayidx412, align 2 + br 
label %for.body405 +} + +attributes #0 = { noinline nounwind "target-cpu"="hexagonv60" "target-features"="-hvx-double,-long-calls" } diff --git a/test/CodeGen/Hexagon/loop-idiom/pmpy-mod.ll b/test/CodeGen/Hexagon/loop-idiom/pmpy-mod.ll new file mode 100644 index 000000000000..9907ae71c992 --- /dev/null +++ b/test/CodeGen/Hexagon/loop-idiom/pmpy-mod.ll @@ -0,0 +1,84 @@ +; Run -O2 to make sure that all the usual optimizations do happen before +; the Hexagon loop idiom recognition runs. This is to check that we still +; get this opportunity regardless of what happens before. + +; RUN: opt -O2 -march=hexagon -S < %s | FileCheck %s + +target triple = "hexagon" +target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048" + +; CHECK-LABEL: define zeroext i16 @pmpy_mod_lsr +; There need to be two pmpy instructions. +; CHECK: call i64 @llvm.hexagon.M4.pmpyw +; CHECK: call i64 @llvm.hexagon.M4.pmpyw + +define zeroext i16 @pmpy_mod_lsr(i8 zeroext %a0, i16 zeroext %a1) #0 { +b2: + br label %b3 + +b3: ; preds = %b44, %b2 + %v4 = phi i8 [ %a0, %b2 ], [ %v19, %b44 ] + %v5 = phi i16 [ %a1, %b2 ], [ %v43, %b44 ] + %v6 = phi i8 [ 0, %b2 ], [ %v45, %b44 ] + %v7 = zext i8 %v6 to i32 + %v8 = icmp slt i32 %v7, 8 + br i1 %v8, label %b9, label %b46 + +b9: ; preds = %b3 + %v10 = zext i8 %v4 to i32 + %v11 = and i32 %v10, 1 + %v12 = trunc i16 %v5 to i8 + %v13 = zext i8 %v12 to i32 + %v14 = and i32 %v13, 1 + %v15 = xor i32 %v11, %v14 + %v16 = trunc i32 %v15 to i8 + %v17 = zext i8 %v4 to i32 + %v18 = ashr i32 %v17, 1 + %v19 = trunc i32 %v18 to i8 + %v20 = zext i8 %v16 to i32 + %v21 = icmp eq i32 %v20, 1 + br i1 %v21, label %b22, label %b26 + +b22: ; preds = %b9 + %v23 = zext i16 %v5 to i32 + %v24 = xor i32 %v23, 16386 + %v25 = trunc i32 %v24 to i16 + br label %b27 + +b26: ; preds = %b9 + br label %b27 + +b27: ; preds = %b26, %b22 + %v28 = phi i16 [ %v25, %b22 ], [ %v5, %b26 ] + %v29 = phi i8 [ 1, %b22 ], [ 0, %b26 ] + %v30 = zext i16 %v28 to i32 + %v31 = ashr i32 %v30, 1 + %v32 = trunc i32 %v31 to i16 + %v33 = icmp ne i8 %v29, 0 + br i1 %v33, label %b34, label %b38 + +b34: ; preds = %b27 + %v35 = zext i16 %v32 to i32 + %v36 = or i32 %v35, 32768 + %v37 = trunc i32 %v36 to i16 + br label %b42 + +b38: ; preds = %b27 + %v39 = zext i16 %v32 to i32 + %v40 = and i32 %v39, 32767 + %v41 = trunc i32 %v40 to i16 + br label %b42 + +b42: ; preds = %b38, %b34 + %v43 = phi i16 [ %v37, %b34 ], [ %v41, %b38 ] + br label %b44 + +b44: ; preds = %b42 + %v45 = add i8 %v6, 1 + br label %b3 + +b46: ; preds = %b3 + ret i16 %v5 +} + +attributes #0 = { noinline nounwind "target-cpu"="hexagonv5" "target-features"="-hvx,-hvx-double,-long-calls" } diff --git a/test/CodeGen/Hexagon/loop-idiom/pmpy.ll b/test/CodeGen/Hexagon/loop-idiom/pmpy.ll new file mode 100644 index 000000000000..781618e58901 --- /dev/null +++ b/test/CodeGen/Hexagon/loop-idiom/pmpy.ll @@ -0,0 +1,33 @@ +; RUN: opt -hexagon-loop-idiom < %s -mtriple=hexagon-unknown-unknown -S \ +; RUN: | FileCheck %s + +target triple = "hexagon" + +; CHECK: define i64 @basic_pmpy +; CHECK: llvm.hexagon.M4.pmpyw +define i64 @basic_pmpy(i32 %P, i32 %Q) #0 { +entry: + %conv = zext i32 %Q to i64 + br label %for.body + +for.body: ; preds = %entry, %for.body + %i.07 = phi i32 [ 0, %entry ], [ %inc, %for.body ] + %R.06 = phi i64 [ 0, %entry ], [ %xor.R.06, %for.body ] + %shl = shl i32 1, %i.07 + %and = and i32 %shl, %P + %tobool = icmp eq i32 %and, 0 + %sh_prom = zext i32 %i.07 to i64 + %shl1 = 
shl i64 %conv, %sh_prom
+  %xor = xor i64 %shl1, %R.06
+  %xor.R.06 = select i1 %tobool, i64 %R.06, i64 %xor
+  %inc = add nuw nsw i32 %i.07, 1
+  %exitcond = icmp ne i32 %inc, 32
+  br i1 %exitcond, label %for.body, label %for.end
+
+for.end: ; preds = %for.body
+  %R.1.lcssa = phi i64 [ %xor.R.06, %for.body ]
+  ret i64 %R.1.lcssa
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/Hexagon/memops-stack.ll b/test/CodeGen/Hexagon/memops-stack.ll
index a8dc664591e9..1aa2e30ea25b 100644
--- a/test/CodeGen/Hexagon/memops-stack.ll
+++ b/test/CodeGen/Hexagon/memops-stack.ll
@@ -9,13 +9,13 @@ define void @test0() #0 {
 entry:
   %x = alloca i32, align 4
   %0 = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start(i64 4, i8* %0) #3
+  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
   call void @foo(i32* nonnull %x) #3
   %1 = load i32, i32* %x, align 4, !tbaa !1
   %inc = add nsw i32 %1, 1
   store i32 %inc, i32* %x, align 4, !tbaa !1
   call void @foo(i32* nonnull %x) #3
-  call void @llvm.lifetime.end(i64 4, i8* %0) #3
+  call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
   ret void
 }
 
@@ -25,13 +25,13 @@ define void @test1() #0 {
 entry:
   %x = alloca i32, align 4
   %0 = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start(i64 4, i8* %0) #3
+  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
   call void @foo(i32* nonnull %x) #3
   %1 = load i32, i32* %x, align 4, !tbaa !1
   %inc = sub nsw i32 %1, 1
   store i32 %inc, i32* %x, align 4, !tbaa !1
   call void @foo(i32* nonnull %x) #3
-  call void @llvm.lifetime.end(i64 4, i8* %0) #3
+  call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
   ret void
 }
 
@@ -41,13 +41,13 @@ define void @test2() #0 {
 entry:
   %x = alloca i32, align 4
   %0 = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start(i64 4, i8* %0) #3
+  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
   call void @foo(i32* nonnull %x) #3
   %1 = load i32, i32* %x, align 4, !tbaa !1
   %inc = or i32 %1, 1
   store i32 %inc, i32* %x, align 4, !tbaa !1
   call void @foo(i32* nonnull %x) #3
-  call void @llvm.lifetime.end(i64 4, i8* %0) #3
+  call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
   ret void
 }
 
@@ -57,13 +57,13 @@ define void @test3() #0 {
 entry:
   %x = alloca i32, align 4
   %0 = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start(i64 4, i8* %0) #3
+  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
   call void @foo(i32* nonnull %x) #3
   %1 = load i32, i32* %x, align 4, !tbaa !1
   %inc = and i32 %1, -2
   store i32 %inc, i32* %x, align 4, !tbaa !1
   call void @foo(i32* nonnull %x) #3
-  call void @llvm.lifetime.end(i64 4, i8* %0) #3
+  call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
   ret void
 }
 
@@ -73,13 +73,13 @@ define void @test4(i32 %a) #0 {
 entry:
   %x = alloca i32, align 4
   %0 = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start(i64 4, i8* %0) #3
+  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
   call void @foo(i32* nonnull %x) #3
   %1 = load i32, i32* %x, align 4, !tbaa !1
   %inc = add nsw i32 %1, %a
   store i32 %inc, i32* %x, align 4, !tbaa !1
   call void @foo(i32* nonnull %x) #3
-  call void @llvm.lifetime.end(i64 4, i8* %0) #3
+  call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
   ret void
 }
 
@@ -89,13 +89,13 @@ define void @test5(i32 %a) #0 {
 entry:
   %x = alloca i32, align 4
   %0 = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start(i64 4, i8* %0) #3
+  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
   call void @foo(i32* nonnull %x) #3
   %1 = load i32, i32* %x, align 4, !tbaa !1
   %inc = sub nsw i32 %1, %a
   store i32 %inc, i32* %x, align 4, !tbaa !1
   call void @foo(i32* nonnull %x) #3
-  call void @llvm.lifetime.end(i64 4, i8* %0) #3
+  call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
   ret void
 }
 
@@ -105,13 +105,13 @@ define void @test6(i32 %a) #0 {
 entry:
   %x = alloca i32, align 4
   %0 = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start(i64 4, i8* %0) #3
+  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
   call void @foo(i32* nonnull %x) #3
   %1 = load i32, i32* %x, align 4, !tbaa !1
   %inc = or i32 %1, %a
   store i32 %inc, i32* %x, align 4, !tbaa !1
   call void @foo(i32* nonnull %x) #3
-  call void @llvm.lifetime.end(i64 4, i8* %0) #3
+  call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
   ret void
 }
 
@@ -121,20 +121,20 @@ define void @test7(i32 %a) #0 {
 entry:
   %x = alloca i32, align 4
   %0 = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start(i64 4, i8* %0) #3
+  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
   call void @foo(i32* nonnull %x) #3
   %1 = load i32, i32* %x, align 4, !tbaa !1
   %inc = and i32 %1, %a
   store i32 %inc, i32* %x, align 4, !tbaa !1
   call void @foo(i32* nonnull %x) #3
-  call void @llvm.lifetime.end(i64 4, i8* %0) #3
+  call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #3
   ret void
 }
 
 declare void @foo(i32*) #2
 
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
 
 attributes #0 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv60" "target-features"="+hvx,-hvx-double" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { argmemonly nounwind }
diff --git a/test/CodeGen/Hexagon/newvalueSameReg.ll b/test/CodeGen/Hexagon/newvalueSameReg.ll
index 0fc4df22eb32..39f32fb2f9d5 100644
--- a/test/CodeGen/Hexagon/newvalueSameReg.ll
+++ b/test/CodeGen/Hexagon/newvalueSameReg.ll
@@ -12,8 +12,8 @@
 ; Test that we don't generate a new value compare if the operands are
 ; the same register.
 
-; CHECK-NOT: cmp.eq([[REG0:(r[0-9]+)]].new, [[REG0]])
-; CHECK: cmp.eq([[REG1:(r[0-9]+)]], [[REG1]])
+; CHECK-NOT: cmp.eq([[REG0:(r[0-9]+)]].new,[[REG0]])
+; CHECK: cmp.eq([[REG1:(r[0-9]+)]],[[REG1]])
 
 ; Function Attrs: nounwind
 declare void @fprintf(%struct._Dnk_filet.1* nocapture, i8* nocapture readonly, ...) #1
diff --git a/test/CodeGen/Hexagon/newvaluejump.ll b/test/CodeGen/Hexagon/newvaluejump.ll
index 3e1ee179573a..e1437f369c88 100644
--- a/test/CodeGen/Hexagon/newvaluejump.ll
+++ b/test/CodeGen/Hexagon/newvaluejump.ll
@@ -6,7 +6,7 @@
 
 define i32 @foo(i32 %a) nounwind {
 entry:
-; CHECK: if (cmp.eq(r{{[0-9]+}}.new, #0)) jump{{.}}
+; CHECK: if (cmp.eq(r{{[0-9]+}}.new,#0)) jump{{.}}
   %addr1 = alloca i32, align 4
   %addr2 = alloca i32, align 4
   %0 = load i32, i32* @i, align 4
diff --git a/test/CodeGen/Hexagon/newvaluejump2.ll b/test/CodeGen/Hexagon/newvaluejump2.ll
index a812a7d96659..4c897f0830f3 100644
--- a/test/CodeGen/Hexagon/newvaluejump2.ll
+++ b/test/CodeGen/Hexagon/newvaluejump2.ll
@@ -6,7 +6,7 @@
 @Reg = common global i32 0, align 4
 define i32 @main() nounwind {
 entry:
-; CHECK: if (cmp.gt(r{{[0-9]+}}, r{{[0-9]+}}.new)) jump:{{[t|nt]}} .LBB{{[0-9]+}}_{{[0-9]+}}
+; CHECK: if (cmp.gt(r{{[0-9]+}},r{{[0-9]+}}.new)) jump:{{[t|nt]}} .LBB{{[0-9]+}}_{{[0-9]+}}
   %Reg2 = alloca i32, align 4
   %0 = load i32, i32* %Reg2, align 4
   %1 = load i32, i32* @Reg, align 4
diff --git a/test/CodeGen/Hexagon/newvaluejump3.ll b/test/CodeGen/Hexagon/newvaluejump3.ll
new file mode 100644
index 000000000000..1e2e6c28c849
--- /dev/null
+++ b/test/CodeGen/Hexagon/newvaluejump3.ll
@@ -0,0 +1,79 @@
+; RUN: llc -march=hexagon -filetype=obj -o /dev/null < %s
+; REQUIRES: asserts
+
+; This crashed in the MC code emitter, because a new-value branch was created
+; with IMPLICIT_DEF as the producer.
+
+target triple = "hexagon"
+
+%type.0 = type { %type.1, [64 x i8] }
+%type.1 = type { [12 x i8], %type.2*, double }
+%type.2 = type { i16, i16, [1 x %type.3] }
+%type.3 = type { i32 }
+%type.4 = type { %type.2*, i32 }
+
+define hidden fastcc i8* @fred(%type.0* nocapture readonly %a0, i8* readonly %a1) unnamed_addr #2 {
+b2:
+  %v3 = load i8, i8* %a1, align 1
+  br i1 undef, label %b4, label %b24
+
+b4: ; preds = %b2
+  switch i8 %v3, label %b13 [
+    i8 25, label %b5
+    i8 26, label %b6
+    i8 28, label %b8
+    i8 27, label %b9
+    i8 43, label %b11
+    i8 110, label %b12
+  ]
+
+b5: ; preds = %b4
+  unreachable
+
+b6: ; preds = %b4
+  %v7 = getelementptr inbounds i8, i8* %a1, i32 2
+  br label %b16
+
+b8: ; preds = %b4
+  br label %b16
+
+b9: ; preds = %b4
+  %v10 = tail call fastcc i8* @fred(%type.0* undef, i8* undef)
+  br label %b24
+
+b11: ; preds = %b4
+  unreachable
+
+b12: ; preds = %b4
+  unreachable
+
+b13: ; preds = %b4
+  br label %b14
+
+b14: ; preds = %b13
+  br i1 undef, label %b15, label %b16
+
+b15: ; preds = %b14
+  unreachable
+
+b16: ; preds = %b20, %b14, %b8, %b6
+  %v17 = phi i8* [ %v21, %b20 ], [ undef, %b14 ], [ undef, %b8 ], [ %v7, %b6 ]
+  %v18 = phi i32 [ 0, %b20 ], [ undef, %b14 ], [ 0, %b8 ], [ 8, %b6 ]
+  %v19 = icmp sgt i32 %v18, 0
+  br i1 %v19, label %b20, label %b24
+
+b20: ; preds = %b16
+  %v21 = getelementptr inbounds i8, i8* %v17, i32 1
+  %v22 = load i8, i8* %v17, align 1
+  %v23 = icmp eq i8 %v22, undef
+  br i1 %v23, label %b16, label %b24
+
+b24: ; preds = %b20, %b16, %b9, %b2
+  %v25 = phi i8* [ null, %b2 ], [ null, %b9 ], [ %v17, %b16 ], [ null, %b20 ]
+  ret i8* %v25
+}
+
+attributes #0 = { argmemonly nounwind }
+attributes #1 = { nounwind readonly "target-cpu"="hexagonv60" "target-features"="+hvx,-hvx-double,-long-calls" }
+attributes #2 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,-hvx-double,-long-calls" }
+
diff --git a/test/CodeGen/Hexagon/opt-addr-mode.ll b/test/CodeGen/Hexagon/opt-addr-mode.ll
index 7cb437c327cf..705cd045ea30 100644
--- a/test/CodeGen/Hexagon/opt-addr-mode.ll
+++ b/test/CodeGen/Hexagon/opt-addr-mode.ll
@@ -2,10 +2,10 @@
 ; RUN: llc -march=hexagon -hexagon-small-data-threshold=0 -disable-hexagon-amodeopt=0 -hexagon-amode-growth-limit=4 < %s | FileCheck %s --check-prefix=CHECK-AMODE
 
 ; CHECK-NO-AMODE: [[REG0:(r[0-9]+)]] = ##global_2
-; CHECK-NO-AMODE: memw([[REG0]] + {{.*}}<<#2) =
+; CHECK-NO-AMODE: memw([[REG0]]+{{.*}}<<#2) =
 
 ; CHECK-AMODE: [[REG1:(r[0-9]+)]] = memw(##global_1)
-; CHECK-AMODE: memw([[REG1]]<<#2 + ##global_2) =
+; CHECK-AMODE: memw([[REG1]]<<#2+##global_2) =
 
 @global_1 = external global i32, align 4
 @global_2 = external global [128 x i32], align 8
diff --git a/test/CodeGen/Hexagon/opt-fabs.ll b/test/CodeGen/Hexagon/opt-fabs.ll
index 2ecbce310ade..9c94f853ba50 100644
--- a/test/CodeGen/Hexagon/opt-fabs.ll
+++ b/test/CodeGen/Hexagon/opt-fabs.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -mtriple=hexagon-unknown-elf -mcpu=hexagonv5 -hexagon-bit=0 < %s | FileCheck %s
 ; Optimize fabsf to clrbit in V5.
 
-; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = clrbit(r{{[0-9]+}},#31)
 
 define float @my_fabsf(float %x) nounwind {
 entry:
diff --git a/test/CodeGen/Hexagon/opt-fneg.ll b/test/CodeGen/Hexagon/opt-fneg.ll
index 978957865863..da496c588019 100644
--- a/test/CodeGen/Hexagon/opt-fneg.ll
+++ b/test/CodeGen/Hexagon/opt-fneg.ll
@@ -3,7 +3,7 @@
 
 define float @foo(float %x) nounwind {
 entry:
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#31)
   %x.addr = alloca float, align 4
   store float %x, float* %x.addr, align 4
   %0 = load float, float* %x.addr, align 4
@@ -13,14 +13,14 @@ entry:
 
 define float @bar(float %x) nounwind {
 entry:
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#31)
   %sub = fsub float -0.000000e+00, %x
   ret float %sub
 }
 
 define float @baz(float %x) nounwind {
 entry:
-; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #31)
+; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}},#31)
   %conv1 = fmul float %x, -1.000000e+00
   ret float %conv1
 }
diff --git a/test/CodeGen/Hexagon/opt-spill-volatile.ll b/test/CodeGen/Hexagon/opt-spill-volatile.ll
index 99dd4646d743..1c86716132fd 100644
--- a/test/CodeGen/Hexagon/opt-spill-volatile.ll
+++ b/test/CodeGen/Hexagon/opt-spill-volatile.ll
@@ -6,22 +6,22 @@ target triple = "hexagon"
 
 ; CHECK-LABEL: foo
 ; CHECK: memw(r29+#4) =
-; CHECK: = memw(r29 + #4)
+; CHECK: = memw(r29+#4)
 define i32 @foo(i32 %a) #0 {
 entry:
   %x = alloca i32, align 4
   %x.0.x.0..sroa_cast = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start(i64 4, i8* %x.0.x.0..sroa_cast)
+  call void @llvm.lifetime.start.p0i8(i64 4, i8* %x.0.x.0..sroa_cast)
   store volatile i32 0, i32* %x, align 4
   %call = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() #0
   %x.0.x.0. = load volatile i32, i32* %x, align 4
   %add = add nsw i32 %x.0.x.0., %a
-  call void @llvm.lifetime.end(i64 4, i8* %x.0.x.0..sroa_cast)
+  call void @llvm.lifetime.end.p0i8(i64 4, i8* %x.0.x.0..sroa_cast)
   ret i32 %add
 }
 
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
 
 declare i32 @bar(...) #0
diff --git a/test/CodeGen/Hexagon/pic-local.ll b/test/CodeGen/Hexagon/pic-local.ll
index 48b0096aa652..6544b3d32165 100644
--- a/test/CodeGen/Hexagon/pic-local.ll
+++ b/test/CodeGen/Hexagon/pic-local.ll
@@ -9,11 +9,11 @@ define internal void @f2() {
 }
 
 define void()* @get_f1() {
-  ; CHECK: r0 = add(pc, ##.Lf1@PCREL)
+  ; CHECK: r0 = add(pc,##.Lf1@PCREL)
   ret void()* @f1
 }
 
 define void()* @get_f2() {
-  ; CHECK: r0 = add(pc, ##f2@PCREL)
+  ; CHECK: r0 = add(pc,##f2@PCREL)
   ret void()* @f2
 }
diff --git a/test/CodeGen/Hexagon/pic-simple.ll b/test/CodeGen/Hexagon/pic-simple.ll
index 46d95204f2e7..aeb21ef7de1c 100644
--- a/test/CodeGen/Hexagon/pic-simple.ll
+++ b/test/CodeGen/Hexagon/pic-simple.ll
@@ -1,8 +1,8 @@
 ; RUN: llc -mtriple=hexagon-- -mcpu=hexagonv5 -relocation-model=pic < %s | FileCheck %s
 
-; CHECK: r{{[0-9]+}} = add({{pc|PC}}, ##_GLOBAL_OFFSET_TABLE_@PCREL)
-; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}{{.*}}+{{.*}}##src@GOT)
-; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}{{.*}}+{{.*}}##dst@GOT)
+; CHECK: r{{[0-9]+}} = add({{pc|PC}},##_GLOBAL_OFFSET_TABLE_@PCREL)
+; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}+##src@GOT)
+; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}+##dst@GOT)
 
 @dst = external global i32
 @src = external global i32
diff --git a/test/CodeGen/Hexagon/pic-static.ll b/test/CodeGen/Hexagon/pic-static.ll
index 66d7734f2cf2..95da5f060d72 100644
--- a/test/CodeGen/Hexagon/pic-static.ll
+++ b/test/CodeGen/Hexagon/pic-static.ll
@@ -1,8 +1,8 @@
 ; RUN: llc -mtriple=hexagon-- -mcpu=hexagonv5 -relocation-model=pic < %s | FileCheck %s
 
-; CHECK-DAG: r{{[0-9]+}} = add({{pc|PC}}, ##_GLOBAL_OFFSET_TABLE_@PCREL)
-; CHECK-DAG: r{{[0-9]+}} = add({{pc|PC}}, ##x@PCREL)
-; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}{{.*}}+{{.*}}##bar@GOT)
+; CHECK-DAG: r{{[0-9]+}} = add({{pc|PC}},##_GLOBAL_OFFSET_TABLE_@PCREL)
+; CHECK-DAG: r{{[0-9]+}} = add({{pc|PC}},##x@PCREL)
+; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}+##bar@GOT)
 
 @x = internal global i32 9, align 4
 @bar = external global i32*
diff --git a/test/CodeGen/Hexagon/pred-absolute-store.ll b/test/CodeGen/Hexagon/pred-absolute-store.ll
index 3e5e98270d53..2f19e9aeb7bb 100644
--- a/test/CodeGen/Hexagon/pred-absolute-store.ll
+++ b/test/CodeGen/Hexagon/pred-absolute-store.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
-; Check that we are able to predicate instructions with abosolute
+; Check that we are able to predicate instructions with absolute
 ; addressing mode.
-; CHECK: if ({{!*}}p{{[0-2]}}.new) memw(##gvar) = r{{[0-9]+}}
+; CHECK: if ({{!?}}p{{[0-3]}}) memw(##gvar) = r{{[0-9]+}}
 
 @gvar = external global i32
 define i32 @test2(i32 %a, i32 %b) nounwind {
diff --git a/test/CodeGen/Hexagon/predicate-logical.ll b/test/CodeGen/Hexagon/predicate-logical.ll
index be2bcb03d6a1..e3ba4d8643db 100644
--- a/test/CodeGen/Hexagon/predicate-logical.ll
+++ b/test/CodeGen/Hexagon/predicate-logical.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
-; CHECK: p{{[0-9]}} = or(p{{[0-9]}}, and(p{{[0-9]}}, p{{[0-9]}}))
+; CHECK: p{{[0-9]}} = or(p{{[0-9]}},and(p{{[0-9]}},p{{[0-9]}}))
 
 target triple = "hexagon"
 
diff --git a/test/CodeGen/Hexagon/predicate-rcmp.ll b/test/CodeGen/Hexagon/predicate-rcmp.ll
index 45daa88d7161..78991e0dbe70 100644
--- a/test/CodeGen/Hexagon/predicate-rcmp.ll
+++ b/test/CodeGen/Hexagon/predicate-rcmp.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
-; CHECK: cmp.eq(r{{[0-9]+}}, #0)
+; CHECK: cmp.eq(r{{[0-9]+}},#0)
 ; Check that the result of the builtin is not stored directly, i.e. that
 ; there is an instruction that converts it to {0,1} from {0,-1}. Right now
 ; the instruction is "r4 = !cmp.eq(r0, #0)".
diff --git a/test/CodeGen/Hexagon/rdf-copy-undef2.ll b/test/CodeGen/Hexagon/rdf-copy-undef2.ll
index 5f29d414ffc1..28bf4c67cd75 100644
--- a/test/CodeGen/Hexagon/rdf-copy-undef2.ll
+++ b/test/CodeGen/Hexagon/rdf-copy-undef2.ll
@@ -3,8 +3,8 @@
 
 target triple = "hexagon"
 
-declare void @llvm.lifetime.start(i64, i8* nocapture) #0
-declare void @llvm.lifetime.end(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
 declare signext i16 @cat(i16 signext) #1
 declare void @danny(i16 signext, i16 signext, i16 signext, i16* nocapture readonly, i16 signext, i16* nocapture) #1
 declare void @sammy(i16* nocapture readonly, i16* nocapture readonly, i16* nocapture readonly, i32* nocapture, i16* nocapture, i16 signext, i16 signext, i16 signext) #1
diff --git a/test/CodeGen/Hexagon/rdf-inline-asm-fixed.ll b/test/CodeGen/Hexagon/rdf-inline-asm-fixed.ll
index 7adf7e8a5355..222d8a2b2e14 100644
--- a/test/CodeGen/Hexagon/rdf-inline-asm-fixed.ll
+++ b/test/CodeGen/Hexagon/rdf-inline-asm-fixed.ll
@@ -13,18 +13,18 @@ define i32 @foo(i32 %status) #0 {
 entry:
   %arg1 = alloca i32, align 4
   %0 = bitcast i32* %arg1 to i8*
-  call void @llvm.lifetime.start(i64 4, i8* %0) #2
+  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #2
   store i32 %status, i32* %arg1, align 4, !tbaa !1
   %1 = call i32 asm sideeffect "r0 = #$1\0Ar1 = $2\0Ar2 = $4\0Atrap0 (#0)\0A$0 = r0", "=r,i,r,*m,r,~{r0},~{r1},~{r2}"(i32 24, i32* nonnull %arg1, i32* nonnull %arg1, i32 %status) #2, !srcloc !5
-  call void @llvm.lifetime.end(i64 4, i8* %0) #2
+  call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #2
   ret i32 %1
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
 
 attributes #0 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv5" "target-features"="-hvx,-hvx-double" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { argmemonly nounwind }
diff --git a/test/CodeGen/Hexagon/rdf-phi-up.ll b/test/CodeGen/Hexagon/rdf-phi-up.ll
index 28f4c90c174d..d4e726471238 100644
--- a/test/CodeGen/Hexagon/rdf-phi-up.ll
+++ b/test/CodeGen/Hexagon/rdf-phi-up.ll
@@ -7,8 +7,8 @@ target triple = "hexagon"
 
 %struct.0 = type { i32, i16, i8* }
 
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
 
 define i32 @fred(i8* readonly %p0, i32* %p1) local_unnamed_addr #0 {
 entry:
@@ -32,7 +32,7 @@ if.then3: ; preds = %if.then
 
 if.else: ; preds = %lor.lhs.false
   %v6 = bitcast i16* %v0 to i8*
-  call void @llvm.lifetime.start(i64 2, i8* nonnull %v6) #0
+  call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %v6) #0
   store i16 0, i16* %v0, align 2
   %v7 = call i32 @foo(%struct.0* nonnull %v3, i16* nonnull %v0) #0
   %v8 = icmp eq i32* %p1, null
@@ -45,7 +45,7 @@ if.then6: ; preds = %if.else
   br label %if.end7
 
 if.end7: ; preds = %if.else, %if.then6
-  call void @llvm.lifetime.end(i64 2, i8* nonnull %v6) #0
+  call void @llvm.lifetime.end.p0i8(i64 2, i8* nonnull %v6) #0
   br label %cleanup
 
 cleanup: ; preds = %if.then3, %if.then,
diff --git a/test/CodeGen/Hexagon/readcyclecounter.ll b/test/CodeGen/Hexagon/readcyclecounter.ll
new file mode 100644
index 000000000000..0a60c94b019c
--- /dev/null
+++ b/test/CodeGen/Hexagon/readcyclecounter.ll
@@ -0,0 +1,10 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; CHECK-LABEL: test_readcyclecounter
+; CHECK: r1:0 = c15:14
+define i64 @test_readcyclecounter() nounwind {
+  %t0 = call i64 @llvm.readcyclecounter()
+  ret i64 %t0
+}
+
+declare i64 @llvm.readcyclecounter()
diff --git a/test/CodeGen/Hexagon/regalloc-block-overlap.ll b/test/CodeGen/Hexagon/regalloc-block-overlap.ll
new file mode 100644
index 000000000000..c98fcb6a9f04
--- /dev/null
+++ b/test/CodeGen/Hexagon/regalloc-block-overlap.ll
@@ -0,0 +1,143 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Check for a sane output. This testcase used to cause a crash.
+; CHECK: vlut16
+
+target triple = "hexagon-unknown--elf"
+
+declare void @halide_malloc() local_unnamed_addr #0
+
+declare <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32>) #1
+declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #1
+declare <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32>, <32 x i32>) #1
+declare <32 x i32> @llvm.hexagon.V6.vmpyiewuh.128B(<32 x i32>, <32 x i32>) #1
+declare <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32>, <64 x i32>) #1
+declare <32 x i32> @llvm.hexagon.V6.vasrwhsat.128B(<32 x i32>, <32 x i32>, i32) #1
+declare <64 x i32> @llvm.hexagon.V6.vlutvwh.128B(<32 x i32>, <32 x i32>, i32) #1
+declare <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32>, <32 x i32>, <32 x i32>, i32) #1
+
+define hidden void @fred() #0 {
+b0:
+  %v1 = ashr i32 undef, 7
+  %v2 = shl nsw i32 %v1, 7
+  switch i32 undef, label %b7 [
+    i32 1, label %b3
+    i32 2, label %b5
+    i32 3, label %b6
+  ]
+
+b3: ; preds = %b0
+  unreachable
+
+b4: ; preds = %b7
+  switch i32 undef, label %b9 [
+    i32 1, label %b8
+    i32 2, label %b10
+    i32 3, label %b11
+  ]
+
+b5: ; preds = %b0
+  unreachable
+
+b6: ; preds = %b0
+  unreachable
+
+b7: ; preds = %b0
+  br label %b4
+
+b8: ; preds = %b4
+  br label %b12
+
+b9: ; preds = %b4
+  br label %b12
+
+b10: ; preds = %b4
+  br label %b12
+
+b11: ; preds = %b4
+  br label %b12
+
+b12: ; preds = %b11, %b10, %b9, %b8
+  br label %b13
+
+b13: ; preds = %b14, %b12
+  br label %b14
+
+b14: ; preds = %b13
+  br i1 undef, label %b15, label %b13
+
+b15: ; preds = %b14
+  br label %b16
+
+b16: ; preds = %b15
+  br i1 undef, label %b17, label %b18
+
+b17: ; preds = %b16
+  unreachable
+
+b18: ; preds = %b16
+  tail call void @halide_malloc()
+  br label %b19
+
+b19: ; preds = %b18
+  br i1 undef, label %b20, label %b21
+
+b20: ; preds = %b19
+  br label %b32
+
+b21: ; preds = %b38, %b19
+  %v22 = zext i32 %v2 to i64
+  %v23 = lshr i64 %v22, 31
+  %v24 = shl nuw nsw i64 %v23, 1
+  %v25 = or i64 %v24, 0
+  %v26 = icmp ult i64 undef, 2147483648
+  %v27 = mul nuw nsw i64 %v25, 3
+  %v28 = add nuw nsw i64 %v27, 0
+  %v29 = and i64 %v28, 133143986176
+  %v30 = icmp eq i64 %v29, 0
+  %v31 = and i1 %v26, %v30
+  br label %b39
+
+b32: ; preds = %b20
+  %v33 = zext i32 %v2 to i64
+  %v34 = mul nuw nsw i64 %v33, 12
+  %v35 = icmp ult i64 %v34, 2147483648
+  %v36 = and i1 %v35, undef
+  br i1 %v36, label %b38, label %b37
+
+b37: ; preds = %b32
+  ret void
+
+b38: ; preds = %b32
+  tail call void @halide_malloc()
+  br label %b21
+
+b39: ; preds = %b42, %b21
+  br label %b40
+
+b40: ; preds = %b39
+  br i1 %v31, label %b42, label %b41
+
+b41: ; preds = %b40
+  unreachable
+
+b42: ; preds = %b40
+  %v43 = tail call <64 x i32> @llvm.hexagon.V6.vlutvwh.128B(<32 x i32> undef, <32 x i32> undef, i32 0)
+  %v44 = tail call <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32> %v43, <32 x i32> undef, <32 x i32> undef, i32 1)
+  %v45 = tail call <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32> %v44, <32 x i32> undef, <32 x i32> undef, i32 2)
+  %v46 = tail call <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32> %v45, <32 x i32> undef, <32 x i32> undef, i32 3)
+  %v47 = tail call <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32> %v46, <32 x i32> undef, <32 x i32> undef, i32 4)
+  %v48 = tail call <64 x i32> @llvm.hexagon.V6.vlutvwh.oracc.128B(<64 x i32> %v47, <32 x i32> undef, <32 x i32> undef, i32 5)
+  %v49 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v48)
+  %v50 = tail call <32 x i32> @llvm.hexagon.V6.vmpyiewuh.128B(<32 x i32> undef, <32 x i32> %v49) #2
+  %v51 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> undef, <32 x i32> %v50) #2
+  %v52 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v51, <64 x i32> undef) #2
+  %v53 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v52) #2
+  %v54 = tail call <32 x i32> @llvm.hexagon.V6.vasrwhsat.128B(<32 x i32> %v53, <32 x i32> undef, i32 15) #2
+  store <32 x i32> %v54, <32 x i32>* undef, align 128
+  br label %b39
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx-double" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }
diff --git a/test/CodeGen/Hexagon/ret-struct-by-val.ll b/test/CodeGen/Hexagon/ret-struct-by-val.ll
index 26ed2ff36f77..60a97bcccfc5 100644
--- a/test/CodeGen/Hexagon/ret-struct-by-val.ll
+++ b/test/CodeGen/Hexagon/ret-struct-by-val.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: r0 = add(r0, r1)
+; CHECK: r0 = add(r0,r1)
 
 ; Allow simple structures to be returned by value.
diff --git a/test/CodeGen/Hexagon/runtime-stkchk.ll b/test/CodeGen/Hexagon/runtime-stkchk.ll
index a4e8f117679e..38aa8726d19c 100644
--- a/test/CodeGen/Hexagon/runtime-stkchk.ll
+++ b/test/CodeGen/Hexagon/runtime-stkchk.ll
@@ -6,12 +6,12 @@ define i32 @foo_1(i32 %n) #0 {
 entry:
   %local = alloca [1024 x i32], align 8
   %0 = bitcast [1024 x i32]* %local to i8*
-  call void @llvm.lifetime.start(i64 4096, i8* %0) #1
+  call void @llvm.lifetime.start.p0i8(i64 4096, i8* %0) #1
   %arraydecay = getelementptr inbounds [1024 x i32], [1024 x i32]* %local, i32 0, i32 0
   call void @baz_1(i32* %arraydecay) #3
   %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* %local, i32 0, i32 %n
   %1 = load i32, i32* %arrayidx, align 4
-  call void @llvm.lifetime.end(i64 4096, i8* %0) #1
+  call void @llvm.lifetime.end.p0i8(i64 4096, i8* %0) #1
   ret i32 %1
 }
 
@@ -21,21 +21,21 @@ define i32 @foo_2(i32 %n, i32* %y) #0 {
 entry:
   %local = alloca [2048 x i32], align 8
   %0 = bitcast [2048 x i32]* %local to i8*
-  call void @llvm.lifetime.start(i64 8192, i8* %0) #1
+  call void @llvm.lifetime.start.p0i8(i64 8192, i8* %0) #1
   %arraydecay = getelementptr inbounds [2048 x i32], [2048 x i32]* %local, i32 0, i32 0
   call void @baz_2(i32* %y, i32* %arraydecay) #3
   %1 = load i32, i32* %y, align 4
   %add = add nsw i32 %n, %1
   %arrayidx = getelementptr inbounds [2048 x i32], [2048 x i32]* %local, i32 0, i32 %add
   %2 = load i32, i32* %arrayidx, align 4
-  call void @llvm.lifetime.end(i64 8192, i8* %0) #1
+  call void @llvm.lifetime.end.p0i8(i64 8192, i8* %0) #1
   ret i32 %2
 }
 
 declare void @baz_1(i32*) #2
 declare void @baz_2(i32*, i32*) #2
-declare void @llvm.lifetime.start(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
 
 attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { nounwind }
diff --git a/test/CodeGen/Hexagon/section_7275.ll b/test/CodeGen/Hexagon/section_7275.ll
index c2b80ae3f69d..1806f1e9c844 100644
--- a/test/CodeGen/Hexagon/section_7275.ll
+++ b/test/CodeGen/Hexagon/section_7275.ll
@@ -8,13 +8,13 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 ; CHECK-LABEL: foo
 ; CHECK-DAG: memw(##b)
-; CHECK-DAG: memw(#d)
+; CHECK-DAG: memw(gp+#d)
 ; CHECK-DAG: memw(##g)
-; CHECK-DAG: memw(#h)
-; CHECK-DAG: memw(#f)
+; CHECK-DAG: memw(gp+#h)
+; CHECK-DAG: memw(gp+#f)
 ; CHECK-DAG: memw(##e)
-; CHECK-DAG: memw(#a)
-; CHECK-DAG: memw(#c)
+; CHECK-DAG: memw(gp+#a)
+; CHECK-DAG: memw(gp+#c)
 
 ; CHECK-LABEL: bar
 ; CHECK: memw(##b)
diff --git a/test/CodeGen/Hexagon/signed_immediates.ll b/test/CodeGen/Hexagon/signed_immediates.ll
index a4766313cc68..ad4aa2596607 100644
--- a/test/CodeGen/Hexagon/signed_immediates.ll
+++ b/test/CodeGen/Hexagon/signed_immediates.ll
@@ -33,7 +33,7 @@ define i64* @foo4(i64* %a, i64 %b) {
 }
 
 ; s6Ext
-; CHECK: if (p0.new) memw(r0+#0)=#-1
+; CHECK: if (p0.new) memw(r0+#0) = #-1
 define void @foo5(i32* %a, i1 %b) {
   br i1 %b, label %x, label %y
 x:
@@ -44,7 +44,7 @@ y:
 }
 
 ; s10Ext
-; CHECK: p0 = cmp.eq(r0, #-1)
+; CHECK: p0 = cmp.eq(r0,#-1)
 define i1 @foo7(i32 %a) {
   %b = icmp eq i32 %a, -1
   ret i1 %b
@@ -96,4 +96,4 @@ y:
 ; CHECK: r0 = #-2
 define i32 @foo13() {
   ret i32 -2
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/Hexagon/stack-align1.ll b/test/CodeGen/Hexagon/stack-align1.ll
index 4efa70f59854..aefd16594f06 100644
--- a/test/CodeGen/Hexagon/stack-align1.ll
+++ b/test/CodeGen/Hexagon/stack-align1.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -O0 -march=hexagon < %s | FileCheck %s
-; CHECK: and(r29, #-32)
-; CHECK-DAG: add(r29, #0)
-; CHECK-DAG: add(r29, #28)
+; CHECK: and(r29,#-32)
+; CHECK-DAG: add(r29,#0)
+; CHECK-DAG: add(r29,#28)
 
 target triple = "hexagon-unknown-unknown"
 
diff --git a/test/CodeGen/Hexagon/stack-align2.ll b/test/CodeGen/Hexagon/stack-align2.ll
index 1bbd57820325..042e4097c56a 100644
--- a/test/CodeGen/Hexagon/stack-align2.ll
+++ b/test/CodeGen/Hexagon/stack-align2.ll
@@ -1,9 +1,9 @@
 ; RUN: llc -O0 -march=hexagon < %s | FileCheck %s
-; CHECK: and(r29, #-128)
-; CHECK-DAG: add(r29, #0)
-; CHECK-DAG: add(r29, #64)
-; CHECK-DAG: add(r29, #96)
-; CHECK-DAG: add(r29, #124)
+; CHECK: and(r29,#-128)
+; CHECK-DAG: add(r29,#0)
+; CHECK-DAG: add(r29,#64)
+; CHECK-DAG: add(r29,#96)
+; CHECK-DAG: add(r29,#124)
 
 target triple = "hexagon-unknown-unknown"
 
diff --git a/test/CodeGen/Hexagon/stack-alloca1.ll b/test/CodeGen/Hexagon/stack-alloca1.ll
index 00e9e051aebb..b38b8846d26f 100644
--- a/test/CodeGen/Hexagon/stack-alloca1.ll
+++ b/test/CodeGen/Hexagon/stack-alloca1.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -O0 -march=hexagon < %s | FileCheck %s
-; CHECK: sub(r29, r[[REG:[0-9]+]])
+; CHECK: sub(r29,r[[REG:[0-9]+]])
 ; CHECK: r29 = r[[REG]]
 
 target triple = "hexagon-unknown-unknown"
diff --git a/test/CodeGen/Hexagon/stack-alloca2.ll b/test/CodeGen/Hexagon/stack-alloca2.ll
index ad5e13166aa2..b211be0c0fff 100644
--- a/test/CodeGen/Hexagon/stack-alloca2.ll
+++ b/test/CodeGen/Hexagon/stack-alloca2.ll
@@ -1,8 +1,8 @@
 ; RUN: llc -O0 -march=hexagon < %s | FileCheck %s
-; CHECK-DAG: r[[AP:[0-9]+]] = and(r30, #-32)
-; CHECK-DAG: r1 = add(r[[AP]], #-32)
+; CHECK-DAG: r[[AP:[0-9]+]] = and(r30,#-32)
+; CHECK-DAG: r1 = add(r[[AP]],#-32)
 
-; CHECK-DAG: sub(r29, r[[SP:[0-9]+]])
+; CHECK-DAG: sub(r29,r[[SP:[0-9]+]])
 ; CHECK-DAG: r29 = r[[SP]]
 
 target triple = "hexagon-unknown-unknown"
diff --git a/test/CodeGen/Hexagon/static.ll b/test/CodeGen/Hexagon/static.ll
index c3237b748881..15aab434158c 100644
--- a/test/CodeGen/Hexagon/static.ll
+++ b/test/CodeGen/Hexagon/static.ll
@@ -4,9 +4,9 @@
 @acc = external global i32
 @val = external global i32
 
-; CHECK-DAG: memw(#num)
-; CHECK-DAG: memw(#acc)
-; CHECK-DAG: memw(#val)
+; CHECK-DAG: memw(gp+#num)
+; CHECK-DAG: memw(gp+#acc)
+; CHECK-DAG: memw(gp+#val)
 
 define void @foo() nounwind {
 entry:
diff --git a/test/CodeGen/Hexagon/store-shift.ll b/test/CodeGen/Hexagon/store-shift.ll
index 866930990baa..981071a0181e 100644
--- a/test/CodeGen/Hexagon/store-shift.ll
+++ b/test/CodeGen/Hexagon/store-shift.ll
@@ -1,12 +1,12 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 
 ; CHECK-DAG: r[[BASE:[0-9]+]] += add
-; CHECK-DAG: r[[IDX0:[0-9]+]] = add(r2, #5)
-; CHECK-DAG: r[[IDX1:[0-9]+]] = add(r2, #6)
-; CHECK-DAG: memw(r0 + r[[IDX0]]<<#2) = r3
-; CHECK-DAG: memw(r0 + r[[IDX1]]<<#2) = r3
-; CHECK-DAG: memw(r[[BASE]] + r[[IDX0]]<<#2) = r[[IDX0]]
-; CHECK-DAG: memw(r[[BASE]] + r[[IDX1]]<<#2) = r[[IDX0]]
+; CHECK-DAG: r[[IDX0:[0-9]+]] = add(r2,#5)
+; CHECK-DAG: r[[IDX1:[0-9]+]] = add(r2,#6)
+; CHECK-DAG: memw(r0+r[[IDX0]]<<#2) = r3
+; CHECK-DAG: memw(r0+r[[IDX1]]<<#2) = r3
+; CHECK-DAG: memw(r[[BASE]]+r[[IDX0]]<<#2) = r[[IDX0]]
+; CHECK-DAG: memw(r[[BASE]]+r[[IDX1]]<<#2) = r[[IDX0]]
 
 target triple = "hexagon"
 
diff --git a/test/CodeGen/Hexagon/sube.ll b/test/CodeGen/Hexagon/sube.ll
index 7bc00759303f..2b09a998eff0 100644
--- a/test/CodeGen/Hexagon/sube.ll
+++ b/test/CodeGen/Hexagon/sube.ll
@@ -1,29 +1,26 @@
-; RUN: llc -march=hexagon -disable-hsdr -hexagon-expand-condsets=0 -hexagon-bit=0 -disable-post-ra < %s | FileCheck %s
+; RUN: llc -march=hexagon -hexagon-expand-condsets=0 < %s | FileCheck %s
 
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0, #0)
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(#0, #1)
-; CHECK: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = sub(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+}} = mux(p{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = sub(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = combine(r{{[0-9]+}}, r{{[0-9]+}})
+; CHECK-DAG: r{{[0-9]+:[0-9]+}} = sub(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK-DAG: r{{[0-9]+:[0-9]+}} = sub(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK-DAG: p{{[0-9]+}} = cmp.gtu(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
+; CHECK-DAG: r{{[0-9]+}} = mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}})
+; CHECK-DAG: r{{[0-9]+}} = mux(p{{[0-9]+}},r{{[0-9]+}},r{{[0-9]+}})
 
-define void @check_sube_subc(i64 %AL, i64 %AH, i64 %BL, i64 %BH, i64* %RL, i64* %RH) {
-entry:
-  %tmp1 = zext i64 %AL to i128
-  %tmp23 = zext i64 %AH to i128
-  %tmp4 = shl i128 %tmp23, 64
-  %tmp5 = or i128 %tmp4, %tmp1
-  %tmp67 = zext i64 %BL to i128
-  %tmp89 = zext i64 %BH to i128
-  %tmp11 = shl i128 %tmp89, 64
-  %tmp12 = or i128 %tmp11, %tmp67
-  %tmp15 = sub i128 %tmp5, %tmp12
-  %tmp1617 = trunc i128 %tmp15 to i64
-  store i64 %tmp1617, i64* %RL
-  %tmp21 = lshr i128 %tmp15, 64
-  %tmp2122 = trunc i128 %tmp21 to i64
-  store i64 %tmp2122, i64* %RH
-  ret void
+define void @check_sube_subc(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64* %a4, i64* %a5) {
+b6:
+  %v7 = zext i64 %a0 to i128
+  %v8 = zext i64 %a1 to i128
+  %v9 = shl i128 %v8, 64
+  %v10 = or i128 %v7, %v9
+  %v11 = zext i64 %a2 to i128
+  %v12 = zext i64 %a3 to i128
+  %v13 = shl i128 %v12, 64
+  %v14 = or i128 %v11, %v13
+  %v15 = sub i128 %v10, %v14
+  %v16 = lshr i128 %v15, 64
+  %v17 = trunc i128 %v15 to i64
+  %v18 = trunc i128 %v16 to i64
+  store i64 %v17, i64* %a4
+  store i64 %v18, i64* %a5
+  ret void
 }
diff --git a/test/CodeGen/Hexagon/subi-asl.ll b/test/CodeGen/Hexagon/subi-asl.ll
index f0b27e828f50..d7610ceb62ac 100644
--- a/test/CodeGen/Hexagon/subi-asl.ll
+++ b/test/CodeGen/Hexagon/subi-asl.ll
@@ -3,11 +3,11 @@
 ; Check if S4_subi_asl_ri is being generated correctly.
 
 ; CHECK-LABEL: yes_sub_asl
-; CHECK: [[REG1:(r[0-9]+)]] = sub(#0, asl([[REG1]], #1))
+; CHECK: [[REG1:(r[0-9]+)]] = sub(#0,asl([[REG1]],#1))
 
 ; CHECK-LABEL: no_sub_asl
-; CHECK: [[REG2:(r[0-9]+)]] = asl(r{{[0-9]+}}, #1)
-; CHECK: r{{[0-9]+}} = sub([[REG2]], r{{[0-9]+}})
+; CHECK: [[REG2:(r[0-9]+)]] = asl(r{{[0-9]+}},#1)
+; CHECK: r{{[0-9]+}} = sub([[REG2]],r{{[0-9]+}})
 
 %struct.rtx_def = type { i16, i8 }
 
diff --git a/test/CodeGen/Hexagon/swp-const-tc.ll b/test/CodeGen/Hexagon/swp-const-tc.ll
index 3113094d2ba3..c07d23623eba 100644
--- a/test/CodeGen/Hexagon/swp-const-tc.ll
+++ b/test/CodeGen/Hexagon/swp-const-tc.ll
@@ -4,7 +4,7 @@
 ; of computing a new LC0 value.
 
 ; CHECK-LABEL: @test
-; CHECK: loop0(.LBB0_1, #998)
+; CHECK: loop0(.LBB0_1,#998)
 
 define i32 @test(i32* %A, i32* %B, i32 %count) {
 entry:
diff --git a/test/CodeGen/Hexagon/swp-matmul-bitext.ll b/test/CodeGen/Hexagon/swp-matmul-bitext.ll
index db5bb96d0bc9..9c425ae6a098 100644
--- a/test/CodeGen/Hexagon/swp-matmul-bitext.ll
+++ b/test/CodeGen/Hexagon/swp-matmul-bitext.ll
@@ -11,7 +11,7 @@
 ; CHECK: [[REG0:(r[0-9]+)]] = memh
 ; CHECK: [[REG1:(r[0-9]+)]] = memh
 ; CHECK: += mpyi
-; CHECK: [[REG2]] = mpyi([[REG0]], [[REG1]])
+; CHECK: [[REG2]] = mpyi([[REG0]],[[REG1]])
 ; CHECK: endloop0
 
 %union_h2_sem_t = type { i32 }
diff --git a/test/CodeGen/Hexagon/swp-max.ll b/test/CodeGen/Hexagon/swp-max.ll
index 038138ff2561..26238ea6fb37 100644
--- a/test/CodeGen/Hexagon/swp-max.ll
+++ b/test/CodeGen/Hexagon/swp-max.ll
@@ -15,8 +15,8 @@ for.body.preheader:
 
 ; CHECK: loop0(.LBB0_[[LOOP:.]],
 ; CHECK: .LBB0_[[LOOP]]:
-; CHECK: [[REG1:(r[0-9]+)]] = max(r{{[0-9]+}}, [[REG1]])
-; CHECK: [[REG0:(r[0-9]+)]] = add([[REG2:(r[0-9]+)]], [[REG0]])
+; CHECK: [[REG1:(r[0-9]+)]] = max(r{{[0-9]+}},[[REG1]])
+; CHECK: [[REG0:(r[0-9]+)]] = add([[REG2:(r[0-9]+)]],[[REG0]])
 ; CHECK: [[REG2]] = memw
 ; CHECK: endloop0
 
diff --git a/test/CodeGen/Hexagon/swp-multi-loops.ll b/test/CodeGen/Hexagon/swp-multi-loops.ll
index 56e8c6511000..fc2576af8ac2 100644
--- a/test/CodeGen/Hexagon/swp-multi-loops.ll
+++ b/test/CodeGen/Hexagon/swp-multi-loops.ll
@@ -5,15 +5,15 @@
 ; Check if the first loop is pipelined.
 ; CHECK: loop0(.LBB0_[[LOOP:.]],
 ; CHECK: .LBB0_[[LOOP]]:
-; CHECK: add(r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK-NEXT: memw(r{{[0-9]+}}{{.*}}++{{.*}}#4)
+; CHECK: add(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK-NEXT: memw(r{{[0-9]+}}++#4)
 ; CHECK-NEXT: endloop0
 
 ; Check if the second loop is pipelined.
 ; CHECK: loop0(.LBB0_[[LOOP:.]],
 ; CHECK: .LBB0_[[LOOP]]:
-; CHECK: add(r{{[0-9]+}}, r{{[0-9]+}})
-; CHECK-NEXT: memw(r{{[0-9]+}}{{.*}}++{{.*}}#4)
+; CHECK: add(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK-NEXT: memw(r{{[0-9]+}}++#4)
 ; CHECK-NEXT: endloop0
 
 define i32 @test(i32* %a, i32 %n, i32 %l) {
diff --git a/test/CodeGen/Hexagon/swp-stages4.ll b/test/CodeGen/Hexagon/swp-stages4.ll
new file mode 100644
index 000000000000..f58e83203154
--- /dev/null
+++ b/test/CodeGen/Hexagon/swp-stages4.ll
@@ -0,0 +1,94 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5 -enable-pipeliner -pipeliner-max-stages=2 -disable-block-placement=0 -hexagon-bit=0 < %s | FileCheck %s
+
+; Test that we rename registers correctly for multiple stages when there is a
+; Phi that depends upon another Phi.
+
+; CHECK: = and
+; CHECK: = and
+; CHECK: = and
+; CHECK: [[REG0:(r[0-9]+)]] = and([[REG1:(r[0-9]+)]],#255)
+; CHECK-NOT: [[REG0]] = and([[REG1]],#255)
+; CHECK: loop0(.LBB0_[[LOOP:.]],
+; CHECK: .LBB0_[[LOOP]]:
+; CHECK: [[REG0]] += add
+; CHECK: = and
+; CHECK: = and
+; CHECK: [[REG0]] = and
+; CHECK: endloop
+
+; Function Attrs: nounwind
+define void @test(i8* noalias nocapture %src, i32 %srcWidth, i32 %srcHeight, i32 %srcStride, i8* noalias nocapture %dst, i32 %dstStride) #0 {
+entry:
+  %sub = add i32 %srcWidth, -1
+  %sub1 = add i32 %srcHeight, -1
+  %add.ptr = getelementptr inbounds i8, i8* %src, i32 %srcStride
+  %add.ptr.sum = mul i32 %srcStride, 2
+  %add.ptr2 = getelementptr inbounds i8, i8* %src, i32 %add.ptr.sum
+  br label %for.body.lr.ph
+
+for.body.lr.ph:
+  %0 = add i32 %srcHeight, -2
+  %1 = mul i32 %0, %dstStride
+  %2 = mul i32 %0, %srcStride
+  %3 = mul i32 %sub1, %srcStride
+  br label %for.cond
+
+for.cond:
+  %scevgep = getelementptr i8, i8* %dst, i32 %1
+  %scevgep220 = getelementptr i8, i8* %src, i32 %2
+  %scevgep221 = getelementptr i8, i8* %src, i32 %3
+  %arrayidx6 = getelementptr inbounds i8, i8* %src, i32 1
+  %add11 = add i32 %srcStride, 1
+  %arrayidx12 = getelementptr inbounds i8, i8* %src, i32 %add11
+  br label %for.body75.preheader
+
+for.body75.preheader:
+  %sri = load i8, i8* %arrayidx6, align 1
+  %sri224 = load i8, i8* %src, align 1
+  %sri227 = load i8, i8* %arrayidx12, align 1
+  %sri229 = load i8, i8* %add.ptr, align 1
+  br label %for.body75
+
+for.body75:
+  %j.0211 = phi i32 [ %add82, %for.body75 ], [ 1, %for.body75.preheader ]
+  %sr = phi i8 [ %4, %for.body75 ], [ %sri, %for.body75.preheader ]
+  %sr225 = phi i8 [ %sr, %for.body75 ], [ %sri224, %for.body75.preheader ]
+  %sr230 = phi i8 [ %5, %for.body75 ], [ %sri227, %for.body75.preheader ]
+  %sr231 = phi i8 [ %sr230, %for.body75 ], [ %sri229, %for.body75.preheader ]
+  %conv78 = zext i8 %sr225 to i32
+  %conv80 = zext i8 %sr to i32
+  %add81 = add nsw i32 %conv80, %conv78
+  %add82 = add i32 %j.0211, 1
+  %arrayidx83 = getelementptr inbounds i8, i8* %src, i32 %add82
+  %4 = load i8, i8* %arrayidx83, align 1, !tbaa !0
+  %conv84 = zext i8 %4 to i32
+  %add85 = add nsw i32 %add81, %conv84
+  %conv88 = zext i8 %sr231 to i32
+  %add89 = add nsw i32 %add85, %conv88
+  %conv91 = zext i8 %sr230 to i32
+  %add92 = add nsw i32 %add89, %conv91
+  %add.ptr.sum208 = add i32 %add82, %srcStride
+  %arrayidx94 = getelementptr inbounds i8, i8* %src, i32 %add.ptr.sum208
+  %5 = load i8, i8* %arrayidx94, align 1, !tbaa !0
+  %conv95 = zext i8 %5 to i32
+  %add96 = add nsw i32 %add92, %conv95
+  %mul97 = mul nsw i32 %add96, 7282
+  %add98 = add nsw i32 %mul97, 32768
+  %shr99209 = lshr i32 %add98, 16
+  %conv100 = trunc i32 %shr99209 to i8
+  %arrayidx101 = getelementptr inbounds i8, i8* %dst, i32 %j.0211
+  store i8 %conv100, i8* %arrayidx101, align 1, !tbaa !0
+  %exitcond = icmp eq i32 %add82, %sub
+  br i1 %exitcond, label %for.end104.loopexit, label %for.body75
+
+for.end104.loopexit:
+  br label %for.end104
+
+for.end104:
+  ret void
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!0 = !{!"omnipotent char", !1}
+!1 = !{!"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/Hexagon/swp-stages5.ll b/test/CodeGen/Hexagon/swp-stages5.ll
new file mode 100644
index 000000000000..fdfb2101cd36
--- /dev/null
+++ b/test/CodeGen/Hexagon/swp-stages5.ll
@@ -0,0 +1,78 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5 -enable-pipeliner -pipeliner-max-stages=2 -hexagon-bit=0 < %s | FileCheck %s
+
+; Very similar to swp-stages4.ll, but the pipelined schedule is a little
+; different.
+
+; CHECK: = memub(r{{[0-9]+}}++#1)
+; CHECK-DAG: [[REG0:(r[0-9]+)]] = memub(r{{[0-9]+}}++#1)
+; CHECK-DAG: loop0(.LBB0_[[LOOP:.]],
+; CHECK: .LBB0_[[LOOP]]:
+; CHECK: = and([[REG0]],#255)
+; CHECK: [[REG0]]{{[:0-9]*}} =
+; CHECK: endloop
+
+define void @fred(i8* noalias nocapture %src, i32 %srcWidth, i32 %srcHeight, i32 %srcStride, i8* noalias nocapture %dst, i32 %dstStride) #0 {
+entry:
+  %sub = add i32 %srcWidth, -1
+  %sub1 = add i32 %srcHeight, -1
+  %add.ptr = getelementptr inbounds i8, i8* %src, i32 %srcStride
+  %add.ptr.sum = mul i32 %srcStride, 2
+  %add.ptr2 = getelementptr inbounds i8, i8* %src, i32 %add.ptr.sum
+  %cmp212 = icmp ugt i32 %sub1, 1
+  br i1 %cmp212, label %for.body.lr.ph, label %for.end
+
+for.body.lr.ph:
+  br label %for.body74.preheader
+
+for.body74.preheader:
+  %0 = load i8, i8* %add.ptr, align 1, !tbaa !0
+  %arrayidx40 = getelementptr inbounds i8, i8* %add.ptr, i32 1
+  %1 = load i8, i8* %arrayidx40, align 1, !tbaa !0
+  %2 = load i8, i8* %add.ptr, align 1, !tbaa !0
+  %arrayidx46 = getelementptr inbounds i8, i8* %add.ptr, i32 1
+  %3 = load i8, i8* %arrayidx46, align 1, !tbaa !0
+  br label %for.body74
+
+for.body74:
+  %4 = phi i8 [ %9, %for.body74 ], [ %3, %for.body74.preheader ]
+  %5 = phi i8 [ %4, %for.body74 ], [ %2, %for.body74.preheader ]
+  %6 = phi i8 [ %8, %for.body74 ], [ %1, %for.body74.preheader ]
+  %7 = phi i8 [ %6, %for.body74 ], [ %0, %for.body74.preheader ]
+  %j.0211 = phi i32 [ %add81, %for.body74 ], [ 1, %for.body74.preheader ]
+  %conv77 = zext i8 %7 to i32
+  %conv79 = zext i8 %6 to i32
+  %add80 = add nsw i32 %conv79, %conv77
+  %add81 = add i32 %j.0211, 1
+  %arrayidx82 = getelementptr inbounds i8, i8* %src, i32 %add81
+  %8 = load i8, i8* %arrayidx82, align 1, !tbaa !0
+  %conv83 = zext i8 %8 to i32
+  %add84 = add nsw i32 %add80, %conv83
+  %conv87 = zext i8 %5 to i32
+  %add88 = add nsw i32 %add84, %conv87
+  %conv90 = zext i8 %4 to i32
+  %add91 = add nsw i32 %add88, %conv90
+  %arrayidx93 = getelementptr inbounds i8, i8* %add.ptr, i32 %add81
+  %9 = load i8, i8* %arrayidx93, align 1, !tbaa !0
+  %conv94 = zext i8 %9 to i32
+  %add95 = add nsw i32 %add91, %conv94
+  %mul96 = mul nsw i32 %add95, 7282
+  %add97 = add nsw i32 %mul96, 32768
+  %shr98208 = lshr i32 %add97, 16
+  %conv99 = trunc i32 %shr98208 to i8
+  %add.ptr5.sum209 = add i32 %j.0211, %dstStride
+  %arrayidx100 = getelementptr inbounds i8, i8* %dst, i32 %add.ptr5.sum209
+  store i8 %conv99, i8* %arrayidx100, align 1, !tbaa !0
+  %exitcond = icmp eq i32 %add81, %sub
+  br i1 %exitcond, label %for.end103.loopexit, label %for.body74
+
+for.end103.loopexit:
+  br label %for.end
+
+for.end:
+  ret void
+}
+
+attributes #0 = { nounwind }
+
+!0 = !{!"omnipotent char", !1}
+!1 = !{!"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/Hexagon/swp-vmult.ll b/test/CodeGen/Hexagon/swp-vmult.ll
index 9018405274cd..7c53248f47fc 100644
--- a/test/CodeGen/Hexagon/swp-vmult.ll
+++ b/test/CodeGen/Hexagon/swp-vmult.ll
@@ -2,10 +2,10 @@
 ; RUN: llc -march=hexagon -mcpu=hexagonv5 -O3 < %s | FileCheck %s
 
 ; Multiply and accumulate
-; CHECK: mpyi([[REG0:r([0-9]+)]], [[REG1:r([0-9]+)]])
-; CHECK-NEXT: add(r{{[0-9]+}}, #4)
-; CHECK-NEXT: [[REG0]] = memw(r{{[0-9]+}} + r{{[0-9]+}}<<#0)
-; CHECK-NEXT: [[REG1]] = memw(r{{[0-9]+}} + r{{[0-9]+}}<<#0)
+; CHECK: mpyi([[REG0:r([0-9]+)]],[[REG1:r([0-9]+)]])
+; CHECK-NEXT: add(r{{[0-9]+}},#4)
+; CHECK-NEXT: [[REG0]] = memw(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
+; CHECK-NEXT: [[REG1]] = memw(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
 ; CHECK-NEXT: endloop0
 
 define i32 @foo(i32* %a, i32* %b, i32 %n) {
diff --git a/test/CodeGen/Hexagon/swp-vsum.ll b/test/CodeGen/Hexagon/swp-vsum.ll
index 4756c644709f..3561997450de 100644
--- a/test/CodeGen/Hexagon/swp-vsum.ll
+++ b/test/CodeGen/Hexagon/swp-vsum.ll
@@ -4,9 +4,9 @@
 ; Simple vector total.
 ; CHECK: loop0(.LBB0_[[LOOP:.]],
 ; CHECK: .LBB0_[[LOOP]]:
-; CHECK: add([[REG:r([0-9]+)]], r{{[0-9]+}})
-; CHECK-NEXT: add(r{{[0-9]+}}, #4)
-; CHECK-NEXT: [[REG]] = memw(r{{[0-9]+}} + r{{[0-9]+}}<<#0)
+; CHECK: add([[REG:r([0-9]+)]],r{{[0-9]+}})
+; CHECK-NEXT: add(r{{[0-9]+}},#4)
+; CHECK-NEXT: [[REG]] = memw(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
 ; CHECK-NEXT: endloop0
 
 define i32 @foo(i32* %a, i32 %n) {
diff --git a/test/CodeGen/Hexagon/tail-dup-subreg-map.ll b/test/CodeGen/Hexagon/tail-dup-subreg-map.ll
index 08dadeb9aaa4..1b11d087832a 100644
--- a/test/CodeGen/Hexagon/tail-dup-subreg-map.ll
+++ b/test/CodeGen/Hexagon/tail-dup-subreg-map.ll
@@ -5,7 +5,7 @@
 ; subregisters were dropped by the tail duplicator, resulting in invalid
 ; COPY instructions being generated.
 
-; CHECK: = extractu(r{{[0-9]+}}, #15, #17)
+; CHECK: = extractu(r{{[0-9]+}},#15,#17)
 
 target triple = "hexagon"
 
diff --git a/test/CodeGen/Hexagon/tfr-to-combine.ll b/test/CodeGen/Hexagon/tfr-to-combine.ll
index 1b82f3e4562e..50879ffe582d 100644
--- a/test/CodeGen/Hexagon/tfr-to-combine.ll
+++ b/test/CodeGen/Hexagon/tfr-to-combine.ll
@@ -8,7 +8,7 @@
 
 ; Function Attrs: nounwind
 define i64 @test1() #0 {
-; CHECK: combine(#10, #0)
+; CHECK: combine(#10,#0)
 entry:
   store i16 0, i16* @a, align 2
   store i16 10, i16* @b, align 2
@@ -17,7 +17,7 @@ entry:
 
 ; Function Attrs: nounwind
 define i64 @test2() #0 {
-; CHECK: combine(#0, r{{[0-9]+}})
+; CHECK: combine(#0,r{{[0-9]+}})
 entry:
   store i16 0, i16* @a, align 2
   %0 = load i16, i16* @c, align 2
@@ -27,7 +27,7 @@ entry:
 
 ; Function Attrs: nounwind
 define i64 @test4() #0 {
-; CHECK: combine(#0, #100)
+; CHECK: combine(#0,#100)
 entry:
   store i16 100, i16* @b, align 2
   store i16 0, i16* @a, align 2
diff --git a/test/CodeGen/Hexagon/tls_pic.ll b/test/CodeGen/Hexagon/tls_pic.ll
index 190e1d71d39b..2c2be0dc384a 100644
--- a/test/CodeGen/Hexagon/tls_pic.ll
+++ b/test/CodeGen/Hexagon/tls_pic.ll
@@ -4,7 +4,7 @@
 @src_ie = thread_local(initialexec) global i32 0, align 4
 
 ; CHECK-LABEL: test_initial_exec
-; CHECK-DAG: = add(pc, ##_GLOBAL_OFFSET_TABLE_@PCREL)
+; CHECK-DAG: = add(pc,##_GLOBAL_OFFSET_TABLE_@PCREL)
 ; CHECK-DAG: = ##src_ie@IEGOT
 ; CHECK-DAG: = ##dst_ie@IEGOT
 ; CHECK-NOT: call
@@ -22,7 +22,7 @@ entry:
 ; general-dynamic model.
 
 ; CHECK-LABEL: test_dynamic
-; CHECK-DAG: = add(pc, ##_GLOBAL_OFFSET_TABLE_@PCREL)
+; CHECK-DAG: = add(pc,##_GLOBAL_OFFSET_TABLE_@PCREL)
 ; CHECK-DAG: = ##src_gd@GDGOT
 ; CHECK-DAG: = ##dst_gd@GDGOT
 ; CHECK-DAG: call src_gd@GDPLT
diff --git a/test/CodeGen/Hexagon/two-crash.ll b/test/CodeGen/Hexagon/two-crash.ll
index 0ab02cda8a07..7e79cb3be912 100644
--- a/test/CodeGen/Hexagon/two-crash.ll
+++ b/test/CodeGen/Hexagon/two-crash.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s
 ; This testcase crashed, because we propagated a reg:sub into a tied use.
 ; The two-address pass rewrote it in a way that generated incorrect code.
-; CHECK: r{{[0-9]+}} += lsr(r{{[0-9]+}}, #16)
+; CHECK: r{{[0-9]+}} += lsr(r{{[0-9]+}},#16)
 
 target triple = "hexagon"
 
diff --git a/test/CodeGen/Hexagon/undo-dag-shift.ll b/test/CodeGen/Hexagon/undo-dag-shift.ll
new file mode 100644
index 000000000000..c1ab5d73f5c3
--- /dev/null
+++ b/test/CodeGen/Hexagon/undo-dag-shift.ll
@@ -0,0 +1,59 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; DAG combiner folds sequences of shifts, which can sometimes obscure
+; optimization opportunities. For example
+;
+;   unsigned int c(unsigned int b, unsigned int *a) {
+;     unsigned int bitidx = b >> 5;
+;     return a[bitidx];
+;   }
+;
+; produces
+;   (add x (shl (srl y 5) 2))
+; which is then folded into
+;   (add x (and (srl y 3) 1FFFFFFC))
+;
+; That results in a constant-extended and:
+;   r0 = and(##536870908,lsr(r0,#3))
+;   r0 = memw(r1+r0<<#0)
+; whereas
+;   r0 = lsr(r0,#5)
+;   r0 = memw(r1+r0<<#2)
+; is more desirable.
+
+target triple = "hexagon"
+
+; CHECK-LABEL: load_0
+; CHECK: memw(r{{[0-9]+}}+r{{[0-9]}}<<#2)
+define i32 @load_0(i32 %b, i32* nocapture readonly %a) #0 {
+entry:
+  %shr = lshr i32 %b, 5
+  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %shr
+  %0 = load i32, i32* %arrayidx, align 4
+  ret i32 %0
+}
+
+; This would require r0<<#3, which is not legal.
+; CHECK-LABEL: load_1
+; CHECK: memw(r{{[0-9]+}}+r{{[0-9]}}<<#0)
+define i32 @load_1(i32 %b, [3 x i32]* nocapture readonly %a) #0 {
+entry:
+  %shr = lshr i32 %b, 5
+  %arrayidx = getelementptr inbounds [3 x i32], [3 x i32]* %a, i32 %shr, i32 0
+  %0 = load i32, i32* %arrayidx, align 4
+  ret i32 %0
+}
+
+; CHECK-LABEL: store_0
+; CHECK: memw(r{{[0-9]+}}+r{{[0-9]}}<<#2)
+define void @store_0(i32 %b, i32* nocapture %a, i32 %v) #1 {
+entry:
+  %shr = lshr i32 %b, 5
+  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %shr
+  store i32 %v, i32* %arrayidx, align 4
+  ret void
+}
+
+attributes #0 = { norecurse nounwind readonly "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double,-long-calls" }
+attributes #1 = { norecurse nounwind "target-cpu"="hexagonv60" "target-features"="-hvx,-hvx-double,-long-calls" }
+
diff --git a/test/CodeGen/Hexagon/vaddh.ll b/test/CodeGen/Hexagon/vaddh.ll
index 88194b750ad5..a4fb33de4ac5 100644
--- a/test/CodeGen/Hexagon/vaddh.ll
+++ b/test/CodeGen/Hexagon/vaddh.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
-; CHECK: vaddh(r{{[0-9]+}}, r{{[0-9]+}})
+; CHECK: vaddh(r{{[0-9]+}},r{{[0-9]+}})
 
 @j = external global i32
 @k = external global i32
diff --git a/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll b/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll
index 70c4aeb4bac0..4bba134a40cb 100644
--- a/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll
+++ b/test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hsdr < %s | FileCheck %s
 ; This one should generate a combine with two immediates.
-; CHECK: combine(#7, #7)
+; CHECK: combine(#7,#7)
 @B = common global [400 x i32] zeroinitializer, align 8
 @A = common global [400 x i32] zeroinitializer, align 8
 @C = common global [400 x i32] zeroinitializer, align 8
diff --git a/test/CodeGen/Hexagon/vect/vect-loadv4i16.ll b/test/CodeGen/Hexagon/vect/vect-loadv4i16.ll
index 91b32652400f..f49a1e24a1bb 100644
--- a/test/CodeGen/Hexagon/vect/vect-loadv4i16.ll
+++ b/test/CodeGen/Hexagon/vect/vect-loadv4i16.ll
@@ -1,8 +1,8 @@
 ; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hsdr < %s | FileCheck %s
 
 ; Check that store is post-incremented.
-; CHECK: memuh(r{{[0-9]+}} + {{ *}}#6{{ *}})
-; CHECK: combine(r{{[0-9]+}}{{ *}},{{ *}}r{{[0-9]+}}{{ *}})
+; CHECK: memuh(r{{[0-9]+}}+#6)
+; CHECK: combine(r{{[0-9]+}},r{{[0-9]+}})
 ; CHECK: vaddh
 
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
diff --git a/test/CodeGen/Hexagon/vect/vect-shift-imm.ll b/test/CodeGen/Hexagon/vect/vect-shift-imm.ll
index 4861181d4125..a4d6afa40bce 100644
--- a/test/CodeGen/Hexagon/vect/vect-shift-imm.ll
+++ b/test/CodeGen/Hexagon/vect/vect-shift-imm.ll
@@ -6,12 +6,12 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s --check-prefix=CHECK-LSRH
 ;
 ; Make sure that the instructions with immediate operands are generated.
-; CHECK-ASLW: vaslw({{.*}}, #9)
-; CHECK-ASRW: vasrw({{.*}}, #8)
-; CHECK-LSRW: vlsrw({{.*}}, #7)
-; CHECK-ASLH: vaslh({{.*}}, #6)
-; CHECK-ASRH: vasrh({{.*}}, #5)
-; CHECK-LSRH: vlsrh({{.*}}, #4)
+; CHECK-ASLW: vaslw({{.*}},#9)
+; CHECK-ASRW: vasrw({{.*}},#8)
+; CHECK-LSRW: vlsrw({{.*}},#7)
+; CHECK-ASLH: vaslh({{.*}},#6)
+; CHECK-ASRH: vasrh({{.*}},#5)
+; CHECK-LSRH: vlsrh({{.*}},#4)
 
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon"
diff --git a/test/CodeGen/Hexagon/vect/vect-shuffle.ll b/test/CodeGen/Hexagon/vect/vect-shuffle.ll
index bd5b2b981695..27840bbd28d9 100644
--- a/test/CodeGen/Hexagon/vect/vect-shuffle.ll
+++ b/test/CodeGen/Hexagon/vect/vect-shuffle.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hsdr < %s | FileCheck %s
 ; Check that store is post-incremented.
-; CHECK-NOT: extractu
+; CHECK-NOT: extractu(r{{[0-9]+}},#32,
 ; CHECK-NOT: insert
 
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon"
diff --git a/test/CodeGen/Hexagon/vect/vect-vshifts.ll b/test/CodeGen/Hexagon/vect/vect-vshifts.ll
index 49ff812601ae..9d3cbe6e113f 100644
--- a/test/CodeGen/Hexagon/vect/vect-vshifts.ll
+++ b/test/CodeGen/Hexagon/vect/vect-vshifts.ll
@@ -1,8 +1,8 @@
 ; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
 
 ; Check that store is post-incremented.
-; CHECK: r{{[0-9]+:[0-9]+}} = vasrw(r{{[0-9]+:[0-9]+}}, r{{[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = vaslw(r{{[0-9]+:[0-9]+}}, r{{[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}} = vasrw(r{{[0-9]+:[0-9]+}},r{{[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}} = vaslw(r{{[0-9]+:[0-9]+}},r{{[0-9]+}})
 
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon"
diff --git a/test/CodeGen/Hexagon/vect/vect-xor.ll b/test/CodeGen/Hexagon/vect/vect-xor.ll
index 96719e683413..8864ab5c5cb7 100644
--- a/test/CodeGen/Hexagon/vect/vect-xor.ll
+++ b/test/CodeGen/Hexagon/vect/vect-xor.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hsdr < %s | FileCheck %s
 
 ; Check that the parsing succeeded.
-; CHECK: r{{[0-9]+:[0-9]+}} = xor(r{{[0-9]+:[0-9]+}}, r{{[0-9]+:[0-9]+}})
+; CHECK: r{{[0-9]+:[0-9]+}} = xor(r{{[0-9]+:[0-9]+}},r{{[0-9]+:[0-9]+}})
 
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon"