Diffstat (limited to 'test')
234 files changed, 14248 insertions, 4886 deletions
diff --git a/test/Analysis/BranchProbabilityInfo/basic.ll b/test/Analysis/BranchProbabilityInfo/basic.ll
index 7bee1bd57373..64e0a82456f1 100644
--- a/test/Analysis/BranchProbabilityInfo/basic.ll
+++ b/test/Analysis/BranchProbabilityInfo/basic.ll
@@ -378,8 +378,8 @@ entry:
   %cond = icmp eq i32 %a, 42
   br i1 %cond, label %exit, label %unr, !prof !4
-; CHECK: edge entry -> exit probability is 0x7ffff800 / 0x80000000 = 100.00% [HOT edge]
-; CHECK: edge entry -> unr probability is 0x00000800 / 0x80000000 = 0.00%
+; CHECK: edge entry -> exit probability is 0x7fffffff / 0x80000000 = 100.00% [HOT edge]
+; CHECK: edge entry -> unr probability is 0x00000001 / 0x80000000 = 0.00%
 
 unr:
   unreachable
@@ -396,8 +396,8 @@ entry:
   %cond = icmp eq i32 %a, 42
   br i1 %cond, label %exit, label %unr, !prof !5
-; CHECK: edge entry -> exit probability is 0x7ffff800 / 0x80000000 = 100.00% [HOT edge]
-; CHECK: edge entry -> unr probability is 0x00000800 / 0x80000000 = 0.00%
+; CHECK: edge entry -> exit probability is 0x7fffffff / 0x80000000 = 100.00% [HOT edge]
+; CHECK: edge entry -> unr probability is 0x00000001 / 0x80000000 = 0.00%
 
 unr:
   unreachable
@@ -406,7 +406,7 @@ exit:
   ret i32 %b
 }
 
-!5 = !{!"branch_weights", i32 1048575, i32 1}
+!5 = !{!"branch_weights", i32 2147483647, i32 1}
 
 define i32 @test_unreachable_with_prof_zero(i32 %a, i32 %b) {
 ; CHECK: Printing analysis {{.*}} for function 'test_unreachable_with_prof_zero'
@@ -414,8 +414,8 @@ entry:
   %cond = icmp eq i32 %a, 42
   br i1 %cond, label %exit, label %unr, !prof !6
-; CHECK: edge entry -> exit probability is 0x7ffff800 / 0x80000000 = 100.00% [HOT edge]
-; CHECK: edge entry -> unr probability is 0x00000800 / 0x80000000 = 0.00%
+; CHECK: edge entry -> exit probability is 0x7fffffff / 0x80000000 = 100.00% [HOT edge]
+; CHECK: edge entry -> unr probability is 0x00000001 / 0x80000000 = 0.00%
 
 unr:
   unreachable
@@ -451,11 +451,11 @@ entry:
                            i32 2, label %case_c
                            i32 3, label %case_d
                            i32 4, label %case_e ], !prof !8
-; CHECK: edge entry -> case_a probability is 0x00000800 / 0x80000000 = 0.00%
-; CHECK: edge entry -> case_b probability is 0x07fffdff / 0x80000000 = 6.25%
-; CHECK: edge entry -> case_c probability is 0x67fffdff / 0x80000000 = 81.25% [HOT edge]
-; CHECK: edge entry -> case_d probability is 0x07fffdff / 0x80000000 = 6.25%
-; CHECK: edge entry -> case_e probability is 0x07fffdff / 0x80000000 = 6.25%
+; CHECK: edge entry -> case_a probability is 0x00000001 / 0x80000000 = 0.00%
+; CHECK: edge entry -> case_b probability is 0x07ffffff / 0x80000000 = 6.25%
+; CHECK: edge entry -> case_c probability is 0x67ffffff / 0x80000000 = 81.25% [HOT edge]
+; CHECK: edge entry -> case_d probability is 0x07ffffff / 0x80000000 = 6.25%
+; CHECK: edge entry -> case_e probability is 0x07ffffff / 0x80000000 = 6.25%
 
 case_a:
   unreachable
@@ -493,11 +493,11 @@ entry:
                            i32 2, label %case_c
                            i32 3, label %case_d
                            i32 4, label %case_e ], !prof !9
-; CHECK: edge entry -> case_a probability is 0x00000400 / 0x80000000 = 0.00%
-; CHECK: edge entry -> case_b probability is 0x00000400 / 0x80000000 = 0.00%
-; CHECK: edge entry -> case_c probability is 0x6aaaa7ff / 0x80000000 = 83.33% [HOT edge]
-; CHECK: edge entry -> case_d probability is 0x0aaaa7ff / 0x80000000 = 8.33%
-; CHECK: edge entry -> case_e probability is 0x0aaaa7ff / 0x80000000 = 8.33%
+; CHECK: edge entry -> case_a probability is 0x00000001 / 0x80000000 = 0.00%
+; CHECK: edge entry -> case_b probability is 0x00000001 / 0x80000000 = 0.00%
+; CHECK: edge entry -> case_c probability is 0x6aaaaaa9 / 0x80000000 = 83.33% [HOT edge]
+; CHECK: edge entry -> case_d probability is 0x0aaaaaa9 / 0x80000000 = 8.33%
+; CHECK: edge entry -> case_e probability is 0x0aaaaaa9 / 0x80000000 = 8.33%
 
 case_a:
   unreachable
@@ -534,10 +534,10 @@ entry:
                            i32 3, label %case_d
                            i32 4, label %case_e ], !prof !10
 ; CHECK: edge entry -> case_a probability is 0x00000000 / 0x80000000 = 0.00%
-; CHECK: edge entry -> case_b probability is 0x00000400 / 0x80000000 = 0.00%
-; CHECK: edge entry -> case_c probability is 0x6e08fa2d / 0x80000000 = 85.96% [HOT edge]
-; CHECK: edge entry -> case_d probability is 0x08fb80e9 / 0x80000000 = 7.02%
-; CHECK: edge entry -> case_e probability is 0x08fb80e9 / 0x80000000 = 7.02%
+; CHECK: edge entry -> case_b probability is 0x00000001 / 0x80000000 = 0.00%
+; CHECK: edge entry -> case_c probability is 0x6e08fb82 / 0x80000000 = 85.96% [HOT edge]
+; CHECK: edge entry -> case_d probability is 0x08fb823e / 0x80000000 = 7.02%
+; CHECK: edge entry -> case_e probability is 0x08fb823e / 0x80000000 = 7.02%
 
 case_a:
   unreachable
diff --git a/test/Analysis/BranchProbabilityInfo/deopt-intrinsic.ll b/test/Analysis/BranchProbabilityInfo/deopt-intrinsic.ll
index faa09f9e8a0c..c2681e5e7c80 100644
--- a/test/Analysis/BranchProbabilityInfo/deopt-intrinsic.ll
+++ b/test/Analysis/BranchProbabilityInfo/deopt-intrinsic.ll
@@ -9,8 +9,8 @@ entry:
   %cond = icmp eq i32 %a, 42
   br i1 %cond, label %exit, label %deopt
-; CHECK: edge entry -> exit probability is 0x7ffff800 / 0x80000000 = 100.00% [HOT edge]
-; CHECK: edge entry -> deopt probability is 0x00000800 / 0x80000000 = 0.00%
+; CHECK: edge entry -> exit probability is 0x7fffffff / 0x80000000 = 100.00% [HOT edge]
+; CHECK: edge entry -> deopt probability is 0x00000001 / 0x80000000 = 0.00%
 
 deopt:
   %rval = call i32(...) @llvm.experimental.deoptimize.i32() [ "deopt"() ]
diff --git a/test/Analysis/BranchProbabilityInfo/noreturn.ll b/test/Analysis/BranchProbabilityInfo/noreturn.ll
index 0c2fe863d034..0566ca16c2f3 100644
--- a/test/Analysis/BranchProbabilityInfo/noreturn.ll
+++ b/test/Analysis/BranchProbabilityInfo/noreturn.ll
@@ -9,8 +9,8 @@ define i32 @test1(i32 %a, i32 %b) {
 entry:
   %cond = icmp eq i32 %a, 42
   br i1 %cond, label %exit, label %abort
-; CHECK: edge entry -> exit probability is 0x7ffff800 / 0x80000000 = 100.00% [HOT edge]
-; CHECK: edge entry -> abort probability is 0x00000800 / 0x80000000 = 0.00%
+; CHECK: edge entry -> exit probability is 0x7fffffff / 0x80000000 = 100.00% [HOT edge]
+; CHECK: edge entry -> abort probability is 0x00000001 / 0x80000000 = 0.00%
 
 abort:
   call void @abort() noreturn
@@ -27,11 +27,11 @@ entry:
                            i32 2, label %case_b
                            i32 3, label %case_c
                            i32 4, label %case_d]
-; CHECK: edge entry -> exit probability is 0x7ffff800 / 0x80000000 = 100.00% [HOT edge]
-; CHECK: edge entry -> case_a probability is 0x00000200 / 0x80000000 = 0.00%
-; CHECK: edge entry -> case_b probability is 0x00000200 / 0x80000000 = 0.00%
-; CHECK: edge entry -> case_c probability is 0x00000200 / 0x80000000 = 0.00%
-; CHECK: edge entry -> case_d probability is 0x00000200 / 0x80000000 = 0.00%
+; CHECK: edge entry -> exit probability is 0x7ffffffc / 0x80000000 = 100.00% [HOT edge]
+; CHECK: edge entry -> case_a probability is 0x00000001 / 0x80000000 = 0.00%
+; CHECK: edge entry -> case_b probability is 0x00000001 / 0x80000000 = 0.00%
+; CHECK: edge entry -> case_c probability is 0x00000001 / 0x80000000 = 0.00%
+; CHECK: edge entry -> case_d probability is 0x00000001 / 0x80000000 = 0.00%
 
 case_a:
   br label %case_b
@@ -56,8 +56,8 @@ define i32 @test3(i32 %a, i32 %b) {
 entry:
   %cond1 = icmp eq i32 %a, 42
   br i1 %cond1, label %exit, label %dom
-; CHECK: edge entry -> exit probability is 0x7ffff800 / 0x80000000 = 100.00% [HOT edge]
-; CHECK: edge entry -> dom probability is 0x00000800 / 0x80000000 = 0.00%
+; CHECK: edge entry -> exit probability is 0x7fffffff / 0x80000000 = 100.00% [HOT edge]
+; CHECK: edge entry -> dom probability is 0x00000001 / 0x80000000 = 0.00%
 
 dom:
   %cond2 = icmp ult i32 %a, 42
@@ -87,8 +87,8 @@ define i32 @throwSmallException(i32 %idx, i32 %limit) #0 personality i8* bitcast
 entry:
   %cmp = icmp sge i32 %idx, %limit
   br i1 %cmp, label %if.then, label %if.end
-; CHECK: edge entry -> if.then probability is 0x00000800 / 0x80000000 = 0.00%
-; CHECK: edge entry -> if.end probability is 0x7ffff800 / 0x80000000 = 100.00% [HOT edge]
+; CHECK: edge entry -> if.then probability is 0x00000001 / 0x80000000 = 0.00%
+; CHECK: edge entry -> if.end probability is 0x7fffffff / 0x80000000 = 100.00% [HOT edge]
 
 if.then: ; preds = %entry
   %exception = call i8* @__cxa_allocate_exception(i64 1) #0
diff --git a/test/Analysis/CostModel/X86/ctlz.ll b/test/Analysis/CostModel/X86/ctlz.ll
index 2c97da15aee5..769d73915e36 100644
--- a/test/Analysis/CostModel/X86/ctlz.ll
+++ b/test/Analysis/CostModel/X86/ctlz.ll
@@ -1,9 +1,12 @@
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=pentium4 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE -check-prefix=SSE2 -check-prefix=NOPOPCNT
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE -check-prefix=SSE42 -check-prefix=POPCNT
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX1 -check-prefix=POPCNT
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX2 -check-prefix=POPCNT
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX1 -check-prefix=POPCNT
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver4 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX2 -check-prefix=POPCNT
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=pentium4 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE -check-prefix=SSE2
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE -check-prefix=SSE42
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX1
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX2
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX1
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver4 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX2
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=knl -mattr=-avx512cd -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX512 -check-prefix=AVX512F
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=skx -mattr=-avx512cd -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX512 -check-prefix=AVX512BW
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=skx -mattr=+avx512cd -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX512CD
 
 ; Verify the cost of scalar leading zero count instructions.
@@ -80,11 +83,18 @@ declare <8 x i32> @llvm.ctlz.v8i32(<8 x i32>, i1)
 declare <16 x i16> @llvm.ctlz.v16i16(<16 x i16>, i1)
 declare <32 x i8> @llvm.ctlz.v32i8(<32 x i8>, i1)
 
+declare <8 x i64> @llvm.ctlz.v8i64(<8 x i64>, i1)
+declare <16 x i32> @llvm.ctlz.v16i32(<16 x i32>, i1)
+declare <32 x i16> @llvm.ctlz.v32i16(<32 x i16>, i1)
+declare <64 x i8> @llvm.ctlz.v64i8(<64 x i8>, i1)
+
 define <2 x i64> @var_ctlz_v2i64(<2 x i64> %a) {
 ; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v2i64':
 ; SSE2: Found an estimated cost of 25 for instruction: %ctlz
 ; SSE42: Found an estimated cost of 23 for instruction: %ctlz
 ; AVX: Found an estimated cost of 23 for instruction: %ctlz
+; AVX512: Found an estimated cost of 23 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 1 for instruction: %ctlz
   %ctlz = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a, i1 0)
   ret <2 x i64> %ctlz
 }
@@ -94,6 +104,8 @@ define <2 x i64> @var_ctlz_v2i64u(<2 x i64> %a) {
 ; SSE2: Found an estimated cost of 25 for instruction: %ctlz
 ; SSE42: Found an estimated cost of 23 for instruction: %ctlz
 ; AVX: Found an estimated cost of 23 for instruction: %ctlz
+; AVX512: Found an estimated cost of 23 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 1 for instruction: %ctlz
   %ctlz = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a, i1 1)
   ret <2 x i64> %ctlz
 }
@@ -104,6 +116,8 @@ define <4 x i64> @var_ctlz_v4i64(<4 x i64> %a) {
 ; SSE42: Found an estimated cost of 46 for instruction: %ctlz
 ; AVX1: Found an estimated cost of 48 for instruction: %ctlz
 ; AVX2: Found an estimated cost of 23 for instruction: %ctlz
+; AVX512: Found an estimated cost of 23 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 1 for instruction: %ctlz
   %ctlz = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %a, i1 0)
   ret <4 x i64> %ctlz
 }
@@ -114,15 +128,45 @@ define <4 x i64> @var_ctlz_v4i64u(<4 x i64> %a) {
 ; SSE42: Found an estimated cost of 46 for instruction: %ctlz
 ; AVX1: Found an estimated cost of 48 for instruction: %ctlz
 ; AVX2: Found an estimated cost of 23 for instruction: %ctlz
+; AVX512: Found an estimated cost of 23 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 1 for instruction: %ctlz
   %ctlz = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %a, i1 1)
   ret <4 x i64> %ctlz
 }
 
+define <8 x i64> @var_ctlz_v8i64(<8 x i64> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v8i64':
+; SSE2: Found an estimated cost of 100 for instruction: %ctlz
+; SSE42: Found an estimated cost of 92 for instruction: %ctlz
+; AVX1: Found an estimated cost of 96 for instruction: %ctlz
+; AVX2: Found an estimated cost of 46 for instruction: %ctlz
+; AVX512F: Found an estimated cost of 29 for instruction: %ctlz
+; AVX512BW: Found an estimated cost of 23 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 1 for instruction: %ctlz
+  %ctlz = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %a, i1 0)
+  ret <8 x i64> %ctlz
+}
+
+define <8 x i64> @var_ctlz_v8i64u(<8 x i64> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v8i64u':
+; SSE2: Found an estimated cost of 100 for instruction: %ctlz
+; SSE42: Found an estimated cost of 92 for instruction: %ctlz
+; AVX1: Found an estimated cost of 96 for instruction: %ctlz
+; AVX2: Found an estimated cost of 46 for instruction: %ctlz
+; AVX512F: Found an estimated cost of 29 for instruction: %ctlz
+; AVX512BW: Found an estimated cost of 23 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 1 for instruction: %ctlz
+  %ctlz = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %a, i1 1)
+  ret <8 x i64> %ctlz
+}
+
 define <4 x i32> @var_ctlz_v4i32(<4 x i32> %a) {
 ; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v4i32':
 ; SSE2: Found an estimated cost of 26 for instruction: %ctlz
 ; SSE42: Found an estimated cost of 18 for instruction: %ctlz
 ; AVX: Found an estimated cost of 18 for instruction: %ctlz
+; AVX512: Found an estimated cost of 18 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 1 for instruction: %ctlz
   %ctlz = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 0)
   ret <4 x i32> %ctlz
 }
@@ -132,6 +176,8 @@ define <4 x i32> @var_ctlz_v4i32u(<4 x i32> %a) {
 ; SSE2: Found an estimated cost of 26 for instruction: %ctlz
 ; SSE42: Found an estimated cost of 18 for instruction: %ctlz
 ; AVX: Found an estimated cost of 18 for instruction: %ctlz
+; AVX512: Found an estimated cost of 18 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 1 for instruction: %ctlz
   %ctlz = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 1)
   ret <4 x i32> %ctlz
 }
@@ -142,6 +188,8 @@ define <8 x i32> @var_ctlz_v8i32(<8 x i32> %a) {
 ; SSE42: Found an estimated cost of 36 for instruction: %ctlz
 ; AVX1: Found an estimated cost of 38 for instruction: %ctlz
 ; AVX2: Found an estimated cost of 18 for instruction: %ctlz
+; AVX512: Found an estimated cost of 18 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 1 for instruction: %ctlz
   %ctlz = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %a, i1 0)
   ret <8 x i32> %ctlz
 }
@@ -152,15 +200,45 @@ define <8 x i32> @var_ctlz_v8i32u(<8 x i32> %a) {
 ; SSE42: Found an estimated cost of 36 for instruction: %ctlz
 ; AVX1: Found an estimated cost of 38 for instruction: %ctlz
 ; AVX2: Found an estimated cost of 18 for instruction: %ctlz
+; AVX512: Found an estimated cost of 18 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 1 for instruction: %ctlz
   %ctlz = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %a, i1 1)
   ret <8 x i32> %ctlz
 }
 
+define <16 x i32> @var_ctlz_v16i32(<16 x i32> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v16i32':
+; SSE2: Found an estimated cost of 104 for instruction: %ctlz
+; SSE42: Found an estimated cost of 72 for instruction: %ctlz
+; AVX1: Found an estimated cost of 76 for instruction: %ctlz
+; AVX2: Found an estimated cost of 36 for instruction: %ctlz
+; AVX512F: Found an estimated cost of 35 for instruction: %ctlz
+; AVX512BW: Found an estimated cost of 22 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 1 for instruction: %ctlz
+  %ctlz = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %a, i1 0)
+  ret <16 x i32> %ctlz
+}
+
+define <16 x i32> @var_ctlz_v16i32u(<16 x i32> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v16i32u':
+; SSE2: Found an estimated cost of 104 for instruction: %ctlz
+; SSE42: Found an estimated cost of 72 for instruction: %ctlz
+; AVX1: Found an estimated cost of 76 for instruction: %ctlz
+; AVX2: Found an estimated cost of 36 for instruction: %ctlz
+; AVX512F: Found an estimated cost of 35 for instruction: %ctlz
+; AVX512BW: Found an estimated cost of 22 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 1 for instruction: %ctlz
+  %ctlz = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %a, i1 1)
+  ret <16 x i32> %ctlz
+}
+
 define <8 x i16> @var_ctlz_v8i16(<8 x i16> %a) {
 ; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v8i16':
 ; SSE2: Found an estimated cost of 20 for instruction: %ctlz
 ; SSE42: Found an estimated cost of 14 for instruction: %ctlz
 ; AVX: Found an estimated cost of 14 for instruction: %ctlz
+; AVX512: Found an estimated cost of 14 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 4 for instruction: %ctlz
   %ctlz = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 0)
   ret <8 x i16> %ctlz
 }
@@ -170,6 +248,8 @@ define <8 x i16> @var_ctlz_v8i16u(<8 x i16> %a) {
 ; SSE2: Found an estimated cost of 20 for instruction: %ctlz
 ; SSE42: Found an estimated cost of 14 for instruction: %ctlz
 ; AVX: Found an estimated cost of 14 for instruction: %ctlz
+; AVX512: Found an estimated cost of 14 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 4 for instruction: %ctlz
   %ctlz = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 1)
   ret <8 x i16> %ctlz
 }
@@ -180,6 +260,8 @@ define <16 x i16> @var_ctlz_v16i16(<16 x i16> %a) {
 ; SSE42: Found an estimated cost of 28 for instruction: %ctlz
 ; AVX1: Found an estimated cost of 30 for instruction: %ctlz
 ; AVX2: Found an estimated cost of 14 for instruction: %ctlz
+; AVX512: Found an estimated cost of 14 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 4 for instruction: %ctlz
   %ctlz = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %a, i1 0)
   ret <16 x i16> %ctlz
 }
@@ -190,15 +272,45 @@ define <16 x i16> @var_ctlz_v16i16u(<16 x i16> %a) {
 ; SSE42: Found an estimated cost of 28 for instruction: %ctlz
 ; AVX1: Found an estimated cost of 30 for instruction: %ctlz
 ; AVX2: Found an estimated cost of 14 for instruction: %ctlz
+; AVX512: Found an estimated cost of 14 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 4 for instruction: %ctlz
   %ctlz = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %a, i1 1)
   ret <16 x i16> %ctlz
 }
 
+define <32 x i16> @var_ctlz_v32i16(<32 x i16> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v32i16':
+; SSE2: Found an estimated cost of 80 for instruction: %ctlz
+; SSE42: Found an estimated cost of 56 for instruction: %ctlz
+; AVX1: Found an estimated cost of 60 for instruction: %ctlz
+; AVX2: Found an estimated cost of 28 for instruction: %ctlz
+; AVX512F: Found an estimated cost of 28 for instruction: %ctlz
+; AVX512BW: Found an estimated cost of 18 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 8 for instruction: %ctlz
+  %ctlz = call <32 x i16> @llvm.ctlz.v32i16(<32 x i16> %a, i1 0)
+  ret <32 x i16> %ctlz
+}
+
+define <32 x i16> @var_ctlz_v32i16u(<32 x i16> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v32i16u':
+; SSE2: Found an estimated cost of 80 for instruction: %ctlz
+; SSE42: Found an estimated cost of 56 for instruction: %ctlz
+; AVX1: Found an estimated cost of 60 for instruction: %ctlz
+; AVX2: Found an estimated cost of 28 for instruction: %ctlz
+; AVX512F: Found an estimated cost of 28 for instruction: %ctlz
+; AVX512BW: Found an estimated cost of 18 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 8 for instruction: %ctlz
+  %ctlz = call <32 x i16> @llvm.ctlz.v32i16(<32 x i16> %a, i1 1)
+  ret <32 x i16> %ctlz
+}
+
 define <16 x i8> @var_ctlz_v16i8(<16 x i8> %a) {
 ; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v16i8':
 ; SSE2: Found an estimated cost of 17 for instruction: %ctlz
 ; SSE42: Found an estimated cost of 9 for instruction: %ctlz
 ; AVX: Found an estimated cost of 9 for instruction: %ctlz
+; AVX512: Found an estimated cost of 9 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 4 for instruction: %ctlz
   %ctlz = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 0)
   ret <16 x i8> %ctlz
 }
@@ -208,6 +320,8 @@ define <16 x i8> @var_ctlz_v16i8u(<16 x i8> %a) {
 ; SSE2: Found an estimated cost of 17 for instruction: %ctlz
 ; SSE42: Found an estimated cost of 9 for instruction: %ctlz
 ; AVX: Found an estimated cost of 9 for instruction: %ctlz
+; AVX512: Found an estimated cost of 9 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 4 for instruction: %ctlz
   %ctlz = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 1)
   ret <16 x i8> %ctlz
 }
@@ -218,6 +332,8 @@ define <32 x i8> @var_ctlz_v32i8(<32 x i8> %a) {
 ; SSE42: Found an estimated cost of 18 for instruction: %ctlz
 ; AVX1: Found an estimated cost of 20 for instruction: %ctlz
 ; AVX2: Found an estimated cost of 9 for instruction: %ctlz
+; AVX512: Found an estimated cost of 9 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 10 for instruction: %ctlz
   %ctlz = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %a, i1 0)
   ret <32 x i8> %ctlz
 }
@@ -228,6 +344,34 @@ define <32 x i8> @var_ctlz_v32i8u(<32 x i8> %a) {
 ; SSE42: Found an estimated cost of 18 for instruction: %ctlz
 ; AVX1: Found an estimated cost of 20 for instruction: %ctlz
 ; AVX2: Found an estimated cost of 9 for instruction: %ctlz
+; AVX512: Found an estimated cost of 9 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 10 for instruction: %ctlz
   %ctlz = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %a, i1 1)
   ret <32 x i8> %ctlz
 }
+
+define <64 x i8> @var_ctlz_v64i8(<64 x i8> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v64i8':
+; SSE2: Found an estimated cost of 68 for instruction: %ctlz
+; SSE42: Found an estimated cost of 36 for instruction: %ctlz
+; AVX1: Found an estimated cost of 40 for instruction: %ctlz
+; AVX2: Found an estimated cost of 18 for instruction: %ctlz
+; AVX512F: Found an estimated cost of 18 for instruction: %ctlz
+; AVX512BW: Found an estimated cost of 17 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 20 for instruction: %ctlz
+  %ctlz = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %a, i1 0)
+  ret <64 x i8> %ctlz
+}
+
+define <64 x i8> @var_ctlz_v64i8u(<64 x i8> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v64i8u':
+; SSE2: Found an estimated cost of 68 for instruction: %ctlz
+; SSE42: Found an estimated cost of 36 for instruction: %ctlz
+; AVX1: Found an estimated cost of 40 for instruction: %ctlz
+; AVX2: Found an estimated cost of 18 for instruction: %ctlz
+; AVX512F: Found an estimated cost of 18 for instruction: %ctlz
+; AVX512BW: Found an estimated cost of 17 for instruction: %ctlz
+; AVX512CD: Found an estimated cost of 20 for instruction: %ctlz
+  %ctlz = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %a, i1 1)
+  ret <64 x i8> %ctlz
+}
diff --git a/test/Analysis/CostModel/X86/ctpop.ll b/test/Analysis/CostModel/X86/ctpop.ll
index f072cbaec492..e6a14e98e37a 100644
--- a/test/Analysis/CostModel/X86/ctpop.ll
+++ b/test/Analysis/CostModel/X86/ctpop.ll
@@ -4,6 +4,8 @@
 ; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX2 -check-prefix=POPCNT
 ; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX1 -check-prefix=POPCNT
 ; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver4 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX2 -check-prefix=POPCNT
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=knl -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX512 -check-prefix=AVX512F -check-prefix=POPCNT
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=skx -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX512 -check-prefix=AVX512BW -check-prefix=POPCNT
 
 ; Verify the cost of scalar population count instructions.
@@ -56,11 +58,17 @@ declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>)
 declare <16 x i16> @llvm.ctpop.v16i16(<16 x i16>)
 declare <32 x i8> @llvm.ctpop.v32i8(<32 x i8>)
 
+declare <8 x i64> @llvm.ctpop.v8i64(<8 x i64>)
+declare <16 x i32> @llvm.ctpop.v16i32(<16 x i32>)
+declare <32 x i16> @llvm.ctpop.v32i16(<32 x i16>)
+declare <64 x i8> @llvm.ctpop.v64i8(<64 x i8>)
+
 define <2 x i64> @var_ctpop_v2i64(<2 x i64> %a) {
 ; CHECK: 'Cost Model Analysis' for function 'var_ctpop_v2i64':
 ; SSE2: Found an estimated cost of 12 for instruction: %ctpop
 ; SSE42: Found an estimated cost of 7 for instruction: %ctpop
 ; AVX: Found an estimated cost of 7 for instruction: %ctpop
+; AVX512: Found an estimated cost of 7 for instruction: %ctpop
   %ctpop = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
   ret <2 x i64> %ctpop
 }
@@ -71,15 +79,29 @@ define <4 x i64> @var_ctpop_v4i64(<4 x i64> %a) {
 ; SSE42: Found an estimated cost of 14 for instruction: %ctpop
 ; AVX1: Found an estimated cost of 16 for instruction: %ctpop
 ; AVX2: Found an estimated cost of 7 for instruction: %ctpop
+; AVX512: Found an estimated cost of 7 for instruction: %ctpop
   %ctpop = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %a)
   ret <4 x i64> %ctpop
 }
 
+define <8 x i64> @var_ctpop_v8i64(<8 x i64> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_ctpop_v8i64':
+; SSE2: Found an estimated cost of 48 for instruction: %ctpop
+; SSE42: Found an estimated cost of 28 for instruction: %ctpop
+; AVX1: Found an estimated cost of 32 for instruction: %ctpop
+; AVX2: Found an estimated cost of 14 for instruction: %ctpop
+; AVX512F: Found an estimated cost of 16 for instruction: %ctpop
+; AVX512BW: Found an estimated cost of 7 for instruction: %ctpop
+  %ctpop = call <8 x i64> @llvm.ctpop.v8i64(<8 x i64> %a)
+  ret <8 x i64> %ctpop
+}
+
 define <4 x i32> @var_ctpop_v4i32(<4 x i32> %a) {
 ; CHECK: 'Cost Model Analysis' for function 'var_ctpop_v4i32':
 ; SSE2: Found an estimated cost of 15 for instruction: %ctpop
 ; SSE42: Found an estimated cost of 11 for instruction: %ctpop
 ; AVX: Found an estimated cost of 11 for instruction: %ctpop
+; AVX512: Found an estimated cost of 11 for instruction: %ctpop
   %ctpop = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %a)
   ret <4 x i32> %ctpop
 }
@@ -90,15 +112,29 @@ define <8 x i32> @var_ctpop_v8i32(<8 x i32> %a) {
 ; SSE42: Found an estimated cost of 22 for instruction: %ctpop
 ; AVX1: Found an estimated cost of 24 for instruction: %ctpop
 ; AVX2: Found an estimated cost of 11 for instruction: %ctpop
+; AVX512: Found an estimated cost of 11 for instruction: %ctpop
   %ctpop = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %a)
   ret <8 x i32> %ctpop
 }
 
+define <16 x i32> @var_ctpop_v16i32(<16 x i32> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_ctpop_v16i32':
+; SSE2: Found an estimated cost of 60 for instruction: %ctpop
+; SSE42: Found an estimated cost of 44 for instruction: %ctpop
+; AVX1: Found an estimated cost of 48 for instruction: %ctpop
+; AVX2: Found an estimated cost of 22 for instruction: %ctpop
+; AVX512F: Found an estimated cost of 24 for instruction: %ctpop
+; AVX512BW: Found an estimated cost of 11 for instruction: %ctpop
+  %ctpop = call <16 x i32> @llvm.ctpop.v16i32(<16 x i32> %a)
+  ret <16 x i32> %ctpop
+}
+
 define <8 x i16> @var_ctpop_v8i16(<8 x i16> %a) {
 ; CHECK: 'Cost Model Analysis' for function 'var_ctpop_v8i16':
 ; SSE2: Found an estimated cost of 13 for instruction: %ctpop
 ; SSE42: Found an estimated cost of 9 for instruction: %ctpop
 ; AVX: Found an estimated cost of 9 for instruction: %ctpop
+; AVX512: Found an estimated cost of 9 for instruction: %ctpop
   %ctpop = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %a)
   ret <8 x i16> %ctpop
 }
@@ -109,15 +145,29 @@ define <16 x i16> @var_ctpop_v16i16(<16 x i16> %a) {
 ; SSE42: Found an estimated cost of 18 for instruction: %ctpop
 ; AVX1: Found an estimated cost of 20 for instruction: %ctpop
 ; AVX2: Found an estimated cost of 9 for instruction: %ctpop
+; AVX512: Found an estimated cost of 9 for instruction: %ctpop
   %ctpop = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> %a)
   ret <16 x i16> %ctpop
 }
 
+define <32 x i16> @var_ctpop_v32i16(<32 x i16> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_ctpop_v32i16':
+; SSE2: Found an estimated cost of 52 for instruction: %ctpop
+; SSE42: Found an estimated cost of 36 for instruction: %ctpop
+; AVX1: Found an estimated cost of 40 for instruction: %ctpop
+; AVX2: Found an estimated cost of 18 for instruction: %ctpop
+; AVX512F: Found an estimated cost of 18 for instruction: %ctpop
+; AVX512BW: Found an estimated cost of 9 for instruction: %ctpop
+  %ctpop = call <32 x i16> @llvm.ctpop.v32i16(<32 x i16> %a)
+  ret <32 x i16> %ctpop
+}
+
 define <16 x i8> @var_ctpop_v16i8(<16 x i8> %a) {
 ; CHECK: 'Cost Model Analysis' for function 'var_ctpop_v16i8':
 ; SSE2: Found an estimated cost of 10 for instruction: %ctpop
 ; SSE42: Found an estimated cost of 6 for instruction: %ctpop
 ; AVX: Found an estimated cost of 6 for instruction: %ctpop
+; AVX512: Found an estimated cost of 6 for instruction: %ctpop
   %ctpop = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %a)
   ret <16 x i8> %ctpop
 }
@@ -128,6 +178,19 @@ define <32 x i8> @var_ctpop_v32i8(<32 x i8> %a) {
 ; SSE42: Found an estimated cost of 12 for instruction: %ctpop
 ; AVX1: Found an estimated cost of 14 for instruction: %ctpop
 ; AVX2: Found an estimated cost of 6 for instruction: %ctpop
+; AVX512: Found an estimated cost of 6 for instruction: %ctpop
   %ctpop = call <32 x i8> @llvm.ctpop.v32i8(<32 x i8> %a)
   ret <32 x i8> %ctpop
 }
+
+define <64 x i8> @var_ctpop_v64i8(<64 x i8> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_ctpop_v64i8':
+; SSE2: Found an estimated cost of 40 for instruction: %ctpop
+; SSE42: Found an estimated cost of 24 for instruction: %ctpop
+; AVX1: Found an estimated cost of 28 for instruction: %ctpop
+; AVX2: Found an estimated cost of 12 for instruction: %ctpop
+; AVX512F: Found an estimated cost of 12 for instruction: %ctpop
+; AVX512BW: Found an estimated cost of 6 for instruction: %ctpop
+  %ctpop = call <64 x i8> @llvm.ctpop.v64i8(<64 x i8> %a)
+  ret <64 x i8> %ctpop
+}
diff --git a/test/Analysis/CostModel/X86/cttz.ll b/test/Analysis/CostModel/X86/cttz.ll
index 5d3c59b60232..e7a39781385e 100644
--- a/test/Analysis/CostModel/X86/cttz.ll
+++ b/test/Analysis/CostModel/X86/cttz.ll
@@ -1,9 +1,11 @@
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=pentium4 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE -check-prefix=SSE2 -check-prefix=NOPOPCNT
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE -check-prefix=SSE42 -check-prefix=POPCNT
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX1 -check-prefix=POPCNT
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX2 -check-prefix=POPCNT
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX1 -check-prefix=POPCNT
-; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver4 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX2 -check-prefix=POPCNT
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=pentium4 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE -check-prefix=SSE2
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE -check-prefix=SSE42
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX1
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX2
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX1
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver4 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX2
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=knl -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX512 -check-prefix=AVX512F
+; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=skx -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX512 -check-prefix=AVX512BW
 
 ; Verify the cost of scalar trailing zero count instructions.
 
@@ -80,11 +82,17 @@ declare <8 x i32> @llvm.cttz.v8i32(<8 x i32>, i1)
 declare <16 x i16> @llvm.cttz.v16i16(<16 x i16>, i1)
 declare <32 x i8> @llvm.cttz.v32i8(<32 x i8>, i1)
 
+declare <8 x i64> @llvm.cttz.v8i64(<8 x i64>, i1)
+declare <16 x i32> @llvm.cttz.v16i32(<16 x i32>, i1)
+declare <32 x i16> @llvm.cttz.v32i16(<32 x i16>, i1)
+declare <64 x i8> @llvm.cttz.v64i8(<64 x i8>, i1)
+
 define <2 x i64> @var_cttz_v2i64(<2 x i64> %a) {
 ; CHECK: 'Cost Model Analysis' for function 'var_cttz_v2i64':
 ; SSE2: Found an estimated cost of 14 for instruction: %cttz
 ; SSE42: Found an estimated cost of 10 for instruction: %cttz
 ; AVX: Found an estimated cost of 10 for instruction: %cttz
+; AVX512: Found an estimated cost of 10 for instruction: %cttz
   %cttz = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a, i1 0)
   ret <2 x i64> %cttz
 }
@@ -94,6 +102,7 @@ define <2 x i64> @var_cttz_v2i64u(<2 x i64> %a) {
 ; SSE2: Found an estimated cost of 14 for instruction: %cttz
 ; SSE42: Found an estimated cost of 10 for instruction: %cttz
 ; AVX: Found an estimated cost of 10 for instruction: %cttz
+; AVX512: Found an estimated cost of 10 for instruction: %cttz
   %cttz = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a, i1 1)
   ret <2 x i64> %cttz
 }
@@ -104,6 +113,7 @@ define <4 x i64> @var_cttz_v4i64(<4 x i64> %a) {
 ; SSE42: Found an estimated cost of 20 for instruction: %cttz
 ; AVX1: Found an estimated cost of 22 for instruction: %cttz
 ; AVX2: Found an estimated cost of 10 for instruction: %cttz
+; AVX512: Found an estimated cost of 10 for instruction: %cttz
   %cttz = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %a, i1 0)
   ret <4 x i64> %cttz
 }
@@ -114,15 +124,41 @@ define <4 x i64> @var_cttz_v4i64u(<4 x i64> %a) {
 ; SSE42: Found an estimated cost of 20 for instruction: %cttz
 ; AVX1: Found an estimated cost of 22 for instruction: %cttz
 ; AVX2: Found an estimated cost of 10 for instruction: %cttz
+; AVX512: Found an estimated cost of 10 for instruction: %cttz
   %cttz = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %a, i1 1)
   ret <4 x i64> %cttz
 }
 
+define <8 x i64> @var_cttz_v8i64(<8 x i64> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_cttz_v8i64':
+; SSE2: Found an estimated cost of 56 for instruction: %cttz
+; SSE42: Found an estimated cost of 40 for instruction: %cttz
+; AVX1: Found an estimated cost of 44 for instruction: %cttz
+; AVX2: Found an estimated cost of 20 for instruction: %cttz
+; AVX512F: Found an estimated cost of 20 for instruction: %cttz
+; AVX512BW: Found an estimated cost of 10 for instruction: %cttz
+  %cttz = call <8 x i64> @llvm.cttz.v8i64(<8 x i64> %a, i1 0)
+  ret <8 x i64> %cttz
+}
+
+define <8 x i64> @var_cttz_v8i64u(<8 x i64> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_cttz_v8i64u':
+; SSE2: Found an estimated cost of 56 for instruction: %cttz
+; SSE42: Found an estimated cost of 40 for instruction: %cttz
+; AVX1: Found an estimated cost of 44 for instruction: %cttz
+; AVX2: Found an estimated cost of 20 for instruction: %cttz
+; AVX512F: Found an estimated cost of 20 for instruction: %cttz
+; AVX512BW: Found an estimated cost of 10 for instruction: %cttz
+  %cttz = call <8 x i64> @llvm.cttz.v8i64(<8 x i64> %a, i1 1)
+  ret <8 x i64> %cttz
+}
+
 define <4 x i32> @var_cttz_v4i32(<4 x i32> %a) {
 ; CHECK: 'Cost Model Analysis' for function 'var_cttz_v4i32':
 ; SSE2: Found an estimated cost of 18 for instruction: %cttz
 ; SSE42: Found an estimated cost of 14 for instruction: %cttz
 ; AVX: Found an estimated cost of 14 for instruction: %cttz
+; AVX512: Found an estimated cost of 14 for instruction: %cttz
   %cttz = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %a, i1 0)
   ret <4 x i32> %cttz
 }
@@ -132,6 +168,7 @@ define <4 x i32> @var_cttz_v4i32u(<4 x i32> %a) {
 ; SSE2: Found an estimated cost of 18 for instruction: %cttz
 ; SSE42: Found an estimated cost of 14 for instruction: %cttz
 ; AVX: Found an estimated cost of 14 for instruction: %cttz
+; AVX512: Found an estimated cost of 14 for instruction: %cttz
   %cttz = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %a, i1 1)
   ret <4 x i32> %cttz
 }
@@ -142,6 +179,7 @@ define <8 x i32> @var_cttz_v8i32(<8 x i32> %a) {
 ; SSE42: Found an estimated cost of 28 for instruction: %cttz
 ; AVX1: Found an estimated cost of 30 for instruction: %cttz
 ; AVX2: Found an estimated cost of 14 for instruction: %cttz
+; AVX512: Found an estimated cost of 14 for instruction: %cttz
   %cttz = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %a, i1 0)
   ret <8 x i32> %cttz
 }
@@ -152,15 +190,41 @@ define <8 x i32> @var_cttz_v8i32u(<8 x i32> %a) {
 ; SSE42: Found an estimated cost of 28 for instruction: %cttz
 ; AVX1: Found an estimated cost of 30 for instruction: %cttz
 ; AVX2: Found an estimated cost of 14 for instruction: %cttz
+; AVX512: Found an estimated cost of 14 for instruction: %cttz
   %cttz = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %a, i1 1)
   ret <8 x i32> %cttz
 }
 
+define <16 x i32> @var_cttz_v16i32(<16 x i32> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_cttz_v16i32':
+; SSE2: Found an estimated cost of 72 for instruction: %cttz
+; SSE42: Found an estimated cost of 56 for instruction: %cttz
+; AVX1: Found an estimated cost of 60 for instruction: %cttz
+; AVX2: Found an estimated cost of 28 for instruction: %cttz
+; AVX512F: Found an estimated cost of 28 for instruction: %cttz
+; AVX512BW: Found an estimated cost of 14 for instruction: %cttz
+  %cttz = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %a, i1 0)
+  ret <16 x i32> %cttz
+}
+
+define <16 x i32> @var_cttz_v16i32u(<16 x i32> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_cttz_v16i32u':
+; SSE2: Found an estimated cost of 72 for instruction: %cttz
+; SSE42: Found an estimated cost of 56 for instruction: %cttz
+; AVX1: Found an estimated cost of 60 for instruction: %cttz
+; AVX2: Found an estimated cost of 28 for instruction: %cttz
+; AVX512F: Found an estimated cost of 28 for instruction: %cttz
+; AVX512BW: Found an estimated cost of 14 for instruction: %cttz
+  %cttz = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %a, i1 1)
+  ret <16 x i32> %cttz
+}
+
 define <8 x i16> @var_cttz_v8i16(<8 x i16> %a) {
 ; CHECK: 'Cost Model Analysis' for function 'var_cttz_v8i16':
 ; SSE2: Found an estimated cost of 16 for instruction: %cttz
 ; SSE42: Found an estimated cost of 12 for instruction: %cttz
 ; AVX: Found an estimated cost of 12 for instruction: %cttz
+; AVX512: Found an estimated cost of 12 for instruction: %cttz
   %cttz = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %a, i1 0)
   ret <8 x i16> %cttz
 }
@@ -170,6 +234,7 @@ define <8 x i16> @var_cttz_v8i16u(<8 x i16> %a) {
 ; SSE2: Found an estimated cost of 16 for instruction: %cttz
 ; SSE42: Found an estimated cost of 12 for instruction: %cttz
 ; AVX: Found an estimated cost of 12 for instruction: %cttz
+; AVX512: Found an estimated cost of 12 for instruction: %cttz
   %cttz = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %a, i1 1)
   ret <8 x i16> %cttz
 }
@@ -180,6 +245,7 @@ define <16 x i16> @var_cttz_v16i16(<16 x i16> %a) {
 ; SSE42: Found an estimated cost of 24 for instruction: %cttz
 ; AVX1: Found an estimated cost of 26 for instruction: %cttz
 ; AVX2: Found an estimated cost of 12 for instruction: %cttz
+; AVX512: Found an estimated cost of 12 for instruction: %cttz
   %cttz = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> %a, i1 0)
   ret <16 x i16> %cttz
 }
@@ -190,15 +256,41 @@ define <16 x i16> @var_cttz_v16i16u(<16 x i16> %a) {
 ; SSE42: Found an estimated cost of 24 for instruction: %cttz
 ; AVX1: Found an estimated cost of 26 for instruction: %cttz
 ; AVX2: Found an estimated cost of 12 for instruction: %cttz
+; AVX512: Found an estimated cost of 12 for instruction: %cttz
   %cttz = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> %a, i1 1)
   ret <16 x i16> %cttz
 }
 
+define <32 x i16> @var_cttz_v32i16(<32 x i16> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_cttz_v32i16':
+; SSE2: Found an estimated cost of 64 for instruction: %cttz
+; SSE42: Found an estimated cost of 48 for instruction: %cttz
+; AVX1: Found an estimated cost of 52 for instruction: %cttz
+; AVX2: Found an estimated cost of 24 for instruction: %cttz
+; AVX512F: Found an estimated cost of 24 for instruction: %cttz
+; AVX512BW: Found an estimated cost of 12 for instruction: %cttz
+  %cttz = call <32 x i16> @llvm.cttz.v32i16(<32 x i16> %a, i1 0)
+  ret <32 x i16> %cttz
+}
+
+define <32 x i16> @var_cttz_v32i16u(<32 x i16> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_cttz_v32i16u':
+; SSE2: Found an estimated cost of 64 for instruction: %cttz
+; SSE42: Found an estimated cost of 48 for instruction: %cttz
+; AVX1: Found an estimated cost of 52 for instruction: %cttz
+; AVX2: Found an estimated cost of 24 for instruction: %cttz
+; AVX512F: Found an estimated cost of 24 for instruction: %cttz
+; AVX512BW: Found an estimated cost of 12 for instruction: %cttz
+  %cttz = call <32 x i16> @llvm.cttz.v32i16(<32 x i16> %a, i1 1)
+  ret <32 x i16> %cttz
+}
+
 define <16 x i8> @var_cttz_v16i8(<16 x i8> %a) {
 ; CHECK: 'Cost Model Analysis' for function 'var_cttz_v16i8':
 ; SSE2: Found an estimated cost of 13 for instruction: %cttz
 ; SSE42: Found an estimated cost of 9 for instruction: %cttz
 ; AVX: Found an estimated cost of 9 for instruction: %cttz
+; AVX512: Found an estimated cost of 9 for instruction: %cttz
   %cttz = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %a, i1 0)
   ret <16 x i8> %cttz
 }
@@ -208,6 +300,7 @@ define <16 x i8> @var_cttz_v16i8u(<16 x i8> %a) {
 ; SSE2: Found an estimated cost of 13 for instruction: %cttz
 ; SSE42: Found an estimated cost of 9 for instruction: %cttz
 ; AVX: Found an estimated cost of 9 for instruction: %cttz
+; AVX512: Found an estimated cost of 9 for instruction: %cttz
   %cttz = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %a, i1 1)
   ret <16 x i8> %cttz
 }
@@ -218,6 +311,7 @@ define <32 x i8> @var_cttz_v32i8(<32 x i8> %a) {
 ; SSE42: Found an estimated cost of 18 for instruction: %cttz
 ; AVX1: Found an estimated cost of 20 for instruction: %cttz
 ; AVX2: Found an estimated cost of 9 for instruction: %cttz
+; AVX512: Found an estimated cost of 9 for instruction: %cttz
   %cttz = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %a, i1 0)
   ret <32 x i8> %cttz
 }
@@ -228,6 +322,31 @@ define <32 x i8> @var_cttz_v32i8u(<32 x i8> %a) {
 ; SSE42: Found an estimated cost of 18 for instruction: %cttz
 ; AVX1: Found an estimated cost of 20 for instruction: %cttz
 ; AVX2: Found an estimated cost of 9 for instruction: %cttz
+; AVX512: Found an estimated cost of 9 for instruction: %cttz
   %cttz = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %a, i1 1)
   ret <32 x i8> %cttz
 }
+
+define <64 x i8> @var_cttz_v64i8(<64 x i8> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_cttz_v64i8':
+; SSE2: Found an estimated cost of 52 for instruction: %cttz
+; SSE42: Found an estimated cost of 36 for instruction: %cttz
+; AVX1: Found an estimated cost of 40 for instruction: %cttz
+; AVX2: Found an estimated cost of 18 for instruction: %cttz
+; AVX512F: Found an estimated cost of 18 for instruction: %cttz
+; AVX512BW: Found an estimated cost of 9 for instruction: %cttz
+  %cttz = call <64 x i8> @llvm.cttz.v64i8(<64 x i8> %a, i1 0)
+  ret <64 x i8> %cttz
+}
+
+define <64 x i8> @var_cttz_v64i8u(<64 x i8> %a) {
+; CHECK: 'Cost Model Analysis' for function 'var_cttz_v64i8u':
+; SSE2: Found an estimated cost of 52 for instruction: %cttz
+; SSE42: Found an estimated cost of 36 for instruction: %cttz
+; AVX1: Found an estimated cost of 40 for instruction: %cttz
+; AVX2: Found an estimated cost of 18 for instruction: %cttz
+; AVX512F: Found an estimated cost of 18 for instruction: %cttz
+; AVX512BW: Found an estimated cost of 9 for instruction: %cttz
+  %cttz = call <64 x i8> @llvm.cttz.v64i8(<64 x i8> %a, i1 1)
+  ret <64 x i8> %cttz
+}
diff --git a/test/Analysis/ScalarEvolution/nsw.ll b/test/Analysis/ScalarEvolution/nsw.ll
index a3752919d334..39b958d3ea0e 100644
--- a/test/Analysis/ScalarEvolution/nsw.ll
+++ b/test/Analysis/ScalarEvolution/nsw.ll
@@ -102,7 +102,7 @@ for.body.i.i: ; preds = %entry, %for.body.i.
   %cmp.i.i = icmp eq i32* %ptrincdec.i.i, %end
   br i1 %cmp.i.i, label %_ZSt4fillIPiiEvT_S1_RKT0_.exit, label %for.body.i.i
 ; CHECK: Loop %for.body.i.i: backedge-taken count is ((-4 + (-1 * %begin) + %end) /u 4)
-; CHECK: Loop %for.body.i.i: max backedge-taken count is ((-4 + (-1 * %begin) + %end) /u 4)
+; CHECK: Loop %for.body.i.i: max backedge-taken count is 4611686018427387903
 _ZSt4fillIPiiEvT_S1_RKT0_.exit: ; preds = %for.body.i.i, %entry
   ret void
 }
diff --git a/test/Analysis/ScalarEvolution/trip-count-pow2.ll b/test/Analysis/ScalarEvolution/trip-count-pow2.ll
index 8d053060b50c..04d1b9544ab2 100644
--- a/test/Analysis/ScalarEvolution/trip-count-pow2.ll
+++ b/test/Analysis/ScalarEvolution/trip-count-pow2.ll
@@ -14,7 +14,7 @@ exit:
 
 ; CHECK-LABEL: @test1
 ; CHECK: Loop %loop: backedge-taken count is ((-32 + (96 * %n)) /u 32)
-; CHECK: Loop %loop: max backedge-taken count is ((-32 + (96 * %n)) /u 32)
+; CHECK: Loop %loop: max backedge-taken count is 134217727
 }
 
 ; PR19183
@@ -32,7 +32,7 @@ exit:
 
 ; CHECK-LABEL: @test2
 ; CHECK: Loop %loop: backedge-taken count is ((-32 + (32 * (%n /u 32))) /u 32)
-; CHECK: Loop %loop: max backedge-taken count is ((-32 + (32 * (%n /u 32))) /u 32)
+; CHECK: Loop %loop: max backedge-taken count is 134217727
 }
 
 define void @test3(i32 %n) {
@@ -49,7 +49,7 @@ exit:
 
 ; CHECK-LABEL: @test3
 ; CHECK: Loop %loop: backedge-taken count is ((-32 + (32 * %n)) /u 32)
-; CHECK: Loop %loop: max backedge-taken count is ((-32 + (32 * %n)) /u 32)
+; CHECK: Loop %loop: max backedge-taken count is 134217727
 }
 
 define void @test4(i32 %n) {
@@ -66,7 +66,7 @@ exit:
 
 ; CHECK-LABEL: @test4
 ; CHECK: Loop %loop: backedge-taken count is ((-4 + (-1431655764 * %n)) /u 4)
-; CHECK: Loop %loop: max backedge-taken count is ((-4 + (-1431655764 * %n)) /u 4)
+; CHECK: Loop %loop: max backedge-taken count is 1073741823
 }
 
 define void @test5(i32 %n) {
@@ -83,5 +83,5 @@ exit:
 
 ; CHECK-LABEL: @test5
 ; CHECK: Loop %loop: backedge-taken count is ((-4 + (4 * %n)) /u 4)
-; CHECK: Loop %loop: max backedge-taken count is ((-4 + (4 * %n)) /u 4)
+; CHECK: Loop %loop: max backedge-taken count is 1073741823
 }
diff --git a/test/CodeGen/AArch64/arm64-ccmp.ll b/test/CodeGen/AArch64/arm64-ccmp.ll
index fc1aeb7b37d9..2682fa7dcce1 100644
--- a/test/CodeGen/AArch64/arm64-ccmp.ll
+++ b/test/CodeGen/AArch64/arm64-ccmp.ll
@@ -378,11 +378,11 @@ define i64 @select_noccmp1(i64 %v1, i64 %v2, i64 %v3, i64 %r) {
 ; CHECK-NEXT: cmp x0, #13
 ; CHECK-NOT: ccmp
 ; CHECK-NEXT: cset [[REG1:w[0-9]+]], gt
-; CHECK-NEXT: and [[REG4:w[0-9]+]], [[REG0]], [[REG1]]
 ; CHECK-NEXT: cmp x2, #2
 ; CHECK-NEXT: cset [[REG2:w[0-9]+]], lt
 ; CHECK-NEXT: cmp x2, #4
 ; CHECK-NEXT: cset [[REG3:w[0-9]+]], gt
+; CHECK-NEXT: and [[REG4:w[0-9]+]], [[REG0]], [[REG1]]
 ; CHECK-NEXT: and [[REG5:w[0-9]+]], [[REG2]], [[REG3]]
 ; CHECK-NEXT: orr [[REG6:w[0-9]+]], [[REG4]], [[REG5]]
 ; CHECK-NEXT: cmp [[REG6]], #0
diff --git a/test/CodeGen/AArch64/arm64-misched-multimmo.ll b/test/CodeGen/AArch64/arm64-misched-multimmo.ll
index 4c0195b93a44..3593668e0156 100644
--- a/test/CodeGen/AArch64/arm64-misched-multimmo.ll
+++ b/test/CodeGen/AArch64/arm64-misched-multimmo.ll
@@ -12,7 +12,7 @@
 ; CHECK: Successors:
 ; CHECK-NOT: ch SU(4)
 ; CHECK: SU(3)
-; CHECK: SU(5): STRWui %WZR, %X{{[0-9]+}}
+; CHECK: SU(4): STRWui %WZR, %X{{[0-9]+}}
 define i32 @foo() {
 entry:
   %0 = load i32, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @G2, i64 0, i64 0), align 4
diff --git a/test/CodeGen/AMDGPU/GlobalISel/amdgpu-irtranslator.ll b/test/CodeGen/AMDGPU/GlobalISel/amdgpu-irtranslator.ll
index 5b7800996133..cdfb667c26bd 100644
--- a/test/CodeGen/AMDGPU/GlobalISel/amdgpu-irtranslator.ll
+++ b/test/CodeGen/AMDGPU/GlobalISel/amdgpu-irtranslator.ll
@@ -6,7 +6,8 @@
 ; Tests for add.
 ; CHECK: name: addi32
 ; CHECK: {{%[0-9]+}}(s32) = G_ADD
-define i32 @addi32(i32 %arg1, i32 %arg2) {
+define amdgpu_kernel void @addi32(i32 %arg1, i32 %arg2) {
   %res = add i32 %arg1, %arg2
-  ret i32 %res
+  store i32 %res, i32 addrspace(1)* undef
+  ret void
 }
diff --git a/test/CodeGen/AMDGPU/add.i16.ll b/test/CodeGen/AMDGPU/add.i16.ll
index 3b274c9d2027..bee13d8c17f1 100644
--- a/test/CodeGen/AMDGPU/add.i16.ll
+++ b/test/CodeGen/AMDGPU/add.i16.ll
@@ -84,11 +84,10 @@ define amdgpu_kernel void @v_test_add_i16_zext_to_i32(i32 addrspace(1)* %out, i1
 
 ; FIXME: Need to handle non-uniform case for function below (load without gep).
; GCN-LABEL: {{^}}v_test_add_i16_zext_to_i64: -; VI-DAG: v_mov_b32_e32 v[[VZERO:[0-9]+]], 0 ; VI: flat_load_ushort [[A:v[0-9]+]] ; VI: flat_load_ushort [[B:v[0-9]+]] ; VI-DAG: v_add_u16_e32 v[[ADD:[0-9]+]], [[B]], [[A]] -; VI: buffer_store_dwordx2 v{{\[}}[[ADD]]:[[VZERO]]{{\]}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0{{$}} +; VI: buffer_store_dwordx2 v{{\[}}[[ADD]]:{{[0-9]+\]}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0{{$}} define amdgpu_kernel void @v_test_add_i16_zext_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 { %tid = call i32 @llvm.amdgcn.workitem.id.x() %gep.out = getelementptr inbounds i64, i64 addrspace(1)* %out, i32 %tid diff --git a/test/CodeGen/AMDGPU/add.v2i16.ll b/test/CodeGen/AMDGPU/add.v2i16.ll index 73e80d523f1e..a6b280578531 100644 --- a/test/CodeGen/AMDGPU/add.v2i16.ll +++ b/test/CodeGen/AMDGPU/add.v2i16.ll @@ -202,10 +202,10 @@ define amdgpu_kernel void @v_test_add_v2i16_zext_to_v2i32(<2 x i32> addrspace(1) ; VI: flat_load_ushort v[[B_LO:[0-9]+]] ; VI: flat_load_ushort v[[B_HI:[0-9]+]] -; VI: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}} -; VI: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}} -; VI: v_add_u16_e32 -; VI: v_add_u16_e32 +; VI-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}} +; VI-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}} +; VI-DAG: v_add_u16_e32 +; VI-DAG: v_add_u16_e32 ; VI: buffer_store_dwordx4 define amdgpu_kernel void @v_test_add_v2i16_zext_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* %in0, <2 x i16> addrspace(1)* %in1) #1 { diff --git a/test/CodeGen/AMDGPU/bfe-patterns.ll b/test/CodeGen/AMDGPU/bfe-patterns.ll index c23cc1c88b52..907c8c2216b7 100644 --- a/test/CodeGen/AMDGPU/bfe-patterns.ll +++ b/test/CodeGen/AMDGPU/bfe-patterns.ll @@ -50,7 +50,7 @@ define amdgpu_kernel void @v_ubfe_sub_multi_use_shl_i32(i32 addrspace(1)* %out, ; GCN-LABEL: {{^}}s_ubfe_sub_i32: ; GCN: s_load_dword [[SRC:s[0-9]+]] ; GCN: s_load_dword [[WIDTH:s[0-9]+]] -; GCN: v_mov_b32_e32 [[VWIDTH:v[0-9]+]], {{s[0-9]+}} +; GCN: v_mov_b32_e32 [[VWIDTH:v[0-9]+]], [[WIDTH]] ; GCN: v_bfe_u32 v{{[0-9]+}}, [[SRC]], 0, [[VWIDTH]] define amdgpu_kernel void @s_ubfe_sub_i32(i32 addrspace(1)* %out, i32 %src, i32 %width) #1 { %id.x = tail call i32 @llvm.amdgcn.workitem.id.x() @@ -128,7 +128,7 @@ define amdgpu_kernel void @v_sbfe_sub_multi_use_shl_i32(i32 addrspace(1)* %out, ; GCN-LABEL: {{^}}s_sbfe_sub_i32: ; GCN: s_load_dword [[SRC:s[0-9]+]] ; GCN: s_load_dword [[WIDTH:s[0-9]+]] -; GCN: v_mov_b32_e32 [[VWIDTH:v[0-9]+]], {{s[0-9]+}} +; GCN: v_mov_b32_e32 [[VWIDTH:v[0-9]+]], [[WIDTH]] ; GCN: v_bfe_i32 v{{[0-9]+}}, [[SRC]], 0, [[VWIDTH]] define amdgpu_kernel void @s_sbfe_sub_i32(i32 addrspace(1)* %out, i32 %src, i32 %width) #1 { %id.x = tail call i32 @llvm.amdgcn.workitem.id.x() diff --git a/test/CodeGen/AMDGPU/coalescer_distribute.ll b/test/CodeGen/AMDGPU/coalescer_distribute.ll index 7ca2612598c8..d0276a3fb59c 100644 --- a/test/CodeGen/AMDGPU/coalescer_distribute.ll +++ b/test/CodeGen/AMDGPU/coalescer_distribute.ll @@ -5,7 +5,7 @@ target triple = "amdgcn--" define spir_kernel void @hoge() { bb: - %tmp = tail call i32 @llvm.r600.read.tidig.x() + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() br i1 undef, label %bb2, label %bb23 bb2: @@ -50,4 +50,4 @@ bb34: ret void } -declare i32 @llvm.r600.read.tidig.x() +declare i32 @llvm.amdgcn.workitem.id.x() diff --git a/test/CodeGen/AMDGPU/ctlz.ll b/test/CodeGen/AMDGPU/ctlz.ll index e252971e3f42..149c50685b1d 100644 --- a/test/CodeGen/AMDGPU/ctlz.ll +++ b/test/CodeGen/AMDGPU/ctlz.ll @@ -135,7 +135,6 @@ define amdgpu_kernel void @s_ctlz_i64_trunc(i32 
addrspace(1)* noalias %out, i64 } ; FUNC-LABEL: {{^}}v_ctlz_i64: -; GCN-DAG: v_mov_b32_e32 v[[CTLZ_HI:[0-9]+]], 0{{$}} ; GCN-DAG: {{buffer|flat}}_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}} ; GCN-DAG: v_cmp_eq_u32_e64 [[CMPHI:s\[[0-9]+:[0-9]+\]]], 0, v[[HI]] ; GCN-DAG: v_ffbh_u32_e32 [[FFBH_LO:v[0-9]+]], v[[LO]] @@ -145,7 +144,7 @@ define amdgpu_kernel void @s_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64 ; GCN-DAG: v_or_b32_e32 [[OR:v[0-9]+]], v[[HI]], v[[LO]] ; GCN-DAG: v_cmp_ne_u32_e32 vcc, 0, [[OR]] ; GCN-DAG: v_cndmask_b32_e32 v[[CLTZ_LO:[0-9]+]], 64, v[[CTLZ:[0-9]+]], vcc -; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v{{\[}}[[CLTZ_LO]]:[[CTLZ_HI]]{{\]}} +; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v{{\[}}[[CLTZ_LO]]:[[CTLZ_HI:[0-9]+]]{{\]}} define amdgpu_kernel void @v_ctlz_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind { %tid = call i32 @llvm.r600.read.tidig.x() %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid diff --git a/test/CodeGen/AMDGPU/ctlz_zero_undef.ll b/test/CodeGen/AMDGPU/ctlz_zero_undef.ll index 87ba563a740f..48f3e4401f1a 100644 --- a/test/CodeGen/AMDGPU/ctlz_zero_undef.ll +++ b/test/CodeGen/AMDGPU/ctlz_zero_undef.ll @@ -121,8 +121,7 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i64_trunc(i32 addrspace(1)* noalias ; GCN-DAG: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, 32, [[FFBH_LO]] ; GCN-DAG: v_ffbh_u32_e32 [[FFBH_HI:v[0-9]+]], v[[HI]] ; GCN-DAG: v_cndmask_b32_e64 v[[CTLZ:[0-9]+]], [[FFBH_HI]], [[FFBH_LO]] -; GCN-DAG: v_mov_b32_e32 v[[CTLZ_HI:[0-9]+]], 0{{$}} -; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v{{\[}}[[CTLZ]]:[[CTLZ_HI]]{{\]}} +; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v{{\[}}[[CTLZ]]:[[CTLZ_HI:[0-9]+]]{{\]}} define amdgpu_kernel void @v_ctlz_zero_undef_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind { %tid = call i32 @llvm.r600.read.tidig.x() %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid diff --git a/test/CodeGen/AMDGPU/ds_write2.ll b/test/CodeGen/AMDGPU/ds_write2.ll index ab1cf0ba25b5..0f49919a1d10 100644 --- a/test/CodeGen/AMDGPU/ds_write2.ll +++ b/test/CodeGen/AMDGPU/ds_write2.ll @@ -266,8 +266,8 @@ define amdgpu_kernel void @write2_ptr_subreg_arg_two_val_f32(float addrspace(1)* } ; SI-LABEL: @simple_write2_one_val_f64 -; SI: buffer_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]], -; SI: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 3, v{{[0-9]+}} +; SI-DAG: buffer_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]], +; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 3, v{{[0-9]+}} ; SI: ds_write2_b64 [[VPTR]], [[VAL]], [[VAL]] offset1:8 ; SI: s_endpgm define amdgpu_kernel void @simple_write2_one_val_f64(double addrspace(1)* %C, double addrspace(1)* %in) #0 { diff --git a/test/CodeGen/AMDGPU/endcf-loop-header.ll b/test/CodeGen/AMDGPU/endcf-loop-header.ll index bd861e0c663e..3ae74abcb6cb 100644 --- a/test/CodeGen/AMDGPU/endcf-loop-header.ll +++ b/test/CodeGen/AMDGPU/endcf-loop-header.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s +; RUN: llc < %s -march=amdgcn -verify-machineinstrs | FileCheck %s ; This tests that the llvm.SI.end.cf intrinsic is not inserted into the ; loop block. 
This intrinsic will be lowered to s_or_b64 by the code @@ -14,7 +14,7 @@ ; CHECK: s_cbranch_execnz [[LOOP_LABEL]] define amdgpu_kernel void @test(i32 addrspace(1)* %out) { entry: - %cond = call i32 @llvm.r600.read.tidig.x() #0 + %cond = call i32 @llvm.amdgcn.workitem.id.x() #0 %tmp0 = icmp eq i32 %cond, 0 br i1 %tmp0, label %if, label %loop @@ -34,6 +34,6 @@ done: ret void } -declare i32 @llvm.r600.read.tidig.x() #0 +declare i32 @llvm.amdgcn.workitem.id.x() #0 -attributes #0 = { readnone } +attributes #0 = { nounwind readnone } diff --git a/test/CodeGen/AMDGPU/fmed3.ll b/test/CodeGen/AMDGPU/fmed3.ll index d2cfc713ed37..27d9261b1fab 100644 --- a/test/CodeGen/AMDGPU/fmed3.ll +++ b/test/CodeGen/AMDGPU/fmed3.ll @@ -845,10 +845,10 @@ define amdgpu_kernel void @v_nnan_inputs_missing2_med3_f32_pat0(float addrspace( ; GCN: {{buffer_|flat_}}load_dword [[A:v[0-9]+]] ; GCN: {{buffer_|flat_}}load_dword [[B:v[0-9]+]] ; GCN: {{buffer_|flat_}}load_dword [[C:v[0-9]+]] -; GCN: v_min_f32 -; GCN: v_max_f32 -; GCN: v_min_f32 -; GCN: v_max_f32 +; GCN-DAG: v_min_f32 +; GCN-DAG: v_max_f32 +; GCN-DAG: v_min_f32 +; GCN-DAG: v_max_f32 define amdgpu_kernel void @v_test_global_nnans_med3_f32_pat0_srcmod0_mismatch(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #2 { %tid = call i32 @llvm.amdgcn.workitem.id.x() %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid diff --git a/test/CodeGen/AMDGPU/frame-index-elimination.ll b/test/CodeGen/AMDGPU/frame-index-elimination.ll new file mode 100644 index 000000000000..d67988b46325 --- /dev/null +++ b/test/CodeGen/AMDGPU/frame-index-elimination.ll @@ -0,0 +1,124 @@ +; RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s + +; Test that non-entry function frame indices are expanded properly to +; give an index relative to the scratch wave offset register + +; Materialize into a mov. Make sure there isn't an unnecessary copy. +; GCN-LABEL: {{^}}func_mov_fi_i32: +; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN: s_sub_u32 vcc_hi, s5, s4 +; GCN-NEXT: s_lshr_b32 vcc_hi, vcc_hi, 6 +; GCN-NEXT: v_add_i32_e64 v0, vcc, vcc_hi, 4 +; GCN-NOT: v_mov +; GCN: ds_write_b32 v0, v0 +define void @func_mov_fi_i32() #0 { + %alloca = alloca i32 + store volatile i32* %alloca, i32* addrspace(3)* undef + ret void +} + +; Materialize into an add of a constant offset from the FI. +; FIXME: Should be able to merge adds + +; GCN-LABEL: {{^}}func_add_constant_to_fi_i32: +; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN: s_sub_u32 s6, s5, s4 +; GCN-NEXT: s_lshr_b32 s6, s6, 6 +; GCN-NEXT: v_add_i32_e64 v0, s{{\[[0-9]+:[0-9]+\]}}, s6, 4 +; GCN-NEXT: v_add_i32_e32 v0, vcc, 4, v0 +; GCN-NOT: v_mov +; GCN: ds_write_b32 v0, v0 +define void @func_add_constant_to_fi_i32() #0 { + %alloca = alloca [2 x i32], align 4 + %gep0 = getelementptr inbounds [2 x i32], [2 x i32]* %alloca, i32 0, i32 1 + store volatile i32* %gep0, i32* addrspace(3)* undef + ret void +} + +; A user the materialized frame index can't be meaningfully folded +; into. 
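+; Here the frame index feeds a v_mul_lo_i32 by 9, a use the address cannot be folded into, so it is first materialized with the s_sub_u32/s_lshr_b32/v_add_i32 sequence checked below.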
+ +; GCN-LABEL: {{^}}func_other_fi_user_i32: +; GCN: s_sub_u32 vcc_hi, s5, s4 +; GCN-NEXT: s_lshr_b32 vcc_hi, vcc_hi, 6 +; GCN-NEXT: v_add_i32_e64 v0, vcc, vcc_hi, 4 +; GCN-NEXT: v_mul_lo_i32 v0, v0, 9 +; GCN-NOT: v_mov +; GCN: ds_write_b32 v0, v0 +define void @func_other_fi_user_i32() #0 { + %alloca = alloca [2 x i32], align 4 + %ptrtoint = ptrtoint [2 x i32]* %alloca to i32 + %mul = mul i32 %ptrtoint, 9 + store volatile i32 %mul, i32 addrspace(3)* undef + ret void +} + +; GCN-LABEL: {{^}}func_store_private_arg_i32_ptr: +; GCN: v_mov_b32_e32 v1, 15{{$}} +; GCN: buffer_store_dword v1, v0, s[0:3], s4 offen{{$}} +define void @func_store_private_arg_i32_ptr(i32* %ptr) #0 { + store volatile i32 15, i32* %ptr + ret void +} + +; GCN-LABEL: {{^}}func_load_private_arg_i32_ptr: +; GCN: s_waitcnt +; GCN-NEXT: buffer_load_dword v0, v0, s[0:3], s4 offen{{$}} +define void @func_load_private_arg_i32_ptr(i32* %ptr) #0 { + %val = load volatile i32, i32* %ptr + ret void +} + +; GCN-LABEL: {{^}}void_func_byval_struct_i8_i32_ptr: +; GCN: s_waitcnt +; GCN-NEXT: s_sub_u32 s6, s5, s4 +; GCN-NEXT: v_lshr_b32_e64 v0, s6, 6 +; GCN-NEXT: v_add_i32_e32 v0, vcc, 4, v0 +; GCN-NOT: v_mov +; GCN: ds_write_b32 v0, v0 +define void @void_func_byval_struct_i8_i32_ptr({ i8, i32 }* byval %arg0) #0 { + %gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 }* %arg0, i32 0, i32 0 + %gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 }* %arg0, i32 0, i32 1 + %load1 = load i32, i32* %gep1 + store volatile i32* %gep1, i32* addrspace(3)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_byval_struct_i8_i32_ptr_value: +; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: buffer_load_ubyte v0, off, s[0:3], s5 +; GCN-NEXT: buffer_load_dword v1, off, s[0:3], s5 offset:4 +define void @void_func_byval_struct_i8_i32_ptr_value({ i8, i32 }* byval %arg0) #0 { + %gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 }* %arg0, i32 0, i32 0 + %gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 }* %arg0, i32 0, i32 1 + %load0 = load i8, i8* %gep0 + %load1 = load i32, i32* %gep1 + store volatile i8 %load0, i8 addrspace(3)* undef + store volatile i32 %load1, i32 addrspace(3)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_byval_struct_i8_i32_ptr_nonentry_block: +; GCN: s_sub_u32 s8, s5, s4 +; GCN: v_lshr_b32_e64 v1, s8, 6 +; GCN: s_and_saveexec_b64 + +; GCN: v_add_i32_e32 v0, vcc, 4, v1 +; GCN: buffer_load_dword v1, v1, s[0:3], s4 offen offset:4 +; GCN: ds_write_b32 +define void @void_func_byval_struct_i8_i32_ptr_nonentry_block({ i8, i32 }* byval %arg0, i32 %arg2) #0 { + %cmp = icmp eq i32 %arg2, 0 + br i1 %cmp, label %bb, label %ret + +bb: + %gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 }* %arg0, i32 0, i32 0 + %gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 }* %arg0, i32 0, i32 1 + %load1 = load volatile i32, i32* %gep1 + store volatile i32* %gep1, i32* addrspace(3)* undef + br label %ret + +ret: + ret void +} + +attributes #0 = { nounwind } diff --git a/test/CodeGen/AMDGPU/function-args.ll b/test/CodeGen/AMDGPU/function-args.ll new file mode 100644 index 000000000000..9b1368493ba5 --- /dev/null +++ b/test/CodeGen/AMDGPU/function-args.ll @@ -0,0 +1,734 @@ +; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=CI %s +; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI %s +; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global
-verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI -check-prefix=GFX9 %s + +; GCN-LABEL: {{^}}void_func_i1: +; GCN: v_and_b32_e32 v0, 1, v0 +; GCN: buffer_store_byte v0, off +define void @void_func_i1(i1 %arg0) #0 { + store i1 %arg0, i1 addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_i1_zeroext: +; GCN: s_waitcnt +; GCN-NEXT: v_or_b32_e32 v0, 12, v0 +; GCN-NOT: v0 +; GCN: buffer_store_dword v0, off +define void @void_func_i1_zeroext(i1 zeroext %arg0) #0 { + %ext = zext i1 %arg0 to i32 + %add = add i32 %ext, 12 + store i32 %add, i32 addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_i1_signext: +; GCN: s_waitcnt +; GCN-NEXT: v_add_i32_e32 v0, vcc, 12, v0 +; GCN-NOT: v0 +; GCN: buffer_store_dword v0, off +define void @void_func_i1_signext(i1 signext %arg0) #0 { + %ext = sext i1 %arg0 to i32 + %add = add i32 %ext, 12 + store i32 %add, i32 addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_i8: +; GCN-NOT: v0 +; GCN: buffer_store_byte v0, off +define void @void_func_i8(i8 %arg0) #0 { + store i8 %arg0, i8 addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_i8_zeroext: +; GCN-NOT: and_b32 +; GCN: v_add_i32_e32 v0, vcc, 12, v0 +define void @void_func_i8_zeroext(i8 zeroext %arg0) #0 { + %ext = zext i8 %arg0 to i32 + %add = add i32 %ext, 12 + store i32 %add, i32 addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_i8_signext: +; GCN-NOT: v_bfe_i32 +; GCN: v_add_i32_e32 v0, vcc, 12, v0 +define void @void_func_i8_signext(i8 signext %arg0) #0 { + %ext = sext i8 %arg0 to i32 + %add = add i32 %ext, 12 + store i32 %add, i32 addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_i16: +; GCN: buffer_store_short v0, off +define void @void_func_i16(i16 %arg0) #0 { + store i16 %arg0, i16 addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_i16_zeroext: +; GCN-NOT: v0 +; GCN: v_add_i32_e32 v0, vcc, 12, v0 +define void @void_func_i16_zeroext(i16 zeroext %arg0) #0 { + %ext = zext i16 %arg0 to i32 + %add = add i32 %ext, 12 + store i32 %add, i32 addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_i16_signext: +; GCN-NOT: v0 +; GCN: v_add_i32_e32 v0, vcc, 12, v0 +define void @void_func_i16_signext(i16 signext %arg0) #0 { + %ext = sext i16 %arg0 to i32 + %add = add i32 %ext, 12 + store i32 %add, i32 addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_i32: +; GCN-NOT: v0 +; GCN: buffer_store_dword v0, off +define void @void_func_i32(i32 %arg0) #0 { + store i32 %arg0, i32 addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_i64: +; GCN-NOT: v[0:1] +; GCN-NOT: v0 +; GCN-NOT: v1 +; GCN: buffer_store_dwordx2 v[0:1], off +define void @void_func_i64(i64 %arg0) #0 { + store i64 %arg0, i64 addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_f16: +; VI-NOT: v0 +; CI: v_cvt_f16_f32_e32 v0, v0 +; GCN: buffer_store_short v0, off +define void @void_func_f16(half %arg0) #0 { + store half %arg0, half addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_f32: +; GCN-NOT: v0 +; GCN: buffer_store_dword v0, off +define void @void_func_f32(float %arg0) #0 { + store float %arg0, float addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_f64: +; GCN-NOT: v[0:1] +; GCN-NOT: v0 +; GCN-NOT: v1 +; GCN: buffer_store_dwordx2 v[0:1], off +define void @void_func_f64(double %arg0) #0 { + store double %arg0, double addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v2i32: +; GCN-NOT: v[0:1] +; GCN-NOT: v0 +; GCN-NOT: v1 +; GCN: buffer_store_dwordx2
v[0:1], off +define void @void_func_v2i32(<2 x i32> %arg0) #0 { + store <2 x i32> %arg0, <2 x i32> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v3i32: +; GCN-DAG: buffer_store_dword v2, off +; GCN-DAG: buffer_store_dwordx2 v[0:1], off +define void @void_func_v3i32(<3 x i32> %arg0) #0 { + store <3 x i32> %arg0, <3 x i32> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v4i32: +; GCN: buffer_store_dwordx4 v[0:3], off +define void @void_func_v4i32(<4 x i32> %arg0) #0 { + store <4 x i32> %arg0, <4 x i32> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v5i32: +; GCN-DAG: buffer_store_dwordx4 v[0:3], off +; GCN-DAG: buffer_store_dword v4, off +define void @void_func_v5i32(<5 x i32> %arg0) #0 { + store <5 x i32> %arg0, <5 x i32> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v8i32: +; GCN-DAG: buffer_store_dwordx4 v[0:3], off +; GCN-DAG: buffer_store_dwordx4 v[4:7], off +define void @void_func_v8i32(<8 x i32> %arg0) #0 { + store <8 x i32> %arg0, <8 x i32> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v16i32: +; GCN-DAG: buffer_store_dwordx4 v[0:3], off +; GCN-DAG: buffer_store_dwordx4 v[4:7], off +; GCN-DAG: buffer_store_dwordx4 v[8:11], off +; GCN-DAG: buffer_store_dwordx4 v[12:15], off +define void @void_func_v16i32(<16 x i32> %arg0) #0 { + store <16 x i32> %arg0, <16 x i32> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v32i32: +; GCN-DAG: buffer_store_dwordx4 v[0:3], off +; GCN-DAG: buffer_store_dwordx4 v[4:7], off +; GCN-DAG: buffer_store_dwordx4 v[8:11], off +; GCN-DAG: buffer_store_dwordx4 v[12:15], off +; GCN-DAG: buffer_store_dwordx4 v[16:19], off +; GCN-DAG: buffer_store_dwordx4 v[20:23], off +; GCN-DAG: buffer_store_dwordx4 v[24:27], off +; GCN-DAG: buffer_store_dwordx4 v[28:31], off +define void @void_func_v32i32(<32 x i32> %arg0) #0 { + store <32 x i32> %arg0, <32 x i32> addrspace(1)* undef + ret void +} + +; 1 over register limit +; GCN-LABEL: {{^}}void_func_v33i32: +; GCN-DAG: buffer_store_dwordx4 v[0:3], off +; GCN-DAG: buffer_store_dwordx4 v[4:7], off +; GCN-DAG: buffer_store_dwordx4 v[8:11], off +; GCN-DAG: buffer_store_dwordx4 v[12:15], off +; GCN-DAG: buffer_load_dword [[STACKLOAD:v[0-9]+]], off, s[0:3], s5 +; GCN-DAG: buffer_store_dwordx4 v[16:19], off +; GCN-DAG: buffer_store_dwordx4 v[20:23], off +; GCN-DAG: buffer_store_dwordx4 v[24:27], off +; GCN-DAG: buffer_store_dwordx4 v[28:31], off +; GCN: buffer_store_dword [[STACKLOAD]], off +define void @void_func_v33i32(<33 x i32> %arg0) #0 { + store <33 x i32> %arg0, <33 x i32> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v2i64: +; GCN: buffer_store_dwordx4 v[0:3], off +define void @void_func_v2i64(<2 x i64> %arg0) #0 { + store <2 x i64> %arg0, <2 x i64> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v3i64: +; GCN-DAG: buffer_store_dwordx4 v[0:3], off +; GCN-DAG: buffer_store_dwordx2 v[4:5], off +define void @void_func_v3i64(<3 x i64> %arg0) #0 { + store <3 x i64> %arg0, <3 x i64> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v4i64: +; GCN-DAG: buffer_store_dwordx4 v[0:3], off +; GCN-DAG: buffer_store_dwordx4 v[4:7], off +define void @void_func_v4i64(<4 x i64> %arg0) #0 { + store <4 x i64> %arg0, <4 x i64> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v5i64: +; GCN-DAG: buffer_store_dwordx4 v[0:3], off +; GCN-DAG: buffer_store_dwordx4 v[4:7], off +; GCN-DAG: buffer_store_dwordx2 v[8:9], off +define void @void_func_v5i64(<5 x i64> %arg0) #0 { + 
store <5 x i64> %arg0, <5 x i64> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v8i64: +; GCN-DAG: buffer_store_dwordx4 v[0:3], off +; GCN-DAG: buffer_store_dwordx4 v[4:7], off +; GCN-DAG: buffer_store_dwordx4 v[8:11], off +; GCN-DAG: buffer_store_dwordx4 v[12:15], off +define void @void_func_v8i64(<8 x i64> %arg0) #0 { + store <8 x i64> %arg0, <8 x i64> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v16i64: +; GCN-DAG: buffer_store_dwordx4 v[0:3], off +; GCN-DAG: buffer_store_dwordx4 v[4:7], off +; GCN-DAG: buffer_store_dwordx4 v[8:11], off +; GCN-DAG: buffer_store_dwordx4 v[12:15], off +; GCN-DAG: buffer_store_dwordx4 v[16:19], off +; GCN-DAG: buffer_store_dwordx4 v[20:23], off +; GCN-DAG: buffer_store_dwordx4 v[24:27], off +; GCN-DAG: buffer_store_dwordx4 v[28:31], off +define void @void_func_v16i64(<16 x i64> %arg0) #0 { + store <16 x i64> %arg0, <16 x i64> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v2i16: +; GFX9-NOT: v0 +; GFX9: buffer_store_dword v0, off +define void @void_func_v2i16(<2 x i16> %arg0) #0 { + store <2 x i16> %arg0, <2 x i16> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v3i16: +; GCN-DAG: buffer_store_dword v0, off +; GCN-DAG: buffer_store_short v2, off +define void @void_func_v3i16(<3 x i16> %arg0) #0 { + store <3 x i16> %arg0, <3 x i16> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v4i16: +; GFX9-NOT: v0 +; GFX9-NOT: v1 +; GFX9: buffer_store_dwordx2 v[0:1], off +define void @void_func_v4i16(<4 x i16> %arg0) #0 { + store <4 x i16> %arg0, <4 x i16> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v5i16: +; GCN-DAG: buffer_store_short v4, off, +; GCN-DAG: buffer_store_dwordx2 v[1:2], off +define void @void_func_v5i16(<5 x i16> %arg0) #0 { + store <5 x i16> %arg0, <5 x i16> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v8i16: +; GFX9-DAG: buffer_store_dwordx4 v[0:3], off +define void @void_func_v8i16(<8 x i16> %arg0) #0 { + store <8 x i16> %arg0, <8 x i16> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v16i16: +; GFX9-DAG: buffer_store_dwordx4 v[0:3], off +; GFX9-DAG: buffer_store_dwordx4 v[4:7], off +define void @void_func_v16i16(<16 x i16> %arg0) #0 { + store <16 x i16> %arg0, <16 x i16> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v2f32: +; GCN-NOT: v[0:1] +; GCN-NOT: v0 +; GCN-NOT: v1 +; GCN: buffer_store_dwordx2 v[0:1], off +define void @void_func_v2f32(<2 x float> %arg0) #0 { + store <2 x float> %arg0, <2 x float> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v3f32: +; GCN-DAG: buffer_store_dword v2, off +; GCN-DAG: buffer_store_dwordx2 v[0:1], off +define void @void_func_v3f32(<3 x float> %arg0) #0 { + store <3 x float> %arg0, <3 x float> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v4f32: +; GCN: buffer_store_dwordx4 v[0:3], off +define void @void_func_v4f32(<4 x float> %arg0) #0 { + store <4 x float> %arg0, <4 x float> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v8f32: +; GCN-DAG: buffer_store_dwordx4 v[0:3], off +; GCN-DAG: buffer_store_dwordx4 v[4:7], off +define void @void_func_v8f32(<8 x float> %arg0) #0 { + store <8 x float> %arg0, <8 x float> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v16f32: +; GCN-DAG: buffer_store_dwordx4 v[0:3], off +; GCN-DAG: buffer_store_dwordx4 v[4:7], off +; GCN-DAG: buffer_store_dwordx4 v[8:11], off +; GCN-DAG: buffer_store_dwordx4 v[12:15], off +define void @void_func_v16f32(<16 x 
float> %arg0) #0 { + store <16 x float> %arg0, <16 x float> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v2f64: +; GCN: buffer_store_dwordx4 v[0:3], off +define void @void_func_v2f64(<2 x double> %arg0) #0 { + store <2 x double> %arg0, <2 x double> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v3f64: +; GCN-DAG: buffer_store_dwordx4 v[0:3], off +; GCN-DAG: buffer_store_dwordx2 v[4:5], off +define void @void_func_v3f64(<3 x double> %arg0) #0 { + store <3 x double> %arg0, <3 x double> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v4f64: +; GCN-DAG: buffer_store_dwordx4 v[0:3], off +; GCN-DAG: buffer_store_dwordx4 v[4:7], off +define void @void_func_v4f64(<4 x double> %arg0) #0 { + store <4 x double> %arg0, <4 x double> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v8f64: +; GCN-DAG: buffer_store_dwordx4 v[0:3], off +; GCN-DAG: buffer_store_dwordx4 v[4:7], off +; GCN-DAG: buffer_store_dwordx4 v[8:11], off +; GCN-DAG: buffer_store_dwordx4 v[12:15], off +define void @void_func_v8f64(<8 x double> %arg0) #0 { + store <8 x double> %arg0, <8 x double> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v16f64: +; GCN-DAG: buffer_store_dwordx4 v[0:3], off +; GCN-DAG: buffer_store_dwordx4 v[4:7], off +; GCN-DAG: buffer_store_dwordx4 v[8:11], off +; GCN-DAG: buffer_store_dwordx4 v[12:15], off +; GCN-DAG: buffer_store_dwordx4 v[16:19], off +; GCN-DAG: buffer_store_dwordx4 v[20:23], off +; GCN-DAG: buffer_store_dwordx4 v[24:27], off +; GCN-DAG: buffer_store_dwordx4 v[28:31], off +define void @void_func_v16f64(<16 x double> %arg0) #0 { + store <16 x double> %arg0, <16 x double> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v2f16: +; GFX9-NOT: v0 +; GFX9: buffer_store_dword v0, off +define void @void_func_v2f16(<2 x half> %arg0) #0 { + store <2 x half> %arg0, <2 x half> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v3f16: +; GFX9-NOT: v0 +; GCN-DAG: buffer_store_dword v0, off +; GCN-DAG: buffer_store_short v2, off +define void @void_func_v3f16(<3 x half> %arg0) #0 { + store <3 x half> %arg0, <3 x half> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v4f16: +; GFX9-NOT: v0 +; GFX9-NOT: v1 +; GFX9-NOT: v[0:1] +; GFX9: buffer_store_dwordx2 v[0:1], off +define void @void_func_v4f16(<4 x half> %arg0) #0 { + store <4 x half> %arg0, <4 x half> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v8f16: +; GFX9-NOT: v0 +; GFX9-NOT: v1 +; GFX9: buffer_store_dwordx4 v[0:3], off +define void @void_func_v8f16(<8 x half> %arg0) #0 { + store <8 x half> %arg0, <8 x half> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v16f16: +; GFX9-NOT: v0 +; GFX9-NOT: v1 +; GFX9-DAG: buffer_store_dwordx4 v[0:3], off +; GFX9-DAG: buffer_store_dwordx4 v[4:7], off +define void @void_func_v16f16(<16 x half> %arg0) #0 { + store <16 x half> %arg0, <16 x half> addrspace(1)* undef + ret void +} + +; Make sure there is no alignment requirement for passed vgprs. 
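+; The i64 below is expected in the unaligned register pair v[1:2]; no padding register is inserted after the i32 in v0.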
+; GCN-LABEL: {{^}}void_func_i32_i64_i32: +; GCN-NOT: v0 +; GCN: buffer_store_dword v0, off +; GCN: buffer_store_dwordx2 v[1:2] +; GCN: buffer_store_dword v3 +define void @void_func_i32_i64_i32(i32 %arg0, i64 %arg1, i32 %arg2) #0 { + store volatile i32 %arg0, i32 addrspace(1)* undef + store volatile i64 %arg1, i64 addrspace(1)* undef + store volatile i32 %arg2, i32 addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_struct_i32: +; GCN-NOT: v0 +; GCN: buffer_store_dword v0, off +define void @void_func_struct_i32({ i32 } %arg0) #0 { + store { i32 } %arg0, { i32 } addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_struct_i8_i32: +; GCN-DAG: buffer_store_byte v0, off +; GCN-DAG: buffer_store_dword v1, off +define void @void_func_struct_i8_i32({ i8, i32 } %arg0) #0 { + store { i8, i32 } %arg0, { i8, i32 } addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_byval_struct_i8_i32: +; GCN-DAG: buffer_load_ubyte v[[ELT0:[0-9]+]], off, s[0:3], s5{{$}} +; GCN-DAG: buffer_load_dword v[[ELT1:[0-9]+]], off, s[0:3], s5 offset:4{{$}} +; GCN-DAG: buffer_store_dword v[[ELT1]] +; GCN-DAG: buffer_store_byte v[[ELT0]] +define void @void_func_byval_struct_i8_i32({ i8, i32 }* byval %arg0) #0 { + %arg0.load = load { i8, i32 }, { i8, i32 }* %arg0 + store { i8, i32 } %arg0.load, { i8, i32 } addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_byval_struct_i8_i32_x2: +; GCN: buffer_load_ubyte v[[ELT0_0:[0-9]+]], off, s[0:3], s5{{$}} +; GCN: buffer_load_dword v[[ELT1_0:[0-9]+]], off, s[0:3], s5 offset:4{{$}} +; GCN: buffer_load_ubyte v[[ELT0_1:[0-9]+]], off, s[0:3], s5 offset:8{{$}} +; GCN: buffer_load_dword v[[ELT1_1:[0-9]+]], off, s[0:3], s5 offset:12{{$}} + +; GCN: ds_write_b32 v0, v0 +; GCN: s_setpc_b64 +define void @void_func_byval_struct_i8_i32_x2({ i8, i32 }* byval %arg0, { i8, i32 }* byval %arg1, i32 %arg2) #0 { + %arg0.load = load volatile { i8, i32 }, { i8, i32 }* %arg0 + %arg1.load = load volatile { i8, i32 }, { i8, i32 }* %arg1 + store volatile { i8, i32 } %arg0.load, { i8, i32 } addrspace(1)* undef + store volatile { i8, i32 } %arg1.load, { i8, i32 } addrspace(1)* undef + store volatile i32 %arg2, i32 addrspace(3)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_byval_i32_byval_i64: +; GCN-DAG: buffer_load_dword v[[ARG0_LOAD:[0-9]+]], off, s[0:3], s5{{$}} +; GCN-DAG: buffer_load_dword v[[ARG1_LOAD0:[0-9]+]], off, s[0:3], s5 offset:8{{$}} +; GCN-DAG: buffer_load_dword v[[ARG1_LOAD1:[0-9]+]], off, s[0:3], s5 offset:12{{$}} +; GCN-DAG: buffer_store_dword v[[ARG0_LOAD]], off +; GCN-DAG: buffer_store_dwordx2 v{{\[}}[[ARG1_LOAD0]]:[[ARG1_LOAD1]]{{\]}}, off +define void @void_func_byval_i32_byval_i64(i32* byval %arg0, i64* byval %arg1) #0 { + %arg0.load = load i32, i32* %arg0 + %arg1.load = load i64, i64* %arg1 + store i32 %arg0.load, i32 addrspace(1)* undef + store i64 %arg1.load, i64 addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v32i32_i32_i64: +; GCN-DAG: buffer_store_dwordx4 v[0:3], off +; GCN-DAG: buffer_store_dwordx4 v[4:7], off +; GCN-DAG: buffer_store_dwordx4 v[8:11], off +; GCN-DAG: buffer_store_dwordx4 v[12:15], off +; GCN-DAG: buffer_store_dwordx4 v[16:19], off +; GCN-DAG: buffer_store_dwordx4 v[20:23], off +; GCN-DAG: buffer_store_dwordx4 v[24:27], off +; GCN-DAG: buffer_store_dwordx4 v[28:31], off +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1:[0-9]+]], off, s[0:3], s5{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_0:[0-9]+]], off, s[0:3], s5 offset:4 +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_1:[0-9]+]], off, s[0:3], s5 offset:8 
+ +; GCN: buffer_store_dword v[[LOAD_ARG1]] +; GCN: buffer_store_dwordx2 v{{\[}}[[LOAD_ARG2_0]]:[[LOAD_ARG2_1]]{{\]}}, off +define void @void_func_v32i32_i32_i64(<32 x i32> %arg0, i32 %arg1, i64 %arg2) #0 { + store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef + store volatile i32 %arg1, i32 addrspace(1)* undef + store volatile i64 %arg2, i64 addrspace(1)* undef + ret void +} + +; FIXME: Different ext load types on CI vs. VI +; GCN-LABEL: {{^}}void_func_v32i32_i1_i8_i16: +; GCN-DAG: buffer_load_ubyte [[LOAD_ARG1:v[0-9]+]], off, s[0:3], s5{{$}} +; VI-DAG: buffer_load_ushort [[LOAD_ARG2:v[0-9]+]], off, s[0:3], s5 offset:4{{$}} +; VI-DAG: buffer_load_ushort [[LOAD_ARG3:v[0-9]+]], off, s[0:3], s5 offset:8{{$}} +; VI-DAG: buffer_load_ushort [[LOAD_ARG4:v[0-9]+]], off, s[0:3], s5 offset:12{{$}} + +; CI-DAG: buffer_load_dword [[LOAD_ARG2:v[0-9]+]], off, s[0:3], s5 offset:4{{$}} +; CI-DAG: buffer_load_dword [[LOAD_ARG3:v[0-9]+]], off, s[0:3], s5 offset:8{{$}} +; CI-DAG: buffer_load_dword [[LOAD_ARG4:v[0-9]+]], off, s[0:3], s5 offset:12{{$}} + +; GCN-DAG: v_and_b32_e32 [[TRUNC_ARG1_I1:v[0-9]+]], 1, [[LOAD_ARG1]] +; CI-DAG: v_cvt_f16_f32_e32 [[CVT_ARG4:v[0-9]+]], [[LOAD_ARG4]] + +; GCN: buffer_store_byte [[TRUNC_ARG1_I1]], off +; GCN: buffer_store_byte [[LOAD_ARG2]], off +; GCN: buffer_store_short [[LOAD_ARG3]], off +; VI: buffer_store_short [[LOAD_ARG4]], off + +; CI: buffer_store_short [[CVT_ARG4]], off +define void @void_func_v32i32_i1_i8_i16(<32 x i32> %arg0, i1 %arg1, i8 %arg2, i16 %arg3, half %arg4) #0 { + store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef + store volatile i1 %arg1, i1 addrspace(1)* undef + store volatile i8 %arg2, i8 addrspace(1)* undef + store volatile i16 %arg3, i16 addrspace(1)* undef + store volatile half %arg4, half addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v32i32_v2i32_v2f32: +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_0:[0-9]+]], off, s[0:3], s5{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_1:[0-9]+]], off, s[0:3], s5 offset:4{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_0:[0-9]+]], off, s[0:3], s5 offset:8{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_1:[0-9]+]], off, s[0:3], s5 offset:12{{$}} + +; GCN: buffer_store_dwordx2 v{{\[}}[[LOAD_ARG1_0]]:[[LOAD_ARG1_1]]{{\]}}, off +; GCN: buffer_store_dwordx2 v{{\[}}[[LOAD_ARG2_0]]:[[LOAD_ARG2_1]]{{\]}}, off +define void @void_func_v32i32_v2i32_v2f32(<32 x i32> %arg0, <2 x i32> %arg1, <2 x float> %arg2) #0 { + store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef + store volatile <2 x i32> %arg1, <2 x i32> addrspace(1)* undef + store volatile <2 x float> %arg2, <2 x float> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v32i32_v2i16_v2f16: +; GFX9-DAG: buffer_load_dword [[LOAD_ARG1:v[0-9]+]], off, s[0:3], s5{{$}} +; GFX9-DAG: buffer_load_dword [[LOAD_ARG2:v[0-9]+]], off, s[0:3], s5 offset:4{{$}} +; GFX9: buffer_store_dword [[LOAD_ARG1]], off +; GFX9: buffer_store_short [[LOAD_ARG2]], off +define void @void_func_v32i32_v2i16_v2f16(<32 x i32> %arg0, <2 x i16> %arg1, <2 x half> %arg2) #0 { + store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef + store volatile <2 x i16> %arg1, <2 x i16> addrspace(1)* undef + store volatile <2 x half> %arg2, <2 x half> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v32i32_v2i64_v2f64: +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_0:[0-9]+]], off, s[0:3], s5{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_1:[0-9]+]], off, s[0:3], s5 offset:4{{$}} +; GCN-DAG: buffer_load_dword 
v[[LOAD_ARG1_2:[0-9]+]], off, s[0:3], s5 offset:8{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_3:[0-9]+]], off, s[0:3], s5 offset:12{{$}} + +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_0:[0-9]+]], off, s[0:3], s5 offset:16{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_1:[0-9]+]], off, s[0:3], s5 offset:20{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_2:[0-9]+]], off, s[0:3], s5 offset:24{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_3:[0-9]+]], off, s[0:3], s5 offset:28{{$}} + +; GCN: buffer_store_dwordx4 v{{\[}}[[LOAD_ARG1_0]]:[[LOAD_ARG1_3]]{{\]}}, off +; GCN: buffer_store_dwordx4 v{{\[}}[[LOAD_ARG2_0]]:[[LOAD_ARG2_3]]{{\]}}, off +define void @void_func_v32i32_v2i64_v2f64(<32 x i32> %arg0, <2 x i64> %arg1, <2 x double> %arg2) #0 { + store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef + store volatile <2 x i64> %arg1, <2 x i64> addrspace(1)* undef + store volatile <2 x double> %arg2, <2 x double> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v32i32_v4i32_v4f32: +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_0:[0-9]+]], off, s[0:3], s5{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_1:[0-9]+]], off, s[0:3], s5 offset:4{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_2:[0-9]+]], off, s[0:3], s5 offset:8{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_3:[0-9]+]], off, s[0:3], s5 offset:12{{$}} + +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_0:[0-9]+]], off, s[0:3], s5 offset:16{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_1:[0-9]+]], off, s[0:3], s5 offset:20{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_2:[0-9]+]], off, s[0:3], s5 offset:24{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_3:[0-9]+]], off, s[0:3], s5 offset:28{{$}} + +; GCN: buffer_store_dwordx4 v{{\[}}[[LOAD_ARG1_0]]:[[LOAD_ARG1_3]]{{\]}}, off +; GCN: buffer_store_dwordx4 v{{\[}}[[LOAD_ARG2_0]]:[[LOAD_ARG2_3]]{{\]}}, off +define void @void_func_v32i32_v4i32_v4f32(<32 x i32> %arg0, <4 x i32> %arg1, <4 x float> %arg2) #0 { + store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef + store volatile <4 x i32> %arg1, <4 x i32> addrspace(1)* undef + store volatile <4 x float> %arg2, <4 x float> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v32i32_v8i32_v8f32: +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_0:[0-9]+]], off, s[0:3], s5{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_1:[0-9]+]], off, s[0:3], s5 offset:4{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_2:[0-9]+]], off, s[0:3], s5 offset:8{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_3:[0-9]+]], off, s[0:3], s5 offset:12{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_4:[0-9]+]], off, s[0:3], s5 offset:16{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_5:[0-9]+]], off, s[0:3], s5 offset:20{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_6:[0-9]+]], off, s[0:3], s5 offset:24{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_7:[0-9]+]], off, s[0:3], s5 offset:28{{$}} + +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_0:[0-9]+]], off, s[0:3], s5 offset:32{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_1:[0-9]+]], off, s[0:3], s5 offset:36{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_2:[0-9]+]], off, s[0:3], s5 offset:40{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_3:[0-9]+]], off, s[0:3], s5 offset:44{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_4:[0-9]+]], off, s[0:3], s5 offset:48{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_5:[0-9]+]], off, s[0:3], s5 offset:52{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_6:[0-9]+]], off, s[0:3], s5 offset:56{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_7:[0-9]+]], 
off, s[0:3], s5 offset:60{{$}} + +; GCN: buffer_store_dwordx4 v{{\[}}[[LOAD_ARG1_4]]:[[LOAD_ARG1_7]]{{\]}}, off +; GCN: buffer_store_dwordx4 v{{\[}}[[LOAD_ARG1_0]]:[[LOAD_ARG1_3]]{{\]}}, off +; GCN: buffer_store_dwordx4 v{{\[}}[[LOAD_ARG2_4]]:[[LOAD_ARG2_7]]{{\]}}, off +; GCN: buffer_store_dwordx4 v{{\[}}[[LOAD_ARG2_0]]:[[LOAD_ARG2_3]]{{\]}}, off +define void @void_func_v32i32_v8i32_v8f32(<32 x i32> %arg0, <8 x i32> %arg1, <8 x float> %arg2) #0 { + store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef + store volatile <8 x i32> %arg1, <8 x i32> addrspace(1)* undef + store volatile <8 x float> %arg2, <8 x float> addrspace(1)* undef + ret void +} + +; GCN-LABEL: {{^}}void_func_v32i32_v16i32_v16f32: +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_0:[0-9]+]], off, s[0:3], s5{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_1:[0-9]+]], off, s[0:3], s5 offset:4{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_2:[0-9]+]], off, s[0:3], s5 offset:8{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_3:[0-9]+]], off, s[0:3], s5 offset:12{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_4:[0-9]+]], off, s[0:3], s5 offset:16{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_5:[0-9]+]], off, s[0:3], s5 offset:20{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_6:[0-9]+]], off, s[0:3], s5 offset:24{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_7:[0-9]+]], off, s[0:3], s5 offset:28{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_8:[0-9]+]], off, s[0:3], s5 offset:32{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_9:[0-9]+]], off, s[0:3], s5 offset:36{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_10:[0-9]+]], off, s[0:3], s5 offset:40{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_11:[0-9]+]], off, s[0:3], s5 offset:44{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_12:[0-9]+]], off, s[0:3], s5 offset:48{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_13:[0-9]+]], off, s[0:3], s5 offset:52{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_14:[0-9]+]], off, s[0:3], s5 offset:56{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG1_15:[0-9]+]], off, s[0:3], s5 offset:60{{$}} + +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_0:[0-9]+]], off, s[0:3], s5 offset:64{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_1:[0-9]+]], off, s[0:3], s5 offset:68{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_2:[0-9]+]], off, s[0:3], s5 offset:72{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_3:[0-9]+]], off, s[0:3], s5 offset:76{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_4:[0-9]+]], off, s[0:3], s5 offset:80{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_5:[0-9]+]], off, s[0:3], s5 offset:84{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_6:[0-9]+]], off, s[0:3], s5 offset:88{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_7:[0-9]+]], off, s[0:3], s5 offset:92{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_8:[0-9]+]], off, s[0:3], s5 offset:96{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_9:[0-9]+]], off, s[0:3], s5 offset:100{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_10:[0-9]+]], off, s[0:3], s5 offset:104{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_11:[0-9]+]], off, s[0:3], s5 offset:108{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_12:[0-9]+]], off, s[0:3], s5 offset:112{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_13:[0-9]+]], off, s[0:3], s5 offset:116{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_14:[0-9]+]], off, s[0:3], s5 offset:120{{$}} +; GCN-DAG: buffer_load_dword v[[LOAD_ARG2_15:[0-9]+]], off, s[0:3], s5 offset:124{{$}} +define void @void_func_v32i32_v16i32_v16f32(<32 x i32> %arg0, <16 x i32> 
%arg1, <16 x float> %arg2) #0 { + store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef + store volatile <16 x i32> %arg1, <16 x i32> addrspace(1)* undef + store volatile <16 x float> %arg2, <16 x float> addrspace(1)* undef + ret void +} + +; Check there is no crash. +; GCN-LABEL: {{^}}void_func_v16i8: +define void @void_func_v16i8(<16 x i8> %arg0) #0 { + store volatile <16 x i8> %arg0, <16 x i8> addrspace(1)* undef + ret void +} + +; Check there is no crash. +; GCN-LABEL: {{^}}void_func_v32i32_v16i8: +define void @void_func_v32i32_v16i8(<32 x i32> %arg0, <16 x i8> %arg1) #0 { + store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef + store volatile <16 x i8> %arg1, <16 x i8> addrspace(1)* undef + ret void +} + +attributes #0 = { nounwind } diff --git a/test/CodeGen/AMDGPU/function-returns.ll b/test/CodeGen/AMDGPU/function-returns.ll new file mode 100644 index 000000000000..f704d43a1742 --- /dev/null +++ b/test/CodeGen/AMDGPU/function-returns.ll @@ -0,0 +1,514 @@ +; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=CI %s +; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI %s +; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI -check-prefix=GFX9 %s + +; GCN-LABEL: {{^}}i1_func_void: +; GCN: buffer_load_ubyte v0, off +; GCN-NEXT: s_waitcnt +; GCN-NEXT: s_setpc_b64 +define i1 @i1_func_void() #0 { + %val = load i1, i1 addrspace(1)* undef + ret i1 %val +} + +; FIXME: Missing and? +; GCN-LABEL: {{^}}i1_zeroext_func_void: +; GCN: buffer_load_ubyte v0, off +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define zeroext i1 @i1_zeroext_func_void() #0 { + %val = load i1, i1 addrspace(1)* undef + ret i1 %val +} + +; GCN-LABEL: {{^}}i1_signext_func_void: +; GCN: buffer_load_ubyte v0, off +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_bfe_i32 v0, v0, 0, 1{{$}} +; GCN-NEXT: s_setpc_b64 +define signext i1 @i1_signext_func_void() #0 { + %val = load i1, i1 addrspace(1)* undef + ret i1 %val +} + +; GCN-LABEL: {{^}}i8_func_void: +; GCN: buffer_load_ubyte v0, off +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define i8 @i8_func_void() #0 { + %val = load i8, i8 addrspace(1)* undef + ret i8 %val +} + +; GCN-LABEL: {{^}}i8_zeroext_func_void: +; GCN: buffer_load_ubyte v0, off +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define zeroext i8 @i8_zeroext_func_void() #0 { + %val = load i8, i8 addrspace(1)* undef + ret i8 %val +} + +; GCN-LABEL: {{^}}i8_signext_func_void: +; GCN: buffer_load_sbyte v0, off +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define signext i8 @i8_signext_func_void() #0 { + %val = load i8, i8 addrspace(1)* undef + ret i8 %val +} + +; GCN-LABEL: {{^}}i16_func_void: +; GCN: buffer_load_ushort v0, off +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define i16 @i16_func_void() #0 { + %val = load i16, i16 addrspace(1)* undef + ret i16 %val +} + +; GCN-LABEL: {{^}}i16_zeroext_func_void: +; GCN: buffer_load_ushort v0, off +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define zeroext i16 @i16_zeroext_func_void() #0 { + %val = load i16, i16 addrspace(1)* undef + ret i16 %val +} + +; GCN-LABEL: {{^}}i16_signext_func_void: +; GCN: buffer_load_sshort v0, off +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define signext i16 @i16_signext_func_void() #0 { + %val
= load i16, i16 addrspace(1)* undef + ret i16 %val +} + +; GCN-LABEL: {{^}}i32_func_void: +; GCN: buffer_load_dword v0, off +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define i32 @i32_func_void() #0 { + %val = load i32, i32 addrspace(1)* undef + ret i32 %val +} + +; GCN-LABEL: {{^}}i64_func_void: +; GCN: buffer_load_dwordx2 v[0:1], off +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define i64 @i64_func_void() #0 { + %val = load i64, i64 addrspace(1)* undef + ret i64 %val +} + +; GCN-LABEL: {{^}}f32_func_void: +; GCN: buffer_load_dword v0, off, s[8:11], 0 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define float @f32_func_void() #0 { + %val = load float, float addrspace(1)* undef + ret float %val +} + +; GCN-LABEL: {{^}}f64_func_void: +; GCN: buffer_load_dwordx2 v[0:1], off +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define double @f64_func_void() #0 { + %val = load double, double addrspace(1)* undef + ret double %val +} + +; GCN-LABEL: {{^}}v2i32_func_void: +; GCN: buffer_load_dwordx2 v[0:1], off +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define <2 x i32> @v2i32_func_void() #0 { + %val = load <2 x i32>, <2 x i32> addrspace(1)* undef + ret <2 x i32> %val +} + +; GCN-LABEL: {{^}}v3i32_func_void: +; GCN: buffer_load_dwordx4 v[0:3], off +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define <3 x i32> @v3i32_func_void() #0 { + %val = load <3 x i32>, <3 x i32> addrspace(1)* undef + ret <3 x i32> %val +} + +; GCN-LABEL: {{^}}v4i32_func_void: +; GCN: buffer_load_dwordx4 v[0:3], off +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define <4 x i32> @v4i32_func_void() #0 { + %val = load <4 x i32>, <4 x i32> addrspace(1)* undef + ret <4 x i32> %val +} + +; GCN-LABEL: {{^}}v5i32_func_void: +; GCN-DAG: buffer_load_dword v4, off +; GCN-DAG: buffer_load_dwordx4 v[0:3], off +; GCN: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define <5 x i32> @v5i32_func_void() #0 { + %val = load volatile <5 x i32>, <5 x i32> addrspace(1)* undef + ret <5 x i32> %val +} + +; GCN-LABEL: {{^}}v8i32_func_void: +; GCN-DAG: buffer_load_dwordx4 v[0:3], off +; GCN-DAG: buffer_load_dwordx4 v[4:7], off +; GCN: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define <8 x i32> @v8i32_func_void() #0 { + %ptr = load volatile <8 x i32> addrspace(1)*, <8 x i32> addrspace(1)* addrspace(2)* undef + %val = load <8 x i32>, <8 x i32> addrspace(1)* %ptr + ret <8 x i32> %val +} + +; GCN-LABEL: {{^}}v16i32_func_void: +; GCN-DAG: buffer_load_dwordx4 v[0:3], off +; GCN-DAG: buffer_load_dwordx4 v[4:7], off +; GCN-DAG: buffer_load_dwordx4 v[8:11], off +; GCN-DAG: buffer_load_dwordx4 v[12:15], off +; GCN: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define <16 x i32> @v16i32_func_void() #0 { + %ptr = load volatile <16 x i32> addrspace(1)*, <16 x i32> addrspace(1)* addrspace(2)* undef + %val = load <16 x i32>, <16 x i32> addrspace(1)* %ptr + ret <16 x i32> %val +} + +; GCN-LABEL: {{^}}v32i32_func_void: +; GCN-DAG: buffer_load_dwordx4 v[0:3], off +; GCN-DAG: buffer_load_dwordx4 v[4:7], off +; GCN-DAG: buffer_load_dwordx4 v[8:11], off +; GCN-DAG: buffer_load_dwordx4 v[12:15], off +; GCN-DAG: buffer_load_dwordx4 v[16:19], off +; GCN-DAG: buffer_load_dwordx4 v[20:23], off +; GCN-DAG: buffer_load_dwordx4 v[24:27], off +; GCN-DAG: buffer_load_dwordx4 v[28:31], off +; GCN: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define <32 x i32> @v32i32_func_void() #0 { + %ptr = load volatile <32 x i32> addrspace(1)*, <32 x i32> addrspace(1)* addrspace(2)* undef + %val = load <32 x i32>, 
<32 x i32> addrspace(1)* %ptr + ret <32 x i32> %val +} + +; GCN-LABEL: {{^}}v2i64_func_void: +; GCN: buffer_load_dwordx4 v[0:3], off +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define <2 x i64> @v2i64_func_void() #0 { + %val = load <2 x i64>, <2 x i64> addrspace(1)* undef + ret <2 x i64> %val +} + +; GCN-LABEL: {{^}}v3i64_func_void: +; GCN-DAG: buffer_load_dwordx4 v[0:3], off +; GCN-DAG: buffer_load_dwordx4 v[4:7], off +; GCN: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define <3 x i64> @v3i64_func_void() #0 { + %ptr = load volatile <3 x i64> addrspace(1)*, <3 x i64> addrspace(1)* addrspace(2)* undef + %val = load <3 x i64>, <3 x i64> addrspace(1)* %ptr + ret <3 x i64> %val +} + +; GCN-LABEL: {{^}}v4i64_func_void: +; GCN: buffer_load_dwordx4 v[0:3], off +; GCN: buffer_load_dwordx4 v[4:7], off +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define <4 x i64> @v4i64_func_void() #0 { + %ptr = load volatile <4 x i64> addrspace(1)*, <4 x i64> addrspace(1)* addrspace(2)* undef + %val = load <4 x i64>, <4 x i64> addrspace(1)* %ptr + ret <4 x i64> %val +} + +; GCN-LABEL: {{^}}v5i64_func_void: +; GCN-DAG: buffer_load_dwordx4 v[0:3], off +; GCN-DAG: buffer_load_dwordx4 v[4:7], off +; GCN-DAG: buffer_load_dwordx4 v[8:11], off +; GCN: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define <5 x i64> @v5i64_func_void() #0 { + %ptr = load volatile <5 x i64> addrspace(1)*, <5 x i64> addrspace(1)* addrspace(2)* undef + %val = load <5 x i64>, <5 x i64> addrspace(1)* %ptr + ret <5 x i64> %val +} + +; GCN-LABEL: {{^}}v8i64_func_void: +; GCN-DAG: buffer_load_dwordx4 v[0:3], off +; GCN-DAG: buffer_load_dwordx4 v[4:7], off +; GCN-DAG: buffer_load_dwordx4 v[8:11], off +; GCN-DAG: buffer_load_dwordx4 v[12:15], off +; GCN: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define <8 x i64> @v8i64_func_void() #0 { + %ptr = load volatile <8 x i64> addrspace(1)*, <8 x i64> addrspace(1)* addrspace(2)* undef + %val = load <8 x i64>, <8 x i64> addrspace(1)* %ptr + ret <8 x i64> %val +} + +; GCN-LABEL: {{^}}v16i64_func_void: +; GCN-DAG: buffer_load_dwordx4 v[0:3], off +; GCN-DAG: buffer_load_dwordx4 v[4:7], off +; GCN-DAG: buffer_load_dwordx4 v[8:11], off +; GCN-DAG: buffer_load_dwordx4 v[12:15], off +; GCN-DAG: buffer_load_dwordx4 v[16:19], off +; GCN-DAG: buffer_load_dwordx4 v[20:23], off +; GCN-DAG: buffer_load_dwordx4 v[24:27], off +; GCN-DAG: buffer_load_dwordx4 v[28:31], off +; GCN: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define <16 x i64> @v16i64_func_void() #0 { + %ptr = load volatile <16 x i64> addrspace(1)*, <16 x i64> addrspace(1)* addrspace(2)* undef + %val = load <16 x i64>, <16 x i64> addrspace(1)* %ptr + ret <16 x i64> %val +} + +; GCN-LABEL: {{^}}v2i16_func_void: +; GFX9: buffer_load_dword v0, off +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_setpc_b64 +define <2 x i16> @v2i16_func_void() #0 { + %val = load <2 x i16>, <2 x i16> addrspace(1)* undef + ret <2 x i16> %val +} + +; GCN-LABEL: {{^}}v3i16_func_void: +; GFX9: buffer_load_dwordx2 v[0:1], off +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_setpc_b64 +define <3 x i16> @v3i16_func_void() #0 { + %val = load <3 x i16>, <3 x i16> addrspace(1)* undef + ret <3 x i16> %val +} + +; GCN-LABEL: {{^}}v4i16_func_void: +; GFX9: buffer_load_dwordx2 v[0:1], off +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_setpc_b64 +define <4 x i16> @v4i16_func_void() #0 { + %val = load <4 x i16>, <4 x i16> addrspace(1)* undef + ret <4 x i16> %val +} + +; FIXME: Should not scalarize +; GCN-LABEL: {{^}}v5i16_func_void: +; GFX9: buffer_load_dwordx2 v[0:1] 
+; GFX9: buffer_load_ushort v4 +; GFX9: v_lshrrev_b32_e32 v3, 16, v1 +; GFX9: v_mov_b32_e32 v2, v1 +; GFX9: v_lshrrev_b32_e32 v3, 16, v0 +; GCN: s_setpc_b64 +define <5 x i16> @v5i16_func_void() #0 { + %ptr = load volatile <5 x i16> addrspace(1)*, <5 x i16> addrspace(1)* addrspace(2)* undef + %val = load <5 x i16>, <5 x i16> addrspace(1)* %ptr + ret <5 x i16> %val +} + +; GCN-LABEL: {{^}}v8i16_func_void: +; GFX9-DAG: buffer_load_dwordx4 v[0:3], off +; GFX9: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_setpc_b64 +define <8 x i16> @v8i16_func_void() #0 { + %ptr = load volatile <8 x i16> addrspace(1)*, <8 x i16> addrspace(1)* addrspace(2)* undef + %val = load <8 x i16>, <8 x i16> addrspace(1)* %ptr + ret <8 x i16> %val +} + +; GCN-LABEL: {{^}}v16i16_func_void: +; GFX9: buffer_load_dwordx4 v[0:3], off +; GFX9: buffer_load_dwordx4 v[4:7], off +; GFX9: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_setpc_b64 +define <16 x i16> @v16i16_func_void() #0 { + %ptr = load volatile <16 x i16> addrspace(1)*, <16 x i16> addrspace(1)* addrspace(2)* undef + %val = load <16 x i16>, <16 x i16> addrspace(1)* %ptr + ret <16 x i16> %val +} + +; FIXME: Should pack +; GCN-LABEL: {{^}}v16i8_func_void: +; GCN-DAG: v12 +; GCN-DAG: v13 +; GCN-DAG: v14 +; GCN-DAG: v15 +define <16 x i8> @v16i8_func_void() #0 { + %ptr = load volatile <16 x i8> addrspace(1)*, <16 x i8> addrspace(1)* addrspace(2)* undef + %val = load <16 x i8>, <16 x i8> addrspace(1)* %ptr + ret <16 x i8> %val +} + +; FIXME: Should pack +; GCN-LABEL: {{^}}v4i8_func_void: +; GCN: buffer_load_dword v0 +; GCN-DAG: v_lshrrev_b32_e32 v2, 16, v0 +; GCN-DAG: v_lshrrev_b32_e32 v3, 24, v0 +; CI-DAG: v_bfe_u32 v1, v0, 8, 8 +; VI-DAG: v_lshrrev_b16_e32 v1, 8, v0 +; GCN: s_setpc_b64 +define <4 x i8> @v4i8_func_void() #0 { + %ptr = load volatile <4 x i8> addrspace(1)*, <4 x i8> addrspace(1)* addrspace(2)* undef + %val = load <4 x i8>, <4 x i8> addrspace(1)* %ptr + ret <4 x i8> %val +} + +; GCN-LABEL: {{^}}struct_i8_i32_func_void: +; GCN-DAG: buffer_load_dword v1 +; GCN-DAG: buffer_load_ubyte v0 +; GCN: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define {i8, i32} @struct_i8_i32_func_void() #0 { + %val = load { i8, i32 }, { i8, i32 } addrspace(1)* undef + ret { i8, i32 } %val +} + +; GCN-LABEL: {{^}}void_func_sret_struct_i8_i32: +; GCN: buffer_load_ubyte [[VAL0:v[0-9]+]] +; GCN: buffer_load_dword [[VAL1:v[0-9]+]] +; GCN: buffer_store_byte [[VAL0]], v0, s[0:3], s4 offen{{$}} +; GCN: buffer_store_dword [[VAL1]], v0, s[0:3], s4 offen offset:4{{$}} +define void @void_func_sret_struct_i8_i32({ i8, i32 }* sret %arg0) #0 { + %val0 = load volatile i8, i8 addrspace(1)* undef + %val1 = load volatile i32, i32 addrspace(1)* undef + %gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 }* %arg0, i32 0, i32 0 + %gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 }* %arg0, i32 0, i32 1 + store i8 %val0, i8* %gep0 + store i32 %val1, i32* %gep1 + ret void +} + +; GCN-LABEL: {{^}}v33i32_func_void: +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:4{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:8{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:12{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:16{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:20{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:24{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen 
offset:28{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:32{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:36{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:40{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:44{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:48{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:52{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:56{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:60{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:64{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:68{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:72{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:76{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:80{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:84{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:88{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:92{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:96{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:100{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:104{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:108{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:112{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:116{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:120{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:124{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:128{{$}} +; GCN: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define <33 x i32> @v33i32_func_void() #0 { + %ptr = load volatile <33 x i32> addrspace(1)*, <33 x i32> addrspace(1)* addrspace(2)* undef + %val = load <33 x i32>, <33 x i32> addrspace(1)* %ptr + ret <33 x i32> %val +} + +; GCN-LABEL: {{^}}struct_v32i32_i32_func_void: +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:4{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:8{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:12{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:16{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:20{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:24{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:28{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:32{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:36{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:40{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:44{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:48{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:52{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:56{{$}} +; GCN-DAG: 
buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:60{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:64{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:68{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:72{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:76{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:80{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:84{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:88{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:92{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:96{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:100{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:104{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:108{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:112{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:116{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:120{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:124{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:128{{$}} +; GCN: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define { <32 x i32>, i32 } @struct_v32i32_i32_func_void() #0 { + %ptr = load volatile { <32 x i32>, i32 } addrspace(1)*, { <32 x i32>, i32 } addrspace(1)* addrspace(2)* undef + %val = load { <32 x i32>, i32 }, { <32 x i32>, i32 } addrspace(1)* %ptr + ret { <32 x i32>, i32 }%val +} + +; GCN-LABEL: {{^}}struct_i32_v32i32_func_void: +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:128{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:132{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:136{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:140{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:144{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:148{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:152{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:156{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:160{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:164{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:168{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:172{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:176{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:180{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:184{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:188{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:192{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:196{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:200{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:204{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, 
v0, s[0:3], s4 offen offset:208{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:212{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:216{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:220{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:224{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:228{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:232{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:236{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:240{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:244{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:248{{$}} +; GCN-DAG: buffer_store_dword v{{[0-9]+}}, v0, s[0:3], s4 offen offset:252{{$}} +; GCN: s_waitcnt vmcnt(0) +; GCN-NEXT: s_setpc_b64 +define { i32, <32 x i32> } @struct_i32_v32i32_func_void() #0 { + %ptr = load volatile { i32, <32 x i32> } addrspace(1)*, { i32, <32 x i32> } addrspace(1)* addrspace(2)* undef + %val = load { i32, <32 x i32> }, { i32, <32 x i32> } addrspace(1)* %ptr + ret { i32, <32 x i32> }%val +} + +attributes #0 = { nounwind } diff --git a/test/CodeGen/AMDGPU/hsa-func.ll b/test/CodeGen/AMDGPU/hsa-func.ll index d96b796d4495..35aeeeaa225c 100644 --- a/test/CodeGen/AMDGPU/hsa-func.ll +++ b/test/CodeGen/AMDGPU/hsa-func.ll @@ -27,7 +27,7 @@ ; ELF: Symbol { ; ELF: Name: simple -; ELF: Size: 44 +; ELF: Size: 48 ; ELF: Type: Function (0x2) ; ELF: } @@ -41,14 +41,12 @@ ; HSA: .p2align 2 ; HSA: {{^}}simple: ; HSA-NOT: amd_kernel_code_t - -; FIXME: Check this isn't a kernarg load when calling convention implemented. 
-; XHSA-NOT: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x0 +; HSA-NOT: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x0 ; Make sure we are setting the ATC bit: -; HSA-CI: s_mov_b32 s[[HI:[0-9]]], 0x100f000 +; HSA-CI: s_mov_b32 s[[HI:[0-9]+]], 0x100f000 ; On VI+ we also need to set MTYPE = 2 -; HSA-VI: s_mov_b32 s[[HI:[0-9]]], 0x1100f000 +; HSA-VI: s_mov_b32 s[[HI:[0-9]+]], 0x1100f000 ; Make sure we generate flat store for HSA ; HSA: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} @@ -56,8 +54,9 @@ ; HSA: .size simple, .Lfunc_end0-simple ; HSA: ; Function info: ; HSA-NOT: COMPUTE_PGM_RSRC2 -define void @simple(i32 addrspace(1)* %out) { +define void @simple(i32 addrspace(1)* addrspace(2)* %ptr.out) { entry: + %out = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(2)* %ptr.out store i32 0, i32 addrspace(1)* %out ret void } diff --git a/test/CodeGen/AMDGPU/i1-copy-phi.ll b/test/CodeGen/AMDGPU/i1-copy-phi.ll index b160af86a2b6..4a0213dd1de5 100644 --- a/test/CodeGen/AMDGPU/i1-copy-phi.ll +++ b/test/CodeGen/AMDGPU/i1-copy-phi.ll @@ -12,7 +12,7 @@ ; SI: s_endpgm define amdgpu_kernel void @br_i1_phi(i32 %arg) { bb: - %tidig = call i32 @llvm.r600.read.tidig.x() #0 + %tidig = call i32 @llvm.amdgcn.workitem.id.x() %cmp = trunc i32 %tidig to i1 br i1 %cmp, label %bb2, label %bb3 @@ -32,6 +32,6 @@ bb6: ; preds = %bb4, %bb3 ret void } -declare i32 @llvm.r600.read.tidig.x() #0 +declare i32 @llvm.amdgcn.workitem.id.x() #0 -attributes #0 = { readnone } +attributes #0 = { nounwind readnone } diff --git a/test/CodeGen/AMDGPU/inline-asm.ll b/test/CodeGen/AMDGPU/inline-asm.ll index 636b45db698d..36441cf778c2 100644 --- a/test/CodeGen/AMDGPU/inline-asm.ll +++ b/test/CodeGen/AMDGPU/inline-asm.ll @@ -191,7 +191,7 @@ entry: ; CHECK: v_mov_b32_e32 v0, s0 ; CHECK: v_mov_b32_e32 v1, s1 ; CHECK: use v[0:1] -define void @i64_imm_input_phys_vgpr() { +define amdgpu_kernel void @i64_imm_input_phys_vgpr() { entry: call void asm sideeffect "; use $0 ", "{VGPR0_VGPR1}"(i64 123456) ret void diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.dec.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.dec.ll index 56966a19cf7b..1fc77893e7e9 100644 --- a/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.dec.ll +++ b/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.dec.ll @@ -356,6 +356,7 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64_offset(i64 addrspace(1)* ; GCN-LABEL: {{^}}global_atomic_dec_ret_i64_offset_addr64: ; GCN: v_mov_b32_e32 v[[KLO:[0-9]+]], 42 +; CI: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}} ; GCN: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}} ; CI: buffer_atomic_dec_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:40 glc{{$}} ; VI: flat_atomic_dec_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} glc{{$}} @@ -371,6 +372,7 @@ define amdgpu_kernel void @global_atomic_dec_ret_i64_offset_addr64(i64 addrspace ; GCN-LABEL: {{^}}global_atomic_dec_noret_i64_offset_addr64: ; GCN: v_mov_b32_e32 v[[KLO:[0-9]+]], 42 +; CI: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}} ; GCN: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}} ; CI: buffer_atomic_dec_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:40{{$}} ; VI: flat_atomic_dec_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}}{{$}} diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.inc.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.inc.ll index 3d64f93db2e4..eee8351de79b 100644 --- a/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.inc.ll +++ b/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.inc.ll @@ -207,6 +207,7 @@ 
define amdgpu_kernel void @global_atomic_inc_noret_i64_offset(i64 addrspace(1)* ; GCN-LABEL: {{^}}global_atomic_inc_ret_i64_offset_addr64: ; GCN: v_mov_b32_e32 v[[KLO:[0-9]+]], 42 +; CI: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}} ; GCN: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}} ; CI: buffer_atomic_inc_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:40 glc{{$}} ; VI: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} glc{{$}} @@ -222,6 +223,7 @@ define amdgpu_kernel void @global_atomic_inc_ret_i64_offset_addr64(i64 addrspace ; GCN-LABEL: {{^}}global_atomic_inc_noret_i64_offset_addr64: ; GCN: v_mov_b32_e32 v[[KLO:[0-9]+]], 42 +; CI: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}} ; GCN: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}} ; CI: buffer_atomic_inc_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:40{{$}} ; VI: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}}{{$}} diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.ll index 5f8ca28ec5f0..1b937ab93247 100644 --- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.ll +++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.ll @@ -7,14 +7,13 @@ ; GFX9: flat_store_dword ; GFX9-NOT: s_waitcnt ; GCN: s_barrier -define amdgpu_kernel void @test_barrier(i32 addrspace(1)* %out) #0 { +define amdgpu_kernel void @test_barrier(i32 addrspace(1)* %out, i32 %size) #0 { entry: %tmp = call i32 @llvm.amdgcn.workitem.id.x() %tmp1 = getelementptr i32, i32 addrspace(1)* %out, i32 %tmp store i32 %tmp, i32 addrspace(1)* %tmp1 call void @llvm.amdgcn.s.barrier() - %tmp2 = call i32 @llvm.r600.read.local.size.x() - %tmp3 = sub i32 %tmp2, 1 + %tmp3 = sub i32 %size, 1 %tmp4 = sub i32 %tmp3, %tmp %tmp5 = getelementptr i32, i32 addrspace(1)* %out, i32 %tmp4 %tmp6 = load i32, i32 addrspace(1)* %tmp5 @@ -24,7 +23,6 @@ entry: declare void @llvm.amdgcn.s.barrier() #1 declare i32 @llvm.amdgcn.workitem.id.x() #2 -declare i32 @llvm.r600.read.local.size.x() #2 attributes #0 = { nounwind } attributes #1 = { convergent nounwind } diff --git a/test/CodeGen/AMDGPU/lshl64-to-32.ll b/test/CodeGen/AMDGPU/lshl64-to-32.ll new file mode 100644 index 000000000000..5ff6b71c1f02 --- /dev/null +++ b/test/CodeGen/AMDGPU/lshl64-to-32.ll @@ -0,0 +1,45 @@ +; RUN: llc -march=amdgcn < %s | FileCheck %s + +; CHECK-LABEL: {{^}}zext_shl64_to_32: +; CHECK: s_lshl_b32 +; CHECK-NOT: s_lshl_b64 +define amdgpu_kernel void @zext_shl64_to_32(i64 addrspace(1)* nocapture %out, i32 %x) { + %and = and i32 %x, 1073741823 + %ext = zext i32 %and to i64 + %shl = shl i64 %ext, 2 + store i64 %shl, i64 addrspace(1)* %out, align 4 + ret void +} + +; CHECK-LABEL: {{^}}sext_shl64_to_32: +; CHECK: s_lshl_b32 +; CHECK-NOT: s_lshl_b64 +define amdgpu_kernel void @sext_shl64_to_32(i64 addrspace(1)* nocapture %out, i32 %x) { + %and = and i32 %x, 536870911 + %ext = sext i32 %and to i64 + %shl = shl i64 %ext, 2 + store i64 %shl, i64 addrspace(1)* %out, align 4 + ret void +} + +; CHECK-LABEL: {{^}}zext_shl64_overflow: +; CHECK: s_lshl_b64 +; CHECK-NOT: s_lshl_b32 +define amdgpu_kernel void @zext_shl64_overflow(i64 addrspace(1)* nocapture %out, i32 %x) { + %and = and i32 %x, 2147483647 + %ext = zext i32 %and to i64 + %shl = shl i64 %ext, 2 + store i64 %shl, i64 addrspace(1)* %out, align 4 + ret void +} + +; CHECK-LABEL: {{^}}sext_shl64_overflow: +; CHECK: s_lshl_b64 +; CHECK-NOT: s_lshl_b32 +define amdgpu_kernel void @sext_shl64_overflow(i64 addrspace(1)* nocapture 
%out, i32 %x) { + %and = and i32 %x, 2147483647 + %ext = sext i32 %and to i64 + %shl = shl i64 %ext, 2 + store i64 %shl, i64 addrspace(1)* %out, align 4 + ret void +} diff --git a/test/CodeGen/AMDGPU/packed-op-sel.ll b/test/CodeGen/AMDGPU/packed-op-sel.ll index 6ff0c54c33d0..4970375d40d3 100644 --- a/test/CodeGen/AMDGPU/packed-op-sel.ll +++ b/test/CodeGen/AMDGPU/packed-op-sel.ll @@ -181,8 +181,7 @@ bb: ; GCN-NOT: shl ; GCN-NOT: or -; GCN: v_xor_b32_e32 [[NEG_SCALAR0:v[0-9]+]], 0x8000, [[SCALAR0]] -; GCN-NEXT: v_pk_add_u16 v{{[0-9]+}}, [[VEC0]], [[NEG_SCALAR0]] op_sel_hi:[1,0]{{$}} +; GCN: v_pk_add_u16 v{{[0-9]+}}, [[VEC0]], [[SCALAR0]] op_sel_hi:[1,0] neg_lo:[0,1] neg_hi:[0,1]{{$}} define amdgpu_kernel void @add_vector_neg_bitcast_scalar_lo(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(3)* %lds, half addrspace(3)* %arg2) #0 { bb: %vec0 = load volatile <2 x i16>, <2 x i16> addrspace(3)* %lds, align 4 @@ -260,6 +259,434 @@ bb: ret void } +; GCN-LABEL: {{^}}fma_vector_vector_neg_vector_hi: +; GCN: ds_read_b32 [[VEC0:v[0-9]+]] +; GCN: ds_read_b32 [[VEC1:v[0-9]+]] +; GCN: ds_read_b32 [[VEC2:v[0-9]+]] + +; GCN-NOT: pack +; GCN-NOT: and +; GCN-NOT: shl +; GCN-NOT: or + +; GCN: v_pk_fma_f16 v{{[0-9]+}}, [[VEC0]], [[VEC1]], [[VEC2]] op_sel:[0,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1]{{$}} +define amdgpu_kernel void @fma_vector_vector_neg_vector_hi(<2 x half> addrspace(1)* %out, <2 x half> addrspace(3)* %lds) #0 { +bb: + %lds.gep1 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 1 + %lds.gep2 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 2 + + %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4 + %vec1 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep1, align 4 + %vec2 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep2, align 4 + + %vec2.fneg = fsub <2 x half> <half -0.0, half -0.0>, %vec2 + %vec2.fneg.elt1.broadcast = shufflevector <2 x half> %vec2.fneg, <2 x half> undef, <2 x i32> <i32 1, i32 1> + + %result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %vec2.fneg.elt1.broadcast) + store <2 x half> %result, <2 x half> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}fma_vector_vector_vector_neg_hi: +; GCN: ds_read_b32 [[VEC0:v[0-9]+]] +; GCN: ds_read_b32 [[VEC1:v[0-9]+]] +; GCN: ds_read_b32 [[VEC2:v[0-9]+]] + +; GCN-NOT: pack +; GCN-NOT: and +; GCN-NOT: shl +; GCN-NOT: or + +; GCN: v_pk_fma_f16 v{{[0-9]+}}, [[VEC0]], [[VEC1]], [[VEC2]] neg_hi:[0,0,1]{{$}} +define amdgpu_kernel void @fma_vector_vector_vector_neg_hi(<2 x half> addrspace(1)* %out, <2 x half> addrspace(3)* %lds) #0 { +bb: + %lds.gep1 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 1 + %lds.gep2 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 2 + + %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4 + %vec1 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep1, align 4 + %vec2 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep2, align 4 + + %vec2.elt1 = extractelement <2 x half> %vec2, i32 1 + %neg.vec2.elt1 = fsub half -0.0, %vec2.elt1 + + %neg.vec2.elt1.insert = insertelement <2 x half> %vec2, half %neg.vec2.elt1, i32 1 + %result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %neg.vec2.elt1.insert) + store <2 x half> %result, <2 x half> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}add_vector_scalar_hi: +; GCN: ds_read_b32 [[VEC0:v[0-9]+]] +; GCN: 
ds_read_b32 [[VEC1:v[0-9]+]] + +; GCN-NOT: pack +; GCN-NOT: and +; GCN-NOT: shl +; GCN-NOT: or + +; GCN: v_pk_add_u16 v{{[0-9]+}}, [[VEC0]], [[VEC1]] op_sel:[0,1]{{$}} +define amdgpu_kernel void @add_vector_scalar_hi(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(3)* %lds) #0 { +bb: + %lds.gep1 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(3)* %lds, i32 1 + + %vec0 = load volatile <2 x i16>, <2 x i16> addrspace(3)* %lds, align 4 + %vec1 = load volatile <2 x i16>, <2 x i16> addrspace(3)* %lds.gep1, align 4 + + %vec1.elt1.broadcast = shufflevector <2 x i16> %vec1, <2 x i16> undef, <2 x i32> <i32 1, i32 1> + %result = add <2 x i16> %vec0, %vec1.elt1.broadcast + + store <2 x i16> %result, <2 x i16> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}fma_vector_vector_scalar_hi: +; GCN: ds_read_b32 [[VEC0:v[0-9]+]] +; GCN: ds_read_b32 [[VEC1:v[0-9]+]] +; GCN: ds_read_b32 [[VEC2:v[0-9]+]] + +; GCN-NOT: pack +; GCN-NOT: and +; GCN-NOT: shl +; GCN-NOT: or + +; GCN: v_pk_fma_f16 v{{[0-9]+}}, [[VEC0]], [[VEC1]], [[VEC2]] op_sel:[0,0,1]{{$}} +define amdgpu_kernel void @fma_vector_vector_scalar_hi(<2 x half> addrspace(1)* %out, <2 x half> addrspace(3)* %lds) #0 { +bb: + %lds.gep1 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 1 + %lds.gep2 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 2 + + %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4 + %vec1 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep1, align 4 + %vec2 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep2, align 4 + + %vec2.elt1.broadcast = shufflevector <2 x half> %vec2, <2 x half> undef, <2 x i32> <i32 1, i32 1> + + %result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %vec2.elt1.broadcast) + + store <2 x half> %result, <2 x half> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}fma_vector_vector_neg_vector_lo_neg_hi: +; GCN: ds_read_b32 [[VEC0:v[0-9]+]] +; GCN: ds_read_b32 [[VEC1:v[0-9]+]] +; GCN: ds_read_b32 [[VEC2:v[0-9]+]] + +; GCN-NOT: pack +; GCN-NOT: and +; GCN-NOT: shl +; GCN-NOT: or + +; GCN: v_pk_fma_f16 v{{[0-9]+}}, [[VEC0]], [[VEC1]], [[VEC2]]{{$}} +define amdgpu_kernel void @fma_vector_vector_neg_vector_lo_neg_hi(<2 x half> addrspace(1)* %out, <2 x half> addrspace(3)* %lds) #0 { +bb: + %lds.gep1 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 1 + %lds.gep2 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 2 + + %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4 + %vec1 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep1, align 4 + %vec2 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep2, align 4 + + %neg.vec2 = fsub <2 x half> <half -0.0, half -0.0>, %vec2 + %neg.vec2.elt1 = extractelement <2 x half> %neg.vec2, i32 1 + %neg.neg.vec2.elt1 = fsub half -0.0, %neg.vec2.elt1 + %neg.neg.vec2.elt1.insert = insertelement <2 x half> %vec2, half %neg.neg.vec2.elt1, i32 1 + + %result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %neg.neg.vec2.elt1.insert) + store <2 x half> %result, <2 x half> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}fma_vector_vector_swap_vector: +; GCN: ds_read_b32 [[VEC0:v[0-9]+]] +; GCN: ds_read_b32 [[VEC1:v[0-9]+]] +; GCN: ds_read_b32 [[VEC2:v[0-9]+]] + +; GCN-NOT: pack +; GCN-NOT: and +; GCN-NOT: shl +; GCN-NOT: or + +; GCN: v_pk_fma_f16 v{{[0-9]+}}, [[VEC0]], [[VEC1]], [[VEC2]] op_sel:[0,0,1] 
op_sel_hi:[1,1,0]{{$}} +define amdgpu_kernel void @fma_vector_vector_swap_vector(<2 x half> addrspace(1)* %out, <2 x half> addrspace(3)* %lds) #0 { +bb: + %lds.gep1 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 1 + %lds.gep2 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 2 + + %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4 + %vec1 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep1, align 4 + %vec2 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep2, align 4 + + %vec2.swap = shufflevector <2 x half> %vec2, <2 x half> undef, <2 x i32> <i32 1, i32 0> + %result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %vec2.swap) + + store <2 x half> %result, <2 x half> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}fma_vector_vector_swap_neg_vector: +; GCN: ds_read_b32 [[VEC0:v[0-9]+]] +; GCN: ds_read_b32 [[VEC1:v[0-9]+]] +; GCN: ds_read_b32 [[VEC2:v[0-9]+]] + +; GCN-NOT: pack +; GCN-NOT: and +; GCN-NOT: shl +; GCN-NOT: or +; GCN-NOT: xor + +; GCN: v_pk_fma_f16 v{{[0-9]+}}, [[VEC0]], [[VEC1]], [[VEC2]] op_sel:[0,0,1] op_sel_hi:[1,1,0] neg_lo:[0,0,1] neg_hi:[0,0,1]{{$}} +define amdgpu_kernel void @fma_vector_vector_swap_neg_vector(<2 x half> addrspace(1)* %out, <2 x half> addrspace(3)* %lds) #0 { +bb: + %lds.gep1 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 1 + %lds.gep2 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 2 + + %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4 + %vec1 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep1, align 4 + %vec2 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep2, align 4 + %neg.vec2 = fsub <2 x half> <half -0.0, half -0.0>, %vec2 + + %neg.vec2.swap = shufflevector <2 x half> %neg.vec2, <2 x half> undef, <2 x i32> <i32 1, i32 0> + %result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %neg.vec2.swap) + + store <2 x half> %result, <2 x half> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}fma_vector_vector_blend_vector_neg_vector_0: +; GCN: ds_read_b32 [[VEC0:v[0-9]+]] +; GCN: ds_read_b32 [[VEC1:v[0-9]+]] +; GCN: ds_read_b32 [[VEC2:v[0-9]+]] + +; GCN-NOT: pack +; GCN-NOT: and +; GCN-NOT: shl +; GCN-NOT: or +; GCN-NOT: xor + +; GCN: v_pk_fma_f16 v{{[0-9]+}}, [[VEC0]], [[VEC1]], [[VEC2]] op_sel:[0,0,1] op_sel_hi:[1,1,0] neg_lo:[0,0,1]{{$}} +define amdgpu_kernel void @fma_vector_vector_blend_vector_neg_vector_0(<2 x half> addrspace(1)* %out, <2 x half> addrspace(3)* %lds) #0 { +bb: + %lds.gep1 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 1 + %lds.gep2 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 2 + + %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4 + %vec1 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep1, align 4 + %vec2 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep2, align 4 + %neg.vec2 = fsub <2 x half> <half -0.0, half -0.0>, %vec2 + %combined = shufflevector <2 x half> %vec2, <2 x half> %neg.vec2, <2 x i32> <i32 3, i32 0> + %result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %combined) + + store <2 x half> %result, <2 x half> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}fma_vector_vector_blend_vector_neg_vector_1: +; GCN: ds_read_b32 [[VEC0:v[0-9]+]] +; GCN: ds_read_b32 [[VEC1:v[0-9]+]] +; GCN: 
ds_read_b32 [[VEC2:v[0-9]+]] + +; GCN-NOT: pack +; GCN-NOT: and +; GCN-NOT: shl +; GCN-NOT: or +; GCN-NOT: xor + +; GCN: v_pk_fma_f16 v{{[0-9]+}}, [[VEC0]], [[VEC1]], [[VEC2]] neg_lo:[0,0,1]{{$}} +define amdgpu_kernel void @fma_vector_vector_blend_vector_neg_vector_1(<2 x half> addrspace(1)* %out, <2 x half> addrspace(3)* %lds) #0 { +bb: + %lds.gep1 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 1 + %lds.gep2 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 2 + + %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4 + %vec1 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep1, align 4 + %vec2 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep2, align 4 + %neg.vec2 = fsub <2 x half> <half -0.0, half -0.0>, %vec2 + %combined = shufflevector <2 x half> %vec2, <2 x half> %neg.vec2, <2 x i32> <i32 2, i32 1> + %result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %combined) + + store <2 x half> %result, <2 x half> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}fma_vector_vector_blend_vector_neg_vector_2: +; GCN: ds_read_b32 [[VEC0:v[0-9]+]] +; GCN: ds_read_b32 [[VEC1:v[0-9]+]] +; GCN: ds_read_b32 [[VEC2:v[0-9]+]] + +; GCN-NOT: pack +; GCN-NOT: and +; GCN-NOT: shl +; GCN-NOT: or +; GCN-NOT: xor + +; GCN: v_pk_fma_f16 v{{[0-9]+}}, [[VEC0]], [[VEC1]], [[VEC2]] neg_hi:[0,0,1]{{$}} +define amdgpu_kernel void @fma_vector_vector_blend_vector_neg_vector_2(<2 x half> addrspace(1)* %out, <2 x half> addrspace(3)* %lds) #0 { +bb: + %lds.gep1 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 1 + %lds.gep2 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 2 + + %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4 + %vec1 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep1, align 4 + %vec2 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep2, align 4 + %neg.vec2 = fsub <2 x half> <half -0.0, half -0.0>, %vec2 + %combined = shufflevector <2 x half> %vec2, <2 x half> %neg.vec2, <2 x i32> <i32 0, i32 3> + %result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %combined) + + store <2 x half> %result, <2 x half> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}fma_vector_vector_blend_vector_neg_vector_3: +; GCN: ds_read_b32 [[VEC0:v[0-9]+]] +; GCN: ds_read_b32 [[VEC1:v[0-9]+]] +; GCN: ds_read_b32 [[VEC2:v[0-9]+]] + +; GCN-NOT: pack +; GCN-NOT: and +; GCN-NOT: shl +; GCN-NOT: or +; GCN-NOT: xor + +; GCN: v_pk_fma_f16 v{{[0-9]+}}, [[VEC0]], [[VEC1]], [[VEC2]] op_sel:[0,0,1] neg_lo:[0,0,1]{{$}} +define amdgpu_kernel void @fma_vector_vector_blend_vector_neg_vector_3(<2 x half> addrspace(1)* %out, <2 x half> addrspace(3)* %lds) #0 { +bb: + %lds.gep1 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 1 + %lds.gep2 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 2 + + %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4 + %vec1 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep1, align 4 + %vec2 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep2, align 4 + %neg.vec2 = fsub <2 x half> <half -0.0, half -0.0>, %vec2 + %combined = shufflevector <2 x half> %vec2, <2 x half> %neg.vec2, <2 x i32> <i32 3, i32 1> + %result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %combined) + + store <2 x half> %result, <2 x half> 
addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}bitcast_fneg_f32: +; GCN: v_pk_add_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+$}} +define amdgpu_kernel void @bitcast_fneg_f32(<2 x half> addrspace(1)* %out, <2 x half> addrspace(3)* %lds) #0 { +bb: + %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4 + %f32 = load volatile float, float addrspace(3)* undef, align 4 + %neg.f32 = fsub float -0.0, %f32 + %bc = bitcast float %neg.f32 to <2 x half> + %result = fadd <2 x half> %vec0, %bc + + store <2 x half> %result, <2 x half> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}shuffle_bitcast_fneg_f32: +; GCN: v_pk_add_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} op_sel:[0,1] op_sel_hi:[1,0]{{$}} +define amdgpu_kernel void @shuffle_bitcast_fneg_f32(<2 x half> addrspace(1)* %out, <2 x half> addrspace(3)* %lds) #0 { +bb: + %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4 + + %f32 = load volatile float, float addrspace(3)* undef, align 4 + %neg.f32 = fsub float -0.0, %f32 + %bc = bitcast float %neg.f32 to <2 x half> + %shuf = shufflevector <2 x half> %bc, <2 x half> undef, <2 x i32> <i32 1, i32 0> + %result = fadd <2 x half> %vec0, %shuf + store <2 x half> %result, <2 x half> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}extract_from_i64: +; GCN: v_lshl_or_b32 +; GCN: v_pk_add_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+$}} +define amdgpu_kernel void @extract_from_i64(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(3)* %lds) #0 { +bb: + %vec0 = load volatile <2 x i16>, <2 x i16> addrspace(3)* %lds, align 4 + %i64 = load volatile i64, i64 addrspace(1)* undef + + %elt0 = trunc i64 %i64 to i16 + %hi = lshr i64 %i64, 16 + %elt1 = trunc i64 %hi to i16 + + %ins0 = insertelement <2 x i16> undef, i16 %elt1, i32 0 + %ins1 = insertelement <2 x i16> %ins0, i16 %elt0, i32 1 + %result = add <2 x i16> %vec0, %ins1 + store <2 x i16> %result, <2 x i16> addrspace(1)* %out, align 4 + ret void +} + + +; Bitcast is final obstacle to identifying same source register +; GCN-LABEL: {{^}}bitcast_lo_elt_op_sel: +; GCN: ds_read_b32 [[VEC0:v[0-9]+]] +; GCN: ds_read_b32 [[VEC1:v[0-9]+]] +; GCN: ds_read_b32 [[VEC2:v[0-9]+]] + +; GCN-NOT: pack +; GCN-NOT: and +; GCN-NOT: shl +; GCN-NOT: _or + +; GCN: v_pk_add_f16 [[FADD:v[0-9]+]] +; GCN-NEXT: v_pk_fma_f16 v{{[0-9]+}}, [[VEC0]], [[VEC1]], [[FADD]] op_sel:[0,0,1] op_sel_hi:[1,1,0]{{$}} +define amdgpu_kernel void @bitcast_lo_elt_op_sel(<2 x half> addrspace(1)* %out, <2 x half> addrspace(3)* %lds) #0 { +bb: + %lds.gep1 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 1 + %lds.gep2 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 2 + + %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4 + %vec1 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep1, align 4 + %vec2 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep2, align 4 + + %scalar0 = load volatile i16, i16 addrspace(1)* undef + %shl = shl i16 %scalar0, 1 + %shl.bc = bitcast i16 %shl to half + + %fadd = fadd <2 x half> %vec2, <half 2.0, half 2.0> + %shuffle = shufflevector <2 x half> %fadd, <2 x half> %vec2, <2 x i32> <i32 1, i32 0> + + %result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %shuffle) + store <2 x half> %result, <2 x half> addrspace(1)* %out, align 4 + ret void +} + + +; Bitcast is final obstacle to identifying same source register +; GCN-LABEL: {{^}}mix_elt_types_op_sel: +; GCN: ds_read_b32 [[VEC0:v[0-9]+]] +; 
GCN: ds_read_b32 [[VEC1:v[0-9]+]] +; GCN: ds_read_b32 [[VEC2:v[0-9]+]] + +; GCN-NOT: pack +; GCN-NOT: and +; GCN-NOT: shl +; GCN-NOT: _or + +; GCN: v_pk_add_f16 [[FADD:v[0-9]+]] +; GCN-NEXT: v_pk_fma_f16 v{{[0-9]+}}, [[VEC0]], [[VEC1]], [[FADD]] op_sel:[0,0,1] op_sel_hi:[1,1,0]{{$}} +define amdgpu_kernel void @mix_elt_types_op_sel(<2 x half> addrspace(1)* %out, <2 x half> addrspace(3)* %lds) #0 { +bb: + %lds.gep1 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 1 + %lds.gep2 = getelementptr inbounds <2 x half>, <2 x half> addrspace(3)* %lds, i32 2 + + %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4 + %vec1 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep1, align 4 + %vec2 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds.gep2, align 4 + + %scalar0 = load volatile i16, i16 addrspace(1)* undef + %scalar1 = load volatile half, half addrspace(1)* undef + %shl = shl i16 %scalar0, 1 + %shl.bc = bitcast i16 %shl to half + + %insert0 = insertelement <2 x half> undef, half %shl.bc, i32 0 + + %fadd = fadd <2 x half> %vec2, <half 2.0, half 2.0> + %insert1 = shufflevector <2 x half> %fadd, <2 x half> %insert0, <2 x i32> <i32 1, i32 0> + + %result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %insert1) + store <2 x half> %result, <2 x half> addrspace(1)* %out, align 4 + ret void +} + declare <2 x half> @llvm.fma.v2f16(<2 x half>, <2 x half>, <2 x half>) #1 attributes #0 = { nounwind } diff --git a/test/CodeGen/AMDGPU/regcoalesce-prune.mir b/test/CodeGen/AMDGPU/regcoalesce-prune.mir new file mode 100644 index 000000000000..7ad474bf0ed2 --- /dev/null +++ b/test/CodeGen/AMDGPU/regcoalesce-prune.mir @@ -0,0 +1,31 @@ +# RUN: llc -o - %s -mtriple=amdgcn-amd-amdhsa-opencl -run-pass=simple-register-coalescing | FileCheck %s +--- +# Checks for a bug where subregister live ranges were not properly pruned for +# an IMPLICIT_DEF that gets removed completely. +# +# CHECK-LABEL: name: func +# IMPLICIT_DEF should be gone without llc hitting assertion failures. +# CHECK-NOT: IMPLICIT_DEF +name: func +tracksRegLiveness: true +body: | + bb.0: + undef %5.sub1 = V_MOV_B32_e32 0, implicit %exec + %6 = COPY %5 + S_CBRANCH_VCCZ %bb.2, implicit undef %vcc + + bb.1: + %1 : sreg_32_xm0 = S_MOV_B32 0 + undef %0.sub0 : sreg_64 = COPY %1 + %0.sub1 = COPY %1 + %4 : vreg_64 = COPY killed %0 + %5 : vreg_64 = IMPLICIT_DEF + %6 : vreg_64 = COPY killed %4 + + bb.2: + %2 : vgpr_32 = V_CVT_F32_I32_e32 killed %5.sub1, implicit %exec + + bb.3: + %3 : vgpr_32 = V_CVT_F32_I32_e32 killed %6.sub1, implicit %exec + S_ENDPGM +... diff --git a/test/CodeGen/AMDGPU/sdwa-peephole.ll b/test/CodeGen/AMDGPU/sdwa-peephole.ll index 1e0ac3807528..73defc17d04f 100644 --- a/test/CodeGen/AMDGPU/sdwa-peephole.ll +++ b/test/CodeGen/AMDGPU/sdwa-peephole.ll @@ -393,3 +393,53 @@ store_label: store <2 x i16> %add, <2 x i16> addrspace(1)* %out, align 4 ret void } + + +; Check that "pulling out" SDWA operands works correctly. 
+; GCN-LABEL: {{^}}pulled_out_test: +; NOSDWA-DAG: v_and_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} +; NOSDWA-DAG: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}} +; NOSDWA-DAG: v_and_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} +; NOSDWA-DAG: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}} +; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} +; NOSDWA-NOT: v_and_b32_sdwa +; NOSDWA-NOT: v_or_b32_sdwa + +; SDWA-DAG: v_and_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; SDWA-DAG: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}} +; SDWA-DAG: v_and_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; SDWA-DAG: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}} +; SDWA: v_or_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD + +define amdgpu_kernel void @pulled_out_test(<8 x i8> addrspace(1)* %sourceA, <8 x i8> addrspace(1)* %destValues) { +entry: + %idxprom = ashr exact i64 15, 32 + %arrayidx = getelementptr inbounds <8 x i8>, <8 x i8> addrspace(1)* %sourceA, i64 %idxprom + %tmp = load <8 x i8>, <8 x i8> addrspace(1)* %arrayidx, align 8 + + %tmp1 = extractelement <8 x i8> %tmp, i32 0 + %tmp2 = extractelement <8 x i8> %tmp, i32 1 + %tmp3 = extractelement <8 x i8> %tmp, i32 2 + %tmp4 = extractelement <8 x i8> %tmp, i32 3 + %tmp5 = extractelement <8 x i8> %tmp, i32 4 + %tmp6 = extractelement <8 x i8> %tmp, i32 5 + %tmp7 = extractelement <8 x i8> %tmp, i32 6 + %tmp8 = extractelement <8 x i8> %tmp, i32 7 + + %tmp9 = insertelement <2 x i8> undef, i8 %tmp1, i32 0 + %tmp10 = insertelement <2 x i8> %tmp9, i8 %tmp2, i32 1 + %tmp11 = insertelement <2 x i8> undef, i8 %tmp3, i32 0 + %tmp12 = insertelement <2 x i8> %tmp11, i8 %tmp4, i32 1 + %tmp13 = insertelement <2 x i8> undef, i8 %tmp5, i32 0 + %tmp14 = insertelement <2 x i8> %tmp13, i8 %tmp6, i32 1 + %tmp15 = insertelement <2 x i8> undef, i8 %tmp7, i32 0 + %tmp16 = insertelement <2 x i8> %tmp15, i8 %tmp8, i32 1 + + %tmp17 = shufflevector <2 x i8> %tmp10, <2 x i8> %tmp12, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %tmp18 = shufflevector <2 x i8> %tmp14, <2 x i8> %tmp16, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %tmp19 = shufflevector <4 x i8> %tmp17, <4 x i8> %tmp18, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + + %arrayidx5 = getelementptr inbounds <8 x i8>, <8 x i8> addrspace(1)* %destValues, i64 %idxprom + store <8 x i8> %tmp19, <8 x i8> addrspace(1)* %arrayidx5, align 8 + ret void +} diff --git a/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll b/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll index 6f5fc6d0f38c..36c33b876919 100644 --- a/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll +++ b/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll @@ -299,10 +299,10 @@ define amdgpu_kernel void @v_uextract_bit_31_32_i64_trunc_i32(i32 addrspace(1)* } ; GCN-LABEL: {{^}}and_not_mask_i64: -; GCN: buffer_load_dwordx2 v{{\[}}[[VALLO:[0-9]+]]:[[VALHI:[0-9]+]]{{\]}} -; GCN: v_mov_b32_e32 v[[SHRHI]], 0{{$}} +; GCN-DAG: buffer_load_dwordx2 v{{\[}}[[VALLO:[0-9]+]]:[[VALHI:[0-9]+]]{{\]}} +; GCN: v_mov_b32_e32 v[[SHRHI:[0-9]+]], 0{{$}} ; GCN: v_lshrrev_b32_e32 [[SHR:v[0-9]+]], 20, v[[VALLO]] -; GCN-DAG: v_and_b32_e32 v[[SHRLO]], 4, [[SHR]] +; GCN-DAG: v_and_b32_e32 v[[SHRLO:[0-9]+]], 4, [[SHR]] ; GCN-NOT: v[[SHRLO]] ; GCN-NOT: v[[SHRHI]] ; GCN: buffer_store_dwordx2 v{{\[}}[[SHRLO]]:[[SHRHI]]{{\]}} @@ -360,10 +360,9 @@ define amdgpu_kernel void 
@v_uextract_bit_34_37_multi_use_shift_i64(i64 addrspac } ; GCN-LABEL: {{^}}v_uextract_bit_33_36_use_upper_half_shift_i64: -; GCN: buffer_load_dword [[VAL:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}} +; GCN-DAG: buffer_load_dword [[VAL:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}} ; GCN-DAG: v_bfe_u32 v[[BFE:[0-9]+]], [[VAL]], 1, 3 -; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}} -; GCN: buffer_store_dwordx2 v{{\[}}[[BFE]]:[[ZERO]]{{\]}} +; GCN: buffer_store_dwordx2 v{{\[}}[[BFE]]:{{[0-9]+\]}} ; GCN: buffer_store_dword v[[ZERO]] define amdgpu_kernel void @v_uextract_bit_33_36_use_upper_half_shift_i64(i64 addrspace(1)* %out0, i32 addrspace(1)* %out1, i64 addrspace(1)* %in) #1 { %id.x = tail call i32 @llvm.amdgcn.workitem.id.x() diff --git a/test/CodeGen/AMDGPU/srl.ll b/test/CodeGen/AMDGPU/srl.ll index 1daf4bb33e81..cb40ecf2de1c 100644 --- a/test/CodeGen/AMDGPU/srl.ll +++ b/test/CodeGen/AMDGPU/srl.ll @@ -201,7 +201,8 @@ define amdgpu_kernel void @s_lshr_32_i64(i64 addrspace(1)* %out, i64 %a) { ; GCN-LABEL: {{^}}v_lshr_32_i64: ; GCN-DAG: buffer_load_dword v[[HI_A:[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4 -; GCN-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], 0{{$}} +; GCN-DAG: v_mov_b32_e32 v[[VHI1:[0-9]+]], 0{{$}} +; GCN-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], v[[VHI1]]{{$}} ; GCN: buffer_store_dwordx2 v{{\[}}[[HI_A]]:[[VHI]]{{\]}} define amdgpu_kernel void @v_lshr_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) { %tid = call i32 @llvm.r600.read.tidig.x() #0 diff --git a/test/CodeGen/AMDGPU/sub.i16.ll b/test/CodeGen/AMDGPU/sub.i16.ll index 6642411f7a63..cf9e714ea6d3 100644 --- a/test/CodeGen/AMDGPU/sub.i16.ll +++ b/test/CodeGen/AMDGPU/sub.i16.ll @@ -85,9 +85,9 @@ define amdgpu_kernel void @v_test_sub_i16_zext_to_i32(i32 addrspace(1)* %out, i1 ; FIXME: Need to handle non-uniform case for function below (load without gep). ; GCN-LABEL: {{^}}v_test_sub_i16_zext_to_i64: -; VI-DAG: v_mov_b32_e32 v[[VZERO:[0-9]+]], 0 ; VI: flat_load_ushort [[A:v[0-9]+]] ; VI: flat_load_ushort [[B:v[0-9]+]] +; VI-DAG: v_mov_b32_e32 v[[VZERO:[0-9]+]], 0 ; VI-DAG: v_subrev_u16_e32 v[[ADD:[0-9]+]], [[B]], [[A]] ; VI: buffer_store_dwordx2 v{{\[}}[[ADD]]:[[VZERO]]{{\]}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0{{$}} define amdgpu_kernel void @v_test_sub_i16_zext_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in0, i16 addrspace(1)* %in1) #1 { diff --git a/test/CodeGen/AMDGPU/subreg_interference.mir b/test/CodeGen/AMDGPU/subreg_interference.mir index 24d06a576c2a..6fc22c8d189f 100644 --- a/test/CodeGen/AMDGPU/subreg_interference.mir +++ b/test/CodeGen/AMDGPU/subreg_interference.mir @@ -1,4 +1,12 @@ # RUN: llc -o - %s -mtriple=amdgcn--amdhsa -verify-machineinstrs -run-pass=greedy,virtregrewriter | FileCheck %s +--- | + + define amdgpu_kernel void @func0() { + ret void + } + +... + --- # We should not detect any interference between v0/v1 here and only allocate # sgpr0-sgpr3. 
diff --git a/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll b/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll index e82e548f23cd..135f02ac205a 100644 --- a/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll +++ b/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot-compute.ll @@ -186,7 +186,7 @@ bb12: ; preds = %bb145, %bb %tmp140 = phi float [ 0.000000e+00, %bb ], [ %tmp405, %bb145 ] %tmp141 = phi float [ 0.000000e+00, %bb ], [ %tmp406, %bb145 ] %tmp142 = bitcast float %tmp95 to i32 - %tid = call i32 @llvm.r600.read.tidig.x() #1 + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %tmp143 = icmp sgt i32 %tmp142, %tid br i1 %tmp143, label %bb144, label %bb145 @@ -593,7 +593,7 @@ bb145: ; preds = %bb12 br label %bb12 } -declare i32 @llvm.r600.read.tidig.x() #1 +declare i32 @llvm.amdgcn.workitem.id.x() #1 attributes #0 = { nounwind } attributes #1 = { nounwind readnone } diff --git a/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll b/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll index 53577dbd76f6..1a0c7fd8e1d6 100644 --- a/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll +++ b/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll @@ -699,3 +699,33 @@ define i32 @test_shufflevector_v4s32_v2s32(i32 %arg1, i32 %arg2, i32 %arg3, i32 %res = extractelement <2 x i32> %shuffle, i32 0 ret i32 %res } + +%struct.v2s32 = type { <2 x i32> } + +define i32 @test_constantstruct_v2s32() { +; CHECK-LABEL: name: test_constantstruct_v2s32 +; CHECK: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1 +; CHECK: [[C2:%[0-9]+]](s32) = G_CONSTANT i32 2 +; CHECK: [[VEC:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C2]](s32) +; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<2 x s32>) + %vec = extractvalue %struct.v2s32 {<2 x i32><i32 1, i32 2>}, 0 + %elt = extractelement <2 x i32> %vec, i32 0 + ret i32 %elt +} + +%struct.v2s32.s32.s32 = type { <2 x i32>, i32, i32 } + +define i32 @test_constantstruct_v2s32_s32_s32() { +; CHECK-LABEL: name: test_constantstruct_v2s32_s32_s32 +; CHECK: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1 +; CHECK: [[C2:%[0-9]+]](s32) = G_CONSTANT i32 2 +; CHECK: [[VEC:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C2]](s32) +; CHECK: [[C3:%[0-9]+]](s32) = G_CONSTANT i32 3 +; CHECK: [[C4:%[0-9]+]](s32) = G_CONSTANT i32 4 +; CHECK: [[CS:%[0-9]+]](s128) = G_SEQUENCE [[VEC]](<2 x s32>), 0, [[C3]](s32), 64, [[C4]](s32), 96 +; CHECK: [[EXT:%[0-9]+]](<2 x s32>) = G_EXTRACT [[CS]](s128), 0 +; CHECK: G_EXTRACT_VECTOR_ELT [[EXT]](<2 x s32>) + %vec = extractvalue %struct.v2s32.s32.s32 {<2 x i32><i32 1, i32 2>, i32 3, i32 4}, 0 + %elt = extractelement <2 x i32> %vec, i32 0 + ret i32 %elt +} diff --git a/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll b/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll index a44c9721d6c1..1c8142e5ddd5 100644 --- a/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll +++ b/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll @@ -22,7 +22,7 @@ entry: ; for.body -> for.cond.backedge (100%) ; -> cond.false.i (0%) ; CHECK: BB#1: derived from LLVM BB %for.body -; CHECK: Successors according to CFG: BB#2(0x7ffffc00 / 0x80000000 = 100.00%) BB#4(0x00000400 / 0x80000000 = 0.00%) +; CHECK: Successors according to CFG: BB#2(0x80000000 / 0x80000000 = 100.00%) BB#4(0x00000001 / 0x80000000 = 0.00%) for.body: br i1 undef, label %for.cond.backedge, label %lor.lhs.false.i, !prof !1 diff --git a/test/CodeGen/Generic/opt-codegen-no-target-machine.ll b/test/CodeGen/Generic/opt-codegen-no-target-machine.ll new file mode 100644 index 000000000000..c6cb1c2b657b --- /dev/null +++ 
b/test/CodeGen/Generic/opt-codegen-no-target-machine.ll @@ -0,0 +1,3 @@ +; RUN: not opt %s -dwarfehprepare -o - 2>&1 | FileCheck %s + +; CHECK: Trying to construct TargetPassConfig without a target machine. Scheduling a CodeGen pass without a target triple set? diff --git a/test/CodeGen/Mips/dins.ll b/test/CodeGen/Mips/dins.ll new file mode 100644 index 000000000000..be3865703ba2 --- /dev/null +++ b/test/CodeGen/Mips/dins.ll @@ -0,0 +1,70 @@ +; RUN: llc -O2 -march=mips64 -mcpu=mips64r2 -target-abi=n64 < %s -o - | FileCheck %s -check-prefix=MIPS64R2 +; RUN: llc -O2 -march=mips -mcpu=mips32r2 < %s -o - | FileCheck %s -check-prefix=MIPS32R2 +; RUN: llc -O2 -march=mips -mattr=mips16 < %s -o - | FileCheck %s -check-prefix=MIPS16 + +; #include <stdint.h> +; #include <stdio.h> +; struct cvmx_buf_ptr { + +; struct { +; unsigned long long addr :37; +; unsigned long long addr1 :15; +; unsigned int length:14; +; uint64_t total_bytes:16; +; uint64_t segs : 6; +; } s; +; }; +; +; unsigned long long foo(volatile struct cvmx_buf_ptr bufptr) { +; bufptr.s.addr = 123; +; bufptr.s.segs = 4; +; bufptr.s.length = 5; +; bufptr.s.total_bytes = bufptr.s.length; +; return bufptr.s.addr; +; } + +; Test selection of the INS/DINS instructions. + +define i64 @f123(i64 inreg %bufptr.coerce0, i64 inreg %bufptr.coerce1) local_unnamed_addr #0 { +entry: + %bufptr.sroa.0 = alloca i64, align 8 + %bufptr.sroa.4 = alloca i64, align 8 + store i64 %bufptr.coerce0, i64* %bufptr.sroa.0, align 8 + store i64 %bufptr.coerce1, i64* %bufptr.sroa.4, align 8 + %bufptr.sroa.0.0.bufptr.sroa.0.0.bufptr.sroa.0.0.bf.load = load volatile i64, i64* %bufptr.sroa.0, align 8 + %bf.clear = and i64 %bufptr.sroa.0.0.bufptr.sroa.0.0.bufptr.sroa.0.0.bf.load, 134217727 + %bf.set = or i64 %bf.clear, 16508780544 + store volatile i64 %bf.set, i64* %bufptr.sroa.0, align 8 + %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load2 = load volatile i64, i64* %bufptr.sroa.4, align 8 + %bf.clear3 = and i64 %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load2, -16911433729 + %bf.set4 = or i64 %bf.clear3, 1073741824 + store volatile i64 %bf.set4, i64* %bufptr.sroa.4, align 8 + %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load6 = load volatile i64, i64* %bufptr.sroa.4, align 8 + %bf.clear7 = and i64 %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load6, 1125899906842623 + %bf.set8 = or i64 %bf.clear7, 5629499534213120 + store volatile i64 %bf.set8, i64* %bufptr.sroa.4, align 8 + %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load11 = load volatile i64, i64* %bufptr.sroa.4, align 8 + %bf.lshr = lshr i64 %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load11, 50 + %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load13 = load volatile i64, i64* %bufptr.sroa.4, align 8 + %bf.shl = shl nuw nsw i64 %bf.lshr, 34 + %bf.clear14 = and i64 %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load13, -1125882726973441 + %bf.set15 = or i64 %bf.clear14, %bf.shl + store volatile i64 %bf.set15, i64* %bufptr.sroa.4, align 8 + %bufptr.sroa.0.0.bufptr.sroa.0.0.bufptr.sroa.0.0.bf.load17 = load volatile i64, i64* %bufptr.sroa.0, align 8 + %bf.lshr18 = lshr i64 %bufptr.sroa.0.0.bufptr.sroa.0.0.bufptr.sroa.0.0.bf.load17, 27 + ret i64 %bf.lshr18 +} + + +; CHECK-LABEL: f123: +; MIPS64R2: daddiu $[[R0:[0-9]+]], $zero, 123 +; MIPS64R2: dins $[[R0:[0-9]+]], $[[R1:[0-9]+]], 27, 37 +; MIPS64R2: daddiu $[[R0:[0-9]+]], $zero, 5 +; MIPS64R2: daddiu $[[R0:[0-9]+]], $zero, 4 +; MIPS64R2: dins $[[R0:[0-9]+]], $[[R1:[0-9]+]], 28, 6 +; MIPS64R2: dins $[[R0:[0-9]+]], 
$[[R1:[0-9]+]], 50, 14 +; MIPS64R2: dsrl $[[R0:[0-9]+]], $[[R1:[0-9]+]], 50 +; MIPS64R2: dins $[[R0:[0-9]+]], $[[R1:[0-9]+]], 34, 16 +; MIPS32R2: ins $[[R0:[0-9]+]], $[[R1:[0-9]+]], 2, 16 +; MIPS32R2-NOT: ins $[[R0:[0-9]+]], $[[R1:[0-9]+]], 18, 46 +; MIPS16-NOT: ins{{[[:space:]].*}}
\ No newline at end of file diff --git a/test/CodeGen/Mips/micromips-attr.ll b/test/CodeGen/Mips/micromips-attr.ll new file mode 100644 index 000000000000..78bcc04a9b0c --- /dev/null +++ b/test/CodeGen/Mips/micromips-attr.ll @@ -0,0 +1,39 @@ +; RUN: llc -march=mips -mcpu=mips32 --mattr=-micromips < %s | FileCheck %s + +define void @foo() #0 { +entry: + ret void +} +; CHECK: .set micromips +; CHECK-NEXT: .set nomips16 +; CHECK-NEXT: .ent foo +; CHECK-NEXT: foo: + +define void @bar() #1 { +entry: + ret void +} +; CHECK: .set nomicromips +; CHECK-NEXT: .set nomips16 +; CHECK-NEXT: .ent bar +; CHECK-NEXT: bar: + +attributes #0 = { + nounwind "micromips" + "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" + "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" + "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" + "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" + "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" + "use-soft-float"="false" +} + +attributes #1 = { + nounwind + "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" + "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" + "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" + "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" + "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" + "use-soft-float"="false" +} diff --git a/test/CodeGen/Mips/mips64-f128.ll b/test/CodeGen/Mips/mips64-f128.ll index a6dafb1abfd6..aa73c522eda5 100644 --- a/test/CodeGen/Mips/mips64-f128.ll +++ b/test/CodeGen/Mips/mips64-f128.ll @@ -418,18 +418,17 @@ entry: declare fp128 @llvm.powi.f128(fp128, i32) #3 ; ALL-LABEL: libcall2_copysignl: -; ALL-DAG: daddiu $[[R2:[0-9]+]], $zero, 1 -; ALL-DAG: dsll $[[R3:[0-9]+]], $[[R2]], 63 +; NOT-R2R6-DAG: daddiu $[[R2:[0-9]+]], $zero, 1 +; NOT-R2R6-DAG: dsll $[[R3:[0-9]+]], $[[R2]], 63 ; ALL-DAG: ld $[[R0:[0-9]+]], %got_disp(gld1) ; ALL-DAG: ld $[[R1:[0-9]+]], 8($[[R0]]) -; ALL-DAG: and $[[R4:[0-9]+]], $[[R1]], $[[R3]] +; NOT-R2R6-DAG: and $[[R4:[0-9]+]], $[[R1]], $[[R3]] ; ALL-DAG: ld $[[R5:[0-9]+]], %got_disp(gld0) ; ALL-DAG: ld $[[R6:[0-9]+]], 8($[[R5]]) +; R2R6: dins $[[R0:[0-9]+]], $[[R1:[0-9]+]], 63, 1 ; NOT-R2R6-DAG: daddiu $[[R7:[0-9]+]], $[[R3]], -1 ; NOT-R2R6-DAG: and $[[R8:[0-9]+]], $[[R6]], $[[R7]] ; NOT-R2R6-DAG: or $4, $[[R8]], $[[R4]] -; R2R6-DAG: dextm $[[R7:[0-9]+]], $[[R6]], 0, 63 -; R2R6-DAG: or $4, $[[R7]], $[[R4]] ; ALL-DAG: ld $2, 0($[[R5]]) define fp128 @libcall2_copysignl() { diff --git a/test/CodeGen/NVPTX/sched1.ll b/test/CodeGen/NVPTX/sched1.ll index ecdf55ecdbeb..fb01eb262adc 100644 --- a/test/CodeGen/NVPTX/sched1.ll +++ b/test/CodeGen/NVPTX/sched1.ll @@ -6,11 +6,11 @@ define void @foo(i32* %a) { ; CHECK: .func foo ; CHECK: ld.u32 ; CHECK-NEXT: ld.u32 -; CHECK-NEXT: add.s32 ; CHECK-NEXT: ld.u32 -; CHECK-NEXT: add.s32 ; CHECK-NEXT: ld.u32 ; CHECK-NEXT: add.s32 +; CHECK-NEXT: add.s32 +; CHECK-NEXT: add.s32 %ptr0 = getelementptr i32, i32* %a, i32 0 %val0 = load i32, i32* %ptr0 %ptr1 = getelementptr i32, i32* %a, i32 1 diff --git a/test/CodeGen/NVPTX/sched2.ll b/test/CodeGen/NVPTX/sched2.ll index 347f77c5682c..91ed77878f81 100644 --- a/test/CodeGen/NVPTX/sched2.ll +++ b/test/CodeGen/NVPTX/sched2.ll @@ -4,12 +4,12 @@ define void @foo(<2 x i32>* %a) { ; CHECK: .func foo ; CHECK: ld.v2.u32 ; CHECK-NEXT: ld.v2.u32 +; CHECK-NEXT: ld.v2.u32 +; CHECK-NEXT: ld.v2.u32 ; CHECK-NEXT: add.s32 ; CHECK-NEXT: add.s32 -; CHECK-NEXT: ld.v2.u32 ; CHECK-NEXT: add.s32 ; 
CHECK-NEXT: add.s32 -; CHECK-NEXT: ld.v2.u32 ; CHECK-NEXT: add.s32 ; CHECK-NEXT: add.s32 %ptr0 = getelementptr <2 x i32>, <2 x i32>* %a, i32 0 diff --git a/test/CodeGen/NVPTX/vec8.ll b/test/CodeGen/NVPTX/vec8.ll index 93b39c1125f8..a86ba1e29d5c 100644 --- a/test/CodeGen/NVPTX/vec8.ll +++ b/test/CodeGen/NVPTX/vec8.ll @@ -7,7 +7,7 @@ define void @foo(<8 x i8> %a, i8* %b) { ; CHECK-DAG: ld.param.v4.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [foo_param_0] ; CHECK-DAG: ld.param.v4.u8 {[[E4:%rs[0-9]+]], [[E5:%rs[0-9]+]], [[E6:%rs[0-9]+]], [[E7:%rs[0-9]+]]}, [foo_param_0+4] ; CHECK-DAG: ld.param.u32 %[[B:r[0-9+]]], [foo_param_1] -; CHECK-DAG: add.s16 [[T:%rs[0-9+]]], [[E1]], [[E6]]; +; CHECK: add.s16 [[T:%rs[0-9+]]], [[E1]], [[E6]]; ; CHECK: st.u8 [%[[B]]], [[T]]; %t0 = extractelement <8 x i8> %a, i32 1 %t1 = extractelement <8 x i8> %a, i32 6 diff --git a/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll b/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll index 64d02c5b9632..2aeb0e1f71f9 100644 --- a/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll +++ b/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll @@ -1,5 +1,6 @@ ; RUN: llc -verify-machineinstrs -print-before=peephole-opt -print-after=peephole-opt -mtriple=powerpc64-unknown-linux-gnu -o /dev/null 2>&1 < %s | FileCheck %s +; CHECK-LABEL: fn1 define signext i32 @fn1(i32 %baz) { %1 = mul nsw i32 %baz, 208 %2 = zext i32 %1 to i64 @@ -21,3 +22,35 @@ foo: bar: ret i32 0 } + +; CHECK-LABEL: fn2 +define signext i32 @fn2(i64 %a, i64 %b) { +; CHECK: OR8o {{[^, ]+}}, {{[^, ]+}}, %CR0<imp-def>; +; CHECK: [[CREG:[^, ]+]]<def> = COPY %CR0 +; CHECK: BCC 12, [[CREG]]<kill> + %1 = or i64 %b, %a + %2 = icmp sgt i64 %1, -1 + br i1 %2, label %foo, label %bar + +foo: + ret i32 1 + +bar: + ret i32 0 +} + +; CHECK-LABEL: fn3 +define signext i32 @fn3(i32 %a) { +; CHECK: ANDIo {{[^, ]+}}, 10, %CR0<imp-def>; +; CHECK: [[CREG:[^, ]+]]<def> = COPY %CR0 +; CHECK: BCC 76, [[CREG]]<kill> + %1 = and i32 %a, 10 + %2 = icmp ne i32 %1, 0 + br i1 %2, label %foo, label %bar + +foo: + ret i32 1 + +bar: + ret i32 0 +} diff --git a/test/CodeGen/PowerPC/shift128.ll b/test/CodeGen/PowerPC/shift128.ll index 17a380c71c35..48e1b96f838b 100644 --- a/test/CodeGen/PowerPC/shift128.ll +++ b/test/CodeGen/PowerPC/shift128.ll @@ -1,14 +1,98 @@ -; RUN: llc -verify-machineinstrs < %s -march=ppc64 | grep sld | count 5 +; RUN: llc -verify-machineinstrs < %s | FileCheck --check-prefix=P8 --check-prefix=CHECK %s +; RUN: llc -mcpu=pwr9 -verify-machineinstrs < %s | FileCheck --check-prefix=P9 --check-prefix=CHECK %s +target datalayout = "e-m:e-i64:64-n32:64" +target triple = "powerpc64le-unknown-linux-gnu" -define i128 @foo_lshr(i128 %x, i128 %y) { +; CHECK-LABEL: lshr: +; CHECK-DAG: subfic [[R0:[0-9]+]], 5, 64 +; CHECK-DAG: addi [[R1:[0-9]+]], 5, -64 +; CHECK-DAG: srd [[R2:[0-9]+]], 3, 5 +; CHECK-DAG: sld [[R3:[0-9]+]], 4, [[R0]] +; CHECK-DAG: srd [[R4:[0-9]+]], 4, [[R1]] +; CHECK-DAG: or [[R5:[0-9]+]], [[R2]], [[R3]] +; CHECK-DAG: or 3, [[R5]], [[R4]] +; CHECK-DAG: srd 4, 4, 5 +; CHECK: blr +define i128 @lshr(i128 %x, i128 %y) { %r = lshr i128 %x, %y ret i128 %r } -define i128 @foo_ashr(i128 %x, i128 %y) { +; CHECK-LABEL: ashr: +; CHECK-DAG: subfic [[R0:[0-9]+]], 5, 64 +; CHECK-DAG: addi [[R1:[0-9]+]], 5, -64 +; CHECK-DAG: srd [[R2:[0-9]+]], 3, 5 +; CHECK-DAG: sld [[R3:[0-9]+]], 4, [[R0]] +; CHECK-DAG: srad [[R4:[0-9]+]], 4, [[R1]] +; CHECK-DAG: or [[R5:[0-9]+]], [[R2]], [[R3]] +; CHECK-DAG: cmpwi [[R1]], 1 +; CHECK-DAG: srad 4, 4, 5 +; CHECK: isel 3, [[R5]], [[R4]], 0 +; CHECK: 
blr +define i128 @ashr(i128 %x, i128 %y) { %r = ashr i128 %x, %y ret i128 %r } -define i128 @foo_shl(i128 %x, i128 %y) { +; CHECK-LABEL: shl: +; CHECK-DAG: subfic [[R0:[0-9]+]], 5, 64 +; CHECK-DAG: addi [[R1:[0-9]+]], 5, -64 +; CHECK-DAG: sld [[R2:[0-9]+]], 4, 5 +; CHECK-DAG: srd [[R3:[0-9]+]], 3, [[R0]] +; CHECK-DAG: sld [[R4:[0-9]+]], 3, [[R1]] +; CHECK-DAG: or [[R5:[0-9]+]], [[R2]], [[R3]] +; CHECK-DAG: or 4, [[R5]], [[R4]] +; CHECK-DAG: sld 3, 3, 5 +; CHECK: blr +define i128 @shl(i128 %x, i128 %y) { %r = shl i128 %x, %y ret i128 %r } + +; CHECK-LABEL: shl_v1i128: +; P8-NOT: {{\b}}vslo +; P8-NOT: {{\b}}vsl +; P9-DAG: vslo +; P9-DAG: vspltb +; P9: vsl +; P9-NOT: {{\b}}sld +; P9-NOT: {{\b}}srd +; CHECK: blr +define i128 @shl_v1i128(i128 %arg, i128 %amt) local_unnamed_addr #0 { +entry: + %0 = insertelement <1 x i128> undef, i128 %arg, i32 0 + %1 = insertelement <1 x i128> undef, i128 %amt, i32 0 + %2 = shl <1 x i128> %0, %1 + %retval = extractelement <1 x i128> %2, i32 0 + ret i128 %retval +} + +; CHECK-LABEL: lshr_v1i128: +; P8-NOT: {{\b}}vsro +; P8-NOT: {{\b}}vsr +; P9-DAG: vsro +; P9-DAG: vspltb +; P9: vsr +; P9-NOT: {{\b}}srd +; P9-NOT: {{\b}}sld +; CHECK: blr +define i128 @lshr_v1i128(i128 %arg, i128 %amt) local_unnamed_addr #0 { +entry: + %0 = insertelement <1 x i128> undef, i128 %arg, i32 0 + %1 = insertelement <1 x i128> undef, i128 %amt, i32 0 + %2 = lshr <1 x i128> %0, %1 + %retval = extractelement <1 x i128> %2, i32 0 + ret i128 %retval +} + +; Arithmetic shift right is not available as an operation on the vector registers. +; CHECK-LABEL: ashr_v1i128: +; CHECK-NOT: {{\b}}vsro +; CHECK-NOT: {{\b}}vsr +; CHECK: blr +define i128 @ashr_v1i128(i128 %arg, i128 %amt) local_unnamed_addr #0 { +entry: + %0 = insertelement <1 x i128> undef, i128 %arg, i32 0 + %1 = insertelement <1 x i128> undef, i128 %amt, i32 0 + %2 = ashr <1 x i128> %0, %1 + %retval = extractelement <1 x i128> %2, i32 0 + ret i128 %retval +} diff --git a/test/CodeGen/SPARC/LeonItinerariesUT.ll b/test/CodeGen/SPARC/LeonItinerariesUT.ll index d586fe183a92..87e0c4621c08 100644 --- a/test/CodeGen/SPARC/LeonItinerariesUT.ll +++ b/test/CodeGen/SPARC/LeonItinerariesUT.ll @@ -28,8 +28,8 @@ ; LEON3_4_ITIN-LABEL: f32_ops: ; LEON3_4_ITIN: ld ; LEON3_4_ITIN-NEXT: ld -; LEON3_4_ITIN-NEXT: fadds ; LEON3_4_ITIN-NEXT: ld +; LEON3_4_ITIN-NEXT: fadds ; LEON3_4_ITIN-NEXT: ld ; LEON3_4_ITIN-NEXT: fsubs ; LEON3_4_ITIN-NEXT: fmuls @@ -47,4 +47,4 @@ entry: %6 = fmul float %5, %3 %7 = fdiv float %6, %4 ret float %7 -} +}
\ No newline at end of file diff --git a/test/CodeGen/X86/2007-01-08-InstrSched.ll b/test/CodeGen/X86/2007-01-08-InstrSched.ll index 24aa5b98d0bb..4ec703921e29 100644 --- a/test/CodeGen/X86/2007-01-08-InstrSched.ll +++ b/test/CodeGen/X86/2007-01-08-InstrSched.ll @@ -13,10 +13,10 @@ define float @foo(float %x) nounwind { ; CHECK: mulss ; CHECK: mulss -; CHECK: addss ; CHECK: mulss -; CHECK: addss ; CHECK: mulss ; CHECK: addss +; CHECK: addss +; CHECK: addss ; CHECK: ret } diff --git a/test/CodeGen/X86/GlobalISel/add-scalar.ll b/test/CodeGen/X86/GlobalISel/add-scalar.ll index 85db1c0e7e7a..55c825464039 100644 --- a/test/CodeGen/X86/GlobalISel/add-scalar.ll +++ b/test/CodeGen/X86/GlobalISel/add-scalar.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 -; RUN: llc -mtriple=i386-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32 +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 +; RUN: llc -mtriple=i386-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32 define i64 @test_add_i64(i64 %arg1, i64 %arg2) { ; X64-LABEL: test_add_i64: diff --git a/test/CodeGen/X86/GlobalISel/add-vec.ll b/test/CodeGen/X86/GlobalISel/add-vec.ll new file mode 100644 index 000000000000..679a49d733a2 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/add-vec.ll @@ -0,0 +1,111 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=SKX + +define <16 x i8> @test_add_v16i8(<16 x i8> %arg1, <16 x i8> %arg2) { +; SKX-LABEL: test_add_v16i8: +; SKX: # BB#0: +; SKX-NEXT: vpaddb %xmm1, %xmm0, %xmm0 +; SKX-NEXT: retq + %ret = add <16 x i8> %arg1, %arg2 + ret <16 x i8> %ret +} + +define <8 x i16> @test_add_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) { +; SKX-LABEL: test_add_v8i16: +; SKX: # BB#0: +; SKX-NEXT: vpaddw %xmm1, %xmm0, %xmm0 +; SKX-NEXT: retq + %ret = add <8 x i16> %arg1, %arg2 + ret <8 x i16> %ret +} + +define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) { +; SKX-LABEL: test_add_v4i32: +; SKX: # BB#0: +; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; SKX-NEXT: retq + %ret = add <4 x i32> %arg1, %arg2 + ret <4 x i32> %ret +} + +define <2 x i64> @test_add_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) { +; SKX-LABEL: test_add_v2i64: +; SKX: # BB#0: +; SKX-NEXT: vpaddq %xmm1, %xmm0, %xmm0 +; SKX-NEXT: retq + %ret = add <2 x i64> %arg1, %arg2 + ret <2 x i64> %ret +} + +define <32 x i8> @test_add_v32i8(<32 x i8> %arg1, <32 x i8> %arg2) { +; SKX-LABEL: test_add_v32i8: +; SKX: # BB#0: +; SKX-NEXT: vpaddb %ymm1, %ymm0, %ymm0 +; SKX-NEXT: retq + %ret = add <32 x i8> %arg1, %arg2 + ret <32 x i8> %ret +} + +define <16 x i16> @test_add_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) { +; SKX-LABEL: test_add_v16i16: +; SKX: # BB#0: +; SKX-NEXT: vpaddw %ymm1, %ymm0, %ymm0 +; SKX-NEXT: retq + %ret = add <16 x i16> %arg1, %arg2 + ret <16 x i16> %ret +} + +define <8 x i32> @test_add_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) { +; SKX-LABEL: test_add_v8i32: +; SKX: # BB#0: +; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 +; SKX-NEXT: retq + %ret = add <8 x i32> %arg1, %arg2 + ret <8 x i32> %ret +} + +define <4 x i64> @test_add_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) { +; SKX-LABEL: 
test_add_v4i64: +; SKX: # BB#0: +; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 +; SKX-NEXT: retq + %ret = add <4 x i64> %arg1, %arg2 + ret <4 x i64> %ret +} + +define <64 x i8> @test_add_v64i8(<64 x i8> %arg1, <64 x i8> %arg2) { +; SKX-LABEL: test_add_v64i8: +; SKX: # BB#0: +; SKX-NEXT: vpaddb %zmm1, %zmm0, %zmm0 +; SKX-NEXT: retq + %ret = add <64 x i8> %arg1, %arg2 + ret <64 x i8> %ret +} + +define <32 x i16> @test_add_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) { +; SKX-LABEL: test_add_v32i16: +; SKX: # BB#0: +; SKX-NEXT: vpaddw %zmm1, %zmm0, %zmm0 +; SKX-NEXT: retq + %ret = add <32 x i16> %arg1, %arg2 + ret <32 x i16> %ret +} + +define <16 x i32> @test_add_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) { +; SKX-LABEL: test_add_v16i32: +; SKX: # BB#0: +; SKX-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; SKX-NEXT: retq + %ret = add <16 x i32> %arg1, %arg2 + ret <16 x i32> %ret +} + +define <8 x i64> @test_add_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) { +; SKX-LABEL: test_add_v8i64: +; SKX: # BB#0: +; SKX-NEXT: vpaddq %zmm1, %zmm0, %zmm0 +; SKX-NEXT: retq + %ret = add <8 x i64> %arg1, %arg2 + ret <8 x i64> %ret +} + diff --git a/test/CodeGen/X86/GlobalISel/binop.ll b/test/CodeGen/X86/GlobalISel/binop.ll index 1aae1db8ab07..d7ae4435682f 100644 --- a/test/CodeGen/X86/GlobalISel/binop.ll +++ b/test/CodeGen/X86/GlobalISel/binop.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE -; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX -; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX512F -; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX512VL +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE +; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX +; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX512F +; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX512VL define i64 @test_sub_i64(i64 %arg1, i64 %arg2) { ; ALL-LABEL: test_sub_i64: diff --git a/test/CodeGen/X86/GlobalISel/br.ll b/test/CodeGen/X86/GlobalISel/br.ll index faa6a0350337..387e8797f0cd 100644 --- a/test/CodeGen/X86/GlobalISel/br.ll +++ b/test/CodeGen/X86/GlobalISel/br.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -O0 -mtriple=x86_64-linux-gnu -global-isel %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64 +; RUN: llc -O0 -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64 define void @uncondbr() { ; CHECK-LABEL: uncondbr: diff --git a/test/CodeGen/X86/GlobalISel/callingconv.ll b/test/CodeGen/X86/GlobalISel/callingconv.ll index c7e4d91ac3c7..997115d4d900 100644 --- 
a/test/CodeGen/X86/GlobalISel/callingconv.ll +++ b/test/CodeGen/X86/GlobalISel/callingconv.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=i386-linux-gnu -mattr=+sse2 -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32 --check-prefix=X32_GISEL -; RUN: llc -mtriple=i386-linux-gnu -mattr=+sse2 < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32 --check-prefix=X32_ISEL -; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_GISEL -; RUN: llc -mtriple=x86_64-linux-gnu < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_ISEL +; RUN: llc -mtriple=i386-linux-gnu -mattr=+sse2 -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32 --check-prefix=X32_GISEL +; RUN: llc -mtriple=i386-linux-gnu -mattr=+sse2 -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32 --check-prefix=X32_ISEL +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_GISEL +; RUN: llc -mtriple=x86_64-linux-gnu -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_ISEL define i32 @test_ret_i32() { ; X32-LABEL: test_ret_i32: diff --git a/test/CodeGen/X86/GlobalISel/cmp.ll b/test/CodeGen/X86/GlobalISel/cmp.ll index 03692bb6b1de..39fee409d785 100644 --- a/test/CodeGen/X86/GlobalISel/cmp.ll +++ b/test/CodeGen/X86/GlobalISel/cmp.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL define i32 @test_icmp_eq_i8(i8 %a, i8 %b) { ; ALL-LABEL: test_icmp_eq_i8: diff --git a/test/CodeGen/X86/GlobalISel/constant.ll b/test/CodeGen/X86/GlobalISel/constant.ll index cab043a51f05..b550bb0bc7be 100644 --- a/test/CodeGen/X86/GlobalISel/constant.ll +++ b/test/CodeGen/X86/GlobalISel/constant.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 define i8 @const_i8() { ; ALL-LABEL: const_i8: diff --git a/test/CodeGen/X86/GlobalISel/ext-x86-64.ll b/test/CodeGen/X86/GlobalISel/ext-x86-64.ll index 64cd0e70a4fd..b08ac062fb4b 100644 --- a/test/CodeGen/X86/GlobalISel/ext-x86-64.ll +++ b/test/CodeGen/X86/GlobalISel/ext-x86-64.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=X64 +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64 ; TODO: merge with ext.ll after i64 sext is supported on 32-bit platforms diff --git a/test/CodeGen/X86/GlobalISel/ext.ll b/test/CodeGen/X86/GlobalISel/ext.ll index 4d4e3b05ca28..27aecd118b38 100644 --- a/test/CodeGen/X86/GlobalISel/ext.ll +++ b/test/CodeGen/X86/GlobalISel/ext.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN:
llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=X64 -; RUN: llc -mtriple=i386-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=X32 +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64 +; RUN: llc -mtriple=i386-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X32 define i32 @test_zext_i1(i32 %a) { ; X64-LABEL: test_zext_i1: diff --git a/test/CodeGen/X86/GlobalISel/frameIndex.ll b/test/CodeGen/X86/GlobalISel/frameIndex.ll index 2bb11adcc3b5..a9ec94defea8 100644 --- a/test/CodeGen/X86/GlobalISel/frameIndex.ll +++ b/test/CodeGen/X86/GlobalISel/frameIndex.ll @@ -1,10 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=X64 -; RUN: llc -mtriple=x86_64-linux-gnu < %s -o - | FileCheck %s --check-prefix=X64 -; RUN: llc -mtriple=i386-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=X32 -; RUN: llc -mtriple=i386-linux-gnu < %s -o - | FileCheck %s --check-prefix=X32 -; RUN: llc -mtriple=x86_64-linux-gnux32 -global-isel < %s -o - | FileCheck %s --check-prefix=X32ABI -; RUN: llc -mtriple=x86_64-linux-gnux32 < %s -o - | FileCheck %s --check-prefix=X32ABI +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64 +; RUN: llc -mtriple=x86_64-linux-gnu -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64 +; RUN: llc -mtriple=i386-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X32 +; RUN: llc -mtriple=i386-linux-gnu -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X32 +; RUN: llc -mtriple=x86_64-linux-gnux32 -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X32ABI +; RUN: llc -mtriple=x86_64-linux-gnux32 -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X32ABI define i32* @allocai32() { ; X64-LABEL: allocai32: diff --git a/test/CodeGen/X86/GlobalISel/gep.ll b/test/CodeGen/X86/GlobalISel/gep.ll index bc5b0152b24a..94da9fb46761 100644 --- a/test/CodeGen/X86/GlobalISel/gep.ll +++ b/test/CodeGen/X86/GlobalISel/gep.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64_GISEL -; RUN: llc -mtriple=x86_64-linux-gnu < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64_GISEL +; RUN: llc -mtriple=x86_64-linux-gnu -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 define i32* @test_gep_i8(i32 *%arr, i8 %ind) { ; X64_GISEL-LABEL: test_gep_i8: diff --git a/test/CodeGen/X86/GlobalISel/legalize-add-v128.mir b/test/CodeGen/X86/GlobalISel/legalize-add-v128.mir new file mode 100644 index 000000000000..feba33ac91be --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/legalize-add-v128.mir @@ -0,0 +1,119 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE2 + +--- | + define void @test_add_v16i8() { + %ret = add <16 x i8> undef, undef + ret void + } + + define void @test_add_v8i16() { + %ret = add <8 x i16> undef, undef + ret void + } + + define void @test_add_v4i32() { + 
%ret = add <4 x i32> undef, undef + ret void + } + + define void @test_add_v2i64() { + %ret = add <2 x i64> undef, undef + ret void + } +... +--- +name: test_add_v16i8 +# ALL-LABEL: name: test_add_v16i8 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<16 x s8>) = IMPLICIT_DEF +# ALL-NEXT: %1(<16 x s8>) = IMPLICIT_DEF +# ALL-NEXT: %2(<16 x s8>) = G_ADD %0, %1 +# ALL-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<16 x s8>) = IMPLICIT_DEF + %1(<16 x s8>) = IMPLICIT_DEF + %2(<16 x s8>) = G_ADD %0, %1 + RET 0 + +... +--- +name: test_add_v8i16 +# ALL-LABEL: name: test_add_v8i16 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<8 x s16>) = IMPLICIT_DEF +# ALL-NEXT: %1(<8 x s16>) = IMPLICIT_DEF +# ALL-NEXT: %2(<8 x s16>) = G_ADD %0, %1 +# ALL-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<8 x s16>) = IMPLICIT_DEF + %1(<8 x s16>) = IMPLICIT_DEF + %2(<8 x s16>) = G_ADD %0, %1 + RET 0 + +... +--- +name: test_add_v4i32 +# ALL-LABEL: name: test_add_v4i32 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<4 x s32>) = IMPLICIT_DEF +# ALL-NEXT: %1(<4 x s32>) = IMPLICIT_DEF +# ALL-NEXT: %2(<4 x s32>) = G_ADD %0, %1 +# ALL-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<4 x s32>) = IMPLICIT_DEF + %1(<4 x s32>) = IMPLICIT_DEF + %2(<4 x s32>) = G_ADD %0, %1 + RET 0 + +... +--- +name: test_add_v2i64 +# ALL-LABEL: name: test_add_v2i64 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<2 x s64>) = IMPLICIT_DEF +# ALL-NEXT: %1(<2 x s64>) = IMPLICIT_DEF +# ALL-NEXT: %2(<2 x s64>) = G_ADD %0, %1 +# ALL-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<2 x s64>) = IMPLICIT_DEF + %1(<2 x s64>) = IMPLICIT_DEF + %2(<2 x s64>) = G_ADD %0, %1 + RET 0 + +... diff --git a/test/CodeGen/X86/GlobalISel/legalize-add-v256.mir b/test/CodeGen/X86/GlobalISel/legalize-add-v256.mir new file mode 100644 index 000000000000..f7dc8031b4f5 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/legalize-add-v256.mir @@ -0,0 +1,157 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX1 +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX2 + +--- | + define void @test_add_v32i8() { + %ret = add <32 x i8> undef, undef + ret void + } + + define void @test_add_v16i16() { + %ret = add <16 x i16> undef, undef + ret void + } + + define void @test_add_v8i32() { + %ret = add <8 x i32> undef, undef + ret void + } + + define void @test_add_v4i64() { + %ret = add <4 x i64> undef, undef + ret void + } + +... 
+--- +name: test_add_v32i8 +# ALL-LABEL: name: test_add_v32i8 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# AVX1: %0(<32 x s8>) = IMPLICIT_DEF +# AVX1-NEXT: %1(<32 x s8>) = IMPLICIT_DEF +# AVX1-NEXT: %3(<16 x s8>), %4(<16 x s8>) = G_UNMERGE_VALUES %0(<32 x s8>) +# AVX1-NEXT: %5(<16 x s8>), %6(<16 x s8>) = G_UNMERGE_VALUES %1(<32 x s8>) +# AVX1-NEXT: %7(<16 x s8>) = G_ADD %3, %5 +# AVX1-NEXT: %8(<16 x s8>) = G_ADD %4, %6 +# AVX1-NEXT: %2(<32 x s8>) = G_MERGE_VALUES %7(<16 x s8>), %8(<16 x s8>) +# AVX1-NEXT: RET 0 +# +# AVX2: %0(<32 x s8>) = IMPLICIT_DEF +# AVX2-NEXT: %1(<32 x s8>) = IMPLICIT_DEF +# AVX2-NEXT: %2(<32 x s8>) = G_ADD %0, %1 +# AVX2-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<32 x s8>) = IMPLICIT_DEF + %1(<32 x s8>) = IMPLICIT_DEF + %2(<32 x s8>) = G_ADD %0, %1 + RET 0 + +... +--- +name: test_add_v16i16 +# ALL-LABEL: name: test_add_v16i16 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# AVX1: %0(<16 x s16>) = IMPLICIT_DEF +# AVX1-NEXT: %1(<16 x s16>) = IMPLICIT_DEF +# AVX1-NEXT: %3(<8 x s16>), %4(<8 x s16>) = G_UNMERGE_VALUES %0(<16 x s16>) +# AVX1-NEXT: %5(<8 x s16>), %6(<8 x s16>) = G_UNMERGE_VALUES %1(<16 x s16>) +# AVX1-NEXT: %7(<8 x s16>) = G_ADD %3, %5 +# AVX1-NEXT: %8(<8 x s16>) = G_ADD %4, %6 +# AVX1-NEXT: %2(<16 x s16>) = G_MERGE_VALUES %7(<8 x s16>), %8(<8 x s16>) +# AVX1-NEXT: RET 0 +# +# AVX2: %0(<16 x s16>) = IMPLICIT_DEF +# AVX2-NEXT: %1(<16 x s16>) = IMPLICIT_DEF +# AVX2-NEXT: %2(<16 x s16>) = G_ADD %0, %1 +# AVX2-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<16 x s16>) = IMPLICIT_DEF + %1(<16 x s16>) = IMPLICIT_DEF + %2(<16 x s16>) = G_ADD %0, %1 + RET 0 + +... +--- +name: test_add_v8i32 +# ALL-LABEL: name: test_add_v8i32 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# AVX1: %0(<8 x s32>) = IMPLICIT_DEF +# AVX1-NEXT: %1(<8 x s32>) = IMPLICIT_DEF +# AVX1-NEXT: %3(<4 x s32>), %4(<4 x s32>) = G_UNMERGE_VALUES %0(<8 x s32>) +# AVX1-NEXT: %5(<4 x s32>), %6(<4 x s32>) = G_UNMERGE_VALUES %1(<8 x s32>) +# AVX1-NEXT: %7(<4 x s32>) = G_ADD %3, %5 +# AVX1-NEXT: %8(<4 x s32>) = G_ADD %4, %6 +# AVX1-NEXT: %2(<8 x s32>) = G_MERGE_VALUES %7(<4 x s32>), %8(<4 x s32>) +# AVX1-NEXT: RET 0 +# +# AVX2: %0(<8 x s32>) = IMPLICIT_DEF +# AVX2-NEXT: %1(<8 x s32>) = IMPLICIT_DEF +# AVX2-NEXT: %2(<8 x s32>) = G_ADD %0, %1 +# AVX2-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<8 x s32>) = IMPLICIT_DEF + %1(<8 x s32>) = IMPLICIT_DEF + %2(<8 x s32>) = G_ADD %0, %1 + RET 0 + +... 
+--- +name: test_add_v4i64 +# ALL-LABEL: name: test_add_v4i64 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# AVX1: %0(<4 x s64>) = IMPLICIT_DEF +# AVX1-NEXT: %1(<4 x s64>) = IMPLICIT_DEF +# AVX1-NEXT: %3(<2 x s64>), %4(<2 x s64>) = G_UNMERGE_VALUES %0(<4 x s64>) +# AVX1-NEXT: %5(<2 x s64>), %6(<2 x s64>) = G_UNMERGE_VALUES %1(<4 x s64>) +# AVX1-NEXT: %7(<2 x s64>) = G_ADD %3, %5 +# AVX1-NEXT: %8(<2 x s64>) = G_ADD %4, %6 +# AVX1-NEXT: %2(<4 x s64>) = G_MERGE_VALUES %7(<2 x s64>), %8(<2 x s64>) +# AVX1-NEXT: RET 0 +# +# AVX2: %0(<4 x s64>) = IMPLICIT_DEF +# AVX2-NEXT: %1(<4 x s64>) = IMPLICIT_DEF +# AVX2-NEXT: %2(<4 x s64>) = G_ADD %0, %1 +# AVX2-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<4 x s64>) = IMPLICIT_DEF + %1(<4 x s64>) = IMPLICIT_DEF + %2(<4 x s64>) = G_ADD %0, %1 + RET 0 + +... diff --git a/test/CodeGen/X86/GlobalISel/legalize-add-v512.mir b/test/CodeGen/X86/GlobalISel/legalize-add-v512.mir new file mode 100644 index 000000000000..2b8b51acaa55 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/legalize-add-v512.mir @@ -0,0 +1,139 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512F +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512bw -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BW + +--- | + define void @test_add_v64i8() { + %ret = add <64 x i8> undef, undef + ret void + } + + define void @test_add_v32i16() { + %ret = add <32 x i16> undef, undef + ret void + } + + define void @test_add_v16i32() { + %ret = add <16 x i32> undef, undef + ret void + } + + define void @test_add_v8i64() { + %ret = add <8 x i64> undef, undef + ret void + } + +... +--- +name: test_add_v64i8 +# ALL-LABEL: name: test_add_v64i8 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# AVX512F: %0(<64 x s8>) = IMPLICIT_DEF +# AVX512F-NEXT: %1(<64 x s8>) = IMPLICIT_DEF +# AVX512F-NEXT: %3(<32 x s8>), %4(<32 x s8>) = G_UNMERGE_VALUES %0(<64 x s8>) +# AVX512F-NEXT: %5(<32 x s8>), %6(<32 x s8>) = G_UNMERGE_VALUES %1(<64 x s8>) +# AVX512F-NEXT: %7(<32 x s8>) = G_ADD %3, %5 +# AVX512F-NEXT: %8(<32 x s8>) = G_ADD %4, %6 +# AVX512F-NEXT: %2(<64 x s8>) = G_MERGE_VALUES %7(<32 x s8>), %8(<32 x s8>) +# AVX512F-NEXT: RET 0 +# +# AVX512BW: %0(<64 x s8>) = IMPLICIT_DEF +# AVX512BW-NEXT: %1(<64 x s8>) = IMPLICIT_DEF +# AVX512BW-NEXT: %2(<64 x s8>) = G_ADD %0, %1 +# AVX512BW-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<64 x s8>) = IMPLICIT_DEF + %1(<64 x s8>) = IMPLICIT_DEF + %2(<64 x s8>) = G_ADD %0, %1 + RET 0 + +... 
+--- +name: test_add_v32i16 +# ALL-LABEL: name: test_add_v32i16 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# AVX512F: %0(<32 x s16>) = IMPLICIT_DEF +# AVX512F-NEXT: %1(<32 x s16>) = IMPLICIT_DEF +# AVX512F-NEXT: %3(<16 x s16>), %4(<16 x s16>) = G_UNMERGE_VALUES %0(<32 x s16>) +# AVX512F-NEXT: %5(<16 x s16>), %6(<16 x s16>) = G_UNMERGE_VALUES %1(<32 x s16>) +# AVX512F-NEXT: %7(<16 x s16>) = G_ADD %3, %5 +# AVX512F-NEXT: %8(<16 x s16>) = G_ADD %4, %6 +# AVX512F-NEXT: %2(<32 x s16>) = G_MERGE_VALUES %7(<16 x s16>), %8(<16 x s16>) +# AVX512F-NEXT: RET 0 +# +# AVX512BW: %0(<32 x s16>) = IMPLICIT_DEF +# AVX512BW-NEXT: %1(<32 x s16>) = IMPLICIT_DEF +# AVX512BW-NEXT: %2(<32 x s16>) = G_ADD %0, %1 +# AVX512BW-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<32 x s16>) = IMPLICIT_DEF + %1(<32 x s16>) = IMPLICIT_DEF + %2(<32 x s16>) = G_ADD %0, %1 + RET 0 + +... +--- +name: test_add_v16i32 +# ALL-LABEL: name: test_add_v16i32 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<16 x s32>) = IMPLICIT_DEF +# ALL-NEXT: %1(<16 x s32>) = IMPLICIT_DEF +# ALL-NEXT: %2(<16 x s32>) = G_ADD %0, %1 +# ALL-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<16 x s32>) = IMPLICIT_DEF + %1(<16 x s32>) = IMPLICIT_DEF + %2(<16 x s32>) = G_ADD %0, %1 + RET 0 + +... +--- +name: test_add_v8i64 +# ALL-LABEL: name: test_add_v8i64 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<8 x s64>) = IMPLICIT_DEF +# ALL-NEXT: %1(<8 x s64>) = IMPLICIT_DEF +# ALL-NEXT: %2(<8 x s64>) = G_ADD %0, %1 +# ALL-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<8 x s64>) = IMPLICIT_DEF + %1(<8 x s64>) = IMPLICIT_DEF + %2(<8 x s64>) = G_ADD %0, %1 + RET 0 + +... diff --git a/test/CodeGen/X86/GlobalISel/legalize-sub-v128.mir b/test/CodeGen/X86/GlobalISel/legalize-sub-v128.mir new file mode 100644 index 000000000000..2f90fc9a3c90 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/legalize-sub-v128.mir @@ -0,0 +1,119 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE2 + +--- | + define void @test_sub_v16i8() { + %ret = sub <16 x i8> undef, undef + ret void + } + + define void @test_sub_v8i16() { + %ret = sub <8 x i16> undef, undef + ret void + } + + define void @test_sub_v4i32() { + %ret = sub <4 x i32> undef, undef + ret void + } + + define void @test_sub_v2i64() { + %ret = sub <2 x i64> undef, undef + ret void + } +... +--- +name: test_sub_v16i8 +# ALL-LABEL: name: test_sub_v16i8 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<16 x s8>) = IMPLICIT_DEF +# ALL-NEXT: %1(<16 x s8>) = IMPLICIT_DEF +# ALL-NEXT: %2(<16 x s8>) = G_SUB %0, %1 +# ALL-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<16 x s8>) = IMPLICIT_DEF + %1(<16 x s8>) = IMPLICIT_DEF + %2(<16 x s8>) = G_SUB %0, %1 + RET 0 + +... 
+--- +name: test_sub_v8i16 +# ALL-LABEL: name: test_sub_v8i16 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<8 x s16>) = IMPLICIT_DEF +# ALL-NEXT: %1(<8 x s16>) = IMPLICIT_DEF +# ALL-NEXT: %2(<8 x s16>) = G_SUB %0, %1 +# ALL-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<8 x s16>) = IMPLICIT_DEF + %1(<8 x s16>) = IMPLICIT_DEF + %2(<8 x s16>) = G_SUB %0, %1 + RET 0 + +... +--- +name: test_sub_v4i32 +# ALL-LABEL: name: test_sub_v4i32 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<4 x s32>) = IMPLICIT_DEF +# ALL-NEXT: %1(<4 x s32>) = IMPLICIT_DEF +# ALL-NEXT: %2(<4 x s32>) = G_SUB %0, %1 +# ALL-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<4 x s32>) = IMPLICIT_DEF + %1(<4 x s32>) = IMPLICIT_DEF + %2(<4 x s32>) = G_SUB %0, %1 + RET 0 + +... +--- +name: test_sub_v2i64 +# ALL-LABEL: name: test_sub_v2i64 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<2 x s64>) = IMPLICIT_DEF +# ALL-NEXT: %1(<2 x s64>) = IMPLICIT_DEF +# ALL-NEXT: %2(<2 x s64>) = G_SUB %0, %1 +# ALL-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<2 x s64>) = IMPLICIT_DEF + %1(<2 x s64>) = IMPLICIT_DEF + %2(<2 x s64>) = G_SUB %0, %1 + RET 0 + +... diff --git a/test/CodeGen/X86/GlobalISel/legalize-sub-v256.mir b/test/CodeGen/X86/GlobalISel/legalize-sub-v256.mir new file mode 100644 index 000000000000..9d07787b8ecb --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/legalize-sub-v256.mir @@ -0,0 +1,120 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX2 +# TODO: add tests for additional configurations once legalization is supported --- | + define void @test_sub_v32i8() { + %ret = sub <32 x i8> undef, undef + ret void + } + + define void @test_sub_v16i16() { + %ret = sub <16 x i16> undef, undef + ret void + } + + define void @test_sub_v8i32() { + %ret = sub <8 x i32> undef, undef + ret void + } + + define void @test_sub_v4i64() { + %ret = sub <4 x i64> undef, undef + ret void + } + +... +--- +name: test_sub_v32i8 +# ALL-LABEL: name: test_sub_v32i8 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# AVX2: %0(<32 x s8>) = IMPLICIT_DEF +# AVX2-NEXT: %1(<32 x s8>) = IMPLICIT_DEF +# AVX2-NEXT: %2(<32 x s8>) = G_SUB %0, %1 +# AVX2-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<32 x s8>) = IMPLICIT_DEF + %1(<32 x s8>) = IMPLICIT_DEF + %2(<32 x s8>) = G_SUB %0, %1 + RET 0 + +... +--- +name: test_sub_v16i16 +# ALL-LABEL: name: test_sub_v16i16 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# AVX2: %0(<16 x s16>) = IMPLICIT_DEF +# AVX2-NEXT: %1(<16 x s16>) = IMPLICIT_DEF +# AVX2-NEXT: %2(<16 x s16>) = G_SUB %0, %1 +# AVX2-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<16 x s16>) = IMPLICIT_DEF + %1(<16 x s16>) = IMPLICIT_DEF + %2(<16 x s16>) = G_SUB %0, %1 + RET 0 + +...
+--- +name: test_sub_v8i32 +# ALL-LABEL: name: test_sub_v8i32 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# AVX2: %0(<8 x s32>) = IMPLICIT_DEF +# AVX2-NEXT: %1(<8 x s32>) = IMPLICIT_DEF +# AVX2-NEXT: %2(<8 x s32>) = G_SUB %0, %1 +# AVX2-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<8 x s32>) = IMPLICIT_DEF + %1(<8 x s32>) = IMPLICIT_DEF + %2(<8 x s32>) = G_SUB %0, %1 + RET 0 + +... +--- +name: test_sub_v4i64 +# ALL-LABEL: name: test_sub_v4i64 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# AVX2: %0(<4 x s64>) = IMPLICIT_DEF +# AVX2-NEXT: %1(<4 x s64>) = IMPLICIT_DEF +# AVX2-NEXT: %2(<4 x s64>) = G_SUB %0, %1 +# AVX2-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<4 x s64>) = IMPLICIT_DEF + %1(<4 x s64>) = IMPLICIT_DEF + %2(<4 x s64>) = G_SUB %0, %1 + RET 0 + +... diff --git a/test/CodeGen/X86/GlobalISel/legalize-sub-v512.mir b/test/CodeGen/X86/GlobalISel/legalize-sub-v512.mir new file mode 100644 index 000000000000..c88e074ca413 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/legalize-sub-v512.mir @@ -0,0 +1,120 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512bw -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BW +# TODO: add tests for additional configurations once legalization is supported --- | + define void @test_sub_v64i8() { + %ret = sub <64 x i8> undef, undef + ret void + } + + define void @test_sub_v32i16() { + %ret = sub <32 x i16> undef, undef + ret void + } + + define void @test_sub_v16i32() { + %ret = sub <16 x i32> undef, undef + ret void + } + + define void @test_sub_v8i64() { + %ret = sub <8 x i64> undef, undef + ret void + } + +... +--- +name: test_sub_v64i8 +# ALL-LABEL: name: test_sub_v64i8 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# AVX512BW: %0(<64 x s8>) = IMPLICIT_DEF +# AVX512BW-NEXT: %1(<64 x s8>) = IMPLICIT_DEF +# AVX512BW-NEXT: %2(<64 x s8>) = G_SUB %0, %1 +# AVX512BW-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<64 x s8>) = IMPLICIT_DEF + %1(<64 x s8>) = IMPLICIT_DEF + %2(<64 x s8>) = G_SUB %0, %1 + RET 0 + +... +--- +name: test_sub_v32i16 +# ALL-LABEL: name: test_sub_v32i16 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# AVX512BW: %0(<32 x s16>) = IMPLICIT_DEF +# AVX512BW-NEXT: %1(<32 x s16>) = IMPLICIT_DEF +# AVX512BW-NEXT: %2(<32 x s16>) = G_SUB %0, %1 +# AVX512BW-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<32 x s16>) = IMPLICIT_DEF + %1(<32 x s16>) = IMPLICIT_DEF + %2(<32 x s16>) = G_SUB %0, %1 + RET 0 + +... +--- +name: test_sub_v16i32 +# ALL-LABEL: name: test_sub_v16i32 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<16 x s32>) = IMPLICIT_DEF +# ALL-NEXT: %1(<16 x s32>) = IMPLICIT_DEF +# ALL-NEXT: %2(<16 x s32>) = G_SUB %0, %1 +# ALL-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<16 x s32>) = IMPLICIT_DEF + %1(<16 x s32>) = IMPLICIT_DEF + %2(<16 x s32>) = G_SUB %0, %1 + RET 0 + +...
+--- +name: test_sub_v8i64 +# ALL-LABEL: name: test_sub_v8i64 +alignment: 4 +legalized: false +regBankSelected: false +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +# ALL: %0(<8 x s64>) = IMPLICIT_DEF +# ALL-NEXT: %1(<8 x s64>) = IMPLICIT_DEF +# ALL-NEXT: %2(<8 x s64>) = G_SUB %0, %1 +# ALL-NEXT: RET 0 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<8 x s64>) = IMPLICIT_DEF + %1(<8 x s64>) = IMPLICIT_DEF + %2(<8 x s64>) = G_SUB %0, %1 + RET 0 + +... diff --git a/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll b/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll index 49a7fd79f8b2..5df52c5a058b 100644 --- a/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll +++ b/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=i386-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE_FAST -; RUN: llc -mtriple=i386-linux-gnu -regbankselect-greedy -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE_GREEDY +; RUN: llc -mtriple=i386-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE_FAST +; RUN: llc -mtriple=i386-linux-gnu -regbankselect-greedy -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE_GREEDY ;TODO merge with x86-64 tests (many operations not supported yet) diff --git a/test/CodeGen/X86/GlobalISel/memop-scalar.ll b/test/CodeGen/X86/GlobalISel/memop-scalar.ll index 3e45a9c9a49d..d3d4b297a802 100644 --- a/test/CodeGen/X86/GlobalISel/memop-scalar.ll +++ b/test/CodeGen/X86/GlobalISel/memop-scalar.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE_FAST -; RUN: llc -mtriple=x86_64-linux-gnu -regbankselect-greedy -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE_GREEDY +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE_FAST +; RUN: llc -mtriple=x86_64-linux-gnu -regbankselect-greedy -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE_GREEDY define i8 @test_load_i8(i8 * %p1) { ; ALL-LABEL: test_load_i8: diff --git a/test/CodeGen/X86/GlobalISel/memop-vec.ll b/test/CodeGen/X86/GlobalISel/memop-vec.ll index e218fded4d5f..f1ffc15f4d03 100644 --- a/test/CodeGen/X86/GlobalISel/memop-vec.ll +++ b/test/CodeGen/X86/GlobalISel/memop-vec.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SKX -; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -regbankselect-greedy -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SKX +; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SKX +; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -regbankselect-greedy -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SKX define <4 x i32> @test_load_v4i32_noalign(<4 x i32> * %p1) { ; ALL-LABEL: test_load_v4i32_noalign: diff --git
a/test/CodeGen/X86/GlobalISel/mul-scalar.ll b/test/CodeGen/X86/GlobalISel/mul-scalar.ll index 529e81c43304..450c3839797c 100644 --- a/test/CodeGen/X86/GlobalISel/mul-scalar.ll +++ b/test/CodeGen/X86/GlobalISel/mul-scalar.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 ;TODO: instruction selection not supported yet ;define i8 @test_mul_i8(i8 %arg1, i8 %arg2) { diff --git a/test/CodeGen/X86/GlobalISel/mul-vec.ll b/test/CodeGen/X86/GlobalISel/mul-vec.ll index 83615a718528..b2e211470f39 100644 --- a/test/CodeGen/X86/GlobalISel/mul-vec.ll +++ b/test/CodeGen/X86/GlobalISel/mul-vec.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -global-isel < %s -o - | FileCheck %s --check-prefix=SKX +; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=SKX define <8 x i16> @test_mul_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) { ; SKX-LABEL: test_mul_v8i16: diff --git a/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir b/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir index 446db56b992c..f925c836f3d1 100644 --- a/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir +++ b/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir @@ -5,6 +5,15 @@ define void @test_mul_vec256() { ret void } + + define void @test_add_vec256() { + ret void + } + + define void @test_sub_vec256() { + ret void + } + ... --- name: test_mul_vec256 @@ -29,3 +38,49 @@ body: | RET 0 ... +--- +name: test_add_vec256 +alignment: 4 +legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +# CHECK-LABEL: name: test_add_vec256 +# CHECK: registers: +# CHECK: - { id: 0, class: vecr } +# CHECK: - { id: 1, class: vecr } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.1 (%ir-block.0): + + %0(<8 x s32>) = IMPLICIT_DEF + %1(<8 x s32>) = G_ADD %0, %0 + RET 0 + +... +--- +name: test_sub_vec256 +alignment: 4 +legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +# CHECK-LABEL: name: test_sub_vec256 +# CHECK: registers: +# CHECK: - { id: 0, class: vecr } +# CHECK: - { id: 1, class: vecr } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.1 (%ir-block.0): + + %0(<8 x s32>) = IMPLICIT_DEF + %1(<8 x s32>) = G_SUB %0, %0 + RET 0 + +... diff --git a/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir b/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir index f824ee12dcfb..e0c12ff44a2f 100644 --- a/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir +++ b/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir @@ -7,6 +7,14 @@ ret void } + define void @test_add_vec512() { + ret void + } + + define void @test_sub_vec512() { + ret void + } + ... --- name: test_mul_vec512 @@ -31,3 +39,49 @@ body: | RET 0 ... 
+--- +name: test_add_vec512 +alignment: 4 +legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +# CHECK-LABEL: name: test_add_vec512 +# CHECK: registers: +# CHECK: - { id: 0, class: vecr } +# CHECK: - { id: 1, class: vecr } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.1 (%ir-block.0): + + %0(<16 x s32>) = IMPLICIT_DEF + %1(<16 x s32>) = G_ADD %0, %0 + RET 0 + +... +--- +name: test_sub_vec512 +alignment: 4 +legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +# CHECK-LABEL: name: test_sub_vec512 +# CHECK: registers: +# CHECK: - { id: 0, class: vecr } +# CHECK: - { id: 1, class: vecr } +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } +body: | + bb.1 (%ir-block.0): + + %0(<16 x s32>) = IMPLICIT_DEF + %1(<16 x s32>) = G_SUB %0, %0 + RET 0 + +... diff --git a/test/CodeGen/X86/GlobalISel/select-add-v128.mir b/test/CodeGen/X86/GlobalISel/select-add-v128.mir new file mode 100644 index 000000000000..a39702340bc2 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/select-add-v128.mir @@ -0,0 +1,195 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=SSE2 +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=AVX1 +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl,+avx512bw -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BWVL + +--- | + define <16 x i8> @test_add_v16i8(<16 x i8> %arg1, <16 x i8> %arg2) { + %ret = add <16 x i8> %arg1, %arg2 + ret <16 x i8> %ret + } + + define <8 x i16> @test_add_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) { + %ret = add <8 x i16> %arg1, %arg2 + ret <8 x i16> %ret + } + + define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) { + %ret = add <4 x i32> %arg1, %arg2 + ret <4 x i32> %ret + } + + define <2 x i64> @test_add_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) { + %ret = add <2 x i64> %arg1, %arg2 + ret <2 x i64> %ret + } + +... +--- +name: test_add_v16i8 +# ALL-LABEL: name: test_add_v16i8 +alignment: 4 +legalized: true +regBankSelected: true +# NOVL: registers: +# NOVL-NEXT: - { id: 0, class: vr128 } +# NOVL-NEXT: - { id: 1, class: vr128 } +# NOVL-NEXT: - { id: 2, class: vr128 } +# +# AVX512VL: registers: +# AVX512VL-NEXT: - { id: 0, class: vr128 } +# AVX512VL-NEXT: - { id: 1, class: vr128 } +# AVX512VL-NEXT: - { id: 2, class: vr128 } +# +# AVX512BWVL: registers: +# AVX512BWVL-NEXT: - { id: 0, class: vr128x } +# AVX512BWVL-NEXT: - { id: 1, class: vr128x } +# AVX512BWVL-NEXT: - { id: 2, class: vr128x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# SSE2: %2 = PADDBrr %0, %1 +# +# AVX1: %2 = VPADDBrr %0, %1 +# +# AVX512VL: %2 = VPADDBrr %0, %1 +# +# AVX512BWVL: %2 = VPADDBZ128rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<16 x s8>) = COPY %xmm0 + %1(<16 x s8>) = COPY %xmm1 + %2(<16 x s8>) = G_ADD %0, %1 + %xmm0 = COPY %2(<16 x s8>) + RET 0, implicit %xmm0 + +... 
+--- +name: test_add_v8i16 +# ALL-LABEL: name: test_add_v8i16 +alignment: 4 +legalized: true +regBankSelected: true +# NOVL: registers: +# NOVL-NEXT: - { id: 0, class: vr128 } +# NOVL-NEXT: - { id: 1, class: vr128 } +# NOVL-NEXT: - { id: 2, class: vr128 } +# +# AVX512VL: registers: +# AVX512VL-NEXT: - { id: 0, class: vr128 } +# AVX512VL-NEXT: - { id: 1, class: vr128 } +# AVX512VL-NEXT: - { id: 2, class: vr128 } +# +# AVX512BWVL: registers: +# AVX512BWVL-NEXT: - { id: 0, class: vr128x } +# AVX512BWVL-NEXT: - { id: 1, class: vr128x } +# AVX512BWVL-NEXT: - { id: 2, class: vr128x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# SSE2: %2 = PADDWrr %0, %1 +# +# AVX1: %2 = VPADDWrr %0, %1 +# +# AVX512VL: %2 = VPADDWrr %0, %1 +# +# AVX512BWVL: %2 = VPADDWZ128rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<8 x s16>) = COPY %xmm0 + %1(<8 x s16>) = COPY %xmm1 + %2(<8 x s16>) = G_ADD %0, %1 + %xmm0 = COPY %2(<8 x s16>) + RET 0, implicit %xmm0 + +... +--- +name: test_add_v4i32 +# ALL-LABEL: name: test_add_v4i32 +alignment: 4 +legalized: true +regBankSelected: true +# NOVL: registers: +# NOVL-NEXT: - { id: 0, class: vr128 } +# NOVL-NEXT: - { id: 1, class: vr128 } +# NOVL-NEXT: - { id: 2, class: vr128 } +# +# AVX512VL: registers: +# AVX512VL-NEXT: - { id: 0, class: vr128x } +# AVX512VL-NEXT: - { id: 1, class: vr128x } +# AVX512VL-NEXT: - { id: 2, class: vr128x } +# +# AVX512BWVL: registers: +# AVX512BWVL-NEXT: - { id: 0, class: vr128x } +# AVX512BWVL-NEXT: - { id: 1, class: vr128x } +# AVX512BWVL-NEXT: - { id: 2, class: vr128x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# SSE2: %2 = PADDDrr %0, %1 +# +# AVX1: %2 = VPADDDrr %0, %1 +# +# AVX512VL: %2 = VPADDDZ128rr %0, %1 +# +# AVX512BWVL: %2 = VPADDDZ128rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<4 x s32>) = COPY %xmm0 + %1(<4 x s32>) = COPY %xmm1 + %2(<4 x s32>) = G_ADD %0, %1 + %xmm0 = COPY %2(<4 x s32>) + RET 0, implicit %xmm0 + +... +--- +name: test_add_v2i64 +# ALL-LABEL: name: test_add_v2i64 +alignment: 4 +legalized: true +regBankSelected: true +# NOVL: registers: +# NOVL-NEXT: - { id: 0, class: vr128 } +# NOVL-NEXT: - { id: 1, class: vr128 } +# NOVL-NEXT: - { id: 2, class: vr128 } +# +# AVX512VL: registers: +# AVX512VL-NEXT: - { id: 0, class: vr128x } +# AVX512VL-NEXT: - { id: 1, class: vr128x } +# AVX512VL-NEXT: - { id: 2, class: vr128x } +# +# AVX512BWVL: registers: +# AVX512BWVL-NEXT: - { id: 0, class: vr128x } +# AVX512BWVL-NEXT: - { id: 1, class: vr128x } +# AVX512BWVL-NEXT: - { id: 2, class: vr128x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# SSE2: %2 = PADDQrr %0, %1 +# +# AVX1: %2 = VPADDQrr %0, %1 +# +# AVX512VL: %2 = VPADDQZ128rr %0, %1 +# +# AVX512BWVL: %2 = VPADDQZ128rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<2 x s64>) = COPY %xmm0 + %1(<2 x s64>) = COPY %xmm1 + %2(<2 x s64>) = G_ADD %0, %1 + %xmm0 = COPY %2(<2 x s64>) + RET 0, implicit %xmm0 + +... 
diff --git a/test/CodeGen/X86/GlobalISel/select-add-v256.mir b/test/CodeGen/X86/GlobalISel/select-add-v256.mir new file mode 100644 index 000000000000..7556c2104124 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/select-add-v256.mir @@ -0,0 +1,185 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX2 +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl,+avx512bw -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BWVL + +--- | + define <32 x i8> @test_add_v32i8(<32 x i8> %arg1, <32 x i8> %arg2) { + %ret = add <32 x i8> %arg1, %arg2 + ret <32 x i8> %ret + } + + define <16 x i16> @test_add_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) { + %ret = add <16 x i16> %arg1, %arg2 + ret <16 x i16> %ret + } + + define <8 x i32> @test_add_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) { + %ret = add <8 x i32> %arg1, %arg2 + ret <8 x i32> %ret + } + + define <4 x i64> @test_add_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) { + %ret = add <4 x i64> %arg1, %arg2 + ret <4 x i64> %ret + } +... +--- +name: test_add_v32i8 +# ALL-LABEL: name: test_add_v32i8 +alignment: 4 +legalized: true +regBankSelected: true +# AVX2: registers: +# AVX2-NEXT: - { id: 0, class: vr256 } +# AVX2-NEXT: - { id: 1, class: vr256 } +# AVX2-NEXT: - { id: 2, class: vr256 } +# +# AVX512VL: registers: +# AVX512VL-NEXT: - { id: 0, class: vr256 } +# AVX512VL-NEXT: - { id: 1, class: vr256 } +# AVX512VL-NEXT: - { id: 2, class: vr256 } +# +# AVX512BWVL: registers: +# AVX512BWVL-NEXT: - { id: 0, class: vr256x } +# AVX512BWVL-NEXT: - { id: 1, class: vr256x } +# AVX512BWVL-NEXT: - { id: 2, class: vr256x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# AVX2: %2 = VPADDBYrr %0, %1 +# +# AVX512VL: %2 = VPADDBYrr %0, %1 +# +# AVX512BWVL: %2 = VPADDBZ256rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<32 x s8>) = COPY %ymm0 + %1(<32 x s8>) = COPY %ymm1 + %2(<32 x s8>) = G_ADD %0, %1 + %ymm0 = COPY %2(<32 x s8>) + RET 0, implicit %ymm0 + +... +--- +name: test_add_v16i16 +# ALL-LABEL: name: test_add_v16i16 +alignment: 4 +legalized: true +regBankSelected: true +# AVX2: registers: +# AVX2-NEXT: - { id: 0, class: vr256 } +# AVX2-NEXT: - { id: 1, class: vr256 } +# AVX2-NEXT: - { id: 2, class: vr256 } +# +# AVX512VL: registers: +# AVX512VL-NEXT: - { id: 0, class: vr256 } +# AVX512VL-NEXT: - { id: 1, class: vr256 } +# AVX512VL-NEXT: - { id: 2, class: vr256 } +# +# AVX512BWVL: registers: +# AVX512BWVL-NEXT: - { id: 0, class: vr256x } +# AVX512BWVL-NEXT: - { id: 1, class: vr256x } +# AVX512BWVL-NEXT: - { id: 2, class: vr256x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# AVX2: %2 = VPADDWYrr %0, %1 +# +# AVX512VL: %2 = VPADDWYrr %0, %1 +# +# AVX512BWVL: %2 = VPADDWZ256rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<16 x s16>) = COPY %ymm0 + %1(<16 x s16>) = COPY %ymm1 + %2(<16 x s16>) = G_ADD %0, %1 + %ymm0 = COPY %2(<16 x s16>) + RET 0, implicit %ymm0 + +... 
+--- +name: test_add_v8i32 +# ALL-LABEL: name: test_add_v8i32 +alignment: 4 +legalized: true +regBankSelected: true +# AVX2: registers: +# AVX2-NEXT: - { id: 0, class: vr256 } +# AVX2-NEXT: - { id: 1, class: vr256 } +# AVX2-NEXT: - { id: 2, class: vr256 } +# +# AVX512VL: registers: +# AVX512VL-NEXT: - { id: 0, class: vr256x } +# AVX512VL-NEXT: - { id: 1, class: vr256x } +# AVX512VL-NEXT: - { id: 2, class: vr256x } +# +# AVX512BWVL: registers: +# AVX512BWVL-NEXT: - { id: 0, class: vr256x } +# AVX512BWVL-NEXT: - { id: 1, class: vr256x } +# AVX512BWVL-NEXT: - { id: 2, class: vr256x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# AVX2: %2 = VPADDDYrr %0, %1 +# +# AVX512VL: %2 = VPADDDZ256rr %0, %1 +# +# AVX512BWVL: %2 = VPADDDZ256rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<8 x s32>) = COPY %ymm0 + %1(<8 x s32>) = COPY %ymm1 + %2(<8 x s32>) = G_ADD %0, %1 + %ymm0 = COPY %2(<8 x s32>) + RET 0, implicit %ymm0 + +... +--- +name: test_add_v4i64 +# ALL-LABEL: name: test_add_v4i64 +alignment: 4 +legalized: true +regBankSelected: true +# AVX2: registers: +# AVX2-NEXT: - { id: 0, class: vr256 } +# AVX2-NEXT: - { id: 1, class: vr256 } +# AVX2-NEXT: - { id: 2, class: vr256 } +# +# AVX512VL: registers: +# AVX512VL-NEXT: - { id: 0, class: vr256x } +# AVX512VL-NEXT: - { id: 1, class: vr256x } +# AVX512VL-NEXT: - { id: 2, class: vr256x } +# +# AVX512BWVL: registers: +# AVX512BWVL-NEXT: - { id: 0, class: vr256x } +# AVX512BWVL-NEXT: - { id: 1, class: vr256x } +# AVX512BWVL-NEXT: - { id: 2, class: vr256x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# AVX2: %2 = VPADDQYrr %0, %1 +# +# AVX512VL: %2 = VPADDQZ256rr %0, %1 +# +# AVX512BWVL: %2 = VPADDQZ256rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<4 x s64>) = COPY %ymm0 + %1(<4 x s64>) = COPY %ymm1 + %2(<4 x s64>) = G_ADD %0, %1 + %ymm0 = COPY %2(<4 x s64>) + RET 0, implicit %ymm0 + +... diff --git a/test/CodeGen/X86/GlobalISel/select-add-v512.mir b/test/CodeGen/X86/GlobalISel/select-add-v512.mir new file mode 100644 index 000000000000..e90be4e996f8 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/select-add-v512.mir @@ -0,0 +1,130 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL + +--- | + define <64 x i8> @test_add_v64i8(<64 x i8> %arg1, <64 x i8> %arg2) #0 { + %ret = add <64 x i8> %arg1, %arg2 + ret <64 x i8> %ret + } + + define <32 x i16> @test_add_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) #0 { + %ret = add <32 x i16> %arg1, %arg2 + ret <32 x i16> %ret + } + + define <16 x i32> @test_add_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) #1 { + %ret = add <16 x i32> %arg1, %arg2 + ret <16 x i32> %ret + } + + define <8 x i64> @test_add_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) #1 { + %ret = add <8 x i64> %arg1, %arg2 + ret <8 x i64> %ret + } + + attributes #0 = { "target-features"="+avx512f,+avx512bw" } + attributes #1 = { "target-features"="+avx512f" } +... 
+--- +name: test_add_v64i8 +# ALL-LABEL: name: test_add_v64i8 +alignment: 4 +legalized: true +regBankSelected: true +# ALL: registers: +# ALL-NEXT: - { id: 0, class: vr512 } +# ALL-NEXT: - { id: 1, class: vr512 } +# ALL-NEXT: - { id: 2, class: vr512 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# ALL: %2 = VPADDBZrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<64 x s8>) = COPY %zmm0 + %1(<64 x s8>) = COPY %zmm1 + %2(<64 x s8>) = G_ADD %0, %1 + %zmm0 = COPY %2(<64 x s8>) + RET 0, implicit %zmm0 + +... +--- +name: test_add_v32i16 +# ALL-LABEL: name: test_add_v32i16 +alignment: 4 +legalized: true +regBankSelected: true +# ALL: registers: +# ALL-NEXT: - { id: 0, class: vr512 } +# ALL-NEXT: - { id: 1, class: vr512 } +# ALL-NEXT: - { id: 2, class: vr512 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# ALL: %2 = VPADDWZrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<32 x s16>) = COPY %zmm0 + %1(<32 x s16>) = COPY %zmm1 + %2(<32 x s16>) = G_ADD %0, %1 + %zmm0 = COPY %2(<32 x s16>) + RET 0, implicit %zmm0 + +... +--- +name: test_add_v16i32 +# ALL-LABEL: name: test_add_v16i32 +alignment: 4 +legalized: true +regBankSelected: true +# ALL: registers: +# ALL-NEXT: - { id: 0, class: vr512 } +# ALL-NEXT: - { id: 1, class: vr512 } +# ALL-NEXT: - { id: 2, class: vr512 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# ALL: %2 = VPADDDZrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<16 x s32>) = COPY %zmm0 + %1(<16 x s32>) = COPY %zmm1 + %2(<16 x s32>) = G_ADD %0, %1 + %zmm0 = COPY %2(<16 x s32>) + RET 0, implicit %zmm0 + +... +--- +name: test_add_v8i64 +# ALL-LABEL: name: test_add_v8i64 +alignment: 4 +legalized: true +regBankSelected: true +# ALL: registers: +# ALL-NEXT: - { id: 0, class: vr512 } +# ALL-NEXT: - { id: 1, class: vr512 } +# ALL-NEXT: - { id: 2, class: vr512 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# ALL: %2 = VPADDQZrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<8 x s64>) = COPY %zmm0 + %1(<8 x s64>) = COPY %zmm1 + %2(<8 x s64>) = G_ADD %0, %1 + %zmm0 = COPY %2(<8 x s64>) + RET 0, implicit %zmm0 + +... 
diff --git a/test/CodeGen/X86/GlobalISel/select-br.mir b/test/CodeGen/X86/GlobalISel/select-br.mir index 6d8cd2b1367d..9d2a878e7575 100644 --- a/test/CodeGen/X86/GlobalISel/select-br.mir +++ b/test/CodeGen/X86/GlobalISel/select-br.mir @@ -1,5 +1,5 @@ -# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64 -# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32 +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64 +# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32 --- | define void @uncondbr() { diff --git a/test/CodeGen/X86/GlobalISel/select-cmp.mir b/test/CodeGen/X86/GlobalISel/select-cmp.mir index 1d3da6cb88b9..a92c388c1db9 100644 --- a/test/CodeGen/X86/GlobalISel/select-cmp.mir +++ b/test/CodeGen/X86/GlobalISel/select-cmp.mir @@ -1,4 +1,4 @@ -# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --- | define i32 @test_icmp_eq_i8(i8 %a, i8 %b) { diff --git a/test/CodeGen/X86/GlobalISel/select-constant.mir b/test/CodeGen/X86/GlobalISel/select-constant.mir index f6b97b578b92..162de0264435 100644 --- a/test/CodeGen/X86/GlobalISel/select-constant.mir +++ b/test/CodeGen/X86/GlobalISel/select-constant.mir @@ -1,4 +1,4 @@ -# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --- | define i8 @const_i8() { diff --git a/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir b/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir index 0844701487bc..d1a3abfd0f93 100644 --- a/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir +++ b/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir @@ -1,4 +1,4 @@ -# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 --- | define i64 @test_zext_i1(i8 %a) { diff --git a/test/CodeGen/X86/GlobalISel/select-ext.mir b/test/CodeGen/X86/GlobalISel/select-ext.mir index 831d6efb75f1..dccc20e57100 100644 --- a/test/CodeGen/X86/GlobalISel/select-ext.mir +++ b/test/CodeGen/X86/GlobalISel/select-ext.mir @@ -1,5 +1,5 @@ -# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32 -# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 +# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32 +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 --- | define i32 
@test_zext_i1(i1 %a) { diff --git a/test/CodeGen/X86/GlobalISel/select-frameIndex.mir b/test/CodeGen/X86/GlobalISel/select-frameIndex.mir index 2fa9ac23a7af..1d641ba279af 100644 --- a/test/CodeGen/X86/GlobalISel/select-frameIndex.mir +++ b/test/CodeGen/X86/GlobalISel/select-frameIndex.mir @@ -1,6 +1,6 @@ -# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64 -# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32 -# RUN: llc -mtriple=x86_64-linux-gnux32 -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32ABI +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64 +# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32 +# RUN: llc -mtriple=x86_64-linux-gnux32 -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32ABI --- | define i32* @allocai32() { diff --git a/test/CodeGen/X86/GlobalISel/select-gep.mir b/test/CodeGen/X86/GlobalISel/select-gep.mir index 2c89b7057c3d..c8a4dc80cb2c 100644 --- a/test/CodeGen/X86/GlobalISel/select-gep.mir +++ b/test/CodeGen/X86/GlobalISel/select-gep.mir @@ -1,4 +1,4 @@ -# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --- | define i32* @test_gep_i32(i32* %arr) { diff --git a/test/CodeGen/X86/GlobalISel/select-sub-v128.mir b/test/CodeGen/X86/GlobalISel/select-sub-v128.mir new file mode 100644 index 000000000000..d60d4155e29d --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/select-sub-v128.mir @@ -0,0 +1,195 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=SSE2 +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=AVX1 +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl,+avx512bw -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BWVL + +--- | + define <16 x i8> @test_sub_v16i8(<16 x i8> %arg1, <16 x i8> %arg2) { + %ret = sub <16 x i8> %arg1, %arg2 + ret <16 x i8> %ret + } + + define <8 x i16> @test_sub_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) { + %ret = sub <8 x i16> %arg1, %arg2 + ret <8 x i16> %ret + } + + define <4 x i32> @test_sub_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) { + %ret = sub <4 x i32> %arg1, %arg2 + ret <4 x i32> %ret + } + + define <2 x i64> @test_sub_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) { + %ret = sub <2 x i64> %arg1, %arg2 + ret <2 x i64> %ret + } + +... 
+--- +name: test_sub_v16i8 +# ALL-LABEL: name: test_sub_v16i8 +alignment: 4 +legalized: true +regBankSelected: true +# NOVL: registers: +# NOVL-NEXT: - { id: 0, class: vr128 } +# NOVL-NEXT: - { id: 1, class: vr128 } +# NOVL-NEXT: - { id: 2, class: vr128 } +# +# AVX512VL: registers: +# AVX512VL-NEXT: - { id: 0, class: vr128 } +# AVX512VL-NEXT: - { id: 1, class: vr128 } +# AVX512VL-NEXT: - { id: 2, class: vr128 } +# +# AVX512BWVL: registers: +# AVX512BWVL-NEXT: - { id: 0, class: vr128x } +# AVX512BWVL-NEXT: - { id: 1, class: vr128x } +# AVX512BWVL-NEXT: - { id: 2, class: vr128x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# SSE2: %2 = PSUBBrr %0, %1 +# +# AVX1: %2 = VPSUBBrr %0, %1 +# +# AVX512VL: %2 = VPSUBBrr %0, %1 +# +# AVX512BWVL: %2 = VPSUBBZ128rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<16 x s8>) = COPY %xmm0 + %1(<16 x s8>) = COPY %xmm1 + %2(<16 x s8>) = G_SUB %0, %1 + %xmm0 = COPY %2(<16 x s8>) + RET 0, implicit %xmm0 + +... +--- +name: test_sub_v8i16 +# ALL-LABEL: name: test_sub_v8i16 +alignment: 4 +legalized: true +regBankSelected: true +# NOVL: registers: +# NOVL-NEXT: - { id: 0, class: vr128 } +# NOVL-NEXT: - { id: 1, class: vr128 } +# NOVL-NEXT: - { id: 2, class: vr128 } +# +# AVX512VL: registers: +# AVX512VL-NEXT: - { id: 0, class: vr128 } +# AVX512VL-NEXT: - { id: 1, class: vr128 } +# AVX512VL-NEXT: - { id: 2, class: vr128 } +# +# AVX512BWVL: registers: +# AVX512BWVL-NEXT: - { id: 0, class: vr128x } +# AVX512BWVL-NEXT: - { id: 1, class: vr128x } +# AVX512BWVL-NEXT: - { id: 2, class: vr128x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# SSE2: %2 = PSUBWrr %0, %1 +# +# AVX1: %2 = VPSUBWrr %0, %1 +# +# AVX512VL: %2 = VPSUBWrr %0, %1 +# +# AVX512BWVL: %2 = VPSUBWZ128rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<8 x s16>) = COPY %xmm0 + %1(<8 x s16>) = COPY %xmm1 + %2(<8 x s16>) = G_SUB %0, %1 + %xmm0 = COPY %2(<8 x s16>) + RET 0, implicit %xmm0 + +... +--- +name: test_sub_v4i32 +# ALL-LABEL: name: test_sub_v4i32 +alignment: 4 +legalized: true +regBankSelected: true +# NOVL: registers: +# NOVL-NEXT: - { id: 0, class: vr128 } +# NOVL-NEXT: - { id: 1, class: vr128 } +# NOVL-NEXT: - { id: 2, class: vr128 } +# +# AVX512VL: registers: +# AVX512VL-NEXT: - { id: 0, class: vr128x } +# AVX512VL-NEXT: - { id: 1, class: vr128x } +# AVX512VL-NEXT: - { id: 2, class: vr128x } +# +# AVX512BWVL: registers: +# AVX512BWVL-NEXT: - { id: 0, class: vr128x } +# AVX512BWVL-NEXT: - { id: 1, class: vr128x } +# AVX512BWVL-NEXT: - { id: 2, class: vr128x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# SSE2: %2 = PSUBDrr %0, %1 +# +# AVX1: %2 = VPSUBDrr %0, %1 +# +# AVX512VL: %2 = VPSUBDZ128rr %0, %1 +# +# AVX512BWVL: %2 = VPSUBDZ128rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<4 x s32>) = COPY %xmm0 + %1(<4 x s32>) = COPY %xmm1 + %2(<4 x s32>) = G_SUB %0, %1 + %xmm0 = COPY %2(<4 x s32>) + RET 0, implicit %xmm0 + +... 
+--- +name: test_sub_v2i64 +# ALL-LABEL: name: test_sub_v2i64 +alignment: 4 +legalized: true +regBankSelected: true +# NOVL: registers: +# NOVL-NEXT: - { id: 0, class: vr128 } +# NOVL-NEXT: - { id: 1, class: vr128 } +# NOVL-NEXT: - { id: 2, class: vr128 } +# +# AVX512VL: registers: +# AVX512VL-NEXT: - { id: 0, class: vr128x } +# AVX512VL-NEXT: - { id: 1, class: vr128x } +# AVX512VL-NEXT: - { id: 2, class: vr128x } +# +# AVX512BWVL: registers: +# AVX512BWVL-NEXT: - { id: 0, class: vr128x } +# AVX512BWVL-NEXT: - { id: 1, class: vr128x } +# AVX512BWVL-NEXT: - { id: 2, class: vr128x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# SSE2: %2 = PSUBQrr %0, %1 +# +# AVX1: %2 = VPSUBQrr %0, %1 +# +# AVX512VL: %2 = VPSUBQZ128rr %0, %1 +# +# AVX512BWVL: %2 = VPSUBQZ128rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %xmm0, %xmm1 + + %0(<2 x s64>) = COPY %xmm0 + %1(<2 x s64>) = COPY %xmm1 + %2(<2 x s64>) = G_SUB %0, %1 + %xmm0 = COPY %2(<2 x s64>) + RET 0, implicit %xmm0 + +... diff --git a/test/CodeGen/X86/GlobalISel/select-sub-v256.mir b/test/CodeGen/X86/GlobalISel/select-sub-v256.mir new file mode 100644 index 000000000000..fbc44997b4a2 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/select-sub-v256.mir @@ -0,0 +1,185 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX2 +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl,+avx512bw -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BWVL + +--- | + define <32 x i8> @test_sub_v32i8(<32 x i8> %arg1, <32 x i8> %arg2) { + %ret = sub <32 x i8> %arg1, %arg2 + ret <32 x i8> %ret + } + + define <16 x i16> @test_sub_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) { + %ret = sub <16 x i16> %arg1, %arg2 + ret <16 x i16> %ret + } + + define <8 x i32> @test_sub_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) { + %ret = sub <8 x i32> %arg1, %arg2 + ret <8 x i32> %ret + } + + define <4 x i64> @test_sub_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) { + %ret = sub <4 x i64> %arg1, %arg2 + ret <4 x i64> %ret + } +... +--- +name: test_sub_v32i8 +# ALL-LABEL: name: test_sub_v32i8 +alignment: 4 +legalized: true +regBankSelected: true +# AVX2: registers: +# AVX2-NEXT: - { id: 0, class: vr256 } +# AVX2-NEXT: - { id: 1, class: vr256 } +# AVX2-NEXT: - { id: 2, class: vr256 } +# +# AVX512VL: registers: +# AVX512VL-NEXT: - { id: 0, class: vr256 } +# AVX512VL-NEXT: - { id: 1, class: vr256 } +# AVX512VL-NEXT: - { id: 2, class: vr256 } +# +# AVX512BWVL: registers: +# AVX512BWVL-NEXT: - { id: 0, class: vr256x } +# AVX512BWVL-NEXT: - { id: 1, class: vr256x } +# AVX512BWVL-NEXT: - { id: 2, class: vr256x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# AVX2: %2 = VPSUBBYrr %0, %1 +# +# AVX512VL: %2 = VPSUBBYrr %0, %1 +# +# AVX512BWVL: %2 = VPSUBBZ256rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<32 x s8>) = COPY %ymm0 + %1(<32 x s8>) = COPY %ymm1 + %2(<32 x s8>) = G_SUB %0, %1 + %ymm0 = COPY %2(<32 x s8>) + RET 0, implicit %ymm0 + +... 
+--- +name: test_sub_v16i16 +# ALL-LABEL: name: test_sub_v16i16 +alignment: 4 +legalized: true +regBankSelected: true +# AVX2: registers: +# AVX2-NEXT: - { id: 0, class: vr256 } +# AVX2-NEXT: - { id: 1, class: vr256 } +# AVX2-NEXT: - { id: 2, class: vr256 } +# +# AVX512VL: registers: +# AVX512VL-NEXT: - { id: 0, class: vr256 } +# AVX512VL-NEXT: - { id: 1, class: vr256 } +# AVX512VL-NEXT: - { id: 2, class: vr256 } +# +# AVX512BWVL: registers: +# AVX512BWVL-NEXT: - { id: 0, class: vr256x } +# AVX512BWVL-NEXT: - { id: 1, class: vr256x } +# AVX512BWVL-NEXT: - { id: 2, class: vr256x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# AVX2: %2 = VPSUBWYrr %0, %1 +# +# AVX512VL: %2 = VPSUBWYrr %0, %1 +# +# AVX512BWVL: %2 = VPSUBWZ256rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<16 x s16>) = COPY %ymm0 + %1(<16 x s16>) = COPY %ymm1 + %2(<16 x s16>) = G_SUB %0, %1 + %ymm0 = COPY %2(<16 x s16>) + RET 0, implicit %ymm0 + +... +--- +name: test_sub_v8i32 +# ALL-LABEL: name: test_sub_v8i32 +alignment: 4 +legalized: true +regBankSelected: true +# AVX2: registers: +# AVX2-NEXT: - { id: 0, class: vr256 } +# AVX2-NEXT: - { id: 1, class: vr256 } +# AVX2-NEXT: - { id: 2, class: vr256 } +# +# AVX512VL: registers: +# AVX512VL-NEXT: - { id: 0, class: vr256x } +# AVX512VL-NEXT: - { id: 1, class: vr256x } +# AVX512VL-NEXT: - { id: 2, class: vr256x } +# +# AVX512BWVL: registers: +# AVX512BWVL-NEXT: - { id: 0, class: vr256x } +# AVX512BWVL-NEXT: - { id: 1, class: vr256x } +# AVX512BWVL-NEXT: - { id: 2, class: vr256x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# AVX2: %2 = VPSUBDYrr %0, %1 +# +# AVX512VL: %2 = VPSUBDZ256rr %0, %1 +# +# AVX512BWVL: %2 = VPSUBDZ256rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<8 x s32>) = COPY %ymm0 + %1(<8 x s32>) = COPY %ymm1 + %2(<8 x s32>) = G_SUB %0, %1 + %ymm0 = COPY %2(<8 x s32>) + RET 0, implicit %ymm0 + +... +--- +name: test_sub_v4i64 +# ALL-LABEL: name: test_sub_v4i64 +alignment: 4 +legalized: true +regBankSelected: true +# AVX2: registers: +# AVX2-NEXT: - { id: 0, class: vr256 } +# AVX2-NEXT: - { id: 1, class: vr256 } +# AVX2-NEXT: - { id: 2, class: vr256 } +# +# AVX512VL: registers: +# AVX512VL-NEXT: - { id: 0, class: vr256x } +# AVX512VL-NEXT: - { id: 1, class: vr256x } +# AVX512VL-NEXT: - { id: 2, class: vr256x } +# +# AVX512BWVL: registers: +# AVX512BWVL-NEXT: - { id: 0, class: vr256x } +# AVX512BWVL-NEXT: - { id: 1, class: vr256x } +# AVX512BWVL-NEXT: - { id: 2, class: vr256x } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# AVX2: %2 = VPSUBQYrr %0, %1 +# +# AVX512VL: %2 = VPSUBQZ256rr %0, %1 +# +# AVX512BWVL: %2 = VPSUBQZ256rr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %ymm0, %ymm1 + + %0(<4 x s64>) = COPY %ymm0 + %1(<4 x s64>) = COPY %ymm1 + %2(<4 x s64>) = G_SUB %0, %1 + %ymm0 = COPY %2(<4 x s64>) + RET 0, implicit %ymm0 + +... 
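Each RUN line in these .mir tests is executed by llvm-lit with %s replaced by the test file's own path, so the file doubles as llc input and FileCheck pattern file. As a rough sketch, the AVX2 run of select-sub-v256.mir above expands to roughly the following command (in-tree path assumed):

  llc -mtriple=x86_64-linux-gnu -mattr=+avx2 -global-isel -run-pass=instruction-select -verify-machineinstrs test/CodeGen/X86/GlobalISel/select-sub-v256.mir -o - | FileCheck test/CodeGen/X86/GlobalISel/select-sub-v256.mir --check-prefix=ALL --check-prefix=AVX2

Passing both --check-prefix=ALL and --check-prefix=AVX2 makes FileCheck honor the shared ALL lines as well as the AVX2-specific instruction checks.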
diff --git a/test/CodeGen/X86/GlobalISel/select-sub-v512.mir b/test/CodeGen/X86/GlobalISel/select-sub-v512.mir new file mode 100644 index 000000000000..dcd05f056949 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/select-sub-v512.mir @@ -0,0 +1,130 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL + +--- | + define <64 x i8> @test_sub_v64i8(<64 x i8> %arg1, <64 x i8> %arg2) #0 { + %ret = sub <64 x i8> %arg1, %arg2 + ret <64 x i8> %ret + } + + define <32 x i16> @test_sub_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) #0 { + %ret = sub <32 x i16> %arg1, %arg2 + ret <32 x i16> %ret + } + + define <16 x i32> @test_sub_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) #1 { + %ret = sub <16 x i32> %arg1, %arg2 + ret <16 x i32> %ret + } + + define <8 x i64> @test_sub_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) #1 { + %ret = sub <8 x i64> %arg1, %arg2 + ret <8 x i64> %ret + } + + attributes #0 = { "target-features"="+avx512f,+avx512bw" } + attributes #1 = { "target-features"="+avx512f" } +... +--- +name: test_sub_v64i8 +# ALL-LABEL: name: test_sub_v64i8 +alignment: 4 +legalized: true +regBankSelected: true +# ALL: registers: +# ALL-NEXT: - { id: 0, class: vr512 } +# ALL-NEXT: - { id: 1, class: vr512 } +# ALL-NEXT: - { id: 2, class: vr512 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# ALL: %2 = VPSUBBZrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<64 x s8>) = COPY %zmm0 + %1(<64 x s8>) = COPY %zmm1 + %2(<64 x s8>) = G_SUB %0, %1 + %zmm0 = COPY %2(<64 x s8>) + RET 0, implicit %zmm0 + +... +--- +name: test_sub_v32i16 +# ALL-LABEL: name: test_sub_v32i16 +alignment: 4 +legalized: true +regBankSelected: true +# ALL: registers: +# ALL-NEXT: - { id: 0, class: vr512 } +# ALL-NEXT: - { id: 1, class: vr512 } +# ALL-NEXT: - { id: 2, class: vr512 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# ALL: %2 = VPSUBWZrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<32 x s16>) = COPY %zmm0 + %1(<32 x s16>) = COPY %zmm1 + %2(<32 x s16>) = G_SUB %0, %1 + %zmm0 = COPY %2(<32 x s16>) + RET 0, implicit %zmm0 + +... +--- +name: test_sub_v16i32 +# ALL-LABEL: name: test_sub_v16i32 +alignment: 4 +legalized: true +regBankSelected: true +# ALL: registers: +# ALL-NEXT: - { id: 0, class: vr512 } +# ALL-NEXT: - { id: 1, class: vr512 } +# ALL-NEXT: - { id: 2, class: vr512 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# ALL: %2 = VPSUBDZrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<16 x s32>) = COPY %zmm0 + %1(<16 x s32>) = COPY %zmm1 + %2(<16 x s32>) = G_SUB %0, %1 + %zmm0 = COPY %2(<16 x s32>) + RET 0, implicit %zmm0 + +... +--- +name: test_sub_v8i64 +# ALL-LABEL: name: test_sub_v8i64 +alignment: 4 +legalized: true +regBankSelected: true +# ALL: registers: +# ALL-NEXT: - { id: 0, class: vr512 } +# ALL-NEXT: - { id: 1, class: vr512 } +# ALL-NEXT: - { id: 2, class: vr512 } +registers: + - { id: 0, class: vecr } + - { id: 1, class: vecr } + - { id: 2, class: vecr } +# ALL: %2 = VPSUBQZrr %0, %1 +body: | + bb.1 (%ir-block.0): + liveins: %zmm0, %zmm1 + + %0(<8 x s64>) = COPY %zmm0 + %1(<8 x s64>) = COPY %zmm1 + %2(<8 x s64>) = G_SUB %0, %1 + %zmm0 = COPY %2(<8 x s64>) + RET 0, implicit %zmm0 + +... 
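Note that select-sub-v512.mir takes a different tack from the v128/v256 tests: instead of selecting CPU features with -mattr on separate RUN lines, it attaches "target-features" attributes to the individual functions, so a single RUN line covers both the AVX512BW-dependent byte/word cases and the plain AVX512F dword/qword cases. A minimal sketch of that pattern (function name illustrative):

  ; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s
  define <64 x i8> @needs_bw(<64 x i8> %a, <64 x i8> %b) #0 {
    %r = sub <64 x i8> %a, %b
    ret <64 x i8> %r
  }
  attributes #0 = { "target-features"="+avx512f,+avx512bw" }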
diff --git a/test/CodeGen/X86/GlobalISel/select-trunc.mir b/test/CodeGen/X86/GlobalISel/select-trunc.mir index 714340248ff6..9b90543d6559 100644 --- a/test/CodeGen/X86/GlobalISel/select-trunc.mir +++ b/test/CodeGen/X86/GlobalISel/select-trunc.mir @@ -1,4 +1,4 @@ -# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --- | define i1 @trunc_i32toi1(i32 %a) { %r = trunc i32 %a to i1 @@ -33,19 +33,20 @@ ... --- name: trunc_i32toi1 +# CHECK-LABEL: name: trunc_i32toi1 alignment: 4 legalized: true regBankSelected: true -selected: false -# CHECK-LABEL: name: trunc_i32toi1 -# CHECK: registers: -# CHECK-NEXT: - { id: 0, class: gr32 } -# CHECK-NEXT: - { id: 1, class: gr8 } +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: gr32 } +# CHECK-NEXT: - { id: 1, class: gr8 } registers: - { id: 0, class: gpr } - { id: 1, class: gpr } -# CHECK: body: -# CHECK: %1 = COPY %0.sub_8 +# CHECK: %0 = COPY %edi +# CHECK-NEXT: %1 = COPY %0.sub_8bit +# CHECK-NEXT: %al = COPY %1 +# CHECK-NEXT: RET 0, implicit %al body: | bb.1 (%ir-block.0): liveins: %edi @@ -58,19 +59,20 @@ body: | ... --- name: trunc_i32toi8 +# CHECK-LABEL: name: trunc_i32toi8 alignment: 4 legalized: true regBankSelected: true -selected: false -# CHECK-LABEL: name: trunc_i32toi8 -# CHECK: registers: -# CHECK-NEXT: - { id: 0, class: gr32 } -# CHECK-NEXT: - { id: 1, class: gr8 } +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: gr32 } +# CHECK-NEXT: - { id: 1, class: gr8 } registers: - { id: 0, class: gpr } - { id: 1, class: gpr } -# CHECK: body: -# CHECK: %1 = COPY %0.sub_8 +# CHECK: %0 = COPY %edi +# CHECK-NEXT: %1 = COPY %0.sub_8bit +# CHECK-NEXT: %al = COPY %1 +# CHECK-NEXT: RET 0, implicit %al body: | bb.1 (%ir-block.0): liveins: %edi @@ -83,19 +85,20 @@ body: | ... --- name: trunc_i32toi16 +# CHECK-LABEL: name: trunc_i32toi16 alignment: 4 legalized: true regBankSelected: true -selected: false -# CHECK-LABEL: name: trunc_i32toi16 -# CHECK: registers: -# CHECK-NEXT: - { id: 0, class: gr32 } -# CHECK-NEXT: - { id: 1, class: gr16 } +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: gr32 } +# CHECK-NEXT: - { id: 1, class: gr16 } registers: - { id: 0, class: gpr } - { id: 1, class: gpr } -# CHECK: body: -# CHECK: %1 = COPY %0.sub_16 +# CHECK: %0 = COPY %edi +# CHECK-NEXT: %1 = COPY %0.sub_16bit +# CHECK-NEXT: %ax = COPY %1 +# CHECK-NEXT: RET 0, implicit %ax body: | bb.1 (%ir-block.0): liveins: %edi @@ -108,19 +111,20 @@ body: | ... --- name: trunc_i64toi8 +# CHECK-LABEL: name: trunc_i64toi8 alignment: 4 legalized: true regBankSelected: true -selected: false -# CHECK-LABEL: name: trunc_i64toi8 -# CHECK: registers: -# CHECK-NEXT: - { id: 0, class: gr64 } -# CHECK-NEXT: - { id: 1, class: gr8 } +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: gr64_with_sub_8bit } +# CHECK-NEXT: - { id: 1, class: gr8 } registers: - { id: 0, class: gpr } - { id: 1, class: gpr } -# CHECK: body: -# CHECK: %1 = COPY %0.sub_8 +# CHECK: %0 = COPY %rdi +# CHECK-NEXT: %1 = COPY %0.sub_8bit +# CHECK-NEXT: %al = COPY %1 +# CHECK-NEXT: RET 0, implicit %al body: | bb.1 (%ir-block.0): liveins: %rdi @@ -133,19 +137,20 @@ body: | ... 
--- name: trunc_i64toi16 +# CHECK-LABEL: name: trunc_i64toi16 alignment: 4 legalized: true regBankSelected: true -selected: false -# CHECK-LABEL: name: trunc_i64toi16 -# CHECK: registers: -# CHECK-NEXT: - { id: 0, class: gr64 } -# CHECK-NEXT: - { id: 1, class: gr16 } +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: gr64 } +# CHECK-NEXT: - { id: 1, class: gr16 } registers: - { id: 0, class: gpr } - { id: 1, class: gpr } -# CHECK: body: -# CHECK: %1 = COPY %0.sub_16 +# CHECK: %0 = COPY %rdi +# CHECK-NEXT: %1 = COPY %0.sub_16bit +# CHECK-NEXT: %ax = COPY %1 +# CHECK-NEXT: RET 0, implicit %ax body: | bb.1 (%ir-block.0): liveins: %rdi @@ -158,19 +163,20 @@ body: | ... --- name: trunc_i64toi32 +# CHECK-LABEL: name: trunc_i64toi32 alignment: 4 legalized: true regBankSelected: true -selected: false -# CHECK-LABEL: name: trunc_i64toi32 -# CHECK: registers: -# CHECK-NEXT: - { id: 0, class: gr64 } -# CHECK-NEXT: - { id: 1, class: gr32 } +# CHECK: registers: +# CHECK-NEXT: - { id: 0, class: gr64 } +# CHECK-NEXT: - { id: 1, class: gr32 } registers: - { id: 0, class: gpr } - { id: 1, class: gpr } -# CHECK: body: -# CHECK: %1 = COPY %0.sub_32 +# CHECK: %0 = COPY %rdi +# CHECK-NEXT: %1 = COPY %0.sub_32bit +# CHECK-NEXT: %eax = COPY %1 +# CHECK-NEXT: RET 0, implicit %eax body: | bb.1 (%ir-block.0): liveins: %rdi diff --git a/test/CodeGen/X86/GlobalISel/sub-vec.ll b/test/CodeGen/X86/GlobalISel/sub-vec.ll new file mode 100644 index 000000000000..9caf18f0c0c7 --- /dev/null +++ b/test/CodeGen/X86/GlobalISel/sub-vec.ll @@ -0,0 +1,111 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=SKX + +define <16 x i8> @test_sub_v16i8(<16 x i8> %arg1, <16 x i8> %arg2) { +; SKX-LABEL: test_sub_v16i8: +; SKX: # BB#0: +; SKX-NEXT: vpsubb %xmm1, %xmm0, %xmm0 +; SKX-NEXT: retq + %ret = sub <16 x i8> %arg1, %arg2 + ret <16 x i8> %ret +} + +define <8 x i16> @test_sub_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) { +; SKX-LABEL: test_sub_v8i16: +; SKX: # BB#0: +; SKX-NEXT: vpsubw %xmm1, %xmm0, %xmm0 +; SKX-NEXT: retq + %ret = sub <8 x i16> %arg1, %arg2 + ret <8 x i16> %ret +} + +define <4 x i32> @test_sub_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) { +; SKX-LABEL: test_sub_v4i32: +; SKX: # BB#0: +; SKX-NEXT: vpsubd %xmm1, %xmm0, %xmm0 +; SKX-NEXT: retq + %ret = sub <4 x i32> %arg1, %arg2 + ret <4 x i32> %ret +} + +define <2 x i64> @test_sub_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) { +; SKX-LABEL: test_sub_v2i64: +; SKX: # BB#0: +; SKX-NEXT: vpsubq %xmm1, %xmm0, %xmm0 +; SKX-NEXT: retq + %ret = sub <2 x i64> %arg1, %arg2 + ret <2 x i64> %ret +} + +define <32 x i8> @test_sub_v32i8(<32 x i8> %arg1, <32 x i8> %arg2) { +; SKX-LABEL: test_sub_v32i8: +; SKX: # BB#0: +; SKX-NEXT: vpsubb %ymm1, %ymm0, %ymm0 +; SKX-NEXT: retq + %ret = sub <32 x i8> %arg1, %arg2 + ret <32 x i8> %ret +} + +define <16 x i16> @test_sub_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) { +; SKX-LABEL: test_sub_v16i16: +; SKX: # BB#0: +; SKX-NEXT: vpsubw %ymm1, %ymm0, %ymm0 +; SKX-NEXT: retq + %ret = sub <16 x i16> %arg1, %arg2 + ret <16 x i16> %ret +} + +define <8 x i32> @test_sub_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) { +; SKX-LABEL: test_sub_v8i32: +; SKX: # BB#0: +; SKX-NEXT: vpsubd %ymm1, %ymm0, %ymm0 +; SKX-NEXT: retq + %ret = sub <8 x i32> %arg1, %arg2 + ret <8 x i32> %ret +} + +define <4 x i64> @test_sub_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) { +; SKX-LABEL: test_sub_v4i64: +; SKX: # BB#0: +; SKX-NEXT: vpsubq 
%ymm1, %ymm0, %ymm0 +; SKX-NEXT: retq + %ret = sub <4 x i64> %arg1, %arg2 + ret <4 x i64> %ret +} + +define <64 x i8> @test_sub_v64i8(<64 x i8> %arg1, <64 x i8> %arg2) { +; SKX-LABEL: test_sub_v64i8: +; SKX: # BB#0: +; SKX-NEXT: vpsubb %zmm1, %zmm0, %zmm0 +; SKX-NEXT: retq + %ret = sub <64 x i8> %arg1, %arg2 + ret <64 x i8> %ret +} + +define <32 x i16> @test_sub_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) { +; SKX-LABEL: test_sub_v32i16: +; SKX: # BB#0: +; SKX-NEXT: vpsubw %zmm1, %zmm0, %zmm0 +; SKX-NEXT: retq + %ret = sub <32 x i16> %arg1, %arg2 + ret <32 x i16> %ret +} + +define <16 x i32> @test_sub_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) { +; SKX-LABEL: test_sub_v16i32: +; SKX: # BB#0: +; SKX-NEXT: vpsubd %zmm1, %zmm0, %zmm0 +; SKX-NEXT: retq + %ret = sub <16 x i32> %arg1, %arg2 + ret <16 x i32> %ret +} + +define <8 x i64> @test_sub_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) { +; SKX-LABEL: test_sub_v8i64: +; SKX: # BB#0: +; SKX-NEXT: vpsubq %zmm1, %zmm0, %zmm0 +; SKX-NEXT: retq + %ret = sub <8 x i64> %arg1, %arg2 + ret <8 x i64> %ret +} + diff --git a/test/CodeGen/X86/GlobalISel/trunc.ll b/test/CodeGen/X86/GlobalISel/trunc.ll index a56fc3b5a87f..6c0f01673afc 100644 --- a/test/CodeGen/X86/GlobalISel/trunc.ll +++ b/test/CodeGen/X86/GlobalISel/trunc.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=CHECK +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=CHECK define i1 @trunc_i32toi1(i32 %a) { ; CHECK-LABEL: trunc_i32toi1: diff --git a/test/CodeGen/X86/O0-pipeline.ll b/test/CodeGen/X86/O0-pipeline.ll index 262cb96ca6d8..874e3e379d8e 100644 --- a/test/CodeGen/X86/O0-pipeline.ll +++ b/test/CodeGen/X86/O0-pipeline.ll @@ -4,8 +4,8 @@ ; CHECK-LABEL: Pass Arguments: ; CHECK-NEXT: Target Library Information -; CHECK-NEXT: Target Transform Information ; CHECK-NEXT: Target Pass Configuration +; CHECK-NEXT: Target Transform Information ; CHECK-NEXT: Type-Based Alias Analysis ; CHECK-NEXT: Scoped NoAlias Alias Analysis ; CHECK-NEXT: Assumption Cache Tracker diff --git a/test/CodeGen/X86/addcarry.ll b/test/CodeGen/X86/addcarry.ll index be550e3fe2d1..3f4ee362e230 100644 --- a/test/CodeGen/X86/addcarry.ll +++ b/test/CodeGen/X86/addcarry.ll @@ -86,12 +86,12 @@ entry: define %scalar @pr31719(%scalar* nocapture readonly %this, %scalar %arg.b) { ; CHECK-LABEL: pr31719: ; CHECK: # BB#0: # %entry +; CHECK-NEXT: xorl %r10d, %r10d ; CHECK-NEXT: addq 8(%rsi), %rcx -; CHECK-NEXT: sbbq %r10, %r10 -; CHECK-NEXT: andl $1, %r10d +; CHECK-NEXT: setb %r10b +; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: addq 16(%rsi), %r8 -; CHECK-NEXT: sbbq %rax, %rax -; CHECK-NEXT: andl $1, %eax +; CHECK-NEXT: setb %al ; CHECK-NEXT: addq 24(%rsi), %r9 ; CHECK-NEXT: addq (%rsi), %rdx ; CHECK-NEXT: adcq $0, %rcx @@ -190,9 +190,9 @@ entry: define i64 @shiftadd(i64 %a, i64 %b, i64 %c, i64 %d) { ; CHECK-LABEL: shiftadd: ; CHECK: # BB#0: # %entry +; CHECK-NEXT: leaq (%rdx,%rcx), %rax ; CHECK-NEXT: addq %rsi, %rdi -; CHECK-NEXT: adcq %rcx, %rdx -; CHECK-NEXT: movq %rdx, %rax +; CHECK-NEXT: adcq $0, %rax ; CHECK-NEXT: retq entry: %0 = zext i64 %a to i128 @@ -213,12 +213,12 @@ define %S @readd(%S* nocapture readonly %this, %S %arg.b) { ; CHECK-NEXT: addq (%rsi), %rdx ; CHECK-NEXT: movq 8(%rsi), %r10 ; CHECK-NEXT: adcq $0, %r10 -; CHECK-NEXT: sbbq %rax, %rax -; CHECK-NEXT: andl $1, %eax +; CHECK-NEXT: setb %al +; CHECK-NEXT: movzbl %al, %eax ; 
CHECK-NEXT: addq %rcx, %r10 ; CHECK-NEXT: adcq 16(%rsi), %rax -; CHECK-NEXT: sbbq %rcx, %rcx -; CHECK-NEXT: andl $1, %ecx +; CHECK-NEXT: setb %cl +; CHECK-NEXT: movzbl %cl, %ecx ; CHECK-NEXT: addq %r8, %rax ; CHECK-NEXT: adcq 24(%rsi), %rcx ; CHECK-NEXT: addq %r9, %rcx diff --git a/test/CodeGen/X86/avg.ll b/test/CodeGen/X86/avg.ll index aa28ef5175ed..2aaf14001758 100644 --- a/test/CodeGen/X86/avg.ll +++ b/test/CodeGen/X86/avg.ll @@ -135,87 +135,88 @@ define void @avg_v16i8(<16 x i8>* %a, <16 x i8>* %b) { define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) { ; SSE2-LABEL: avg_v32i8: ; SSE2: # BB#0: -; SSE2-NEXT: movdqa (%rdi), %xmm3 -; SSE2-NEXT: movdqa 16(%rdi), %xmm8 +; SSE2-NEXT: movdqa (%rdi), %xmm8 +; SSE2-NEXT: movdqa 16(%rdi), %xmm11 ; SSE2-NEXT: movdqa (%rsi), %xmm0 ; SSE2-NEXT: movdqa 16(%rsi), %xmm1 ; SSE2-NEXT: pxor %xmm4, %xmm4 -; SSE2-NEXT: movdqa %xmm3, %xmm5 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15] -; SSE2-NEXT: movdqa %xmm5, %xmm6 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7] -; SSE2-NEXT: movdqa %xmm3, %xmm12 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3] -; SSE2-NEXT: movdqa %xmm8, %xmm7 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15] -; SSE2-NEXT: movdqa %xmm7, %xmm11 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7] ; SSE2-NEXT: movdqa %xmm8, %xmm10 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm4[8],xmm10[9],xmm4[9],xmm10[10],xmm4[10],xmm10[11],xmm4[11],xmm10[12],xmm4[12],xmm10[13],xmm4[13],xmm10[14],xmm4[14],xmm10[15],xmm4[15] +; SSE2-NEXT: movdqa %xmm10, %xmm2 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] +; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm4[0],xmm10[1],xmm4[1],xmm10[2],xmm4[2],xmm10[3],xmm4[3] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7] +; SSE2-NEXT: movdqa %xmm8, %xmm12 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3] -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = 
xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15] -; SSE2-NEXT: movdqa %xmm2, %xmm9 +; SSE2-NEXT: movdqa %xmm11, %xmm15 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm4[8],xmm15[9],xmm4[9],xmm15[10],xmm4[10],xmm15[11],xmm4[11],xmm15[12],xmm4[12],xmm15[13],xmm4[13],xmm15[14],xmm4[14],xmm15[15],xmm4[15] +; SSE2-NEXT: movdqa %xmm15, %xmm14 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm4[4],xmm14[5],xmm4[5],xmm14[6],xmm4[6],xmm14[7],xmm4[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm4[0],xmm15[1],xmm4[1],xmm15[2],xmm4[2],xmm15[3],xmm4[3] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3],xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7] +; SSE2-NEXT: movdqa %xmm11, %xmm9 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7] -; SSE2-NEXT: paddd %xmm6, %xmm9 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3] -; SSE2-NEXT: paddd %xmm5, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] -; SSE2-NEXT: movdqa %xmm0, %xmm5 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7] -; SSE2-NEXT: paddd %xmm12, %xmm5 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3] -; SSE2-NEXT: paddd %xmm3, %xmm0 -; SSE2-NEXT: movdqa %xmm1, %xmm3 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3] +; SSE2-NEXT: movdqa %xmm0, %xmm3 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15] -; SSE2-NEXT: movdqa %xmm3, %xmm6 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7] -; SSE2-NEXT: paddd %xmm11, %xmm6 +; SSE2-NEXT: movdqa %xmm3, %xmm7 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3] -; SSE2-NEXT: paddd %xmm7, %xmm3 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] +; SSE2-NEXT: movdqa %xmm0, %xmm6 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3] +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15] +; SSE2-NEXT: movdqa %xmm2, %xmm5 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] -; SSE2-NEXT: movdqa %xmm1, %xmm7 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = 
xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7] -; SSE2-NEXT: paddd %xmm10, %xmm7 +; SSE2-NEXT: movdqa %xmm1, %xmm13 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm4[4],xmm13[5],xmm4[5],xmm13[6],xmm4[6],xmm13[7],xmm4[7] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] -; SSE2-NEXT: paddd %xmm8, %xmm1 +; SSE2-NEXT: paddd %xmm11, %xmm1 +; SSE2-NEXT: paddd %xmm9, %xmm13 +; SSE2-NEXT: paddd %xmm15, %xmm2 +; SSE2-NEXT: paddd %xmm14, %xmm5 +; SSE2-NEXT: paddd %xmm8, %xmm0 +; SSE2-NEXT: paddd %xmm12, %xmm6 +; SSE2-NEXT: paddd %xmm10, %xmm3 +; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1] -; SSE2-NEXT: paddd %xmm4, %xmm9 -; SSE2-NEXT: paddd %xmm4, %xmm2 -; SSE2-NEXT: paddd %xmm4, %xmm5 -; SSE2-NEXT: paddd %xmm4, %xmm0 -; SSE2-NEXT: paddd %xmm4, %xmm6 -; SSE2-NEXT: paddd %xmm4, %xmm3 ; SSE2-NEXT: paddd %xmm4, %xmm7 +; SSE2-NEXT: paddd %xmm4, %xmm3 +; SSE2-NEXT: paddd %xmm4, %xmm6 +; SSE2-NEXT: paddd %xmm4, %xmm0 +; SSE2-NEXT: paddd %xmm4, %xmm5 +; SSE2-NEXT: paddd %xmm4, %xmm2 +; SSE2-NEXT: paddd %xmm4, %xmm13 ; SSE2-NEXT: paddd %xmm4, %xmm1 -; SSE2-NEXT: psrld $1, %xmm1 -; SSE2-NEXT: psrld $1, %xmm7 ; SSE2-NEXT: psrld $1, %xmm3 -; SSE2-NEXT: psrld $1, %xmm6 +; SSE2-NEXT: psrld $1, %xmm7 +; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] +; SSE2-NEXT: pand %xmm4, %xmm7 +; SSE2-NEXT: pand %xmm4, %xmm3 +; SSE2-NEXT: packuswb %xmm7, %xmm3 ; SSE2-NEXT: psrld $1, %xmm0 -; SSE2-NEXT: psrld $1, %xmm5 +; SSE2-NEXT: psrld $1, %xmm6 +; SSE2-NEXT: pand %xmm4, %xmm6 +; SSE2-NEXT: pand %xmm4, %xmm0 +; SSE2-NEXT: packuswb %xmm6, %xmm0 +; SSE2-NEXT: packuswb %xmm3, %xmm0 ; SSE2-NEXT: psrld $1, %xmm2 -; SSE2-NEXT: psrld $1, %xmm9 -; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] -; SSE2-NEXT: pand %xmm4, %xmm9 -; SSE2-NEXT: pand %xmm4, %xmm2 -; SSE2-NEXT: packuswb %xmm9, %xmm2 +; SSE2-NEXT: psrld $1, %xmm5 ; SSE2-NEXT: pand %xmm4, %xmm5 -; SSE2-NEXT: pand %xmm4, %xmm0 -; SSE2-NEXT: packuswb %xmm5, %xmm0 -; SSE2-NEXT: packuswb %xmm2, %xmm0 -; SSE2-NEXT: pand %xmm4, %xmm6 -; SSE2-NEXT: pand %xmm4, %xmm3 -; SSE2-NEXT: packuswb %xmm6, %xmm3 -; SSE2-NEXT: pand %xmm4, %xmm7 +; SSE2-NEXT: pand %xmm4, %xmm2 +; SSE2-NEXT: packuswb %xmm5, %xmm2 +; SSE2-NEXT: psrld $1, %xmm1 +; SSE2-NEXT: psrld $1, %xmm13 +; SSE2-NEXT: pand %xmm4, %xmm13 ; SSE2-NEXT: pand %xmm4, %xmm1 -; SSE2-NEXT: packuswb %xmm7, %xmm1 -; SSE2-NEXT: packuswb %xmm3, %xmm1 +; SSE2-NEXT: packuswb %xmm13, %xmm1 +; SSE2-NEXT: packuswb %xmm2, %xmm1 ; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq @@ -258,183 +259,198 @@ define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) { define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) { ; SSE2-LABEL: avg_v64i8: ; SSE2: # BB#0: -; SSE2-NEXT: movdqa (%rdi), %xmm6 -; SSE2-NEXT: movdqa 16(%rdi), %xmm2 -; SSE2-NEXT: movdqa 32(%rdi), %xmm1 -; SSE2-NEXT: movdqa 48(%rdi), %xmm0 -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa (%rsi), %xmm5 -; SSE2-NEXT: movdqa 16(%rsi), %xmm13 -; SSE2-NEXT: movdqa 32(%rsi), %xmm11 +; SSE2-NEXT: subq $152, %rsp +; SSE2-NEXT: .Lcfi0: +; SSE2-NEXT: .cfi_def_cfa_offset 160 +; SSE2-NEXT: movdqa (%rdi), %xmm1 +; SSE2-NEXT: movdqa 16(%rdi), %xmm4 +; SSE2-NEXT: movdqa 32(%rdi), %xmm5 +; SSE2-NEXT: movdqa 48(%rdi), %xmm6 ; SSE2-NEXT: pxor %xmm0, %xmm0 -; SSE2-NEXT: movdqa %xmm6, %xmm4 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = 
xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15] -; SSE2-NEXT: movdqa %xmm4, %xmm7 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm1, %xmm3 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] +; SSE2-NEXT: movdqa %xmm3, %xmm2 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3] +; SSE2-NEXT: movdqa %xmm3, {{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-NEXT: movdqa %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm4, %xmm3 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] +; SSE2-NEXT: movdqa %xmm3, %xmm2 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3] +; SSE2-NEXT: movdqa %xmm3, {{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm4, %xmm2 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7] -; SSE2-NEXT: movdqa %xmm6, %xmm12 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3] -; SSE2-NEXT: movdqa %xmm2, %xmm15 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm0[8],xmm15[9],xmm0[9],xmm15[10],xmm0[10],xmm15[11],xmm0[11],xmm15[12],xmm0[12],xmm15[13],xmm0[13],xmm15[14],xmm0[14],xmm15[15],xmm0[15] -; SSE2-NEXT: movdqa %xmm15, %xmm14 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-NEXT: movdqa %xmm2, %xmm8 -; SSE2-NEXT: 
punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] -; SSE2-NEXT: movdqa %xmm5, %xmm10 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm0[8],xmm10[9],xmm0[9],xmm10[10],xmm0[10],xmm10[11],xmm0[11],xmm10[12],xmm0[12],xmm10[13],xmm0[13],xmm10[14],xmm0[14],xmm10[15],xmm0[15] -; SSE2-NEXT: movdqa %xmm10, %xmm3 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE2-NEXT: paddd %xmm7, %xmm3 -; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa %xmm1, %xmm7 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3] -; SSE2-NEXT: paddd %xmm4, %xmm10 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm4, {{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm5, %xmm3 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE2-NEXT: paddd %xmm12, %xmm3 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] +; SSE2-NEXT: movdqa %xmm3, %xmm2 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm2, (%rsp) # 16-byte Spill +; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3] ; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm5, %xmm2 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3] -; SSE2-NEXT: paddd %xmm6, %xmm5 ; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa %xmm13, %xmm4 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15] -; SSE2-NEXT: movdqa %xmm4, %xmm12 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7] -; SSE2-NEXT: paddd %xmm14, %xmm12 -; SSE2-NEXT: movdqa %xmm7, %xmm5 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3] -; SSE2-NEXT: paddd %xmm15, %xmm4 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = 
xmm13[0],xmm0[0],xmm13[1],xmm0[1],xmm13[2],xmm0[2],xmm13[3],xmm0[3],xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7] -; SSE2-NEXT: movdqa %xmm13, %xmm15 +; SSE2-NEXT: movdqa %xmm6, %xmm8 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm0[8],xmm8[9],xmm0[9],xmm8[10],xmm0[10],xmm8[11],xmm0[11],xmm8[12],xmm0[12],xmm8[13],xmm0[13],xmm8[14],xmm0[14],xmm8[15],xmm0[15] +; SSE2-NEXT: movdqa %xmm8, %xmm1 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm6, %xmm1 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3] +; SSE2-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa (%rsi), %xmm14 +; SSE2-NEXT: movdqa %xmm14, %xmm7 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15] +; SSE2-NEXT: movdqa %xmm7, %xmm15 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7] -; SSE2-NEXT: paddd %xmm8, %xmm15 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1],xmm13[2],xmm0[2],xmm13[3],xmm0[3] -; SSE2-NEXT: paddd %xmm2, %xmm13 -; SSE2-NEXT: movdqa %xmm11, %xmm6 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15] -; SSE2-NEXT: movdqa %xmm6, %xmm9 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3],xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm14, %xmm9 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7] -; SSE2-NEXT: paddd %xmm5, %xmm9 -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3] +; SSE2-NEXT: movdqa 16(%rsi), %xmm12 +; SSE2-NEXT: movdqa %xmm12, %xmm6 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15] +; SSE2-NEXT: movdqa %xmm6, %xmm13 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3] -; SSE2-NEXT: paddd %xmm7, %xmm6 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = 
xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3],xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7] -; SSE2-NEXT: movdqa %xmm11, %xmm14 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7] -; SSE2-NEXT: paddd %xmm2, %xmm14 -; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload -; SSE2-NEXT: movdqa %xmm5, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3] -; SSE2-NEXT: paddd %xmm1, %xmm11 -; SSE2-NEXT: movdqa %xmm2, %xmm1 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-NEXT: movdqa 48(%rsi), %xmm7 -; SSE2-NEXT: movdqa %xmm7, %xmm3 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] -; SSE2-NEXT: movdqa %xmm3, %xmm8 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7] -; SSE2-NEXT: paddd %xmm1, %xmm8 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3] -; SSE2-NEXT: paddd %xmm2, %xmm3 -; SSE2-NEXT: movdqa %xmm5, %xmm2 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3],xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm12, %xmm10 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3] +; SSE2-NEXT: movdqa 32(%rsi), %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm5 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15] +; SSE2-NEXT: movdqa %xmm5, %xmm11 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] ; SSE2-NEXT: movdqa %xmm2, %xmm1 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3],xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7] -; SSE2-NEXT: movdqa %xmm7, %xmm5 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7] -; SSE2-NEXT: paddd %xmm1, %xmm5 +; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3] -; SSE2-NEXT: paddd %xmm2, %xmm7 +; SSE2-NEXT: movdqa 48(%rsi), %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm4 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = 
xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15] +; SSE2-NEXT: movdqa %xmm4, %xmm3 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm1, %xmm3 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload +; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Folded Reload +; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: paddd %xmm8, %xmm4 +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm8 # 16-byte Reload +; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm8 # 16-byte Folded Reload +; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Folded Reload +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload +; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Folded Reload +; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Folded Reload +; SSE2-NEXT: paddd (%rsp), %xmm11 # 16-byte Folded Reload +; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm12 # 16-byte Folded Reload +; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm10 # 16-byte Folded Reload +; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm6 # 16-byte Folded Reload +; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm13 # 16-byte Folded Reload +; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm14 # 16-byte Folded Reload +; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm9 # 16-byte Folded Reload +; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload +; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm15 # 16-byte Folded Reload ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1] -; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload -; SSE2-NEXT: paddd %xmm0, %xmm1 -; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: paddd %xmm0, %xmm10 -; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload -; SSE2-NEXT: paddd %xmm0, %xmm1 -; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload -; SSE2-NEXT: paddd %xmm0, %xmm2 -; SSE2-NEXT: paddd %xmm0, %xmm12 -; SSE2-NEXT: paddd %xmm0, %xmm4 ; SSE2-NEXT: paddd %xmm0, %xmm15 -; SSE2-NEXT: paddd %xmm0, %xmm13 +; SSE2-NEXT: paddd %xmm0, %xmm7 ; SSE2-NEXT: paddd %xmm0, %xmm9 -; SSE2-NEXT: paddd %xmm0, %xmm6 ; SSE2-NEXT: paddd %xmm0, %xmm14 +; SSE2-NEXT: paddd %xmm0, %xmm13 +; SSE2-NEXT: paddd %xmm0, %xmm6 +; SSE2-NEXT: paddd %xmm0, %xmm10 +; SSE2-NEXT: paddd %xmm0, %xmm12 ; SSE2-NEXT: paddd %xmm0, %xmm11 +; SSE2-NEXT: paddd %xmm0, %xmm5 +; SSE2-NEXT: paddd %xmm0, %xmm3 +; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: paddd %xmm0, %xmm2 ; SSE2-NEXT: paddd %xmm0, %xmm8 +; SSE2-NEXT: paddd %xmm0, %xmm4 +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload ; SSE2-NEXT: paddd %xmm0, %xmm3 -; SSE2-NEXT: paddd %xmm0, %xmm5 -; SSE2-NEXT: paddd %xmm0, %xmm7 -; SSE2-NEXT: psrld $1, %xmm10 -; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload -; SSE2-NEXT: psrld $1, %xmm1 -; SSE2-NEXT: movdqa 
{{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] -; SSE2-NEXT: pand %xmm0, %xmm1 -; SSE2-NEXT: pand %xmm0, %xmm10 -; SSE2-NEXT: packuswb %xmm1, %xmm10 -; SSE2-NEXT: psrld $1, %xmm2 -; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload -; SSE2-NEXT: psrld $1, %xmm1 -; SSE2-NEXT: pand %xmm0, %xmm1 -; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: packuswb %xmm1, %xmm2 -; SSE2-NEXT: packuswb %xmm10, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm1 -; SSE2-NEXT: psrld $1, %xmm4 -; SSE2-NEXT: psrld $1, %xmm12 -; SSE2-NEXT: pand %xmm0, %xmm12 -; SSE2-NEXT: pand %xmm0, %xmm4 -; SSE2-NEXT: packuswb %xmm12, %xmm4 -; SSE2-NEXT: psrld $1, %xmm13 +; SSE2-NEXT: paddd %xmm0, %xmm1 +; SSE2-NEXT: psrld $1, %xmm7 ; SSE2-NEXT: psrld $1, %xmm15 +; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] ; SSE2-NEXT: pand %xmm0, %xmm15 -; SSE2-NEXT: pand %xmm0, %xmm13 -; SSE2-NEXT: packuswb %xmm15, %xmm13 -; SSE2-NEXT: packuswb %xmm4, %xmm13 -; SSE2-NEXT: psrld $1, %xmm6 +; SSE2-NEXT: pand %xmm0, %xmm7 +; SSE2-NEXT: packuswb %xmm15, %xmm7 +; SSE2-NEXT: psrld $1, %xmm14 ; SSE2-NEXT: psrld $1, %xmm9 ; SSE2-NEXT: pand %xmm0, %xmm9 +; SSE2-NEXT: pand %xmm0, %xmm14 +; SSE2-NEXT: packuswb %xmm9, %xmm14 +; SSE2-NEXT: packuswb %xmm7, %xmm14 +; SSE2-NEXT: psrld $1, %xmm6 +; SSE2-NEXT: psrld $1, %xmm13 +; SSE2-NEXT: pand %xmm0, %xmm13 ; SSE2-NEXT: pand %xmm0, %xmm6 -; SSE2-NEXT: packuswb %xmm9, %xmm6 +; SSE2-NEXT: packuswb %xmm13, %xmm6 +; SSE2-NEXT: psrld $1, %xmm12 +; SSE2-NEXT: psrld $1, %xmm10 +; SSE2-NEXT: pand %xmm0, %xmm10 +; SSE2-NEXT: pand %xmm0, %xmm12 +; SSE2-NEXT: packuswb %xmm10, %xmm12 +; SSE2-NEXT: packuswb %xmm6, %xmm12 +; SSE2-NEXT: psrld $1, %xmm5 ; SSE2-NEXT: psrld $1, %xmm11 -; SSE2-NEXT: psrld $1, %xmm14 -; SSE2-NEXT: pand %xmm0, %xmm14 ; SSE2-NEXT: pand %xmm0, %xmm11 -; SSE2-NEXT: packuswb %xmm14, %xmm11 -; SSE2-NEXT: packuswb %xmm6, %xmm11 -; SSE2-NEXT: psrld $1, %xmm3 -; SSE2-NEXT: psrld $1, %xmm8 -; SSE2-NEXT: pand %xmm0, %xmm8 -; SSE2-NEXT: pand %xmm0, %xmm3 -; SSE2-NEXT: packuswb %xmm8, %xmm3 -; SSE2-NEXT: psrld $1, %xmm7 +; SSE2-NEXT: pand %xmm0, %xmm5 +; SSE2-NEXT: packuswb %xmm11, %xmm5 +; SSE2-NEXT: psrld $1, %xmm2 +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload +; SSE2-NEXT: psrld $1, %xmm6 +; SSE2-NEXT: pand %xmm0, %xmm6 +; SSE2-NEXT: pand %xmm0, %xmm2 +; SSE2-NEXT: packuswb %xmm6, %xmm2 +; SSE2-NEXT: packuswb %xmm5, %xmm2 +; SSE2-NEXT: psrld $1, %xmm4 +; SSE2-NEXT: movdqa %xmm8, %xmm5 ; SSE2-NEXT: psrld $1, %xmm5 ; SSE2-NEXT: pand %xmm0, %xmm5 -; SSE2-NEXT: pand %xmm0, %xmm7 -; SSE2-NEXT: packuswb %xmm5, %xmm7 -; SSE2-NEXT: packuswb %xmm3, %xmm7 -; SSE2-NEXT: movdqu %xmm7, (%rax) -; SSE2-NEXT: movdqu %xmm11, (%rax) -; SSE2-NEXT: movdqu %xmm13, (%rax) +; SSE2-NEXT: pand %xmm0, %xmm4 +; SSE2-NEXT: packuswb %xmm5, %xmm4 +; SSE2-NEXT: psrld $1, %xmm1 +; SSE2-NEXT: movdqa %xmm3, %xmm5 +; SSE2-NEXT: psrld $1, %xmm5 +; SSE2-NEXT: pand %xmm0, %xmm5 +; SSE2-NEXT: pand %xmm0, %xmm1 +; SSE2-NEXT: packuswb %xmm5, %xmm1 +; SSE2-NEXT: packuswb %xmm4, %xmm1 ; SSE2-NEXT: movdqu %xmm1, (%rax) +; SSE2-NEXT: movdqu %xmm2, (%rax) +; SSE2-NEXT: movdqu %xmm12, (%rax) +; SSE2-NEXT: movdqu %xmm14, (%rax) +; SSE2-NEXT: addq $152, %rsp ; SSE2-NEXT: retq ; ; AVX2-LABEL: avg_v64i8: @@ -448,21 +464,21 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) { ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero ; 
AVX2-NEXT: vpmovzxbd {{.*#+}} ymm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm9 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm10 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm11 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm12 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm13 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm14 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpaddd %ymm15, %ymm7, %ymm7
+; AVX2-NEXT: vpaddd %ymm14, %ymm6, %ymm6
+; AVX2-NEXT: vpaddd %ymm13, %ymm5, %ymm5
+; AVX2-NEXT: vpaddd %ymm12, %ymm4, %ymm4
+; AVX2-NEXT: vpaddd %ymm11, %ymm3, %ymm3
+; AVX2-NEXT: vpaddd %ymm10, %ymm2, %ymm2
+; AVX2-NEXT: vpaddd %ymm9, %ymm1, %ymm1
; AVX2-NEXT: vpaddd %ymm8, %ymm0, %ymm0
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm8, %ymm1, %ymm1
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm8, %ymm2, %ymm2
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm8, %ymm3, %ymm3
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm8, %ymm4, %ymm4
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm8, %ymm5, %ymm5
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm8, %ymm6, %ymm6
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm8, %ymm7, %ymm7
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm8
; AVX2-NEXT: vpaddd %ymm8, %ymm0, %ymm9
; AVX2-NEXT: vpaddd %ymm8, %ymm1, %ymm10
@@ -524,13 +540,13 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) {
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
+; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
+; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
+; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
+; AVX512F-NEXT: vpaddd %zmm7, %zmm3, %zmm3
+; AVX512F-NEXT: vpaddd %zmm6, %zmm2, %zmm2
+; AVX512F-NEXT: vpaddd %zmm5, %zmm1, %zmm1
; AVX512F-NEXT: vpaddd %zmm4, %zmm0, %zmm0
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpaddd %zmm4, %zmm1, %zmm1
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpaddd %zmm4, %zmm2, %zmm2
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpaddd %zmm4, %zmm3, %zmm3
; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm4
; AVX512F-NEXT: vpaddd %zmm4, %zmm0, %zmm0
; AVX512F-NEXT: vpaddd %zmm4, %zmm1, %zmm1
@@ -657,27 +673,27 @@ define void @avg_v8i16(<8 x i16>* %a, <8 x i16>* %b) {
define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) {
; SSE2-LABEL: avg_v16i16:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm2
-; SSE2-NEXT: movdqa 16(%rdi), %xmm4
+; SSE2-NEXT: movdqa (%rdi), %xmm4
+; SSE2-NEXT: movdqa 16(%rdi), %xmm5
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; SSE2-NEXT: movdqa %xmm4, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; SSE2-NEXT: pxor %xmm6, %xmm6
+; SSE2-NEXT: movdqa %xmm4, %xmm8
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
+; SSE2-NEXT: movdqa %xmm5, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; SSE2-NEXT: paddd %xmm6, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; SSE2-NEXT: paddd %xmm2, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; SSE2-NEXT: paddd %xmm5, %xmm1
; SSE2-NEXT: paddd %xmm7, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; SSE2-NEXT: paddd %xmm4, %xmm1
+; SSE2-NEXT: paddd %xmm4, %xmm0
+; SSE2-NEXT: paddd %xmm8, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1]
; SSE2-NEXT: paddd %xmm4, %xmm3
; SSE2-NEXT: paddd %xmm4, %xmm0
@@ -739,79 +755,80 @@ define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) {
define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) {
; SSE2-LABEL: avg_v32i16:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm4
-; SSE2-NEXT: movdqa 16(%rdi), %xmm11
-; SSE2-NEXT: movdqa 32(%rdi), %xmm10
+; SSE2-NEXT: movdqa (%rdi), %xmm10
+; SSE2-NEXT: movdqa 16(%rdi), %xmm9
+; SSE2-NEXT: movdqa 32(%rdi), %xmm11
; SSE2-NEXT: movdqa 48(%rdi), %xmm8
-; SSE2-NEXT: movdqa (%rsi), %xmm9
+; SSE2-NEXT: movdqa (%rsi), %xmm14
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
; SSE2-NEXT: movdqa 32(%rsi), %xmm2
; SSE2-NEXT: movdqa 48(%rsi), %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm4, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm11, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm10, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm10, %xmm4
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm9, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm11, %xmm15
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm8, %xmm13
; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm9, %xmm7
+; SSE2-NEXT: movdqa %xmm14, %xmm7
; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm6, %xmm7
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm4, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm5, %xmm6
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm11, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm12, %xmm5
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm10, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm13, %xmm4
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
; SSE2-NEXT: paddd %xmm8, %xmm3
+; SSE2-NEXT: paddd %xmm13, %xmm4
+; SSE2-NEXT: paddd %xmm11, %xmm2
+; SSE2-NEXT: paddd %xmm15, %xmm5
+; SSE2-NEXT: paddd %xmm9, %xmm1
+; SSE2-NEXT: paddd %xmm12, %xmm6
+; SSE2-NEXT: paddd %xmm10, %xmm14
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
; SSE2-NEXT: paddd %xmm0, %xmm7
-; SSE2-NEXT: paddd %xmm0, %xmm9
+; SSE2-NEXT: paddd %xmm0, %xmm14
; SSE2-NEXT: paddd %xmm0, %xmm6
; SSE2-NEXT: paddd %xmm0, %xmm1
; SSE2-NEXT: paddd %xmm0, %xmm5
; SSE2-NEXT: paddd %xmm0, %xmm2
; SSE2-NEXT: paddd %xmm0, %xmm4
; SSE2-NEXT: paddd %xmm0, %xmm3
-; SSE2-NEXT: psrld $1, %xmm3
-; SSE2-NEXT: psrld $1, %xmm4
-; SSE2-NEXT: psrld $1, %xmm2
-; SSE2-NEXT: psrld $1, %xmm5
-; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: psrld $1, %xmm6
-; SSE2-NEXT: psrld $1, %xmm9
+; SSE2-NEXT: psrld $1, %xmm14
; SSE2-NEXT: psrld $1, %xmm7
; SSE2-NEXT: pslld $16, %xmm7
; SSE2-NEXT: psrad $16, %xmm7
-; SSE2-NEXT: pslld $16, %xmm9
-; SSE2-NEXT: psrad $16, %xmm9
-; SSE2-NEXT: packssdw %xmm7, %xmm9
+; SSE2-NEXT: pslld $16, %xmm14
+; SSE2-NEXT: psrad $16, %xmm14
+; SSE2-NEXT: packssdw %xmm7, %xmm14
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: psrld $1, %xmm6
; SSE2-NEXT: pslld $16, %xmm6
; SSE2-NEXT: psrad $16, %xmm6
; SSE2-NEXT: pslld $16, %xmm1
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: packssdw %xmm6, %xmm1
+; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: pslld $16, %xmm5
; SSE2-NEXT: psrad $16, %xmm5
; SSE2-NEXT: pslld $16, %xmm2
; SSE2-NEXT: psrad $16, %xmm2
; SSE2-NEXT: packssdw %xmm5, %xmm2
+; SSE2-NEXT: psrld $1, %xmm3
+; SSE2-NEXT: psrld $1, %xmm4
; SSE2-NEXT: pslld $16, %xmm4
; SSE2-NEXT: psrad $16, %xmm4
; SSE2-NEXT: pslld $16, %xmm3
@@ -820,7 +837,7 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) {
; SSE2-NEXT: movdqu %xmm3, (%rax)
; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: movdqu %xmm1, (%rax)
-; SSE2-NEXT: movdqu %xmm9, (%rax)
+; SSE2-NEXT: movdqu %xmm14, (%rax)
; SSE2-NEXT: retq
;
; AVX2-LABEL: avg_v32i16:
@@ -830,13 +847,13 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) {
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpaddd %ymm7, %ymm3, %ymm3
+; AVX2-NEXT: vpaddd %ymm6, %ymm2, %ymm2
+; AVX2-NEXT: vpaddd %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vpaddd %ymm4, %ymm0, %ymm0
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpaddd %ymm4, %ymm1, %ymm1
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpaddd %ymm4, %ymm2, %ymm2
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpaddd %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpaddd %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpaddd %ymm4, %ymm1, %ymm1
@@ -867,9 +884,9 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) {
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512F-NEXT: vpaddd %zmm3, %zmm1, %zmm1
; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512F-NEXT: vpaddd %zmm2, %zmm1, %zmm1
; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm2
; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; AVX512F-NEXT: vpaddd %zmm2, %zmm1, %zmm1
@@ -1030,87 +1047,88 @@ define void @avg_v16i8_2(<16 x i8>* %a, <16 x i8>* %b) {
define void @avg_v32i8_2(<32 x i8>* %a, <32 x i8>* %b) {
; SSE2-LABEL: avg_v32i8_2:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm3
-; SSE2-NEXT: movdqa 16(%rdi), %xmm8
+; SSE2-NEXT: movdqa (%rdi), %xmm8
+; SSE2-NEXT: movdqa 16(%rdi), %xmm11
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm3, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: movdqa %xmm8, %xmm7
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
; SSE2-NEXT: movdqa %xmm8, %xmm10
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm4[8],xmm10[9],xmm4[9],xmm10[10],xmm4[10],xmm10[11],xmm4[11],xmm10[12],xmm4[12],xmm10[13],xmm4[13],xmm10[14],xmm4[14],xmm10[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm10, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm4[0],xmm10[1],xmm4[1],xmm10[2],xmm4[2],xmm10[3],xmm4[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm8, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm2, %xmm9
+; SSE2-NEXT: movdqa %xmm11, %xmm15
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm4[8],xmm15[9],xmm4[9],xmm15[10],xmm4[10],xmm15[11],xmm4[11],xmm15[12],xmm4[12],xmm15[13],xmm4[13],xmm15[14],xmm4[14],xmm15[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm15, %xmm14
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm4[4],xmm14[5],xmm4[5],xmm14[6],xmm4[6],xmm14[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm4[0],xmm15[1],xmm4[1],xmm15[2],xmm4[2],xmm15[3],xmm4[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3],xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm11, %xmm9
; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
-; SSE2-NEXT: paddd %xmm6, %xmm9
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm5, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; SSE2-NEXT: paddd %xmm12, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm3, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; SSE2-NEXT: paddd %xmm11, %xmm6
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm7, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
-; SSE2-NEXT: paddd %xmm10, %xmm7
+; SSE2-NEXT: movdqa %xmm1, %xmm13
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm4[4],xmm13[5],xmm4[5],xmm13[6],xmm4[6],xmm13[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm8, %xmm1
+; SSE2-NEXT: paddd %xmm11, %xmm1
+; SSE2-NEXT: paddd %xmm9, %xmm13
+; SSE2-NEXT: paddd %xmm15, %xmm2
+; SSE2-NEXT: paddd %xmm14, %xmm5
+; SSE2-NEXT: paddd %xmm8, %xmm0
+; SSE2-NEXT: paddd %xmm12, %xmm6
+; SSE2-NEXT: paddd %xmm10, %xmm3
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1]
-; SSE2-NEXT: paddd %xmm4, %xmm9
-; SSE2-NEXT: paddd %xmm4, %xmm2
-; SSE2-NEXT: paddd %xmm4, %xmm5
-; SSE2-NEXT: paddd %xmm4, %xmm0
-; SSE2-NEXT: paddd %xmm4, %xmm6
-; SSE2-NEXT: paddd %xmm4, %xmm3
; SSE2-NEXT: paddd %xmm4, %xmm7
+; SSE2-NEXT: paddd %xmm4, %xmm3
+; SSE2-NEXT: paddd %xmm4, %xmm6
+; SSE2-NEXT: paddd %xmm4, %xmm0
+; SSE2-NEXT: paddd %xmm4, %xmm5
+; SSE2-NEXT: paddd %xmm4, %xmm2
+; SSE2-NEXT: paddd %xmm4, %xmm13
; SSE2-NEXT: paddd %xmm4, %xmm1
-; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: psrld $1, %xmm7
; SSE2-NEXT: psrld $1, %xmm3
-; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: psrld $1, %xmm7
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE2-NEXT: pand %xmm4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: packuswb %xmm7, %xmm3
; SSE2-NEXT: psrld $1, %xmm0
-; SSE2-NEXT: psrld $1, %xmm5
+; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: pand %xmm4, %xmm6
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm6, %xmm0
+; SSE2-NEXT: packuswb %xmm3, %xmm0
; SSE2-NEXT: psrld $1, %xmm2
-; SSE2-NEXT: psrld $1, %xmm9
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE2-NEXT: pand %xmm4, %xmm9
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: packuswb %xmm9, %xmm2
+; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: packuswb %xmm5, %xmm0
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm3
-; SSE2-NEXT: packuswb %xmm6, %xmm3
-; SSE2-NEXT: pand %xmm4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: packuswb %xmm5, %xmm2
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: psrld $1, %xmm13
+; SSE2-NEXT: pand %xmm4, %xmm13
; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: packuswb %xmm7, %xmm1
-; SSE2-NEXT: packuswb %xmm3, %xmm1
+; SSE2-NEXT: packuswb %xmm13, %xmm1
+; SSE2-NEXT: packuswb %xmm2, %xmm1
; SSE2-NEXT: movdqu %xmm1, (%rax)
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
@@ -1494,27 +1512,27 @@ define void @avg_v8i16_2(<8 x i16>* %a, <8 x i16>* %b) {
define void @avg_v16i16_2(<16 x i16>* %a, <16 x i16>* %b) {
; SSE2-LABEL: avg_v16i16_2:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm2
-; SSE2-NEXT: movdqa 16(%rdi), %xmm4
+; SSE2-NEXT: movdqa (%rdi), %xmm4
+; SSE2-NEXT: movdqa 16(%rdi), %xmm5
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; SSE2-NEXT: movdqa %xmm4, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; SSE2-NEXT: pxor %xmm6, %xmm6
+; SSE2-NEXT: movdqa %xmm4, %xmm8
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
+; SSE2-NEXT: movdqa %xmm5, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; SSE2-NEXT: paddd %xmm6, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; SSE2-NEXT: paddd %xmm2, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; SSE2-NEXT: paddd %xmm5, %xmm1
; SSE2-NEXT: paddd %xmm7, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; SSE2-NEXT: paddd %xmm4, %xmm1
+; SSE2-NEXT: paddd %xmm4, %xmm0
+; SSE2-NEXT: paddd %xmm8, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1]
; SSE2-NEXT: paddd %xmm4, %xmm3
; SSE2-NEXT: paddd %xmm4, %xmm0
@@ -1576,79 +1594,80 @@ define void @avg_v16i16_2(<16 x i16>* %a, <16 x i16>* %b) {
define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) {
; SSE2-LABEL: avg_v32i16_2:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm4
-; SSE2-NEXT: movdqa 16(%rdi), %xmm11
-; SSE2-NEXT: movdqa 32(%rdi), %xmm10
+; SSE2-NEXT: movdqa (%rdi), %xmm10
+; SSE2-NEXT: movdqa 16(%rdi), %xmm9
+; SSE2-NEXT: movdqa 32(%rdi), %xmm11
; SSE2-NEXT: movdqa 48(%rdi), %xmm8
-; SSE2-NEXT: movdqa (%rsi), %xmm9
+; SSE2-NEXT: movdqa (%rsi), %xmm14
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
; SSE2-NEXT: movdqa 32(%rsi), %xmm2
; SSE2-NEXT: movdqa 48(%rsi), %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm4, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm11, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm10, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm10, %xmm4
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm9, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm11, %xmm15
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm8, %xmm13
; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm9, %xmm7
+; SSE2-NEXT: movdqa %xmm14, %xmm7
; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm6, %xmm7
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm4, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm5, %xmm6
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm11, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm12, %xmm5
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm10, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm13, %xmm4
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
; SSE2-NEXT: paddd %xmm8, %xmm3
+; SSE2-NEXT: paddd %xmm13, %xmm4
+; SSE2-NEXT: paddd %xmm11, %xmm2
+; SSE2-NEXT: paddd %xmm15, %xmm5
+; SSE2-NEXT: paddd %xmm9, %xmm1
+; SSE2-NEXT: paddd %xmm12, %xmm6
+; SSE2-NEXT: paddd %xmm10, %xmm14
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
; SSE2-NEXT: paddd %xmm0, %xmm7
-; SSE2-NEXT: paddd %xmm0, %xmm9
+; SSE2-NEXT: paddd %xmm0, %xmm14
; SSE2-NEXT: paddd %xmm0, %xmm6
; SSE2-NEXT: paddd %xmm0, %xmm1
; SSE2-NEXT: paddd %xmm0, %xmm5
; SSE2-NEXT: paddd %xmm0, %xmm2
; SSE2-NEXT: paddd %xmm0, %xmm4
; SSE2-NEXT: paddd %xmm0, %xmm3
-; SSE2-NEXT: psrld $1, %xmm3
-; SSE2-NEXT: psrld $1, %xmm4
-; SSE2-NEXT: psrld $1, %xmm2
-; SSE2-NEXT: psrld $1, %xmm5
-; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: psrld $1, %xmm6
-; SSE2-NEXT: psrld $1, %xmm9
+; SSE2-NEXT: psrld $1, %xmm14
; SSE2-NEXT: psrld $1, %xmm7
; SSE2-NEXT: pslld $16, %xmm7
; SSE2-NEXT: psrad $16, %xmm7
-; SSE2-NEXT: pslld $16, %xmm9
-; SSE2-NEXT: psrad $16, %xmm9
-; SSE2-NEXT: packssdw %xmm7, %xmm9
+; SSE2-NEXT: pslld $16, %xmm14
+; SSE2-NEXT: psrad $16, %xmm14
+; SSE2-NEXT: packssdw %xmm7, %xmm14
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: psrld $1, %xmm6
; SSE2-NEXT: pslld $16, %xmm6
; SSE2-NEXT: psrad $16, %xmm6
; SSE2-NEXT: pslld $16, %xmm1
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: packssdw %xmm6, %xmm1
+; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: pslld $16, %xmm5
; SSE2-NEXT: psrad $16, %xmm5
; SSE2-NEXT: pslld $16, %xmm2
; SSE2-NEXT: psrad $16, %xmm2
; SSE2-NEXT: packssdw %xmm5, %xmm2
+; SSE2-NEXT: psrld $1, %xmm3
+; SSE2-NEXT: psrld $1, %xmm4
; SSE2-NEXT: pslld $16, %xmm4
; SSE2-NEXT: psrad $16, %xmm4
; SSE2-NEXT: pslld $16, %xmm3
@@ -1657,7 +1676,7 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) {
; SSE2-NEXT: movdqu %xmm3, (%rax)
; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: movdqu %xmm1, (%rax)
-; SSE2-NEXT: movdqu %xmm9, (%rax)
+; SSE2-NEXT: movdqu %xmm14, (%rax)
; SSE2-NEXT: retq
;
; AVX2-LABEL: avg_v32i16_2:
@@ -1667,13 +1686,13 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) {
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpaddd %ymm7, %ymm3, %ymm3
+; AVX2-NEXT: vpaddd %ymm6, %ymm2, %ymm2
+; AVX2-NEXT: vpaddd %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vpaddd %ymm4, %ymm0, %ymm0
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpaddd %ymm4, %ymm1, %ymm1
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpaddd %ymm4, %ymm2, %ymm2
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpaddd %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpaddd %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpaddd %ymm4, %ymm1, %ymm1
@@ -1704,9 +1723,9 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) {
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512F-NEXT: vpaddd %zmm3, %zmm1, %zmm1
; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512F-NEXT: vpaddd %zmm2, %zmm1, %zmm1
; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm2
; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; AVX512F-NEXT: vpaddd %zmm2, %zmm1, %zmm1
diff --git a/test/CodeGen/X86/avx-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
index 3cadbe2a8db3..ff5a2371a145 100644
--- a/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
@@ -2244,11 +2244,11 @@ define <4 x double> @test_mm256_set_pd(double %a0, double %a1, double %a2, doubl
; X32: # BB#0:
; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
-; X32-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; X32-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; X32-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_pd:
@@ -2269,19 +2269,19 @@ define <8 x float> @test_mm256_set_ps(float %a0, float %a1, float %a2, float %a3
; X32: # BB#0:
; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
-; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
-; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
-; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
-; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm5 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm6 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm7 = mem[0],zero,zero,zero
+; X32-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
+; X32-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0],xmm4[3]
+; X32-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm7[0]
+; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
+; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
+; X32-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_ps:
@@ -2881,10 +2881,10 @@ define <4 x double> @test_mm256_setr_pd(double %a0, double %a1, double %a2, doub
; X32: # BB#0:
; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; X32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
-; X32-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; X32-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; X32-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; X32-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm3[0],xmm2[0]
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
@@ -2908,16 +2908,16 @@ define <8 x float> @test_mm256_setr_ps(float %a0, float %a1, float %a2, float %a
; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X32-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm5 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm6 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm7 = mem[0],zero,zero,zero
; X32-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; X32-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
-; X32-NEXT: vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
-; X32-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0,1],xmm2[0],xmm3[3]
-; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[0]
+; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm7[0],xmm6[0],xmm7[2,3]
+; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm5[0],xmm1[3]
+; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
diff --git a/test/CodeGen/X86/avx.ll b/test/CodeGen/X86/avx.ll
index 647b7a8f4dfc..341dd867e4ff 100644
--- a/test/CodeGen/X86/avx.ll
+++ b/test/CodeGen/X86/avx.ll
@@ -113,11 +113,11 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
; CHECK-NOT: mov
; CHECK: insertps $48
; CHECK: insertps $48
-; CHECK: vaddps
; CHECK: insertps $48
; CHECK: insertps $48
; CHECK: vaddps
; CHECK: vaddps
+; CHECK: vaddps
; CHECK-NEXT: ret
%1 = getelementptr inbounds float, float* %fb, i64 %index
%2 = load float, float* %1, align 4
diff --git a/test/CodeGen/X86/avx512-cmp-kor-sequence.ll b/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
index e29cf09718ad..63b0281a7339 100644
--- a/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
+++ b/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
@@ -13,10 +13,10 @@ define zeroext i16 @cmp_kor_seq_16(<16 x float> %a, <16 x float> %b, <16 x float
; CHECK: # BB#0: # %entry
; CHECK-NEXT: vcmpgeps %zmm4, %zmm0, %k0
; CHECK-NEXT: vcmpgeps %zmm4, %zmm1, %k1
+; CHECK-NEXT: vcmpgeps %zmm4, %zmm2, %k2
+; CHECK-NEXT: vcmpgeps %zmm4, %zmm3, %k3
; CHECK-NEXT: korw %k1, %k0, %k0
-; CHECK-NEXT: vcmpgeps %zmm4, %zmm2, %k1
-; CHECK-NEXT: vcmpgeps %zmm4, %zmm3, %k2
-; CHECK-NEXT: korw %k2, %k1, %k1
+; CHECK-NEXT: korw %k3, %k2, %k1
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
diff --git a/test/CodeGen/X86/avx512-cmp.ll b/test/CodeGen/X86/avx512-cmp.ll
index c1b64743f898..eae7b94f5135 100644
--- a/test/CodeGen/X86/avx512-cmp.ll
+++ b/test/CodeGen/X86/avx512-cmp.ll
@@ -47,16 +47,20 @@ l2:
ret float %c1
}
-; FIXME: Can use vcmpeqss and extract from the mask here in AVX512.
define i32 @test3(float %a, float %b) {
-; ALL-LABEL: test3:
-; ALL: ## BB#0:
-; ALL-NEXT: vucomiss %xmm1, %xmm0
-; ALL-NEXT: setnp %al
-; ALL-NEXT: sete %cl
-; ALL-NEXT: andb %al, %cl
-; ALL-NEXT: movzbl %cl, %eax
-; ALL-NEXT: retq
+; KNL-LABEL: test3:
+; KNL: ## BB#0:
+; KNL-NEXT: vcmpeqss %xmm1, %xmm0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test3:
+; SKX: ## BB#0:
+; SKX-NEXT: vcmpeqss %xmm1, %xmm0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: retq
%cmp10.i = fcmp oeq float %a, %b
%conv11.i = zext i1 %cmp10.i to i32
@@ -69,7 +73,7 @@ define float @test5(float %p) #0 {
; ALL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; ALL-NEXT: vucomiss %xmm1, %xmm0
; ALL-NEXT: jne LBB3_1
-; ALL-NEXT: jp LBB3_1
+; ALL-NEXT: jp LBB3_1
; ALL-NEXT: ## BB#2: ## %return
; ALL-NEXT: retq
; ALL-NEXT: LBB3_1: ## %if.end
@@ -158,47 +162,22 @@ B:
}
define i32 @test10(i64 %b, i64 %c, i1 %d) {
-; KNL-LABEL: test10:
-; KNL: ## BB#0:
-; KNL-NEXT: andl $1, %edx
-; KNL-NEXT: kmovw %edx, %k0
-; KNL-NEXT: cmpq %rsi, %rdi
-; KNL-NEXT: sete %al
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: korw %k1, %k0, %k1
-; KNL-NEXT: kxorw %k1, %k0, %k0
-; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: testb %al, %al
-; KNL-NEXT: je LBB8_1
-; KNL-NEXT: ## BB#2: ## %if.end.i
-; KNL-NEXT: movl $6, %eax
-; KNL-NEXT: retq
-; KNL-NEXT: LBB8_1: ## %if.then.i
-; KNL-NEXT: movl $5, %eax
-; KNL-NEXT: retq
-;
-; SKX-LABEL: test10:
-; SKX: ## BB#0:
-; SKX-NEXT: andl $1, %edx
-; SKX-NEXT: kmovd %edx, %k0
-; SKX-NEXT: cmpq %rsi, %rdi
-; SKX-NEXT: sete %al
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: korw %k1, %k0, %k1
-; SKX-NEXT: kxorw %k1, %k0, %k0
-; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: testb %al, %al
-; SKX-NEXT: je LBB8_1
-; SKX-NEXT: ## BB#2: ## %if.end.i
-; SKX-NEXT: movl $6, %eax
-; SKX-NEXT: retq
-; SKX-NEXT: LBB8_1: ## %if.then.i
-; SKX-NEXT: movl $5, %eax
-; SKX-NEXT: retq
+; ALL-LABEL: test10:
+; ALL: ## BB#0:
+; ALL-NEXT: movl %edx, %eax
+; ALL-NEXT: andb $1, %al
+; ALL-NEXT: cmpq %rsi, %rdi
+; ALL-NEXT: sete %cl
+; ALL-NEXT: orb %dl, %cl
+; ALL-NEXT: andb $1, %cl
+; ALL-NEXT: cmpb %cl, %al
+; ALL-NEXT: je LBB8_1
+; ALL-NEXT: ## BB#2: ## %if.end.i
+; ALL-NEXT: movl $6, %eax
+; ALL-NEXT: retq
+; ALL-NEXT: LBB8_1: ## %if.then.i
+; ALL-NEXT: movl $5, %eax
+; ALL-NEXT: retq
%cmp8.i = icmp eq i64 %b, %c
%or1 = or i1 %d, %cmp8.i
diff --git a/test/CodeGen/X86/avx512-cvt.ll b/test/CodeGen/X86/avx512-cvt.ll
index 2b55372f3066..33ac15de9de9 100644
--- a/test/CodeGen/X86/avx512-cvt.ll
+++ b/test/CodeGen/X86/avx512-cvt.ll
@@ -1552,10 +1552,10 @@ define <2 x float> @uitofp_2i1_float(<2 x i32> %a) {
; NOVL-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
; NOVL-NEXT: vpxor %xmm1, %xmm0, %xmm0
; NOVL-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
-; NOVL-NEXT: vpextrq $1, %xmm0, %rax
+; NOVL-NEXT: vpextrb $8, %xmm0, %eax
; NOVL-NEXT: andl $1, %eax
; NOVL-NEXT: vcvtsi2ssl %eax, %xmm2, %xmm1
-; NOVL-NEXT: vmovq %xmm0, %rax
+; NOVL-NEXT: vpextrb $0, %xmm0, %eax
; NOVL-NEXT: andl $1, %eax
; NOVL-NEXT: vcvtsi2ssl %eax, %xmm2, %xmm0
; NOVL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
diff --git a/test/CodeGen/X86/avx512-ext.ll b/test/CodeGen/X86/avx512-ext.ll
index b31b00e54e83..2145f5fb09a8 100644
--- a/test/CodeGen/X86/avx512-ext.ll
+++ b/test/CodeGen/X86/avx512-ext.ll
@@ -1434,26 +1434,26 @@ define <8 x i32> @sext_8i1_8i32(<8 x i32> %a1, <8 x i32> %a2) nounwind {
define i16 @trunc_i32_to_i1(i32 %a) {
; KNL-LABEL: trunc_i32_to_i1:
; KNL: ## BB#0:
-; KNL-NEXT: andl $1, %edi
-; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: movw $-4, %ax
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: kshiftrw $1, %k1, %k1
-; KNL-NEXT: kshiftlw $1, %k1, %k1
-; KNL-NEXT: korw %k0, %k1, %k0
+; KNL-NEXT: kmovw %eax, %k0
+; KNL-NEXT: kshiftrw $1, %k0, %k0
+; KNL-NEXT: kshiftlw $1, %k0, %k0
+; KNL-NEXT: andl $1, %edi
+; KNL-NEXT: kmovw %edi, %k1
+; KNL-NEXT: korw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_i32_to_i1:
; SKX: ## BB#0:
-; SKX-NEXT: andl $1, %edi
-; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: movw $-4, %ax
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: kshiftrw $1, %k1, %k1
-; SKX-NEXT: kshiftlw $1, %k1, %k1
-; SKX-NEXT: korw %k0, %k1, %k0
+; SKX-NEXT: kmovd %eax, %k0
+; SKX-NEXT: kshiftrw $1, %k0, %k0
+; SKX-NEXT: kshiftlw $1, %k0, %k0
+; SKX-NEXT: andl $1, %edi
+; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: korw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; SKX-NEXT: retq
diff --git a/test/CodeGen/X86/avx512-fsel.ll b/test/CodeGen/X86/avx512-fsel.ll
index a9b8914ee1fe..7777ba795416 100644
--- a/test/CodeGen/X86/avx512-fsel.ll
+++ b/test/CodeGen/X86/avx512-fsel.ll
@@ -10,25 +10,11 @@ define i32 @test(float %a, float %b) {
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: Lcfi0:
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: vucomiss %xmm1, %xmm0
-; CHECK-NEXT: setp %al
-; CHECK-NEXT: setne %cl
-; CHECK-NEXT: setnp %dl
-; CHECK-NEXT: sete %sil
-; CHECK-NEXT: andb %dl, %sil
-; CHECK-NEXT: ## implicit-def: %EDI
-; CHECK-NEXT: movb %sil, %dil
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k0
-; CHECK-NEXT: orb %al, %cl
-; CHECK-NEXT: ## implicit-def: %EDI
-; CHECK-NEXT: movb %cl, %dil
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: kmovw %k1, %edi
-; CHECK-NEXT: movb %dil, %al
-; CHECK-NEXT: testb $1, %al
-; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
+; CHECK-NEXT: vcmpeqss %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: movb %al, %cl
+; CHECK-NEXT: xorb $-1, %cl
+; CHECK-NEXT: testb $1, %cl
; CHECK-NEXT: jne LBB0_1
; CHECK-NEXT: jmp LBB0_2
; CHECK-NEXT: LBB0_1: ## %L_0
diff --git a/test/CodeGen/X86/avx512-gather-scatter-intrin.ll b/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
index c03623a2f035..4890afec2164 100644
--- a/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
+++ b/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
@@ -852,16 +852,16 @@ define <16 x float> @gather_mask_test(<16 x i32> %ind, <16 x float> %src, i8* %b
; CHECK-NEXT: kxorw %k0, %k0, %k1
; CHECK-NEXT: vmovaps %zmm1, %zmm3
; CHECK-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm3 {%k1}
-; CHECK-NEXT: vaddps %zmm3, %zmm2, %zmm2
; CHECK-NEXT: movw $1, %ax
; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vmovaps %zmm1, %zmm3
-; CHECK-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm3 {%k1}
+; CHECK-NEXT: vmovaps %zmm1, %zmm4
+; CHECK-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm4 {%k1}
; CHECK-NEXT: movw $220, %ax
; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
-; CHECK-NEXT: vaddps %zmm3, %zmm1, %zmm0
-; CHECK-NEXT: vaddps %zmm2, %zmm0, %zmm0
+; CHECK-NEXT: vaddps %zmm3, %zmm2, %zmm0
+; CHECK-NEXT: vaddps %zmm4, %zmm1, %zmm1
+; CHECK-NEXT: vaddps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float> %src, i8* %base, <16 x i32>%ind, i16 -1, i32 4)
%res1 = call <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float> %src, i8* %base, <16 x i32>%ind, i16 0, i32 4)
diff --git a/test/CodeGen/X86/avx512-i1test.ll b/test/CodeGen/X86/avx512-i1test.ll
index 69fafdfff9aa..321f26674e1e 100755
--- a/test/CodeGen/X86/avx512-i1test.ll
+++ b/test/CodeGen/X86/avx512-i1test.ll
@@ -66,14 +66,13 @@ L_30: ; preds = %bb51, %L_10
define i64 @func2(i1 zeroext %i, i32 %j) {
; CHECK-LABEL: func2:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; CHECK-NEXT: testl %esi, %esi
; CHECK-NEXT: je .LBB1_1
; CHECK-NEXT: # BB#2: # %if.then
; CHECK-NEXT: jmp bar # TAILCALL
; CHECK-NEXT: .LBB1_1: # %return
-; CHECK-NEXT: orq $-2, %rdi
-; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: orq $-2, %rax
; CHECK-NEXT: retq
entry:
%tobool = icmp eq i32 %j, 0
diff --git a/test/CodeGen/X86/avx512-insert-extract.ll b/test/CodeGen/X86/avx512-insert-extract.ll
index 87928348a851..29a5325a0ae9 100644
--- a/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/test/CodeGen/X86/avx512-insert-extract.ll
@@ -260,8 +260,7 @@ define <16 x i32> @test11(<16 x i32>%a, <16 x i32>%b) {
; KNL-NEXT: kshiftlw $11, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: testb %al, %al
+; KNL-NEXT: testb $1, %al
; KNL-NEXT: je LBB10_2
; KNL-NEXT: ## BB#1: ## %A
; KNL-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -276,8 +275,7 @@ define <16 x i32> @test11(<16 x i32>%a, <16 x i32>%b) {
; SKX-NEXT: kshiftlw $11, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: testb %al, %al
+; SKX-NEXT: testb $1, %al
; SKX-NEXT: je LBB10_2
; SKX-NEXT: ## BB#1: ## %A
; SKX-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -299,13 +297,10 @@ define i64 @test12(<16 x i64>%a, <16 x i64>%b, i64 %a1, i64 %b1) {
; KNL-LABEL: test12:
; KNL: ## BB#0:
; KNL-NEXT: vpcmpgtq %zmm0, %zmm2, %k0
-; KNL-NEXT: vpcmpgtq %zmm1, %zmm3, %k1
-; KNL-NEXT: kunpckbw %k0, %k1, %k0
; KNL-NEXT: kshiftlw $15, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: testb %al, %al
+; KNL-NEXT: testb $1, %al
; KNL-NEXT: cmoveq %rsi, %rdi
; KNL-NEXT: movq %rdi, %rax
; KNL-NEXT: retq
@@ -313,13 +308,10 @@ define i64 @test12(<16 x i64>%a, <16 x i64>%b, i64 %a1, i64 %b1) {
; SKX-LABEL: test12:
; SKX: ## BB#0:
; SKX-NEXT: vpcmpgtq %zmm0, %zmm2, %k0
-; SKX-NEXT: vpcmpgtq %zmm1, %zmm3, %k1
-; SKX-NEXT: kunpckbw %k0, %k1, %k0
-; SKX-NEXT: kshiftlw $15, %k0, %k0
-; SKX-NEXT: kshiftrw $15, %k0, %k0
+; SKX-NEXT: kshiftlb $7, %k0, %k0
+; SKX-NEXT: kshiftrb $7, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: testb %al, %al
+; SKX-NEXT: testb $1, %al
; SKX-NEXT: cmoveq %rsi, %rdi
; SKX-NEXT: movq %rdi, %rax
; SKX-NEXT: vzeroupper
@@ -335,13 +327,13 @@ define i16 @test13(i32 %a, i32 %b) {
; KNL: ## BB#0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: setb %al
+; KNL-NEXT: movw $-4, %cx
+; KNL-NEXT: kmovw %ecx, %k0
+; KNL-NEXT: kshiftrw $1, %k0, %k0
+; KNL-NEXT: kshiftlw $1, %k0, %k0
; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k0
-; KNL-NEXT: movw $-4, %ax
; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: kshiftrw $1, %k1, %k1
-; KNL-NEXT: kshiftlw $1, %k1, %k1
-; KNL-NEXT: korw %k0, %k1, %k0
+; KNL-NEXT: korw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; KNL-NEXT: retq
@@ -350,13 +342,13 @@ define i16 @test13(i32 %a, i32 %b) {
; SKX: ## BB#0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
+; SKX-NEXT: movw $-4, %cx
+; SKX-NEXT: kmovd %ecx, %k0
+; SKX-NEXT: kshiftrw $1, %k0, %k0
+; SKX-NEXT: kshiftlw $1, %k0, %k0
; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: movw $-4, %ax
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: kshiftrw $1, %k1, %k1
-; SKX-NEXT: kshiftlw $1, %k1, %k1
-; SKX-NEXT: korw %k0, %k1, %k0
+; SKX-NEXT: kmovw %eax, %k1
+; SKX-NEXT: korw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; SKX-NEXT: retq
@@ -373,8 +365,7 @@ define i64 @test14(<8 x i64>%a, <8 x i64>%b, i64 %a1, i64 %b1) {
; KNL-NEXT: kshiftlw $11, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: testb %al, %al
+; KNL-NEXT: testb $1, %al
; KNL-NEXT: cmoveq %rsi, %rdi
; KNL-NEXT: movq %rdi, %rax
; KNL-NEXT: retq
@@ -385,8 +376,7 @@
; SKX-NEXT: kshiftlb $3, %k0, %k0
; SKX-NEXT: kshiftrb $7, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: testb %al, %al
+; SKX-NEXT: testb $1, %al
; SKX-NEXT: cmoveq %rsi, %rdi
; SKX-NEXT: movq %rdi, %rax
; SKX-NEXT: vzeroupper
@@ -424,14 +414,13 @@ define i16 @test15(i1 *%addr) {
define i16 @test16(i1 *%addr, i16 %a) {
; KNL-LABEL: test16:
; KNL: ## BB#0:
-; KNL-NEXT: movzbl (%rdi), %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: kmovw %esi, %k2
+; KNL-NEXT: movb (%rdi), %al
+; KNL-NEXT: kmovw %esi, %k1
+; KNL-NEXT: kmovw %eax, %k2
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,16,11,12,13,14,15]
-; KNL-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; KNL-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
; KNL-NEXT: vpslld $31, %zmm2, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -440,14 +429,13 @@ define i16 @test16(i1 *%addr, i16 %a) {
;
; SKX-LABEL: test16:
; SKX: ## BB#0:
-; SKX-NEXT: movzbl (%rdi), %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: kmovd %esi, %k1
+; SKX-NEXT: movb (%rdi), %al
+; SKX-NEXT: kmovd %esi, %k0
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpmovm2d %k1, %zmm0
; SKX-NEXT: vpmovm2d %k0, %zmm1
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,16,11,12,13,14,15]
-; SKX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; SKX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
; SKX-NEXT: vpmovd2m %zmm2, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
@@ -463,14 +451,13 @@ define i16 @test16(i1 *%addr, i16 %a) {
define i8 @test17(i1 *%addr, i8 %a) {
; KNL-LABEL: test17:
; KNL: ## BB#0:
-; KNL-NEXT: movzbl (%rdi), %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: kmovw %esi, %k2
+; KNL-NEXT: movb (%rdi), %al
+; KNL-NEXT: kmovw %esi, %k1
+; KNL-NEXT: kmovw %eax, %k2
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,8,5,6,7]
-; KNL-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; KNL-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -479,14 +466,13 @@ define i8 @test17(i1 *%addr, i8 %a) {
;
; SKX-LABEL: test17:
; SKX: ## BB#0:
-; SKX-NEXT: movzbl (%rdi), %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: kmovd %esi, %k1
+; SKX-NEXT: movb (%rdi), %al
+; SKX-NEXT: kmovd %esi, %k0
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpmovm2q %k1, %zmm0
; SKX-NEXT: vpmovm2q %k0, %zmm1
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,8,5,6,7]
-; SKX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; SKX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; SKX-NEXT: vpmovq2m %zmm2, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
@@ -1283,12 +1269,11 @@ define i32 @test_insertelement_v32i1(i32 %a, i32 %b, <32 x i32> %x , <32 x i32>
; SKX: ## BB#0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
-; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: vpcmpltud %zmm2, %zmm0, %k0
+; SKX-NEXT: vpcmpltud %zmm3, %zmm1, %k1
+; SKX-NEXT: kunpckwd %k0, %k1, %k0
+; SKX-NEXT: vpmovm2w %k0, %zmm0
; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: vpcmpltud %zmm2, %zmm0, %k1
-; SKX-NEXT: vpcmpltud %zmm3, %zmm1, %k2
-; SKX-NEXT: kunpckwd %k1, %k2, %k1
-; SKX-NEXT: vpmovm2w %k1, %zmm0
; SKX-NEXT: vpmovm2w %k0, %zmm1
; SKX-NEXT: vmovdqu16 {{.*#+}} zmm2 = [0,1,2,3,32,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
; SKX-NEXT: vpermi2w %zmm1, %zmm0, %zmm2
@@ -1308,33 +1293,29 @@ define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y)
; KNL: ## BB#0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: setb %al
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
-; KNL-NEXT: vpextrd $1, %xmm0, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k2
-; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
-; KNL-NEXT: vmovd %xmm0, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k2
-; KNL-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
+; KNL-NEXT: vpextrb $4, %xmm0, %ecx
+; KNL-NEXT: kmovw %ecx, %k1
+; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; KNL-NEXT: vpextrb $0, %xmm0, %ecx
+; KNL-NEXT: kmovw %ecx, %k1
+; KNL-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
; KNL-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
; KNL-NEXT: vpsllq $63, %zmm3, %zmm1
-; KNL-NEXT: vptestmq %zmm1, %zmm1, %k2
-; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
+; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
; KNL-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
; KNL-NEXT: vpsllq $63, %zmm3, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
-; KNL-NEXT: vpextrd $3, %xmm0, %eax
-; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: vpextrb $12, %xmm0, %eax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
@@ -1349,10 +1330,9 @@ define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y)
; SKX: ## BB#0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
-; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
+; SKX-NEXT: vpmovm2d %k0, %xmm0
; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: vpcmpltud %xmm1, %xmm0, %k1
-; SKX-NEXT: vpmovm2d %k1, %xmm0
; SKX-NEXT: vpmovm2d %k0, %xmm1
; SKX-NEXT: vpbroadcastq %xmm1, %xmm1
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
@@ -1373,16 +1353,14 @@ define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y)
; KNL: ## BB#0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: setb %al
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
-; KNL-NEXT: vmovq %xmm0, %rax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k2
-; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
+; KNL-NEXT: vpextrb $0, %xmm0, %ecx
+; KNL-NEXT: kmovw %ecx, %k1
+; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
; KNL-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
@@ -1396,13 +1374,12 @@ define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y)
; SKX: ## BB#0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: vpcmpltuq %xmm1, %xmm0, %k1
+; SKX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: kshiftlw $1, %k1, %k1
-; SKX-NEXT: kshiftrw $1, %k1, %k1
; SKX-NEXT: kshiftlw $1, %k0, %k0
-; SKX-NEXT: korw %k0, %k1, %k0
+; SKX-NEXT: kshiftrw $1, %k0, %k0
+; SKX-NEXT: korw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; SKX-NEXT: retq
@@ -1422,8 +1399,10 @@ define zeroext i8 @test_extractelement_v2i1(<2 x i64> %a, <2 x i64> %b) {
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
; KNL-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; KNL-NEXT: vpextrb $0, %xmm0, %eax
-; KNL-NEXT: addb $4, %al
-; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: andb $1, %al
+; KNL-NEXT: movb $4, %cl
+; KNL-NEXT: subb %al, %cl
+; KNL-NEXT: movzbl %cl, %eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_extractelement_v2i1:
@@ -1432,11 +1411,10 @@ define zeroext i8 @test_extractelement_v2i1(<2 x i64> %a, <2 x i64> %b) {
; SKX-NEXT: kshiftlw $15, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: cmpb $1, %al
-; SKX-NEXT: movb $3, %al
-; SKX-NEXT: adcb $0, %al
-; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: andb $1, %al
+; SKX-NEXT: movb $4, %cl
+; SKX-NEXT: subb %al, %cl
+; SKX-NEXT: movzbl %cl, %eax
; SKX-NEXT: retq
%t1 = icmp ugt <2 x i64> %a, %b
%t2 = extractelement <2 x i1> %t1, i32 0
@@ -1452,8 +1430,10 @@ define zeroext i8 @extractelement_v2i1_alt(<2 x i64> %a, <2 x i64> %b) {
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
; KNL-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; KNL-NEXT: vpextrb $0, %xmm0, %eax
-; KNL-NEXT: addb $4, %al
-; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: andb $1, %al
+; KNL-NEXT: movb $4, %cl
+; KNL-NEXT: subb %al, %cl
+; KNL-NEXT: movzbl %cl, %eax
; KNL-NEXT: retq
;
; SKX-LABEL: extractelement_v2i1_alt:
@@ -1462,11 +1442,10 @@ define zeroext i8 @extractelement_v2i1_alt(<2 x i64> %a, <2 x i64> %b) {
; SKX-NEXT: kshiftlw $15, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: cmpb $1, %al
-; SKX-NEXT: movb $3, %al
-; SKX-NEXT: adcb $0, %al
-; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: andb $1, %al
+; SKX-NEXT: movb $4, %cl
+; SKX-NEXT: subb %al, %cl
+; SKX-NEXT: movzbl %cl, %eax
; SKX-NEXT: retq
%t1 = icmp ugt <2 x i64> %a, %b
%t2 = extractelement <2 x i1> %t1, i32 0
@@ -1535,8 +1514,10 @@ define zeroext i8 @test_extractelement_v64i1(<64 x i8> %a, <64 x i8> %b) {
; KNL-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
; KNL-NEXT: vpextrb $15, %xmm0, %eax
-; KNL-NEXT: addb $4, %al
-; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: andb $1, %al
+; KNL-NEXT: movb $4, %cl
+; KNL-NEXT: subb %al, %cl
+; KNL-NEXT: movzbl %cl, %eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_extractelement_v64i1:
@@ -1544,11 +1525,10 @@ define zeroext i8 @test_extractelement_v64i1(<64 x i8> %a, <64 x i8> %b) {
; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
; SKX-NEXT: kshiftrq $63, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: cmpb $1, %al
-; SKX-NEXT: movb $3, %al
-; SKX-NEXT: adcb $0, %al
-; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: andb $1, %al
+; SKX-NEXT: movb $4, %cl
+; SKX-NEXT: subb %al, %cl
+; SKX-NEXT: movzbl %cl, %eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%t1 = icmp ugt <64 x i8> %a, %b
@@ -1566,8 +1546,10 @@ define zeroext i8 @extractelement_v64i1_alt(<64 x i8> %a, <64 x i8> %b) {
; KNL-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
; KNL-NEXT: vpextrb $15, %xmm0, %eax
-; KNL-NEXT: addb $4, %al
-; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: andb $1, %al
+; KNL-NEXT: movb $4, %cl
+; KNL-NEXT: subb %al, %cl
+; KNL-NEXT: movzbl %cl, %eax
; KNL-NEXT: retq
;
; SKX-LABEL: extractelement_v64i1_alt:
@@ -1575,11 +1557,10 @@ define zeroext i8 @extractelement_v64i1_alt(<64 x i8> %a, <64 x i8> %b) {
; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
; SKX-NEXT: kshiftrq $63, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: cmpb $1, %al
-; SKX-NEXT: movb $3, %al
-; SKX-NEXT: adcb $0, %al
-; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: andb $1, %al
+; SKX-NEXT: movb $4, %cl
+; SKX-NEXT: subb %al, %cl
+; SKX-NEXT: movzbl %cl, %eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%t1 = icmp ugt <64 x i8> %a, %b
@@ -2332,7 +2313,7 @@ define zeroext i8 @test_extractelement_varible_v2i1(<2 x i64> %a, <2 x i64> %b,
; SKX-NEXT: vpmovm2q %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
; SKX-NEXT: andl $1, %edi
-; SKX-NEXT: movl -24(%rsp,%rdi,8), %eax
+; SKX-NEXT: movzbl -24(%rsp,%rdi,8), %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: retq
%t1 = icmp ugt <2 x i64> %a, %b
@@ -2362,7 +2343,7 @@ define zeroext i8 @test_extractelement_varible_v4i1(<4 x i32> %a, <4 x i32> %b,
; SKX-NEXT: vpmovm2d %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
; SKX-NEXT: andl $3, %edi
-; SKX-NEXT: movl -24(%rsp,%rdi,4), %eax
+; SKX-NEXT: movzbl -24(%rsp,%rdi,4), %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: retq
%t1 = icmp ugt <4 x i32> %a, %b
@@ -2391,7 +2372,7 @@ define zeroext i8 @test_extractelement_varible_v8i1(<8 x i32> %a, <8 x i32> %b,
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vmovdqa64 %zmm0, (%rsp)
; KNL-NEXT: andl $7, %edi
-; KNL-NEXT: movl (%rsp,%rdi,8), %eax
+; KNL-NEXT: movzbl (%rsp,%rdi,8), %eax
; KNL-NEXT: andl $1, %eax
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
@@ -2414,7 +2395,7 @@ define zeroext i8 @test_extractelement_varible_v8i1(<8 x i32> %a, <8 x i32> %b,
; SKX-NEXT: vpmovm2q %k0, %zmm0
; SKX-NEXT: vmovdqa64 %zmm0, (%rsp)
; SKX-NEXT: andl $7, %edi
-; SKX-NEXT: movl (%rsp,%rdi,8), %eax
+; SKX-NEXT: movzbl (%rsp,%rdi,8), %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: movq %rbp, %rsp
; SKX-NEXT: popq %rbp
@@ -2444,7 +2425,7 @@ define zeroext i8 @test_extractelement_varible_v16i1(<16 x i32> %a, <16 x i32> %
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vmovdqa32 %zmm0, (%rsp)
; KNL-NEXT: andl $15, %edi
-; KNL-NEXT: movl (%rsp,%rdi,4), %eax
+; KNL-NEXT: movzbl (%rsp,%rdi,4), %eax
; KNL-NEXT: andl $1, %eax
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
@@ -2467,7 +2448,7 @@ define zeroext i8 @test_extractelement_varible_v16i1(<16 x i32> %a, <16 x i32> %
; SKX-NEXT: vpmovm2d %k0, %zmm0
; SKX-NEXT: vmovdqa32 %zmm0, (%rsp)
; SKX-NEXT: andl $15, %edi
-; SKX-NEXT: movl (%rsp,%rdi,4), %eax
+; SKX-NEXT: movzbl (%rsp,%rdi,4), %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: movq %rbp, %rsp
; SKX-NEXT: popq %rbp
@@ -2500,9 +2481,8 @@ define zeroext i8 @test_extractelement_varible_v32i1(<32 x i8> %a, <32 x i8> %b,
; KNL-NEXT: vmovdqa %ymm0, (%rsp)
; KNL-NEXT: andl $31, %edi
; KNL-NEXT: movq %rsp, %rax
-; KNL-NEXT: movb (%rdi,%rax), %al
-; KNL-NEXT: andb $1, %al
-; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: movzbl (%rdi,%rax), %eax
+; KNL-NEXT: andl $1, %eax
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
@@ -2524,7 +2504,7 @@ define zeroext i8 @test_extractelement_varible_v32i1(<32 x i8> %a, <32 x i8> %b,
; SKX-NEXT: vpmovm2w %k0, %zmm0
; SKX-NEXT: vmovdqu16 %zmm0, (%rsp)
; SKX-NEXT: andl $31, %edi
-; SKX-NEXT: movzwl (%rsp,%rdi,2), %eax
+; SKX-NEXT: movzbl (%rsp,%rdi,2), %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: movq %rbp, %rsp
; SKX-NEXT: popq %rbp
diff --git a/test/CodeGen/X86/avx512-insert-extract_i1.ll b/test/CodeGen/X86/avx512-insert-extract_i1.ll
index a1d1a7dae190..a099b80898ee 100644
--- a/test/CodeGen/X86/avx512-insert-extract_i1.ll
+++ b/test/CodeGen/X86/avx512-insert-extract_i1.ll
@@ -22,9 +22,8 @@ define zeroext i8 @test_extractelement_varible_v64i1(<64 x i8> %a, <64 x i8> %b,
; SKX-NEXT: vmovdqu8 %zmm0, (%rsp)
; SKX-NEXT: andl $63, %edi
; SKX-NEXT: movq %rsp, %rax
-; SKX-NEXT: movb (%rdi,%rax), %al
-; SKX-NEXT: andb $1, %al
-; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: movzbl (%rdi,%rax), %eax
+; SKX-NEXT: andl $1, %eax
; SKX-NEXT: movq %rbp, %rsp
; SKX-NEXT: popq %rbp
; SKX-NEXT: vzeroupper
diff --git a/test/CodeGen/X86/avx512-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
index 56962ca2671d..32da0a70218e 100644
--- a/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
@@ -9,8 +9,8 @@ define <16 x float>
@test_x86_vbroadcast_ss_ps_512(<4 x float> %a0, <16 x float> ; CHECK-NEXT: vbroadcastss %xmm0, %zmm2 ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vbroadcastss %xmm0, %zmm1 {%k1} -; CHECK-NEXT: vaddps %zmm1, %zmm2, %zmm1 ; CHECK-NEXT: vbroadcastss %xmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: vaddps %zmm1, %zmm2, %zmm1 ; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0 ; CHECK-NEXT: retq @@ -30,8 +30,8 @@ define <8 x double> @test_x86_vbroadcast_sd_pd_512(<2 x double> %a0, <8 x double ; CHECK-NEXT: vbroadcastsd %xmm0, %zmm2 ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vbroadcastsd %xmm0, %zmm1 {%k1} -; CHECK-NEXT: vaddpd %zmm1, %zmm2, %zmm1 ; CHECK-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: vaddpd %zmm1, %zmm2, %zmm1 ; CHECK-NEXT: vaddpd %zmm1, %zmm0, %zmm0 ; CHECK-NEXT: retq @@ -51,8 +51,8 @@ define <16 x i32>@test_int_x86_avx512_pbroadcastd_512(<4 x i32> %x0, <16 x i32> ; CHECK-NEXT: vpbroadcastd %xmm0, %zmm2 ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vpbroadcastd %xmm0, %zmm1 {%k1} -; CHECK-NEXT: vpaddd %zmm1, %zmm2, %zmm1 ; CHECK-NEXT: vpbroadcastd %xmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: vpaddd %zmm1, %zmm2, %zmm1 ; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; CHECK-NEXT: retq %res = call <16 x i32> @llvm.x86.avx512.pbroadcastd.512(<4 x i32> %x0, <16 x i32> %x1, i16 -1) @@ -71,8 +71,8 @@ define <8 x i64>@test_int_x86_avx512_pbroadcastq_512(<2 x i64> %x0, <8 x i64> %x ; CHECK-NEXT: vpbroadcastq %xmm0, %zmm2 ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vpbroadcastq %xmm0, %zmm1 {%k1} -; CHECK-NEXT: vpaddq %zmm1, %zmm2, %zmm1 ; CHECK-NEXT: vpbroadcastq %xmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: vpaddq %zmm1, %zmm2, %zmm1 ; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; CHECK-NEXT: retq %res = call <8 x i64> @llvm.x86.avx512.pbroadcastq.512(<2 x i64> %x0, <8 x i64> %x1,i8 -1) @@ -91,8 +91,8 @@ define <16 x float>@test_int_x86_avx512_mask_movsldup_512(<16 x float> %x0, <16 ; CHECK-NEXT: vmovsldup {{.*#+}} zmm2 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmovsldup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] -; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm1 ; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm1 ; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0 ; CHECK-NEXT: retq %res = call <16 x float> @llvm.x86.avx512.mask.movsldup.512(<16 x float> %x0, <16 x float> %x1, i16 %x2) @@ -111,8 +111,8 @@ define <16 x float>@test_int_x86_avx512_mask_movshdup_512(<16 x float> %x0, <16 ; CHECK-NEXT: vmovshdup {{.*#+}} zmm2 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmovshdup {{.*#+}} zmm1 {%k1} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] -; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm1 ; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm1 ; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0 ; CHECK-NEXT: retq %res = call <16 x float> @llvm.x86.avx512.mask.movshdup.512(<16 x float> %x0, <16 x float> %x1, i16 %x2) @@ -131,8 +131,8 @@ define <8 x double>@test_int_x86_avx512_mask_movddup_512(<8 x double> %x0, <8 x ; CHECK-NEXT: vmovddup {{.*#+}} zmm2 = zmm0[0,0,2,2,4,4,6,6] ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmovddup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6] -; CHECK-NEXT: vaddpd %zmm2, %zmm1, %zmm1 ; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: vaddpd %zmm2, %zmm1, %zmm1 ; CHECK-NEXT: vaddpd %zmm1, %zmm0, %zmm0 ; CHECK-NEXT: retq %res = 
call <8 x double> @llvm.x86.avx512.mask.movddup.512(<8 x double> %x0, <8 x double> %x1, i8 %x2) @@ -671,9 +671,9 @@ define <8 x i64>@test_int_x86_avx512_mask_punpcklqd_q_512(<8 x i64> %x0, <8 x i6 ; CHECK-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vpunpcklqdq {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] -; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm2 ; CHECK-NEXT: vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] -; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0 +; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm1 +; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; CHECK-NEXT: retq %res = call <8 x i64> @llvm.x86.avx512.mask.punpcklqd.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) %res1 = call <8 x i64> @llvm.x86.avx512.mask.punpcklqd.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1) @@ -1616,9 +1616,9 @@ define <8 x double>@test_int_x86_avx512_mask_shuf_pd_512(<8 x double> %x0, <8 x ; CHECK-NEXT: vshufpd {{.*#+}} zmm3 = zmm0[0],zmm1[1],zmm0[3],zmm1[2],zmm0[5],zmm1[4],zmm0[6],zmm1[6] ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vshufpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[1],zmm0[3],zmm1[2],zmm0[5],zmm1[4],zmm0[6],zmm1[6] -; CHECK-NEXT: vaddpd %zmm3, %zmm2, %zmm2 ; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[1],zmm0[3],zmm1[2],zmm0[5],zmm1[4],zmm0[6],zmm1[6] -; CHECK-NEXT: vaddpd %zmm0, %zmm2, %zmm0 +; CHECK-NEXT: vaddpd %zmm3, %zmm2, %zmm1 +; CHECK-NEXT: vaddpd %zmm0, %zmm1, %zmm0 ; CHECK-NEXT: retq %res = call <8 x double> @llvm.x86.avx512.mask.shuf.pd.512(<8 x double> %x0, <8 x double> %x1, i32 22, <8 x double> %x3, i8 %x4) %res1 = call <8 x double> @llvm.x86.avx512.mask.shuf.pd.512(<8 x double> %x0, <8 x double> %x1, i32 22, <8 x double> %x3, i8 -1) @@ -2031,8 +2031,8 @@ define <8 x i64>@test_int_x86_avx512_mask_psrl_qi_512(<8 x i64> %x0, i32 %x1, <8 ; CHECK-NEXT: vpsrlq $4, %zmm0, %zmm2 ; CHECK-NEXT: kmovw %esi, %k1 ; CHECK-NEXT: vpsrlq $4, %zmm0, %zmm1 {%k1} -; CHECK-NEXT: vpaddq %zmm2, %zmm1, %zmm1 ; CHECK-NEXT: vpsrlq $4, %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: vpaddq %zmm2, %zmm1, %zmm1 ; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0 ; CHECK-NEXT: retq %res = call <8 x i64> @llvm.x86.avx512.mask.psrl.qi.512(<8 x i64> %x0, i32 4, <8 x i64> %x2, i8 %x3) @@ -2051,8 +2051,8 @@ define <16 x i32>@test_int_x86_avx512_mask_psrl_di_512(<16 x i32> %x0, i32 %x1, ; CHECK-NEXT: vpsrld $4, %zmm0, %zmm2 ; CHECK-NEXT: kmovw %esi, %k1 ; CHECK-NEXT: vpsrld $4, %zmm0, %zmm1 {%k1} -; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1 ; CHECK-NEXT: vpsrld $4, %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1 ; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0 ; CHECK-NEXT: retq %res = call <16 x i32> @llvm.x86.avx512.mask.psrl.di.512(<16 x i32> %x0, i32 4, <16 x i32> %x2, i16 %x3) @@ -2651,8 +2651,8 @@ define <16 x float>@test_int_x86_avx512_mask_vpermilvar_ps_512_constant_pool(<16 ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vpermilps {{.*#+}} zmm2 {%k1} = zmm0[2,3,0,1,7,6,5,4,9,8,11,10,12,13,14,15] ; CHECK-NEXT: vpermilps {{.*#+}} zmm1 {%k1} {z} = zmm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15] -; CHECK-NEXT: vaddps %zmm1, %zmm2, %zmm1 ; CHECK-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12] +; CHECK-NEXT: vaddps %zmm1, %zmm2, %zmm1 ; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0 ; CHECK-NEXT: retq %res = call <16 x float> @llvm.x86.avx512.mask.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 3, i32 
2, i32 1, i32 0, i32 1, i32 0, i32 3, i32 2, i32 0, i32 1, i32 2, i32 3>, <16 x float> %x2, i16 %x3) @@ -2881,23 +2881,23 @@ define <4 x float> @test_mask_vextractf32x4(<4 x float> %b, <16 x float> %a, i8 ; CHECK-LABEL: test_mask_vextractf32x4: ; CHECK: ## BB#0: ; CHECK-NEXT: vextractf32x4 $2, %zmm1, %xmm1 -; CHECK-NEXT: kmovw %edi, %k1 -; CHECK-NEXT: kshiftlw $12, %k1, %k0 -; CHECK-NEXT: kshiftrw $15, %k0, %k0 -; CHECK-NEXT: kshiftlw $13, %k1, %k2 +; CHECK-NEXT: kmovw %edi, %k0 +; CHECK-NEXT: kshiftlw $12, %k0, %k1 +; CHECK-NEXT: kshiftrw $15, %k1, %k1 +; CHECK-NEXT: kshiftlw $13, %k0, %k2 ; CHECK-NEXT: kshiftrw $15, %k2, %k2 -; CHECK-NEXT: kshiftlw $15, %k1, %k3 +; CHECK-NEXT: kshiftlw $15, %k0, %k3 ; CHECK-NEXT: kshiftrw $15, %k3, %k3 -; CHECK-NEXT: kshiftlw $14, %k1, %k1 -; CHECK-NEXT: kshiftrw $15, %k1, %k1 -; CHECK-NEXT: kmovw %k1, %eax +; CHECK-NEXT: kshiftlw $14, %k0, %k0 +; CHECK-NEXT: kshiftrw $15, %k0, %k0 +; CHECK-NEXT: kmovw %k0, %eax ; CHECK-NEXT: kmovw %k3, %ecx ; CHECK-NEXT: vmovd %ecx, %xmm2 -; CHECK-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2 +; CHECK-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 ; CHECK-NEXT: kmovw %k2, %eax -; CHECK-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2 -; CHECK-NEXT: kmovw %k0, %eax -; CHECK-NEXT: vpinsrd $3, %eax, %xmm2, %xmm2 +; CHECK-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 +; CHECK-NEXT: kmovw %k1, %eax +; CHECK-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 ; CHECK-NEXT: vpslld $31, %xmm2, %xmm2 ; CHECK-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 ; CHECK-NEXT: retq @@ -2911,23 +2911,23 @@ define <4 x i64> @test_mask_vextracti64x4(<4 x i64> %b, <8 x i64> %a, i8 %mask) ; CHECK-LABEL: test_mask_vextracti64x4: ; CHECK: ## BB#0: ; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm1 -; CHECK-NEXT: kmovw %edi, %k1 -; CHECK-NEXT: kshiftlw $12, %k1, %k0 -; CHECK-NEXT: kshiftrw $15, %k0, %k0 -; CHECK-NEXT: kshiftlw $13, %k1, %k2 +; CHECK-NEXT: kmovw %edi, %k0 +; CHECK-NEXT: kshiftlw $12, %k0, %k1 +; CHECK-NEXT: kshiftrw $15, %k1, %k1 +; CHECK-NEXT: kshiftlw $13, %k0, %k2 ; CHECK-NEXT: kshiftrw $15, %k2, %k2 -; CHECK-NEXT: kshiftlw $15, %k1, %k3 +; CHECK-NEXT: kshiftlw $15, %k0, %k3 ; CHECK-NEXT: kshiftrw $15, %k3, %k3 -; CHECK-NEXT: kshiftlw $14, %k1, %k1 -; CHECK-NEXT: kshiftrw $15, %k1, %k1 -; CHECK-NEXT: kmovw %k1, %eax +; CHECK-NEXT: kshiftlw $14, %k0, %k0 +; CHECK-NEXT: kshiftrw $15, %k0, %k0 +; CHECK-NEXT: kmovw %k0, %eax ; CHECK-NEXT: kmovw %k3, %ecx ; CHECK-NEXT: vmovd %ecx, %xmm2 -; CHECK-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2 +; CHECK-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 ; CHECK-NEXT: kmovw %k2, %eax -; CHECK-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2 -; CHECK-NEXT: kmovw %k0, %eax -; CHECK-NEXT: vpinsrd $3, %eax, %xmm2, %xmm2 +; CHECK-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 +; CHECK-NEXT: kmovw %k1, %eax +; CHECK-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 ; CHECK-NEXT: vpslld $31, %xmm2, %xmm2 ; CHECK-NEXT: vpmovsxdq %xmm2, %ymm2 ; CHECK-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 @@ -2942,23 +2942,23 @@ define <4 x i32> @test_maskz_vextracti32x4(<16 x i32> %a, i8 %mask) { ; CHECK-LABEL: test_maskz_vextracti32x4: ; CHECK: ## BB#0: ; CHECK-NEXT: vextracti32x4 $2, %zmm0, %xmm0 -; CHECK-NEXT: kmovw %edi, %k1 -; CHECK-NEXT: kshiftlw $12, %k1, %k0 -; CHECK-NEXT: kshiftrw $15, %k0, %k0 -; CHECK-NEXT: kshiftlw $13, %k1, %k2 +; CHECK-NEXT: kmovw %edi, %k0 +; CHECK-NEXT: kshiftlw $12, %k0, %k1 +; CHECK-NEXT: kshiftrw $15, %k1, %k1 +; CHECK-NEXT: kshiftlw $13, %k0, %k2 ; CHECK-NEXT: kshiftrw $15, %k2, %k2 -; CHECK-NEXT: kshiftlw $15, %k1, %k3 +; CHECK-NEXT: kshiftlw $15, %k0, %k3 ; CHECK-NEXT: kshiftrw $15, %k3, 
%k3 -; CHECK-NEXT: kshiftlw $14, %k1, %k1 -; CHECK-NEXT: kshiftrw $15, %k1, %k1 -; CHECK-NEXT: kmovw %k1, %eax +; CHECK-NEXT: kshiftlw $14, %k0, %k0 +; CHECK-NEXT: kshiftrw $15, %k0, %k0 +; CHECK-NEXT: kmovw %k0, %eax ; CHECK-NEXT: kmovw %k3, %ecx ; CHECK-NEXT: vmovd %ecx, %xmm1 -; CHECK-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1 +; CHECK-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 ; CHECK-NEXT: kmovw %k2, %eax -; CHECK-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1 -; CHECK-NEXT: kmovw %k0, %eax -; CHECK-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1 +; CHECK-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 +; CHECK-NEXT: kmovw %k1, %eax +; CHECK-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 ; CHECK-NEXT: vpslld $31, %xmm1, %xmm1 ; CHECK-NEXT: vpsrad $31, %xmm1, %xmm1 ; CHECK-NEXT: vpand %xmm0, %xmm1, %xmm0 @@ -2989,9 +2989,9 @@ define <16 x float>@test_int_x86_avx512_mask_insertf32x4_512(<16 x float> %x0, < ; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm3 ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm2 {%k1} -; CHECK-NEXT: vaddps %zmm3, %zmm2, %zmm2 ; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm0 {%k1} {z} -; CHECK-NEXT: vaddps %zmm2, %zmm0, %zmm0 +; CHECK-NEXT: vaddps %zmm3, %zmm2, %zmm1 +; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0 ; CHECK-NEXT: retq %res = call <16 x float> @llvm.x86.avx512.mask.insertf32x4.512(<16 x float> %x0, <4 x float> %x1, i32 1, <16 x float> %x3, i16 %x4) %res1 = call <16 x float> @llvm.x86.avx512.mask.insertf32x4.512(<16 x float> %x0, <4 x float> %x1, i32 1, <16 x float> %x3, i16 -1) @@ -3010,9 +3010,9 @@ define <16 x i32>@test_int_x86_avx512_mask_inserti32x4_512(<16 x i32> %x0, <4 x ; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm3 ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm2 {%k1} -; CHECK-NEXT: vpaddd %zmm3, %zmm2, %zmm2 ; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm0 {%k1} {z} -; CHECK-NEXT: vpaddd %zmm2, %zmm0, %zmm0 +; CHECK-NEXT: vpaddd %zmm3, %zmm2, %zmm1 +; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; CHECK-NEXT: retq %res = call <16 x i32> @llvm.x86.avx512.mask.inserti32x4.512(<16 x i32> %x0, <4 x i32> %x1, i32 1, <16 x i32> %x3, i16 %x4) %res1 = call <16 x i32> @llvm.x86.avx512.mask.inserti32x4.512(<16 x i32> %x0, <4 x i32> %x1, i32 1, <16 x i32> %x3, i16 -1) @@ -3030,9 +3030,9 @@ define <8 x double>@test_int_x86_avx512_mask_insertf64x4_512(<8 x double> %x0, < ; CHECK-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm3 ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm2 {%k1} -; CHECK-NEXT: vaddpd %zmm3, %zmm2, %zmm2 ; CHECK-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 {%k1} {z} -; CHECK-NEXT: vaddpd %zmm2, %zmm0, %zmm0 +; CHECK-NEXT: vaddpd %zmm3, %zmm2, %zmm1 +; CHECK-NEXT: vaddpd %zmm1, %zmm0, %zmm0 ; CHECK-NEXT: retq %res = call <8 x double> @llvm.x86.avx512.mask.insertf64x4.512(<8 x double> %x0, <4 x double> %x1, i32 1, <8 x double> %x3, i8 %x4) %res1 = call <8 x double> @llvm.x86.avx512.mask.insertf64x4.512(<8 x double> %x0, <4 x double> %x1, i32 1, <8 x double> %x3, i8 -1) @@ -3050,9 +3050,9 @@ define <8 x i64>@test_int_x86_avx512_mask_inserti64x4_512(<8 x i64> %x0, <4 x i6 ; CHECK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm3 ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm2 {%k1} -; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm2 ; CHECK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 {%k1} {z} -; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0 +; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm1 +; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; CHECK-NEXT: retq %res = call <8 x i64> 
@llvm.x86.avx512.mask.inserti64x4.512(<8 x i64> %x0, <4 x i64> %x1, i32 1, <8 x i64> %x3, i8 %x4) %res1 = call <8 x i64> @llvm.x86.avx512.mask.inserti64x4.512(<8 x i64> %x0, <4 x i64> %x1, i32 1, <8 x i64> %x3, i8 -1) diff --git a/test/CodeGen/X86/avx512-intrinsics.ll b/test/CodeGen/X86/avx512-intrinsics.ll index f800d01064ba..563cad04b8c2 100644 --- a/test/CodeGen/X86/avx512-intrinsics.ll +++ b/test/CodeGen/X86/avx512-intrinsics.ll @@ -112,6 +112,8 @@ define i16 @unpckbw_test(i16 %a0, i16 %a1) { } declare i16 @llvm.x86.avx512.kxnor.w(i16, i16) nounwind readnone +; TODO: the two kxnor instructions here are a no-op and should be eliminated, +; probably by FoldConstantArithmetic in SelectionDAG. define i16 @test_kxnor(i16 %a0, i16 %a1) { ; CHECK-LABEL: test_kxnor: ; CHECK: ## BB#0: @@ -121,6 +123,8 @@ define i16 @test_kxnor(i16 %a0, i16 %a1) { ; CHECK-NEXT: kmovw %eax, %k2 ; CHECK-NEXT: kxorw %k0, %k1, %k0 ; CHECK-NEXT: kxorw %k0, %k2, %k0 +; CHECK-NEXT: kxnorw %k0, %k0, %k1 +; CHECK-NEXT: kxnorw %k1, %k0, %k0 ; CHECK-NEXT: kmovw %k0, %eax ; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill> ; CHECK-NEXT: retq @@ -269,16 +273,15 @@ declare <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float>, <4 x float>, <4 x define <4 x float> @test_sqrt_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) { ; CHECK-LABEL: test_sqrt_ss: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmovaps %xmm2, %xmm3 ; CHECK-NEXT: vsqrtss %xmm1, %xmm0, %xmm3 {%k1} ; CHECK-NEXT: vsqrtss {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1} -; CHECK-NEXT: vaddps %xmm2, %xmm3, %xmm2 -; CHECK-NEXT: vsqrtss {ru-sae}, %xmm1, %xmm0, %xmm3 {%k1} {z} +; CHECK-NEXT: vsqrtss {ru-sae}, %xmm1, %xmm0, %xmm4 {%k1} {z} ; CHECK-NEXT: vsqrtss {rz-sae}, %xmm1, %xmm0, %xmm0 -; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0 -; CHECK-NEXT: vaddps %xmm0, %xmm2, %xmm0 +; CHECK-NEXT: vaddps %xmm2, %xmm3, %xmm1 +; CHECK-NEXT: vaddps %xmm0, %xmm4, %xmm0 +; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0 ; CHECK-NEXT: retq %res0 = call <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 4) %res1 = call <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 1) @@ -296,16 +299,15 @@ declare <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double>, <2 x double>, < define <2 x double> @test_sqrt_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) { ; CHECK-LABEL: test_sqrt_sd: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmovapd %xmm2, %xmm3 ; CHECK-NEXT: vsqrtsd %xmm1, %xmm0, %xmm3 {%k1} ; CHECK-NEXT: vsqrtsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1} -; CHECK-NEXT: vaddpd %xmm2, %xmm3, %xmm2 -; CHECK-NEXT: vsqrtsd {ru-sae}, %xmm1, %xmm0, %xmm3 {%k1} {z} +; CHECK-NEXT: vsqrtsd {ru-sae}, %xmm1, %xmm0, %xmm4 {%k1} {z} ; CHECK-NEXT: vsqrtsd {rz-sae}, %xmm1, %xmm0, %xmm0 -; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0 -; CHECK-NEXT: vaddpd %xmm0, %xmm2, %xmm0 +; CHECK-NEXT: vaddpd %xmm2, %xmm3, %xmm1 +; CHECK-NEXT: vaddpd %xmm0, %xmm4, %xmm0 +; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0 ; CHECK-NEXT: retq %res0 = call <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 4) %res1 = call <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 1) @@ -477,11 +479,11 @@ declare i64 @llvm.x86.avx512.cvttss2usi64(<4 x float>, i32) nounwind readnone define i64 @test_x86_avx512_cvtsd2usi64(<2 
x double> %a0) { ; CHECK-LABEL: test_x86_avx512_cvtsd2usi64: ; CHECK: ## BB#0: -; CHECK-NEXT: vcvtsd2usi %xmm0, %rax -; CHECK-NEXT: vcvtsd2usi {rz-sae}, %xmm0, %rcx -; CHECK-NEXT: addq %rax, %rcx -; CHECK-NEXT: vcvtsd2usi {rd-sae}, %xmm0, %rax +; CHECK-NEXT: vcvtsd2usi %xmm0, %rcx +; CHECK-NEXT: vcvtsd2usi {rz-sae}, %xmm0, %rax +; CHECK-NEXT: vcvtsd2usi {rd-sae}, %xmm0, %rdx ; CHECK-NEXT: addq %rcx, %rax +; CHECK-NEXT: addq %rdx, %rax ; CHECK-NEXT: retq %res = call i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double> %a0, i32 4) @@ -496,11 +498,11 @@ declare i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double>, i32) nounwind readnone define i64 @test_x86_avx512_cvtsd2si64(<2 x double> %a0) { ; CHECK-LABEL: test_x86_avx512_cvtsd2si64: ; CHECK: ## BB#0: -; CHECK-NEXT: vcvtsd2si %xmm0, %rax -; CHECK-NEXT: vcvtsd2si {rz-sae}, %xmm0, %rcx -; CHECK-NEXT: addq %rax, %rcx -; CHECK-NEXT: vcvtsd2si {rd-sae}, %xmm0, %rax +; CHECK-NEXT: vcvtsd2si %xmm0, %rcx +; CHECK-NEXT: vcvtsd2si {rz-sae}, %xmm0, %rax +; CHECK-NEXT: vcvtsd2si {rd-sae}, %xmm0, %rdx ; CHECK-NEXT: addq %rcx, %rax +; CHECK-NEXT: addq %rdx, %rax ; CHECK-NEXT: retq %res = call i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double> %a0, i32 4) @@ -515,11 +517,11 @@ declare i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double>, i32) nounwind readnone define i64 @test_x86_avx512_cvtss2usi64(<4 x float> %a0) { ; CHECK-LABEL: test_x86_avx512_cvtss2usi64: ; CHECK: ## BB#0: -; CHECK-NEXT: vcvtss2usi %xmm0, %rax -; CHECK-NEXT: vcvtss2usi {rz-sae}, %xmm0, %rcx -; CHECK-NEXT: addq %rax, %rcx -; CHECK-NEXT: vcvtss2usi {rd-sae}, %xmm0, %rax +; CHECK-NEXT: vcvtss2usi %xmm0, %rcx +; CHECK-NEXT: vcvtss2usi {rz-sae}, %xmm0, %rax +; CHECK-NEXT: vcvtss2usi {rd-sae}, %xmm0, %rdx ; CHECK-NEXT: addq %rcx, %rax +; CHECK-NEXT: addq %rdx, %rax ; CHECK-NEXT: retq %res = call i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float> %a0, i32 4) @@ -534,11 +536,11 @@ declare i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float>, i32) nounwind readnone define i64 @test_x86_avx512_cvtss2si64(<4 x float> %a0) { ; CHECK-LABEL: test_x86_avx512_cvtss2si64: ; CHECK: ## BB#0: -; CHECK-NEXT: vcvtss2si %xmm0, %rax -; CHECK-NEXT: vcvtss2si {rz-sae}, %xmm0, %rcx -; CHECK-NEXT: addq %rax, %rcx -; CHECK-NEXT: vcvtss2si {rd-sae}, %xmm0, %rax +; CHECK-NEXT: vcvtss2si %xmm0, %rcx +; CHECK-NEXT: vcvtss2si {rz-sae}, %xmm0, %rax +; CHECK-NEXT: vcvtss2si {rd-sae}, %xmm0, %rdx ; CHECK-NEXT: addq %rcx, %rax +; CHECK-NEXT: addq %rdx, %rax ; CHECK-NEXT: retq %res = call i64 @llvm.x86.avx512.vcvtss2si64(<4 x float> %a0, i32 4) @@ -553,11 +555,11 @@ declare i64 @llvm.x86.avx512.vcvtss2si64(<4 x float>, i32) nounwind readnone define i32 @test_x86_avx512_cvtsd2usi32(<2 x double> %a0) { ; CHECK-LABEL: test_x86_avx512_cvtsd2usi32: ; CHECK: ## BB#0: -; CHECK-NEXT: vcvtsd2usi %xmm0, %eax -; CHECK-NEXT: vcvtsd2usi {rz-sae}, %xmm0, %ecx -; CHECK-NEXT: addl %eax, %ecx -; CHECK-NEXT: vcvtsd2usi {rd-sae}, %xmm0, %eax +; CHECK-NEXT: vcvtsd2usi %xmm0, %ecx +; CHECK-NEXT: vcvtsd2usi {rz-sae}, %xmm0, %eax +; CHECK-NEXT: vcvtsd2usi {rd-sae}, %xmm0, %edx ; CHECK-NEXT: addl %ecx, %eax +; CHECK-NEXT: addl %edx, %eax ; CHECK-NEXT: retq %res = call i32 @llvm.x86.avx512.vcvtsd2usi32(<2 x double> %a0, i32 4) @@ -572,11 +574,11 @@ declare i32 @llvm.x86.avx512.vcvtsd2usi32(<2 x double>, i32) nounwind readnone define i32 @test_x86_avx512_cvtsd2si32(<2 x double> %a0) { ; CHECK-LABEL: test_x86_avx512_cvtsd2si32: ; CHECK: ## BB#0: -; CHECK-NEXT: vcvtsd2si %xmm0, %eax -; CHECK-NEXT: vcvtsd2si {rz-sae}, %xmm0, %ecx -; CHECK-NEXT: addl %eax, %ecx -; CHECK-NEXT: 
vcvtsd2si {rd-sae}, %xmm0, %eax +; CHECK-NEXT: vcvtsd2si %xmm0, %ecx +; CHECK-NEXT: vcvtsd2si {rz-sae}, %xmm0, %eax +; CHECK-NEXT: vcvtsd2si {rd-sae}, %xmm0, %edx ; CHECK-NEXT: addl %ecx, %eax +; CHECK-NEXT: addl %edx, %eax ; CHECK-NEXT: retq %res = call i32 @llvm.x86.avx512.vcvtsd2si32(<2 x double> %a0, i32 4) @@ -591,11 +593,11 @@ declare i32 @llvm.x86.avx512.vcvtsd2si32(<2 x double>, i32) nounwind readnone define i32 @test_x86_avx512_cvtss2usi32(<4 x float> %a0) { ; CHECK-LABEL: test_x86_avx512_cvtss2usi32: ; CHECK: ## BB#0: -; CHECK-NEXT: vcvtss2usi %xmm0, %eax -; CHECK-NEXT: vcvtss2usi {rz-sae}, %xmm0, %ecx -; CHECK-NEXT: addl %eax, %ecx -; CHECK-NEXT: vcvtss2usi {rd-sae}, %xmm0, %eax +; CHECK-NEXT: vcvtss2usi %xmm0, %ecx +; CHECK-NEXT: vcvtss2usi {rz-sae}, %xmm0, %eax +; CHECK-NEXT: vcvtss2usi {rd-sae}, %xmm0, %edx ; CHECK-NEXT: addl %ecx, %eax +; CHECK-NEXT: addl %edx, %eax ; CHECK-NEXT: retq %res = call i32 @llvm.x86.avx512.vcvtss2usi32(<4 x float> %a0, i32 4) @@ -610,11 +612,11 @@ declare i32 @llvm.x86.avx512.vcvtss2usi32(<4 x float>, i32) nounwind readnone define i32 @test_x86_avx512_cvtss2si32(<4 x float> %a0) { ; CHECK-LABEL: test_x86_avx512_cvtss2si32: ; CHECK: ## BB#0: -; CHECK-NEXT: vcvtss2si %xmm0, %eax -; CHECK-NEXT: vcvtss2si {rz-sae}, %xmm0, %ecx -; CHECK-NEXT: addl %eax, %ecx -; CHECK-NEXT: vcvtss2si {rd-sae}, %xmm0, %eax +; CHECK-NEXT: vcvtss2si %xmm0, %ecx +; CHECK-NEXT: vcvtss2si {rz-sae}, %xmm0, %eax +; CHECK-NEXT: vcvtss2si {rd-sae}, %xmm0, %edx ; CHECK-NEXT: addl %ecx, %eax +; CHECK-NEXT: addl %edx, %eax ; CHECK-NEXT: retq %res = call i32 @llvm.x86.avx512.vcvtss2si32(<4 x float> %a0, i32 4) @@ -683,9 +685,8 @@ define <16 x i16> @test_x86_vcvtps2ph_256(<16 x float> %a0, <16 x i16> %src, i16 ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vcvtps2ph $2, %zmm0, %ymm1 {%k1} ; CHECK-NEXT: vcvtps2ph $2, %zmm0, %ymm2 {%k1} {z} -; CHECK-NEXT: vpaddw %ymm1, %ymm2, %ymm1 ; CHECK-NEXT: vcvtps2ph $2, %zmm0, (%rsi) -; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: vpaddw %ymm1, %ymm2, %ymm0 ; CHECK-NEXT: retq %res1 = call <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float> %a0, i32 2, <16 x i16> zeroinitializer, i16 -1) %res2 = call <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float> %a0, i32 2, <16 x i16> zeroinitializer, i16 %mask) @@ -2215,7 +2216,6 @@ declare <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>, <4 x float>, define <4 x float> @test_mask_add_ss_rn(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) { ; CHECK-LABEL: test_mask_add_ss_rn: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vaddss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1} ; CHECK-NEXT: vmovaps %xmm2, %xmm0 @@ -2227,7 +2227,6 @@ define <4 x float> @test_mask_add_ss_rn(<4 x float> %a0, <4 x float> %a1, <4 x f define <4 x float> @test_mask_add_ss_rd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) { ; CHECK-LABEL: test_mask_add_ss_rd: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vaddss {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1} ; CHECK-NEXT: vmovaps %xmm2, %xmm0 @@ -2239,7 +2238,6 @@ define <4 x float> @test_mask_add_ss_rd(<4 x float> %a0, <4 x float> %a1, <4 x f define <4 x float> @test_mask_add_ss_ru(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) { ; CHECK-LABEL: test_mask_add_ss_ru: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vaddss {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1} ; CHECK-NEXT: vmovaps %xmm2, %xmm0 @@ -2251,7 +2249,6 @@ 
define <4 x float> @test_mask_add_ss_ru(<4 x float> %a0, <4 x float> %a1, <4 x f define <4 x float> @test_mask_add_ss_rz(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) { ; CHECK-LABEL: test_mask_add_ss_rz: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vaddss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1} ; CHECK-NEXT: vmovaps %xmm2, %xmm0 @@ -2263,7 +2260,6 @@ define <4 x float> @test_mask_add_ss_rz(<4 x float> %a0, <4 x float> %a1, <4 x f define <4 x float> @test_mask_add_ss_current(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) { ; CHECK-LABEL: test_mask_add_ss_current: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm2 {%k1} ; CHECK-NEXT: vmovaps %xmm2, %xmm0 @@ -2275,7 +2271,6 @@ define <4 x float> @test_mask_add_ss_current(<4 x float> %a0, <4 x float> %a1, < define <4 x float> @test_maskz_add_ss_rn(<4 x float> %a0, <4 x float> %a1, i8 %mask) { ; CHECK-LABEL: test_maskz_add_ss_rn: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vaddss {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z} ; CHECK-NEXT: retq @@ -2295,7 +2290,6 @@ define <4 x float> @test_add_ss_rn(<4 x float> %a0, <4 x float> %a1) { define <4 x float> @test_mask_add_ss_current_memfold(<4 x float> %a0, float* %a1, <4 x float> %a2, i8 %mask) { ; CHECK-LABEL: test_mask_add_ss_current_memfold: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %esi ; CHECK-NEXT: kmovw %esi, %k1 ; CHECK-NEXT: vaddss (%rdi), %xmm0, %xmm1 {%k1} ; CHECK-NEXT: vmovaps %xmm1, %xmm0 @@ -2312,7 +2306,6 @@ define <4 x float> @test_mask_add_ss_current_memfold(<4 x float> %a0, float* %a1 define <4 x float> @test_maskz_add_ss_current_memfold(<4 x float> %a0, float* %a1, i8 %mask) { ; CHECK-LABEL: test_maskz_add_ss_current_memfold: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %esi ; CHECK-NEXT: kmovw %esi, %k1 ; CHECK-NEXT: vaddss (%rdi), %xmm0, %xmm0 {%k1} {z} ; CHECK-NEXT: retq @@ -2330,7 +2323,6 @@ declare <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>, <2 x doubl define <2 x double> @test_mask_add_sd_rn(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) { ; CHECK-LABEL: test_mask_add_sd_rn: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vaddsd {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1} ; CHECK-NEXT: vmovapd %xmm2, %xmm0 @@ -2342,7 +2334,6 @@ define <2 x double> @test_mask_add_sd_rn(<2 x double> %a0, <2 x double> %a1, <2 define <2 x double> @test_mask_add_sd_rd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) { ; CHECK-LABEL: test_mask_add_sd_rd: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vaddsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1} ; CHECK-NEXT: vmovapd %xmm2, %xmm0 @@ -2354,7 +2345,6 @@ define <2 x double> @test_mask_add_sd_rd(<2 x double> %a0, <2 x double> %a1, <2 define <2 x double> @test_mask_add_sd_ru(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) { ; CHECK-LABEL: test_mask_add_sd_ru: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vaddsd {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1} ; CHECK-NEXT: vmovapd %xmm2, %xmm0 @@ -2366,7 +2356,6 @@ define <2 x double> @test_mask_add_sd_ru(<2 x double> %a0, <2 x double> %a1, <2 define <2 x double> @test_mask_add_sd_rz(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) { ; CHECK-LABEL: test_mask_add_sd_rz: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, 
%k1 ; CHECK-NEXT: vaddsd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1} ; CHECK-NEXT: vmovapd %xmm2, %xmm0 @@ -2378,7 +2367,6 @@ define <2 x double> @test_mask_add_sd_rz(<2 x double> %a0, <2 x double> %a1, <2 define <2 x double> @test_mask_add_sd_current(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) { ; CHECK-LABEL: test_mask_add_sd_current: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vaddsd %xmm1, %xmm0, %xmm2 {%k1} ; CHECK-NEXT: vmovapd %xmm2, %xmm0 @@ -2390,7 +2378,6 @@ define <2 x double> @test_mask_add_sd_current(<2 x double> %a0, <2 x double> %a1 define <2 x double> @test_maskz_add_sd_rn(<2 x double> %a0, <2 x double> %a1, i8 %mask) { ; CHECK-LABEL: test_maskz_add_sd_rn: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vaddsd {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z} ; CHECK-NEXT: retq @@ -2410,7 +2397,6 @@ define <2 x double> @test_add_sd_rn(<2 x double> %a0, <2 x double> %a1) { define <2 x double> @test_mask_add_sd_current_memfold(<2 x double> %a0, double* %a1, <2 x double> %a2, i8 %mask) { ; CHECK-LABEL: test_mask_add_sd_current_memfold: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %esi ; CHECK-NEXT: kmovw %esi, %k1 ; CHECK-NEXT: vaddsd (%rdi), %xmm0, %xmm1 {%k1} ; CHECK-NEXT: vmovapd %xmm1, %xmm0 @@ -2425,7 +2411,6 @@ define <2 x double> @test_mask_add_sd_current_memfold(<2 x double> %a0, double* define <2 x double> @test_maskz_add_sd_current_memfold(<2 x double> %a0, double* %a1, i8 %mask) { ; CHECK-LABEL: test_maskz_add_sd_current_memfold: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %esi ; CHECK-NEXT: kmovw %esi, %k1 ; CHECK-NEXT: vaddsd (%rdi), %xmm0, %xmm0 {%k1} {z} ; CHECK-NEXT: retq @@ -2441,7 +2426,6 @@ declare <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>, <4 x float>, define <4 x float> @test_mask_max_ss_sae(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) { ; CHECK-LABEL: test_mask_max_ss_sae: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmaxss {sae}, %xmm1, %xmm0, %xmm2 {%k1} ; CHECK-NEXT: vmovaps %xmm2, %xmm0 @@ -2453,7 +2437,6 @@ define <4 x float> @test_mask_max_ss_sae(<4 x float> %a0, <4 x float> %a1, <4 x define <4 x float> @test_maskz_max_ss_sae(<4 x float> %a0, <4 x float> %a1, i8 %mask) { ; CHECK-LABEL: test_maskz_max_ss_sae: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmaxss {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z} ; CHECK-NEXT: retq @@ -2473,7 +2456,6 @@ define <4 x float> @test_max_ss_sae(<4 x float> %a0, <4 x float> %a1) { define <4 x float> @test_mask_max_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) { ; CHECK-LABEL: test_mask_max_ss: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmaxss %xmm1, %xmm0, %xmm2 {%k1} ; CHECK-NEXT: vmovaps %xmm2, %xmm0 @@ -2485,7 +2467,6 @@ define <4 x float> @test_mask_max_ss(<4 x float> %a0, <4 x float> %a1, <4 x floa define <4 x float> @test_maskz_max_ss(<4 x float> %a0, <4 x float> %a1, i8 %mask) { ; CHECK-LABEL: test_maskz_max_ss: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmaxss %xmm1, %xmm0, %xmm0 {%k1} {z} ; CHECK-NEXT: retq @@ -2505,7 +2486,6 @@ define <4 x float> @test_max_ss(<4 x float> %a0, <4 x float> %a1) { define <4 x float> @test_mask_max_ss_memfold(<4 x float> %a0, float* %a1, <4 x float> %a2, i8 %mask) { ; CHECK-LABEL: test_mask_max_ss_memfold: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %esi ; 
CHECK-NEXT: kmovw %esi, %k1 ; CHECK-NEXT: vmaxss (%rdi), %xmm0, %xmm1 {%k1} ; CHECK-NEXT: vmovaps %xmm1, %xmm0 @@ -2522,7 +2502,6 @@ define <4 x float> @test_mask_max_ss_memfold(<4 x float> %a0, float* %a1, <4 x f define <4 x float> @test_maskz_max_ss_memfold(<4 x float> %a0, float* %a1, i8 %mask) { ; CHECK-LABEL: test_maskz_max_ss_memfold: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %esi ; CHECK-NEXT: kmovw %esi, %k1 ; CHECK-NEXT: vmaxss (%rdi), %xmm0, %xmm0 {%k1} {z} ; CHECK-NEXT: retq @@ -2539,7 +2518,6 @@ declare <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>, <2 x doubl define <2 x double> @test_mask_max_sd_sae(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) { ; CHECK-LABEL: test_mask_max_sd_sae: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmaxsd {sae}, %xmm1, %xmm0, %xmm2 {%k1} ; CHECK-NEXT: vmovapd %xmm2, %xmm0 @@ -2551,7 +2529,6 @@ define <2 x double> @test_mask_max_sd_sae(<2 x double> %a0, <2 x double> %a1, <2 define <2 x double> @test_maskz_max_sd_sae(<2 x double> %a0, <2 x double> %a1, i8 %mask) { ; CHECK-LABEL: test_maskz_max_sd_sae: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmaxsd {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z} ; CHECK-NEXT: retq @@ -2571,7 +2548,6 @@ define <2 x double> @test_max_sd_sae(<2 x double> %a0, <2 x double> %a1) { define <2 x double> @test_mask_max_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) { ; CHECK-LABEL: test_mask_max_sd: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmaxsd %xmm1, %xmm0, %xmm2 {%k1} ; CHECK-NEXT: vmovapd %xmm2, %xmm0 @@ -2583,7 +2559,6 @@ define <2 x double> @test_mask_max_sd(<2 x double> %a0, <2 x double> %a1, <2 x d define <2 x double> @test_maskz_max_sd(<2 x double> %a0, <2 x double> %a1, i8 %mask) { ; CHECK-LABEL: test_maskz_max_sd: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 {%k1} {z} ; CHECK-NEXT: retq @@ -2603,7 +2578,6 @@ define <2 x double> @test_max_sd(<2 x double> %a0, <2 x double> %a1) { define <2 x double> @test_mask_max_sd_memfold(<2 x double> %a0, double* %a1, <2 x double> %a2, i8 %mask) { ; CHECK-LABEL: test_mask_max_sd_memfold: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %esi ; CHECK-NEXT: kmovw %esi, %k1 ; CHECK-NEXT: vmaxsd (%rdi), %xmm0, %xmm1 {%k1} ; CHECK-NEXT: vmovapd %xmm1, %xmm0 @@ -2618,7 +2592,6 @@ define <2 x double> @test_mask_max_sd_memfold(<2 x double> %a0, double* %a1, <2 define <2 x double> @test_maskz_max_sd_memfold(<2 x double> %a0, double* %a1, i8 %mask) { ; CHECK-LABEL: test_maskz_max_sd_memfold: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %esi ; CHECK-NEXT: kmovw %esi, %k1 ; CHECK-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 {%k1} {z} ; CHECK-NEXT: retq @@ -3652,16 +3625,15 @@ declare <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>, <4 x float>, <4 define <4 x float> @test_getexp_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) { ; CHECK-LABEL: test_getexp_ss: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmovaps %xmm2, %xmm3 ; CHECK-NEXT: vgetexpss %xmm1, %xmm0, %xmm3 {%k1} +; CHECK-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm4 {%k1} {z} +; CHECK-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm5 ; CHECK-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm2 {%k1} -; CHECK-NEXT: vaddps %xmm2, %xmm3, %xmm2 -; CHECK-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm3 {%k1} {z} -; CHECK-NEXT: vgetexpss {sae}, %xmm1, %xmm0, 
%xmm0 -; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0 -; CHECK-NEXT: vaddps %xmm0, %xmm2, %xmm0 +; CHECK-NEXT: vaddps %xmm2, %xmm3, %xmm0 +; CHECK-NEXT: vaddps %xmm5, %xmm4, %xmm1 +; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0 ; CHECK-NEXT: retq %res0 = call <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 4) %res1 = call <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 8) @@ -3679,16 +3651,15 @@ declare <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>, <2 x double>, define <2 x double> @test_getexp_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) { ; CHECK-LABEL: test_getexp_sd: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi +; CHECK-NEXT: vgetexpsd %xmm1, %xmm0, %xmm3 ; CHECK-NEXT: kmovw %edi, %k1 -; CHECK-NEXT: vmovapd %xmm2, %xmm3 -; CHECK-NEXT: vgetexpsd %xmm1, %xmm0, %xmm3 {%k1} -; CHECK-NEXT: vgetexpsd %xmm1, %xmm0, %xmm4 +; CHECK-NEXT: vmovapd %xmm2, %xmm4 +; CHECK-NEXT: vgetexpsd %xmm1, %xmm0, %xmm4 {%k1} +; CHECK-NEXT: vgetexpsd {sae}, %xmm1, %xmm0, %xmm5 {%k1} {z} ; CHECK-NEXT: vgetexpsd {sae}, %xmm1, %xmm0, %xmm2 {%k1} -; CHECK-NEXT: vaddpd %xmm2, %xmm3, %xmm2 -; CHECK-NEXT: vgetexpsd {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z} -; CHECK-NEXT: vaddpd %xmm4, %xmm0, %xmm0 -; CHECK-NEXT: vaddpd %xmm0, %xmm2, %xmm0 +; CHECK-NEXT: vaddpd %xmm2, %xmm4, %xmm0 +; CHECK-NEXT: vaddpd %xmm3, %xmm5, %xmm1 +; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ; CHECK-NEXT: retq %res0 = call <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 4) %res1 = call <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 8) @@ -3706,11 +3677,9 @@ declare i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double>, <2 x double>, i32, i8, i32 define i8@test_int_x86_avx512_mask_cmp_sd(<2 x double> %x0, <2 x double> %x1, i8 %x3, i32 %x4) { ; CHECK-LABEL: test_int_x86_avx512_mask_cmp_sd: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1} ; CHECK-NEXT: kmovw %k0, %eax -; CHECK-NEXT: andl $1, %eax ; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill> ; CHECK-NEXT: retq @@ -3721,18 +3690,18 @@ define i8@test_int_x86_avx512_mask_cmp_sd(<2 x double> %x0, <2 x double> %x1, i8 define i8@test_int_x86_avx512_mask_cmp_sd_all(<2 x double> %x0, <2 x double> %x1, i8 %x3, i32 %x4) { ; CHECK-LABEL: test_int_x86_avx512_mask_cmp_sd_all: ; CHECK: ## BB#0: +; CHECK-NEXT: vcmplesd %xmm1, %xmm0, %k0 +; CHECK-NEXT: kmovw %k0, %ecx ; CHECK-NEXT: vcmpunordsd {sae}, %xmm1, %xmm0, %k0 -; CHECK-NEXT: vcmplesd %xmm1, %xmm0, %k1 -; CHECK-NEXT: korw %k0, %k1, %k0 -; CHECK-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k1 -; CHECK-NEXT: vcmpneqsd %xmm1, %xmm0, %k2 -; CHECK-NEXT: korw %k1, %k2, %k1 -; CHECK-NEXT: andl $1, %edi -; CHECK-NEXT: kmovw %edi, %k2 -; CHECK-NEXT: kandw %k2, %k1, %k1 -; CHECK-NEXT: korw %k1, %k0, %k0 +; CHECK-NEXT: kmovw %k0, %edx +; CHECK-NEXT: kmovw %edi, %k1 +; CHECK-NEXT: vcmpneqsd %xmm1, %xmm0, %k0 {%k1} +; CHECK-NEXT: kmovw %k0, %esi +; CHECK-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1} ; CHECK-NEXT: kmovw %k0, %eax -; CHECK-NEXT: andl $1, %eax +; CHECK-NEXT: orb %cl, %dl +; CHECK-NEXT: orb %sil, %al +; CHECK-NEXT: orb %dl, %al ; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill> ; CHECK-NEXT: retq @@ -3752,11 +3721,9 @@ declare i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float>, <4 x float>, i32, i8, i32) define 
i8@test_int_x86_avx512_mask_cmp_ss(<4 x float> %x0, <4 x float> %x1, i8 %x3, i32 %x4) { ; CHECK-LABEL: test_int_x86_avx512_mask_cmp_ss: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vcmpunordss %xmm1, %xmm0, %k0 {%k1} ; CHECK-NEXT: kmovw %k0, %eax -; CHECK-NEXT: andl $1, %eax ; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill> ; CHECK-NEXT: retq @@ -3768,17 +3735,17 @@ define i8@test_int_x86_avx512_mask_cmp_ss(<4 x float> %x0, <4 x float> %x1, i8 % define i8@test_int_x86_avx512_mask_cmp_ss_all(<4 x float> %x0, <4 x float> %x1, i8 %x3, i32 %x4) { ; CHECK-LABEL: test_int_x86_avx512_mask_cmp_ss_all: ; CHECK: ## BB#0: -; CHECK-NEXT: vcmpless %xmm1, %xmm0, %k1 -; CHECK-NEXT: vcmpunordss {sae}, %xmm1, %xmm0, %k0 {%k1} -; CHECK-NEXT: andl $1, %edi +; CHECK-NEXT: vcmpless %xmm1, %xmm0, %k0 +; CHECK-NEXT: kmovw %k0, %ecx +; CHECK-NEXT: vcmpunordss {sae}, %xmm1, %xmm0, %k0 +; CHECK-NEXT: kmovw %k0, %edx ; CHECK-NEXT: kmovw %edi, %k1 -; CHECK-NEXT: vcmpneqss %xmm1, %xmm0, %k2 {%k1} -; CHECK-NEXT: kmovw %k2, %ecx -; CHECK-NEXT: vcmpnltss {sae}, %xmm1, %xmm0, %k1 {%k1} -; CHECK-NEXT: kmovw %k1, %edx -; CHECK-NEXT: andl $1, %edx +; CHECK-NEXT: vcmpneqss %xmm1, %xmm0, %k0 {%k1} +; CHECK-NEXT: kmovw %k0, %esi +; CHECK-NEXT: vcmpnltss {sae}, %xmm1, %xmm0, %k0 {%k1} ; CHECK-NEXT: kmovw %k0, %eax -; CHECK-NEXT: andb %cl, %al +; CHECK-NEXT: andb %cl, %dl +; CHECK-NEXT: andb %sil, %al ; CHECK-NEXT: andb %dl, %al ; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill> ; CHECK-NEXT: retq @@ -3899,16 +3866,15 @@ declare <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double>, <2 x double> define <2 x double>@test_int_x86_avx512_mask_getmant_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) { ; CHECK-LABEL: test_int_x86_avx512_mask_getmant_sd: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi +; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm3 ; CHECK-NEXT: kmovw %edi, %k1 -; CHECK-NEXT: vmovapd %xmm2, %xmm3 -; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm3 {%k1} -; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm4 {%k1} {z} -; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm3 -; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm4 +; CHECK-NEXT: vmovapd %xmm2, %xmm4 +; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm4 {%k1} +; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm5 {%k1} {z} ; CHECK-NEXT: vgetmantsd $11, {sae}, %xmm1, %xmm0, %xmm2 {%k1} -; CHECK-NEXT: vaddpd %xmm4, %xmm2, %xmm0 -; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0 +; CHECK-NEXT: vaddpd %xmm5, %xmm4, %xmm0 +; CHECK-NEXT: vaddpd %xmm3, %xmm2, %xmm1 +; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ; CHECK-NEXT: retq %res = call <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double> %x0, <2 x double> %x1, i32 11, <2 x double> %x2, i8 %x3, i32 4) %res1 = call <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double> %x0, <2 x double> %x1, i32 11, <2 x double> zeroinitializer, i8 %x3, i32 4) @@ -3925,15 +3891,14 @@ declare <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float>, <4 x float>, i define <4 x float>@test_int_x86_avx512_mask_getmant_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) { ; CHECK-LABEL: test_int_x86_avx512_mask_getmant_ss: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi +; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm3 ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm2 {%k1} -; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm3 {%k1} {z} -; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm2 -; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm3 +; CHECK-NEXT: vgetmantss 
$11, %xmm1, %xmm0, %xmm4 {%k1} {z} ; CHECK-NEXT: vgetmantss $11, {sae}, %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vaddps %xmm4, %xmm2, %xmm1 ; CHECK-NEXT: vaddps %xmm3, %xmm0, %xmm0 -; CHECK-NEXT: vaddps %xmm0, %xmm2, %xmm0 +; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0 ; CHECK-NEXT: retq %res = call <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float> %x0, <4 x float> %x1, i32 11, <4 x float> %x2, i8 %x3, i32 4) %res1 = call <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float> %x0, <4 x float> %x1, i32 11, <4 x float> zeroinitializer, i8 %x3, i32 4) @@ -4057,7 +4022,6 @@ declare <2 x double> @llvm.x86.avx512.mask.cvtss2sd.round(<2 x double>, <4 x flo define <2 x double>@test_int_x86_avx512_mask_cvt_ss2sd_round(<2 x double> %x0,<4 x float> %x1, <2 x double> %x2, i8 %x3) { ; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ss2sd_round: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vcvtss2sd %xmm1, %xmm0, %xmm2 {%k1} ; CHECK-NEXT: vcvtss2sd {sae}, %xmm1, %xmm0, %xmm0 @@ -4074,7 +4038,6 @@ declare <4 x float> @llvm.x86.avx512.mask.cvtsd2ss.round(<4 x float>, <2 x doubl define <4 x float>@test_int_x86_avx512_mask_cvt_sd2ss_round(<4 x float> %x0,<2 x double> %x1, <4 x float> %x2, i8 %x3) { ; CHECK-LABEL: test_int_x86_avx512_mask_cvt_sd2ss_round: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vcvtsd2ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1} ; CHECK-NEXT: vcvtsd2ss {rn-sae}, %xmm1, %xmm0, %xmm0 @@ -4435,8 +4398,8 @@ define <16 x i32>@test_int_x86_avx512_mask_prol_d_512(<16 x i32> %x0, i32 %x1, < ; CHECK-NEXT: kmovw %esi, %k1 ; CHECK-NEXT: vprold $3, %zmm0, %zmm1 {%k1} ; CHECK-NEXT: vprold $3, %zmm0, %zmm2 {%k1} {z} -; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1 ; CHECK-NEXT: vprold $3, %zmm0, %zmm0 +; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1 ; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0 ; CHECK-NEXT: retq %res = call <16 x i32> @llvm.x86.avx512.mask.prol.d.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 %x3) @@ -4455,8 +4418,8 @@ define <8 x i64>@test_int_x86_avx512_mask_prol_q_512(<8 x i64> %x0, i32 %x1, <8 ; CHECK-NEXT: kmovw %esi, %k1 ; CHECK-NEXT: vprolq $3, %zmm0, %zmm1 {%k1} ; CHECK-NEXT: vprolq $3, %zmm0, %zmm2 {%k1} {z} -; CHECK-NEXT: vpaddq %zmm2, %zmm1, %zmm1 ; CHECK-NEXT: vprolq $3, %zmm0, %zmm0 +; CHECK-NEXT: vpaddq %zmm2, %zmm1, %zmm1 ; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0 ; CHECK-NEXT: retq %res = call <8 x i64> @llvm.x86.avx512.mask.prol.q.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 %x3) @@ -4557,9 +4520,9 @@ define <8 x double>@test_int_x86_avx512_mask_fixupimm_pd_512(<8 x double> %x0, < ; CHECK-NEXT: vfixupimmpd $4, %zmm2, %zmm1, %zmm3 {%k1} ; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4 ; CHECK-NEXT: vfixupimmpd $5, %zmm2, %zmm1, %zmm4 {%k1} {z} -; CHECK-NEXT: vaddpd %zmm4, %zmm3, %zmm3 ; CHECK-NEXT: vfixupimmpd $3, {sae}, %zmm2, %zmm1, %zmm0 -; CHECK-NEXT: vaddpd %zmm0, %zmm3, %zmm0 +; CHECK-NEXT: vaddpd %zmm4, %zmm3, %zmm1 +; CHECK-NEXT: vaddpd %zmm0, %zmm1, %zmm0 ; CHECK-NEXT: retq %res = call <8 x double> @llvm.x86.avx512.mask.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i32 4, i8 %x4, i32 4) %res1 = call <8 x double> @llvm.x86.avx512.mask.fixupimm.pd.512(<8 x double> zeroinitializer, <8 x double> %x1, <8 x i64> %x2, i32 5, i8 %x4, i32 4) @@ -4580,9 +4543,9 @@ define <8 x double>@test_int_x86_avx512_maskz_fixupimm_pd_512(<8 x double> %x0, ; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4 ; CHECK-NEXT: vmovapd %zmm0, %zmm5 ; CHECK-NEXT: vfixupimmpd $5, %zmm4, %zmm1, %zmm5 {%k1} {z} -; CHECK-NEXT: vaddpd 
%zmm5, %zmm3, %zmm3 ; CHECK-NEXT: vfixupimmpd $2, {sae}, %zmm2, %zmm1, %zmm0 -; CHECK-NEXT: vaddpd %zmm0, %zmm3, %zmm0 +; CHECK-NEXT: vaddpd %zmm5, %zmm3, %zmm1 +; CHECK-NEXT: vaddpd %zmm0, %zmm1, %zmm0 ; CHECK-NEXT: retq %res = call <8 x double> @llvm.x86.avx512.maskz.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i32 3, i8 %x4, i32 4) %res1 = call <8 x double> @llvm.x86.avx512.maskz.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> zeroinitializer, i32 5, i8 %x4, i32 4) @@ -4597,16 +4560,15 @@ declare <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float>, <4 x float>, define <4 x float>@test_int_x86_avx512_mask_fixupimm_ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) { ; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_ss: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmovaps %xmm0, %xmm3 ; CHECK-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm3 {%k1} ; CHECK-NEXT: vxorps %xmm4, %xmm4, %xmm4 ; CHECK-NEXT: vmovaps %xmm0, %xmm5 ; CHECK-NEXT: vfixupimmss $5, %xmm4, %xmm1, %xmm5 {%k1} -; CHECK-NEXT: vaddps %xmm5, %xmm3, %xmm3 ; CHECK-NEXT: vfixupimmss $5, {sae}, %xmm2, %xmm1, %xmm0 -; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0 +; CHECK-NEXT: vaddps %xmm5, %xmm3, %xmm1 +; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0 ; CHECK-NEXT: retq %res = call <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 %x4, i32 4) %res1 = call <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> zeroinitializer, i32 5, i8 %x4, i32 4) @@ -4621,16 +4583,15 @@ declare <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float>, <4 x float>, define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) { ; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_ss: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi -; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmovaps %xmm0, %xmm3 -; CHECK-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm3 {%k1} {z} +; CHECK-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm3 +; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmovaps %xmm0, %xmm4 -; CHECK-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm4 +; CHECK-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm4 {%k1} {z} ; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2 ; CHECK-NEXT: vfixupimmss $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1} {z} -; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0 -; CHECK-NEXT: vaddps %xmm4, %xmm0, %xmm0 +; CHECK-NEXT: vaddps %xmm0, %xmm4, %xmm0 +; CHECK-NEXT: vaddps %xmm3, %xmm0, %xmm0 ; CHECK-NEXT: retq %res = call <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 %x4, i32 4) %res1 = call <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> zeroinitializer, i32 5, i8 %x4, i32 8) @@ -4651,9 +4612,9 @@ define <16 x float>@test_int_x86_avx512_mask_fixupimm_ps_512(<16 x float> %x0, < ; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4 ; CHECK-NEXT: vmovaps %zmm0, %zmm5 ; CHECK-NEXT: vfixupimmps $5, %zmm4, %zmm1, %zmm5 {%k1} -; CHECK-NEXT: vaddps %zmm5, %zmm3, %zmm3 ; CHECK-NEXT: vfixupimmps $5, {sae}, %zmm2, %zmm1, %zmm0 -; CHECK-NEXT: vaddps %zmm0, %zmm3, %zmm0 +; CHECK-NEXT: vaddps %zmm5, %zmm3, %zmm1 +; CHECK-NEXT: vaddps %zmm0, %zmm1, %zmm0 ; CHECK-NEXT: retq %res = call <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i32 5, i16 %x4, i32 4) %res1 = call <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float> %x0, <16 x float> 
%x1, <16 x i32> zeroinitializer, i32 5, i16 %x4, i32 4) @@ -4691,16 +4652,15 @@ declare <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double>, <2 x double define <2 x double>@test_int_x86_avx512_mask_fixupimm_sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) { ; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_sd: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi -; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmovapd %xmm0, %xmm3 -; CHECK-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm3 {%k1} +; CHECK-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm3 +; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmovapd %xmm0, %xmm4 -; CHECK-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm4 +; CHECK-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm4 {%k1} ; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2 ; CHECK-NEXT: vfixupimmsd $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1} -; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0 -; CHECK-NEXT: vaddpd %xmm4, %xmm0, %xmm0 +; CHECK-NEXT: vaddpd %xmm0, %xmm4, %xmm0 +; CHECK-NEXT: vaddpd %xmm3, %xmm0, %xmm0 ; CHECK-NEXT: retq %res = call <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 %x4, i32 4) %res1 = call <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> zeroinitializer, i32 5, i8 %x4, i32 8) @@ -4715,16 +4675,15 @@ declare <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double>, <2 x doubl define <2 x double>@test_int_x86_avx512_maskz_fixupimm_sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) { ; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_sd: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi ; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmovapd %xmm0, %xmm3 ; CHECK-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm3 {%k1} {z} ; CHECK-NEXT: vxorpd %xmm4, %xmm4, %xmm4 ; CHECK-NEXT: vmovapd %xmm0, %xmm5 ; CHECK-NEXT: vfixupimmsd $5, {sae}, %xmm4, %xmm1, %xmm5 {%k1} {z} -; CHECK-NEXT: vaddpd %xmm5, %xmm3, %xmm3 ; CHECK-NEXT: vfixupimmsd $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1} {z} -; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0 +; CHECK-NEXT: vaddpd %xmm5, %xmm3, %xmm1 +; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0 ; CHECK-NEXT: retq %res = call <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 %x4, i32 4) %res1 = call <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> zeroinitializer, i32 5, i8 %x4, i32 8) @@ -4816,18 +4775,17 @@ declare <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double>, <2 x double>, define <2 x double>@test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){ ; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_sd: ; CHECK: ## BB#0: -; CHECK-NEXT: andl $1, %edi -; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmovapd %xmm0, %xmm3 -; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm3 {%k1} -; CHECK-NEXT: vmovapd %xmm0, %xmm4 -; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm4 -; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm3 +; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm3 +; CHECK-NEXT: kmovw %edi, %k1 ; CHECK-NEXT: vmovapd %xmm0, %xmm4 -; CHECK-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm4 {%k1} -; CHECK-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0 -; CHECK-NEXT: vaddpd %xmm4, %xmm0, %xmm0 -; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0 +; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm4 {%k1} +; CHECK-NEXT: vmovapd %xmm0, %xmm5 +; CHECK-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm5 +; CHECK-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vaddpd %xmm4, 
%xmm3, %xmm1
+; CHECK-NEXT: vaddpd %xmm0, %xmm5, %xmm0
+; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 4)
@@ -4844,18 +4802,17 @@ declare <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float>, <4 x float>, <4
define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm3
-; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm3 {%k1}
-; CHECK-NEXT: vmovaps %xmm0, %xmm4
-; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm4
-; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm4
-; CHECK-NEXT: vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm4 {%k1}
-; CHECK-NEXT: vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm0
-; CHECK-NEXT: vaddps %xmm4, %xmm0, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm4 {%k1}
+; CHECK-NEXT: vmovaps %xmm0, %xmm5
+; CHECK-NEXT: vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm5
+; CHECK-NEXT: vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vaddps %xmm4, %xmm3, %xmm1
+; CHECK-NEXT: vaddps %xmm0, %xmm5, %xmm0
+; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 4)
@@ -4872,7 +4829,6 @@ declare <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double>, <2 x double>
define <2 x double>@test_int_x86_avx512_maskz_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm0, %xmm3
; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm3 {%k1} {z}
@@ -4890,7 +4846,6 @@ declare <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float>, <4 x float>, <
define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -4904,18 +4859,17 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double>, <2 x double>
define <2 x double>@test_int_x86_avx512_mask3_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm3
-; CHECK-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vmovapd %xmm2, %xmm4
-; CHECK-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm4
-; CHECK-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm4 {%k1}
-; CHECK-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm2
-;
CHECK-NEXT: vaddpd %xmm4, %xmm2, %xmm0
-; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT: vmovapd %xmm2, %xmm5
+; CHECK-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm0
+; CHECK-NEXT: vaddpd %xmm2, %xmm5, %xmm1
+; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 4)
@@ -4932,18 +4886,17 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float>, <4 x float>, <
define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm3
-; CHECK-NEXT: vfmadd231ss %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vmovaps %xmm2, %xmm4
-; CHECK-NEXT: vfmadd231ss %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vfmadd231ss %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm4
-; CHECK-NEXT: vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm4 {%k1}
-; CHECK-NEXT: vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddps %xmm4, %xmm2, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vfmadd231ss %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT: vmovaps %xmm2, %xmm5
+; CHECK-NEXT: vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddps %xmm4, %xmm3, %xmm0
+; CHECK-NEXT: vaddps %xmm2, %xmm5, %xmm1
+; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 4)
@@ -4959,7 +4912,6 @@ define void @fmadd_ss_mask_memfold(float* %a, float* %b, i8 %c) {
; CHECK-LABEL: fmadd_ss_mask_memfold:
; CHECK: ## BB#0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: andl $1, %edx
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vfmadd132ss (%rsi), %xmm0, %xmm0 {%k1}
; CHECK-NEXT: vmovss %xmm0, (%rdi)
@@ -4987,7 +4939,6 @@ define void @fmadd_ss_maskz_memfold(float* %a, float* %b, i8 %c) {
; CHECK-LABEL: fmadd_ss_maskz_memfold:
; CHECK: ## BB#0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: andl $1, %edx
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vfmadd132ss (%rsi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vmovss %xmm0, (%rdi)
@@ -5015,7 +4966,6 @@ define void @fmadd_sd_mask_memfold(double* %a, double* %b, i8 %c) {
; CHECK-LABEL: fmadd_sd_mask_memfold:
; CHECK: ## BB#0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: andl $1, %edx
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vfmadd132sd (%rsi), %xmm0, %xmm0 {%k1}
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
@@ -5039,7 +4989,6 @@ define void @fmadd_sd_maskz_memfold(double* %a, double* %b, i8 %c) {
; CHECK-LABEL: fmadd_sd_maskz_memfold:
; CHECK: ## BB#0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: andl $1, %edx
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vfmadd132sd (%rsi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT:
vmovlpd %xmm0, (%rdi)
@@ -5064,18 +5013,17 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double>, <2 x double>
define <2 x double>@test_int_x86_avx512_mask3_vfmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm3
-; CHECK-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vmovapd %xmm2, %xmm4
-; CHECK-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm4
-; CHECK-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm4 {%k1}
-; CHECK-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddpd %xmm4, %xmm2, %xmm0
-; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT: vmovapd %xmm2, %xmm5
+; CHECK-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm0
+; CHECK-NEXT: vaddpd %xmm2, %xmm5, %xmm1
+; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 4)
@@ -5092,18 +5040,17 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float>, <4 x float>, <
define <4 x float>@test_int_x86_avx512_mask3_vfmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm3
-; CHECK-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vmovaps %xmm2, %xmm4
-; CHECK-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm4
-; CHECK-NEXT: vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm4 {%k1}
-; CHECK-NEXT: vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddps %xmm4, %xmm2, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT: vmovaps %xmm2, %xmm5
+; CHECK-NEXT: vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddps %xmm4, %xmm3, %xmm0
+; CHECK-NEXT: vaddps %xmm2, %xmm5, %xmm1
+; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 4)
@@ -5120,18 +5067,17 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double>, <2 x double
define <2 x double>@test_int_x86_avx512_mask3_vfnmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm3
-; CHECK-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vmovapd %xmm2, %xmm4
-; CHECK-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm4
-;
CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm4
-; CHECK-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm4 {%k1}
-; CHECK-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddpd %xmm4, %xmm2, %xmm0
-; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT: vmovapd %xmm2, %xmm5
+; CHECK-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm0
+; CHECK-NEXT: vaddpd %xmm2, %xmm5, %xmm1
+; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 4)
@@ -5148,18 +5094,17 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float>, <4 x float>,
define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm3
-; CHECK-NEXT: vfnmsub231ss %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vmovaps %xmm2, %xmm4
-; CHECK-NEXT: vfnmsub231ss %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vfnmsub231ss %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm4
-; CHECK-NEXT: vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm4 {%k1}
-; CHECK-NEXT: vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddps %xmm4, %xmm2, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vfnmsub231ss %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT: vmovaps %xmm2, %xmm5
+; CHECK-NEXT: vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddps %xmm4, %xmm3, %xmm0
+; CHECK-NEXT: vaddps %xmm2, %xmm5, %xmm1
+; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 4)
@@ -5174,7 +5119,6 @@ define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x fl
define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1, float *%ptr_b ,i8 %x3,i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss_rm:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vfmadd231ss (%rdi), %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
@@ -5188,7 +5132,6 @@ define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss_rm(<4 x float> %x0, <4 x
define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,float *%ptr_b ,i8 %x3,i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ss_rm:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vfmadd132ss (%rdi), %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -5202,7 +5145,8 @@ define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,float
*%ptr_b ,i8 %x3,i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ss_rm:
; CHECK: ## BB#0:
-; CHECK-NEXT: kxorw %k0, %k0, %k1
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss (%rdi), %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%q = load float, float* %ptr_b
diff --git a/test/CodeGen/X86/avx512-load-store.ll b/test/CodeGen/X86/avx512-load-store.ll
index 3295c66c6d42..4fd985bf24cd 100644
--- a/test/CodeGen/X86/avx512-load-store.ll
+++ b/test/CodeGen/X86/avx512-load-store.ll
@@ -12,7 +12,7 @@ define <4 x float> @test_mm_mask_move_ss(<4 x float> %__W, i8 zeroext %__U, <4 x
; CHECK32-LABEL: test_mm_mask_move_ss:
; CHECK32: # BB#0: # %entry
; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT: andl $1, %eax
+; CHECK32-NEXT: andb $1, %al
; CHECK32-NEXT: kmovw %eax, %k1
; CHECK32-NEXT: vmovss %xmm2, %xmm0, %xmm0 {%k1}
; CHECK32-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
@@ -37,7 +37,7 @@ define <4 x float> @test_mm_maskz_move_ss(i8 zeroext %__U, <4 x float> %__A, <4
; CHECK32-LABEL: test_mm_maskz_move_ss:
; CHECK32: # BB#0: # %entry
; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT: andl $1, %eax
+; CHECK32-NEXT: andb $1, %al
; CHECK32-NEXT: kmovw %eax, %k1
; CHECK32-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK32-NEXT: vmovss %xmm1, %xmm0, %xmm2 {%k1}
@@ -62,7 +62,7 @@ define <2 x double> @test_mm_mask_move_sd(<2 x double> %__W, i8 zeroext %__U, <2
; CHECK32-LABEL: test_mm_mask_move_sd:
; CHECK32: # BB#0: # %entry
; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT: andl $1, %eax
+; CHECK32-NEXT: andb $1, %al
; CHECK32-NEXT: kmovw %eax, %k1
; CHECK32-NEXT: vmovsd %xmm2, %xmm0, %xmm0 {%k1}
; CHECK32-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
@@ -87,7 +87,7 @@ define <2 x double> @test_mm_maskz_move_sd(i8 zeroext %__U, <2 x double> %__A, <
; CHECK32-LABEL: test_mm_maskz_move_sd:
; CHECK32: # BB#0: # %entry
; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT: andl $1, %eax
+; CHECK32-NEXT: andb $1, %al
; CHECK32-NEXT: kmovw %eax, %k1
; CHECK32-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK32-NEXT: vmovsd %xmm1, %xmm0, %xmm2 {%k1}
diff --git a/test/CodeGen/X86/avx512-mask-bugfix.ll b/test/CodeGen/X86/avx512-mask-bugfix.ll
deleted file mode 100755
index 1940680f1c10..000000000000
--- a/test/CodeGen/X86/avx512-mask-bugfix.ll
+++ /dev/null
@@ -1,57 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
-
-; ModuleID = 'foo.ll'
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-; Function Attrs: nounwind readnone
-declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) #0
-
-; Function Attrs: nounwind readnone
-declare i64 @llvm.cttz.i64(i64, i1) #0
-
-; Function Attrs: nounwind
-define void @foo(float* noalias %aFOO, float %b, i32 %a) {
-allocas:
- %full_mask_memory.i57 = alloca <8 x float>
- %return_value_memory.i60 = alloca i1
- %cmp.i = icmp eq i32 %a, 65535
- br i1 %cmp.i, label %all_on, label %some_on
-
-all_on:
- %mask0 = load <8 x float>, <8 x float>* %full_mask_memory.i57
- %v0.i.i.i70 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %mask0) #0
- %allon.i.i76 = icmp eq i32 %v0.i.i.i70, 65535
- br i1 %allon.i.i76, label %check_neighbors.i.i121, label %domixed.i.i100
-
-domixed.i.i100:
- br label %check_neighbors.i.i121
-
-check_neighbors.i.i121:
- %v1.i5.i.i116 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %mask0) #0
- %alleq.i.i120 = icmp eq i32 %v1.i5.i.i116, 65535
- br i1 %alleq.i.i120, label %all_equal.i.i123, label %not_all_equal.i.i124
-
-; CHECK: kxnorw %k0, %k0, %k0
-; CHECK: kshiftrw $15, %k0, %k0
-; CHECK: jmp
-; CHECK: kxorw %k0, %k0, %k0
-
-all_equal.i.i123:
- br label %reduce_equal___vyi.exit128
-
-not_all_equal.i.i124:
- br label %reduce_equal___vyi.exit128
-
-reduce_equal___vyi.exit128:
- %calltmp2.i125 = phi i1 [ true, %all_equal.i.i123 ], [ false, %not_all_equal.i.i124 ]
- store i1 %calltmp2.i125, i1* %return_value_memory.i60
- %return_value.i126 = load i1, i1* %return_value_memory.i60
- %. = select i1 %return_value.i126, i32 1, i32 0
- %select_to_float = sitofp i32 %. to float
- ret void
-
-some_on:
- ret void
-}
-
diff --git a/test/CodeGen/X86/avx512-mask-op.ll b/test/CodeGen/X86/avx512-mask-op.ll
index 7103efe050a4..01153a9e45f7 100644
--- a/test/CodeGen/X86/avx512-mask-op.ll
+++ b/test/CodeGen/X86/avx512-mask-op.ll
@@ -418,7 +418,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; KNL-NEXT: kshiftlw $10, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: andb $1, %al
; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; KNL-NEXT: retq
;
@@ -428,7 +428,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; SKX-NEXT: kshiftlw $10, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: andb $1, %al
; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -439,7 +439,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; AVX512BW-NEXT: kshiftlw $10, %k0, %k0
; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: andl $1, %eax
+; AVX512BW-NEXT: andb $1, %al
; AVX512BW-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -450,7 +450,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; AVX512DQ-NEXT: kshiftlw $10, %k0, %k0
; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: andl $1, %eax
+; AVX512DQ-NEXT: andb $1, %al
; AVX512DQ-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
@@ -965,8 +965,8 @@ define <64 x i8> @test16(i64 %x) {
; SKX-LABEL: test16:
; SKX: ## BB#0:
; SKX-NEXT: kmovq %rdi, %k0
-; SKX-NEXT: kxnorw %k0, %k0, %k1
-; SKX-NEXT: kshiftrw $15, %k1, %k1
+; SKX-NEXT: movb $1, %al
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpmovm2b %k1, %zmm0
; SKX-NEXT: vpsllq $40, %xmm0, %xmm0
; SKX-NEXT: vpmovm2b %k0, %zmm1
@@ -981,8 +981,8 @@ define <64 x i8> @test16(i64 %x) {
; AVX512BW-LABEL: test16:
; AVX512BW: ## BB#0:
; AVX512BW-NEXT: kmovq %rdi, %k0
-; AVX512BW-NEXT: kxnorw %k0, %k0, %k1
-; AVX512BW-NEXT: kshiftrw $15, %k1, %k1
+; AVX512BW-NEXT: movb $1, %al
+; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpmovm2b %k1, %zmm0
; AVX512BW-NEXT: vpsllq $40, %xmm0, %xmm0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm1
@@ -1085,7 +1085,6 @@ define <64 x i8> @test17(i64 %x, i32 %y, i32 %z) {
; SKX-NEXT: kmovq %rdi, %k0
; SKX-NEXT: cmpl %edx, %esi
; SKX-NEXT: setg %al
-; SKX-NEXT: andl $1, %eax
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpmovm2b %k1, %zmm0
; SKX-NEXT: vpsllq $40, %xmm0, %xmm0
@@ -1103,7 +1102,6 @@ define <64 x i8> @test17(i64 %x, i32 %y, i32 %z) {
; AVX512BW-NEXT: kmovq %rdi, %k0
; AVX512BW-NEXT: cmpl %edx, %esi
; AVX512BW-NEXT: setg %al
-; AVX512BW-NEXT: andl $1, %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpmovm2b %k1, %zmm0
; AVX512BW-NEXT: vpsllq $40, %xmm0, %xmm0
@@ -1166,21 +1164,25 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; KNL-LABEL: test18:
; KNL: ##
BB#0:
; KNL-NEXT: kmovw %edi, %k1
-; KNL-NEXT: kmovw %esi, %k2
-; KNL-NEXT: kshiftlw $7, %k2, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: kshiftlw $6, %k2, %k2
+; KNL-NEXT: kmovw %esi, %k0
+; KNL-NEXT: kshiftlw $7, %k0, %k2
; KNL-NEXT: kshiftrw $15, %k2, %k2
+; KNL-NEXT: kmovw %k2, %eax
+; KNL-NEXT: kshiftlw $6, %k0, %k0
+; KNL-NEXT: kshiftrw $15, %k0, %k0
+; KNL-NEXT: kmovw %k0, %ecx
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; KNL-NEXT: kmovw %ecx, %k1
+; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
; KNL-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
-; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
-; KNL-NEXT: kshiftlw $1, %k1, %k1
-; KNL-NEXT: kshiftrw $1, %k1, %k1
-; KNL-NEXT: kshiftlw $7, %k0, %k0
-; KNL-NEXT: korw %k0, %k1, %k1
+; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
+; KNL-NEXT: kshiftlw $1, %k0, %k0
+; KNL-NEXT: kshiftrw $1, %k0, %k0
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: kshiftlw $7, %k1, %k1
+; KNL-NEXT: korw %k1, %k0, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovqw %zmm0, %xmm0
; KNL-NEXT: retq
@@ -1191,16 +1193,20 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kshiftlw $7, %k1, %k2
; SKX-NEXT: kshiftrw $15, %k2, %k2
+; SKX-NEXT: kmovd %k2, %eax
; SKX-NEXT: kshiftlw $6, %k1, %k1
; SKX-NEXT: kshiftrw $15, %k1, %k1
+; SKX-NEXT: kmovd %k1, %ecx
; SKX-NEXT: vpmovm2q %k0, %zmm0
-; SKX-NEXT: vpmovm2q %k1, %zmm1
+; SKX-NEXT: kmovd %ecx, %k0
+; SKX-NEXT: vpmovm2q %k0, %zmm1
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
; SKX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; SKX-NEXT: vpmovq2m %zmm2, %k0
; SKX-NEXT: kshiftlb $1, %k0, %k0
; SKX-NEXT: kshiftrb $1, %k0, %k0
-; SKX-NEXT: kshiftlb $7, %k2, %k1
+; SKX-NEXT: kmovd %eax, %k1
+; SKX-NEXT: kshiftlb $7, %k1, %k1
; SKX-NEXT: korb %k1, %k0, %k0
; SKX-NEXT: vpmovm2w %k0, %xmm0
; SKX-NEXT: vzeroupper
@@ -1209,21 +1215,25 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; AVX512BW-LABEL: test18:
; AVX512BW: ## BB#0:
; AVX512BW-NEXT: kmovd %edi, %k1
-; AVX512BW-NEXT: kmovd %esi, %k2
-; AVX512BW-NEXT: kshiftlw $7, %k2, %k0
-; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
-; AVX512BW-NEXT: kshiftlw $6, %k2, %k2
+; AVX512BW-NEXT: kmovd %esi, %k0
+; AVX512BW-NEXT: kshiftlw $7, %k0, %k2
; AVX512BW-NEXT: kshiftrw $15, %k2, %k2
+; AVX512BW-NEXT: kmovd %k2, %eax
+; AVX512BW-NEXT: kshiftlw $6, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %ecx
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; AVX512BW-NEXT: kmovd %ecx, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
; AVX512BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpsllq $63, %zmm2, %zmm0
-; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1
-; AVX512BW-NEXT: kshiftlw $1, %k1, %k1
-; AVX512BW-NEXT: kshiftrw $1, %k1, %k1
-; AVX512BW-NEXT: kshiftlw $7, %k0, %k0
-; AVX512BW-NEXT: korw %k0, %k1, %k0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kshiftlw $1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $1, %k0, %k0
+; AVX512BW-NEXT: kmovd %eax, %k1
+; AVX512BW-NEXT: kshiftlw $7, %k1, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512BW-NEXT: vzeroupper
@@ -1235,16 +1245,20
@@ define <8 x i1> @test18(i8 %a, i16 %y) {
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kshiftlw $7, %k1, %k2
; AVX512DQ-NEXT: kshiftrw $15, %k2, %k2
+; AVX512DQ-NEXT: kmovw %k2, %eax
; AVX512DQ-NEXT: kshiftlw $6, %k1, %k1
; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %ecx
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
-; AVX512DQ-NEXT: vpmovm2q %k1, %zmm1
+; AVX512DQ-NEXT: kmovw %ecx, %k0
+; AVX512DQ-NEXT: vpmovm2q %k0, %zmm1
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
; AVX512DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512DQ-NEXT: vpmovq2m %zmm2, %k0
; AVX512DQ-NEXT: kshiftlb $1, %k0, %k0
; AVX512DQ-NEXT: kshiftrb $1, %k0, %k0
-; AVX512DQ-NEXT: kshiftlb $7, %k2, %k1
+; AVX512DQ-NEXT: kmovw %eax, %k1
+; AVX512DQ-NEXT: kshiftlb $7, %k1, %k1
; AVX512DQ-NEXT: korb %k1, %k0, %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
@@ -1383,10 +1397,8 @@ define void @test23(<2 x i1> %a, <2 x i1>* %addr) {
define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
; KNL-LABEL: store_v1i1:
; KNL: ## BB#0:
-; KNL-NEXT: andl $1, %edi
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: kxnorw %k0, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: movb %al, (%rsi)
@@ -1394,20 +1406,16 @@ define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
;
; SKX-LABEL: store_v1i1:
; SKX: ## BB#0:
-; SKX-NEXT: andl $1, %edi
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: kxnorw %k0, %k0, %k1
-; SKX-NEXT: kshiftrw $15, %k1, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: kmovb %k0, (%rsi)
; SKX-NEXT: retq
;
; AVX512BW-LABEL: store_v1i1:
; AVX512BW: ## BB#0:
-; AVX512BW-NEXT: andl $1, %edi
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: kxnorw %k0, %k0, %k1
-; AVX512BW-NEXT: kshiftrw $15, %k1, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movb %al, (%rsi)
@@ -1415,10 +1423,8 @@ define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
;
; AVX512DQ-LABEL: store_v1i1:
; AVX512DQ: ## BB#0:
-; AVX512DQ-NEXT: andl $1, %edi
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: kxnorw %k0, %k0, %k1
-; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
; AVX512DQ-NEXT: kmovb %k0, (%rsi)
; AVX512DQ-NEXT: retq
@@ -1613,59 +1619,14 @@ define void @store_v16i1(<16 x i1> %c , <16 x i1>* %ptr) {
@f1.v = internal unnamed_addr global i1 false, align 4
define void @f1(i32 %c) {
-; KNL-LABEL: f1:
-; KNL: ## BB#0: ## %entry
-; KNL-NEXT: movzbl {{.*}}(%rip), %edi
-; KNL-NEXT: movl %edi, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k0
-; KNL-NEXT: kxnorw %k0, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kxorw %k1, %k0, %k0
-; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: movb %al, {{.*}}(%rip)
-; KNL-NEXT: xorl $1, %edi
-; KNL-NEXT: jmp _f2 ## TAILCALL
-;
-; SKX-LABEL: f1:
-; SKX: ## BB#0: ## %entry
-; SKX-NEXT: movzbl {{.*}}(%rip), %edi
-; SKX-NEXT: movl %edi, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: kxnorw %k0, %k0, %k1
-; SKX-NEXT: kshiftrw $15, %k1, %k1
-; SKX-NEXT: kxorw %k1, %k0, %k0
-; SKX-NEXT: kmovb %k0, {{.*}}(%rip)
-; SKX-NEXT: xorl $1, %edi
-; SKX-NEXT: jmp _f2 ## TAILCALL
-;
-; AVX512BW-LABEL: f1:
-; AVX512BW: ## BB#0: ## %entry
-; AVX512BW-NEXT: movzbl {{.*}}(%rip), %edi
-; AVX512BW-NEXT: movl %edi, %eax
-; AVX512BW-NEXT: andl $1, %eax
-; AVX512BW-NEXT: kmovd %eax, %k0
-; AVX512BW-NEXT: kxnorw %k0, %k0, %k1
-; AVX512BW-NEXT: kshiftrw $15, %k1, %k1
-; AVX512BW-NEXT: kxorw %k1, %k0, %k0
-; AVX512BW-NEXT:
kmovd %k0, %eax
-; AVX512BW-NEXT: movb %al, {{.*}}(%rip)
-; AVX512BW-NEXT: xorl $1, %edi
-; AVX512BW-NEXT: jmp _f2 ## TAILCALL
-;
-; AVX512DQ-LABEL: f1:
-; AVX512DQ: ## BB#0: ## %entry
-; AVX512DQ-NEXT: movzbl {{.*}}(%rip), %edi
-; AVX512DQ-NEXT: movl %edi, %eax
-; AVX512DQ-NEXT: andl $1, %eax
-; AVX512DQ-NEXT: kmovw %eax, %k0
-; AVX512DQ-NEXT: kxnorw %k0, %k0, %k1
-; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
-; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
-; AVX512DQ-NEXT: kmovb %k0, {{.*}}(%rip)
-; AVX512DQ-NEXT: xorl $1, %edi
-; AVX512DQ-NEXT: jmp _f2 ## TAILCALL
+; CHECK-LABEL: f1:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: movzbl {{.*}}(%rip), %edi
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: xorb $1, %al
+; CHECK-NEXT: movb %al, {{.*}}(%rip)
+; CHECK-NEXT: xorl $1, %edi
+; CHECK-NEXT: jmp _f2 ## TAILCALL
entry:
%.b1 = load i1, i1* @f1.v, align 4
%not..b1 = xor i1 %.b1, true
diff --git a/test/CodeGen/X86/avx512-mask-spills.ll b/test/CodeGen/X86/avx512-mask-spills.ll
index 96aefdb10584..4ef88ac495c3 100644
--- a/test/CodeGen/X86/avx512-mask-spills.ll
+++ b/test/CodeGen/X86/avx512-mask-spills.ll
@@ -9,11 +9,13 @@ define <4 x i1> @test_4i1(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: Lcfi0:
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vpcmpnleud %xmm1, %xmm0, %k0
-; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
-; CHECK-NEXT: korw %k1, %k0, %k0
+; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
; CHECK-NEXT: callq _f
; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k0 ## 2-byte Reload
+; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k1 ## 2-byte Reload
+; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: vpmovm2d %k0, %xmm0
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
@@ -32,12 +34,14 @@ define <8 x i1> @test_8i1(<8 x i32> %a, <8 x i32> %b) {
; CHECK-NEXT: Lcfi1:
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vpcmpnleud %ymm1, %ymm0, %k0
-; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k1
-; CHECK-NEXT: korb %k1, %k0, %k0
+; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
+; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq _f
; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k0 ## 2-byte Reload
+; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k1 ## 2-byte Reload
+; CHECK-NEXT: korb %k1, %k0, %k0
; CHECK-NEXT: vpmovm2w %k0, %xmm0
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
@@ -56,12 +60,14 @@ define <16 x i1> @test_16i1(<16 x i32> %a, <16 x i32> %b) {
; CHECK-NEXT: Lcfi2:
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
-; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
-; CHECK-NEXT: korw %k1, %k0, %k0
+; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
+; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq _f
; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k0 ## 2-byte Reload
+; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k1 ## 2-byte Reload
+; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: vpmovm2b %k0, %xmm0
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
@@ -79,12 +85,14 @@ define <32 x i1> @test_32i1(<32 x i16> %a, <32 x i16> %b) {
; CHECK-NEXT: Lcfi3:
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vpcmpnleuw %zmm1, %zmm0, %k0
-; CHECK-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
-; CHECK-NEXT: kord %k1, %k0, %k0
; CHECK-NEXT: kmovd %k0, {{[0-9]+}}(%rsp) ## 4-byte Spill
+; CHECK-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, (%rsp) ## 4-byte
Spill
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq _f
; CHECK-NEXT: kmovd {{[0-9]+}}(%rsp), %k0 ## 4-byte Reload
+; CHECK-NEXT: kmovd (%rsp), %k1 ## 4-byte Reload
+; CHECK-NEXT: kord %k1, %k0, %k0
; CHECK-NEXT: vpmovm2b %k0, %ymm0
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
@@ -98,18 +106,20 @@ define <32 x i1> @test_32i1(<32 x i16> %a, <32 x i16> %b) {
define <64 x i1> @test_64i1(<64 x i8> %a, <64 x i8> %b) {
; CHECK-LABEL: test_64i1:
; CHECK: ## BB#0:
-; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: Lcfi4:
-; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
-; CHECK-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
-; CHECK-NEXT: korq %k1, %k0, %k0
-; CHECK-NEXT: kmovq %k0, (%rsp) ## 8-byte Spill
+; CHECK-NEXT: kmovq %k0, {{[0-9]+}}(%rsp) ## 8-byte Spill
+; CHECK-NEXT: vpcmpgtb %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, {{[0-9]+}}(%rsp) ## 8-byte Spill
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq _f
-; CHECK-NEXT: kmovq (%rsp), %k0 ## 8-byte Reload
+; CHECK-NEXT: kmovq {{[0-9]+}}(%rsp), %k0 ## 8-byte Reload
+; CHECK-NEXT: kmovq {{[0-9]+}}(%rsp), %k1 ## 8-byte Reload
+; CHECK-NEXT: korq %k1, %k0, %k0
; CHECK-NEXT: vpmovm2b %k0, %zmm0
-; CHECK-NEXT: popq %rax
+; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: retq
%cmp_res = icmp ugt <64 x i8> %a, %b
diff --git a/test/CodeGen/X86/avx512-memfold.ll b/test/CodeGen/X86/avx512-memfold.ll
index d754b2b78f6c..17cb30255f75 100644
--- a/test/CodeGen/X86/avx512-memfold.ll
+++ b/test/CodeGen/X86/avx512-memfold.ll
@@ -4,11 +4,9 @@
define i8 @test_int_x86_avx512_mask_cmp_ss(<4 x float> %a, float* %b, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_cmp_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vcmpunordss (%rdi), %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
%b.val = load float, float* %b
@@ -24,7 +22,6 @@ declare i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float>, <4 x float>, i32, i8, i32)
define <4 x float> @test_mask_max_ss(<4 x float> %a, float* %b, i8 %mask) {
; CHECK-LABEL: test_mask_max_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmaxss (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -41,7 +38,6 @@ declare <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>, <4 x float>,
define <4 x float> @test_maskz_add_ss(<4 x float> %a, float* %b, i8 %mask) {
; CHECK-LABEL: test_maskz_add_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vaddss (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -61,7 +57,6 @@ declare <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double>, <2 x double>,
define <2 x double> @test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %a, <2 x double> %b, double* %c, i8 %mask){
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vfmadd213sd (%rdi), %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512-regcall-NoMask.ll b/test/CodeGen/X86/avx512-regcall-NoMask.ll
index 334097917853..f43d5b3e11dd 100644
--- a/test/CodeGen/X86/avx512-regcall-NoMask.ll
+++ b/test/CodeGen/X86/avx512-regcall-NoMask.ll
@@ -1,16 +1,10 @@
-; RUN: llc < %s -mtriple=i386-pc-win32 -mattr=+avx512f -mattr=+avx512vl -mattr=+avx512bw -mattr=+avx512dq | FileCheck --check-prefix=X32 %s
-; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+avx512f
-mattr=+avx512vl -mattr=+avx512bw -mattr=+avx512dq | FileCheck --check-prefix=WIN64 %s
+; RUN: llc < %s -mtriple=i386-pc-win32 -mattr=+avx512f -mattr=+avx512vl -mattr=+avx512bw -mattr=+avx512dq | FileCheck --check-prefix=ALL --check-prefix=X32 %s
+; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+avx512f -mattr=+avx512vl -mattr=+avx512bw -mattr=+avx512dq | FileCheck --check-prefix=ALL --check-prefix=WIN64 %s
; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -mattr=+avx512bw -mattr=+avx512dq | FileCheck --check-prefix=LINUXOSX64 %s
-; X32-LABEL: test_argReti1:
-; X32: kmov{{.*}} %eax, %k{{[0-7]}}
-; X32: kmov{{.*}} %k{{[0-7]}}, %eax
-; X32: ret{{.*}}
-
-; WIN64-LABEL: test_argReti1:
-; WIN64: kmov{{.*}} %eax, %k{{[0-7]}}
-; WIN64: kmov{{.*}} %k{{[0-7]}}, %eax
-; WIN64: ret{{.*}}
+; ALL-LABEL: test_argReti1:
+; ALL: incb %al
+; ALL: ret{{.*}}
; Test regcall when receiving/returning i1
define x86_regcallcc i1 @test_argReti1(i1 %a) {
ret i1 %add
}
-; X32-LABEL: test_CallargReti1:
-; X32: kmov{{.*}} %k{{[0-7]}}, %eax
-; X32: call{{.*}} {{.*}}test_argReti1
-; X32: kmov{{.*}} %eax, %k{{[0-7]}}
-; X32: ret{{.*}}
-
-; WIN64-LABEL: test_CallargReti1:
-; WIN64: kmov{{.*}} %k{{[0-7]}}, %eax
-; WIN64: call{{.*}} {{.*}}test_argReti1
-; WIN64: kmov{{.*}} %eax, %k{{[0-7]}}
-; WIN64: ret{{.*}}
+; ALL-LABEL: test_CallargReti1:
+; ALL: movzbl %al, %eax
+; ALL: call{{.*}}test_argReti1
+; ALL: incb %al
+; ALL: ret{{.*}}
; Test regcall when passing/retrieving i1
define x86_regcallcc i1 @test_CallargReti1(i1 %a) {
diff --git a/test/CodeGen/X86/avx512-scalar_mask.ll b/test/CodeGen/X86/avx512-scalar_mask.ll
index 47c6813fa8dc..f6ee8ff4c0f6 100644
--- a/test/CodeGen/X86/avx512-scalar_mask.ll
+++ b/test/CodeGen/X86/avx512-scalar_mask.ll
@@ -7,7 +7,6 @@ declare <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float>, <4 x float>, <
define <4 x float>@test_var_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 %mask) {
; CHECK-LABEL: test_var_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -18,7 +17,6 @@ define <4 x float>@test_var_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %
define <4 x float>@test_var_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 %mask) {
; CHECK-LABEL: test_var_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -30,7 +28,8 @@ define <4 x float>@test_var_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float>
define <4 x float>@test_const0_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const0_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kxorw %k0, %k0, %k1
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 0, i32 4)
@@ -41,7 +40,8 @@ define <4 x float>@test_const0_mask(<4 x float> %v0, <4 x float> %v1, <4 x float
define <4 x float>@test_const0_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const0_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kxorw %k0, %k0, %k1
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <4 x float>
@llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 0, i32 4)
@@ -52,7 +52,8 @@ define <4 x float>@test_const0_maskz(<4 x float> %v0, <4 x float> %v1, <4 x floa
define <4 x float>@test_const2_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const2_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kxorw %k0, %k0, %k1
+; CHECK-NEXT: movb $2, %al
+; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 2, i32 4)
@@ -63,7 +64,8 @@ define <4 x float>@test_const2_mask(<4 x float> %v0, <4 x float> %v1, <4 x float
define <4 x float>@test_const2_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const2_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kxorw %k0, %k0, %k1
+; CHECK-NEXT: movb $2, %al
+; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 2, i32 4)
diff --git a/test/CodeGen/X86/avx512-select.ll b/test/CodeGen/X86/avx512-select.ll
index 1859b1bcfaf6..e81f983d9fe6 100644
--- a/test/CodeGen/X86/avx512-select.ll
+++ b/test/CodeGen/X86/avx512-select.ll
@@ -161,7 +161,7 @@ define i64 @pr30249() {
define double @pr30561_f64(double %b, double %a, i1 %c) {
; CHECK-LABEL: pr30561_f64:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: andb $1, %dil
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -172,7 +172,7 @@ define double @pr30561_f64(double %b, double %a, i1 %c) {
define float @pr30561_f32(float %b, float %a, i1 %c) {
; CHECK-LABEL: pr30561_f32:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: andb $1, %dil
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
index faa055dfbbf3..9b4e73a18fc2 100644
--- a/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
@@ -796,9 +796,9 @@ define <32 x i16>@test_int_x86_avx512_mask_psrl_w_512(<32 x i16> %x0, <8 x i16>
; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm2 {%k1}
-; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: vpaddw %zmm0, %zmm2, %zmm0
+; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm1
+; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_psrl_w_512:
@@ -806,9 +806,9 @@ define <32 x i16>@test_int_x86_avx512_mask_psrl_w_512(<32 x i16> %x0, <8 x i16>
; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm2 {%k1}
-; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm2
; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-32-NEXT: vpaddw %zmm0, %zmm2, %zmm0
+; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm1
+; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.mask.psrl.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3)
%res1 = call <32 x i16> @llvm.x86.avx512.mask.psrl.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 -1)
@@ -826,8 +826,8 @@ define
<32 x i16>@test_int_x86_avx512_mask_psrl_wi_512(<32 x i16> %x0, i32 %x1,
; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm2
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm1 {%k1}
-; AVX512BW-NEXT: vpaddw %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpaddw %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
@@ -836,8 +836,8 @@ define <32 x i16>@test_int_x86_avx512_mask_psrl_wi_512(<32 x i16> %x0, i32 %x1,
; AVX512F-32-NEXT: vpsrlw $3, %zmm0, %zmm2
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsrlw $3, %zmm0, %zmm1 {%k1}
-; AVX512F-32-NEXT: vpaddw %zmm2, %zmm1, %zmm1
; AVX512F-32-NEXT: vpsrlw $3, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT: vpaddw %zmm2, %zmm1, %zmm1
; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.mask.psrl.wi.512(<32 x i16> %x0, i32 3, <32 x i16> %x2, i32 %x3)
diff --git a/test/CodeGen/X86/avx512bw-intrinsics.ll b/test/CodeGen/X86/avx512bw-intrinsics.ll
index 13b850ccc3b6..3337f42eb142 100644
--- a/test/CodeGen/X86/avx512bw-intrinsics.ll
+++ b/test/CodeGen/X86/avx512bw-intrinsics.ll
@@ -2159,9 +2159,9 @@ define <32 x i16>@test_int_x86_avx512_mask_dbpsadbw_512(<64 x i8> %x0, <64 x i8>
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm3 {%k1} {z}
-; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm2
; AVX512BW-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpaddw %zmm0, %zmm2, %zmm0
+; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm1
+; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_dbpsadbw_512:
@@ -2169,9 +2169,9 @@ define <32 x i16>@test_int_x86_avx512_mask_dbpsadbw_512(<64 x i8> %x0, <64 x i8>
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm3 {%k1} {z}
-; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm2
; AVX512F-32-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm0
-; AVX512F-32-NEXT: vpaddw %zmm0, %zmm2, %zmm0
+; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm1
+; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.mask.dbpsadbw.512(<64 x i8> %x0, <64 x i8> %x1, i32 2, <32 x i16> %x3, i32 %x4)
%res1 = call <32 x i16> @llvm.x86.avx512.mask.dbpsadbw.512(<64 x i8> %x0, <64 x i8> %x1, i32 2, <32 x i16> zeroinitializer, i32 %x4)
@@ -2411,9 +2411,9 @@ define <32 x i16>@test_int_x86_avx512_mask_permvar_hi_512(<32 x i16> %x0, <32 x
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm2 {%k1}
; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm3 {%k1} {z}
-; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm2
; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm0
-; AVX512BW-NEXT: vpaddw %zmm0, %zmm2, %zmm0
+; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm1
+; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_permvar_hi_512:
@@ -2421,9 +2421,9 @@ define <32 x i16>@test_int_x86_avx512_mask_permvar_hi_512(<32 x i16> %x0, <32 x
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpermw %zmm0, %zmm1, %zmm2 {%k1}
; AVX512F-32-NEXT: vpermw %zmm0, %zmm1, %zmm3 {%k1} {z}
-; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm2
; AVX512F-32-NEXT: vpermw %zmm0, %zmm1, %zmm0
-; AVX512F-32-NEXT: vpaddw %zmm0, %zmm2, %zmm0
+; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm1
+;
AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3)
%res1 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> zeroinitializer, i32 %x3)
diff --git a/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
index 571f345d4616..7df07b0413ed 100644
--- a/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
@@ -9,8 +9,8 @@ define <32 x i8>@test_int_x86_avx512_pbroadcastb_256(<16 x i8> %x0, <32 x i8> %x
; CHECK-NEXT: vpbroadcastb %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x78,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastb %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x78,0xc8]
-; CHECK-NEXT: vpaddb %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfc,0xc9]
; CHECK-NEXT: vpbroadcastb %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x78,0xc0]
+; CHECK-NEXT: vpaddb %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfc,0xc9]
; CHECK-NEXT: vpaddb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfc,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx512.pbroadcastb.256(<16 x i8> %x0, <32 x i8> %x1, i32 -1)
@@ -29,8 +29,8 @@ define <16 x i8>@test_int_x86_avx512_pbroadcastb_128(<16 x i8> %x0, <16 x i8> %x
; CHECK-NEXT: vpbroadcastb %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x78,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x78,0xc8]
-; CHECK-NEXT: vpaddb %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc9]
; CHECK-NEXT: vpbroadcastb %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x78,0xc0]
+; CHECK-NEXT: vpaddb %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc9]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.avx512.pbroadcastb.128(<16 x i8> %x0, <16 x i8> %x1, i16 -1)
@@ -49,8 +49,8 @@ define <16 x i16>@test_int_x86_avx512_pbroadcastw_256(<8 x i16> %x0, <16 x i16>
; CHECK-NEXT: vpbroadcastw %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x79,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastw %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x79,0xc8]
-; CHECK-NEXT: vpaddw %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc9]
; CHECK-NEXT: vpbroadcastw %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x79,0xc0]
+; CHECK-NEXT: vpaddw %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc9]
; CHECK-NEXT: vpaddw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.pbroadcastw.256(<8 x i16> %x0, <16 x i16> %x1, i16 -1)
@@ -69,8 +69,8 @@ define <8 x i16>@test_int_x86_avx512_pbroadcastw_128(<8 x i16> %x0, <8 x i16> %x
; CHECK-NEXT: vpbroadcastw %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x79,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastw %xmm0, %xmm1
{%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x79,0xc8]
-; CHECK-NEXT: vpaddw %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc9]
; CHECK-NEXT: vpbroadcastw %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x79,0xc0]
+; CHECK-NEXT: vpaddw %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc9]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.pbroadcastw.128(<8 x i16> %x0, <8 x i16> %x1, i8 -1)
@@ -89,8 +89,8 @@ define <64 x i8>@test_int_x86_avx512_pbroadcastb_512(<16 x i8> %x0, <64 x i8> %x
; CHECK-NEXT: vpbroadcastb %xmm0, %zmm2 ## encoding: [0x62,0xf2,0x7d,0x48,0x78,0xd0]
; CHECK-NEXT: kmovq %rdi, %k1 ## encoding: [0xc4,0xe1,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastb %xmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x78,0xc8]
-; CHECK-NEXT: vpaddb %zmm1, %zmm2, %zmm1 ## encoding: [0x62,0xf1,0x6d,0x48,0xfc,0xc9]
; CHECK-NEXT: vpbroadcastb %xmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0x78,0xc0]
+; CHECK-NEXT: vpaddb %zmm1, %zmm2, %zmm1 ## encoding: [0x62,0xf1,0x6d,0x48,0xfc,0xc9]
; CHECK-NEXT: vpaddb %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfc,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <64 x i8> @llvm.x86.avx512.pbroadcastb.512(<16 x i8> %x0, <64 x i8> %x1, i64 -1)
@@ -109,8 +109,8 @@ define <32 x i16>@test_int_x86_avx512_pbroadcastw_512(<8 x i16> %x0, <32 x i16>
; CHECK-NEXT: vpbroadcastw %xmm0, %zmm2 ## encoding: [0x62,0xf2,0x7d,0x48,0x79,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastw %xmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x79,0xc8]
-; CHECK-NEXT: vpaddw %zmm1, %zmm2, %zmm1 ## encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc9]
; CHECK-NEXT: vpbroadcastw %xmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0x79,0xc0]
+; CHECK-NEXT: vpaddw %zmm1, %zmm2, %zmm1 ## encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc9]
; CHECK-NEXT: vpaddw %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <32 x i16> @llvm.x86.avx512.pbroadcastw.512(<8 x i16> %x0, <32 x i16> %x1, i32 -1)
@@ -1476,9 +1476,9 @@ define <8 x i16>@test_int_x86_avx512_mask_psrl_w_128(<8 x i16> %x0, <8 x i16> %x
; CHECK-NEXT: vpsrlw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd1,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsrlw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd1,0xd1]
-; CHECK-NEXT: vpaddw %xmm3, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xd3]
; CHECK-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd1,0xc1]
-; CHECK-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc2]
+; CHECK-NEXT: vpaddw %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xcb]
+; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.psrl.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3)
%res1 = call <8 x i16> @llvm.x86.avx512.mask.psrl.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1)
@@ -1496,9 +1496,9 @@ define <16 x i16>@test_int_x86_avx512_mask_psrl_w_256(<16 x i16> %x0, <8 x i16>
; CHECK-NEXT: vpsrlw %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding:
[0xc5,0xfd,0xd1,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsrlw %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd1,0xd1]
-; CHECK-NEXT: vpaddw %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xd3]
; CHECK-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd1,0xc1]
-; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
+; CHECK-NEXT: vpaddw %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xcb]
+; CHECK-NEXT: vpaddw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.psrl.w.256(<16 x i16> %x0, <8 x i16> %x1, <16 x i16> %x2, i16 %x3)
%res1 = call <16 x i16> @llvm.x86.avx512.mask.psrl.w.256(<16 x i16> %x0, <8 x i16> %x1, <16 x i16> %x2, i16 -1)
@@ -1596,8 +1596,8 @@ define <8 x i16>@test_int_x86_avx512_mask_psrl_wi_128(<8 x i16> %x0, i32 %x1, <8
; CHECK-NEXT: vpsrlw $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x71,0xd0,0x03]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsrlw $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x71,0xd0,0x03]
-; CHECK-NEXT: vpaddw %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfd,0xca]
; CHECK-NEXT: vpsrlw $3, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x71,0xd0,0x03]
+; CHECK-NEXT: vpaddw %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfd,0xca]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.psrl.wi.128(<8 x i16> %x0, i32 3, <8 x i16> %x2, i8 %x3)
@@ -1616,8 +1616,8 @@ define <16 x i16>@test_int_x86_avx512_mask_psrl_wi_256(<16 x i16> %x0, i32 %x1,
; CHECK-NEXT: vpsrlw $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x71,0xd0,0x03]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsrlw $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x71,0xd0,0x03]
-; CHECK-NEXT: vpaddw %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xca]
; CHECK-NEXT: vpsrlw $3, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x71,0xd0,0x03]
+; CHECK-NEXT: vpaddw %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xca]
; CHECK-NEXT: vpaddw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.psrl.wi.256(<16 x i16> %x0, i32 3, <16 x i16> %x2, i16 %x3)
diff --git a/test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll
index f8f47c87100a..8f528394f5bd 100644
--- a/test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll
@@ -9,8 +9,8 @@ define <4 x i32>@test_int_x86_avx512_mask_vplzcnt_d_128(<4 x i32> %x0, <4 x i32>
; CHECK-NEXT: vplzcntd %xmm0, %xmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntd %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; CHECK-NEXT: vplzcntd %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.avx512.mask.lzcnt.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
diff --git
a/test/CodeGen/X86/avx512cdvl-intrinsics.ll b/test/CodeGen/X86/avx512cdvl-intrinsics.ll
index 96254f7c95b0..37aea45e6107 100644
--- a/test/CodeGen/X86/avx512cdvl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512cdvl-intrinsics.ll
@@ -7,8 +7,8 @@ define <4 x i32> @test_int_x86_avx512_mask_vplzcnt_d_128(<4 x i32> %x0, <4 x i32
; CHECK-NEXT: vplzcntd %xmm0, %xmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntd %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; CHECK-NEXT: vplzcntd %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%1 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %x0, i1 false)
diff --git a/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
index 1377733739fe..cf79819734a2 100644
--- a/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
@@ -13,10 +13,9 @@ define <2 x double>@test_int_x86_avx512_mask_vextractf64x2_512(<8 x double> %x0,
; CHECK-NEXT: kshiftlb $6, %k0, %k0
; CHECK-NEXT: kshiftrb $7, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vmovq %rax, %xmm2
-; CHECK-NEXT: kmovw %k1, %eax
-; CHECK-NEXT: vmovq %rax, %xmm3
-; CHECK-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
+; CHECK-NEXT: kmovw %k1, %ecx
+; CHECK-NEXT: vmovd %ecx, %xmm2
+; CHECK-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
; CHECK-NEXT: vpsllq $63, %xmm2, %xmm2
; CHECK-NEXT: vpsraq $63, %zmm2, %zmm2
; CHECK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
@@ -40,8 +39,8 @@ define <8 x float>@test_int_x86_avx512_mask_vextractf32x8(<16 x float> %x0, <8 x
; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm1 {%k1}
-; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1
; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1
; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%res = call <8 x float> @llvm.x86.avx512.mask.vextractf32x8.512(<16 x float> %x0,i32 1, <8 x float> %x2, i8 %x3)
diff --git a/test/CodeGen/X86/avx512dq-intrinsics.ll b/test/CodeGen/X86/avx512dq-intrinsics.ll
index 97ac0fde10ec..06ee237593e7 100644
--- a/test/CodeGen/X86/avx512dq-intrinsics.ll
+++ b/test/CodeGen/X86/avx512dq-intrinsics.ll
@@ -262,7 +262,6 @@ declare <4 x float> @llvm.x86.avx512.mask.reduce.ss(<4 x float>, <4 x float>,<4
define <4 x float>@test_int_x86_avx512_mask_reduce_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_reduce_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vreducess $4, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vreducess $4, {sae}, %xmm1, %xmm0, %xmm0
@@ -279,7 +278,6 @@ declare <4 x float> @llvm.x86.avx512.mask.range.ss(<4 x float>, <4 x float>,<4 x
define <4 x float>@test_int_x86_avx512_mask_range_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_range_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vrangess $4, {sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vrangess $4, {sae}, %xmm1, %xmm0, %xmm0
@@ -296,7 +294,6 @@ declare <2 x double> @llvm.x86.avx512.mask.reduce.sd(<2 x double>, <2 x double>,
define <2 x double>@test_int_x86_avx512_mask_reduce_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_reduce_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi,
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vreducesd $4, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vreducesd $4, {sae}, %xmm1, %xmm0, %xmm0
@@ -313,7 +310,6 @@ declare <2 x double> @llvm.x86.avx512.mask.range.sd(<2 x double>, <2 x double>,<
define <2 x double>@test_int_x86_avx512_mask_range_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_range_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vrangesd $4, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vrangesd $4, {sae}, %xmm1, %xmm0, %xmm0
@@ -367,14 +363,11 @@ declare i8 @llvm.x86.avx512.mask.fpclass.sd(<2 x double>, i32, i8)
define i8 @test_int_x86_avx512_mask_fpclass_sd(<2 x double> %x0, i8 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfpclasssd $2, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %ecx
-; CHECK-NEXT: andl $1, %ecx
; CHECK-NEXT: vfpclasssd $4, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: addb %cl, %al
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
@@ -389,14 +382,11 @@ declare i8 @llvm.x86.avx512.mask.fpclass.ss(<4 x float>, i32, i8)
define i8 @test_int_x86_avx512_mask_fpclass_ss(<4 x float> %x0, i8 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfpclassss $4, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %ecx
-; CHECK-NEXT: andl $1, %ecx
; CHECK-NEXT: vfpclassss $4, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: addb %cl, %al
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
@@ -414,8 +404,8 @@ define <16 x float>@test_int_x86_avx512_mask_broadcastf32x2_512(<4 x float> %x0,
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm2 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
-; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vaddps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.broadcastf32x2.512(<4 x float> %x0, <16 x float> %x2, i16 %x3)
@@ -434,8 +424,8 @@ define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x2_512(<4 x i32> %x0, <16
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm2 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
-; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.broadcasti32x2.512(<4 x i32> %x0, <16 x i32> %x2, i16 %x3)
diff --git a/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
index 595b3e0ebb86..52a84deebf51 100644
--- a/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
@@ -1568,8 +1568,8 @@ define <2 x double>@test_int_x86_avx512_mask_vextractf64x2_256(<4 x double> %x0,
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x19,0xc2,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vextractf64x2 $1, %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x19,0xc1,0x01]
-; CHECK-NEXT: vaddpd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xca]
; CHECK-NEXT: vextractf64x2 $1, %ymm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0x19,0xc0,0x01]
+; CHECK-NEXT: vaddpd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xca]
; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx512.mask.vextractf64x2.256(<4 x double> %x0,i32 1, <2 x double> %x2, i8 %x3)
@@ -1588,9 +1588,9 @@ define <4 x double>@test_int_x86_avx512_mask_insertf64x2_256(<4 x double> %x0, <
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd9,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf64x2 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x18,0xd1,0x01]
-; CHECK-NEXT: vaddpd %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xd3]
; CHECK-NEXT: vinsertf64x2 $1, %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0x18,0xc1,0x01]
-; CHECK-NEXT: vaddpd %ymm2, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc2]
+; CHECK-NEXT: vaddpd %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xcb]
+; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.mask.insertf64x2.256(<4 x double> %x0, <2 x double> %x1, i32 1, <4 x double> %x3, i8 %x4)
%res1 = call <4 x double> @llvm.x86.avx512.mask.insertf64x2.256(<4 x double> %x0, <2 x double> %x1, i32 1, <4 x double> %x3, i8 -1)
@@ -1608,9 +1608,9 @@ define <4 x i64>@test_int_x86_avx512_mask_inserti64x2_256(<4 x i64> %x0, <2 x i6
; CHECK-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd9,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinserti64x2 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x38,0xd1,0x01]
-; CHECK-NEXT: vpaddq %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xd3]
; CHECK-NEXT: vinserti64x2 $1, %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0x38,0xc1,0x01]
-; CHECK-NEXT: vpaddq %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
+; CHECK-NEXT: vpaddq %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xcb]
+; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.inserti64x2.256(<4 x i64> %x0, <2 x i64> %x1, i32 1, <4 x i64> %x3, i8 %x4)
%res1 = call <4 x i64> @llvm.x86.avx512.mask.inserti64x2.256(<4 x i64> %x0, <2 x i64> %x1, i32 1, <4 x i64> %x3, i8 -1)
diff --git a/test/CodeGen/X86/avx512dqvl-intrinsics.ll b/test/CodeGen/X86/avx512dqvl-intrinsics.ll
index 1bfdfd0e634d..ad9ea93c2031 100644
--- a/test/CodeGen/X86/avx512dqvl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512dqvl-intrinsics.ll
@@ -635,8 +635,8 @@ define <8 x float>@test_int_x86_avx512_mask_broadcastf32x2_256(<4 x float> %x0,
; CHECK-NEXT: ## ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1]
; CHECK-NEXT: vbroadcastf32x2 %xmm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x19,0xd0]
; CHECK-NEXT: ## ymm2 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1]
-; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xca]
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x19,0xc0]
+; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xca]
; CHECK-NEXT: vaddps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.broadcastf32x2.256(<4 x float> %x0, <8 x float> %x2, i8 %x3)
@@ -680,8 +680,8 @@ define <4 x i32>@test_int_x86_avx512_mask_broadcasti32x2_128(<4 x i32> %x0, <4 x
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vbroadcasti32x2 %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x59,0xc8]
; CHECK-NEXT: vbroadcasti32x2 %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x59,0xd0]
-; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x59,0xc0]
+; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.broadcasti32x2.128(<4 x i32> %x0, <4 x i32> %x2, i8 %x3)
diff --git a/test/CodeGen/X86/avx512er-intrinsics.ll b/test/CodeGen/X86/avx512er-intrinsics.ll
index b8531e25bfa1..0e4922f37bbb 100644
--- a/test/CodeGen/X86/avx512er-intrinsics.ll
+++ b/test/CodeGen/X86/avx512er-intrinsics.ll
@@ -121,7 +121,6 @@ declare <4 x float> @llvm.x86.avx512.rcp28.ss(<4 x float>, <4 x float>, <4 x flo
define <4 x float> @test_rsqrt28_ss_maskz(<4 x float> %a0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_ss_maskz:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %edi # encoding: [0x83,0xe7,0x01]
; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28ss {sae}, %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x99,0xcd,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
@@ -132,7 +131,6 @@ define <4 x float> @test_rsqrt28_ss_maskz(<4 x float> %a0, i8 %mask) {
define <4 x float> @test_rsqrt28_ss_mask(<4 x float> %a0, <4 x float> %b0, <4 x float> %c0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_ss_mask:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %edi # encoding: [0x83,0xe7,0x01]
; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28ss {sae}, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x19,0xcd,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc2]
@@ -144,7 +142,6 @@ define <4 x float> @test_rsqrt28_ss_mask(<4 x float> %a0, <4 x float> %b0, <4 x
define <2 x double> @test_rsqrt28_sd_maskz(<2 x double> %a0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_maskz:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %edi # encoding: [0x83,0xe7,0x01]
; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28sd {sae}, %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x99,0xcd,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
@@ -155,7 +152,6 @@ define <2 x double> @test_rsqrt28_sd_maskz(<2 x double> %a0, i8 %mask) {
define <2 x double> @test_rsqrt28_sd_mask(<2 x double> %a0, <2 x double> %b0, <2 x double> %c0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_mask:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %edi # encoding: [0x83,0xe7,0x01]
; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28sd {sae}, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x19,0xcd,0xd1]
; CHECK-NEXT: vmovapd %xmm2, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc2]
@@ -169,7 +165,6 @@ declare <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double>, <2 x double>, <2
define <2 x double> @test_rsqrt28_sd_maskz_mem(<2 x double> %a0, double* %ptr, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_maskz_mem:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %esi # encoding: [0x83,0xe6,0x01]
; CHECK-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vrsqrt28sd (%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
@@ -182,7 +177,6 @@ define <2 x double> @test_rsqrt28_sd_maskz_mem(<2 x double> %a0, double* %ptr, i
define <2 x double> @test_rsqrt28_sd_maskz_mem_offset(<2 x double> %a0, double* %ptr, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_maskz_mem_offset:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %esi # encoding: [0x83,0xe6,0x01]
; CHECK-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vrsqrt28sd 144(%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x47,0x12]
; CHECK-NEXT: retq # encoding: [0xc3]
diff --git a/test/CodeGen/X86/avx512ifma-intrinsics.ll b/test/CodeGen/X86/avx512ifma-intrinsics.ll
index 9659dc6d455a..30ecc0d2e49e 100644
--- a/test/CodeGen/X86/avx512ifma-intrinsics.ll
+++ b/test/CodeGen/X86/avx512ifma-intrinsics.ll
@@ -13,8 +13,8 @@ define <8 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_512(<8 x i64> %x0, <8 x i
; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm4 {%k1}
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm0 {%k1}
-; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpaddq %zmm2, %zmm3, %zmm1
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -41,8 +41,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_512(<8 x i64> %x0, <8 x
; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm4 {%k1} {z}
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpaddq %zmm2, %zmm3, %zmm1
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -69,8 +69,8 @@ define <8 x i64>@test_int_x86_avx512_mask_vpmadd52l_uq_512(<8 x i64> %x0, <8 x i
; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm4 {%k1}
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm0 {%k1}
-; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpaddq %zmm2, %zmm3, %zmm1
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -97,8 +97,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52l_uq_512(<8 x i64> %x0, <8 x
; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm4 {%k1} {z}
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpaddq %zmm2, %zmm3, %zmm1
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512ifmavl-intrinsics.ll b/test/CodeGen/X86/avx512ifmavl-intrinsics.ll
index b2fe6eba88ab..3ca686cef3bf 100644
--- a/test/CodeGen/X86/avx512ifmavl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512ifmavl-intrinsics.ll
@@ -14,8 +14,8 @@ define <2 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_128(<2 x i64> %x0, <2 x i
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm4 {%k1}
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpaddq %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -42,8 +42,8 @@ define <4 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_256(<4 x i64> %x0, <4 x i
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm4 {%k1}
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpaddq %ymm2, %ymm3, %ymm1
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -70,8 +70,8 @@ define <2 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_128(<2 x i64> %x0, <2 x
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm4 {%k1} {z}
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm0 {%k1} {z}
-; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpaddq %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -98,8 +98,8 @@ define <4 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_256(<4 x i64> %x0, <4 x
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm4 {%k1} {z}
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm0 {%k1} {z}
-; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpaddq %ymm2, %ymm3, %ymm1
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -126,8 +126,8 @@ define <2 x i64>@test_int_x86_avx512_mask_vpmadd52l_uq_128(<2 x i64> %x0, <2 x i
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm4 {%k1}
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpaddq %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -154,8 +154,8 @@ define <4 x i64>@test_int_x86_avx512_mask_vpmadd52l_uq_256(<4 x i64> %x0, <4 x i
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm4 {%k1}
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpaddq %ymm2, %ymm3, %ymm1
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -182,8 +182,8 @@ define <2 x i64>@test_int_x86_avx512_maskz_vpmadd52l_uq_128(<2 x i64> %x0, <2 x
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm4 {%k1} {z}
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm0 {%k1} {z}
-; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpaddq %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -210,8 +210,8 @@ define <4 x i64>@test_int_x86_avx512_maskz_vpmadd52l_uq_256(<4 x i64> %x0, <4 x
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm4 {%k1} {z}
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm0 {%k1} {z}
-; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpaddq %ymm2, %ymm3, %ymm1
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
index c2d8df6476b3..4d906a4fd29a 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
@@ -30,8 +30,8 @@ define <4 x i32>@test_int_x86_avx512_pbroadcastd_128(<4 x i32> %x0, <4 x i32> %x
; CHECK-NEXT: vpbroadcastd %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x58,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpbroadcastd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x58,0xc8]
-; CHECK-NEXT: vpaddd %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc9]
; CHECK-NEXT: vpbroadcastd %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x58,0xc0]
+; CHECK-NEXT: vpaddd %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc9]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.pbroadcastd.128(<4 x i32> %x0, <4 x i32> %x1, i8 -1)
@@ -50,8 +50,8 @@ define <4 x i64>@test_int_x86_avx512_pbroadcastq_256(<2 x i64> %x0, <4 x i64> %x
; CHECK-NEXT: vpbroadcastq %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x59,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpbroadcastq %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x59,0xc8]
-; CHECK-NEXT: vpaddq %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc9]
; CHECK-NEXT: vpbroadcastq %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x59,0xc0]
+; CHECK-NEXT: vpaddq %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc9]
; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.pbroadcastq.256(<2 x i64> %x0, <4 x i64> %x1,i8 -1)
@@ -70,8 +70,8 @@ define <2 x i64>@test_int_x86_avx512_pbroadcastq_128(<2 x i64> %x0, <2 x i64> %x
; CHECK-NEXT: vpbroadcastq %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x59,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpbroadcastq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x59,0xc8]
-; CHECK-NEXT: vpaddq %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc9]
; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x59,0xc0]
+; CHECK-NEXT: vpaddq %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc9]
; CHECK-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.pbroadcastq.128(<2 x i64> %x0, <2 x i64> %x1,i8 -1)
@@ -90,8 +90,8 @@ define <4 x double> @test_x86_vbroadcast_sd_pd_256(<2 x double> %a0, <4 x double
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x19,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x19,0xc8]
-; CHECK-NEXT: vaddpd %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc9]
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x19,0xc0]
+; CHECK-NEXT: vaddpd %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc9]
; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.mask.broadcast.sd.pd.256(<2 x double> %a0, <4 x double> zeroinitializer, i8 -1)
@@ -110,8 +110,8 @@ define <8 x float> @test_x86_vbroadcast_ss_ps_256(<4 x float> %a0, <8 x float> %
; CHECK-NEXT: vbroadcastss %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x18,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vbroadcastss %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x18,0xc8]
-; CHECK-NEXT: vaddps %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc9]
; CHECK-NEXT: vbroadcastss %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x18,0xc0]
+; CHECK-NEXT: vaddps %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc9]
; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.broadcast.ss.ps.256(<4 x float> %a0, <8 x float> zeroinitializer, i8 -1)
@@ -130,8 +130,8 @@ define <4 x float> @test_x86_vbroadcast_ss_ps_128(<4 x float> %a0, <4 x float> %
; CHECK-NEXT: vbroadcastss %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vbroadcastss %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x18,0xc8]
-; CHECK-NEXT: vaddps %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc9]
; CHECK-NEXT: vbroadcastss %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x18,0xc0]
+; CHECK-NEXT: vaddps %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc9]
; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.broadcast.ss.ps.128(<4 x float> %a0, <4 x float> zeroinitializer, i8 -1)
@@ -152,9 +152,9 @@ define <4 x float>@test_int_x86_avx512_mask_movsldup_128(<4 x float> %x0, <4 x f
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovsldup %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x09,0x12,0xc8]
; CHECK-NEXT: ## xmm1 {%k1} = xmm0[0,0,2,2]
-; CHECK-NEXT: vaddps %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xca]
; CHECK-NEXT: vmovsldup %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0x89,0x12,0xc0]
; CHECK-NEXT: ## xmm0 {%k1} {z} = xmm0[0,0,2,2]
+; CHECK-NEXT: vaddps %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xca]
; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.movsldup.128(<4 x float> %x0, <4 x float> %x1, i8 %x2)
@@ -175,9 +175,9 @@ define <8 x float>@test_int_x86_avx512_mask_movsldup_256(<8 x float> %x0, <8 x f
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovsldup %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x29,0x12,0xc8]
; CHECK-NEXT: ## ymm1 {%k1} = ymm0[0,0,2,2,4,4,6,6]
-; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xca]
; CHECK-NEXT: vmovsldup %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0xa9,0x12,0xc0]
; CHECK-NEXT: ## ymm0 {%k1} {z} = ymm0[0,0,2,2,4,4,6,6]
+; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xca]
; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.movsldup.256(<8 x float> %x0, <8 x float> %x1, i8 %x2)
@@ -198,9 +198,9 @@ define <4 x float>@test_int_x86_avx512_mask_movshdup_128(<4 x float> %x0, <4 x f
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovshdup %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x09,0x16,0xc8]
; CHECK-NEXT: ## xmm1 {%k1} = xmm0[1,1,3,3]
-; CHECK-NEXT: vaddps %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xca]
; CHECK-NEXT: vmovshdup %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0x89,0x16,0xc0]
; CHECK-NEXT: ## xmm0 {%k1} {z} = xmm0[1,1,3,3]
+; CHECK-NEXT: vaddps %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xca]
; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.movshdup.128(<4 x float> %x0, <4 x float> %x1, i8 %x2)
@@ -221,9 +221,9 @@ define <8 x float>@test_int_x86_avx512_mask_movshdup_256(<8 x float> %x0, <8 x f
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovshdup %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x29,0x16,0xc8]
; CHECK-NEXT: ## ymm1 {%k1} = ymm0[1,1,3,3,5,5,7,7]
-; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xca]
; CHECK-NEXT: vmovshdup %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0xa9,0x16,0xc0]
; CHECK-NEXT: ## ymm0 {%k1} {z} = ymm0[1,1,3,3,5,5,7,7]
+; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xca]
; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.movshdup.256(<8 x float> %x0, <8 x float> %x1, i8 %x2)
@@ -243,9 +243,9 @@ define <2 x double>@test_int_x86_avx512_mask_movddup_128(<2 x double> %x0, <2 x
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovddup %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xff,0x09,0x12,0xc8]
; CHECK-NEXT: ## xmm1 {%k1} = xmm0[0,0]
-; CHECK-NEXT: vaddpd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xca]
; CHECK-NEXT: vmovddup %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xff,0x89,0x12,0xc0]
; CHECK-NEXT: ## xmm0 {%k1} {z} = xmm0[0,0]
+; CHECK-NEXT: vaddpd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xca]
; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx512.mask.movddup.128(<2 x double> %x0, <2 x double> %x1, i8 %x2)
@@ -266,9 +266,9 @@ define <4 x double>@test_int_x86_avx512_mask_movddup_256(<4 x double> %x0, <4 x
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovddup %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xff,0x29,0x12,0xc8]
; CHECK-NEXT: ## ymm1 {%k1} = ymm0[0,0,2,2]
-; CHECK-NEXT: vaddpd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xca]
; CHECK-NEXT: vmovddup %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xff,0xa9,0x12,0xc0]
; CHECK-NEXT: ## ymm0 {%k1} {z} = ymm0[0,0,2,2]
+; CHECK-NEXT: vaddpd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xca]
; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.mask.movddup.256(<4 x double> %x0, <4 x double> %x1, i8 %x2)
@@ -3209,10 +3209,10 @@ define <2 x double>@test_int_x86_avx512_mask_shuf_pd_128(<2 x double> %x0, <2 x
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vshufpd $1, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0xc6,0xd1,0x01]
; CHECK-NEXT: ## xmm2 {%k1} = xmm0[1],xmm1[0]
-; CHECK-NEXT: vaddpd %xmm3, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xd3]
; CHECK-NEXT: vshufpd $1, %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0xc6,0xc1,0x01]
; CHECK-NEXT: ## xmm0 {%k1} {z} = xmm0[1],xmm1[0]
-; CHECK-NEXT: vaddpd %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc2]
+; CHECK-NEXT: vaddpd %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xcb]
+; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx512.mask.shuf.pd.128(<2 x double> %x0, <2 x double> %x1, i32 1, <2 x double> %x3, i8 %x4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.shuf.pd.128(<2 x double> %x0, <2 x double> %x1, i32 1, <2 x double> %x3, i8 -1)
@@ -3540,9 +3540,9 @@ define <2 x i64>@test_int_x86_avx512_mask_psrl_q_128(<2 x i64> %x0, <2 x i64> %x
; CHECK-NEXT: vpsrlq %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd3,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrlq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0xd3,0xd1]
-; CHECK-NEXT: vpaddq %xmm3, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xd3]
; CHECK-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0xd3,0xc1]
-; CHECK-NEXT: vpaddq %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
+; CHECK-NEXT: vpaddq %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xcb]
+; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.psrl.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
%res1 = call <2 x i64> @llvm.x86.avx512.mask.psrl.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1)
@@ -3560,9 +3560,9 @@ define <4 x i64>@test_int_x86_avx512_mask_psrl_q_256(<4 x i64> %x0, <2 x i64> %x
; CHECK-NEXT: vpsrlq %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd3,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrlq %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0xd3,0xd1]
-; CHECK-NEXT: vpaddq %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xd3]
; CHECK-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0xd3,0xc1]
-; CHECK-NEXT: vpaddq %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
+; CHECK-NEXT: vpaddq %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xcb]
+; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.psrl.q.256(<4 x i64> %x0, <2 x i64> %x1, <4 x i64> %x2, i8 %x3)
%res1 = call <4 x i64> @llvm.x86.avx512.mask.psrl.q.256(<4 x i64> %x0, <2 x i64> %x1, <4 x i64> %x2, i8 -1)
@@ -3580,9 +3580,9 @@ define <4 x i32>@test_int_x86_avx512_mask_psrl_d_128(<4 x i32> %x0, <4 x i32> %x
; CHECK-NEXT: vpsrld %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd2,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrld %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd2,0xd1]
-; CHECK-NEXT: vpaddd %xmm3, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xd3]
; CHECK-NEXT: vpsrld %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd2,0xc1]
-; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; CHECK-NEXT: vpaddd %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xcb]
+; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.psrl.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3)
%res1 = call <4 x i32> @llvm.x86.avx512.mask.psrl.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 -1)
@@ -3600,9 +3600,9 @@ define <8 x i32>@test_int_x86_avx512_mask_psrl_d_256(<8 x i32> %x0, <4 x i32> %x
; CHECK-NEXT: vpsrld %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd2,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrld %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd2,0xd1]
-; CHECK-NEXT: vpaddd %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xd3]
; CHECK-NEXT: vpsrld %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd2,0xc1]
-; CHECK-NEXT: vpaddd %ymm2, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
+; CHECK-NEXT: vpaddd %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xcb]
+; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.psrl.d.256(<8 x i32> %x0, <4 x i32> %x1, <8 x i32> %x2, i8 %x3)
%res1 = call <8 x i32> @llvm.x86.avx512.mask.psrl.d.256(<8 x i32> %x0, <4 x i32> %x1, <8 x i32> %x2, i8 -1)
@@ -3720,8 +3720,8 @@ define <2 x i64>@test_int_x86_avx512_mask_psrl_qi_128(<2 x i64> %x0, i32 %x1, <2
; CHECK-NEXT: vpsrlq $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x73,0xd0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsrlq $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x73,0xd0,0x03]
-; CHECK-NEXT: vpaddq %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xca]
; CHECK-NEXT: vpsrlq $3, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x73,0xd0,0x03]
+; CHECK-NEXT: vpaddq %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xca]
; CHECK-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.psrl.qi.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 %x3)
@@ -3740,8 +3740,8 @@ define <4 x i64>@test_int_x86_avx512_mask_psrl_qi_256(<4 x i64> %x0, i32 %x1, <4
; CHECK-NEXT: vpsrlq $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x73,0xd0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsrlq $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x73,0xd0,0x03]
-; CHECK-NEXT: vpaddq %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xca]
; CHECK-NEXT: vpsrlq $3, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x73,0xd0,0x03]
+; CHECK-NEXT: vpaddq %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xca]
; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.psrl.qi.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 %x3)
@@ -3760,8 +3760,8 @@ define <4 x i32>@test_int_x86_avx512_mask_psrl_di_128(<4 x i32> %x0, i32 %x1, <4
; CHECK-NEXT: vpsrld $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x72,0xd0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsrld $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x72,0xd0,0x03]
-; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
; CHECK-NEXT: vpsrld $3, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x72,0xd0,0x03]
+; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.psrl.di.128(<4 x i32> %x0, i32 3, <4 x i32> %x2, i8 %x3)
@@ -3780,8 +3780,8 @@ define <8 x i32>@test_int_x86_avx512_mask_psrl_di_256(<8 x i32> %x0, i32 %x1, <8
; CHECK-NEXT: vpsrld $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x72,0xd0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsrld $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x72,0xd0,0x03]
-; CHECK-NEXT: vpaddd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xca]
; CHECK-NEXT: vpsrld $3, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xd0,0x03]
+; CHECK-NEXT: vpaddd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xca]
; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.psrl.di.256(<8 x i32> %x0, i32 3, <8 x i32> %x2, i8 %x3)
@@ -4642,10 +4642,10 @@ define <4 x i32>@test_int_x86_avx512_mask_valign_d_128(<4 x i32> %x0, <4 x i32>
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: valignd $2, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x03,0xd1,0x02]
; CHECK-NEXT: ## xmm2 {%k1} = xmm1[2,3],xmm0[0,1]
-; CHECK-NEXT: vpaddd %xmm3, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xd3]
; CHECK-NEXT: valignd $2, %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0x89,0x03,0xc1,0x02]
; CHECK-NEXT: ## xmm0 {%k1} {z} = xmm1[2,3],xmm0[0,1]
-; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; CHECK-NEXT: vpaddd %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xcb]
+; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.valign.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 2, <4 x i32> %x3, i8 %x4)
%res1 = call <4 x i32> @llvm.x86.avx512.mask.valign.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 2, <4 x i32> %x3, i8 -1)
@@ -4817,9 +4817,9 @@ define <8 x float>@test_int_x86_avx512_mask_insertf32x4_256(<8 x float> %x0, <4
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd9,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x18,0xd1,0x01]
-; CHECK-NEXT: vaddps %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xd3]
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0xa9,0x18,0xc1,0x01]
-; CHECK-NEXT: vaddps %ymm2, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc2]
+; CHECK-NEXT: vaddps %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xcb]
+; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.insertf32x4.256(<8 x float> %x0, <4 x float> %x1, i32 1, <8 x float> %x3, i8 %x4)
%res1 = call <8 x float> @llvm.x86.avx512.mask.insertf32x4.256(<8 x float> %x0, <4 x float> %x1, i32 1, <8 x float> %x3, i8 -1)
@@ -4837,9 +4837,9 @@ define <8 x i32>@test_int_x86_avx512_mask_inserti32x4_256(<8 x i32> %x0, <4 x i3
; CHECK-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd9,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x38,0xd1,0x01]
-; CHECK-NEXT: vpaddd %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xd3]
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0xa9,0x38,0xc1,0x01]
-; CHECK-NEXT: vpaddd %ymm2, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
+; CHECK-NEXT: vpaddd %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xcb]
+; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.inserti32x4.256(<8 x i32> %x0, <4 x i32> %x1, i32 1, <8 x i32> %x3, i8 %x4)
diff --git a/test/CodeGen/X86/avx512vl-intrinsics.ll b/test/CodeGen/X86/avx512vl-intrinsics.ll
index 684b0468cf51..1f324d679564 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics.ll
@@ -4368,8 +4368,8 @@ define <4 x i32>@test_int_x86_avx512_mask_prol_d_128(<4 x i32> %x0, i32 %x1, <4
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprold $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x72,0xc8,0x03]
; CHECK-NEXT: vprold $3, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf1,0x6d,0x89,0x72,0xc8,0x03]
-; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
; CHECK-NEXT: vprold $3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x72,0xc8,0x03]
+; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.prol.d.128(<4 x i32> %x0, i32 3, <4 x i32> %x2, i8 %x3)
@@ -4388,8 +4388,8 @@ define <8 x i32>@test_int_x86_avx512_mask_prol_d_256(<8 x i32> %x0, i32 %x1, <8
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprold $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x72,0xc8,0x03]
; CHECK-NEXT: vprold $3, %ymm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf1,0x6d,0xa9,0x72,0xc8,0x03]
-; CHECK-NEXT: vpaddd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xca]
; CHECK-NEXT: vprold $3, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0x72,0xc8,0x03]
+; CHECK-NEXT: vpaddd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xca]
; CHECK-NEXT: vpaddd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.prol.d.256(<8 x i32> %x0, i32 3, <8 x i32> %x2, i8 %x3)
@@ -4408,8 +4408,8 @@ define <2 x i64>@test_int_x86_avx512_mask_prol_q_128(<2 x i64> %x0, i32 %x1, <2
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprolq $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc8,0x03]
; CHECK-NEXT: vprolq $3, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf1,0xed,0x89,0x72,0xc8,0x03]
-; CHECK-NEXT: vpaddq %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xca]
; CHECK-NEXT: vprolq $3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0x72,0xc8,0x03]
+; CHECK-NEXT: vpaddq %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xca]
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.prol.q.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 %x3)
@@ -4428,8 +4428,8 @@ define <4 x i64>@test_int_x86_avx512_mask_prol_q_256(<4 x i64> %x0, i32 %x1, <4
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprolq $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc8,0x03]
; CHECK-NEXT: vprolq $3, %ymm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf1,0xed,0xa9,0x72,0xc8,0x03]
-; CHECK-NEXT: vpaddq %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xca]
; CHECK-NEXT: vprolq $3, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0x72,0xc8,0x03]
+; CHECK-NEXT: vpaddq %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xca]
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.prol.q.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 %x3)
@@ -4528,8 +4528,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pror_d_128(<4 x i32> %x0, i32 %x1, <4
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprord $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x72,0xc0,0x03]
; CHECK-NEXT: vprord $3, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf1,0x6d,0x89,0x72,0xc0,0x03]
-; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
; CHECK-NEXT: vprord $3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x72,0xc0,0x03]
+; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.pror.d.128(<4 x i32> %x0, i32 3, <4 x i32> %x2, i8 %x3)
@@ -4548,8 +4548,8 @@ define <8 x i32>@test_int_x86_avx512_mask_pror_d_256(<8 x i32> %x0, i32 %x1, <8
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprord $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x72,0xc0,0x03]
; CHECK-NEXT: vprord $3, %ymm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf1,0x6d,0xa9,0x72,0xc0,0x03]
-; CHECK-NEXT: vpaddd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xca]
; CHECK-NEXT: vprord $3, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0x72,0xc0,0x03]
+; CHECK-NEXT: vpaddd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xca]
; CHECK-NEXT: vpaddd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.pror.d.256(<8 x i32> %x0, i32 3, <8 x i32> %x2, i8 %x3)
@@ -4568,8 +4568,8 @@ define <2 x i64>@test_int_x86_avx512_mask_pror_q_128(<2 x i64> %x0, i32 %x1, <2
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprorq $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc0,0x03]
; CHECK-NEXT: vprorq $3, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf1,0xed,0x89,0x72,0xc0,0x03]
-; CHECK-NEXT: vpaddq %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xca]
; CHECK-NEXT: vprorq $3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0x72,0xc0,0x03]
+; CHECK-NEXT: vpaddq %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xca]
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.pror.q.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 %x3)
@@ -4588,8 +4588,8 @@ define <4 x i64>@test_int_x86_avx512_mask_pror_q_256(<4 x i64> %x0, i32 %x1, <4
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprorq $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc0,0x03]
; CHECK-NEXT: vprorq $3, %ymm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf1,0xed,0xa9,0x72,0xc0,0x03]
-; CHECK-NEXT: vpaddq %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xca]
; CHECK-NEXT: vprorq $3, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0x72,0xc0,0x03]
+; CHECK-NEXT: vpaddq %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xca]
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.pror.q.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 %x3)
@@ -4690,9 +4690,9 @@ define <2 x double>@test_int_x86_avx512_mask_fixupimm_pd_128(<2 x double> %x0, <
; CHECK-NEXT: vfixupimmpd $5, %xmm2, %xmm1, %xmm3 {%k1} ## encoding: [0x62,0xf3,0xf5,0x09,0x54,0xda,0x05]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vfixupimmpd $4, %xmm2, %xmm1, %xmm4 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0x89,0x54,0xe2,0x04]
-; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xdc]
; CHECK-NEXT: vfixupimmpd $3, %xmm2, %xmm1, %xmm0 ## encoding: [0x62,0xf3,0xf5,0x08,0x54,0xc2,0x03]
-; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc0]
+; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xcc]
+; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx512.mask.fixupimm.pd.128(<2 x double> %x0, <2 x double> %x1,<2 x i64> %x2, i32 5, i8 %x4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.fixupimm.pd.128(<2 x double> zeroinitializer, <2 x double> %x1, <2 x i64> %x2, i32 4, i8 %x4)
@@ -4732,9 +4732,9 @@ define <4 x double>@test_int_x86_avx512_mask_fixupimm_pd_256(<4 x double> %x0, <
; CHECK-NEXT: vfixupimmpd $4, %ymm2, %ymm1, %ymm3 {%k1} ## encoding: [0x62,0xf3,0xf5,0x29,0x54,0xda,0x04]
; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
; CHECK-NEXT: vfixupimmpd $5, %ymm2, %ymm1, %ymm4 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0xa9,0x54,0xe2,0x05]
-; CHECK-NEXT: vaddpd %ymm4, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xdc]
; CHECK-NEXT: vfixupimmpd $3, %ymm2, %ymm1, %ymm0 ## encoding: [0x62,0xf3,0xf5,0x28,0x54,0xc2,0x03]
-; CHECK-NEXT: vaddpd %ymm0, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc0]
+; CHECK-NEXT: vaddpd %ymm4, %ymm3, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xcc]
+; CHECK-NEXT: vaddpd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.mask.fixupimm.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x i64> %x2, i32 4, i8 %x4)
%res1 = call <4 x double> @llvm.x86.avx512.mask.fixupimm.pd.256(<4 x double> zeroinitializer, <4 x double> %x1, <4 x i64> %x2 , i32 5, i8 %x4)
@@ -4755,9 +4755,9 @@ define <4 x double>@test_int_x86_avx512_maskz_fixupimm_pd_256(<4 x double> %x0,
; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
; CHECK-NEXT: vmovapd %ymm0, %ymm5 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xe8]
; CHECK-NEXT: vfixupimmpd $4, %ymm4, %ymm1, %ymm5 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0xa9,0x54,0xec,0x04]
-; CHECK-NEXT: vaddpd %ymm5, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xdd]
; CHECK-NEXT: vfixupimmpd $3, %ymm2, %ymm1, %ymm0 ## encoding: [0x62,0xf3,0xf5,0x28,0x54,0xc2,0x03]
-; CHECK-NEXT: vaddpd %ymm0, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc0]
+; CHECK-NEXT: vaddpd %ymm5, %ymm3, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xcd]
+; CHECK-NEXT: vaddpd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.maskz.fixupimm.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x i64> %x2, i32 5, i8 %x4)
%res1 = call <4 x double> @llvm.x86.avx512.maskz.fixupimm.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x i64> zeroinitializer, i32 4, i8 %x4)
diff --git a/test/CodeGen/X86/bitcast-setcc-128.ll b/test/CodeGen/X86/bitcast-setcc-128.ll
new file mode 100644
index 000000000000..d1508f99fc71
--- /dev/null
+++ b/test/CodeGen/X86/bitcast-setcc-128.ll
@@ -0,0 +1,823 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse2 < %s | FileCheck %s --check-prefixes=CHECK,SSE2-SSSE3,SSE2
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+ssse3 < %s | FileCheck %s --check-prefixes=CHECK,SSE2-SSSE3,SSSE3
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx < %s | FileCheck %s --check-prefixes=CHECK,AVX1
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx512f,+avx512vl,+avx512bw < %s | FileCheck %s --check-prefixes=CHECK,AVX512
+
+define i8 @v8i16(<8 x i16> %a, <8 x i16> %b) {
+; SSE2-SSSE3-LABEL: v8i16:
+; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3-NEXT: pcmpgtw %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pextrw $7, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pextrw $6, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pextrw $5, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pextrw $4, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pextrw $3, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pextrw $2, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pextrw $1, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movd %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v8i16:
+; AVX1: ## BB#0:
+; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrw $7, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrw $6, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrw $5, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrw $4, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrw $3, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrw $2, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrw $1, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v8i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: retq
+ %x = icmp sgt <8 x i16> %a, %b
+ %res = bitcast <8 x i1> %x to i8
+ ret i8 %res
+}
+
+define i4 @v4i32(<4 x i32> %a, <4 x i32> %b) {
+; SSE2-SSSE3-LABEL: v4i32:
+; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: movd %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE2-SSSE3-NEXT: movd %xmm1, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-SSSE3-NEXT: movd %xmm1, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-SSSE3-NEXT: movd %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v4i32:
+; AVX1: ## BB#0:
+; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrd $3, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrd $2, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrd $1, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v4i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT: retq
+ %x = icmp sgt <4 x i32> %a, %b
+ %res = bitcast <4 x i1> %x to i4
+ ret i4 %res
+}
+
+define i4 @v4f32(<4 x float> %a, <4 x float> %b) {
+; SSE2-SSSE3-LABEL: v4f32:
+; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3-NEXT: cmpltps %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: movd %xmm1, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-SSSE3-NEXT: movd %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-SSSE3-NEXT: movd %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSE2-SSSE3-NEXT: movd %xmm1, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v4f32:
+; AVX1: ## BB#0:
+; AVX1-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vextractps $3, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vextractps $2, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vextractps $1, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vextractps $0, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v4f32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vcmpltps %xmm0, %xmm1, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT: retq
+ %x = fcmp ogt <4 x float> %a, %b
+ %res = bitcast <4 x i1> %x to i4
+ ret i4 %res
+}
+
+define i16 @v16i8(<16 x i8> %a, <16 x i8> %b) {
+; SSE2-SSSE3-LABEL: v16i8:
+; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3-NEXT: pcmpgtb %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-SSSE3-NEXT: andb $1, %cl
+; SSE2-SSSE3-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v16i8:
+; AVX1: ## BB#0:
+; AVX1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $15, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $14, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $13, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $12, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $11, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $10, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $9, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $8, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $7, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $6, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $5, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $4, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $3, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $2, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $1, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax +; AVX1-NEXT: andb $1, %al +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; AVX1-NEXT: retq +; +; AVX512-LABEL: v16i8: +; AVX512: ## BB#0: +; AVX512-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill> +; AVX512-NEXT: retq + %x = icmp sgt <16 x i8> %a, %b + %res = bitcast <16 x i1> %x to i16 + ret i16 %res +} + +define i2 @v2i8(<2 x i8> %a, <2 x i8> %b) { +; SSE2-SSSE3-LABEL: v2i8: +; SSE2-SSSE3: ## BB#0: +; SSE2-SSSE3-NEXT: psllq $56, %xmm0 +; SSE2-SSSE3-NEXT: movdqa %xmm0, %xmm2 +; SSE2-SSSE3-NEXT: psrad $31, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] +; SSE2-SSSE3-NEXT: psrad $24, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-SSSE3-NEXT: psllq $56, %xmm1 +; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm2 +; SSE2-SSSE3-NEXT: psrad $31, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] +; SSE2-SSSE3-NEXT: psrad $24, %xmm1 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0] +; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm0 +; SSE2-SSSE3-NEXT: movdqa %xmm0, %xmm2 +; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2] +; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3] +; SSE2-SSSE3-NEXT: por %xmm0, %xmm1 +; SSE2-SSSE3-NEXT: movq %xmm1, %rax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE2-SSSE3-NEXT: movq %xmm0, %rax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: v2i8: +; AVX1: ## BB#0: +; AVX1-NEXT: vpsllq $56, %xmm1, %xmm1 +; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2 +; AVX1-NEXT: vpsrad $24, %xmm1, %xmm1 +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] +; AVX1-NEXT: vpsllq $56, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2 +; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX1-NEXT: retq +; +; AVX512-LABEL: v2i8: +; AVX512: ## BB#0: +; AVX512-NEXT: vpsllq $56, %xmm1, %xmm1 +; AVX512-NEXT: vpsraq $56, %xmm1, %xmm1 +; AVX512-NEXT: vpsllq $56, %xmm0, %xmm0 +; AVX512-NEXT: vpsraq $56, %xmm0, %xmm0 +; AVX512-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX512-NEXT: retq + %x = icmp sgt <2 x i8> %a, %b + %res = bitcast <2 x i1> %x to i2 + ret i2 %res +} + +define i2 @v2i16(<2 x i16> %a, <2 x i16> %b) { +; SSE2-SSSE3-LABEL: v2i16: +; SSE2-SSSE3: ## 
BB#0: +; SSE2-SSSE3-NEXT: psllq $48, %xmm0 +; SSE2-SSSE3-NEXT: movdqa %xmm0, %xmm2 +; SSE2-SSSE3-NEXT: psrad $31, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] +; SSE2-SSSE3-NEXT: psrad $16, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-SSSE3-NEXT: psllq $48, %xmm1 +; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm2 +; SSE2-SSSE3-NEXT: psrad $31, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] +; SSE2-SSSE3-NEXT: psrad $16, %xmm1 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0] +; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm0 +; SSE2-SSSE3-NEXT: movdqa %xmm0, %xmm2 +; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2] +; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3] +; SSE2-SSSE3-NEXT: por %xmm0, %xmm1 +; SSE2-SSSE3-NEXT: movq %xmm1, %rax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE2-SSSE3-NEXT: movq %xmm0, %rax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: v2i16: +; AVX1: ## BB#0: +; AVX1-NEXT: vpsllq $48, %xmm1, %xmm1 +; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2 +; AVX1-NEXT: vpsrad $16, %xmm1, %xmm1 +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] +; AVX1-NEXT: vpsllq $48, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2 +; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX1-NEXT: retq +; +; AVX512-LABEL: v2i16: +; AVX512: ## BB#0: +; AVX512-NEXT: vpsllq $48, %xmm1, %xmm1 +; AVX512-NEXT: vpsraq $48, %xmm1, %xmm1 +; AVX512-NEXT: vpsllq $48, %xmm0, %xmm0 +; AVX512-NEXT: vpsraq $48, %xmm0, %xmm0 +; AVX512-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX512-NEXT: retq + %x = icmp sgt <2 x i16> %a, %b + %res = bitcast <2 x i1> %x to i2 + ret i2 %res +} + +define i2 @v2i32(<2 x i32> %a, <2 x i32> %b) { +; SSE2-SSSE3-LABEL: v2i32: +; SSE2-SSSE3: ## BB#0: +; SSE2-SSSE3-NEXT: psllq $32, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] +; SSE2-SSSE3-NEXT: psrad $31, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; SSE2-SSSE3-NEXT: psllq $32, %xmm1 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3] +; SSE2-SSSE3-NEXT: psrad $31, %xmm1 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] +; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; 
SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [2147483648,0,2147483648,0] +; SSE2-SSSE3-NEXT: pxor %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: pxor %xmm1, %xmm2 +; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: pcmpgtd %xmm0, %xmm1 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2] +; SSE2-SSSE3-NEXT: pcmpeqd %xmm0, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3] +; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSE2-SSSE3-NEXT: por %xmm0, %xmm1 +; SSE2-SSSE3-NEXT: movq %xmm1, %rax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE2-SSSE3-NEXT: movq %xmm0, %rax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: v2i32: +; AVX1: ## BB#0: +; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1 +; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2 +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] +; AVX1-NEXT: vpsllq $32, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX1-NEXT: retq +; +; AVX512-LABEL: v2i32: +; AVX512: ## BB#0: +; AVX512-NEXT: vpsllq $32, %xmm1, %xmm1 +; AVX512-NEXT: vpsraq $32, %xmm1, %xmm1 +; AVX512-NEXT: vpsllq $32, %xmm0, %xmm0 +; AVX512-NEXT: vpsraq $32, %xmm0, %xmm0 +; AVX512-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX512-NEXT: retq + %x = icmp sgt <2 x i32> %a, %b + %res = bitcast <2 x i1> %x to i2 + ret i2 %res +} + +define i2 @v2i64(<2 x i64> %a, <2 x i64> %b) { +; SSE2-SSSE3-LABEL: v2i64: +; SSE2-SSSE3: ## BB#0: +; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0] +; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm1 +; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm0 +; SSE2-SSSE3-NEXT: movdqa %xmm0, %xmm2 +; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm2 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2] +; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0 +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3] +; SSE2-SSSE3-NEXT: por %xmm0, %xmm1 +; SSE2-SSSE3-NEXT: movq %xmm1, %rax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE2-SSSE3-NEXT: movq %xmm0, %rax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: v2i64: +; AVX1: ## BB#0: +; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX1-NEXT: retq +; +; AVX512-LABEL: v2i64: +; AVX512: ## BB#0: +; AVX512-NEXT: vpcmpgtq 
%xmm1, %xmm0, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX512-NEXT: retq + %x = icmp sgt <2 x i64> %a, %b + %res = bitcast <2 x i1> %x to i2 + ret i2 %res +} + +define i2 @v2f64(<2 x double> %a, <2 x double> %b) { +; SSE2-SSSE3-LABEL: v2f64: +; SSE2-SSSE3: ## BB#0: +; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm1 +; SSE2-SSSE3-NEXT: movq %xmm1, %rax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE2-SSSE3-NEXT: movq %xmm0, %rax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: v2f64: +; AVX1: ## BB#0: +; AVX1-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX1-NEXT: retq +; +; AVX512-LABEL: v2f64: +; AVX512: ## BB#0: +; AVX512-NEXT: vcmpltpd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX512-NEXT: retq + %x = fcmp ogt <2 x double> %a, %b + %res = bitcast <2 x i1> %x to i2 + ret i2 %res +} + +define i4 @v4i8(<4 x i8> %a, <4 x i8> %b) { +; SSE2-SSSE3-LABEL: v4i8: +; SSE2-SSSE3: ## BB#0: +; SSE2-SSSE3-NEXT: pslld $24, %xmm1 +; SSE2-SSSE3-NEXT: psrad $24, %xmm1 +; SSE2-SSSE3-NEXT: pslld $24, %xmm0 +; SSE2-SSSE3-NEXT: psrad $24, %xmm0 +; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: movd %xmm0, %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3] +; SSE2-SSSE3-NEXT: movd %xmm1, %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] +; SSE2-SSSE3-NEXT: movd %xmm1, %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] +; SSE2-SSSE3-NEXT: movd %xmm0, %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: v4i8: +; AVX1: ## BB#0: +; AVX1-NEXT: vpslld $24, %xmm1, %xmm1 +; AVX1-NEXT: vpsrad $24, %xmm1, %xmm1 +; AVX1-NEXT: vpslld $24, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpextrd $3, %xmm0, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrd $2, %xmm0, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrd $1, %xmm0, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovd %xmm0, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX1-NEXT: retq +; +; AVX512-LABEL: v4i8: +; AVX512: ## BB#0: +; AVX512-NEXT: vpslld $24, %xmm1, %xmm1 +; AVX512-NEXT: vpsrad $24, %xmm1, %xmm1 +; AVX512-NEXT: vpslld $24, %xmm0, %xmm0 +; AVX512-NEXT: vpsrad $24, %xmm0, %xmm0 +; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al +; 
AVX512-NEXT: retq + %x = icmp sgt <4 x i8> %a, %b + %res = bitcast <4 x i1> %x to i4 + ret i4 %res +} + +define i4 @v4i16(<4 x i16> %a, <4 x i16> %b) { +; SSE2-SSSE3-LABEL: v4i16: +; SSE2-SSSE3: ## BB#0: +; SSE2-SSSE3-NEXT: pslld $16, %xmm1 +; SSE2-SSSE3-NEXT: psrad $16, %xmm1 +; SSE2-SSSE3-NEXT: pslld $16, %xmm0 +; SSE2-SSSE3-NEXT: psrad $16, %xmm0 +; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: movd %xmm0, %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3] +; SSE2-SSSE3-NEXT: movd %xmm1, %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] +; SSE2-SSSE3-NEXT: movd %xmm1, %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] +; SSE2-SSSE3-NEXT: movd %xmm0, %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: v4i16: +; AVX1: ## BB#0: +; AVX1-NEXT: vpslld $16, %xmm1, %xmm1 +; AVX1-NEXT: vpsrad $16, %xmm1, %xmm1 +; AVX1-NEXT: vpslld $16, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0 +; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpextrd $3, %xmm0, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrd $2, %xmm0, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrd $1, %xmm0, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovd %xmm0, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX1-NEXT: retq +; +; AVX512-LABEL: v4i16: +; AVX512: ## BB#0: +; AVX512-NEXT: vpslld $16, %xmm1, %xmm1 +; AVX512-NEXT: vpsrad $16, %xmm1, %xmm1 +; AVX512-NEXT: vpslld $16, %xmm0, %xmm0 +; AVX512-NEXT: vpsrad $16, %xmm0, %xmm0 +; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX512-NEXT: retq + %x = icmp sgt <4 x i16> %a, %b + %res = bitcast <4 x i1> %x to i4 + ret i4 %res +} + +define i8 @v8i8(<8 x i8> %a, <8 x i8> %b) { +; SSE2-SSSE3-LABEL: v8i8: +; SSE2-SSSE3: ## BB#0: +; SSE2-SSSE3-NEXT: psllw $8, %xmm1 +; SSE2-SSSE3-NEXT: psraw $8, %xmm1 +; SSE2-SSSE3-NEXT: psllw $8, %xmm0 +; SSE2-SSSE3-NEXT: psraw $8, %xmm0 +; SSE2-SSSE3-NEXT: pcmpgtw %xmm1, %xmm0 +; SSE2-SSSE3-NEXT: pextrw $7, %xmm0, %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: pextrw $6, %xmm0, %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: pextrw $5, %xmm0, %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: pextrw $4, %xmm0, %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: pextrw $3, %xmm0, %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: pextrw $2, %xmm0, %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: pextrw $1, %xmm0, %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movd %xmm0, %eax +; SSE2-SSSE3-NEXT: andl $1, %eax +; 
SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-SSSE3-NEXT: retq +; +; AVX1-LABEL: v8i8: +; AVX1: ## BB#0: +; AVX1-NEXT: vpsllw $8, %xmm1, %xmm1 +; AVX1-NEXT: vpsraw $8, %xmm1, %xmm1 +; AVX1-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $8, %xmm0, %xmm0 +; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpextrw $7, %xmm0, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrw $6, %xmm0, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrw $5, %xmm0, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrw $4, %xmm0, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrw $3, %xmm0, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrw $2, %xmm0, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vpextrw $1, %xmm0, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: vmovd %xmm0, %eax +; AVX1-NEXT: andl $1, %eax +; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX1-NEXT: retq +; +; AVX512-LABEL: v8i8: +; AVX512: ## BB#0: +; AVX512-NEXT: vpsllw $8, %xmm1, %xmm1 +; AVX512-NEXT: vpsraw $8, %xmm1, %xmm1 +; AVX512-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX512-NEXT: vpsraw $8, %xmm0, %xmm0 +; AVX512-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill> +; AVX512-NEXT: retq + %x = icmp sgt <8 x i8> %a, %b + %res = bitcast <8 x i1> %x to i8 + ret i8 %res +} diff --git a/test/CodeGen/X86/bitcast-setcc-256.ll b/test/CodeGen/X86/bitcast-setcc-256.ll new file mode 100644 index 000000000000..51c6ad7c7f9e --- /dev/null +++ b/test/CodeGen/X86/bitcast-setcc-256.ll @@ -0,0 +1,363 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefix=AVX2 +; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx512f,+avx512vl,+avx512bw < %s | FileCheck %s --check-prefix=AVX512 + +define i16 @v16i16(<16 x i16> %a, <16 x i16> %b) { +; AVX2-LABEL: v16i16: +; AVX2: ## BB#0: +; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpextrb $15, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrb $14, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrb $13, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrb $12, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrb $11, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrb $10, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrb $9, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrb $8, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrb $7, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrb $6, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, 
-{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrb $5, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrb $4, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrb $3, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrb $2, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrb $1, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrb $0, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: v16i16: +; AVX512: ## BB#0: +; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill> +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %x = icmp sgt <16 x i16> %a, %b + %res = bitcast <16 x i1> %x to i16 + ret i16 %res +} + +define i8 @v8i32(<8 x i32> %a, <8 x i32> %b) { +; AVX2-LABEL: v8i32: +; AVX2: ## BB#0: +; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpextrw $7, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrw $6, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrw $5, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrw $4, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrw $3, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrw $2, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrw $1, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovd %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: v8i32: +; AVX512: ## BB#0: +; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill> +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %x = icmp sgt <8 x i32> %a, %b + %res = bitcast <8 x i1> %x to i8 + ret i8 %res +} + +define i8 @v8f32(<8 x float> %a, <8 x float> %b) { +; AVX2-LABEL: v8f32: +; AVX2: ## BB#0: +; AVX2-NEXT: vcmpltps %ymm0, %ymm1, %ymm0 +; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpextrw $7, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrw $6, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrw $5, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrw $4, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrw $3, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrw $2, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrw $1, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovd %xmm0, %eax +; 
AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: v8f32: +; AVX512: ## BB#0: +; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill> +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %x = fcmp ogt <8 x float> %a, %b + %res = bitcast <8 x i1> %x to i8 + ret i8 %res +} + +define i32 @v32i8(<32 x i8> %a, <32 x i8> %b) { +; AVX2-LABEL: v32i8: +; AVX2: ## BB#0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: Lcfi0: +; AVX2-NEXT: .cfi_def_cfa_offset 16 +; AVX2-NEXT: Lcfi1: +; AVX2-NEXT: .cfi_offset %rbp, -16 +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: Lcfi2: +; AVX2-NEXT: .cfi_def_cfa_register %rbp +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $32, %rsp +; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpextrb $15, %xmm1, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $14, %xmm1, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $13, %xmm1, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $12, %xmm1, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $11, %xmm1, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $10, %xmm1, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $9, %xmm1, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $8, %xmm1, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $7, %xmm1, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $6, %xmm1, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $5, %xmm1, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $4, %xmm1, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $3, %xmm1, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $2, %xmm1, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $1, %xmm1, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $0, %xmm1, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $15, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $14, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $13, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $12, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $11, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $10, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $9, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $8, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $7, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $6, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $5, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: 
vpextrb $4, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $3, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $2, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $1, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: vpextrb $0, %xmm0, %eax +; AVX2-NEXT: andb $1, %al +; AVX2-NEXT: movb %al, (%rsp) +; AVX2-NEXT: movl (%rsp), %eax +; AVX2-NEXT: movq %rbp, %rsp +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: v32i8: +; AVX512: ## BB#0: +; AVX512-NEXT: vpcmpgtb %ymm1, %ymm0, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %x = icmp sgt <32 x i8> %a, %b + %res = bitcast <32 x i1> %x to i32 + ret i32 %res +} + +define i4 @v4i64(<4 x i64> %a, <4 x i64> %b) { +; AVX2-LABEL: v4i64: +; AVX2: ## BB#0: +; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpextrd $3, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrd $2, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrd $1, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovd %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: v4i64: +; AVX512: ## BB#0: +; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %x = icmp sgt <4 x i64> %a, %b + %res = bitcast <4 x i1> %x to i4 + ret i4 %res +} + +define i4 @v4f64(<4 x double> %a, <4 x double> %b) { +; AVX2-LABEL: v4f64: +; AVX2: ## BB#0: +; AVX2-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0 +; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpextrd $3, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrd $2, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrd $1, %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovd %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: v4f64: +; AVX512: ## BB#0: +; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %x = fcmp ogt <4 x double> %a, %b + %res = bitcast <4 x i1> %x to i4 + ret i4 %res +} diff --git a/test/CodeGen/X86/bswap_tree2.ll b/test/CodeGen/X86/bswap_tree2.ll index 1340b7662a7a..a9c74df9d0d9 100644 --- a/test/CodeGen/X86/bswap_tree2.ll +++ b/test/CodeGen/X86/bswap_tree2.ll @@ -9,32 +9,31 @@ define i32 @test1(i32 %x) nounwind { ; CHECK-LABEL: test1: ; CHECK: # BB#0: -; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax -; CHECK-NEXT: movl %eax, %ecx -; CHECK-NEXT: andl $16711680, %ecx # imm = 0xFF0000 -; CHECK-NEXT: movl %eax, %edx -; CHECK-NEXT: orl $-16777216, %edx # imm = 0xFF000000 -; CHECK-NEXT: shll $8, %ecx -; 
CHECK-NEXT: shrl $8, %edx -; CHECK-NEXT: orl %ecx, %edx -; CHECK-NEXT: bswapl %eax -; CHECK-NEXT: shrl $16, %eax +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx +; CHECK-NEXT: movl %ecx, %edx +; CHECK-NEXT: andl $16711680, %edx # imm = 0xFF0000 +; CHECK-NEXT: movl %ecx, %eax +; CHECK-NEXT: orl $-16777216, %eax # imm = 0xFF000000 +; CHECK-NEXT: shll $8, %edx +; CHECK-NEXT: shrl $8, %eax +; CHECK-NEXT: bswapl %ecx +; CHECK-NEXT: shrl $16, %ecx ; CHECK-NEXT: orl %edx, %eax +; CHECK-NEXT: orl %ecx, %eax ; CHECK-NEXT: retl ; ; CHECK64-LABEL: test1: ; CHECK64: # BB#0: -; CHECK64-NEXT: movl %edi, %eax -; CHECK64-NEXT: andl $16711680, %eax # imm = 0xFF0000 ; CHECK64-NEXT: movl %edi, %ecx -; CHECK64-NEXT: orl $-16777216, %ecx # imm = 0xFF000000 -; CHECK64-NEXT: shll $8, %eax -; CHECK64-NEXT: shrl $8, %ecx -; CHECK64-NEXT: orl %eax, %ecx +; CHECK64-NEXT: andl $16711680, %ecx # imm = 0xFF0000 +; CHECK64-NEXT: movl %edi, %eax +; CHECK64-NEXT: orl $-16777216, %eax # imm = 0xFF000000 +; CHECK64-NEXT: shll $8, %ecx +; CHECK64-NEXT: shrl $8, %eax ; CHECK64-NEXT: bswapl %edi ; CHECK64-NEXT: shrl $16, %edi -; CHECK64-NEXT: orl %ecx, %edi -; CHECK64-NEXT: movl %edi, %eax +; CHECK64-NEXT: orl %ecx, %eax +; CHECK64-NEXT: orl %edi, %eax ; CHECK64-NEXT: retq %byte0 = and i32 %x, 255 ; 0x000000ff %byte1 = and i32 %x, 65280 ; 0x0000ff00 diff --git a/test/CodeGen/X86/constant-combines.ll b/test/CodeGen/X86/constant-combines.ll index 5ea736e92c78..4f55814958f4 100644 --- a/test/CodeGen/X86/constant-combines.ll +++ b/test/CodeGen/X86/constant-combines.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s | FileCheck %s target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" @@ -11,13 +12,20 @@ define void @PR22524({ float, float }* %arg) { ; it folded it to a zero too late to legalize the zero store operation. If this ; ever starts forming a zero store instead of movss, the test case has stopped ; being useful. 
-; +; ; CHECK-LABEL: PR22524: +; CHECK: # BB#0: # %entry +; CHECK-NEXT: movl $0, 4(%rdi) +; CHECK-NEXT: xorl %eax, %eax +; CHECK-NEXT: movd %eax, %xmm0 +; CHECK-NEXT: xorps %xmm1, %xmm1 +; CHECK-NEXT: mulss %xmm0, %xmm1 +; CHECK-NEXT: movl $0, (%rdi) +; CHECK-NEXT: movss %xmm1, 4(%rdi) +; CHECK-NEXT: retq entry: %0 = getelementptr inbounds { float, float }, { float, float }* %arg, i32 0, i32 1 store float 0.000000e+00, float* %0, align 4 -; CHECK: movl $0, 4(%rdi) - %1 = getelementptr inbounds { float, float }, { float, float }* %arg, i64 0, i32 0 %2 = bitcast float* %1 to i64* %3 = load i64, i64* %2, align 8 @@ -28,8 +36,6 @@ entry: %8 = fmul float %7, 0.000000e+00 %9 = bitcast float* %1 to i32* store i32 %6, i32* %9, align 4 -; CHECK: movl $0, (%rdi) store float %8, float* %0, align 4 -; CHECK: movss %{{.*}}, 4(%rdi) ret void } diff --git a/test/CodeGen/X86/fast-isel-load-i1.ll b/test/CodeGen/X86/fast-isel-load-i1.ll index 2f3c6c4b84b9..f515d38cbb95 100644 --- a/test/CodeGen/X86/fast-isel-load-i1.ll +++ b/test/CodeGen/X86/fast-isel-load-i1.ll @@ -4,9 +4,7 @@ define i1 @test_i1(i1* %b) { ; CHECK-LABEL: test_i1: ; CHECK: # BB#0: # %entry -; CHECK-NEXT: movzbl (%rdi), %eax -; CHECK-NEXT: andl $1, %eax -; CHECK-NEXT: testb $1, %al +; CHECK-NEXT: testb $1, (%rdi) ; CHECK-NEXT: je .LBB0_2 ; CHECK-NEXT: # BB#1: # %in ; CHECK-NEXT: xorl %eax, %eax diff --git a/test/CodeGen/X86/fma-fneg-combine.ll b/test/CodeGen/X86/fma-fneg-combine.ll index bb332f7282a8..d1d69c68af7b 100644 --- a/test/CodeGen/X86/fma-fneg-combine.ll +++ b/test/CodeGen/X86/fma-fneg-combine.ll @@ -141,7 +141,6 @@ define <4 x float> @test11(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 ze ; SKX-LABEL: test11: ; SKX: # BB#0: # %entry ; SKX-NEXT: vxorps {{.*}}(%rip){1to4}, %xmm2, %xmm0 -; SKX-NEXT: andl $1, %edi ; SKX-NEXT: kmovd %edi, %k1 ; SKX-NEXT: vfmadd231ss %xmm1, %xmm1, %xmm0 {%k1} ; SKX-NEXT: retq @@ -150,7 +149,6 @@ define <4 x float> @test11(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 ze ; KNL: # BB#0: # %entry ; KNL-NEXT: vbroadcastss {{.*}}(%rip), %xmm0 ; KNL-NEXT: vxorps %xmm0, %xmm2, %xmm0 -; KNL-NEXT: andl $1, %edi ; KNL-NEXT: kmovw %edi, %k1 ; KNL-NEXT: vfmadd231ss %xmm1, %xmm1, %xmm0 {%k1} ; KNL-NEXT: retq @@ -186,7 +184,6 @@ define <2 x double> @test13(<2 x double> %a, <2 x double> %b, <2 x double> %c, i ; SKX-LABEL: test13: ; SKX: # BB#0: # %entry ; SKX-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0 -; SKX-NEXT: andl $1, %edi ; SKX-NEXT: kmovd %edi, %k1 ; SKX-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1} ; SKX-NEXT: retq @@ -194,10 +191,10 @@ define <2 x double> @test13(<2 x double> %a, <2 x double> %b, <2 x double> %c, i ; KNL-LABEL: test13: ; KNL: # BB#0: # %entry ; KNL-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0 -; KNL-NEXT: andl $1, %edi ; KNL-NEXT: kmovw %edi, %k1 ; KNL-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1} ; KNL-NEXT: retq + entry: %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %a %0 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %sub.i, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4) diff --git a/test/CodeGen/X86/fmsubadd-combine.ll b/test/CodeGen/X86/fmsubadd-combine.ll new file mode 100644 index 000000000000..bd8888966cf2 --- /dev/null +++ b/test/CodeGen/X86/fmsubadd-combine.ll @@ -0,0 +1,193 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+fma | FileCheck -check-prefix=FMA3 -check-prefix=FMA3_256 %s +; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu 
-mattr=+fma,+avx512f | FileCheck -check-prefix=FMA3 -check-prefix=FMA3_512 %s +; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+fma4 | FileCheck -check-prefix=FMA4 %s + +; This test checks the fusing of MUL + SUB/ADD to FMSUBADD. + +define <2 x double> @mul_subadd_pd128(<2 x double> %A, <2 x double> %B, <2 x double> %C) #0 { +; FMA3_256-LABEL: mul_subadd_pd128: +; FMA3_256: # BB#0: # %entry +; FMA3_256-NEXT: vmulpd %xmm1, %xmm0, %xmm0 +; FMA3_256-NEXT: vsubpd %xmm2, %xmm0, %xmm1 +; FMA3_256-NEXT: vaddpd %xmm2, %xmm0, %xmm0 +; FMA3_256-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] +; FMA3_256-NEXT: retq +; +; FMA3_512-LABEL: mul_subadd_pd128: +; FMA3_512: # BB#0: # %entry +; FMA3_512-NEXT: vmulpd %xmm1, %xmm0, %xmm0 +; FMA3_512-NEXT: vsubpd %xmm2, %xmm0, %xmm1 +; FMA3_512-NEXT: vaddpd %xmm2, %xmm0, %xmm0 +; FMA3_512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1] +; FMA3_512-NEXT: retq +; +; FMA4-LABEL: mul_subadd_pd128: +; FMA4: # BB#0: # %entry +; FMA4-NEXT: vmulpd %xmm1, %xmm0, %xmm0 +; FMA4-NEXT: vsubpd %xmm2, %xmm0, %xmm1 +; FMA4-NEXT: vaddpd %xmm2, %xmm0, %xmm0 +; FMA4-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] +; FMA4-NEXT: retq +entry: + %AB = fmul <2 x double> %A, %B + %Sub = fsub <2 x double> %AB, %C + %Add = fadd <2 x double> %AB, %C + %subadd = shufflevector <2 x double> %Add, <2 x double> %Sub, <2 x i32> <i32 0, i32 3> + ret <2 x double> %subadd +} + +define <4 x float> @mul_subadd_ps128(<4 x float> %A, <4 x float> %B, <4 x float> %C) #0 { +; FMA3-LABEL: mul_subadd_ps128: +; FMA3: # BB#0: # %entry +; FMA3-NEXT: vmulps %xmm1, %xmm0, %xmm0 +; FMA3-NEXT: vsubps %xmm2, %xmm0, %xmm1 +; FMA3-NEXT: vaddps %xmm2, %xmm0, %xmm0 +; FMA3-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] +; FMA3-NEXT: retq +; +; FMA4-LABEL: mul_subadd_ps128: +; FMA4: # BB#0: # %entry +; FMA4-NEXT: vmulps %xmm1, %xmm0, %xmm0 +; FMA4-NEXT: vsubps %xmm2, %xmm0, %xmm1 +; FMA4-NEXT: vaddps %xmm2, %xmm0, %xmm0 +; FMA4-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] +; FMA4-NEXT: retq +entry: + %AB = fmul <4 x float> %A, %B + %Sub = fsub <4 x float> %AB, %C + %Add = fadd <4 x float> %AB, %C + %subadd = shufflevector <4 x float> %Add, <4 x float> %Sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7> + ret <4 x float> %subadd +} + +define <4 x double> @mul_subadd_pd256(<4 x double> %A, <4 x double> %B, <4 x double> %C) #0 { +; FMA3-LABEL: mul_subadd_pd256: +; FMA3: # BB#0: # %entry +; FMA3-NEXT: vmulpd %ymm1, %ymm0, %ymm0 +; FMA3-NEXT: vsubpd %ymm2, %ymm0, %ymm1 +; FMA3-NEXT: vaddpd %ymm2, %ymm0, %ymm0 +; FMA3-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3] +; FMA3-NEXT: retq +; +; FMA4-LABEL: mul_subadd_pd256: +; FMA4: # BB#0: # %entry +; FMA4-NEXT: vmulpd %ymm1, %ymm0, %ymm0 +; FMA4-NEXT: vsubpd %ymm2, %ymm0, %ymm1 +; FMA4-NEXT: vaddpd %ymm2, %ymm0, %ymm0 +; FMA4-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3] +; FMA4-NEXT: retq +entry: + %AB = fmul <4 x double> %A, %B + %Sub = fsub <4 x double> %AB, %C + %Add = fadd <4 x double> %AB, %C + %subadd = shufflevector <4 x double> %Add, <4 x double> %Sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7> + ret <4 x double> %subadd +} + +define <8 x float> @mul_subadd_ps256(<8 x float> %A, <8 x float> %B, <8 x float> %C) #0 { +; FMA3-LABEL: mul_subadd_ps256: +; FMA3: # BB#0: # %entry +; FMA3-NEXT: vmulps %ymm1, %ymm0, %ymm0 +; FMA3-NEXT: vsubps %ymm2, %ymm0, %ymm1 +; FMA3-NEXT: vaddps %ymm2, %ymm0, %ymm0 +; FMA3-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7] +; FMA3-NEXT: 
retq +; +; FMA4-LABEL: mul_subadd_ps256: +; FMA4: # BB#0: # %entry +; FMA4-NEXT: vmulps %ymm1, %ymm0, %ymm0 +; FMA4-NEXT: vsubps %ymm2, %ymm0, %ymm1 +; FMA4-NEXT: vaddps %ymm2, %ymm0, %ymm0 +; FMA4-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7] +; FMA4-NEXT: retq +entry: + %AB = fmul <8 x float> %A, %B + %Sub = fsub <8 x float> %AB, %C + %Add = fadd <8 x float> %AB, %C + %subadd = shufflevector <8 x float> %Add, <8 x float> %Sub, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15> + ret <8 x float> %subadd +} + +define <8 x double> @mul_subadd_pd512(<8 x double> %A, <8 x double> %B, <8 x double> %C) #0 { +; FMA3_256-LABEL: mul_subadd_pd512: +; FMA3_256: # BB#0: # %entry +; FMA3_256-NEXT: vmulpd %ymm2, %ymm0, %ymm0 +; FMA3_256-NEXT: vmulpd %ymm3, %ymm1, %ymm1 +; FMA3_256-NEXT: vsubpd %ymm5, %ymm1, %ymm2 +; FMA3_256-NEXT: vsubpd %ymm4, %ymm0, %ymm3 +; FMA3_256-NEXT: vaddpd %ymm5, %ymm1, %ymm1 +; FMA3_256-NEXT: vaddpd %ymm4, %ymm0, %ymm0 +; FMA3_256-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3] +; FMA3_256-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3] +; FMA3_256-NEXT: retq +; +; FMA3_512-LABEL: mul_subadd_pd512: +; FMA3_512: # BB#0: # %entry +; FMA3_512-NEXT: vmulpd %zmm1, %zmm0, %zmm0 +; FMA3_512-NEXT: vsubpd %zmm2, %zmm0, %zmm1 +; FMA3_512-NEXT: vaddpd %zmm2, %zmm0, %zmm0 +; FMA3_512-NEXT: vshufpd {{.*#+}} zmm0 = zmm0[0],zmm1[1],zmm0[2],zmm1[3],zmm0[4],zmm1[5],zmm0[6],zmm1[7] +; FMA3_512-NEXT: retq +; +; FMA4-LABEL: mul_subadd_pd512: +; FMA4: # BB#0: # %entry +; FMA4-NEXT: vmulpd %ymm2, %ymm0, %ymm0 +; FMA4-NEXT: vmulpd %ymm3, %ymm1, %ymm1 +; FMA4-NEXT: vsubpd %ymm5, %ymm1, %ymm2 +; FMA4-NEXT: vsubpd %ymm4, %ymm0, %ymm3 +; FMA4-NEXT: vaddpd %ymm5, %ymm1, %ymm1 +; FMA4-NEXT: vaddpd %ymm4, %ymm0, %ymm0 +; FMA4-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3] +; FMA4-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3] +; FMA4-NEXT: retq +entry: + %AB = fmul <8 x double> %A, %B + %Sub = fsub <8 x double> %AB, %C + %Add = fadd <8 x double> %AB, %C + %subadd = shufflevector <8 x double> %Add, <8 x double> %Sub, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15> + ret <8 x double> %subadd +} + +define <16 x float> @mul_subadd_ps512(<16 x float> %A, <16 x float> %B, <16 x float> %C) #0 { +; FMA3_256-LABEL: mul_subadd_ps512: +; FMA3_256: # BB#0: # %entry +; FMA3_256-NEXT: vmulps %ymm2, %ymm0, %ymm0 +; FMA3_256-NEXT: vmulps %ymm3, %ymm1, %ymm1 +; FMA3_256-NEXT: vsubps %ymm5, %ymm1, %ymm2 +; FMA3_256-NEXT: vsubps %ymm4, %ymm0, %ymm3 +; FMA3_256-NEXT: vaddps %ymm5, %ymm1, %ymm1 +; FMA3_256-NEXT: vaddps %ymm4, %ymm0, %ymm0 +; FMA3_256-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4],ymm3[5],ymm0[6],ymm3[7] +; FMA3_256-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7] +; FMA3_256-NEXT: retq +; +; FMA3_512-LABEL: mul_subadd_ps512: +; FMA3_512: # BB#0: # %entry +; FMA3_512-NEXT: vmulps %zmm1, %zmm0, %zmm1 +; FMA3_512-NEXT: vaddps %zmm2, %zmm1, %zmm0 +; FMA3_512-NEXT: movw $-21846, %ax # imm = 0xAAAA +; FMA3_512-NEXT: kmovw %eax, %k1 +; FMA3_512-NEXT: vsubps %zmm2, %zmm1, %zmm0 {%k1} +; FMA3_512-NEXT: retq +; +; FMA4-LABEL: mul_subadd_ps512: +; FMA4: # BB#0: # %entry +; FMA4-NEXT: vmulps %ymm2, %ymm0, %ymm0 +; FMA4-NEXT: vmulps %ymm3, %ymm1, %ymm1 +; FMA4-NEXT: vsubps %ymm5, %ymm1, %ymm2 +; FMA4-NEXT: vsubps %ymm4, %ymm0, %ymm3 +; FMA4-NEXT: vaddps %ymm5, %ymm1, %ymm1 +; FMA4-NEXT: vaddps %ymm4, 
%ymm0, %ymm0 +; FMA4-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4],ymm3[5],ymm0[6],ymm3[7] +; FMA4-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7] +; FMA4-NEXT: retq +entry: + %AB = fmul <16 x float> %A, %B + %Sub = fsub <16 x float> %AB, %C + %Add = fadd <16 x float> %AB, %C + %subadd = shufflevector <16 x float> %Add, <16 x float> %Sub, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31> + ret <16 x float> %subadd +} + +attributes #0 = { nounwind "unsafe-fp-math"="true" } diff --git a/test/CodeGen/X86/fold-tied-op.ll b/test/CodeGen/X86/fold-tied-op.ll index eb06eb75a4d7..d68236e9d250 100644 --- a/test/CodeGen/X86/fold-tied-op.ll +++ b/test/CodeGen/X86/fold-tied-op.ll @@ -6,10 +6,9 @@ target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128" target triple = "i386--netbsd" ; CHECK-LABEL: fn1 -; CHECK: orl {{.*#+}} 4-byte Folded Reload -; CHECK: addl {{.*#+}} 4-byte Folded Reload -; CHECK: xorl {{.*#+}} 4-byte Folded Reload -; CHECK: xorl {{.*#+}} 4-byte Folded Reload +; CHECK: addl {{.*#+}} 4-byte Folded Reload +; CHECK: imull {{.*#+}} 4-byte Folded Reload +; CHECK: orl {{.*#+}} 4-byte Folded Reload ; CHECK: retl %struct.XXH_state64_t = type { i32, i32, i64, i64, i64 } diff --git a/test/CodeGen/X86/fp128-i128.ll b/test/CodeGen/X86/fp128-i128.ll index 6c6bc8bdc1d1..98082ec611d4 100644 --- a/test/CodeGen/X86/fp128-i128.ll +++ b/test/CodeGen/X86/fp128-i128.ll @@ -50,8 +50,8 @@ define void @TestUnionLD1(fp128 %s, i64 %n) #0 { ; CHECK-NEXT: andq %rdi, %rcx ; CHECK-NEXT: movabsq $-281474976710656, %rdx # imm = 0xFFFF000000000000 ; CHECK-NEXT: andq -{{[0-9]+}}(%rsp), %rdx -; CHECK-NEXT: orq %rcx, %rdx ; CHECK-NEXT: movq %rax, -{{[0-9]+}}(%rsp) +; CHECK-NEXT: orq %rcx, %rdx ; CHECK-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) ; CHECK-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0 ; CHECK-NEXT: jmp foo # TAILCALL diff --git a/test/CodeGen/X86/haddsub-2.ll b/test/CodeGen/X86/haddsub-2.ll index b5507523a75a..4596b83f7bc2 100644 --- a/test/CodeGen/X86/haddsub-2.ll +++ b/test/CodeGen/X86/haddsub-2.ll @@ -933,14 +933,14 @@ define <4 x float> @not_a_hsub_2(<4 x float> %A, <4 x float> %B) { ; AVX-NEXT: vsubss %xmm3, %xmm2, %xmm2 ; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3] ; AVX-NEXT: vsubss %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpermilps {{.*#+}} xmm3 = xmm1[3,1,2,3] +; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0] +; AVX-NEXT: vsubss %xmm4, %xmm3, %xmm3 ; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3] -; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3] -; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0] -; AVX-NEXT: vsubss %xmm3, %xmm2, %xmm2 -; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3] -; AVX-NEXT: vsubss %xmm3, %xmm1, %xmm1 +; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX-NEXT: vsubss %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3] -; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[0] +; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0] ; AVX-NEXT: retq %vecext = extractelement <4 x float> %A, i32 2 %vecext1 = extractelement <4 x float> %A, i32 3 diff --git a/test/CodeGen/X86/leaFixup32.mir b/test/CodeGen/X86/leaFixup32.mir new file mode 100644 index 000000000000..e3986e47df4d --- /dev/null +++ b/test/CodeGen/X86/leaFixup32.mir @@ -0,0 +1,509 @@ +# RUN: llc -run-pass x86-fixup-LEAs -mtriple=i386 -verify-machineinstrs -mcpu=corei7-avx -o - %s | FileCheck %s +--- | + ; ModuleID = 
'test/CodeGen/X86/fixup-lea.ll' + source_filename = "test/CodeGen/X86/fixup-lea.ll" + target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128" + target triple = "i386" + ;generated using: llc -stop-after x86-pad-short-functions fixup-lea.ll > leaFixup32.mir + + ;test2add_32: 3 operands LEA32r that can be replaced with 2 add instructions + ; where ADD32ri8 is chosen + define i32 @test2add_32() { + ret i32 0 + } + + ;test2add_ebp_32: 3 operands LEA32r that can be replaced with 2 add instructions + ; where the base is rbp/r13/ebp register + define i32 @test2add_ebp_32() { + ret i32 0 + } + + ;test1add_ebp_32: 2 operands LEA32r where base register is ebp and can be replaced + ; with an add instruction + define i32 @test1add_ebp_32() { + ret i32 0 + } + + ;testleaadd_32: 3 operands LEA32r that can be replaced with 1 lea 1 add instructions + define i32 @testleaadd_32() { + ret i32 0 + } + + ;testleaadd_ebp_32: 3 operands LEA32r that can be replaced with 1 lea 1 add instructions + ; where the base is ebp register + define i32 @testleaadd_ebp_32() { + ret i32 0 + } + + ;test1lea_ebp_32: 2 operands LEA32r where base register is rbp/r13/ebp and can be replaced + ; with a lea instruction + define i32 @test1lea_ebp_32() { + ret i32 0 + } + + ;test2addi32_32: 3 operands LEA32r that can be replaced with 2 add instructions where ADD32ri32 + ; is chosen + define i32 @test2addi32_32() { + ret i32 0 + } + + ;test1mov1add_ebp_32: 2 operands LEA32r that can be replaced with 1 add 1 mov instructions + ; where the base is rbp/r13/ebp register + define i32 @test1mov1add_ebp_32() { + ret i32 0 + } + + ;testleaadd_ebp_index_32: 3 operands LEA32r that can be replaced with 1 lea 1 add instructions + ; where the base and the index are ebp register and there is offset + define i32 @testleaadd_ebp_index_32() { + ret i32 0 + } + + ;testleaadd_ebp_index2_32: 3 operands LEA32r that can be replaced with 1 lea 1 add instructions + ; where the base and the index are ebp register and there is scale + define i32 @testleaadd_ebp_index2_32() { + ret i32 0 + } + + ;test_skip_opt_32: 3 operands LEA32r that cannot be replaced with 2 instructions + define i32 @test_skip_opt_32() { + ret i32 0 + } + + ;test_skip_eflags_32: LEA32r that cannot be replaced since it's not safe to clobber eflags + define i32 @test_skip_eflags_32() { + ret i32 0 + } + +... +--- +name: test2add_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%eax' } + - { reg: '%ebp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %eax, %ebp + ; CHECK: %eax = ADD32rr %eax, killed %ebp + ; CHECK: %eax = ADD32ri8 %eax, -5 + + %eax = LEA32r killed %eax, 1, killed %ebp, -5, _ + RETQ %eax + +...
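The fixup these MIR tests exercise rests on a simple identity: a 3-operand LEA32r computes base + scale*index + disp and writes no flags, so when the destination aliases the base and the scale is 1 it can be split into two ADDs, which is exactly what test2add_32 above checks. A minimal C sketch of that equivalence (illustrative only, not part of the patch; the registers are modeled as plain variables):

#include <assert.h>
#include <stdint.h>

/* What LEA32r base, scale, index, disp computes, with no flag writes. */
static uint32_t lea32r(uint32_t base, uint32_t scale, uint32_t index,
                       int32_t disp) {
  return base + scale * index + (uint32_t)disp;
}

/* The replacement test2add_32 expects when dest == base and scale == 1:
 *   %eax = ADD32rr %eax, killed %ebp    ; eax += ebp
 *   %eax = ADD32ri8 %eax, -5            ; eax += -5 (disp fits in i8)   */
static uint32_t two_adds(uint32_t eax, uint32_t ebp) {
  eax += ebp;
  eax += (uint32_t)-5;
  return eax;
}

int main(void) {
  assert(lea32r(100, 1, 7, -5) == two_adds(100, 7)); /* both yield 102 */
  return 0;
}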
+--- +name: test2add_ebp_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%eax' } + - { reg: '%ebp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %eax, %ebp + ; CHECK: %ebp = ADD32rr %ebp, killed %eax + ; CHECK: %ebp = ADD32ri8 %ebp, -5 + + %ebp = LEA32r killed %ebp, 1, killed %eax, -5, _ + RETQ %ebp + +... +--- +name: test1add_ebp_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%eax' } + - { reg: '%ebp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %eax, %ebp + ; CHECK: %ebp = ADD32rr %ebp, killed %eax + + %ebp = LEA32r killed %ebp, 1, killed %eax, 0, _ + RETQ %ebp + +... +--- +name: testleaadd_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%eax' } + - { reg: '%ebp' } + - { reg: '%ebx' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %eax, %ebp, %esi + ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0 + ; CHECK: %ebx = ADD32ri8 %ebx, -5 + + %ebx = LEA32r killed %eax, 1, killed %ebp, -5, _ + RETQ %ebx + +... +--- +name: testleaadd_ebp_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%eax' } + - { reg: '%ebp' } + - { reg: '%ebx' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %eax, %ebp + ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0, _ + ; CHECK: %ebx = ADD32ri8 %ebx, -5 + + %ebx = LEA32r killed %ebp, 1, killed %eax, -5, _ + RETQ %ebx + +... 
+--- +name: test1lea_ebp_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%eax' } + - { reg: '%ebp' } + - { reg: '%ebx' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %eax, %ebp + ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0, _ + + %ebx = LEA32r killed %ebp, 1, killed %eax, 0, _ + RETQ %ebx + +... +--- +name: test2addi32_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%eax' } + - { reg: '%ebp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %eax, %ebp + ; CHECK: %eax = ADD32rr %eax, killed %ebp + ; CHECK: %eax = ADD32ri %eax, 129 + + %eax = LEA32r killed %eax, 1, killed %ebp, 129, _ + RETQ %eax + +... +--- +name: test1mov1add_ebp_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%eax' } + - { reg: '%ebx' } + - { reg: '%ebp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %eax, %ebp, %ebx + ; CHECK: %ebx = MOV32rr %ebp + ; CHECK: %ebx = ADD32rr %ebx, %ebp + + %ebx = LEA32r %ebp, 1, %ebp, 0, _ + RETQ %ebx + +... +--- +name: testleaadd_ebp_index_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%ebx' } + - { reg: '%ebp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %eax, %ebp, %ebx + ; CHECK: %ebx = LEA32r _, 1, %ebp, 5, _ + ; CHECK: %ebx = ADD32rr %ebx, %ebp + + %ebx = LEA32r %ebp, 1, %ebp, 5, _ + RETQ %ebx + +... 
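The test1mov1add_ebp_32 case above covers the shape where the destination differs from the base but the base and index coincide with scale 1: the pass materializes the value with a register copy plus an in-place add. A hedged C sketch of why that is the same value (illustrative, not LLVM code):

#include <stdint.h>

/* dst != base, base == index, scale == 1, disp == 0: dst = 2*base,
   expressible as a copy followed by an in-place add, mirroring the
   MOV32rr + ADD32rr CHECK lines above. */
uint32_t mov_then_add(uint32_t ebp) {
    uint32_t ebx = ebp; /* MOV32rr %ebx, %ebp */
    ebx += ebp;         /* ADD32rr %ebx, %ebp */
    return ebx;
}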
+--- +name: testleaadd_ebp_index2_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%ebx' } + - { reg: '%ebp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %eax, %ebp, %ebx + ; CHECK: %ebx = LEA32r _, 4, %ebp, 5, _ + ; CHECK: %ebx = ADD32rr %ebx, %ebp + + %ebx = LEA32r %ebp, 4, %ebp, 5, _ + RETQ %ebx + +... +--- +name: test_skip_opt_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%ebx' } + - { reg: '%ebp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %eax, %ebp, %ebx + ; CHECK: %ebp = LEA32r killed %ebp, 4, killed %ebp, 0, _ + + %ebp = LEA32r killed %ebp, 4, killed %ebp, 0, _ + RETQ %ebp + +... +--- +name: test_skip_eflags_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%ebp' } + - { reg: '%eax' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %eax, %ebp, %ebx + ; CHECK: %ebx = LEA32r killed %eax, 4, killed %eax, 5, _ + ; CHECK: %ebp = LEA32r killed %ebx, 4, killed %ebx, 0, _ + ; CHECK: %ebp = ADD32ri8 %ebp, 5 + + CMP32rr %eax, killed %ebx, implicit-def %eflags + %ebx = LEA32r killed %eax, 4, killed %eax, 5, _ + JE_1 %bb.1, implicit %eflags + RETQ %ebx + bb.1: + liveins: %eax, %ebp, %ebx + %ebp = LEA32r killed %ebx, 4, killed %ebx, 5, _ + RETQ %ebp + +... 
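test_skip_eflags_32 above pins down the one hazard in this rewrite: ADD defines EFLAGS while LEA leaves them untouched, so the substitution is only legal when no other instruction's flags are live across the LEA. In the test, the CMP32rr's flags feed the later JE_1, so the first LEA32r must survive unchanged. A schematic C predicate of the legality check (assumed names; the real pass works on MachineInstrs and register liveness):

#include <stdbool.h>

/* Illustrative predicate only. Rewriting LEA into ADDs under live
   flags (the CMP ... JE_1 pair above) would corrupt the branch. */
bool may_use_two_add_form(bool eflags_live_across_lea,
                          bool dst_equals_base, int scale) {
    if (eflags_live_across_lea)
        return false;                     /* test_skip_eflags_* */
    return dst_equals_base && scale == 1; /* two-ADD form legal */
}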
+
+
+
diff --git a/test/CodeGen/X86/leaFixup64.mir b/test/CodeGen/X86/leaFixup64.mir
new file mode 100644
index 000000000000..b35dee181a47
--- /dev/null
+++ b/test/CodeGen/X86/leaFixup64.mir
@@ -0,0 +1,1041 @@
+# RUN: llc -run-pass x86-fixup-LEAs -mtriple=x86_64-gnu-unknown -verify-machineinstrs -mcpu=corei7-avx -o - %s | FileCheck %s
+--- |
+ ; ModuleID = 'lea-2.ll'
+ source_filename = "lea-2.ll"
+ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+ ;generated using: llc -stop-after x86-pad-short-functions lea-2.ll > leaFixup64.mir
+
+ ;testleaadd_64_32_1: 3-operand LEA64_32r that cannot be replaced with 2 add instructions
+ ; but can be replaced with 1 lea + 1 add
+ define i32 @testleaadd_64_32_1() {
+ ret i32 0
+ }
+
+ ;testleaadd_rbp_64_32_1: 3-operand LEA64_32r that cannot be replaced with 2 add instructions
+ ; where the base is the rbp/r13/ebp register, but it can be replaced with 1 lea + 1 add
+ define i32 @testleaadd_rbp_64_32_1() {
+ ret i32 0
+ }
+
+ ;test1lea_rbp_64_32_1: 2-operand LEA64_32r where the base register is rbp/r13/ebp and cannot
+ ; be replaced with an add instruction, but can be replaced with 1 lea instruction
+ define i32 @test1lea_rbp_64_32_1() {
+ ret i32 0
+ }
+
+ ;test2add_64: 3-operand LEA64r that can be replaced with 2 add instructions
+ define i32 @test2add_64() {
+ ret i32 0
+ }
+
+ ;test2add_rbp_64: 3-operand LEA64r that can be replaced with 2 add instructions
+ ; where the base is the rbp/r13/ebp register
+ define i32 @test2add_rbp_64() {
+ ret i32 0
+ }
+
+ ;test1add_rbp_64: 2-operand LEA64r where the base register is rbp/r13/ebp and can be replaced
+ ; with an add instruction
+ define i32 @test1add_rbp_64() {
+ ret i32 0
+ }
+
+ ;testleaadd_64_32: 3-operand LEA64_32r that can be replaced with 1 lea and 1 add instruction
+ define i32 @testleaadd_64_32() {
+ ret i32 0
+ }
+
+ ;testleaadd_rbp_64_32: 3-operand LEA64_32r that can be replaced with 1 lea and 1 add instruction
+ ; where the base is the rbp/r13/ebp register
+ define i32 @testleaadd_rbp_64_32() {
+ ret i32 0
+ }
+
+ ;test1lea_rbp_64_32: 2-operand LEA64_32r where the base register is rbp/r13/ebp and can be replaced
+ ; with a lea instruction
+ define i32 @test1lea_rbp_64_32() {
+ ret i32 0
+ }
+
+ ;testleaadd_64: 3-operand LEA64r that can be replaced with 1 lea and 1 add instruction
+ define i32 @testleaadd_64() {
+ ret i32 0
+ }
+
+ ;testleaadd_rbp_64: 3-operand LEA64r that can be replaced with 1 lea and 1 add instruction
+ ; where the base is the rbp/r13/ebp register
+ define i32 @testleaadd_rbp_64() {
+ ret i32 0
+ }
+
+ ;test1lea_rbp_64: 2-operand LEA64r where the base register is rbp/r13/ebp and can be replaced
+ ; with a lea instruction
+ define i32 @test1lea_rbp_64() {
+ ret i32 0
+ }
+
+ ;test8: dst = base & scale != 1, can't optimize
+ define i32 @test8() {
+ ret i32 0
+ }
+
+ ;testleaaddi32_64_32: 3-operand LEA64_32r that can be replaced with 1 lea + 1 add instruction where
+ ; ADD32ri is chosen
+ define i32 @testleaaddi32_64_32() {
+ ret i32 0
+ }
+
+ ;test1mov1add_rbp_64_32: 2-operand LEA64_32r that cannot be replaced with 1 mov and 1 add instruction
+ ; where the base is the rbp/r13/ebp register
+ define i32 @test1mov1add_rbp_64_32() {
+ ret i32 0
+ }
+
+ ;testleaadd_rbp_index_64_32: 3-operand LEA64_32r that cannot be replaced with 1 lea and 1 add instruction
+ ; where the base and the index are the ebp register and there is an offset
+ define i32 @testleaadd_rbp_index_64_32() {
+ ret i32 0
+ }
+
+ ;testleaadd_rbp_index2_64_32: 3-operand LEA64_32r that cannot be replaced with 1 lea and 1 add instruction
+ ; where the base and the index are the ebp register and there is a scale
+ define i32 @testleaadd_rbp_index2_64_32() {
+ ret i32 0
+ }
+
+ ;test2addi32_64: 3-operand LEA64r that can be replaced with 2 add instructions where ADD64ri32
+ ; is chosen
+ define i32 @test2addi32_64() {
+ ret i32 0
+ }
+
+ ;test1mov1add_rbp_64: 2-operand LEA64r that can be replaced with 1 mov and 1 add instruction
+ ; where the base is the rbp/r13/ebp register
+ define i32 @test1mov1add_rbp_64() {
+ ret i32 0
+ }
+
+ ;testleaadd_rbp_index_64: 3-operand LEA64r that can be replaced with 1 lea and 1 add instruction
+ ; where the base and the index are the ebp register and there is an offset
+ define i32 @testleaadd_rbp_index_64() {
+ ret i32 0
+ }
+
+ ;testleaadd_rbp_index2_64: 3-operand LEA64r that can be replaced with 1 lea and 1 add instruction
+ ; where the base and the index are the ebp register and there is a scale
+ define i32 @testleaadd_rbp_index2_64() {
+ ret i32 0
+ }
+
+ ;test_skip_opt_64: 3-operand LEA64r that cannot be replaced with 2 instructions
+ define i32 @test_skip_opt_64() {
+ ret i32 0
+ }
+
+ ;test_skip_eflags_64: LEA64r that cannot be replaced since it's not safe to clobber eflags
+ define i32 @test_skip_eflags_64() {
+ ret i32 0
+ }
+
+ ;test_skip_opt_64_32: 3-operand LEA64_32r that cannot be replaced with 2 instructions
+ define i32 @test_skip_opt_64_32() {
+ ret i32 0
+ }
+
+ ;test_skip_eflags_64_32: LEA64_32r that cannot be replaced since it's not safe to clobber eflags
+ define i32 @test_skip_eflags_64_32() {
+ ret i32 0
+ }
+
+
+...
+---
+name: testleaadd_64_32_1
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %eax = LEA64_32r killed %rax, 1, killed %rbp, 0
+ ; CHECK: %eax = ADD32ri8 %eax, -5
+
+ %eax = LEA64_32r killed %rax, 1, killed %rbp, -5, _
+ RETQ %eax
+
+...
+---
+name: testleaadd_rbp_64_32_1
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %ebp = LEA64_32r killed %rax, 1, killed %rbp, 0
+ ; CHECK: %ebp = ADD32ri8 %ebp, -5
+
+ %ebp = LEA64_32r killed %rbp, 1, killed %rax, -5, _
+ RETQ %ebp
+
+...
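The two LEA64_32r cases above differ from the plain 32-bit tests in one important way: the instruction reads 64-bit base and index registers but defines a 32-bit destination, so the destination can never literally equal the base operand and the in-place two-add rewrite is not expressible. Splitting off just the displacement into a 32-bit add is still exact, because truncation to 32 bits commutes with addition. A hedged C sketch of that reasoning (an illustrative model, not LLVM code):

#include <stdint.h>

/* LEA64_32r: the address is computed in 64 bits, low 32 bits kept. */
uint32_t lea64_32(uint64_t base, uint64_t index, uint64_t scale, int32_t disp) {
    return (uint32_t)(base + scale * index + (uint64_t)(int64_t)disp);
}

/* Peeling the displacement off into a separate 32-bit add is exact,
   since truncation mod 2^32 commutes with addition; this is the
   "1 lea + 1 add" shape the CHECK lines above expect. */
uint32_t lea_then_add32(uint64_t base, uint64_t index, uint64_t scale, int32_t disp) {
    uint32_t t = (uint32_t)(base + scale * index); /* LEA64_32r, disp 0   */
    return t + (uint32_t)disp;                     /* ADD32ri8 or ADD32ri */
}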
+--- +name: test1lea_rbp_64_32_1 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rax' } + - { reg: '%rbp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp + ; CHECK: %ebp = LEA64_32r killed %rax, 1, killed %rbp, 0 + + %ebp = LEA64_32r killed %rbp, 1, killed %rax, 0, _ + RETQ %ebp + +... +--- +name: test2add_64 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rax' } + - { reg: '%rbp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp + ; CHECK: %rax = ADD64rr %rax, killed %rbp + ; CHECK: %rax = ADD64ri8 %rax, -5 + + %rax = LEA64r killed %rax, 1, killed %rbp, -5, _ + RETQ %eax + +... +--- +name: test2add_rbp_64 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rax' } + - { reg: '%rbp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp + ; CHECK: %rbp = ADD64rr %rbp, killed %rax + ; CHECK: %rbp = ADD64ri8 %rbp, -5 + + %rbp = LEA64r killed %rbp, 1, killed %rax, -5, _ + RETQ %ebp + +... +--- +name: test1add_rbp_64 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rax' } + - { reg: '%rbp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp + ; CHECK: %rbp = ADD64rr %rbp, killed %rax + + %rbp = LEA64r killed %rbp, 1, killed %rax, 0, _ + RETQ %ebp + +... 
+--- +name: testleaadd_64_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rax' } + - { reg: '%rbp' } + - { reg: '%rbx' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp + ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, _ + ; CHECK: %ebx = ADD32ri8 %ebx, -5 + + %ebx = LEA64_32r killed %rax, 1, killed %rbp, -5, _ + RETQ %ebx + +... +--- +name: testleaadd_rbp_64_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rax' } + - { reg: '%rbp' } + - { reg: '%rbx' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp + ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, _ + ; CHECK: %ebx = ADD32ri8 %ebx, -5 + + %ebx = LEA64_32r killed %rbp, 1, killed %rax, -5, _ + RETQ %ebx + +... +--- +name: test1lea_rbp_64_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rax' } + - { reg: '%rbp' } + - { reg: '%rbx' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp + ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, _ + + %ebx = LEA64_32r killed %rbp, 1, killed %rax, 0, _ + RETQ %ebx + +... +--- +name: testleaadd_64 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rax' } + - { reg: '%rbp' } + - { reg: '%rbx' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp + ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, _ + ; CHECK: %rbx = ADD64ri8 %rbx, -5 + + %rbx = LEA64r killed %rax, 1, killed %rbp, -5, _ + RETQ %ebx + +... 
+--- +name: testleaadd_rbp_64 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rax' } + - { reg: '%rbp' } + - { reg: '%rbx' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp + ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, _ + ; CHECK: %rbx = ADD64ri8 %rbx, -5 + + %rbx = LEA64r killed %rbp, 1, killed %rax, -5, _ + RETQ %ebx + +... +--- +name: test1lea_rbp_64 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rax' } + - { reg: '%rbp' } + - { reg: '%rbx' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp + ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, _ + + %rbx = LEA64r killed %rbp, 1, killed %rax, 0, _ + RETQ %ebx + +... +--- +name: test8 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rdi' } + - { reg: '%rbp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rdi, %rbp + ; CHECK: %r12 = LEA64r _, 2, killed %r13, 5, _ + ; CHECK: %r12 = ADD64rr %r12, killed %rbp + %rbp = KILL %rbp, implicit-def %rbp + %r13 = KILL %rdi, implicit-def %r13 + %r12 = LEA64r killed %rbp, 2, killed %r13, 5, _ + RETQ %r12 + +... +--- +name: testleaaddi32_64_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rax' } + - { reg: '%rbp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp + ; CHECK: %eax = LEA64_32r killed %rax, 1, killed %rbp, 0 + ; CHECK: %eax = ADD32ri %eax, 129 + + %eax = LEA64_32r killed %rax, 1, killed %rbp, 129, _ + RETQ %eax + +... 
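One detail worth making explicit: the displacement decides which add-immediate form the CHECK lines expect. x86 has a sign-extended 8-bit immediate encoding that covers [-128, 127], so -5 pairs with ADD32ri8/ADD64ri8 while 129 in testleaaddi32_64_32 above forces the wider ADD32ri (or ADD64ri32) encoding. A one-line illustrative helper (assumed name, not LLVM's API):

#include <stdbool.h>
#include <stdint.h>

/* True when a displacement fits x86's sign-extended imm8 encoding. */
bool fits_signed_imm8(int32_t disp) {
    return disp >= -128 && disp <= 127;
}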
+--- +name: test1mov1add_rbp_64_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rax' } + - { reg: '%rbp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp, %rbx + ; CHECK: %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 0, _ + + %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 0, _ + RETQ %ebx + +... +--- +name: testleaadd_rbp_index_64_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rbx' } + - { reg: '%rbp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp, %rbx + ; CHECK: %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 5, _ + + %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 5, _ + RETQ %ebx + +... +--- +name: testleaadd_rbp_index2_64_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rbx' } + - { reg: '%rbp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %eax, %ebp, %ebx + ; CHECK: %ebx = LEA64_32r killed %rbp, 4, killed %rbp, 5, _ + + %ebx = LEA64_32r killed %rbp, 4, killed %rbp, 5, _ + RETQ %ebx + +... +--- +name: test2addi32_64 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rax' } + - { reg: '%rbp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp + ; CHECK: %rax = ADD64rr %rax, killed %rbp + ; CHECK: %rax = ADD64ri32 %rax, 129 + + %rax = LEA64r killed %rax, 1, killed %rbp, 129, _ + RETQ %eax + +... 
+--- +name: test1mov1add_rbp_64 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rax' } + - { reg: '%rbp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp, %rbx + ; CHECK: %rbx = MOV64rr %rbp + ; CHECK: %rbx = ADD64rr %rbx, %rbp + + %rbx = LEA64r %rbp, 1, %rbp, 0, _ + RETQ %ebx + +... +--- +name: testleaadd_rbp_index_64 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rbx' } + - { reg: '%rbp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp, %rbx + ; CHECK: %rbx = LEA64r _, 1, %rbp, 5, _ + ; CHECK: %rbx = ADD64rr %rbx, %rbp + + %rbx = LEA64r %rbp, 1, %rbp, 5, _ + RETQ %ebx + +... +--- +name: testleaadd_rbp_index2_64 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rbx' } + - { reg: '%rbp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp, %rbx + ; CHECK: %rbx = LEA64r _, 4, %rbp, 5, _ + ; CHECK: %rbx = ADD64rr %rbx, %rbp + + %rbx = LEA64r %rbp, 4, %rbp, 5, _ + RETQ %ebx + +... +--- +name: test_skip_opt_64 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rbx' } + - { reg: '%rbp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp, %rbx + ; CHECK: %rbp = LEA64r killed %rbp, 4, killed %rbp, 0, _ + + %rbp = LEA64r killed %rbp, 4, killed %rbp, 0, _ + RETQ %ebp + +... 
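test_skip_opt_64 above (like its 32-bit and 64_32 twins) pins down why dst == base == index with a scale other than 1 is left alone: the result is base + 4*base = 5*base, and any two-instruction in-place rewrite would overwrite the register while its original value is still needed, so without a scratch register the LEA has to stay. A sketch of the value being preserved (illustrative C; the scratch-register rationale is an inference from the test, not stated in it):

#include <stdint.h>

/* What the untouched %rbp = LEA64r %rbp, 4, %rbp, 0 computes. An
   in-place split (e.g. shift then add) would clobber rbp before its
   second use, so the pass keeps the single LEA. */
uint64_t skip_opt_value(uint64_t rbp) {
    return rbp + 4 * rbp; /* 5 * rbp */
}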
+--- +name: test_skip_eflags_64 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rbp' } + - { reg: '%rax' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp, %rbx + ; CHECK: %rbx = LEA64r killed %rax, 4, killed %rax, 5, _ + ; CHECK: %rbp = LEA64r killed %rbx, 4, killed %rbx, 0, _ + ; CHECK: %rbp = ADD64ri8 %rbp, 5 + + CMP64rr %rax, killed %rbx, implicit-def %eflags + %rbx = LEA64r killed %rax, 4, killed %rax, 5, _ + JE_1 %bb.1, implicit %eflags + RETQ %ebx + bb.1: + liveins: %rax, %rbp, %rbx + %rbp = LEA64r killed %rbx, 4, killed %rbx, 5, _ + RETQ %ebp + +... +--- +name: test_skip_opt_64_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rbx' } + - { reg: '%rbp' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp, %rbx + ; CHECK: %ebp = LEA64_32r killed %rbp, 4, killed %rbp, 0, _ + + %ebp = LEA64_32r killed %rbp, 4, killed %rbp, 0, _ + RETQ %ebp + +... +--- +name: test_skip_eflags_64_32 +alignment: 4 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: + - { reg: '%rbp' } + - { reg: '%rax' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false +body: | + bb.0 (%ir-block.0): + liveins: %rax, %rbp, %rbx + ; CHECK: %ebx = LEA64_32r killed %rax, 4, killed %rax, 5, _ + ; CHECK: %ebp = LEA64_32r killed %rbx, 4, killed %rbx, 0, _ + ; CHECK: %ebp = ADD32ri8 %ebp, 5 + + CMP64rr %rax, killed %rbx, implicit-def %eflags + %ebx = LEA64_32r killed %rax, 4, killed %rax, 5, _ + JE_1 %bb.1, implicit %eflags + RETQ %ebx + bb.1: + liveins: %rax, %rbp, %rbx + %ebp = LEA64_32r killed %rbx, 4, killed %rbx, 5, _ + RETQ %ebp + +... + + + diff --git a/test/CodeGen/X86/lrshrink.ll b/test/CodeGen/X86/lrshrink.ll deleted file mode 100644 index a9cf086dbd90..000000000000 --- a/test/CodeGen/X86/lrshrink.ll +++ /dev/null @@ -1,57 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s - -; Checks if "%7 = add nuw nsw i64 %4, %2" is moved before the last call -; to minimize live-range. 
- -define i64 @test(i1 %a, i64 %r1, i64 %r2, i64 %s1, i64 %s2, i64 %t1, i64 %t2) { -entry: - br i1 %a, label %then, label %else - -then: - br label %else - -else: - %0 = phi i64 [ 4, %entry ], [ 10, %then ] - %r = phi i64 [ %r1, %entry ], [ %r2, %then ] - %s = phi i64 [ %s1, %entry ], [ %s2, %then ] - %t = phi i64 [ %t1, %entry ], [ %t2, %then ] -; CHECK-LABEL: test: -; CHECK: add -; CHECK: add -; CHECK: call -; CHECK: add -; CHECK: call -; CHECK: add -; CHECK: call -; CHECK: add - %1 = tail call i32 @_Z3foov() - %2 = zext i32 %1 to i64 - %3 = tail call i32 @_Z3foov() - %4 = zext i32 %3 to i64 - %5 = tail call i32 @_Z3foov() - %6 = zext i32 %5 to i64 - %7 = add nuw nsw i64 %0, %r - tail call void @llvm.dbg.value(metadata i64 %7, i64 0, metadata !5, metadata !DIExpression()), !dbg !6 - %8 = add nuw nsw i64 %2, %7 - %9 = add nuw nsw i64 %4, %8 - %10 = add nuw nsw i64 %6, %9 - %11 = add nuw nsw i64 %s, %t - tail call void @llvm.dbg.value(metadata i64 %11, i64 0, metadata !5, metadata !DIExpression()), !dbg !6 - %12 = add nuw nsw i64 %10, %11 - ret i64 %12 -} - -declare i32 @_Z3foov() -declare void @llvm.dbg.value(metadata, i64, metadata, metadata) - -!llvm.dbg.cu = !{!0} -!llvm.module.flags = !{!1, !2} - -!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, emissionKind: FullDebug) -!1 = !{i32 2, !"Dwarf Version", i32 4} -!2 = !{i32 2, !"Debug Info Version", i32 3} -!3 = !DIFile(filename: "a.c", directory: "./") -!4 = distinct !DISubprogram(name: "test", scope: !3, unit: !0) -!5 = !DILocalVariable(name: "x", scope: !4) -!6 = !DILocation(line: 4, scope: !4) diff --git a/test/CodeGen/X86/madd.ll b/test/CodeGen/X86/madd.ll index af86df510016..d332b2f3169f 100644 --- a/test/CodeGen/X86/madd.ll +++ b/test/CodeGen/X86/madd.ll @@ -129,9 +129,9 @@ define i32 @test_unsigned_short(i16* nocapture readonly, i16* nocapture readonly ; SSE2-NEXT: pmullw %xmm2, %xmm3 ; SSE2-NEXT: movdqa %xmm3, %xmm2 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3] -; SSE2-NEXT: paddd %xmm2, %xmm0 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7] ; SSE2-NEXT: paddd %xmm3, %xmm1 +; SSE2-NEXT: paddd %xmm2, %xmm0 ; SSE2-NEXT: addq $16, %rsi ; SSE2-NEXT: addq $16, %rdi ; SSE2-NEXT: addq $-8, %rax @@ -246,23 +246,23 @@ define i32 @_Z9test_charPcS_i(i8* nocapture readonly, i8* nocapture readonly, i3 ; SSE2-NEXT: pmullw %xmm4, %xmm5 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3] ; SSE2-NEXT: psrad $16, %xmm4 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4,4,5,5,6,6,7,7] +; SSE2-NEXT: psrad $16, %xmm5 +; SSE2-NEXT: movq {{.*#+}} xmm6 = mem[0],zero +; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: psraw $8, %xmm6 +; SSE2-NEXT: movq {{.*#+}} xmm7 = mem[0],zero +; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: psraw $8, %xmm7 +; SSE2-NEXT: pmullw %xmm6, %xmm7 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3] +; SSE2-NEXT: psrad $16, %xmm6 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4,4,5,5,6,6,7,7] +; SSE2-NEXT: psrad $16, %xmm7 +; SSE2-NEXT: paddd %xmm7, %xmm2 +; SSE2-NEXT: paddd %xmm6, %xmm3 +; SSE2-NEXT: paddd %xmm5, %xmm1 ; SSE2-NEXT: paddd %xmm4, %xmm0 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] -; SSE2-NEXT: psrad $16, %xmm4 -; SSE2-NEXT: paddd %xmm4, %xmm1 -; 
SSE2-NEXT: movq {{.*#+}} xmm4 = mem[0],zero -; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: psraw $8, %xmm4 -; SSE2-NEXT: movq {{.*#+}} xmm5 = mem[0],zero -; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: psraw $8, %xmm5 -; SSE2-NEXT: pmullw %xmm4, %xmm5 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3] -; SSE2-NEXT: psrad $16, %xmm4 -; SSE2-NEXT: paddd %xmm4, %xmm3 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] -; SSE2-NEXT: psrad $16, %xmm4 -; SSE2-NEXT: paddd %xmm4, %xmm2 ; SSE2-NEXT: addq $16, %rsi ; SSE2-NEXT: addq $16, %rdi ; SSE2-NEXT: addq $-16, %rax diff --git a/test/CodeGen/X86/masked_gather_scatter.ll b/test/CodeGen/X86/masked_gather_scatter.ll index c5de8dd96cbc..91087f650ad6 100644 --- a/test/CodeGen/X86/masked_gather_scatter.ll +++ b/test/CodeGen/X86/masked_gather_scatter.ll @@ -300,8 +300,8 @@ define <8 x i32> @test6(<8 x i32>%a1, <8 x i32*> %ptr) { ; ; KNL_32-LABEL: test6: ; KNL_32: # BB#0: -; KNL_32-NEXT: kxnorw %k0, %k0, %k1 ; KNL_32-NEXT: vpmovsxdq %ymm1, %zmm2 +; KNL_32-NEXT: kxnorw %k0, %k0, %k1 ; KNL_32-NEXT: kxnorw %k0, %k0, %k2 ; KNL_32-NEXT: vpgatherqd (,%zmm2), %ymm1 {%k2} ; KNL_32-NEXT: vpscatterqd %ymm0, (,%zmm2) {%k1} @@ -1575,7 +1575,7 @@ define <16 x float> @test29(float* %base, <16 x i32> %ind) { ; Check non-power-of-2 case. It should be scalarized. declare <3 x i32> @llvm.masked.gather.v3i32.v3p0i32(<3 x i32*>, i32, <3 x i1>, <3 x i32>) define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x i32> %src0) { -; ALL-LABEL: test30: +; ALL-LABEL: test30 ; ALL-NOT: gather %sext_ind = sext <3 x i32> %ind to <3 x i64> @@ -1691,12 +1691,12 @@ define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i ; KNL_32-LABEL: test_gather_16i64: ; KNL_32: # BB#0: ; KNL_32-NEXT: pushl %ebp -; KNL_32-NEXT: .Lcfi4: +; KNL_32-NEXT: .Lcfi0: ; KNL_32-NEXT: .cfi_def_cfa_offset 8 -; KNL_32-NEXT: .Lcfi5: +; KNL_32-NEXT: .Lcfi1: ; KNL_32-NEXT: .cfi_offset %ebp, -8 ; KNL_32-NEXT: movl %esp, %ebp -; KNL_32-NEXT: .Lcfi6: +; KNL_32-NEXT: .Lcfi2: ; KNL_32-NEXT: .cfi_def_cfa_register %ebp ; KNL_32-NEXT: andl $-64, %esp ; KNL_32-NEXT: subl $64, %esp @@ -1814,12 +1814,12 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, < ; KNL_32-LABEL: test_gather_16f64: ; KNL_32: # BB#0: ; KNL_32-NEXT: pushl %ebp -; KNL_32-NEXT: .Lcfi7: +; KNL_32-NEXT: .Lcfi3: ; KNL_32-NEXT: .cfi_def_cfa_offset 8 -; KNL_32-NEXT: .Lcfi8: +; KNL_32-NEXT: .Lcfi4: ; KNL_32-NEXT: .cfi_offset %ebp, -8 ; KNL_32-NEXT: movl %esp, %ebp -; KNL_32-NEXT: .Lcfi9: +; KNL_32-NEXT: .Lcfi5: ; KNL_32-NEXT: .cfi_def_cfa_register %ebp ; KNL_32-NEXT: andl $-64, %esp ; KNL_32-NEXT: subl $64, %esp @@ -1936,12 +1936,12 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> % ; KNL_32-LABEL: test_scatter_16i64: ; KNL_32: # BB#0: ; KNL_32-NEXT: pushl %ebp -; KNL_32-NEXT: .Lcfi10: +; KNL_32-NEXT: .Lcfi6: ; KNL_32-NEXT: .cfi_def_cfa_offset 8 -; KNL_32-NEXT: .Lcfi11: +; KNL_32-NEXT: .Lcfi7: ; KNL_32-NEXT: .cfi_offset %ebp, -8 ; KNL_32-NEXT: movl %esp, %ebp -; KNL_32-NEXT: .Lcfi12: +; KNL_32-NEXT: .Lcfi8: ; KNL_32-NEXT: .cfi_def_cfa_register %ebp ; KNL_32-NEXT: andl $-64, %esp ; KNL_32-NEXT: subl $64, %esp @@ -2058,12 +2058,12 @@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou ; KNL_32-LABEL: test_scatter_16f64: ; KNL_32: # 
BB#0: ; KNL_32-NEXT: pushl %ebp -; KNL_32-NEXT: .Lcfi13: +; KNL_32-NEXT: .Lcfi9: ; KNL_32-NEXT: .cfi_def_cfa_offset 8 -; KNL_32-NEXT: .Lcfi14: +; KNL_32-NEXT: .Lcfi10: ; KNL_32-NEXT: .cfi_offset %ebp, -8 ; KNL_32-NEXT: movl %esp, %ebp -; KNL_32-NEXT: .Lcfi15: +; KNL_32-NEXT: .Lcfi11: ; KNL_32-NEXT: .cfi_def_cfa_register %ebp ; KNL_32-NEXT: andl $-64, %esp ; KNL_32-NEXT: subl $64, %esp @@ -2139,12 +2139,12 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6 ; KNL_32-LABEL: test_pr28312: ; KNL_32: # BB#0: ; KNL_32-NEXT: pushl %ebp -; KNL_32-NEXT: .Lcfi16: +; KNL_32-NEXT: .Lcfi12: ; KNL_32-NEXT: .cfi_def_cfa_offset 8 -; KNL_32-NEXT: .Lcfi17: +; KNL_32-NEXT: .Lcfi13: ; KNL_32-NEXT: .cfi_offset %ebp, -8 ; KNL_32-NEXT: movl %esp, %ebp -; KNL_32-NEXT: .Lcfi18: +; KNL_32-NEXT: .Lcfi14: ; KNL_32-NEXT: .cfi_def_cfa_register %ebp ; KNL_32-NEXT: andl $-32, %esp ; KNL_32-NEXT: subl $32, %esp diff --git a/test/CodeGen/X86/merge-consecutive-loads-128.ll b/test/CodeGen/X86/merge-consecutive-loads-128.ll index 2f7714e63886..71417694b0d4 100644 --- a/test/CodeGen/X86/merge-consecutive-loads-128.ll +++ b/test/CodeGen/X86/merge-consecutive-loads-128.ll @@ -270,9 +270,9 @@ define <4 x float> @merge_4f32_f32_012u(float* %ptr) nounwind uwtable noinline s ; SSE2: # BB#0: ; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero ; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero ; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero -; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; SSE2-NEXT: retq ; ; SSE41-LABEL: merge_4f32_f32_012u: @@ -292,9 +292,9 @@ define <4 x float> @merge_4f32_f32_012u(float* %ptr) nounwind uwtable noinline s ; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero ; X32-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; X32-SSE1-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero ; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero -; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; X32-SSE1-NEXT: retl ; ; X32-SSE41-LABEL: merge_4f32_f32_012u: @@ -321,9 +321,9 @@ define <4 x float> @merge_4f32_f32_019u(float* %ptr) nounwind uwtable noinline s ; SSE2: # BB#0: ; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero ; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero ; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero -; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; SSE2-NEXT: retq ; ; SSE41-LABEL: merge_4f32_f32_019u: @@ -343,9 +343,9 @@ define <4 x float> @merge_4f32_f32_019u(float* %ptr) nounwind uwtable noinline s ; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero ; X32-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; X32-SSE1-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero ; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = 
mem[0],zero,zero,zero -; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; X32-SSE1-NEXT: retl ; ; X32-SSE41-LABEL: merge_4f32_f32_019u: diff --git a/test/CodeGen/X86/misched-matrix.ll b/test/CodeGen/X86/misched-matrix.ll index 94bbe75702cb..e62a1d04dad6 100644 --- a/test/CodeGen/X86/misched-matrix.ll +++ b/test/CodeGen/X86/misched-matrix.ll @@ -17,9 +17,9 @@ ; ; TOPDOWN-LABEL: %for.body ; TOPDOWN: movl %{{.*}}, ( -; TOPDOWN-NOT: imull {{[0-9]*}}( +; TOPDOWN: imull {{[0-9]*}}( ; TOPDOWN: movl %{{.*}}, 4( -; TOPDOWN-NOT: imull {{[0-9]*}}( +; TOPDOWN: imull {{[0-9]*}}( ; TOPDOWN: movl %{{.*}}, 8( ; TOPDOWN: movl %{{.*}}, 12( ; TOPDOWN-LABEL: %for.end diff --git a/test/CodeGen/X86/mul-i1024.ll b/test/CodeGen/X86/mul-i1024.ll index 340aa047c022..87661004373f 100644 --- a/test/CodeGen/X86/mul-i1024.ll +++ b/test/CodeGen/X86/mul-i1024.ll @@ -11,7 +11,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: pushl %edi ; X32-NEXT: pushl %esi ; X32-NEXT: andl $-8, %esp -; X32-NEXT: subl $2640, %esp # imm = 0xA50 +; X32-NEXT: subl $2632, %esp # imm = 0xA48 ; X32-NEXT: movl 8(%ebp), %eax ; X32-NEXT: movl 64(%eax), %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill @@ -58,7 +58,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: movl 20(%eax), %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl 24(%eax), %ecx -; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl 28(%eax), %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl 32(%eax), %ecx @@ -1992,7 +1992,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl $0, %ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi -; X32-NEXT: movl %esi, (%esp) # 4-byte Spill +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl $0, %esi ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill @@ -2002,23 +2002,19 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl %ecx, %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl %edi, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: adcl $0, %edi -; X32-NEXT: addl %ebx, %eax -; X32-NEXT: adcl %esi, %edi -; X32-NEXT: movl $0, %ebx -; X32-NEXT: adcl $0, %ebx -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl %ebx, %edi +; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: setb %al +; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movzbl %al, %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill 
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 @@ -2035,8 +2031,14 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %eax, %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl %ecx, %edx -; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %eax, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx @@ -2045,157 +2047,144 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: adcl %eax, %esi ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %eax, %ecx -; X32-NEXT: movl %ecx, %eax -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %edi, %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %eax, %ebx +; X32-NEXT: addl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %ebx, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: addl %esi, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %eax, %esi ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx -; X32-NEXT: movl %edx, (%esp) # 4-byte Spill +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl %ecx, %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl $0, %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl $0, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: addl %edx, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %edx, %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, 
{{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl $0, %esi +; X32-NEXT: adcl $0, %eax ; X32-NEXT: adcl $0, %ecx -; X32-NEXT: addl %edi, %esi -; X32-NEXT: adcl %ebx, %ecx -; X32-NEXT: movl $0, %edx -; X32-NEXT: adcl $0, %edx -; X32-NEXT: sbbl %eax, %eax -; X32-NEXT: andl $1, %eax -; X32-NEXT: addl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: addl %ebx, %eax +; X32-NEXT: adcl %edi, %ecx +; X32-NEXT: setb %dl +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movzbl %dl, %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: addl %edi, %ebx -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %esi, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %edi, %ebx -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl %esi, %edi ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %edi, %ebx -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %edi, %ebx -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: addl %esi, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %edi, %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload -; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %edi, %esi +; X32-NEXT: addl %eax, %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %ecx, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; X32-NEXT: adcl %edx, %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %eax, %ebx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), 
%edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi ; X32-NEXT: adcl $0, %edi -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx -; X32-NEXT: adcl $0, %edx -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl $0, %ecx -; X32-NEXT: addl %edi, %eax -; X32-NEXT: adcl %edx, %ecx -; X32-NEXT: movl $0, %edx -; X32-NEXT: adcl $0, %edx -; X32-NEXT: sbbl %esi, %esi -; X32-NEXT: andl $1, %esi -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %edi, %edx +; X32-NEXT: adcl %eax, %esi +; X32-NEXT: setb %al +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; 
X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: addl %eax, %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %ecx, %ebx -; X32-NEXT: movl %ebx, (%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl %esi, %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: addl %edx, %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %esi, %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx @@ -2215,16 +2204,15 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: adcl $0, %ecx ; X32-NEXT: addl %edx, %esi ; X32-NEXT: adcl %edi, %ecx -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %edx, %edx -; X32-NEXT: andl $1, %edx +; X32-NEXT: setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %esi ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload @@ -2246,7 +2234,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl $0, (%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx @@ -2268,16 +2256,15 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: adcl $0, %esi ; X32-NEXT: addl %eax, %edx ; X32-NEXT: adcl %ecx, %esi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx +; X32-NEXT: 
setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload @@ -2306,112 +2293,97 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: adcl $0, %ebx ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, (%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: sbbl %eax, %eax -; X32-NEXT: andl $1, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, %edx +; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl %eax, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl $0, %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx -; X32-NEXT: adcl $0, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi ; X32-NEXT: adcl $0, %esi -; X32-NEXT: addl %eax, %edx -; X32-NEXT: adcl %ecx, %esi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %edx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: addl %eax, %esi +; X32-NEXT: adcl %edi, %edx +; X32-NEXT: setb %al +; X32-NEXT: addl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movzbl %al, %edi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl $0, 
%ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: addl %edx, %eax +; X32-NEXT: addl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %edi, %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: adcl %ebx, %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl %edi, %edx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: movl %ebx, %esi -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl (%esp), %eax # 4-byte Reload -; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl %eax, %esi ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: adcl $0, %eax +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl 
{{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: addl %esi, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %esi, %eax +; X32-NEXT: movl %eax, %ecx ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl %edx, %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx -; X32-NEXT: movl %ebx, (%esp) # 4-byte Spill +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl $0, %ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill @@ -2429,16 +2401,14 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: adcl $0, %edx ; X32-NEXT: addl %ebx, %esi ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: movl $0, %ecx -; X32-NEXT: adcl $0, %ecx -; X32-NEXT: sbbl %eax, %eax -; X32-NEXT: andl $1, %eax +; X32-NEXT: setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl $0, %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -2447,16 +2417,16 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %ebx, %edi -; X32-NEXT: movl %edi, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ebx, %eax +; X32-NEXT: movl %eax, %ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl %ecx, %eax -; X32-NEXT: movl %eax, %edi +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -2467,10 +2437,10 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl %edx, %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl %edi, %edx -; X32-NEXT: adcl (%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte 
Folded Reload ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: adcl %edi, %eax ; X32-NEXT: movl %eax, %esi ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload @@ -2485,52 +2455,50 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: adcl %ebx, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, %edx +; X32-NEXT: movl %eax, %esi ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi -; X32-NEXT: adcl $0, %esi -; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx -; X32-NEXT: adcl $0, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: adcl $0, %ecx -; X32-NEXT: addl %eax, %edx -; X32-NEXT: adcl %esi, %ecx -; X32-NEXT: movl $0, %edi -; X32-NEXT: adcl $0, %edi -; X32-NEXT: sbbl %eax, %eax -; X32-NEXT: andl $1, %eax ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %eax, %edi +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: setb %al +; X32-NEXT: addl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload -; X32-NEXT: movl %esi, (%esp) # 4-byte Spill -; X32-NEXT: movl (%esp), %esi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: movl %esi, (%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: addl %edx, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: addl %edi, %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %ecx, %eax +; X32-NEXT: adcl %esi, %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %edi, (%esp) # 4-byte Folded Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: 
movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -2550,16 +2518,15 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: adcl $0, %esi ; X32-NEXT: addl %eax, %edx ; X32-NEXT: adcl %ecx, %esi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx +; X32-NEXT: setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload @@ -2582,7 +2549,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl $0, (%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx @@ -2603,16 +2570,15 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: adcl $0, %esi ; X32-NEXT: addl %eax, %edx ; X32-NEXT: adcl %ecx, %esi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx +; X32-NEXT: setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload @@ -2639,105 +2605,88 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: adcl $0, %ecx ; X32-NEXT: adcl $0, %edi ; X32-NEXT: adcl $0, %ebx -; X32-NEXT: movl %edx, %eax -; X32-NEXT: adcl $0, %eax +; X32-NEXT: adcl $0, %edx ; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl (%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: movl %ebx, (%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte 
Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: sbbl %eax, %eax -; X32-NEXT: andl $1, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, %edx -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, %esi -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl %ecx, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: adcl $0, %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx -; X32-NEXT: adcl $0, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi ; X32-NEXT: adcl $0, %esi -; X32-NEXT: addl %eax, %edx -; X32-NEXT: adcl %ecx, %esi -; X32-NEXT: movl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: addl %edx, %esi +; X32-NEXT: adcl %ecx, %edi +; X32-NEXT: setb %al +; X32-NEXT: addl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movzbl %al, %ebx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %edx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: addl %edx, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %esi, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: addl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %edi, %eax +; X32-NEXT: movl %eax, %edi +; X32-NEXT: adcl %ebx, %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl 
%ebx, %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl (%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: adcl %eax, %esi +; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl %edi, %eax +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill @@ -2766,76 +2715,70 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill -; X32-NEXT: addl %eax, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %edi, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %esi, %edx +; X32-NEXT: movl %edx, %eax 
+; X32-NEXT: adcl %ecx, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl $0, %ebx -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: addl %eax, %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %eax, %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %eax, %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl $0, %eax -; X32-NEXT: adcl $0, %esi -; X32-NEXT: addl %ebx, %eax -; X32-NEXT: movl %eax, %edx -; X32-NEXT: adcl %edi, %esi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx -; X32-NEXT: addl (%esp), %edx # 4-byte Folded Reload -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, %edi +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl %ebx, %edi +; X32-NEXT: adcl %edx, %ecx +; X32-NEXT: setb %dl +; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movzbl %dl, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl %ecx, %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %edx, %ebx ; X32-NEXT: addl %eax, %ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill -; X32-NEXT: movl %ecx, %edi -; X32-NEXT: adcl %eax, %edi -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %eax, %esi ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %ecx, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %eax, %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload -; X32-NEXT: adcl %edx, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill 
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %edx, %eax ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %esi, %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl %eax, %esi -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: adcl %ecx, %esi ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %eax, %ecx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload @@ -2847,55 +2790,53 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl %ebx, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl $0, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi ; X32-NEXT: adcl $0, %esi -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: setb %al +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl $0, %ecx -; X32-NEXT: addl %edx, %eax -; X32-NEXT: adcl %esi, %ecx -; X32-NEXT: movl $0, %edx -; X32-NEXT: adcl $0, %edx -; X32-NEXT: sbbl %esi, %esi -; X32-NEXT: andl $1, %esi -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: adcl (%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) 
# 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: addl %eax, %edi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: addl %edx, %edi ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %ecx, %ebx +; X32-NEXT: adcl %esi, %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl %esi, %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -2915,20 +2856,19 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: adcl $0, %esi ; X32-NEXT: addl %eax, %edx ; X32-NEXT: adcl %ecx, %esi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx +; X32-NEXT: setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload -; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload @@ -2968,16 +2908,15 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: adcl $0, %esi ; X32-NEXT: addl %eax, %edx ; X32-NEXT: adcl %ecx, %esi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx +; X32-NEXT: setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload @@ -3004,109 +2943,87 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: adcl $0, %ecx ; X32-NEXT: adcl $0, %edi ; X32-NEXT: adcl $0, %ebx -; X32-NEXT: movl %edx, %eax -; 
X32-NEXT: adcl $0, %eax +; X32-NEXT: adcl $0, %edx ; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: sbbl %eax, %eax -; X32-NEXT: andl $1, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, %edx -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, %esi -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: adcl $0, %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl $0, %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl $0, %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl $0, %edx -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi -; X32-NEXT: adcl $0, %esi -; X32-NEXT: addl %eax, %edx -; X32-NEXT: adcl %ecx, %esi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %edx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl %edi, %esi +; X32-NEXT: adcl %edx, %ecx +; X32-NEXT: setb %al +; X32-NEXT: addl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movzbl %al, %edi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl $0, %ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: addl %edx, %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %esi, %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl %edi, %edx +; X32-NEXT: movl 
{{[0-9]+}}(%esp), %edx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: movl %ebx, %esi -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: addl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %ecx, %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %ebx, %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: movl %esi, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl %eax, %esi +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl $0, %eax +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: movl %ecx, %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte 
Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill @@ -3127,56 +3044,33 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl $0, %ebx ; X32-NEXT: adcl $0, %edi -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: adcl $0, %esi ; X32-NEXT: adcl $0, %edx -; X32-NEXT: adcl $0, %ecx +; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl $0, %eax -; X32-NEXT: adcl $0, %esi -; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, %ecx ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: sbbl %eax, %eax -; X32-NEXT: andl $1, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: addl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: movl 
{{[0-9]+}}(%esp), %eax # 4-byte Reload @@ -3214,37 +3108,35 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl $0, %edx -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl $0, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi ; X32-NEXT: addl %edi, %edx -; X32-NEXT: adcl %eax, %ecx -; X32-NEXT: movl $0, %ebx -; X32-NEXT: adcl $0, %ebx -; X32-NEXT: sbbl %eax, %eax -; X32-NEXT: andl $1, %eax +; X32-NEXT: adcl %eax, %esi +; X32-NEXT: setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl (%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: addl %edx, %edi ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %ecx, %eax +; X32-NEXT: adcl %esi, %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %ebx, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -3264,38 +3156,37 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: adcl $0, %esi ; X32-NEXT: addl %eax, %edx ; X32-NEXT: adcl %ecx, %esi -; X32-NEXT: movl $0, %edi -; X32-NEXT: adcl $0, %edi -; X32-NEXT: sbbl %ebx, %ebx -; X32-NEXT: andl $1, %ebx +; X32-NEXT: setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %edi ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload -; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl (%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, (%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl 
{{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl $0, %ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: addl %edx, %ecx -; X32-NEXT: movl (%esp), %edx # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: addl %edx, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload ; X32-NEXT: adcl %esi, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload ; X32-NEXT: adcl %edi, %esi -; X32-NEXT: adcl %ebx, %eax -; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ebx, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx -; X32-NEXT: movl %edx, (%esp) # 4-byte Spill +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill @@ -3319,15 +3210,14 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: adcl $0, %esi ; X32-NEXT: addl %eax, %edx ; X32-NEXT: adcl %ecx, %esi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx +; X32-NEXT: setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload @@ -3337,118 +3227,113 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: addl %edx, %ebx -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: adcl %esi, %edx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: adcl %esi, %ebx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload -; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl %ecx, %edx ; X32-NEXT: movl 
{{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl (%esp), %eax # 4-byte Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl %edx, %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl %ebx, %eax
 ; X32-NEXT: adcl $0, %eax
 ; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %esi, %ecx
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl %ecx, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl $0, %ecx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl $0, %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
 ; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl %edi, %edx
-; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ebx, %ebx
-; X32-NEXT: andl $1, %ebx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: addl %ecx, %esi
+; X32-NEXT: adcl %edx, %edi
+; X32-NEXT: setb %cl
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movzbl %cl, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: addl %esi, %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: adcl %edi, %esi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl %edx, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, %edx
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: adcl %ebx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -3462,36 +3347,34 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, (%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload
 ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %ebx
 ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %ecx
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %edi
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl $0, %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
@@ -3500,18 +3383,17 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: adcl $0, %edi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
 ; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl %eax, %edi
-; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: addl %ecx, %edi
+; X32-NEXT: adcl %eax, %esi
+; X32-NEXT: setb %al
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
@@ -3555,14 +3437,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: adcl $0, %esi
 ; X32-NEXT: addl %eax, %edx
 ; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: setb %al
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl $0, %ecx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
@@ -3612,136 +3493,120 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: adcl $0, %esi
 ; X32-NEXT: addl %eax, %edx
 ; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ebx, %ebx
-; X32-NEXT: andl $1, %ebx
+; X32-NEXT: setb %al
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
 ; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: adcl %ebx, %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl %edx, %eax
+; X32-NEXT: adcl %esi, %ecx
+; X32-NEXT: movl %edi, %edx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
 ; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, %esi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %ebx, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %edx, %ecx
 ; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: movl %esi, %edx
 ; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edi, %esi
-; X32-NEXT: adcl $0, %esi
+; X32-NEXT: adcl $0, %ebx
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
 ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: addl %eax, %esi
+; X32-NEXT: adcl %edx, %edi
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl %edi, %edx
-; X32-NEXT: adcl %ebx, %ecx
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ebx, %ebx
-; X32-NEXT: andl $1, %ebx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: addl %esi, %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: adcl %edi, %esi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: adcl %ecx, %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %ebx, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: adcl %edx, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %ecx
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -3760,16 +3625,15 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: adcl $0, %esi
 ; X32-NEXT: addl %edi, %edx
 ; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: setb %al
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
@@ -3786,79 +3650,80 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, %esi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: adcl $0, %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl $0, %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: sbbl %esi, %esi
-; X32-NEXT: andl $1, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
 ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: addl %eax, %edx
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: addl %eax, %edx
-; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: addl %edx, %edi
+; X32-NEXT: adcl %esi, %ebx
+; X32-NEXT: movl %ecx, %edx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
 ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
 ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -3877,35 +3742,35 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: adcl $0, %ecx
 ; X32-NEXT: addl %edx, %eax
 ; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl $0, %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: sbbl %esi, %esi
-; X32-NEXT: andl $1, %esi
+; X32-NEXT: setb %dl
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movzbl %dl, %edx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl %edi, %ebx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
 ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl %esi, %edi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl %ecx, %edi
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -3925,44 +3790,43 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: adcl $0, %esi
 ; X32-NEXT: addl %eax, %edx
 ; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ebx, %ebx
-; X32-NEXT: andl $1, %ebx
+; X32-NEXT: setb %al
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: addl %ecx, %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl %ecx, %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ebx, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: addl %edx, %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %esi, %edx
+; X32-NEXT: adcl %esi, %edi
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %ebx, %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
@@ -3986,15 +3850,14 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: adcl $0, %esi
 ; X32-NEXT: addl %eax, %edx
 ; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ebx, %ebx
-; X32-NEXT: andl $1, %ebx
+; X32-NEXT: setb %al
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: adcl $0, %ebx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
@@ -4007,10 +3870,10 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: addl %edx, %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edi, %esi
-; X32-NEXT: adcl %ebx, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, %edx
+; X32-NEXT: adcl %ebx, %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -4025,116 +3888,107 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl $0, %eax
 ; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: adcl $0, %edx
 ; X32-NEXT: adcl $0, %esi
+; X32-NEXT: adcl $0, %edx
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
 ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %esi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: adcl $0, %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl $0, %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: sbbl %esi, %esi
-; X32-NEXT: andl $1, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %ebx
-; X32-NEXT: adcl %ecx, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: addl %eax, %esi
+; X32-NEXT: adcl %ecx, %edx
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movzbl %al, %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: addl %esi, %edi
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: adcl %edx, %edi
+; X32-NEXT: movl %ebx, %edx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl $0, %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
 ; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl %edi, %edx
-; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: addl %eax, %edx
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: setb %al
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
@@ -4156,10 +4010,10 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: movl %ecx, %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl $0, %ecx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: adcl $0, %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl $0, %ecx
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
@@ -4168,45 +4022,46 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: adcl $0, %ebx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl %ecx, %ebx
-; X32-NEXT: adcl %edx, %eax
-; X32-NEXT: movl $0, %ecx
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: sbbl %edi, %edi
-; X32-NEXT: andl $1, %edi
+; X32-NEXT: addl %edx, %ebx
+; X32-NEXT: adcl %ecx, %eax
+; X32-NEXT: setb %cl
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movzbl %cl, %ecx
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT: addl %ebx, %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
 ; X32-NEXT: adcl %eax, %ebx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl %edi, %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl %edx, %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -4216,15 +4071,16 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
 ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -4236,25 +4092,21 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
@@ -4264,6 +4116,11 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -4278,13 +4135,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
 ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl (%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
@@ -4292,10 +4149,6 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
 ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
@@ -4304,6 +4157,10 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -4312,67 +4169,66 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl 16(%ebp), %ebx
-; X32-NEXT: movl %ecx, 4(%ebx)
-; X32-NEXT: movl 16(%ebp), %ecx
-; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: movl 16(%ebp), %edx
+; X32-NEXT: movl %ecx, 4(%edx)
+; X32-NEXT: movl %eax, (%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 8(%ecx)
+; X32-NEXT: movl %eax, 8(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 12(%ecx)
+; X32-NEXT: movl %eax, 12(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 16(%ecx)
+; X32-NEXT: movl %eax, 16(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 20(%ecx)
+; X32-NEXT: movl %eax, 20(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 24(%ecx)
+; X32-NEXT: movl %eax, 24(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 28(%ecx)
+; X32-NEXT: movl %eax, 28(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 32(%ecx)
+; X32-NEXT: movl %eax, 32(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 36(%ecx)
+; X32-NEXT: movl %eax, 36(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 40(%ecx)
+; X32-NEXT: movl %eax, 40(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 44(%ecx)
+; X32-NEXT: movl %eax, 44(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 48(%ecx)
+; X32-NEXT: movl %eax, 48(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 52(%ecx)
+; X32-NEXT: movl %eax, 52(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 56(%ecx)
+; X32-NEXT: movl %eax, 56(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 60(%ecx)
+; X32-NEXT: movl %eax, 60(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 64(%ecx)
+; X32-NEXT: movl %eax, 64(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 68(%ecx)
+; X32-NEXT: movl %eax, 68(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 72(%ecx)
+; X32-NEXT: movl %eax, 72(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 76(%ecx)
+; X32-NEXT: movl %eax, 76(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 80(%ecx)
+; X32-NEXT: movl %eax, 80(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 84(%ecx)
+; X32-NEXT: movl %eax, 84(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 88(%ecx)
+; X32-NEXT: movl %eax, 88(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 92(%ecx)
+; X32-NEXT: movl %eax, 92(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 96(%ecx)
-; X32-NEXT: movl %edx, 100(%ecx)
+; X32-NEXT: movl %eax, 96(%edx)
+; X32-NEXT: movl %esi, 100(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 104(%ecx)
-; X32-NEXT: movl %esi, 108(%ecx)
+; X32-NEXT: movl %eax, 104(%edx)
+; X32-NEXT: movl %edi, 108(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 112(%ecx)
-; X32-NEXT: movl %edi, 116(%ecx)
+; X32-NEXT: movl %eax, 112(%edx)
+; X32-NEXT: movl %ebx, 116(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 120(%ecx)
+; X32-NEXT: movl %eax, 120(%edx)
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 124(%ecx)
+; X32-NEXT: movl %eax, 124(%edx)
 ; X32-NEXT: leal -12(%ebp), %esp
 ; X32-NEXT: popl %esi
 ; X32-NEXT: popl %edi
@@ -4390,40 +4246,41 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: pushq %rbx
 ; X64-NEXT: subq $352, %rsp # imm = 0x160
 ; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq 48(%rdi), %r8
-; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq 40(%rdi), %rcx
-; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq 48(%rdi), %r9
+; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq 40(%rdi), %rbp
+; X64-NEXT: movq %rbp, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq 32(%rdi), %rax
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rdi, %r13
-; X64-NEXT: xorl %r9d, %r9d
-; X64-NEXT: mulq %r9
-; X64-NEXT: movq %rdx, %rbx
-; X64-NEXT: movq %rax, %r11
-; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: mulq %r9
+; X64-NEXT: movq %rdi, %r10
+; X64-NEXT: xorl %r8d, %r8d
+; X64-NEXT: mulq %r8
+; X64-NEXT: movq %rdx, %rdi
 ; X64-NEXT: movq %rax, %rcx
-; X64-NEXT: addq %rbx, %rcx
+; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: mulq %r8
+; X64-NEXT: movq %rax, %rbx
+; X64-NEXT: addq %rdi, %rbx
 ; X64-NEXT: movq %rdx, %rbp
 ; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: addq %r11, %rcx
-; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %rbx, %rbp
-; X64-NEXT: movq %rbx, %rcx
-; X64-NEXT: sbbq %rbx, %rbx
-; X64-NEXT: andl $1, %ebx
+; X64-NEXT: addq %rcx, %rbx
+; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, %r11
+; X64-NEXT: adcq %rdi, %rbp
+; X64-NEXT: setb %bl
+; X64-NEXT: movzbl %bl, %ebx
 ; X64-NEXT: addq %rax, %rbp
 ; X64-NEXT: adcq %rdx, %rbx
-; X64-NEXT: movq %r8, %rax
-; X64-NEXT: mulq %r9
+; X64-NEXT: movq %r9, %rax
+; X64-NEXT: mulq %r8
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %r11, %r12
+; X64-NEXT: movq %r11, %r8
 ; X64-NEXT: addq %rax, %r12
-; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %rcx, %r8
-; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: movq %rdi, %r9
+; X64-NEXT: movq %r9, (%rsp) # 8-byte Spill
 ; X64-NEXT: adcq %rdx, %rax
 ; X64-NEXT: addq %rbp, %r12
 ; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
@@ -4433,186 +4290,182 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: xorl %ebp, %ebp
 ; X64-NEXT: mulq %rbp
-; X64-NEXT: movq %rax, %r10
+; X64-NEXT: movq %rax, %rdi
 ; X64-NEXT: movq %rdx, %rcx
 ; X64-NEXT: movq 8(%rsi), %rax
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: mulq %rbp
-; X64-NEXT: xorl %r9d, %r9d
+; X64-NEXT: xorl %r11d, %r11d
 ; X64-NEXT: movq %rax, %r15
 ; X64-NEXT: addq %rcx, %r15
 ; X64-NEXT: movq %rdx, %rbp
 ; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: addq %r10, %r15
+; X64-NEXT: addq %rdi, %r15
 ; X64-NEXT: adcq %rcx, %rbp
-; X64-NEXT: movq %rcx, %rdi
-; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: sbbq %rbx, %rbx
-; X64-NEXT: andl $1, %ebx
+; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: setb %bl
 ; X64-NEXT: addq %rax, %rbp
+; X64-NEXT: movzbl %bl, %ebx
 ; X64-NEXT: adcq %rdx, %rbx
 ; X64-NEXT: movq 16(%rsi), %rax
-; X64-NEXT: movq %rsi, %r14
-; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rsi, %r13
+; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %r9
+; X64-NEXT: mulq %r11
 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r10, %rcx
-; X64-NEXT: movq %rcx, %rsi
-; X64-NEXT: addq %rax, %rsi
-; X64-NEXT: movq %rdi, %r9
-; X64-NEXT: adcq %rdx, %r9
-; X64-NEXT: addq %rbp, %rsi
-; X64-NEXT: movq %rsi, %r10
-; X64-NEXT: adcq %rbx, %r9
-; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r11, %rax
-; X64-NEXT: addq %rcx, %rax
-; X64-NEXT: adcq %rdi, %r8
-; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r13, %rbp
-; X64-NEXT: movq (%rbp), %rax
+; X64-NEXT: movq %rdi, %r14
+; X64-NEXT: addq %rax, %r14
+; X64-NEXT: movq %rcx, %r11
+; X64-NEXT: adcq %rdx, %r11
+; X64-NEXT: addq %rbp, %r14
+; X64-NEXT: adcq %rbx, %r11
+; X64-NEXT: movq %r8, %rax
+; X64-NEXT: movq %r8, %rbp
+; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: addq %rdi, %rax
+; X64-NEXT: movq %r9, %rax
+; X64-NEXT: adcq %rcx, %rax
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq (%r10), %rax
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: xorl %r8d, %r8d
 ; X64-NEXT: mulq %r8
 ; X64-NEXT: movq %rdx, %rsi
 ; X64-NEXT: movq %rax, %rbx
-; X64-NEXT: addq %rcx, %rax
+; X64-NEXT: addq %rdi, %rax
+; X64-NEXT: movq %rdi, %r9
 ; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: adcq %rdi, %rax
+; X64-NEXT: adcq %rcx, %rax
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq 32(%r14), %rax
+; X64-NEXT: movq 32(%r13), %rax
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: mulq %r8
+; X64-NEXT: xorl %r8d, %r8d
 ; X64-NEXT: movq %rax, %r13
-; X64-NEXT: movq %rdx, (%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: movq %rbx, %r14
+; X64-NEXT: movq %rbx, %rcx
 ; X64-NEXT: addq %r13, %rax
 ; X64-NEXT: movq %rsi, %rax
 ; X64-NEXT: adcq %rdx, %rax
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: addq %rcx, %r11
-; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rcx, %r8
-; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: addq %r9, %rax
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
 ; X64-NEXT: adcq %r15, %rax
 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r10, %r12
-; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r10, %rcx
+; X64-NEXT: adcq %r14, %r12
+; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; X64-NEXT: adcq %r9, %rax
+; X64-NEXT: adcq %r11, %rax
 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r9, %r10
-; X64-NEXT: movq 8(%rbp), %rax
+; X64-NEXT: movq %r11, %rdi
+; X64-NEXT: movq 8(%r10), %rax
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rbp, %rdi
-; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: xorl %edx, %edx
-; X64-NEXT: mulq %rdx
-; X64-NEXT: xorl %r9d, %r9d
-; X64-NEXT: movq %rax, %r12
-; X64-NEXT: addq %rsi, %r12
+; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: mulq %r8
+; X64-NEXT: movq %rax, %r11
+; X64-NEXT: addq %rsi, %r11
 ; X64-NEXT: movq %rdx, %rbp
 ; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: addq %r14, %r12
+; X64-NEXT: addq %rcx, %r11
 ; X64-NEXT: adcq %rsi, %rbp
 ; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: sbbq %rbx, %rbx
-; X64-NEXT: andl $1, %ebx
+; X64-NEXT: setb %bl
 ; X64-NEXT: addq %rax, %rbp
+; X64-NEXT: movzbl %bl, %ebx
 ; X64-NEXT: adcq %rdx, %rbx
-; X64-NEXT: movq 16(%rdi), %rax
+; X64-NEXT: movq 16(%r10), %rax
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %r9
-; X64-NEXT: xorl %edi, %edi
+; X64-NEXT: mulq %r8
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, %r8
+; X64-NEXT: addq %rax, %r8
+; X64-NEXT: movq %rsi, %r10
+; X64-NEXT: adcq %rdx, %r10
+; X64-NEXT: addq %rbp, %r8
+; X64-NEXT: movq %r8, %rax
+; X64-NEXT: adcq %rbx, %r10
+; X64-NEXT: movq %rcx, %rdx
+; X64-NEXT: movq %rcx, %r12
+; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: addq %r9, %rdx
 ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r14, %r9
-; X64-NEXT: addq %rax, %r9
-; X64-NEXT: adcq %rdx, %rsi
-; X64-NEXT: addq %rbp, %r9
-; X64-NEXT: movq %r9, %rdx
-; X64-NEXT: adcq %rbx, %rsi
-; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: movq %r14, %rsi
-; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: addq %r8, %rsi
-; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r12, %r15
+; X64-NEXT: movq %r11, %r8
+; X64-NEXT: adcq %r8, %r15
 ; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %rdx, %rcx
-; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rdx, %r15
-; X64-NEXT: adcq %rax, %r10
-; X64-NEXT: movq %r10, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rax, %r10
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: adcq %rax, %r14
+; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, %rcx
+; X64-NEXT: adcq %r10, %rdi
+; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
 ; X64-NEXT: movq 40(%rsi), %rax
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %rdi
-; X64-NEXT: xorl %r8d, %r8d
-; X64-NEXT: movq %rax, %rcx
-; X64-NEXT: movq (%rsp), %rdi # 8-byte Reload
-; X64-NEXT: addq %rdi, %rcx
+; X64-NEXT: xorl %r14d, %r14d
+; X64-NEXT: mulq %r14
+; X64-NEXT: movq %rax, %rdi
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload
+; X64-NEXT: addq %r9, %rdi
 ; X64-NEXT: movq %rdx, %rbp
 ; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: addq %r13, %rcx
-; X64-NEXT: adcq %rdi, %rbp
-; X64-NEXT: sbbq %rbx, %rbx
-; X64-NEXT: andl $1, %ebx
+; X64-NEXT: addq %r13, %rdi
+; X64-NEXT: adcq %r9, %rbp
+; X64-NEXT: setb %bl
 ; X64-NEXT: addq %rax, %rbp
-; X64-NEXT: adcq %rdx, %rbx
+; X64-NEXT: movzbl %bl, %r11d
+; X64-NEXT: adcq %rdx, %r11
 ; X64-NEXT: movq 48(%rsi), %rax
 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %r8
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: mulq %r14
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r13, %r8
-; X64-NEXT: addq %rax, %r8
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: adcq %rdx, %rax
-; X64-NEXT: addq %rbp, %r8
-; X64-NEXT: adcq %rbx, %rax
-; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: addq %r13, %r14
-; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %rcx, %r12
+; X64-NEXT: movq %r13, %rbx
+; X64-NEXT: addq %rax, %rbx
+; X64-NEXT: movq %r9, %rsi
+; X64-NEXT: adcq %rdx, %rsi
+; X64-NEXT: addq %rbp, %rbx
+; X64-NEXT: adcq %r11, %rsi
+; X64-NEXT: movq %r13, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: addq %r13, %r12
 ; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r8, %r15
-; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %rax, %r10
+; X64-NEXT: adcq %rdi, %r8
+; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq %rbx, %rcx
+; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq %rsi, %r10
 ; X64-NEXT: movq %r10, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rax, %rdx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload
+; X64-NEXT: movq %rdx, %rax
 ; X64-NEXT: addq %r13, %rax
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; X64-NEXT: adcq %rdi, %rax
+; X64-NEXT: movq (%rsp), %rax # 8-byte Reload
+; X64-NEXT: adcq %r9, %rax
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: movq %rdx, %rax
 ; X64-NEXT: addq %r13, %rax
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
-; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
-; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload
-; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
+; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
+; X64-NEXT: movq %rbx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rsi # 
8-byte Folded Reload +; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rax, %r8 +; X64-NEXT: movq %rax, %r9 ; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: movq 56(%rax), %r11 ; X64-NEXT: movq %r11, %rax +; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rdi, %r9 +; X64-NEXT: movq %rdi, %r10 ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rbx ; X64-NEXT: addq %rsi, %rbx @@ -4621,15 +4474,15 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: movq %rax, %r10 -; X64-NEXT: addq %rbx, %r10 +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: addq %rbx, %r8 ; X64-NEXT: adcq %rbp, %rsi -; X64-NEXT: sbbq %rcx, %rcx -; X64-NEXT: andl $1, %ecx +; X64-NEXT: setb %cl ; X64-NEXT: movq %r11, %rax ; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rdi, %r14 +; X64-NEXT: movq %rdi, %r11 ; X64-NEXT: addq %rsi, %rax +; X64-NEXT: movzbl %cl, %ecx ; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15 # 8-byte Reload ; X64-NEXT: addq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload @@ -4637,299 +4490,297 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload ; X64-NEXT: addq %rax, %r15 ; X64-NEXT: adcq %rdx, %r12 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %r10, %rbp +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %r9, %rcx -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %r9 -; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, %rax -; X64-NEXT: mulq %rcx +; X64-NEXT: mulq %rbp ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %r9, %rbx +; X64-NEXT: addq %rsi, %rbx ; X64-NEXT: adcq $0, %rbp -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: mulq %r14 +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rcx, %r10 +; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: addq %rbx, %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq %rbp, %rcx -; X64-NEXT: sbbq %rbp, %rbp -; X64-NEXT: andl $1, %ebp -; X64-NEXT: movq %rsi, %rbx -; X64-NEXT: movq %rbx, %rax -; X64-NEXT: mulq %r14 +; X64-NEXT: setb %bl +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %r13 ; X64-NEXT: movq %rax, %rsi ; X64-NEXT: addq %rcx, %rsi -; X64-NEXT: adcq %rbp, %r13 -; X64-NEXT: addq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: movzbl %bl, %eax +; X64-NEXT: adcq %rax, %r13 +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload ; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload -; X64-NEXT: addq %r8, %rsi -; X64-NEXT: adcq %r10, %r13 +; X64-NEXT: addq %r9, %rsi +; X64-NEXT: adcq %r8, %r13 ; X64-NEXT: adcq $0, %r15 ; X64-NEXT: adcq $0, %r12 -; X64-NEXT: movq %rdi, %rbp -; X64-NEXT: movq %rbp, %rax -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload -; X64-NEXT: mulq %r9 +; X64-NEXT: movq %r10, 
%rbx +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload +; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %r10 -; X64-NEXT: movq %rbx, %rax -; X64-NEXT: movq %rbx, %r14 -; X64-NEXT: mulq %r9 +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %rdi, %r9 +; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %rdi -; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rcx, %rbx +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rcx, %rbp ; X64-NEXT: adcq $0, %rdi -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: movq 24(%rax), %rcx -; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq %rbx, %rax ; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rcx, %rbp -; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rcx, %rbx +; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %r8 -; X64-NEXT: addq %rbx, %r8 +; X64-NEXT: addq %rbp, %r8 ; X64-NEXT: adcq %rdi, %rcx -; X64-NEXT: sbbq %rdi, %rdi -; X64-NEXT: andl $1, %edi -; X64-NEXT: movq %r14, %rax -; X64-NEXT: mulq %rbp +; X64-NEXT: setb %dil +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %rbx ; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rdi, %rdx -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: movzbl %dil, %ecx +; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r14 # 8-byte Reload -; X64-NEXT: adcq %r14, %rbp -; X64-NEXT: addq %rax, %rbx -; X64-NEXT: adcq %rdx, %rbp +; X64-NEXT: addq %r14, %rbp +; X64-NEXT: movq (%rsp), %rbx # 8-byte Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload +; X64-NEXT: adcq %r9, %rbx +; X64-NEXT: addq %rax, %rbp +; X64-NEXT: adcq %rdx, %rbx ; X64-NEXT: addq %rsi, %r10 ; X64-NEXT: movq %r10, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq %r13, %r8 ; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq $0, %rbx ; X64-NEXT: adcq $0, %rbp -; X64-NEXT: addq %r15, %rbx -; X64-NEXT: adcq %r12, %rbp -; X64-NEXT: movl $0, %r8d -; X64-NEXT: adcq $0, %r8 -; X64-NEXT: sbbq %r10, %r10 -; X64-NEXT: andl $1, %r10d +; X64-NEXT: adcq $0, %rbx +; X64-NEXT: addq %r15, %rbp +; X64-NEXT: adcq %r12, %rbx +; X64-NEXT: setb %r15b ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq %r9, %rsi +; X64-NEXT: movq %r11, %rsi ; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rdx, %r9 -; X64-NEXT: movq %rax, %r15 -; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %r11, %rax +; X64-NEXT: movq %rdx, %r11 +; X64-NEXT: movq %rax, %r13 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload +; X64-NEXT: movq %r12, %rax ; X64-NEXT: mulq %rsi ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %rdi -; X64-NEXT: addq %r9, %rdi +; X64-NEXT: addq %r11, %rdi ; X64-NEXT: adcq $0, %rsi ; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %r9 -; X64-NEXT: movq %rax, %r13 -; X64-NEXT: addq %rdi, %r13 -; X64-NEXT: adcq %rsi, %r9 -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi -; X64-NEXT: movq %r11, %rax -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rcx, %r12 -; X64-NEXT: addq %r9, %rax -; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %r11 +; X64-NEXT: 
addq %rdi, %r11 +; X64-NEXT: adcq %rsi, %rcx +; X64-NEXT: setb %sil +; X64-NEXT: movq %r12, %rax +; X64-NEXT: mulq %r8 +; X64-NEXT: movq %r8, %r12 +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: movzbl %sil, %ecx +; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: adcq %r14, %rsi +; X64-NEXT: addq %r14, %rcx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload +; X64-NEXT: adcq %r9, %r14 ; X64-NEXT: addq %rax, %rcx -; X64-NEXT: adcq %rdx, %rsi -; X64-NEXT: addq %rbx, %r15 -; X64-NEXT: adcq %rbp, %r13 -; X64-NEXT: adcq %r8, %rcx -; X64-NEXT: adcq %r10, %rsi -; X64-NEXT: addq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload -; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload +; X64-NEXT: adcq %rdx, %r14 +; X64-NEXT: addq %rbp, %r13 +; X64-NEXT: adcq %rbx, %r11 +; X64-NEXT: movzbl %r15b, %eax +; X64-NEXT: adcq %rax, %rcx +; X64-NEXT: adcq $0, %r14 +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload ; X64-NEXT: movq %r13, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload -; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload -; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload +; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload +; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: movq %rdi, %rax ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload ; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rax, %r9 -; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload -; X64-NEXT: movq 24(%rax), %rbp -; X64-NEXT: movq %rbp, %rax -; X64-NEXT: movq %rbp, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, %r14 +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: movq 24(%rax), %rcx +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rsi, %r14 +; X64-NEXT: movq %rsi, %r11 ; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rcx, %rbx +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rbx, %rbp ; X64-NEXT: adcq $0, %rsi ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload -; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload +; X64-NEXT: mulq %r9 +; X64-NEXT: movq %rdx, %rbx ; X64-NEXT: movq %rax, %r15 -; X64-NEXT: addq %rbx, %r15 -; X64-NEXT: adcq %rsi, %rcx -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi -; X64-NEXT: movq %rbp, %rax -; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rdi, %r11 -; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: addq %rbp, %r15 +; X64-NEXT: adcq %rsi, %rbx +; X64-NEXT: setb %sil +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %r9 +; X64-NEXT: addq %rbx, %rax +; X64-NEXT: movzbl %sil, %ecx +; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8 # 8-byte Reload ; X64-NEXT: addq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded 
Reload -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r10 # 8-byte Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload ; X64-NEXT: addq %rax, %r8 ; X64-NEXT: adcq %rdx, %r10 -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %r14, %rsi -; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %r11, %rbp +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload -; X64-NEXT: movq %r14, %rax -; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rcx, %rbx -; X64-NEXT: adcq $0, %rsi -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: mulq %r11 -; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: addq %rdi, %rbx +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rcx, %r11 +; X64-NEXT: mulq %r9 +; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: addq %rbx, %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %rsi, %rcx -; X64-NEXT: sbbq %rbx, %rbx -; X64-NEXT: andl $1, %ebx -; X64-NEXT: movq %r14, %rax -; X64-NEXT: mulq %r11 -; X64-NEXT: movq %rdx, %r11 -; X64-NEXT: movq %rax, %rsi -; X64-NEXT: addq %rcx, %rsi -; X64-NEXT: adcq %rbx, %r11 -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload -; X64-NEXT: addq %r9, %rsi -; X64-NEXT: adcq %r15, %r11 +; X64-NEXT: adcq %rbp, %rdi +; X64-NEXT: setb %cl +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq %rsi, %rbp +; X64-NEXT: mulq %r9 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %rdi, %rbx +; X64-NEXT: movzbl %cl, %eax +; X64-NEXT: adcq %rax, %rsi +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: addq %r14, %rbx +; X64-NEXT: adcq %r15, %rsi ; X64-NEXT: adcq $0, %r8 ; X64-NEXT: adcq $0, %r10 -; X64-NEXT: movq %rdi, %rbx -; X64-NEXT: movq %rbx, %rax +; X64-NEXT: movq %r11, %rax ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %r9 -; X64-NEXT: movq %r14, %rax +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq %rbp, %r14 ; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdi, %r15 ; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rax, %rbp ; X64-NEXT: addq %rcx, %rbp ; X64-NEXT: adcq $0, %rdi -; X64-NEXT: movq %rbx, %rax -; X64-NEXT: movq %r12, %r13 -; X64-NEXT: mulq %r13 +; X64-NEXT: movq %r11, %rax +; X64-NEXT: mulq %r12 ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: addq %rbp, %rax -; X64-NEXT: movq %rax, %rbp +; X64-NEXT: movq %rax, %r11 ; X64-NEXT: adcq %rdi, %rcx -; X64-NEXT: sbbq %rdi, %rdi -; X64-NEXT: andl $1, %edi +; X64-NEXT: setb %dil ; X64-NEXT: movq %r14, %rax -; X64-NEXT: mulq %r13 +; X64-NEXT: mulq %r12 ; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rdi, %rdx -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload +; X64-NEXT: movzbl %dil, %ecx +; X64-NEXT: adcq %rcx, %rdx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r13 # 8-byte Reload +; X64-NEXT: addq %r13, %rdi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload ; 
X64-NEXT: movq -{{[0-9]+}}(%rsp), %r14 # 8-byte Reload -; X64-NEXT: addq %r14, %rbx -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload -; X64-NEXT: addq %rax, %rbx -; X64-NEXT: adcq %rdx, %rcx -; X64-NEXT: addq %rsi, %r9 +; X64-NEXT: adcq %r14, %rbp +; X64-NEXT: addq %rax, %rdi +; X64-NEXT: adcq %rdx, %rbp +; X64-NEXT: addq %rbx, %r9 ; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r11, %rbp -; X64-NEXT: movq %rbp, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq $0, %rbx -; X64-NEXT: adcq $0, %rcx -; X64-NEXT: addq %r8, %rbx -; X64-NEXT: adcq %r10, %rcx -; X64-NEXT: movl $0, %r12d -; X64-NEXT: adcq $0, %r12 -; X64-NEXT: sbbq %r9, %r9 -; X64-NEXT: andl $1, %r9d -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload -; X64-NEXT: movq %rbp, %rax +; X64-NEXT: adcq %rsi, %r11 +; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: addq %r8, %rdi +; X64-NEXT: adcq %r10, %rbp +; X64-NEXT: setb %r9b +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax ; X64-NEXT: mulq %r15 -; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: movq %rdx, %r10 ; X64-NEXT: movq %rax, %r11 -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload -; X64-NEXT: movq %r10, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload +; X64-NEXT: movq %r8, %rax ; X64-NEXT: mulq %r15 ; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: movq %rax, %rdi -; X64-NEXT: addq %r8, %rdi +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %r10, %rbx ; X64-NEXT: adcq $0, %rsi -; X64-NEXT: movq %rbp, %rax -; X64-NEXT: mulq %r13 -; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %r12 +; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %r15 -; X64-NEXT: addq %rdi, %r15 -; X64-NEXT: adcq %rsi, %r8 -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi -; X64-NEXT: movq %r10, %rax -; X64-NEXT: movq %r10, %rbp -; X64-NEXT: mulq %r13 -; X64-NEXT: addq %r8, %rax -; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: addq %rbx, %r15 +; X64-NEXT: adcq %rsi, %rcx +; X64-NEXT: setb %bl +; X64-NEXT: movq %r8, %rax +; X64-NEXT: mulq %r12 +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: movzbl %bl, %ecx +; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload -; X64-NEXT: movq %r10, %rsi -; X64-NEXT: addq %r14, %rsi -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload -; X64-NEXT: movq %r8, %rdi -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload -; X64-NEXT: addq %rax, %rsi -; X64-NEXT: adcq %rdx, %rdi -; X64-NEXT: addq %rbx, %r11 -; X64-NEXT: adcq %rcx, %r15 -; X64-NEXT: adcq %r12, %rsi -; X64-NEXT: adcq %r9, %rdi +; X64-NEXT: movq %r10, %rcx +; X64-NEXT: addq %r13, %rcx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload +; X64-NEXT: movq %rbx, %rsi +; X64-NEXT: movq %rbx, %r12 +; X64-NEXT: adcq %r14, %rsi +; X64-NEXT: addq %rax, %rcx +; X64-NEXT: adcq %rdx, %rsi +; X64-NEXT: addq %rdi, %r11 +; X64-NEXT: adcq %rbp, %r15 +; X64-NEXT: movzbl %r9b, %eax +; X64-NEXT: adcq %rax, %rcx +; X64-NEXT: adcq $0, %rsi ; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload ; X64-NEXT: addq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload ; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 
8-byte Spill ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload ; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload ; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload -; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq $0, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill ; X64-NEXT: adcq $0, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill ; X64-NEXT: adcq $0, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: adcq $0, {{[0-9]+}}(%rsp) # 8-byte Folded Spill ; X64-NEXT: adcq $0, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: movq %rdi, %rax @@ -4937,9 +4788,10 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %r14 +; X64-NEXT: movq %r8, %rbp ; X64-NEXT: movq %rbp, %rax ; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rcx, %r13 +; X64-NEXT: movq %rcx, %r11 ; X64-NEXT: movq %rdx, %rbx ; X64-NEXT: movq %rax, %rcx ; X64-NEXT: addq %rsi, %rcx @@ -4948,291 +4800,279 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: movq %rax, %r11 -; X64-NEXT: addq %rcx, %r11 +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: addq %rcx, %r8 ; X64-NEXT: adcq %rbx, %rsi -; X64-NEXT: sbbq %rcx, %rcx -; X64-NEXT: andl $1, %ecx +; X64-NEXT: setb %cl ; X64-NEXT: movq %rbp, %rax ; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdi, %r15 ; X64-NEXT: addq %rsi, %rax +; X64-NEXT: movzbl %cl, %ecx ; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq %r10, %r9 -; X64-NEXT: addq {{[0-9]+}}(%rsp), %r9 # 8-byte Folded Reload -; X64-NEXT: movq %r8, %r12 -; X64-NEXT: adcq (%rsp), %r12 # 8-byte Folded Reload +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r9 # 8-byte Folded Reload +; X64-NEXT: movq %r12, %r10 +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload ; X64-NEXT: addq %rax, %r9 -; X64-NEXT: adcq %rdx, %r12 -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq %r13, %rbp -; X64-NEXT: mulq %rbp -; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: adcq %rdx, %r10 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %r11 +; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload -; X64-NEXT: movq %r13, %rax -; X64-NEXT: mulq %rbp +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %r8, %rbx +; X64-NEXT: addq %rcx, %rbx ; X64-NEXT: adcq $0, %rsi -; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq %rcx, %rbp +; X64-NEXT: movq %rbp, %rax ; X64-NEXT: mulq %r15 ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: addq %rbx, %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq %rsi, %rcx -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi -; X64-NEXT: movq %r13, %rax +; X64-NEXT: setb %sil +; X64-NEXT: movq %rdi, %rax ; X64-NEXT: mulq %r15 -; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: movq %rdx, %r15 ; X64-NEXT: movq %rax, %rbx ; X64-NEXT: addq %rcx, %rbx -; X64-NEXT: adcq %rsi, %r8 +; X64-NEXT: movzbl 
%sil, %eax +; X64-NEXT: adcq %rax, %r15 ; X64-NEXT: addq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload ; X64-NEXT: addq %r14, %rbx -; X64-NEXT: adcq %r11, %r8 +; X64-NEXT: adcq %r8, %r15 ; X64-NEXT: adcq $0, %r9 -; X64-NEXT: adcq $0, %r12 +; X64-NEXT: adcq $0, %r10 ; X64-NEXT: movq %rbp, %rsi ; X64-NEXT: movq %rsi, %rax ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, %r14 -; X64-NEXT: movq %rax, %r10 -; X64-NEXT: movq %r13, %rax +; X64-NEXT: movq %rax, %r12 +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %rdi, %r8 ; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rcx ; X64-NEXT: addq %r14, %rcx -; X64-NEXT: adcq $0, %rdi -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload -; X64-NEXT: movq 56(%rax), %r15 +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: movq 56(%rax), %rdi ; X64-NEXT: movq %rsi, %rax -; X64-NEXT: mulq %r15 +; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: addq %rcx, %rax -; X64-NEXT: movq %rax, %r11 -; X64-NEXT: adcq %rdi, %rsi -; X64-NEXT: sbbq %rcx, %rcx -; X64-NEXT: andl $1, %ecx -; X64-NEXT: movq %r13, %rax -; X64-NEXT: mulq %r15 +; X64-NEXT: movq %rax, %r14 +; X64-NEXT: addq %rcx, %r14 +; X64-NEXT: adcq %rbp, %rsi +; X64-NEXT: setb %cl +; X64-NEXT: movq %r8, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdi, %r8 ; X64-NEXT: addq %rsi, %rax +; X64-NEXT: movzbl %cl, %ecx ; X64-NEXT: adcq %rcx, %rdx -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload -; X64-NEXT: addq %r13, %rdi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload +; X64-NEXT: addq %r11, %rcx ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload -; X64-NEXT: adcq %rbp, %rsi -; X64-NEXT: addq %rax, %rdi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload +; X64-NEXT: adcq %r13, %rsi +; X64-NEXT: addq %rax, %rcx ; X64-NEXT: adcq %rdx, %rsi -; X64-NEXT: addq %rbx, %r10 -; X64-NEXT: movq %r10, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r8, %r11 -; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq $0, %rdi +; X64-NEXT: addq %rbx, %r12 +; X64-NEXT: adcq %r15, %r14 +; X64-NEXT: adcq $0, %rcx ; X64-NEXT: adcq $0, %rsi -; X64-NEXT: addq %r9, %rdi -; X64-NEXT: adcq %r12, %rsi -; X64-NEXT: movl $0, %r14d -; X64-NEXT: adcq $0, %r14 -; X64-NEXT: sbbq %r10, %r10 -; X64-NEXT: andl $1, %r10d -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload -; X64-NEXT: movq %rbx, %rax -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %r8 -; X64-NEXT: movq %rax, %r12 -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload -; X64-NEXT: movq %r9, %rax -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %r11 -; X64-NEXT: movq %rax, %rcx -; X64-NEXT: addq %r8, %rcx -; X64-NEXT: adcq $0, %r11 -; X64-NEXT: movq %rbx, %rax -; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: mulq %r15 -; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: addq %r9, %rcx +; X64-NEXT: adcq %r10, %rsi +; X64-NEXT: setb {{[0-9]+}}(%rsp) # 1-byte Folded Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte 
Reload +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %r9 +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r10 # 8-byte Reload +; X64-NEXT: movq %r10, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %r15 ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rcx, %rbx -; X64-NEXT: adcq %r11, %r8 -; X64-NEXT: sbbq %rcx, %rcx -; X64-NEXT: andl $1, %ecx -; X64-NEXT: movq %r9, %rax -; X64-NEXT: mulq %r15 -; X64-NEXT: addq %r8, %rax -; X64-NEXT: adcq %rcx, %rdx +; X64-NEXT: addq %r9, %rbx +; X64-NEXT: adcq $0, %r15 +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq %r8, %rdi +; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %r9 +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: addq %rbx, %r8 +; X64-NEXT: adcq %r15, %r9 +; X64-NEXT: setb %bl +; X64-NEXT: movq %r10, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: addq %r9, %rax +; X64-NEXT: movzbl %bl, %edi +; X64-NEXT: adcq %rdi, %rdx ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15 # 8-byte Reload -; X64-NEXT: addq %r13, %r15 -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload -; X64-NEXT: adcq %rbp, %r11 +; X64-NEXT: addq %r11, %r15 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: adcq %r13, %rbp ; X64-NEXT: addq %rax, %r15 -; X64-NEXT: adcq %rdx, %r11 -; X64-NEXT: addq %rdi, %r12 -; X64-NEXT: adcq %rsi, %rbx -; X64-NEXT: adcq %r14, %r15 -; X64-NEXT: adcq %r10, %r11 -; X64-NEXT: addq {{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: adcq %rdx, %rbp +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; X64-NEXT: addq %rcx, %rdx +; X64-NEXT: adcq %rsi, %r8 +; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # 1-byte Folded Reload +; X64-NEXT: adcq %rax, %r15 +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: addq {{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: addq %rcx, {{[0-9]+}}(%rsp) # 8-byte Folded Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: adcq %rcx, {{[0-9]+}}(%rsp) # 8-byte Folded Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: adcq %rcx, {{[0-9]+}}(%rsp) # 8-byte Folded Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: adcq %rcx, {{[0-9]+}}(%rsp) # 8-byte Folded Spill -; X64-NEXT: adcq $0, %r12 -; X64-NEXT: adcq $0, %rbx -; X64-NEXT: adcq $0, %r15 -; X64-NEXT: adcq $0, %r11 -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: addq %rax, {{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: adcq %rax, {{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload ; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload -; X64-NEXT: movq %rbx, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload -; X64-NEXT: movl $0, %eax -; X64-NEXT: adcq $0, %rax -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movl $0, %eax -; X64-NEXT: 
adcq $0, %rax -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movl $0, %eax -; X64-NEXT: adcq $0, %rax -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: sbbq %rax, %rax -; X64-NEXT: andl $1, %eax -; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload +; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rdx +; X64-NEXT: adcq $0, %r8 +; X64-NEXT: adcq $0, %r15 +; X64-NEXT: adcq $0, %rbp +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload +; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload +; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload +; X64-NEXT: setb -{{[0-9]+}}(%rsp) # 1-byte Folded Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload -; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rdx, %r8 -; X64-NEXT: movq %rax, %r13 -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, %rax -; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rdi, %r14 -; X64-NEXT: movq %rdx, %rbx -; X64-NEXT: movq %rax, %rbp -; X64-NEXT: addq %r8, %rbp -; X64-NEXT: adcq $0, %rbx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rdx, %r11 +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rsi, %r10 +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %r11, %rbx +; X64-NEXT: adcq $0, %rdi ; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8 # 8-byte Reload -; X64-NEXT: mulq %r8 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: mulq %rsi ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %r12 -; X64-NEXT: addq %rbp, %r12 -; X64-NEXT: adcq %rbx, %rcx -; X64-NEXT: sbbq %rbp, %rbp -; X64-NEXT: andl $1, %ebp -; X64-NEXT: movq %rsi, %rax -; X64-NEXT: mulq %r8 +; X64-NEXT: addq %rbx, %r12 +; X64-NEXT: adcq %rdi, %rcx +; X64-NEXT: setb %bl +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rsi, %r9 ; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rbp, %rdx -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload -; X64-NEXT: addq {{[0-9]+}}(%rsp), %r9 # 8-byte Folded Reload -; X64-NEXT: movq (%rsp), %r10 # 8-byte Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload -; X64-NEXT: addq %rax, %r9 -; X64-NEXT: adcq %rdx, %r10 -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %r14, %rbx -; X64-NEXT: mulq %rbx -; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: movq %rax, (%rsp) # 8-byte Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, %rax -; X64-NEXT: mulq %rbx -; X64-NEXT: movq %rdx, %rbx -; X64-NEXT: movq %rax, %rbp -; X64-NEXT: addq %rcx, %rbp -; X64-NEXT: adcq $0, %rbx -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %rdi, %r14 -; X64-NEXT: mulq %r8 -; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: addq %rbp, %rax +; X64-NEXT: movzbl %bl, %ecx +; X64-NEXT: adcq %rcx, %rdx +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload +; X64-NEXT: addq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte 
Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: addq %rax, %r8 +; X64-NEXT: adcq %rdx, %rcx +; X64-NEXT: movq %rcx, %r14 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %r10, %rdi +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %r11 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %rbx, %rcx -; X64-NEXT: sbbq %rdi, %rdi -; X64-NEXT: andl $1, %edi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload ; X64-NEXT: movq %rsi, %rax -; X64-NEXT: mulq %r8 -; X64-NEXT: movq %rdx, %rbx -; X64-NEXT: movq %rax, %rbp -; X64-NEXT: addq %rcx, %rbp -; X64-NEXT: adcq %rdi, %rbx -; X64-NEXT: addq {{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload -; X64-NEXT: addq %r13, %rbp -; X64-NEXT: adcq %r12, %rbx -; X64-NEXT: adcq $0, %r9 -; X64-NEXT: movq %r9, %r12 -; X64-NEXT: adcq $0, %r10 -; X64-NEXT: movq %r10, %r8 -; X64-NEXT: movq %r14, %rax -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %r11, %rbx +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rcx, %r13 +; X64-NEXT: mulq %r9 ; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: movq %rax, %r9 +; X64-NEXT: addq %rbx, %rax +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %rdi, %rcx +; X64-NEXT: setb %bl ; X64-NEXT: movq %rsi, %rax -; X64-NEXT: movq %rsi, %r10 -; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: mulq %r9 +; X64-NEXT: movq %rdx, %r11 ; X64-NEXT: movq %rax, %rdi ; X64-NEXT: addq %rcx, %rdi -; X64-NEXT: adcq $0, %rsi -; X64-NEXT: movq %r14, %rax +; X64-NEXT: movzbl %bl, %eax +; X64-NEXT: adcq %rax, %r11 +; X64-NEXT: addq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload +; X64-NEXT: addq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload +; X64-NEXT: adcq %r12, %r11 +; X64-NEXT: adcq $0, %r8 +; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %r14 +; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r13, %rbx +; X64-NEXT: movq %rbx, %rax ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %r14 -; X64-NEXT: addq %rdi, %rax -; X64-NEXT: movq %rax, %rdi -; X64-NEXT: adcq %rsi, %r14 -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi -; X64-NEXT: movq %r10, %rax +; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: movq %rax, %r12 +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq %rsi, %r9 ; X64-NEXT: mulq %rcx -; X64-NEXT: addq %r14, %rax -; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movq %rcx, %r10 +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rcx +; X64-NEXT: addq %r8, %rcx +; X64-NEXT: adcq $0, %rsi +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload +; X64-NEXT: mulq %r13 +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: adcq %rsi, %rbx +; X64-NEXT: setb %cl +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %r13 +; X64-NEXT: movq %r13, %r9 +; X64-NEXT: addq %rbx, %rax +; X64-NEXT: movzbl %cl, %ecx +; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r13 # 8-byte Reload +; X64-NEXT: addq %r13, %rsi +; X64-NEXT: movq (%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload -; X64-NEXT: addq %r14, 
%rsi -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload -; X64-NEXT: adcq %r13, %rcx +; X64-NEXT: adcq %r14, %rcx ; X64-NEXT: addq %rax, %rsi ; X64-NEXT: adcq %rdx, %rcx -; X64-NEXT: addq %rbp, %r9 -; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %rbx, %rdi -; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: addq %rdi, %r12 +; X64-NEXT: adcq %r11, %r8 +; X64-NEXT: movq %r8, %r11 ; X64-NEXT: adcq $0, %rsi ; X64-NEXT: adcq $0, %rcx -; X64-NEXT: addq %r12, %rsi +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload ; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r8, %rcx -; X64-NEXT: movq %rcx, %r12 -; X64-NEXT: movl $0, %r10d -; X64-NEXT: adcq $0, %r10 -; X64-NEXT: sbbq %r9, %r9 -; X64-NEXT: andl $1, %r9d +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: movq %rcx, (%rsp) # 8-byte Spill +; X64-NEXT: setb -{{[0-9]+}}(%rsp) # 1-byte Folded Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload ; X64-NEXT: movq %rbx, %rax -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq %r10, %rsi ; X64-NEXT: mulq %rsi ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill @@ -5244,82 +5084,85 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X64-NEXT: addq %rcx, %rdi ; X64-NEXT: adcq $0, %rsi ; X64-NEXT: movq %rbx, %rax -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload -; X64-NEXT: mulq %rbp +; X64-NEXT: mulq %r9 ; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rdi, %rbx +; X64-NEXT: movq %rax, %r10 +; X64-NEXT: addq %rdi, %r10 ; X64-NEXT: adcq %rsi, %rcx -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi +; X64-NEXT: setb %bl ; X64-NEXT: movq %r8, %rax -; X64-NEXT: mulq %rbp +; X64-NEXT: mulq %r9 ; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movzbl %bl, %ecx +; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: addq %r14, %rsi +; X64-NEXT: addq %r13, %rsi ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: adcq %r13, %rcx +; X64-NEXT: adcq %r14, %rcx ; X64-NEXT: addq %rax, %rsi ; X64-NEXT: adcq %rdx, %rcx -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r13 # 8-byte Reload -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload -; X64-NEXT: adcq %r12, %rbx -; X64-NEXT: adcq %r10, %rsi -; X64-NEXT: adcq %r9, %rcx -; X64-NEXT: addq {{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r14 # 8-byte Reload +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload +; X64-NEXT: adcq (%rsp), %r10 # 8-byte Folded Reload +; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax # 1-byte Folded Reload +; X64-NEXT: adcq %rax, %rsi +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: addq {{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload -; X64-NEXT: addq %rax, (%rsp) # 8-byte Folded Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload -; X64-NEXT: adcq %rax, {{[0-9]+}}(%rsp) # 8-byte Folded Spill -; X64-NEXT: adcq %r15, {{[0-9]+}}(%rsp) # 8-byte Folded Spill -; X64-NEXT: adcq %r11, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill -; 
X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload -; X64-NEXT: movq %r13, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload -; X64-NEXT: movq %rbx, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: addq %rax, {{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: adcq %rax, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: adcq %r15, %r12 +; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %rbp, %r11 +; X64-NEXT: movq %r11, (%rsp) # 8-byte Spill +; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax # 1-byte Folded Reload +; X64-NEXT: adcq %rax, %r14 +; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %r10 +; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rsi ; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: adcq $0, %rcx ; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload -; X64-NEXT: movq 64(%r9), %r11 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq 64(%rcx), %r11 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: movq %rdi, %rax ; X64-NEXT: mulq %r11 -; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r10 # 8-byte Reload -; X64-NEXT: movq %r10, %rax +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %r13 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload +; X64-NEXT: movq %r9, %rax ; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rcx, %rbx +; X64-NEXT: addq %rsi, %rbx ; X64-NEXT: adcq $0, %rbp -; X64-NEXT: movq 72(%r9), %rcx +; X64-NEXT: movq 72(%rcx), %rsi ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rcx, %rsi -; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rsi, %rcx +; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %r8 ; X64-NEXT: addq %rbx, %r8 -; X64-NEXT: adcq %rbp, %rcx -; X64-NEXT: sbbq %rbp, %rbp -; X64-NEXT: andl $1, %ebp -; X64-NEXT: movq %r10, %rax -; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rsi, %r10 +; X64-NEXT: adcq %rbp, %rsi +; X64-NEXT: setb %bl +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rcx, %r10 ; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %rdi -; X64-NEXT: addq %rcx, %rdi -; X64-NEXT: adcq %rbp, %rsi +; X64-NEXT: addq %rsi, %rdi +; X64-NEXT: movzbl %bl, %eax +; X64-NEXT: adcq %rax, %rcx ; X64-NEXT: movq %r11, %rax -; X64-NEXT: xorl %ecx, %ecx -; X64-NEXT: mulq %rcx +; X64-NEXT: xorl %edx, %edx +; X64-NEXT: mulq %rdx ; X64-NEXT: movq %rax, %rbx ; X64-NEXT: movq %rdx, %r14 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload @@ -5327,7 +5170,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r15 # 8-byte Reload ; X64-NEXT: adcq %r14, %r15 ; X64-NEXT: addq %rdi, %r12 -; X64-NEXT: adcq %rsi, %r15 +; X64-NEXT: adcq %rcx, %r15 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq %r11, %rsi @@ -5335,8 +5178,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X64-NEXT: 
mulq %rsi ; X64-NEXT: movq %rdx, %r11 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload -; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload +; X64-NEXT: movq %r9, %rax ; X64-NEXT: mulq %rsi ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %rdi @@ -5349,165 +5192,159 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X64-NEXT: addq %rdi, %rax ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq %rsi, %rcx -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi -; X64-NEXT: movq %rbp, %rax -; X64-NEXT: movq %rbp, %rdi +; X64-NEXT: setb %sil +; X64-NEXT: movq %r9, %rax ; X64-NEXT: mulq %r10 ; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movzbl %sil, %ecx +; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: addq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload -; X64-NEXT: adcq %r13, %r14 +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload ; X64-NEXT: addq %rax, %rbx ; X64-NEXT: adcq %rdx, %r14 -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: addq %r13, %rbx ; X64-NEXT: adcq %r8, %r14 ; X64-NEXT: adcq $0, %r12 ; X64-NEXT: adcq $0, %r15 -; X64-NEXT: movq %r9, %rbp -; X64-NEXT: movq 80(%rbp), %r8 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq 80(%rbp), %rdi ; X64-NEXT: movq %r11, %rax -; X64-NEXT: movq %r11, %r9 -; X64-NEXT: mulq %r8 -; X64-NEXT: movq %rdx, %r10 -; X64-NEXT: movq %rax, %r11 -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: mulq %r8 +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: movq %rax, %r13 +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %rcx -; X64-NEXT: addq %r10, %rcx +; X64-NEXT: addq %r8, %rcx ; X64-NEXT: adcq $0, %rsi ; X64-NEXT: movq 88(%rbp), %r10 -; X64-NEXT: movq %r9, %rax +; X64-NEXT: movq %r11, %rax ; X64-NEXT: mulq %r10 ; X64-NEXT: movq %rdx, %rbp -; X64-NEXT: movq %rax, %r9 -; X64-NEXT: addq %rcx, %r9 +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: addq %rcx, %r8 ; X64-NEXT: adcq %rsi, %rbp -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi -; X64-NEXT: movq %rdi, %rax +; X64-NEXT: setb %r11b +; X64-NEXT: movq %r9, %rax ; X64-NEXT: mulq %r10 -; X64-NEXT: movq %rdx, %rdi -; X64-NEXT: movq %rax, %rcx -; X64-NEXT: addq %rbp, %rcx -; X64-NEXT: adcq %rsi, %rdi -; X64-NEXT: movq %r8, %rax +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: addq %rbp, %rsi +; X64-NEXT: movzbl %r11b, %eax +; X64-NEXT: adcq %rax, %rcx +; X64-NEXT: movq %rdi, %rax ; X64-NEXT: xorl %edx, %edx ; X64-NEXT: mulq %rdx -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: addq %rax, %rsi -; X64-NEXT: movq %r13, %rax +; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, %r9 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: addq %r9, %rbp +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: adcq %rdx, %rax -; X64-NEXT: movq %rdx, %r13 -; X64-NEXT: addq %rcx, %rsi -; X64-NEXT: adcq %rdi, %rax -; X64-NEXT: addq %rbx, %r11 -; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r14, %r9 -; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq $0, %rsi +; X64-NEXT: addq %rsi, %rbp +; X64-NEXT: adcq %rcx, %rax +; X64-NEXT: addq %rbx, %r13 +; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 
8-byte Spill +; X64-NEXT: adcq %r14, %r8 +; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rbp ; X64-NEXT: adcq $0, %rax -; X64-NEXT: addq %r12, %rsi -; X64-NEXT: movq %rsi, %r14 +; X64-NEXT: addq %r12, %rbp +; X64-NEXT: movq %rbp, %r8 ; X64-NEXT: adcq %r15, %rax -; X64-NEXT: movq %rax, %r9 -; X64-NEXT: movl $0, %r12d -; X64-NEXT: adcq $0, %r12 -; X64-NEXT: sbbq %r11, %r11 -; X64-NEXT: andl $1, %r11d -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: mulq %r8 -; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: movq %rax, %r15 +; X64-NEXT: movq %rax, %r11 +; X64-NEXT: setb %r14b +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %r15 +; X64-NEXT: movq %rax, %r12 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload ; X64-NEXT: movq %rbp, %rax -; X64-NEXT: mulq %r8 +; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rcx, %rbx +; X64-NEXT: addq %r15, %rbx ; X64-NEXT: adcq $0, %rsi -; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %rcx, %rax ; X64-NEXT: mulq %r10 ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: addq %rbx, %rax -; X64-NEXT: movq %rax, %rdi +; X64-NEXT: movq %rax, %rbx ; X64-NEXT: adcq %rsi, %rcx -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi +; X64-NEXT: setb %sil ; X64-NEXT: movq %rbp, %rax ; X64-NEXT: mulq %r10 ; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movzbl %sil, %ecx +; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: addq %r9, %rsi ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: adcq %r13, %rcx +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload ; X64-NEXT: addq %rax, %rsi ; X64-NEXT: adcq %rdx, %rcx -; X64-NEXT: addq %r14, %r15 -; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r9, %rdi -; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r12, %rsi +; X64-NEXT: addq %r8, %r12 +; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r11, %rbx +; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movzbl %r14b, %eax +; X64-NEXT: adcq %rax, %rsi ; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r11, %rcx +; X64-NEXT: adcq $0, %rcx ; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: imulq %rax, %r10 -; X64-NEXT: movq %rax, %rdi -; X64-NEXT: mulq %r8 -; X64-NEXT: movq %rax, %r9 +; X64-NEXT: movq %rax, %r14 +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rax, %r8 ; X64-NEXT: addq %r10, %rdx -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload -; X64-NEXT: imulq %r10, %r8 -; X64-NEXT: addq %rdx, %r8 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: imulq %rbp, %rdi +; X64-NEXT: addq %rdx, %rdi ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: movq %rax, %rsi -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload -; X64-NEXT: imulq %rbx, %rsi -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload -; X64-NEXT: mulq %rbp -; X64-NEXT: movq %rax, %r11 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload +; X64-NEXT: imulq %r11, %rsi +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rax, %r9 ; X64-NEXT: addq %rsi, %rdx -; X64-NEXT: movq 
-{{[0-9]+}}(%rsp), %rax # 8-byte Reload -; X64-NEXT: imulq %rbp, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: imulq %rcx, %rax ; X64-NEXT: addq %rdx, %rax -; X64-NEXT: addq %r9, %r11 -; X64-NEXT: adcq %r8, %rax -; X64-NEXT: movq %rax, %r14 -; X64-NEXT: movq %rbp, %rax -; X64-NEXT: mulq %rdi +; X64-NEXT: addq %r8, %r9 +; X64-NEXT: adcq %rdi, %rax +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rcx, %rdi +; X64-NEXT: mulq %r14 ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rbx, %rax -; X64-NEXT: movq %rbx, %r8 -; X64-NEXT: mulq %rdi +; X64-NEXT: movq %r11, %rax +; X64-NEXT: mulq %r14 ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %rbx ; X64-NEXT: addq %rcx, %rbx ; X64-NEXT: adcq $0, %rsi -; X64-NEXT: movq %rbp, %rax -; X64-NEXT: movq %r10, %rbp +; X64-NEXT: movq %rdi, %rax ; X64-NEXT: mulq %rbp ; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rax, %r15 ; X64-NEXT: addq %rbx, %r15 ; X64-NEXT: adcq %rsi, %rdi -; X64-NEXT: sbbq %rcx, %rcx -; X64-NEXT: andl $1, %ecx -; X64-NEXT: movq %r8, %rax +; X64-NEXT: setb %cl +; X64-NEXT: movq %r11, %rax ; X64-NEXT: mulq %rbp ; X64-NEXT: movq %rdx, %r12 -; X64-NEXT: movq %rax, %r9 -; X64-NEXT: addq %rdi, %r9 -; X64-NEXT: adcq %rcx, %r12 -; X64-NEXT: addq %r11, %r9 -; X64-NEXT: adcq %r14, %r12 -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; X64-NEXT: movq %rax, %r13 +; X64-NEXT: addq %rdi, %r13 +; X64-NEXT: movzbl %cl, %eax +; X64-NEXT: adcq %rax, %r12 +; X64-NEXT: addq %r9, %r13 +; X64-NEXT: adcq %r8, %r12 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload ; X64-NEXT: movq 120(%rdx), %rcx ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload ; X64-NEXT: imulq %r10, %rcx @@ -5526,12 +5363,12 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X64-NEXT: movq %rax, %rcx ; X64-NEXT: imulq %rbx, %rcx ; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rax, %r13 +; X64-NEXT: movq %rax, %r9 ; X64-NEXT: addq %rcx, %rdx ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: imulq %rdi, %rax ; X64-NEXT: addq %rdx, %rax -; X64-NEXT: addq %r11, %r13 +; X64-NEXT: addq %r11, %r9 ; X64-NEXT: adcq %rsi, %rax ; X64-NEXT: movq %rax, %r11 ; X64-NEXT: movq %rdi, %rax @@ -5540,371 +5377,367 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X64-NEXT: movq %rax, %r14 ; X64-NEXT: movq %rbx, %rax ; X64-NEXT: mulq %r10 -; X64-NEXT: movq %rdx, %rbp -; X64-NEXT: movq %rax, %rsi -; X64-NEXT: addq %rcx, %rsi -; X64-NEXT: adcq $0, %rbp +; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rcx, %rbp +; X64-NEXT: adcq $0, %rsi ; X64-NEXT: movq %rdi, %rax ; X64-NEXT: mulq %r8 ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %rdi -; X64-NEXT: addq %rsi, %rdi -; X64-NEXT: adcq %rbp, %rcx -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi +; X64-NEXT: addq %rbp, %rdi +; X64-NEXT: adcq %rsi, %rcx +; X64-NEXT: setb %sil ; X64-NEXT: movq %rbx, %rax ; X64-NEXT: mulq %r8 ; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rsi, %rdx -; X64-NEXT: addq %r13, %rax +; X64-NEXT: movzbl %sil, %ecx +; X64-NEXT: adcq %rcx, %rdx +; X64-NEXT: addq %r9, %rax ; X64-NEXT: adcq %r11, %rdx ; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload ; X64-NEXT: adcq %r15, %rdi -; X64-NEXT: adcq %r9, %rax +; X64-NEXT: adcq %r13, %rax ; X64-NEXT: adcq %r12, %rdx ; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload ; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq 
-{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload -; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload -; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload ; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq 80(%rsi), %rcx -; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8 # 8-byte Reload -; X64-NEXT: mulq %r8 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq 80(%rsi), %rdi +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rdx, %r9 -; X64-NEXT: movq 88(%rsi), %r10 -; X64-NEXT: movq %rsi, %r12 -; X64-NEXT: movq %r10, %rax -; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: movq 88(%rsi), %rax +; X64-NEXT: movq %rsi, %r9 +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rcx, %r11 ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %r9, %rbx +; X64-NEXT: addq %r8, %rbx ; X64-NEXT: adcq $0, %rbp -; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq %rcx, %r9 -; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15 # 8-byte Reload ; X64-NEXT: mulq %r15 -; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: addq %rbx, %rax -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %rbp, %rsi -; X64-NEXT: sbbq %rdi, %rdi -; X64-NEXT: andl $1, %edi -; X64-NEXT: movq %r10, %rax +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %r14 +; X64-NEXT: addq %rbx, %r14 +; X64-NEXT: adcq %rbp, %rcx +; X64-NEXT: setb %r8b +; X64-NEXT: movq %rsi, %rax ; X64-NEXT: mulq %r15 ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rsi, %rbx -; X64-NEXT: adcq %rdi, %rbp -; X64-NEXT: movq %r9, %rax +; X64-NEXT: addq %rcx, %rbx +; X64-NEXT: movzbl %r8b, %eax +; X64-NEXT: adcq %rax, %rbp +; X64-NEXT: movq %rdi, %rax ; X64-NEXT: xorl %ecx, %ecx ; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %r11 -; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rax, %r10 -; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload -; X64-NEXT: addq %r9, %r10 -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload -; X64-NEXT: adcq %r13, %r11 -; X64-NEXT: addq %rbx, %r10 -; X64-NEXT: adcq %rbp, %r11 -; X64-NEXT: movq %r12, %rcx -; X64-NEXT: movq 64(%rcx), %rdi -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: mulq %r8 -; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: movq 72(%rcx), %r14 -; X64-NEXT: movq %r14, %rax -; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r12 # 8-byte Reload +; X64-NEXT: addq %r12, %rsi +; X64-NEXT: movq %rdx, %r10 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8 # 8-byte Reload +; X64-NEXT: adcq %r8, %r10 +; X64-NEXT: addq %rbx, %rsi +; X64-NEXT: adcq 
%rbp, %r10 +; X64-NEXT: movq %r9, %rdi +; X64-NEXT: movq 64(%rdi), %r13 +; X64-NEXT: movq %r13, %rax +; X64-NEXT: mulq %r11 +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq 72(%rdi), %r9 +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rsi, %rbx +; X64-NEXT: addq %rcx, %rbx ; X64-NEXT: adcq $0, %rbp -; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %r13, %rax ; X64-NEXT: mulq %r15 -; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: addq %rbx, %rax -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %rbp, %rsi -; X64-NEXT: sbbq %rcx, %rcx -; X64-NEXT: andl $1, %ecx -; X64-NEXT: movq %r14, %rax +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %rbp, %rcx +; X64-NEXT: setb %r11b +; X64-NEXT: movq %r9, %rax +; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %r15 ; X64-NEXT: movq %rdx, %rbx ; X64-NEXT: movq %rax, %rbp -; X64-NEXT: addq %rsi, %rbp -; X64-NEXT: adcq %rcx, %rbx -; X64-NEXT: movq %rdi, %rax +; X64-NEXT: addq %rcx, %rbp +; X64-NEXT: movzbl %r11b, %eax +; X64-NEXT: adcq %rax, %rbx +; X64-NEXT: movq %r13, %rax ; X64-NEXT: xorl %ecx, %ecx ; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %r8 +; X64-NEXT: movq %rdx, %r11 ; X64-NEXT: movq %rax, %r15 -; X64-NEXT: addq %r15, %r9 -; X64-NEXT: movq %r13, %rax -; X64-NEXT: adcq %r8, %rax -; X64-NEXT: addq %rbp, %r9 -; X64-NEXT: adcq %rbx, %rax -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r9 # 8-byte Folded Reload -; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload -; X64-NEXT: movq %rax, %r13 +; X64-NEXT: movq %r12, %rcx +; X64-NEXT: addq %r15, %rcx +; X64-NEXT: adcq %r11, %r8 +; X64-NEXT: addq %rbp, %rcx +; X64-NEXT: adcq %rbx, %r8 +; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r14, %r8 +; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq $0, %rsi ; X64-NEXT: adcq $0, %r10 -; X64-NEXT: adcq $0, %r11 -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: mulq %rsi +; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r13, %rax +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: movq %rax, %rbx -; X64-NEXT: movq %r14, %rax -; X64-NEXT: movq %r14, %r12 -; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rsi, %r14 -; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: movq %rax, %r12 +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdi, %r8 +; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rax, %rbp ; X64-NEXT: addq %rcx, %rbp -; X64-NEXT: adcq $0, %rsi -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %r9 -; X64-NEXT: movq %rax, %rdi -; X64-NEXT: addq %rbp, %rdi -; X64-NEXT: adcq %rsi, %r9 -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi -; X64-NEXT: movq %r12, %rax -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rcx, %rbp -; X64-NEXT: addq %r9, %rax -; X64-NEXT: adcq %rsi, %rdx -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r12 # 8-byte Reload -; X64-NEXT: addq %r12, %r15 -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload +; X64-NEXT: adcq 
$0, %rdi +; X64-NEXT: movq %r13, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload +; X64-NEXT: mulq %rbx +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: addq %rbp, %rax +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: adcq %rdi, %rcx +; X64-NEXT: setb %dil +; X64-NEXT: movq %r9, %rax +; X64-NEXT: mulq %rbx +; X64-NEXT: addq %rcx, %rax +; X64-NEXT: movzbl %dil, %ecx +; X64-NEXT: adcq %rcx, %rdx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload +; X64-NEXT: addq %r14, %r15 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload +; X64-NEXT: adcq %r13, %r11 ; X64-NEXT: addq %rax, %r15 -; X64-NEXT: adcq %rdx, %r8 -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload -; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r13, %rdi -; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %rdx, %r11 +; X64-NEXT: addq {{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload +; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload +; X64-NEXT: movq %rbp, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq $0, %r15 -; X64-NEXT: adcq $0, %r8 -; X64-NEXT: addq %r10, %r15 -; X64-NEXT: adcq %r11, %r8 -; X64-NEXT: movl $0, %r9d -; X64-NEXT: adcq $0, %r9 -; X64-NEXT: sbbq %r13, %r13 -; X64-NEXT: andl $1, %r13d -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq %r14, %rsi -; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rdx, %r14 -; X64-NEXT: movq %rax, %r10 -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rsi, %r11 -; X64-NEXT: movq %rdx, %rsi +; X64-NEXT: adcq $0, %r11 +; X64-NEXT: addq %rsi, %r15 +; X64-NEXT: adcq %r10, %r11 +; X64-NEXT: setb %r10b +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq %r8, %rdi +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %r9 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdi, %r12 +; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %r14, %rbx -; X64-NEXT: adcq $0, %rsi -; X64-NEXT: movq %rcx, %rax -; X64-NEXT: mulq %rbp +; X64-NEXT: addq %rcx, %rbx +; X64-NEXT: adcq $0, %rdi +; X64-NEXT: movq %rsi, %rax +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; X64-NEXT: mulq %rsi ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: addq %rbx, %rax ; X64-NEXT: movq %rax, %rbx -; X64-NEXT: adcq %rsi, %rcx -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %rdi, %r14 -; X64-NEXT: mulq %rbp +; X64-NEXT: adcq %rdi, %rcx +; X64-NEXT: setb %r8b +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: mulq %rsi +; X64-NEXT: movq %rsi, %rdi ; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movzbl %r8b, %ecx +; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: addq %r12, %rsi +; X64-NEXT: addq %r14, %rsi ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: adcq %r13, %rcx ; X64-NEXT: addq %rax, %rsi ; X64-NEXT: adcq %rdx, %rcx -; X64-NEXT: addq %r15, %r10 -; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r8, %rbx +; X64-NEXT: addq %r15, %r9 +; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: adcq %r11, %rbx ; X64-NEXT: movq %rbx, {{[0-9]+}}(%rsp) # 
8-byte Spill -; X64-NEXT: adcq %r9, %rsi +; X64-NEXT: movzbl %r10b, %eax +; X64-NEXT: adcq %rax, %rsi ; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r13, %rcx +; X64-NEXT: adcq $0, %rcx ; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq 96(%rsi), %rcx -; X64-NEXT: imulq %rcx, %rbp +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: movq 96(%rbp), %rcx +; X64-NEXT: imulq %rcx, %rdi ; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq %r11, %rdi -; X64-NEXT: mulq %rdi +; X64-NEXT: movq %r12, %rsi +; X64-NEXT: mulq %rsi ; X64-NEXT: movq %rax, %r9 -; X64-NEXT: addq %rbp, %rdx -; X64-NEXT: movq 104(%rsi), %r8 -; X64-NEXT: imulq %r8, %rdi -; X64-NEXT: addq %rdx, %rdi -; X64-NEXT: movq %rdi, %r10 -; X64-NEXT: movq 112(%rsi), %rax -; X64-NEXT: movq %rsi, %rbp -; X64-NEXT: movq %rax, %rdi +; X64-NEXT: addq %rdi, %rdx +; X64-NEXT: movq 104(%rbp), %r8 +; X64-NEXT: imulq %r8, %rsi +; X64-NEXT: addq %rdx, %rsi +; X64-NEXT: movq %rsi, %r11 +; X64-NEXT: movq 112(%rbp), %rax +; X64-NEXT: movq %rbp, %rdi +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: imulq %rbp, %rsi ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload +; X64-NEXT: mulq %rbx +; X64-NEXT: movq %rax, %r10 +; X64-NEXT: addq %rsi, %rdx +; X64-NEXT: movq 120(%rdi), %rdi ; X64-NEXT: imulq %rbx, %rdi -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rax, %r11 -; X64-NEXT: addq %rdi, %rdx -; X64-NEXT: movq 120(%rbp), %rdi -; X64-NEXT: imulq %rsi, %rdi ; X64-NEXT: addq %rdx, %rdi -; X64-NEXT: addq %r9, %r11 -; X64-NEXT: adcq %r10, %rdi -; X64-NEXT: movq %rsi, %rax -; X64-NEXT: movq %rsi, %r10 -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: addq %r9, %r10 +; X64-NEXT: adcq %r11, %rdi ; X64-NEXT: movq %rbx, %rax -; X64-NEXT: movq %rbx, %r9 +; X64-NEXT: movq %rbx, %rsi +; X64-NEXT: mulq %rcx +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq %rbp, %r9 ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: movq %rax, %rbx -; X64-NEXT: addq %rsi, %rbx +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rbx, %rbp ; X64-NEXT: adcq $0, %rcx -; X64-NEXT: movq %r10, %rax +; X64-NEXT: movq %rsi, %rax ; X64-NEXT: mulq %r8 ; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: movq %rax, %r15 -; X64-NEXT: addq %rbx, %r15 +; X64-NEXT: movq %rax, %r12 +; X64-NEXT: addq %rbp, %r12 ; X64-NEXT: adcq %rcx, %rsi -; X64-NEXT: sbbq %rcx, %rcx -; X64-NEXT: andl $1, %ecx +; X64-NEXT: setb %cl ; X64-NEXT: movq %r9, %rax ; X64-NEXT: mulq %r8 ; X64-NEXT: movq %rdx, %rbx -; X64-NEXT: movq %rax, %r8 -; X64-NEXT: addq %rsi, %r8 -; X64-NEXT: adcq %rcx, %rbx -; X64-NEXT: addq %r11, %r8 +; X64-NEXT: movq %rax, %rbp +; X64-NEXT: addq %rsi, %rbp +; X64-NEXT: movzbl %cl, %eax +; X64-NEXT: adcq %rax, %rbx +; X64-NEXT: addq %r10, %rbp ; X64-NEXT: adcq %rdi, %rbx ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: imulq %rax, %rsi -; X64-NEXT: movq %rax, %r9 +; X64-NEXT: movq %rax, %r13 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rax, %r11 +; X64-NEXT: movq %rax, %r8 ; X64-NEXT: addq %rsi, %rdx -; X64-NEXT: movq {{[0-9]+}}(%rsp), %r12 # 8-byte Reload -; X64-NEXT: imulq %r12, %rcx +; X64-NEXT: movq 
-{{[0-9]+}}(%rsp), %r11 # 8-byte Reload +; X64-NEXT: imulq %r11, %rcx ; X64-NEXT: addq %rdx, %rcx -; X64-NEXT: movq %rcx, %rbp +; X64-NEXT: movq %rcx, %r9 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: movq %rax, %rcx -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: imulq %rsi, %rcx -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload -; X64-NEXT: mulq %rdi +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15 # 8-byte Reload +; X64-NEXT: imulq %r15, %rcx +; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload +; X64-NEXT: mulq %r14 ; X64-NEXT: movq %rax, %r10 ; X64-NEXT: addq %rcx, %rdx -; X64-NEXT: movq %r14, %r13 -; X64-NEXT: imulq %rdi, %r13 -; X64-NEXT: addq %rdx, %r13 -; X64-NEXT: addq %r11, %r10 -; X64-NEXT: adcq %rbp, %r13 -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %rdi, %r11 -; X64-NEXT: mulq %r9 -; X64-NEXT: movq %rdx, %rbp -; X64-NEXT: movq %rax, %rdi -; X64-NEXT: movq %rsi, %rax -; X64-NEXT: movq %rsi, %r14 -; X64-NEXT: mulq %r9 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: imulq %r14, %rax +; X64-NEXT: addq %rdx, %rax +; X64-NEXT: addq %r8, %r10 +; X64-NEXT: adcq %r9, %rax +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r14, %rax +; X64-NEXT: mulq %r13 +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %r8 +; X64-NEXT: movq %r15, %rax +; X64-NEXT: mulq %r13 ; X64-NEXT: movq %rdx, %r9 ; X64-NEXT: movq %rax, %rcx -; X64-NEXT: addq %rbp, %rcx +; X64-NEXT: addq %rdi, %rcx ; X64-NEXT: adcq $0, %r9 -; X64-NEXT: movq %r11, %rax -; X64-NEXT: movq %r12, %rbp -; X64-NEXT: mulq %rbp -; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: movq %rax, %r11 -; X64-NEXT: addq %rcx, %r11 -; X64-NEXT: adcq %r9, %rsi -; X64-NEXT: sbbq %rcx, %rcx -; X64-NEXT: andl $1, %ecx ; X64-NEXT: movq %r14, %rax -; X64-NEXT: mulq %rbp -; X64-NEXT: addq %rsi, %rax +; X64-NEXT: mulq %r11 +; X64-NEXT: movq %rdx, %rdi +; X64-NEXT: movq %rax, %rsi +; X64-NEXT: addq %rcx, %rsi +; X64-NEXT: adcq %r9, %rdi +; X64-NEXT: setb %cl +; X64-NEXT: movq %r15, %rax +; X64-NEXT: mulq %r11 +; X64-NEXT: addq %rdi, %rax +; X64-NEXT: movzbl %cl, %ecx ; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: addq %r10, %rax -; X64-NEXT: adcq %r13, %rdx -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload -; X64-NEXT: adcq %r15, %r11 -; X64-NEXT: adcq %r8, %rax +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload +; X64-NEXT: addq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload +; X64-NEXT: adcq %r12, %rsi +; X64-NEXT: adcq %rbp, %rax ; X64-NEXT: adcq %rbx, %rdx -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload +; X64-NEXT: addq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload ; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload ; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: addq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload ; X64-NEXT: adcq 
{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload -; X64-NEXT: addq (%rsp), %rcx # 8-byte Folded Reload -; X64-NEXT: movq %rcx, %r8 -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload -; X64-NEXT: movq %rsi, %r9 +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload -; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload +; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload +; X64-NEXT: addq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload +; X64-NEXT: movq %rcx, %r9 ; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload -; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload +; X64-NEXT: movq %rdi, %r10 +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload +; X64-NEXT: adcq (%rsp), %rbx # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload ; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, (%rcx) -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, 8(%rcx) -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, 16(%rcx) -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, 24(%rcx) -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, 32(%rcx) -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, 40(%rcx) -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, 48(%rcx) -; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload -; X64-NEXT: movq %rsi, 56(%rcx) -; X64-NEXT: movq %r8, 64(%rcx) -; X64-NEXT: movq %r9, 72(%rcx) -; X64-NEXT: movq %rbx, 80(%rcx) -; X64-NEXT: movq %rbp, 88(%rcx) -; X64-NEXT: movq %rdi, 96(%rcx) -; X64-NEXT: movq %r11, 104(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, (%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 8(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 16(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 24(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 32(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 40(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 48(%rcx) +; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; X64-NEXT: movq %rdi, 56(%rcx) +; X64-NEXT: movq %r9, 64(%rcx) +; X64-NEXT: movq %r10, 72(%rcx) +; X64-NEXT: movq %rbp, 80(%rcx) +; X64-NEXT: movq %rbx, 88(%rcx) +; X64-NEXT: movq %r8, 96(%rcx) +; X64-NEXT: movq %rsi, 104(%rcx) ; X64-NEXT: movq %rax, 112(%rcx) ; X64-NEXT: movq %rdx, 120(%rcx) ; X64-NEXT: addq $352, %rsp # imm = 0x160 diff --git 
a/test/CodeGen/X86/mul-i256.ll b/test/CodeGen/X86/mul-i256.ll
index 341484718652..acd86e949894 100644
--- a/test/CodeGen/X86/mul-i256.ll
+++ b/test/CodeGen/X86/mul-i256.ll
@@ -3,7 +3,6 @@
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
define void @test(i256* %a, i256* %b, i256* %out) #0 {
; X32-LABEL: test:
@@ -138,18 +137,17 @@ define void @test(i256* %a, i256* %b, i256* %out) #0 {
; X32-NEXT: adcl $0, %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: xorl %edx, %edx
; X32-NEXT: addl %ecx, %edi
; X32-NEXT: adcl %eax, %ebx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
@@ -205,76 +203,70 @@ define void @test(i256* %a, i256* %b, i256* %out) #0 {
; X64-NEXT: pushq %r14
; X64-NEXT: .Lcfi1:
; X64-NEXT: .cfi_def_cfa_offset 24
-; X64-NEXT: pushq %r12
+; X64-NEXT: pushq %rbx
; X64-NEXT: .Lcfi2:
; X64-NEXT: .cfi_def_cfa_offset 32
-; X64-NEXT: pushq %rbx
; X64-NEXT: .Lcfi3:
-; X64-NEXT: .cfi_def_cfa_offset 40
+; X64-NEXT: .cfi_offset %rbx, -32
; X64-NEXT: .Lcfi4:
-; X64-NEXT: .cfi_offset %rbx, -40
-; X64-NEXT: .Lcfi5:
-; X64-NEXT: .cfi_offset %r12, -32
-; X64-NEXT: .Lcfi6:
; X64-NEXT: .cfi_offset %r14, -24
-; X64-NEXT: .Lcfi7:
+; X64-NEXT: .Lcfi5:
; X64-NEXT: .cfi_offset %r15, -16
; X64-NEXT: movq %rdx, %r9
-; X64-NEXT: movq (%rdi), %r14
+; X64-NEXT: movq (%rdi), %r11
; X64-NEXT: movq 8(%rdi), %r8
-; X64-NEXT: movq 16(%rdi), %rcx
-; X64-NEXT: movq 16(%rsi), %rbx
-; X64-NEXT: movq (%rsi), %r12
+; X64-NEXT: movq 16(%rdi), %rbx
+; X64-NEXT: movq 16(%rsi), %r10
+; X64-NEXT: movq (%rsi), %rcx
; X64-NEXT: movq 8(%rsi), %r15
; X64-NEXT: movq 24(%rdi), %rdi
-; X64-NEXT: imulq %r12, %rdi
-; X64-NEXT: movq %r12, %rax
-; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rax, %r10
+; X64-NEXT: imulq %rcx, %rdi
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: mulq %rbx
+; X64-NEXT: movq %rax, %r14
; X64-NEXT: addq %rdi, %rdx
-; X64-NEXT: imulq %r15, %rcx
-; X64-NEXT: addq %rdx, %rcx
-; X64-NEXT: movq %rbx, %rdi
+; X64-NEXT: imulq %r15, %rbx
+; X64-NEXT: addq %rdx, %rbx
+; X64-NEXT: movq %r10, %rdi
; X64-NEXT: imulq %r8, %rdi
-; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: mulq %r14
-; X64-NEXT: movq %rax, %r11
+; X64-NEXT: movq %r10, %rax
+; X64-NEXT: mulq %r11
+; X64-NEXT: movq %rax, %r10
; X64-NEXT: addq %rdi, %rdx
-; X64-NEXT: movq 24(%rsi), %rbx
-; X64-NEXT: imulq %r14, %rbx
-; X64-NEXT: addq %rdx, %rbx
-; X64-NEXT: addq %r10, %r11
-; X64-NEXT: adcq %rcx, %rbx
-; X64-NEXT: movq %r14, %rax
-; X64-NEXT: mulq %r12
+; X64-NEXT: movq 24(%rsi), %rdi
+; X64-NEXT: imulq %r11, %rdi
+; X64-NEXT: addq %rdx, %rdi
+; X64-NEXT: addq %r14, %r10
+; X64-NEXT: adcq %rbx, %rdi
+; X64-NEXT: movq %r11, %rax
+; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: movq %rax, %r10
+; X64-NEXT: movq %rax, %r14
; X64-NEXT: movq %r8, %rax
-; X64-NEXT: mulq %r12
+; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %rcx
-; X64-NEXT: movq %rax, %rdi
-; X64-NEXT: addq %rsi, %rdi
+; X64-NEXT: movq %rax, %rbx
+; X64-NEXT: addq %rsi, %rbx
; X64-NEXT: adcq $0, %rcx
-; X64-NEXT: movq %r14, %rax
+; X64-NEXT: movq %r11, %rax
; X64-NEXT: mulq %r15
; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: movq %rax, %r14
-; X64-NEXT: addq %rdi, %r14
+; X64-NEXT: movq %rax, %r11
+; X64-NEXT: addq %rbx, %r11
; X64-NEXT: adcq %rcx, %rsi
-; X64-NEXT: sbbq %rcx, %rcx
-; X64-NEXT: andl $1, %ecx
+; X64-NEXT: setb %al
+; X64-NEXT: movzbl %al, %ecx
; X64-NEXT: movq %r8, %rax
; X64-NEXT: mulq %r15
; X64-NEXT: addq %rsi, %rax
; X64-NEXT: adcq %rcx, %rdx
-; X64-NEXT: addq %r11, %rax
-; X64-NEXT: adcq %rbx, %rdx
-; X64-NEXT: movq %r10, (%r9)
-; X64-NEXT: movq %r14, 8(%r9)
+; X64-NEXT: addq %r10, %rax
+; X64-NEXT: adcq %rdi, %rdx
+; X64-NEXT: movq %r14, (%r9)
+; X64-NEXT: movq %r11, 8(%r9)
; X64-NEXT: movq %rax, 16(%r9)
; X64-NEXT: movq %rdx, 24(%r9)
; X64-NEXT: popq %rbx
-; X64-NEXT: popq %r12
; X64-NEXT: popq %r14
; X64-NEXT: popq %r15
; X64-NEXT: retq
@@ -286,4 +278,4 @@ entry:
ret void
}
-attributes #0 = { norecurse nounwind uwtable "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" }
+attributes #0 = { norecurse nounwind uwtable }
diff --git a/test/CodeGen/X86/mul-i512.ll b/test/CodeGen/X86/mul-i512.ll
index 14fbeae52796..3da17b69ffb5 100644
--- a/test/CodeGen/X86/mul-i512.ll
+++ b/test/CodeGen/X86/mul-i512.ll
@@ -74,14 +74,13 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: movl 20(%eax), %edi
; X32-NEXT: movl 24(%eax), %ebx
; X32-NEXT: movl 28(%eax), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl %eax
; X32-NEXT: pushl %ebx
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
@@ -107,6 +106,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: pushl $0
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
@@ -123,8 +123,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: pushl $0
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: pushl %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl %eax
; X32-NEXT: calll __multi3
; X32-NEXT: addl $32, %esp
@@ -133,10 +132,11 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: pushl $0
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
; X32-NEXT: pushl %esi
-; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
-; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl %eax
; X32-NEXT: calll __multi3
@@ -145,25 +145,24 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: pushl %esi
+;
X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl %edi -; X32-NEXT: pushl %ebx +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl %eax ; X32-NEXT: calll __multi3 ; X32-NEXT: addl $32, %esp ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: pushl %ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: pushl %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %ebx ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl %eax ; X32-NEXT: calll __multi3 @@ -172,7 +171,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload -; X32-NEXT: pushl %esi +; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload @@ -183,14 +182,14 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: pushl %ebx ; X32-NEXT: pushl %edi +; X32-NEXT: pushl %esi ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: pushl %edi -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload -; X32-NEXT: pushl %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx ; X32-NEXT: pushl %eax ; X32-NEXT: calll __multi3 ; X32-NEXT: addl $32, %esp @@ -198,8 +197,8 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: pushl %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload @@ -213,7 +212,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl %edi -; X32-NEXT: pushl %esi +; X32-NEXT: pushl %ebx ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl %eax @@ -223,11 +222,11 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload -; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %esi ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: pushl %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl %eax ; X32-NEXT: calll __multi3 @@ -240,20 +239,20 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl %edi -; X32-NEXT: pushl %esi +; X32-NEXT: pushl %ebx ; X32-NEXT: pushl %eax ; X32-NEXT: calll __multi3 ; X32-NEXT: addl $32, %esp ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload -; 
X32-NEXT: pushl %esi ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: pushl %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %esi ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl %eax ; X32-NEXT: calll __multi3 @@ -262,8 +261,8 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: pushl %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload @@ -274,21 +273,21 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: pushl %esi ; X32-NEXT: pushl %edi +; X32-NEXT: pushl %ebx ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi ; X32-NEXT: pushl %esi -; X32-NEXT: pushl %ebx ; X32-NEXT: pushl %eax ; X32-NEXT: calll __multi3 ; X32-NEXT: addl $32, %esp ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: pushl %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %ebx ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 @@ -298,11 +297,11 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: calll __multi3 ; X32-NEXT: addl $32, %esp ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax -; X32-NEXT: pushl %esi -; X32-NEXT: pushl %ebx ; X32-NEXT: pushl %edi -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: pushl %esi ; X32-NEXT: pushl %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: pushl %esi ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload @@ -313,8 +312,8 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: pushl %edi ; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %esi ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload @@ -323,7 +322,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: calll __multi3 ; X32-NEXT: addl $32, %esp ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax -; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl (%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload @@ -349,10 +348,10 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: pushl %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload ; X32-NEXT: pushl %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: pushl %edi ; X32-NEXT: pushl $0 
; X32-NEXT: pushl $0 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload @@ -365,18 +364,18 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: pushl %esi ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload +; X32-NEXT: pushl %esi ; X32-NEXT: pushl %eax ; X32-NEXT: calll __multi3 ; X32-NEXT: addl $32, %esp ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 -; X32-NEXT: pushl %edi ; X32-NEXT: pushl %ebx +; X32-NEXT: pushl %edi ; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload @@ -494,134 +493,142 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: adcl $0, %ecx ; X32-NEXT: addl %esi, %eax ; X32-NEXT: adcl %edx, %ecx -; X32-NEXT: movl $0, %edi -; X32-NEXT: adcl $0, %edi -; X32-NEXT: sbbl %edx, %edx -; X32-NEXT: andl $1, %edx +; X32-NEXT: setb %dl ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movzbl %dl, %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: addl %edx, %ebx -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %esi, %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %edx, %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %esi, %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %edx, %ebx +; X32-NEXT: movl %ebx, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %esi, %edx -; X32-NEXT: addl %eax, %ebx +; X32-NEXT: movl %esi, (%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload +; X32-NEXT: adcl %esi, %ebx +; X32-NEXT: movl %ebx, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: addl %eax, %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %ecx, %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: addl 
{{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, %ecx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl $0, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi ; X32-NEXT: adcl $0, %esi -; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: setb %al +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl $0, %ecx -; X32-NEXT: addl %edx, %eax -; X32-NEXT: adcl %esi, %ecx -; X32-NEXT: movl $0, %edx -; X32-NEXT: adcl $0, %edx -; X32-NEXT: sbbl %esi, %esi -; X32-NEXT: andl $1, %esi -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: addl %edi, %ebx -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: addl %eax, %ebx +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: adcl %ecx, %edi -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: adcl %edx, %eax +; X32-NEXT: addl %eax, %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload -; X32-NEXT: adcl %esi, %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: adcl %eax, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl %eax, (%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %ebx, %eax +; X32-NEXT: addl %edx, %edi +; X32-NEXT: movl %ecx, %edx +; X32-NEXT: adcl %esi, %edx +; 
X32-NEXT: movl (%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %edi ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, %ecx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi -; X32-NEXT: adcl $0, %edi -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx -; X32-NEXT: adcl $0, %ebx -; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl $0, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi ; X32-NEXT: adcl $0, %esi -; X32-NEXT: addl %edi, %edx -; X32-NEXT: adcl %ebx, %esi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ebx, %ebx -; X32-NEXT: andl $1, %ebx +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: addl %eax, %ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload -; X32-NEXT: addl %ecx, %eax +; X32-NEXT: adcl %ecx, %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload @@ -629,140 +636,125 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; 
X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: addl %edx, %eax -; X32-NEXT: movl %eax, %edx -; X32-NEXT: adcl %esi, %ecx -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: adcl %ebx, %edi +; X32-NEXT: addl %edx, %ebx +; X32-NEXT: adcl %esi, %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: movl (%esp), %eax # 4-byte Reload ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: movl %edx, %eax +; X32-NEXT: adcl $0, %ebx +; X32-NEXT: movl %esi, %eax ; X32-NEXT: adcl $0, %eax ; X32-NEXT: adcl $0, %ecx -; X32-NEXT: movl %esi, %edx ; X32-NEXT: adcl $0, %edx -; X32-NEXT: movl %edi, %esi -; X32-NEXT: adcl $0, %esi -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload +; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload -; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: sbbl %eax, %eax -; X32-NEXT: andl $1, %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: setb (%esp) # 1-byte Folded Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx -; X32-NEXT: adcl $0, %edx -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi -; X32-NEXT: adcl $0, %esi -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl $0, %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: adcl $0, %ecx -; X32-NEXT: addl %edx, %eax -; X32-NEXT: adcl %esi, %ecx -; X32-NEXT: movl $0, %edx -; X32-NEXT: adcl $0, %edx -; X32-NEXT: sbbl %esi, %esi -; X32-NEXT: andl $1, %esi 
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: adcl $0, %edx +; X32-NEXT: addl %eax, %esi +; X32-NEXT: adcl %ecx, %edx +; X32-NEXT: setb %al +; X32-NEXT: addl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movzbl %al, %ebx +; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill -; X32-NEXT: addl %eax, %edi -; X32-NEXT: adcl %ecx, %ebx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload -; X32-NEXT: adcl %esi, %ecx -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: addl {{[0-9]+}}(%esp), %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload +; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: addl %esi, %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %eax, %esi +; X32-NEXT: adcl %edx, %esi ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload -; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl %ebx, %edx +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload -; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: addl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload +; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: movzbl (%esp), %eax # 1-byte Folded Reload +; X32-NEXT: adcl 
%eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill +; X32-NEXT: adcl $0, %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, %edi +; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %edx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax +; X32-NEXT: movl %eax, %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi -; X32-NEXT: adcl $0, %edi -; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx -; X32-NEXT: adcl $0, %ebx -; X32-NEXT: addl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl $0, %ecx +; X32-NEXT: addl {{[0-9]+}}(%esp), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl $0, %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi ; X32-NEXT: adcl $0, %esi -; X32-NEXT: addl %edi, %edx -; X32-NEXT: adcl %ebx, %esi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx +; X32-NEXT: addl %eax, %edx +; X32-NEXT: adcl %ecx, %esi +; X32-NEXT: setb %al ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, (%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx @@ -777,7 +769,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload +; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload ; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx @@ -789,25 +781,24 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: adcl $0, %ecx ; X32-NEXT: addl {{[0-9]+}}(%esp), %edx -; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %edx, (%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi -; X32-NEXT: adcl $0, %esi ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi ; X32-NEXT: adcl $0, %edi -; X32-NEXT: addl %eax, %esi -; X32-NEXT: adcl %ecx, %edi -; X32-NEXT: movl $0, %eax -; X32-NEXT: adcl $0, %eax -; X32-NEXT: sbbl %ecx, %ecx -; X32-NEXT: andl $1, %ecx -; X32-NEXT: addl {{[0-9]+}}(%esp), %esi -; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: adcl $0, %esi +; X32-NEXT: addl %eax, %edi +; X32-NEXT: adcl %ecx, %esi 
+; X32-NEXT: setb %al +; X32-NEXT: addl {{[0-9]+}}(%esp), %edi +; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movzbl %al, %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl {{[0-9]+}}(%esp), %eax +; X32-NEXT: adcl $0, %eax +; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload @@ -828,8 +819,8 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax -; X32-NEXT: addl %esi, %ebx -; X32-NEXT: adcl %edi, %ecx +; X32-NEXT: addl %edi, %ebx +; X32-NEXT: adcl %esi, %ecx ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi @@ -838,7 +829,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload +; X32-NEXT: movl (%esp), %edi # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload @@ -853,7 +844,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload ; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload -; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl %edi, (%esp) # 4-byte Spill ; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload @@ -864,36 +855,36 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl {{[0-9]+}}(%esp), %esi -; X32-NEXT: movl 16(%ebp), %edi -; X32-NEXT: movl %esi, 4(%edi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %edi ; X32-NEXT: movl 16(%ebp), %esi -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: movl %edi, (%esi) -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: movl %edi, 8(%esi) -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: movl %edi, 12(%esi) -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: movl %edi, 16(%esi) -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: movl %edi, 20(%esi) -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: movl %edi, 24(%esi) -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: movl %edi, 28(%esi) -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: movl %edi, 32(%esi) -; X32-NEXT: movl %ebx, 36(%esi) -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: movl %edi, 40(%esi) -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: movl %edi, 44(%esi) -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload 
-; X32-NEXT: movl %edi, 48(%esi) -; X32-NEXT: movl %ecx, 52(%esi) -; X32-NEXT: movl %edx, 56(%esi) -; X32-NEXT: movl %eax, 60(%esi) +; X32-NEXT: movl %edi, 4(%esi) +; X32-NEXT: movl 16(%ebp), %edi +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: movl %esi, (%edi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: movl %esi, 8(%edi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: movl %esi, 12(%edi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: movl %esi, 16(%edi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: movl %esi, 20(%edi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: movl %esi, 24(%edi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: movl %esi, 28(%edi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: movl %esi, 32(%edi) +; X32-NEXT: movl %ebx, 36(%edi) +; X32-NEXT: movl (%esp), %esi # 4-byte Reload +; X32-NEXT: movl %esi, 40(%edi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: movl %esi, 44(%edi) +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: movl %esi, 48(%edi) +; X32-NEXT: movl %ecx, 52(%edi) +; X32-NEXT: movl %edx, 56(%edi) +; X32-NEXT: movl %eax, 60(%edi) ; X32-NEXT: leal -12(%ebp), %esp ; X32-NEXT: popl %esi ; X32-NEXT: popl %edi @@ -912,35 +903,36 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X64-NEXT: pushq %rax ; X64-NEXT: movq %rdx, (%rsp) # 8-byte Spill ; X64-NEXT: movq 24(%rdi), %r11 -; X64-NEXT: movq 16(%rdi), %r14 +; X64-NEXT: movq 16(%rdi), %r15 ; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq (%rsi), %rdx ; X64-NEXT: movq 8(%rsi), %rbp -; X64-NEXT: movq %r14, %rax +; X64-NEXT: movq %r15, %rax ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: mulq %rsi -; X64-NEXT: movq %rdx, %r8 -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdx, %r9 +; X64-NEXT: movq %rax, %r8 ; X64-NEXT: movq %r11, %rax ; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %rsi ; X64-NEXT: movq %rsi, %r10 ; X64-NEXT: movq %rdx, %rbx ; X64-NEXT: movq %rax, %rsi -; X64-NEXT: addq %r8, %rsi +; X64-NEXT: addq %r9, %rsi ; X64-NEXT: adcq $0, %rbx -; X64-NEXT: movq %r14, %rax -; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r15, %rax +; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %rbp ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %r9 ; X64-NEXT: addq %rsi, %r9 ; X64-NEXT: adcq %rbx, %rcx -; X64-NEXT: sbbq %rbx, %rbx -; X64-NEXT: andl $1, %ebx +; X64-NEXT: setb %al +; X64-NEXT: movzbl %al, %ebx ; X64-NEXT: movq %r11, %rax ; X64-NEXT: mulq %rbp -; X64-NEXT: movq %rbp, %r8 +; X64-NEXT: movq %rbp, %r14 +; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %rbp ; X64-NEXT: addq %rcx, %rbp @@ -952,46 +944,44 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, %r13 ; X64-NEXT: movq %rax, %r10 -; X64-NEXT: movq %r14, %rax +; X64-NEXT: movq %r15, %rax ; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rdx, %r12 -; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, %r15 ; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: addq %r10, %r15 -; X64-NEXT: adcq %r13, %r12 +; X64-NEXT: adcq %r13, %rdx ; X64-NEXT: addq %rbp, %r15 -; X64-NEXT: adcq %rsi, %r12 +; 
X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movq %rdx, %r12 ; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq (%rdi), %r14 -; X64-NEXT: movq %r14, %rax +; X64-NEXT: movq (%rdi), %rcx +; X64-NEXT: movq %rcx, %rax ; X64-NEXT: mulq %rbx ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, %r11 -; X64-NEXT: movq 8(%rdi), %rcx -; X64-NEXT: movq %rcx, %rax -; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq 8(%rdi), %rdi +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %rbx ; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rsi ; X64-NEXT: addq %r11, %rsi ; X64-NEXT: adcq $0, %rbp -; X64-NEXT: movq %r14, %rax -; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: mulq %r8 +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: mulq %r14 ; X64-NEXT: movq %rdx, %rbx ; X64-NEXT: addq %rsi, %rax ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq %rbp, %rbx -; X64-NEXT: sbbq %rdi, %rdi -; X64-NEXT: andl $1, %edi -; X64-NEXT: movq %rcx, %rax -; X64-NEXT: mulq %r8 +; X64-NEXT: setb %r11b +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: mulq %r14 ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %rbp ; X64-NEXT: addq %rbx, %rbp -; X64-NEXT: adcq %rdi, %rsi -; X64-NEXT: movq %r14, %rcx +; X64-NEXT: movzbl %r11b, %eax +; X64-NEXT: adcq %rax, %rsi ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: xorl %edx, %edx ; X64-NEXT: mulq %rdx @@ -1001,10 +991,11 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X64-NEXT: adcq %r14, %r13 ; X64-NEXT: addq %rbp, %r10 ; X64-NEXT: adcq %rsi, %r13 -; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload +; X64-NEXT: addq %r8, %r10 ; X64-NEXT: adcq %r9, %r13 ; X64-NEXT: adcq $0, %r15 ; X64-NEXT: adcq $0, %r12 +; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload ; X64-NEXT: movq 16(%rsi), %r8 ; X64-NEXT: movq %rcx, %rax @@ -1012,7 +1003,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %r8 ; X64-NEXT: movq %rdx, %rdi -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rax, %r12 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: mulq %r8 @@ -1027,14 +1018,14 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X64-NEXT: addq %rbx, %rax ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq %rbp, %rsi -; X64-NEXT: sbbq %rbp, %rbp -; X64-NEXT: andl $1, %ebp +; X64-NEXT: setb %bpl ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdx, %rbx ; X64-NEXT: movq %rax, %r9 ; X64-NEXT: addq %rsi, %r9 -; X64-NEXT: adcq %rbp, %rbx +; X64-NEXT: movzbl %bpl, %eax +; X64-NEXT: adcq %rax, %rbx ; X64-NEXT: movq %r8, %rax ; X64-NEXT: xorl %ecx, %ecx ; X64-NEXT: mulq %rcx @@ -1044,16 +1035,14 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X64-NEXT: adcq %rdx, %r14 ; X64-NEXT: addq %r9, %r11 ; X64-NEXT: adcq %rbx, %r14 -; X64-NEXT: addq %r10, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; X64-NEXT: addq %r10, %r12 +; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq %r13, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill ; X64-NEXT: adcq $0, %r11 ; X64-NEXT: adcq $0, %r14 ; X64-NEXT: addq %r15, %r11 -; X64-NEXT: adcq %r12, %r14 -; X64-NEXT: adcq $0, %rcx -; X64-NEXT: movq %rcx, %r13 -; X64-NEXT: sbbq %r9, %r9 -; X64-NEXT: andl $1, %r9d +; 
X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload +; X64-NEXT: setb %r9b ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: mulq %r8 @@ -1072,12 +1061,12 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X64-NEXT: addq %rbx, %rax ; X64-NEXT: movq %rax, %rbx ; X64-NEXT: adcq %rsi, %rcx -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi +; X64-NEXT: setb %sil ; X64-NEXT: movq %r10, %rax ; X64-NEXT: mulq %rdi ; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rsi, %rdx +; X64-NEXT: movzbl %sil, %ecx +; X64-NEXT: adcq %rcx, %rdx ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload ; X64-NEXT: addq %rbp, %rsi ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload @@ -1088,9 +1077,10 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: adcq %r14, %rbx ; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r13, %rsi +; X64-NEXT: movzbl %r9b, %eax +; X64-NEXT: adcq %rax, %rsi ; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: adcq %r9, %rcx +; X64-NEXT: adcq $0, %rcx ; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq 32(%rcx), %rsi @@ -1105,42 +1095,44 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X64-NEXT: movq 48(%rcx), %rax ; X64-NEXT: movq %rcx, %rbx ; X64-NEXT: movq %rax, %rdi -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload -; X64-NEXT: imulq %r11, %rdi ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload -; X64-NEXT: mulq %rcx -; X64-NEXT: movq %rax, %r12 +; X64-NEXT: imulq %rcx, %rdi +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; X64-NEXT: mulq %rbp +; X64-NEXT: movq %rax, %r14 ; X64-NEXT: addq %rdi, %rdx ; X64-NEXT: movq 56(%rbx), %rbx -; X64-NEXT: imulq %rcx, %rbx +; X64-NEXT: imulq %rbp, %rbx ; X64-NEXT: addq %rdx, %rbx -; X64-NEXT: addq %r10, %r12 +; X64-NEXT: addq %r10, %r14 ; X64-NEXT: adcq %r8, %rbx -; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rbp, %rax +; X64-NEXT: movq %rbp, %r10 ; X64-NEXT: mulq %rsi ; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %r11, %rax +; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %rcx, %r8 ; X64-NEXT: mulq %rsi ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %rbp ; X64-NEXT: addq %rdi, %rbp ; X64-NEXT: adcq $0, %rsi -; X64-NEXT: movq %rcx, %rax +; X64-NEXT: movq %r10, %rax ; X64-NEXT: mulq %r9 ; X64-NEXT: movq %rdx, %rdi -; X64-NEXT: movq %rax, %r15 -; X64-NEXT: addq %rbp, %r15 +; X64-NEXT: movq %rax, %r13 +; X64-NEXT: addq %rbp, %r13 ; X64-NEXT: adcq %rsi, %rdi -; X64-NEXT: sbbq %rsi, %rsi -; X64-NEXT: andl $1, %esi -; X64-NEXT: movq %r11, %rax +; X64-NEXT: setb %cl +; X64-NEXT: movq %r8, %rax ; X64-NEXT: mulq %r9 ; X64-NEXT: movq %rdx, %r11 -; X64-NEXT: movq %rax, %r14 -; X64-NEXT: addq %rdi, %r14 -; X64-NEXT: adcq %rsi, %r11 -; X64-NEXT: addq %r12, %r14 +; X64-NEXT: movq %rax, %r9 +; X64-NEXT: addq %rdi, %r9 +; X64-NEXT: movzbl %cl, %eax +; X64-NEXT: adcq %rax, %r11 +; X64-NEXT: addq %r14, %r9 ; X64-NEXT: adcq %rbx, %r11 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload ; X64-NEXT: movq 56(%rdx), %rcx @@ -1152,49 +1144,50 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X64-NEXT: mulq %rbx ; X64-NEXT: movq %rax, %rsi ; X64-NEXT: addq %rcx, %rdx -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload -; X64-NEXT: imulq %r8, %rbx +; 
X64-NEXT: movq -{{[0-9]+}}(%rsp), %r15 # 8-byte Reload +; X64-NEXT: imulq %r15, %rbx ; X64-NEXT: addq %rdx, %rbx ; X64-NEXT: movq 32(%rbp), %rdi -; X64-NEXT: movq 40(%rbp), %r12 +; X64-NEXT: movq 40(%rbp), %r8 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: movq %rax, %rcx -; X64-NEXT: imulq %r12, %rcx +; X64-NEXT: imulq %r8, %rcx ; X64-NEXT: mulq %rdi -; X64-NEXT: movq %rax, %r9 +; X64-NEXT: movq %rax, %r14 ; X64-NEXT: addq %rcx, %rdx -; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r13 # 8-byte Reload -; X64-NEXT: imulq %rdi, %r13 -; X64-NEXT: addq %rdx, %r13 -; X64-NEXT: addq %rsi, %r9 -; X64-NEXT: adcq %rbx, %r13 +; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; X64-NEXT: imulq %rdi, %rax +; X64-NEXT: addq %rdx, %rax +; X64-NEXT: addq %rsi, %r14 +; X64-NEXT: adcq %rbx, %rax +; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdi, %rax ; X64-NEXT: mulq %r10 -; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rdx, %r12 ; X64-NEXT: movq %rax, %rsi -; X64-NEXT: movq %r12, %rax +; X64-NEXT: movq %r8, %rax ; X64-NEXT: mulq %r10 -; X64-NEXT: movq %rdx, %rbx -; X64-NEXT: movq %rax, %rbp -; X64-NEXT: addq %rcx, %rbp -; X64-NEXT: adcq $0, %rbx -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: mulq %r8 ; X64-NEXT: movq %rdx, %rcx +; X64-NEXT: movq %rax, %rbx +; X64-NEXT: addq %r12, %rbx +; X64-NEXT: adcq $0, %rcx +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: mulq %r15 +; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rax, %rdi -; X64-NEXT: addq %rbp, %rdi -; X64-NEXT: adcq %rbx, %rcx -; X64-NEXT: sbbq %rbx, %rbx -; X64-NEXT: andl $1, %ebx -; X64-NEXT: movq %r12, %rax -; X64-NEXT: mulq %r8 -; X64-NEXT: addq %rcx, %rax -; X64-NEXT: adcq %rbx, %rdx -; X64-NEXT: addq %r9, %rax -; X64-NEXT: adcq %r13, %rdx +; X64-NEXT: addq %rbx, %rdi +; X64-NEXT: adcq %rcx, %rbp +; X64-NEXT: setb %cl +; X64-NEXT: movq %r8, %rax +; X64-NEXT: mulq %r15 +; X64-NEXT: addq %rbp, %rax +; X64-NEXT: movzbl %cl, %ecx +; X64-NEXT: adcq %rcx, %rdx +; X64-NEXT: addq %r14, %rax +; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload ; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload -; X64-NEXT: adcq %r15, %rdi -; X64-NEXT: adcq %r14, %rax +; X64-NEXT: adcq %r13, %rdi +; X64-NEXT: adcq %r9, %rax ; X64-NEXT: adcq %r11, %rdx ; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload ; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload diff --git a/test/CodeGen/X86/oddshuffles.ll b/test/CodeGen/X86/oddshuffles.ll index 0bda41a30c69..d26cf02dd942 100644 --- a/test/CodeGen/X86/oddshuffles.ll +++ b/test/CodeGen/X86/oddshuffles.ll @@ -746,9 +746,9 @@ define void @interleave_24i8_in(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8 ; SSE2-LABEL: interleave_24i8_in: ; SSE2: # BB#0: ; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero +; SSE2-NEXT: movq {{.*#+}} xmm2 = mem[0],zero ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero -; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] -; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; SSE2-NEXT: pxor %xmm2, %xmm2 ; SSE2-NEXT: movdqa %xmm1, %xmm3 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] @@ -791,17 +791,17 @@ define void @interleave_24i8_in(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8 ; SSE42: # BB#0: ; SSE42-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; SSE42-NEXT: movq {{.*#+}} xmm1 = mem[0],zero +; SSE42-NEXT: movq {{.*#+}} xmm2 = mem[0],zero ; SSE42-NEXT: 
punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; SSE42-NEXT: movq {{.*#+}} xmm1 = mem[0],zero -; SSE42-NEXT: movdqa %xmm0, %xmm2 -; SSE42-NEXT: pshufb {{.*#+}} xmm2 = xmm2[0,8],zero,xmm2[1,9],zero,xmm2[2,10],zero,xmm2[3,11],zero,xmm2[4,12],zero,xmm2[5] -; SSE42-NEXT: movdqa %xmm1, %xmm3 +; SSE42-NEXT: movdqa %xmm0, %xmm1 +; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,8],zero,xmm1[1,9],zero,xmm1[2,10],zero,xmm1[3,11],zero,xmm1[4,12],zero,xmm1[5] +; SSE42-NEXT: movdqa %xmm2, %xmm3 ; SSE42-NEXT: pshufb {{.*#+}} xmm3 = zero,zero,xmm3[0],zero,zero,xmm3[1],zero,zero,xmm3[2],zero,zero,xmm3[3],zero,zero,xmm3[4],zero -; SSE42-NEXT: por %xmm2, %xmm3 +; SSE42-NEXT: por %xmm1, %xmm3 ; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[13],zero,xmm0[6,14],zero,xmm0[7,15],zero,xmm0[u,u,u,u,u,u,u,u] -; SSE42-NEXT: pshufb {{.*#+}} xmm1 = zero,xmm1[5],zero,zero,xmm1[6],zero,zero,xmm1[7,u,u,u,u,u,u,u,u] -; SSE42-NEXT: por %xmm0, %xmm1 -; SSE42-NEXT: movq %xmm1, 16(%rdi) +; SSE42-NEXT: pshufb {{.*#+}} xmm2 = zero,xmm2[5],zero,zero,xmm2[6],zero,zero,xmm2[7,u,u,u,u,u,u,u,u] +; SSE42-NEXT: por %xmm0, %xmm2 +; SSE42-NEXT: movq %xmm2, 16(%rdi) ; SSE42-NEXT: movdqu %xmm3, (%rdi) ; SSE42-NEXT: retq ; @@ -809,16 +809,16 @@ define void @interleave_24i8_in(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8 ; AVX: # BB#0: ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero ; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero -; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[0,8],zero,xmm0[1,9],zero,xmm0[2,10],zero,xmm0[3,11],zero,xmm0[4,12],zero,xmm0[5] -; AVX-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm1[0],zero,zero,xmm1[1],zero,zero,xmm1[2],zero,zero,xmm1[3],zero,zero,xmm1[4],zero -; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2 +; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,8],zero,xmm0[1,9],zero,xmm0[2,10],zero,xmm0[3,11],zero,xmm0[4,12],zero,xmm0[5] +; AVX-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm2[0],zero,zero,xmm2[1],zero,zero,xmm2[2],zero,zero,xmm2[3],zero,zero,xmm2[4],zero +; AVX-NEXT: vpor %xmm3, %xmm1, %xmm1 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[13],zero,xmm0[6,14],zero,xmm0[7,15],zero,xmm0[u,u,u,u,u,u,u,u] -; AVX-NEXT: vpshufb {{.*#+}} xmm1 = zero,xmm1[5],zero,zero,xmm1[6],zero,zero,xmm1[7,u,u,u,u,u,u,u,u] -; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,xmm2[5],zero,zero,xmm2[6],zero,zero,xmm2[7,u,u,u,u,u,u,u,u] +; AVX-NEXT: vpor %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vmovq %xmm0, 16(%rdi) -; AVX-NEXT: vmovdqu %xmm2, (%rdi) +; AVX-NEXT: vmovdqu %xmm1, (%rdi) ; AVX-NEXT: retq %s1 = load <8 x i8>, <8 x i8>* %q1, align 4 %s2 = load <8 x i8>, <8 x i8>* %q2, align 4 diff --git a/test/CodeGen/X86/overflow.ll b/test/CodeGen/X86/overflow.ll index ff25b5de4933..00dadc4a80f6 100644 --- a/test/CodeGen/X86/overflow.ll +++ b/test/CodeGen/X86/overflow.ll @@ -27,16 +27,14 @@ define i128 @mulhioverflow(i64 %a, i64 %b, i64 %c) nounwind { ; X32-NEXT: addl $32, %esp ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: andl $1, %edi -; X32-NEXT: xorl %ecx, %ecx ; X32-NEXT: addl {{[0-9]+}}(%esp), %edi ; X32-NEXT: adcl $0, %eax -; X32-NEXT: adcl $0, %ecx -; X32-NEXT: sbbl %edx, %edx -; X32-NEXT: andl $1, %edx +; X32-NEXT: setb %cl +; X32-NEXT: movzbl %cl, %ecx ; X32-NEXT: movl %edi, (%esi) ; X32-NEXT: movl %eax, 4(%esi) ; X32-NEXT: movl %ecx, 8(%esi) -; X32-NEXT: movl %edx, 12(%esi) +; X32-NEXT: movl $0, 12(%esi) ; X32-NEXT: movl %esi, %eax ; X32-NEXT: leal -8(%ebp), %esp ; X32-NEXT: popl %esi diff --git 
a/test/CodeGen/X86/pmul.ll b/test/CodeGen/X86/pmul.ll index 50a661fcca11..88cb7a6d5825 100644 --- a/test/CodeGen/X86/pmul.ll +++ b/test/CodeGen/X86/pmul.ll @@ -1152,9 +1152,9 @@ define <4 x i32> @mul_v4i64_zero_upper(<4 x i32> %val1, <4 x i32> %val2) { ; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3] ; SSE2-NEXT: movdqa %xmm1, %xmm4 ; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] -; SSE2-NEXT: pmuludq %xmm4, %xmm2 ; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3] ; SSE2-NEXT: pmuludq %xmm0, %xmm1 +; SSE2-NEXT: pmuludq %xmm4, %xmm2 ; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[1,3] ; SSE2-NEXT: movaps %xmm2, %xmm0 ; SSE2-NEXT: retq @@ -1166,9 +1166,9 @@ define <4 x i32> @mul_v4i64_zero_upper(<4 x i32> %val1, <4 x i32> %val2) { ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero -; SSE41-NEXT: pmuludq %xmm2, %xmm4 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero ; SSE41-NEXT: pmuludq %xmm3, %xmm0 +; SSE41-NEXT: pmuludq %xmm2, %xmm4 ; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm4[1,3] ; SSE41-NEXT: retq ; @@ -1312,17 +1312,17 @@ define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) { ; SSE2-NEXT: movdqa %xmm1, %xmm5 ; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] ; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3] -; SSE2-NEXT: movdqa %xmm2, %xmm7 -; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1] -; SSE2-NEXT: pmuludq %xmm7, %xmm4 +; SSE2-NEXT: movdqa %xmm2, %xmm8 +; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm6[0],xmm8[1],xmm6[1] ; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3] -; SSE2-NEXT: pmuludq %xmm0, %xmm2 -; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,3],xmm2[1,3] -; SSE2-NEXT: movdqa %xmm3, %xmm0 -; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1] -; SSE2-NEXT: pmuludq %xmm0, %xmm5 +; SSE2-NEXT: movdqa %xmm3, %xmm7 +; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1] ; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm6[2],xmm3[3],xmm6[3] ; SSE2-NEXT: pmuludq %xmm1, %xmm3 +; SSE2-NEXT: pmuludq %xmm7, %xmm5 +; SSE2-NEXT: pmuludq %xmm0, %xmm2 +; SSE2-NEXT: pmuludq %xmm8, %xmm4 +; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,3],xmm2[1,3] ; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,3],xmm3[1,3] ; SSE2-NEXT: movaps %xmm4, %xmm0 ; SSE2-NEXT: movaps %xmm5, %xmm1 @@ -1331,22 +1331,22 @@ define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) { ; SSE41-LABEL: mul_v8i64_zero_upper: ; SSE41: # BB#0: # %entry ; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1] -; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero +; SSE41-NEXT: pmovzxdq {{.*#+}} xmm8 = xmm4[0],zero,xmm4[1],zero ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = xmm0[0],zero,xmm0[1],zero ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm7 = xmm1[0],zero,xmm1[1],zero ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1] -; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero -; SSE41-NEXT: pmuludq %xmm4, %xmm1 +; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero -; SSE41-NEXT: pmuludq %xmm5, %xmm0 -; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3] ; SSE41-NEXT: pshufd 
{{.*#+}} xmm1 = xmm3[2,3,0,1] ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero -; SSE41-NEXT: pmuludq %xmm6, %xmm2 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero ; SSE41-NEXT: pmuludq %xmm7, %xmm1 +; SSE41-NEXT: pmuludq %xmm6, %xmm2 +; SSE41-NEXT: pmuludq %xmm5, %xmm0 +; SSE41-NEXT: pmuludq %xmm8, %xmm4 +; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm4[1,3] ; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3] ; SSE41-NEXT: retq ; @@ -1356,11 +1356,11 @@ define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) { ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero -; AVX2-NEXT: vpmuludq %ymm3, %ymm2, %ymm2 ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero ; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 -; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm2[1,3],ymm0[1,3],ymm2[5,7],ymm0[5,7] +; AVX2-NEXT: vpmuludq %ymm3, %ymm2, %ymm1 +; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,3],ymm0[1,3],ymm1[5,7],ymm0[5,7] ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] ; AVX2-NEXT: retq ; @@ -1467,22 +1467,22 @@ define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) { ; SSE41-LABEL: mul_v8i64_sext: ; SSE41: # BB#0: ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,1,2,3] -; SSE41-NEXT: pmovsxwq %xmm3, %xmm4 +; SSE41-NEXT: pmovsxwq %xmm3, %xmm8 ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] -; SSE41-NEXT: pmovsxwq %xmm3, %xmm5 -; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3] ; SSE41-NEXT: pmovsxwq %xmm3, %xmm6 -; SSE41-NEXT: pmovsxwq %xmm0, %xmm7 +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3] +; SSE41-NEXT: pmovsxwq %xmm3, %xmm7 +; SSE41-NEXT: pmovsxwq %xmm0, %xmm5 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1] ; SSE41-NEXT: pmovsxdq %xmm0, %xmm3 -; SSE41-NEXT: pmuldq %xmm4, %xmm3 ; SSE41-NEXT: pmovsxdq %xmm2, %xmm2 -; SSE41-NEXT: pmuldq %xmm5, %xmm2 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] ; SSE41-NEXT: pmovsxdq %xmm0, %xmm4 -; SSE41-NEXT: pmuldq %xmm6, %xmm4 ; SSE41-NEXT: pmovsxdq %xmm1, %xmm0 -; SSE41-NEXT: pmuldq %xmm7, %xmm0 +; SSE41-NEXT: pmuldq %xmm5, %xmm0 +; SSE41-NEXT: pmuldq %xmm7, %xmm4 +; SSE41-NEXT: pmuldq %xmm6, %xmm2 +; SSE41-NEXT: pmuldq %xmm8, %xmm3 ; SSE41-NEXT: movdqa %xmm4, %xmm1 ; SSE41-NEXT: retq ; @@ -1493,10 +1493,9 @@ define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) { ; AVX2-NEXT: vpmovsxwq %xmm0, %ymm0 ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3 ; AVX2-NEXT: vpmovsxdq %xmm3, %ymm3 -; AVX2-NEXT: vpmuldq %ymm3, %ymm2, %ymm2 ; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1 ; AVX2-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 -; AVX2-NEXT: vmovdqa %ymm2, %ymm1 +; AVX2-NEXT: vpmuldq %ymm3, %ymm2, %ymm1 ; AVX2-NEXT: retq ; ; AVX512-LABEL: mul_v8i64_sext: diff --git a/test/CodeGen/X86/pr27591.ll b/test/CodeGen/X86/pr27591.ll index 3ff6c096d097..b71cb8c4b3a2 100644 --- a/test/CodeGen/X86/pr27591.ll +++ b/test/CodeGen/X86/pr27591.ll @@ -9,12 +9,6 @@ define void @test1(i32 %x) #0 { ; CHECK-NEXT: pushq %rax ; CHECK-NEXT: testl %edi, %edi ; CHECK-NEXT: setne %al -; CHECK-NEXT: # implicit-def: %EDI -; CHECK-NEXT: movb %al, %dil -; CHECK-NEXT: andl $1, %edi -; CHECK-NEXT: kmovd %edi, %k0 -; CHECK-NEXT: kmovd %k0, %edi -; CHECK-NEXT: movb %dil, %al ; CHECK-NEXT: andb $1, %al ; CHECK-NEXT: movzbl %al, %edi ; CHECK-NEXT: callq callee1 @@ -32,17 +26,9 @@ define void @test2(i32 %x) 
#0 { ; CHECK-NEXT: pushq %rax ; CHECK-NEXT: testl %edi, %edi ; CHECK-NEXT: setne %al -; CHECK-NEXT: # implicit-def: %EDI -; CHECK-NEXT: movb %al, %dil -; CHECK-NEXT: andl $1, %edi -; CHECK-NEXT: kmovd %edi, %k0 -; CHECK-NEXT: kmovd %k0, %edi +; CHECK-NEXT: movzbl %al, %edi ; CHECK-NEXT: andl $1, %edi -; CHECK-NEXT: movb %dil, %al -; CHECK-NEXT: xorl %edi, %edi -; CHECK-NEXT: testb %al, %al -; CHECK-NEXT: movl $-1, %ecx -; CHECK-NEXT: cmovnel %ecx, %edi +; CHECK-NEXT: negl %edi ; CHECK-NEXT: callq callee2 ; CHECK-NEXT: popq %rax ; CHECK-NEXT: retq diff --git a/test/CodeGen/X86/pr28173.ll b/test/CodeGen/X86/pr28173.ll index d9622b99bd98..3279982e4641 100644 --- a/test/CodeGen/X86/pr28173.ll +++ b/test/CodeGen/X86/pr28173.ll @@ -8,9 +8,8 @@ target triple = "x86_64-unknown-linux-gnu" define i64 @foo64(i1 zeroext %i) #0 { ; CHECK-LABEL: foo64: ; CHECK: # BB#0: -; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def> -; CHECK-NEXT: orq $-2, %rdi -; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: movzbl %dil, %eax +; CHECK-NEXT: orq $-2, %rax ; CHECK-NEXT: retq br label %bb @@ -26,8 +25,9 @@ end: define i16 @foo16(i1 zeroext %i) #0 { ; CHECK-LABEL: foo16: ; CHECK: # BB#0: -; CHECK-NEXT: orl $65534, %edi # imm = 0xFFFE -; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: movzbl %dil, %eax +; CHECK-NEXT: orl $65534, %eax # imm = 0xFFFE +; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill> ; CHECK-NEXT: retq br label %bb @@ -43,9 +43,9 @@ end: define i16 @foo16_1(i1 zeroext %i, i32 %j) #0 { ; CHECK-LABEL: foo16_1: ; CHECK: # BB#0: -; CHECK-NEXT: andl $1, %edi -; CHECK-NEXT: orl $2, %edi -; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: movzbl %dil, %eax +; CHECK-NEXT: orl $2, %eax +; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill> ; CHECK-NEXT: retq br label %bb @@ -61,8 +61,8 @@ end: define i32 @foo32(i1 zeroext %i) #0 { ; CHECK-LABEL: foo32: ; CHECK: # BB#0: -; CHECK-NEXT: orl $-2, %edi -; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: movzbl %dil, %eax +; CHECK-NEXT: orl $-2, %eax ; CHECK-NEXT: retq br label %bb diff --git a/test/CodeGen/X86/pr29112.ll b/test/CodeGen/X86/pr29112.ll index 94904018872b..8c970b3d4771 100644 --- a/test/CodeGen/X86/pr29112.ll +++ b/test/CodeGen/X86/pr29112.ll @@ -38,8 +38,7 @@ define <4 x float> @bar(<4 x float>* %a1p, <4 x float>* %a2p, <4 x float> %a3, < ; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm8[0],xmm0[0],xmm8[2,3] ; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[1],xmm1[3] ; CHECK-NEXT: vinsertps {{.*#+}} xmm14 = xmm1[0,1,2],xmm3[1] -; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm10[0,1,2],xmm3[1] -; CHECK-NEXT: vaddps %xmm14, %xmm1, %xmm10 +; CHECK-NEXT: vinsertps {{.*#+}} xmm10 = xmm10[0,1,2],xmm3[1] ; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] ; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[2,3] ; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[1],xmm0[3] @@ -53,9 +52,10 @@ define <4 x float> @bar(<4 x float>* %a1p, <4 x float>* %a2p, <4 x float> %a3, < ; CHECK-NEXT: vmovaps %xmm15, %xmm1 ; CHECK-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill ; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm9 +; CHECK-NEXT: vaddps %xmm14, %xmm10, %xmm0 ; CHECK-NEXT: vaddps %xmm1, %xmm1, %xmm8 -; CHECK-NEXT: vaddps %xmm11, %xmm3, %xmm0 -; CHECK-NEXT: vaddps %xmm10, %xmm0, %xmm0 +; CHECK-NEXT: vaddps %xmm11, %xmm3, %xmm3 +; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0 ; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0 ; CHECK-NEXT: vmovaps %xmm8, {{[0-9]+}}(%rsp) ; CHECK-NEXT: vmovaps %xmm9, (%rsp) diff --git a/test/CodeGen/X86/pr31088.ll b/test/CodeGen/X86/pr31088.ll index 
d7a546c7396d..0dd8eb0ece85 100644 --- a/test/CodeGen/X86/pr31088.ll +++ b/test/CodeGen/X86/pr31088.ll @@ -150,12 +150,12 @@ define <2 x half> @ir_fadd_v2f16(<2 x half> %arg0, <2 x half> %arg1) nounwind { ; F16C-NEXT: vcvtph2ps %xmm3, %xmm3 ; F16C-NEXT: vcvtps2ph $4, %xmm1, %xmm1 ; F16C-NEXT: vcvtph2ps %xmm1, %xmm1 -; F16C-NEXT: vaddss %xmm3, %xmm1, %xmm1 ; F16C-NEXT: vcvtps2ph $4, %xmm2, %xmm2 ; F16C-NEXT: vcvtph2ps %xmm2, %xmm2 ; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0 ; F16C-NEXT: vcvtph2ps %xmm0, %xmm0 ; F16C-NEXT: vaddss %xmm2, %xmm0, %xmm0 +; F16C-NEXT: vaddss %xmm3, %xmm1, %xmm1 ; F16C-NEXT: retq %retval = fadd <2 x half> %arg0, %arg1 ret <2 x half> %retval diff --git a/test/CodeGen/X86/pr32241.ll b/test/CodeGen/X86/pr32241.ll index d8ce230057ea..e1f726f0c625 100644 --- a/test/CodeGen/X86/pr32241.ll +++ b/test/CodeGen/X86/pr32241.ll @@ -4,49 +4,57 @@ define i32 @_Z3foov() { ; CHECK-LABEL: _Z3foov: ; CHECK: # BB#0: # %entry -; CHECK-NEXT: subl $20, %esp +; CHECK-NEXT: pushl %esi ; CHECK-NEXT: .Lcfi0: -; CHECK-NEXT: .cfi_def_cfa_offset 24 +; CHECK-NEXT: .cfi_def_cfa_offset 8 +; CHECK-NEXT: subl $24, %esp +; CHECK-NEXT: .Lcfi1: +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: .Lcfi2: +; CHECK-NEXT: .cfi_offset %esi, -8 +; CHECK-NEXT: movb $1, %al ; CHECK-NEXT: movw $10959, {{[0-9]+}}(%esp) # imm = 0x2ACF ; CHECK-NEXT: movw $-15498, {{[0-9]+}}(%esp) # imm = 0xC376 ; CHECK-NEXT: movw $19417, {{[0-9]+}}(%esp) # imm = 0x4BD9 -; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax -; CHECK-NEXT: movw {{[0-9]+}}(%esp), %cx -; CHECK-NEXT: kxnorw %k0, %k0, %k0 -; CHECK-NEXT: kshiftrw $15, %k0, %k0 -; CHECK-NEXT: testw %cx, %cx -; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill -; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%esp) # 2-byte Spill +; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; CHECK-NEXT: cmpw $0, {{[0-9]+}}(%esp) +; CHECK-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill +; CHECK-NEXT: movb %al, {{[0-9]+}}(%esp) # 1-byte Spill ; CHECK-NEXT: jne .LBB0_2 -; CHECK-NEXT: jmp .LBB0_1 -; CHECK-NEXT: .LBB0_1: # %lor.rhs +; CHECK-NEXT: # BB#1: # %lor.rhs ; CHECK-NEXT: xorl %eax, %eax -; CHECK-NEXT: kmovd %eax, %k0 -; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%esp) # 2-byte Spill +; CHECK-NEXT: movb %al, %cl +; CHECK-NEXT: movb %cl, {{[0-9]+}}(%esp) # 1-byte Spill ; CHECK-NEXT: jmp .LBB0_2 ; CHECK-NEXT: .LBB0_2: # %lor.end -; CHECK-NEXT: kmovw {{[0-9]+}}(%esp), %k0 # 2-byte Reload -; CHECK-NEXT: kxnorw %k0, %k0, %k1 -; CHECK-NEXT: kshiftrw $15, %k1, %k1 -; CHECK-NEXT: movb $1, %al -; CHECK-NEXT: testb %al, %al -; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%esp) # 2-byte Spill -; CHECK-NEXT: kmovw %k1, {{[0-9]+}}(%esp) # 2-byte Spill +; CHECK-NEXT: movb {{[0-9]+}}(%esp), %al # 1-byte Reload +; CHECK-NEXT: movb $1, %cl +; CHECK-NEXT: andb $1, %al +; CHECK-NEXT: movzbl %al, %edx +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; CHECK-NEXT: subl %edx, %esi +; CHECK-NEXT: setl %al +; CHECK-NEXT: andb $1, %al +; CHECK-NEXT: movzbl %al, %edx +; CHECK-NEXT: xorl $-1, %edx +; CHECK-NEXT: cmpl $0, %edx +; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; CHECK-NEXT: movb %cl, {{[0-9]+}}(%esp) # 1-byte Spill ; CHECK-NEXT: jne .LBB0_4 -; CHECK-NEXT: jmp .LBB0_3 -; CHECK-NEXT: .LBB0_3: # %lor.rhs4 +; CHECK-NEXT: # BB#3: # %lor.rhs4 ; CHECK-NEXT: xorl %eax, %eax -; CHECK-NEXT: kmovd %eax, %k0 -; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%esp) # 2-byte Spill +; CHECK-NEXT: movb %al, %cl +; CHECK-NEXT: movb %cl, {{[0-9]+}}(%esp) # 1-byte Spill ; CHECK-NEXT: jmp .LBB0_4 ; CHECK-NEXT: .LBB0_4: # %lor.end5 -; CHECK-NEXT: 
kmovw {{[0-9]+}}(%esp), %k0 # 2-byte Reload -; CHECK-NEXT: kmovd %k0, %eax -; CHECK-NEXT: andl $1, %eax -; CHECK-NEXT: movw %ax, %cx -; CHECK-NEXT: movw %cx, {{[0-9]+}}(%esp) +; CHECK-NEXT: movb {{[0-9]+}}(%esp), %al # 1-byte Reload +; CHECK-NEXT: andb $1, %al +; CHECK-NEXT: movzbl %al, %ecx +; CHECK-NEXT: movw %cx, %dx +; CHECK-NEXT: movw %dx, {{[0-9]+}}(%esp) ; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax -; CHECK-NEXT: addl $20, %esp +; CHECK-NEXT: addl $24, %esp +; CHECK-NEXT: popl %esi ; CHECK-NEXT: retl entry: %aa = alloca i16, align 2 diff --git a/test/CodeGen/X86/pr32256.ll b/test/CodeGen/X86/pr32256.ll index cb26c13e53eb..e29b56236e26 100644 --- a/test/CodeGen/X86/pr32256.ll +++ b/test/CodeGen/X86/pr32256.ll @@ -7,39 +7,27 @@ define void @_Z1av() { ; CHECK-LABEL: _Z1av: ; CHECK: # BB#0: # %entry -; CHECK-NEXT: subl $6, %esp +; CHECK-NEXT: subl $2, %esp ; CHECK-NEXT: .Lcfi0: -; CHECK-NEXT: .cfi_def_cfa_offset 10 +; CHECK-NEXT: .cfi_def_cfa_offset 6 ; CHECK-NEXT: xorl %eax, %eax -; CHECK-NEXT: kmovd %eax, %k0 -; CHECK-NEXT: movb c, %cl -; CHECK-NEXT: # implicit-def: %EAX -; CHECK-NEXT: movb %cl, %al -; CHECK-NEXT: andl $1, %eax -; CHECK-NEXT: kmovd %eax, %k1 -; CHECK-NEXT: kmovq %k1, %k2 -; CHECK-NEXT: kxnorw %k0, %k0, %k3 -; CHECK-NEXT: kshiftrw $15, %k3, %k3 -; CHECK-NEXT: kxorw %k3, %k1, %k1 -; CHECK-NEXT: kmovd %k1, %eax ; CHECK-NEXT: movb %al, %cl -; CHECK-NEXT: testb $1, %cl -; CHECK-NEXT: kmovw %k2, {{[0-9]+}}(%esp) # 2-byte Spill -; CHECK-NEXT: kmovw %k0, (%esp) # 2-byte Spill +; CHECK-NEXT: movb c, %dl +; CHECK-NEXT: xorb $-1, %dl +; CHECK-NEXT: testb $1, %dl +; CHECK-NEXT: movb %cl, (%esp) # 1-byte Spill ; CHECK-NEXT: jne .LBB0_1 ; CHECK-NEXT: jmp .LBB0_2 ; CHECK-NEXT: .LBB0_1: # %land.rhs ; CHECK-NEXT: xorl %eax, %eax -; CHECK-NEXT: kmovd %eax, %k0 -; CHECK-NEXT: kmovw %k0, (%esp) # 2-byte Spill +; CHECK-NEXT: movb %al, %cl +; CHECK-NEXT: movb %cl, (%esp) # 1-byte Spill ; CHECK-NEXT: jmp .LBB0_2 ; CHECK-NEXT: .LBB0_2: # %land.end -; CHECK-NEXT: kmovw (%esp), %k0 # 2-byte Reload -; CHECK-NEXT: kmovd %k0, %eax -; CHECK-NEXT: movb %al, %cl -; CHECK-NEXT: andb $1, %cl -; CHECK-NEXT: movb %cl, {{[0-9]+}}(%esp) -; CHECK-NEXT: addl $6, %esp +; CHECK-NEXT: movb (%esp), %al # 1-byte Reload +; CHECK-NEXT: andb $1, %al +; CHECK-NEXT: movb %al, {{[0-9]+}}(%esp) +; CHECK-NEXT: addl $2, %esp ; CHECK-NEXT: retl entry: %b = alloca i8, align 1 diff --git a/test/CodeGen/X86/pr32284.ll b/test/CodeGen/X86/pr32284.ll index 143e3af82eb7..571dd6774906 100644 --- a/test/CodeGen/X86/pr32284.ll +++ b/test/CodeGen/X86/pr32284.ll @@ -39,12 +39,6 @@ define void @foo() { ; X86-O0-NEXT: movzbl %al, %edx ; X86-O0-NEXT: subl %ecx, %edx ; X86-O0-NEXT: setle %al -; X86-O0-NEXT: # implicit-def: %ECX -; X86-O0-NEXT: movb %al, %cl -; X86-O0-NEXT: andl $1, %ecx -; X86-O0-NEXT: kmovd %ecx, %k0 -; X86-O0-NEXT: kmovd %k0, %ecx -; X86-O0-NEXT: movb %cl, %al ; X86-O0-NEXT: andb $1, %al ; X86-O0-NEXT: movzbl %al, %ecx ; X86-O0-NEXT: movl %ecx, {{[0-9]+}}(%esp) @@ -77,12 +71,6 @@ define void @foo() { ; X64-O0-NEXT: movzbl %al, %edx ; X64-O0-NEXT: subl %ecx, %edx ; X64-O0-NEXT: setle %al -; X64-O0-NEXT: # implicit-def: %ECX -; X64-O0-NEXT: movb %al, %cl -; X64-O0-NEXT: andl $1, %ecx -; X64-O0-NEXT: kmovd %ecx, %k0 -; X64-O0-NEXT: kmovd %k0, %ecx -; X64-O0-NEXT: movb %cl, %al ; X64-O0-NEXT: andb $1, %al ; X64-O0-NEXT: movzbl %al, %ecx ; X64-O0-NEXT: movl %ecx, -{{[0-9]+}}(%rsp) diff --git a/test/CodeGen/X86/pr32451.ll b/test/CodeGen/X86/pr32451.ll index d980b7ff284c..e4643a863f94 100644 --- a/test/CodeGen/X86/pr32451.ll 
+++ b/test/CodeGen/X86/pr32451.ll @@ -25,12 +25,6 @@ define i8** @japi1_convert_690(i8**, i8***, i32) { ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; CHECK-NEXT: movl 4(%ecx), %edx ; CHECK-NEXT: movb (%edx), %bl -; CHECK-NEXT: # implicit-def: %EDX -; CHECK-NEXT: movb %bl, %dl -; CHECK-NEXT: andl $1, %edx -; CHECK-NEXT: kmovw %edx, %k0 -; CHECK-NEXT: kmovw %k0, %edx -; CHECK-NEXT: movb %dl, %bl ; CHECK-NEXT: andb $1, %bl ; CHECK-NEXT: movzbl %bl, %edx ; CHECK-NEXT: movl %edx, (%esp) diff --git a/test/CodeGen/X86/rotate.ll b/test/CodeGen/X86/rotate.ll index 4be3a4c2391b..5d5150ad62d6 100644 --- a/test/CodeGen/X86/rotate.ll +++ b/test/CodeGen/X86/rotate.ll @@ -33,8 +33,8 @@ define i64 @rotl64(i64 %A, i8 %Amt) nounwind { ; 32-NEXT: movl %ebx, %esi ; 32-NEXT: xorl %ebx, %ebx ; 32-NEXT: .LBB0_4: -; 32-NEXT: orl %ebx, %edx ; 32-NEXT: orl %esi, %eax +; 32-NEXT: orl %ebx, %edx ; 32-NEXT: popl %esi ; 32-NEXT: popl %edi ; 32-NEXT: popl %ebx @@ -86,8 +86,8 @@ define i64 @rotr64(i64 %A, i8 %Amt) nounwind { ; 32-NEXT: movl %ebx, %esi ; 32-NEXT: xorl %ebx, %ebx ; 32-NEXT: .LBB1_4: -; 32-NEXT: orl %esi, %edx ; 32-NEXT: orl %ebx, %eax +; 32-NEXT: orl %esi, %edx ; 32-NEXT: popl %esi ; 32-NEXT: popl %edi ; 32-NEXT: popl %ebx @@ -546,7 +546,7 @@ define void @rotr1_64_mem(i64* %Aptr) nounwind { ; 32-LABEL: rotr1_64_mem: ; 32: # BB#0: ; 32-NEXT: pushl %esi -; 32-NEXT: movl {{[0-9]+}}(%esp), %eax +; 32-NEXT: movl 8(%esp), %eax ; 32-NEXT: movl (%eax), %ecx ; 32-NEXT: movl 4(%eax), %edx ; 32-NEXT: movl %edx, %esi @@ -555,13 +555,11 @@ define void @rotr1_64_mem(i64* %Aptr) nounwind { ; 32-NEXT: movl %ecx, 4(%eax) ; 32-NEXT: movl %esi, (%eax) ; 32-NEXT: popl %esi -; 32-NEXT: retl -; + ; 64-LABEL: rotr1_64_mem: ; 64: # BB#0: ; 64-NEXT: rorq (%rdi) ; 64-NEXT: retq - %A = load i64, i64 *%Aptr %B = shl i64 %A, 63 %C = lshr i64 %A, 1 @@ -573,7 +571,7 @@ define void @rotr1_64_mem(i64* %Aptr) nounwind { define void @rotr1_32_mem(i32* %Aptr) nounwind { ; 32-LABEL: rotr1_32_mem: ; 32: # BB#0: -; 32-NEXT: movl {{[0-9]+}}(%esp), %eax +; 32-NEXT: movl 4(%esp), %eax ; 32-NEXT: rorl (%eax) ; 32-NEXT: retl ; @@ -592,7 +590,7 @@ define void @rotr1_32_mem(i32* %Aptr) nounwind { define void @rotr1_16_mem(i16* %Aptr) nounwind { ; 32-LABEL: rotr1_16_mem: ; 32: # BB#0: -; 32-NEXT: movl {{[0-9]+}}(%esp), %eax +; 32-NEXT: movl 4(%esp), %eax ; 32-NEXT: rorw (%eax) ; 32-NEXT: retl ; @@ -611,7 +609,7 @@ define void @rotr1_16_mem(i16* %Aptr) nounwind { define void @rotr1_8_mem(i8* %Aptr) nounwind { ; 32-LABEL: rotr1_8_mem: ; 32: # BB#0: -; 32-NEXT: movl {{[0-9]+}}(%esp), %eax +; 32-NEXT: movl 4(%esp), %eax ; 32-NEXT: rorb (%eax) ; 32-NEXT: retl ; diff --git a/test/CodeGen/X86/rtm.ll b/test/CodeGen/X86/rtm.ll index 7215c482ffa2..a8562677c7bf 100644 --- a/test/CodeGen/X86/rtm.ll +++ b/test/CodeGen/X86/rtm.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+rtm | FileCheck %s --check-prefix=X86 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+rtm | FileCheck %s --check-prefix=X64 +; RUN: llc -verify-machineinstrs < %s -mtriple=i686-unknown-unknown -mattr=+rtm | FileCheck %s --check-prefix=X86 +; RUN: llc -verify-machineinstrs < %s -mtriple=x86_64-unknown-unknown -mattr=+rtm | FileCheck %s --check-prefix=X64 declare i32 @llvm.x86.xbegin() nounwind declare void @llvm.x86.xend() nounwind @@ -13,7 +13,8 @@ define i32 @test_xbegin() nounwind uwtable { ; X86-NEXT: xbegin .LBB0_2 ; X86-NEXT: # BB#1: # %entry ; X86-NEXT: movl $-1, 
%eax -; X86-NEXT: .LBB0_2: # %entry +; X86: .LBB0_2: # %entry +; X86-NEXT: # XABORT DEF ; X86-NEXT: retl ; ; X64-LABEL: test_xbegin: @@ -21,7 +22,8 @@ define i32 @test_xbegin() nounwind uwtable { ; X64-NEXT: xbegin .LBB0_2 ; X64-NEXT: # BB#1: # %entry ; X64-NEXT: movl $-1, %eax -; X64-NEXT: .LBB0_2: # %entry +; X64: .LBB0_2: # %entry +; X64-NEXT: # XABORT DEF ; X64-NEXT: retq entry: %0 = tail call i32 @llvm.x86.xbegin() nounwind diff --git a/test/CodeGen/X86/sad.ll b/test/CodeGen/X86/sad.ll index 6a565a5c76f0..b8a8b8afd14f 100644 --- a/test/CodeGen/X86/sad.ll +++ b/test/CodeGen/X86/sad.ll @@ -149,131 +149,127 @@ middle.block: define i32 @sad_32i8() nounwind { ; SSE2-LABEL: sad_32i8: ; SSE2: # BB#0: # %entry -; SSE2-NEXT: pxor %xmm12, %xmm12 +; SSE2-NEXT: pxor %xmm11, %xmm11 ; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00 -; SSE2-NEXT: pxor %xmm13, %xmm13 -; SSE2-NEXT: pxor %xmm6, %xmm6 -; SSE2-NEXT: pxor %xmm4, %xmm4 +; SSE2-NEXT: pxor %xmm0, %xmm0 ; SSE2-NEXT: pxor %xmm3, %xmm3 -; SSE2-NEXT: pxor %xmm14, %xmm14 -; SSE2-NEXT: pxor %xmm15, %xmm15 +; SSE2-NEXT: pxor %xmm2, %xmm2 ; SSE2-NEXT: pxor %xmm1, %xmm1 -; SSE2-NEXT: pxor %xmm0, %xmm0 +; SSE2-NEXT: pxor %xmm12, %xmm12 +; SSE2-NEXT: pxor %xmm15, %xmm15 +; SSE2-NEXT: pxor %xmm13, %xmm13 +; SSE2-NEXT: pxor %xmm14, %xmm14 ; SSE2-NEXT: .p2align 4, 0x90 ; SSE2-NEXT: .LBB1_1: # %vector.body ; SSE2-NEXT: # =>This Inner Loop Header: Depth=1 -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa a+1040(%rax), %xmm8 +; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa a+1040(%rax), %xmm6 ; SSE2-NEXT: movdqa a+1024(%rax), %xmm3 -; SSE2-NEXT: movdqa %xmm3, %xmm4 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm12[0],xmm4[1],xmm12[1],xmm4[2],xmm12[2],xmm4[3],xmm12[3],xmm4[4],xmm12[4],xmm4[5],xmm12[5],xmm4[6],xmm12[6],xmm4[7],xmm12[7] -; SSE2-NEXT: movdqa %xmm4, %xmm7 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm12[0],xmm7[1],xmm12[1],xmm7[2],xmm12[2],xmm7[3],xmm12[3] -; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm12[4],xmm4[5],xmm12[5],xmm4[6],xmm12[6],xmm4[7],xmm12[7] -; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm12[8],xmm3[9],xmm12[9],xmm3[10],xmm12[10],xmm3[11],xmm12[11],xmm3[12],xmm12[12],xmm3[13],xmm12[13],xmm3[14],xmm12[14],xmm3[15],xmm12[15] -; SSE2-NEXT: movdqa %xmm3, %xmm1 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm12[0],xmm1[1],xmm12[1],xmm1[2],xmm12[2],xmm1[3],xmm12[3] -; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm12[4],xmm3[5],xmm12[5],xmm3[6],xmm12[6],xmm3[7],xmm12[7] -; SSE2-NEXT: movdqa %xmm8, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7] -; SSE2-NEXT: movdqa %xmm0, %xmm5 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3] -; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7] -; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm12[8],xmm8[9],xmm12[9],xmm8[10],xmm12[10],xmm8[11],xmm12[11],xmm8[12],xmm12[12],xmm8[13],xmm12[13],xmm8[14],xmm12[14],xmm8[15],xmm12[15] -; SSE2-NEXT: movdqa b+1024(%rax), %xmm11 -; 
SSE2-NEXT: movdqa %xmm11, %xmm10 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3],xmm10[4],xmm12[4],xmm10[5],xmm12[5],xmm10[6],xmm12[6],xmm10[7],xmm12[7] -; SSE2-NEXT: movdqa %xmm10, %xmm2 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3] -; SSE2-NEXT: psubd %xmm2, %xmm7 +; SSE2-NEXT: movdqa %xmm3, %xmm8 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm11[0],xmm8[1],xmm11[1],xmm8[2],xmm11[2],xmm8[3],xmm11[3],xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm11[8],xmm3[9],xmm11[9],xmm3[10],xmm11[10],xmm3[11],xmm11[11],xmm3[12],xmm11[12],xmm3[13],xmm11[13],xmm3[14],xmm11[14],xmm3[15],xmm11[15] +; SSE2-NEXT: movdqa %xmm3, %xmm5 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm11[4],xmm3[5],xmm11[5],xmm3[6],xmm11[6],xmm3[7],xmm11[7] +; SSE2-NEXT: movdqa %xmm6, %xmm1 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3],xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7] +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1],xmm0[2],xmm11[2],xmm0[3],xmm11[3] +; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm11[8],xmm6[9],xmm11[9],xmm6[10],xmm11[10],xmm6[11],xmm11[11],xmm6[12],xmm11[12],xmm6[13],xmm11[13],xmm6[14],xmm11[14],xmm6[15],xmm11[15] +; SSE2-NEXT: movdqa %xmm6, %xmm7 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3] +; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7] ; SSE2-NEXT: movdqa b+1040(%rax), %xmm9 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm12[4],xmm10[5],xmm12[5],xmm10[6],xmm12[6],xmm10[7],xmm12[7] -; SSE2-NEXT: psubd %xmm10, %xmm4 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm12[8],xmm11[9],xmm12[9],xmm11[10],xmm12[10],xmm11[11],xmm12[11],xmm11[12],xmm12[12],xmm11[13],xmm12[13],xmm11[14],xmm12[14],xmm11[15],xmm12[15] -; SSE2-NEXT: movdqa %xmm11, %xmm2 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3] +; SSE2-NEXT: movdqa %xmm9, %xmm2 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm11[8],xmm9[9],xmm11[9],xmm9[10],xmm11[10],xmm9[11],xmm11[11],xmm9[12],xmm11[12],xmm9[13],xmm11[13],xmm9[14],xmm11[14],xmm9[15],xmm11[15] +; SSE2-NEXT: movdqa %xmm9, %xmm10 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm11[4],xmm9[5],xmm11[5],xmm9[6],xmm11[6],xmm9[7],xmm11[7] +; SSE2-NEXT: psubd %xmm9, %xmm6 +; SSE2-NEXT: movdqa b+1024(%rax), %xmm4 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3],xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3] +; SSE2-NEXT: psubd %xmm10, %xmm7 +; SSE2-NEXT: movdqa %xmm2, %xmm9 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7] ; SSE2-NEXT: psubd %xmm2, %xmm1 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7] -; SSE2-NEXT: psubd %xmm11, %xmm3 -; SSE2-NEXT: movdqa %xmm6, %xmm10 -; SSE2-NEXT: movdqa %xmm9, %xmm6 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = 
xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3],xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7] -; SSE2-NEXT: movdqa %xmm6, %xmm2 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3] -; SSE2-NEXT: psubd %xmm2, %xmm5 +; SSE2-NEXT: movdqa %xmm4, %xmm2 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm11[8],xmm4[9],xmm11[9],xmm4[10],xmm11[10],xmm4[11],xmm11[11],xmm4[12],xmm11[12],xmm4[13],xmm11[13],xmm4[14],xmm11[14],xmm4[15],xmm11[15] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3] +; SSE2-NEXT: psubd %xmm9, %xmm0 +; SSE2-NEXT: movdqa %xmm4, %xmm9 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm11[4],xmm4[5],xmm11[5],xmm4[6],xmm11[6],xmm4[7],xmm11[7] +; SSE2-NEXT: psubd %xmm4, %xmm3 +; SSE2-NEXT: movdqa %xmm8, %xmm10 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3],xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3] +; SSE2-NEXT: psubd %xmm9, %xmm5 +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7] +; SSE2-NEXT: psubd %xmm2, %xmm8 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm11[0],xmm4[1],xmm11[1],xmm4[2],xmm11[2],xmm4[3],xmm11[3] +; SSE2-NEXT: psubd %xmm4, %xmm10 +; SSE2-NEXT: movdqa %xmm10, %xmm2 +; SSE2-NEXT: psrad $31, %xmm2 +; SSE2-NEXT: paddd %xmm2, %xmm10 +; SSE2-NEXT: pxor %xmm2, %xmm10 ; SSE2-NEXT: movdqa %xmm8, %xmm2 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3] -; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7] -; SSE2-NEXT: psubd %xmm6, %xmm0 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm12[8],xmm9[9],xmm12[9],xmm9[10],xmm12[10],xmm9[11],xmm12[11],xmm9[12],xmm12[12],xmm9[13],xmm12[13],xmm9[14],xmm12[14],xmm9[15],xmm12[15] -; SSE2-NEXT: movdqa %xmm9, %xmm6 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3] -; SSE2-NEXT: psubd %xmm6, %xmm2 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm12[4],xmm8[5],xmm12[5],xmm8[6],xmm12[6],xmm8[7],xmm12[7] -; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm12[4],xmm9[5],xmm12[5],xmm9[6],xmm12[6],xmm9[7],xmm12[7] -; SSE2-NEXT: psubd %xmm9, %xmm8 -; SSE2-NEXT: movdqa %xmm7, %xmm6 -; SSE2-NEXT: psrad $31, %xmm6 -; SSE2-NEXT: paddd %xmm6, %xmm7 -; SSE2-NEXT: pxor %xmm6, %xmm7 +; SSE2-NEXT: psrad $31, %xmm2 +; SSE2-NEXT: paddd %xmm2, %xmm8 +; SSE2-NEXT: pxor %xmm2, %xmm8 +; SSE2-NEXT: movdqa %xmm5, %xmm2 +; SSE2-NEXT: psrad $31, %xmm2 +; SSE2-NEXT: paddd %xmm2, %xmm5 +; SSE2-NEXT: pxor %xmm2, %xmm5 +; SSE2-NEXT: movdqa %xmm3, %xmm2 +; SSE2-NEXT: psrad $31, %xmm2 +; SSE2-NEXT: paddd %xmm2, %xmm3 +; SSE2-NEXT: pxor %xmm2, %xmm3 +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: psrad $31, %xmm2 +; SSE2-NEXT: paddd %xmm2, %xmm0 +; SSE2-NEXT: pxor %xmm2, %xmm0 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psrad $31, %xmm2 
+; SSE2-NEXT: paddd %xmm2, %xmm1 +; SSE2-NEXT: pxor %xmm2, %xmm1 +; SSE2-NEXT: movdqa %xmm7, %xmm2 +; SSE2-NEXT: psrad $31, %xmm2 +; SSE2-NEXT: paddd %xmm2, %xmm7 +; SSE2-NEXT: pxor %xmm2, %xmm7 +; SSE2-NEXT: movdqa %xmm6, %xmm2 +; SSE2-NEXT: psrad $31, %xmm2 +; SSE2-NEXT: paddd %xmm2, %xmm6 +; SSE2-NEXT: pxor %xmm2, %xmm6 +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload +; SSE2-NEXT: paddd %xmm6, %xmm14 ; SSE2-NEXT: paddd %xmm7, %xmm13 -; SSE2-NEXT: movdqa %xmm4, %xmm6 -; SSE2-NEXT: psrad $31, %xmm6 -; SSE2-NEXT: paddd %xmm6, %xmm4 -; SSE2-NEXT: pxor %xmm6, %xmm4 -; SSE2-NEXT: movdqa %xmm10, %xmm6 -; SSE2-NEXT: paddd %xmm4, %xmm6 -; SSE2-NEXT: movdqa %xmm1, %xmm4 -; SSE2-NEXT: psrad $31, %xmm4 -; SSE2-NEXT: paddd %xmm4, %xmm1 -; SSE2-NEXT: pxor %xmm4, %xmm1 -; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload -; SSE2-NEXT: paddd %xmm1, %xmm4 -; SSE2-NEXT: movdqa %xmm3, %xmm1 -; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: paddd %xmm1, %xmm3 -; SSE2-NEXT: pxor %xmm1, %xmm3 +; SSE2-NEXT: paddd %xmm1, %xmm15 ; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload +; SSE2-NEXT: paddd %xmm0, %xmm12 +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload ; SSE2-NEXT: paddd %xmm3, %xmm1 -; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload -; SSE2-NEXT: movdqa %xmm5, %xmm1 -; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: paddd %xmm1, %xmm5 -; SSE2-NEXT: pxor %xmm1, %xmm5 -; SSE2-NEXT: paddd %xmm5, %xmm14 -; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: paddd %xmm1, %xmm0 -; SSE2-NEXT: pxor %xmm1, %xmm0 -; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload -; SSE2-NEXT: paddd %xmm0, %xmm15 -; SSE2-NEXT: movdqa %xmm2, %xmm0 -; SSE2-NEXT: psrad $31, %xmm0 -; SSE2-NEXT: paddd %xmm0, %xmm2 -; SSE2-NEXT: pxor %xmm0, %xmm2 -; SSE2-NEXT: paddd %xmm2, %xmm1 -; SSE2-NEXT: movdqa %xmm8, %xmm0 -; SSE2-NEXT: psrad $31, %xmm0 -; SSE2-NEXT: paddd %xmm0, %xmm8 -; SSE2-NEXT: pxor %xmm0, %xmm8 -; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload -; SSE2-NEXT: paddd %xmm8, %xmm0 +; SSE2-NEXT: paddd %xmm5, %xmm2 +; SSE2-NEXT: paddd %xmm8, %xmm3 +; SSE2-NEXT: paddd %xmm10, %xmm0 ; SSE2-NEXT: addq $4, %rax ; SSE2-NEXT: jne .LBB1_1 ; SSE2-NEXT: # BB#2: # %middle.block -; SSE2-NEXT: paddd %xmm15, %xmm6 -; SSE2-NEXT: paddd %xmm0, %xmm3 -; SSE2-NEXT: paddd %xmm6, %xmm3 -; SSE2-NEXT: paddd %xmm14, %xmm13 -; SSE2-NEXT: paddd %xmm1, %xmm4 -; SSE2-NEXT: paddd %xmm3, %xmm4 -; SSE2-NEXT: paddd %xmm13, %xmm4 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1] -; SSE2-NEXT: paddd %xmm4, %xmm0 +; SSE2-NEXT: paddd %xmm15, %xmm3 +; SSE2-NEXT: paddd %xmm14, %xmm1 +; SSE2-NEXT: paddd %xmm12, %xmm0 +; SSE2-NEXT: paddd %xmm13, %xmm2 +; SSE2-NEXT: paddd %xmm3, %xmm1 +; SSE2-NEXT: paddd %xmm2, %xmm1 +; SSE2-NEXT: paddd %xmm0, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE2-NEXT: paddd %xmm1, %xmm0 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; SSE2-NEXT: paddd %xmm0, %xmm1 ; SSE2-NEXT: movd %xmm1, %eax @@ -402,284 +398,288 @@ middle.block: define i32 @sad_avx64i8() nounwind { ; SSE2-LABEL: sad_avx64i8: ; SSE2: # BB#0: # %entry -; SSE2-NEXT: subq $200, %rsp -; SSE2-NEXT: pxor %xmm14, %xmm14 -; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00 +; SSE2-NEXT: subq $184, %rsp ; SSE2-NEXT: pxor %xmm15, %xmm15 -; SSE2-NEXT: pxor %xmm10, %xmm10 -; SSE2-NEXT: pxor %xmm3, %xmm3 -; SSE2-NEXT: pxor %xmm5, %xmm5 -; SSE2-NEXT: pxor %xmm13, %xmm13 -; SSE2-NEXT: pxor %xmm1, %xmm1 +; 
SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00 +; SSE2-NEXT: pxor %xmm12, %xmm12 ; SSE2-NEXT: pxor %xmm8, %xmm8 +; SSE2-NEXT: pxor %xmm4, %xmm4 ; SSE2-NEXT: pxor %xmm0, %xmm0 -; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: pxor %xmm14, %xmm14 +; SSE2-NEXT: pxor %xmm0, %xmm0 +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: pxor %xmm6, %xmm6 +; SSE2-NEXT: pxor %xmm0, %xmm0 +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: pxor %xmm11, %xmm11 -; SSE2-NEXT: pxor %xmm4, %xmm4 -; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: pxor %xmm7, %xmm7 -; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: pxor %xmm7, %xmm7 -; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: pxor %xmm7, %xmm7 -; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: pxor %xmm7, %xmm7 -; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: pxor %xmm0, %xmm0 +; SSE2-NEXT: pxor %xmm2, %xmm2 ; SSE2-NEXT: pxor %xmm7, %xmm7 -; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: pxor %xmm13, %xmm13 +; SSE2-NEXT: pxor %xmm1, %xmm1 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pxor %xmm5, %xmm5 ; SSE2-NEXT: .p2align 4, 0x90 ; SSE2-NEXT: .LBB2_1: # %vector.body ; SSE2-NEXT: # =>This Inner Loop Header: Depth=1 -; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm3, {{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa %xmm11, {{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa %xmm5, {{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa %xmm13, {{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa %xmm10, {{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm13, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm11, (%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm7, {{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa %xmm15, {{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movaps a+1040(%rax), %xmm0 -; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa a+1024(%rax), %xmm12 -; SSE2-NEXT: movdqa a+1056(%rax), %xmm15 -; SSE2-NEXT: movdqa a+1072(%rax), %xmm4 -; SSE2-NEXT: movdqa %xmm4, %xmm6 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15] -; SSE2-NEXT: movdqa %xmm6, %xmm1 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm14[0],xmm6[1],xmm14[1],xmm6[2],xmm14[2],xmm6[3],xmm14[3] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7] -; SSE2-NEXT: movdqa %xmm4, %xmm5 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3] -; SSE2-NEXT: movdqa %xmm15, %xmm11 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm11 = 
xmm11[8],xmm14[8],xmm11[9],xmm14[9],xmm11[10],xmm14[10],xmm11[11],xmm14[11],xmm11[12],xmm14[12],xmm11[13],xmm14[13],xmm11[14],xmm14[14],xmm11[15],xmm14[15] -; SSE2-NEXT: movdqa %xmm11, %xmm8 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm14[0],xmm11[1],xmm14[1],xmm11[2],xmm14[2],xmm11[3],xmm14[3] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7] -; SSE2-NEXT: movdqa %xmm15, %xmm0 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7] -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3] -; SSE2-NEXT: movdqa %xmm12, %xmm10 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm14[0],xmm10[1],xmm14[1],xmm10[2],xmm14[2],xmm10[3],xmm14[3],xmm10[4],xmm14[4],xmm10[5],xmm14[5],xmm10[6],xmm14[6],xmm10[7],xmm14[7] -; SSE2-NEXT: movdqa %xmm10, %xmm0 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3] -; SSE2-NEXT: movdqa %xmm0, %xmm9 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm14[4],xmm10[5],xmm14[5],xmm10[6],xmm14[6],xmm10[7],xmm14[7] -; SSE2-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm14[8],xmm12[9],xmm14[9],xmm12[10],xmm14[10],xmm12[11],xmm14[11],xmm12[12],xmm14[12],xmm12[13],xmm14[13],xmm12[14],xmm14[14],xmm12[15],xmm14[15] +; SSE2-NEXT: movdqa %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm14, {{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm4, {{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm6, {{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm8, {{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm12, {{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa a+1040(%rax), %xmm6 +; SSE2-NEXT: movdqa a+1024(%rax), %xmm4 +; SSE2-NEXT: movdqa a+1056(%rax), %xmm11 +; SSE2-NEXT: movdqa a+1072(%rax), %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm5 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7] +; SSE2-NEXT: movdqa %xmm11, %xmm1 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm15[8],xmm1[9],xmm15[9],xmm1[10],xmm15[10],xmm1[11],xmm15[11],xmm1[12],xmm15[12],xmm1[13],xmm15[13],xmm1[14],xmm15[14],xmm1[15],xmm15[15] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm15[0],xmm11[1],xmm15[1],xmm11[2],xmm15[2],xmm11[3],xmm15[3],xmm11[4],xmm15[4],xmm11[5],xmm15[5],xmm11[6],xmm15[6],xmm11[7],xmm15[7] +; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm15[0],xmm11[1],xmm15[1],xmm11[2],xmm15[2],xmm11[3],xmm15[3] +; SSE2-NEXT: movdqa %xmm4, %xmm12 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm15[0],xmm12[1],xmm15[1],xmm12[2],xmm15[2],xmm12[3],xmm15[3],xmm12[4],xmm15[4],xmm12[5],xmm15[5],xmm12[6],xmm15[6],xmm12[7],xmm15[7] ; SSE2-NEXT: movdqa %xmm12, %xmm0 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3] -; SSE2-NEXT: movdqa %xmm0, %xmm13 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7] -; SSE2-NEXT: movdqa b+1072(%rax), %xmm3 -; SSE2-NEXT: movdqa %xmm3, %xmm7 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = 
xmm7[8],xmm14[8],xmm7[9],xmm14[9],xmm7[10],xmm14[10],xmm7[11],xmm14[11],xmm7[12],xmm14[12],xmm7[13],xmm14[13],xmm7[14],xmm14[14],xmm7[15],xmm14[15] -; SSE2-NEXT: movdqa %xmm7, %xmm0 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7] -; SSE2-NEXT: psubd %xmm0, %xmm1 -; SSE2-NEXT: movdqa b+1056(%rax), %xmm0 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3] -; SSE2-NEXT: psubd %xmm7, %xmm6 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7] -; SSE2-NEXT: movdqa %xmm3, %xmm7 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm14[4],xmm7[5],xmm14[5],xmm7[6],xmm14[6],xmm7[7],xmm14[7] -; SSE2-NEXT: psubd %xmm7, %xmm5 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3] -; SSE2-NEXT: psubd %xmm3, %xmm4 -; SSE2-NEXT: movdqa %xmm0, %xmm3 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm14[8],xmm3[9],xmm14[9],xmm3[10],xmm14[10],xmm3[11],xmm14[11],xmm3[12],xmm14[12],xmm3[13],xmm14[13],xmm3[14],xmm14[14],xmm3[15],xmm14[15] -; SSE2-NEXT: movdqa %xmm3, %xmm7 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm14[4],xmm7[5],xmm14[5],xmm7[6],xmm14[6],xmm7[7],xmm14[7] -; SSE2-NEXT: psubd %xmm7, %xmm8 -; SSE2-NEXT: movdqa b+1024(%rax), %xmm7 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3] -; SSE2-NEXT: psubd %xmm3, %xmm11 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7] -; SSE2-NEXT: movdqa %xmm0, %xmm3 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7] -; SSE2-NEXT: psubd %xmm3, %xmm2 -; SSE2-NEXT: movdqa %xmm2, (%rsp) # 16-byte Spill -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3] -; SSE2-NEXT: psubd %xmm0, %xmm15 -; SSE2-NEXT: movdqa %xmm7, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7] -; SSE2-NEXT: movdqa %xmm0, %xmm3 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3] -; SSE2-NEXT: psubd %xmm3, %xmm9 -; SSE2-NEXT: movdqa %xmm9, {{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3] +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm15[4],xmm12[5],xmm15[5],xmm12[6],xmm15[6],xmm12[7],xmm15[7] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm15[8],xmm4[9],xmm15[9],xmm4[10],xmm15[10],xmm4[11],xmm15[11],xmm4[12],xmm15[12],xmm4[13],xmm15[13],xmm4[14],xmm15[14],xmm4[15],xmm15[15] +; SSE2-NEXT: movdqa %xmm4, %xmm0 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3] +; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm15[4],xmm4[5],xmm15[5],xmm4[6],xmm15[6],xmm4[7],xmm15[7] +; SSE2-NEXT: movdqa %xmm6, %xmm14 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm14 = 
xmm14[0],xmm15[0],xmm14[1],xmm15[1],xmm14[2],xmm15[2],xmm14[3],xmm15[3],xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7] +; SSE2-NEXT: movdqa %xmm14, %xmm7 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm15[0],xmm7[1],xmm15[1],xmm7[2],xmm15[2],xmm7[3],xmm15[3] +; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm15[8],xmm6[9],xmm15[9],xmm6[10],xmm15[10],xmm6[11],xmm15[11],xmm6[12],xmm15[12],xmm6[13],xmm15[13],xmm6[14],xmm15[14],xmm6[15],xmm15[15] +; SSE2-NEXT: movdqa %xmm6, %xmm8 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm15[0],xmm8[1],xmm15[1],xmm8[2],xmm15[2],xmm8[3],xmm15[3] +; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7] +; SSE2-NEXT: movdqa b+1040(%rax), %xmm9 +; SSE2-NEXT: movdqa %xmm9, %xmm13 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm15[8],xmm9[9],xmm15[9],xmm9[10],xmm15[10],xmm9[11],xmm15[11],xmm9[12],xmm15[12],xmm9[13],xmm15[13],xmm9[14],xmm15[14],xmm9[15],xmm15[15] +; SSE2-NEXT: movdqa %xmm9, %xmm10 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7] +; SSE2-NEXT: psubd %xmm9, %xmm6 +; SSE2-NEXT: movdqa b+1024(%rax), %xmm2 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm15[0],xmm13[1],xmm15[1],xmm13[2],xmm15[2],xmm13[3],xmm15[3],xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3] +; SSE2-NEXT: psubd %xmm10, %xmm8 +; SSE2-NEXT: movdqa %xmm13, %xmm9 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7] +; SSE2-NEXT: psubd %xmm13, %xmm14 +; SSE2-NEXT: movdqa %xmm2, %xmm10 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm15[8],xmm2[9],xmm15[9],xmm2[10],xmm15[10],xmm2[11],xmm15[11],xmm2[12],xmm15[12],xmm2[13],xmm15[13],xmm2[14],xmm15[14],xmm2[15],xmm15[15] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm15[0],xmm9[1],xmm15[1],xmm9[2],xmm15[2],xmm9[3],xmm15[3] +; SSE2-NEXT: psubd %xmm9, %xmm7 ; SSE2-NEXT: movdqa %xmm2, %xmm9 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm14[0],xmm9[1],xmm14[1],xmm9[2],xmm14[2],xmm9[3],xmm14[3],xmm9[4],xmm14[4],xmm9[5],xmm14[5],xmm9[6],xmm14[6],xmm9[7],xmm14[7] -; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7] -; SSE2-NEXT: psubd %xmm0, %xmm10 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm14[8],xmm7[9],xmm14[9],xmm7[10],xmm14[10],xmm7[11],xmm14[11],xmm7[12],xmm14[12],xmm7[13],xmm14[13],xmm7[14],xmm14[14],xmm7[15],xmm14[15] -; SSE2-NEXT: movdqa %xmm7, %xmm0 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3] -; SSE2-NEXT: psubd %xmm0, %xmm13 -; SSE2-NEXT: movdqa %xmm13, {{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa %xmm9, %xmm0 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3] -; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm14[4],xmm7[5],xmm14[5],xmm7[6],xmm14[6],xmm7[7],xmm14[7] -; SSE2-NEXT: psubd %xmm7, %xmm12 -; SSE2-NEXT: movdqa b+1040(%rax), %xmm13 -; SSE2-NEXT: movdqa %xmm13, %xmm3 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7] -; SSE2-NEXT: movdqa %xmm3, %xmm7 -; 
SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3] -; SSE2-NEXT: psubd %xmm7, %xmm0 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm14[4],xmm9[5],xmm14[5],xmm9[6],xmm14[6],xmm9[7],xmm14[7] -; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7] -; SSE2-NEXT: psubd %xmm3, %xmm9 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm14[8],xmm2[9],xmm14[9],xmm2[10],xmm14[10],xmm2[11],xmm14[11],xmm2[12],xmm14[12],xmm2[13],xmm14[13],xmm2[14],xmm14[14],xmm2[15],xmm14[15] -; SSE2-NEXT: movdqa %xmm2, %xmm7 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3] -; SSE2-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm14[8],xmm13[9],xmm14[9],xmm13[10],xmm14[10],xmm13[11],xmm14[11],xmm13[12],xmm14[12],xmm13[13],xmm14[13],xmm13[14],xmm14[14],xmm13[15],xmm14[15] -; SSE2-NEXT: movdqa %xmm13, %xmm3 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3] -; SSE2-NEXT: psubd %xmm3, %xmm7 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm14[4],xmm2[5],xmm14[5],xmm2[6],xmm14[6],xmm2[7],xmm14[7] -; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm14[4],xmm13[5],xmm14[5],xmm13[6],xmm14[6],xmm13[7],xmm14[7] -; SSE2-NEXT: psubd %xmm13, %xmm2 -; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa %xmm1, %xmm3 -; SSE2-NEXT: psrad $31, %xmm3 -; SSE2-NEXT: paddd %xmm3, %xmm1 -; SSE2-NEXT: pxor %xmm3, %xmm1 -; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload -; SSE2-NEXT: paddd %xmm1, %xmm3 -; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa %xmm6, %xmm1 -; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: paddd %xmm1, %xmm6 -; SSE2-NEXT: pxor %xmm1, %xmm6 -; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload -; SSE2-NEXT: paddd %xmm6, %xmm1 -; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload -; SSE2-NEXT: movdqa %xmm5, %xmm1 -; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: paddd %xmm1, %xmm5 -; SSE2-NEXT: pxor %xmm1, %xmm5 -; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload -; SSE2-NEXT: paddd %xmm5, %xmm1 -; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload -; SSE2-NEXT: movdqa %xmm4, %xmm1 -; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: paddd %xmm1, %xmm4 -; SSE2-NEXT: pxor %xmm1, %xmm4 -; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload -; SSE2-NEXT: paddd %xmm4, %xmm1 -; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm13 # 16-byte Reload -; SSE2-NEXT: movdqa %xmm8, %xmm1 -; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: paddd %xmm1, %xmm8 -; SSE2-NEXT: pxor %xmm1, %xmm8 -; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload -; SSE2-NEXT: paddd %xmm8, %xmm1 -; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload -; SSE2-NEXT: movdqa %xmm11, %xmm1 -; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: paddd %xmm1, %xmm11 -; SSE2-NEXT: pxor %xmm1, %xmm11 -; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload -; SSE2-NEXT: paddd %xmm11, %xmm1 -; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload -; SSE2-NEXT: movdqa (%rsp), %xmm4 # 16-byte Reload -; SSE2-NEXT: movdqa %xmm4, %xmm1 -; SSE2-NEXT: 
psrad $31, %xmm1 -; SSE2-NEXT: paddd %xmm1, %xmm4 -; SSE2-NEXT: pxor %xmm1, %xmm4 -; SSE2-NEXT: paddd %xmm4, %xmm3 -; SSE2-NEXT: movdqa %xmm3, %xmm11 -; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload -; SSE2-NEXT: movdqa %xmm15, %xmm1 -; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: paddd %xmm1, %xmm15 -; SSE2-NEXT: pxor %xmm1, %xmm15 -; SSE2-NEXT: paddd %xmm15, %xmm2 -; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload -; SSE2-NEXT: movdqa %xmm4, %xmm1 -; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: paddd %xmm1, %xmm4 -; SSE2-NEXT: pxor %xmm1, %xmm4 -; SSE2-NEXT: paddd %xmm4, %xmm6 -; SSE2-NEXT: movdqa %xmm6, %xmm15 -; SSE2-NEXT: movdqa %xmm10, %xmm1 -; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: paddd %xmm1, %xmm10 -; SSE2-NEXT: pxor %xmm1, %xmm10 -; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload -; SSE2-NEXT: paddd %xmm10, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm10 -; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload -; SSE2-NEXT: movdqa %xmm6, %xmm1 -; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: paddd %xmm1, %xmm6 -; SSE2-NEXT: pxor %xmm1, %xmm6 -; SSE2-NEXT: paddd %xmm6, %xmm3 -; SSE2-NEXT: movdqa %xmm12, %xmm1 -; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: paddd %xmm1, %xmm12 -; SSE2-NEXT: pxor %xmm1, %xmm12 -; SSE2-NEXT: paddd %xmm12, %xmm5 -; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: paddd %xmm1, %xmm0 -; SSE2-NEXT: pxor %xmm1, %xmm0 -; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload -; SSE2-NEXT: paddd %xmm0, %xmm13 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7] +; SSE2-NEXT: psubd %xmm2, %xmm4 +; SSE2-NEXT: movdqa b+1056(%rax), %xmm2 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3],xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm15[0],xmm9[1],xmm15[1],xmm9[2],xmm15[2],xmm9[3],xmm15[3] +; SSE2-NEXT: psubd %xmm9, %xmm0 +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm10, %xmm9 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7] +; SSE2-NEXT: psubd %xmm10, %xmm12 +; SSE2-NEXT: movdqa %xmm2, %xmm10 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3],xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm15[0],xmm9[1],xmm15[1],xmm9[2],xmm15[2],xmm9[3],xmm15[3] +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload +; SSE2-NEXT: psubd %xmm9, %xmm0 +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm2, %xmm9 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3] +; SSE2-NEXT: psubd %xmm2, %xmm11 +; SSE2-NEXT: movdqa %xmm1, %xmm13 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3] +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload +; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm15[4],xmm0[5],xmm15[5],xmm0[6],xmm15[6],xmm0[7],xmm15[7] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm15[8],xmm10[9],xmm15[9],xmm10[10],xmm15[10],xmm10[11],xmm15[11],xmm10[12],xmm15[12],xmm10[13],xmm15[13],xmm10[14],xmm15[14],xmm10[15],xmm15[15] +; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7] +; 
SSE2-NEXT: psubd %xmm9, %xmm0 +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm10, %xmm2 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3] +; SSE2-NEXT: psubd %xmm10, %xmm1 +; SSE2-NEXT: movdqa %xmm3, %xmm10 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3] +; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7] +; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7] +; SSE2-NEXT: psubd %xmm2, %xmm13 +; SSE2-NEXT: movdqa b+1072(%rax), %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3],xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7] +; SSE2-NEXT: movdqa %xmm2, %xmm9 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3] +; SSE2-NEXT: psubd %xmm2, %xmm3 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7] +; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7] +; SSE2-NEXT: psubd %xmm9, %xmm10 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm15[8],xmm5[9],xmm15[9],xmm5[10],xmm15[10],xmm5[11],xmm15[11],xmm5[12],xmm15[12],xmm5[13],xmm15[13],xmm5[14],xmm15[14],xmm5[15],xmm15[15] +; SSE2-NEXT: movdqa %xmm5, %xmm9 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm15[0],xmm5[1],xmm15[1],xmm5[2],xmm15[2],xmm5[3],xmm15[3] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm15[8],xmm0[9],xmm15[9],xmm0[10],xmm15[10],xmm0[11],xmm15[11],xmm0[12],xmm15[12],xmm0[13],xmm15[13],xmm0[14],xmm15[14],xmm0[15],xmm15[15] +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3] +; SSE2-NEXT: psubd %xmm0, %xmm5 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7] +; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7] +; SSE2-NEXT: psubd %xmm2, %xmm9 ; SSE2-NEXT: movdqa %xmm9, %xmm0 ; SSE2-NEXT: psrad $31, %xmm0 ; SSE2-NEXT: paddd %xmm0, %xmm9 ; SSE2-NEXT: pxor %xmm0, %xmm9 -; SSE2-NEXT: paddd %xmm9, %xmm1 -; SSE2-NEXT: movdqa %xmm7, %xmm0 +; SSE2-NEXT: movdqa %xmm5, %xmm0 ; SSE2-NEXT: psrad $31, %xmm0 -; SSE2-NEXT: paddd %xmm0, %xmm7 -; SSE2-NEXT: pxor %xmm0, %xmm7 -; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload -; SSE2-NEXT: paddd %xmm7, %xmm0 -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm8 # 16-byte Reload -; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload +; SSE2-NEXT: paddd %xmm0, %xmm5 +; SSE2-NEXT: pxor %xmm0, %xmm5 +; SSE2-NEXT: movdqa %xmm10, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: paddd %xmm0, %xmm10 +; SSE2-NEXT: pxor %xmm0, %xmm10 +; SSE2-NEXT: movdqa %xmm3, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: paddd %xmm0, %xmm3 +; SSE2-NEXT: pxor %xmm0, %xmm3 +; SSE2-NEXT: movdqa %xmm13, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: paddd %xmm0, %xmm13 +; SSE2-NEXT: pxor %xmm0, %xmm13 +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: paddd %xmm0, %xmm1 +; SSE2-NEXT: pxor %xmm0, %xmm1 +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload 
+; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: paddd %xmm0, %xmm2 +; SSE2-NEXT: pxor %xmm0, %xmm2 +; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm11, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: paddd %xmm0, %xmm11 +; SSE2-NEXT: pxor %xmm0, %xmm11 +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload +; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: paddd %xmm0, %xmm2 +; SSE2-NEXT: pxor %xmm0, %xmm2 +; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm12, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: paddd %xmm0, %xmm12 +; SSE2-NEXT: pxor %xmm0, %xmm12 +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload +; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: paddd %xmm0, %xmm2 +; SSE2-NEXT: pxor %xmm0, %xmm2 +; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm4, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: paddd %xmm0, %xmm4 +; SSE2-NEXT: pxor %xmm0, %xmm4 ; SSE2-NEXT: movdqa %xmm7, %xmm0 ; SSE2-NEXT: psrad $31, %xmm0 ; SSE2-NEXT: paddd %xmm0, %xmm7 ; SSE2-NEXT: pxor %xmm0, %xmm7 +; SSE2-NEXT: movdqa %xmm14, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: paddd %xmm0, %xmm14 +; SSE2-NEXT: pxor %xmm0, %xmm14 +; SSE2-NEXT: movdqa %xmm8, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: paddd %xmm0, %xmm8 +; SSE2-NEXT: pxor %xmm0, %xmm8 +; SSE2-NEXT: movdqa %xmm6, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: paddd %xmm0, %xmm6 +; SSE2-NEXT: pxor %xmm0, %xmm6 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload -; SSE2-NEXT: paddd %xmm7, %xmm0 +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload +; SSE2-NEXT: paddd %xmm6, %xmm2 +; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload +; SSE2-NEXT: paddd %xmm8, %xmm6 +; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 # 16-byte Reload +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload +; SSE2-NEXT: paddd %xmm14, %xmm2 +; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload +; SSE2-NEXT: paddd %xmm7, %xmm2 +; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm14 # 16-byte Reload +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload +; SSE2-NEXT: paddd %xmm4, %xmm2 +; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload +; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Folded Reload +; SSE2-NEXT: paddd %xmm12, %xmm8 +; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload +; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload +; SSE2-NEXT: movdqa %xmm0, %xmm12 +; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; SSE2-NEXT: paddd %xmm11, %xmm0 +; SSE2-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa (%rsp), %xmm11 # 16-byte Reload +; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload +; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload +; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload +; SSE2-NEXT: paddd %xmm1, %xmm2 +; SSE2-NEXT: paddd %xmm13, %xmm7 +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload +; SSE2-NEXT: paddd %xmm3, %xmm1 +; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm13 # 
16-byte Reload +; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload +; SSE2-NEXT: paddd %xmm10, %xmm1 +; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload +; SSE2-NEXT: paddd %xmm5, %xmm3 +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload +; SSE2-NEXT: paddd %xmm9, %xmm5 +; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload ; SSE2-NEXT: addq $4, %rax ; SSE2-NEXT: jne .LBB2_1 ; SSE2-NEXT: # BB#2: # %middle.block -; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Folded Reload -; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm8 # 16-byte Folded Reload -; SSE2-NEXT: paddd %xmm3, %xmm8 -; SSE2-NEXT: paddd %xmm2, %xmm15 -; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm13 # 16-byte Folded Reload -; SSE2-NEXT: paddd %xmm8, %xmm13 -; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Folded Reload -; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload -; SSE2-NEXT: paddd %xmm5, %xmm0 -; SSE2-NEXT: paddd %xmm11, %xmm10 -; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload -; SSE2-NEXT: paddd %xmm0, %xmm1 -; SSE2-NEXT: paddd %xmm10, %xmm1 -; SSE2-NEXT: paddd %xmm13, %xmm1 -; SSE2-NEXT: paddd %xmm15, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE2-NEXT: paddd %xmm2, %xmm4 +; SSE2-NEXT: paddd %xmm3, %xmm6 +; SSE2-NEXT: movdqa %xmm12, %xmm2 +; SSE2-NEXT: paddd %xmm11, %xmm2 +; SSE2-NEXT: paddd %xmm13, %xmm14 +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload +; SSE2-NEXT: paddd %xmm7, %xmm3 +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload +; SSE2-NEXT: paddd %xmm5, %xmm7 +; SSE2-NEXT: paddd %xmm0, %xmm8 +; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload ; SSE2-NEXT: paddd %xmm1, %xmm0 +; SSE2-NEXT: paddd %xmm3, %xmm7 +; SSE2-NEXT: paddd %xmm4, %xmm6 +; SSE2-NEXT: paddd %xmm14, %xmm6 +; SSE2-NEXT: paddd %xmm0, %xmm7 +; SSE2-NEXT: paddd %xmm8, %xmm7 +; SSE2-NEXT: paddd %xmm6, %xmm7 +; SSE2-NEXT: paddd %xmm2, %xmm7 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,0,1] +; SSE2-NEXT: paddd %xmm7, %xmm0 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; SSE2-NEXT: paddd %xmm0, %xmm1 ; SSE2-NEXT: movd %xmm1, %eax -; SSE2-NEXT: addq $200, %rsp +; SSE2-NEXT: addq $184, %rsp ; SSE2-NEXT: retq ; ; AVX2-LABEL: sad_avx64i8: @@ -688,8 +688,8 @@ define i32 @sad_avx64i8() nounwind { ; AVX2-NEXT: movq $-1024, %rax # imm = 0xFC00 ; AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2 ; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1 -; AVX2-NEXT: vpxor %ymm4, %ymm4, %ymm4 ; AVX2-NEXT: vpxor %ymm3, %ymm3, %ymm3 +; AVX2-NEXT: vpxor %ymm4, %ymm4, %ymm4 ; AVX2-NEXT: vpxor %ymm6, %ymm6, %ymm6 ; AVX2-NEXT: vpxor %ymm5, %ymm5, %ymm5 ; AVX2-NEXT: vpxor %ymm7, %ymm7, %ymm7 @@ -697,6 +697,7 @@ define i32 @sad_avx64i8() nounwind { ; AVX2-NEXT: .LBB2_1: # %vector.body ; AVX2-NEXT: # =>This Inner Loop Header: Depth=1 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero +; AVX2-NEXT: vmovdqu %ymm8, -{{[0-9]+}}(%rsp) # 32-byte Spill ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm9 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm10 = 
mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm11 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero @@ -704,49 +705,48 @@ define i32 @sad_avx64i8() nounwind { ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm13 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm14 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero -; AVX2-NEXT: vmovdqu %ymm15, -{{[0-9]+}}(%rsp) # 32-byte Spill +; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero +; AVX2-NEXT: vpsubd %ymm8, %ymm15, %ymm8 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero -; AVX2-NEXT: vpsubd %ymm15, %ymm8, %ymm8 -; AVX2-NEXT: vmovdqu %ymm8, -{{[0-9]+}}(%rsp) # 32-byte Spill +; AVX2-NEXT: vpsubd %ymm15, %ymm14, %ymm14 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero -; AVX2-NEXT: vpsubd %ymm15, %ymm9, %ymm9 +; AVX2-NEXT: vpsubd %ymm15, %ymm13, %ymm13 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero -; AVX2-NEXT: vpsubd %ymm15, %ymm10, %ymm10 +; AVX2-NEXT: vpsubd %ymm15, %ymm12, %ymm12 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero ; AVX2-NEXT: vpsubd %ymm15, %ymm11, %ymm11 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero -; AVX2-NEXT: vpsubd %ymm15, %ymm12, %ymm12 -; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero -; AVX2-NEXT: vpsubd %ymm15, %ymm13, %ymm13 +; AVX2-NEXT: vpsubd %ymm15, %ymm10, %ymm10 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero -; AVX2-NEXT: vpsubd %ymm15, %ymm14, %ymm14 +; AVX2-NEXT: vpsubd %ymm15, %ymm9, %ymm9 +; AVX2-NEXT: vmovdqu %ymm9, 
-{{[0-9]+}}(%rsp) # 32-byte Spill ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero -; AVX2-NEXT: vmovdqu -{{[0-9]+}}(%rsp), %ymm8 # 32-byte Reload -; AVX2-NEXT: vpsubd %ymm15, %ymm8, %ymm15 -; AVX2-NEXT: vpabsd -{{[0-9]+}}(%rsp), %ymm8 # 32-byte Folded Reload -; AVX2-NEXT: vpaddd %ymm7, %ymm8, %ymm7 -; AVX2-NEXT: vpabsd %ymm9, %ymm8 -; AVX2-NEXT: vpaddd %ymm5, %ymm8, %ymm5 -; AVX2-NEXT: vpabsd %ymm10, %ymm8 -; AVX2-NEXT: vpaddd %ymm6, %ymm8, %ymm6 -; AVX2-NEXT: vpabsd %ymm11, %ymm8 +; AVX2-NEXT: vmovdqu -{{[0-9]+}}(%rsp), %ymm9 # 32-byte Reload +; AVX2-NEXT: vpsubd %ymm15, %ymm9, %ymm15 +; AVX2-NEXT: vpabsd %ymm8, %ymm8 ; AVX2-NEXT: vpaddd %ymm3, %ymm8, %ymm3 -; AVX2-NEXT: vpabsd %ymm12, %ymm8 -; AVX2-NEXT: vpaddd %ymm0, %ymm8, %ymm0 -; AVX2-NEXT: vpabsd %ymm13, %ymm8 -; AVX2-NEXT: vpaddd %ymm2, %ymm8, %ymm2 ; AVX2-NEXT: vpabsd %ymm14, %ymm8 ; AVX2-NEXT: vpaddd %ymm1, %ymm8, %ymm1 -; AVX2-NEXT: vpabsd %ymm15, %ymm8 +; AVX2-NEXT: vpabsd %ymm13, %ymm8 +; AVX2-NEXT: vpaddd %ymm2, %ymm8, %ymm2 +; AVX2-NEXT: vpabsd %ymm12, %ymm8 +; AVX2-NEXT: vpaddd %ymm0, %ymm8, %ymm0 +; AVX2-NEXT: vpabsd %ymm11, %ymm8 ; AVX2-NEXT: vpaddd %ymm4, %ymm8, %ymm4 +; AVX2-NEXT: vpabsd %ymm10, %ymm8 +; AVX2-NEXT: vpaddd %ymm6, %ymm8, %ymm6 +; AVX2-NEXT: vpabsd -{{[0-9]+}}(%rsp), %ymm8 # 32-byte Folded Reload +; AVX2-NEXT: vpaddd %ymm5, %ymm8, %ymm5 +; AVX2-NEXT: vpabsd %ymm15, %ymm8 +; AVX2-NEXT: vpaddd %ymm7, %ymm8, %ymm7 ; AVX2-NEXT: addq $4, %rax ; AVX2-NEXT: jne .LBB2_1 ; AVX2-NEXT: # BB#2: # %middle.block ; AVX2-NEXT: vpaddd %ymm6, %ymm2, %ymm2 -; AVX2-NEXT: vpaddd %ymm7, %ymm4, %ymm4 -; AVX2-NEXT: vpaddd %ymm4, %ymm2, %ymm2 -; AVX2-NEXT: vpaddd %ymm3, %ymm0, %ymm0 +; AVX2-NEXT: vpaddd %ymm7, %ymm3, %ymm3 +; AVX2-NEXT: vpaddd %ymm4, %ymm0, %ymm0 ; AVX2-NEXT: vpaddd %ymm5, %ymm1, %ymm1 +; AVX2-NEXT: vpaddd %ymm3, %ymm2, %ymm2 ; AVX2-NEXT: vpaddd %ymm2, %ymm1, %ymm1 ; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 @@ -773,21 +773,21 @@ define i32 @sad_avx64i8() nounwind { ; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero ; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero ; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero +; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm9 = 
mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero +; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm10 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero +; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm11 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero +; AVX512F-NEXT: vpsubd %zmm11, %zmm7, %zmm7 +; AVX512F-NEXT: vpsubd %zmm10, %zmm6, %zmm6 +; AVX512F-NEXT: vpsubd %zmm9, %zmm5, %zmm5 ; AVX512F-NEXT: vpsubd %zmm8, %zmm4, %zmm4 -; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero -; AVX512F-NEXT: vpsubd %zmm8, %zmm5, %zmm5 -; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero -; AVX512F-NEXT: vpsubd %zmm8, %zmm6, %zmm6 -; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero -; AVX512F-NEXT: vpsubd %zmm8, %zmm7, %zmm7 ; AVX512F-NEXT: vpabsd %zmm4, %zmm4 +; AVX512F-NEXT: vpabsd %zmm5, %zmm5 +; AVX512F-NEXT: vpabsd %zmm6, %zmm6 +; AVX512F-NEXT: vpabsd %zmm7, %zmm7 +; AVX512F-NEXT: vpaddd %zmm3, %zmm7, %zmm3 +; AVX512F-NEXT: vpaddd %zmm2, %zmm6, %zmm2 +; AVX512F-NEXT: vpaddd %zmm1, %zmm5, %zmm1 ; AVX512F-NEXT: vpaddd %zmm0, %zmm4, %zmm0 -; AVX512F-NEXT: vpabsd %zmm5, %zmm4 -; AVX512F-NEXT: vpaddd %zmm1, %zmm4, %zmm1 -; AVX512F-NEXT: vpabsd %zmm6, %zmm4 -; AVX512F-NEXT: vpaddd %zmm2, %zmm4, %zmm2 -; AVX512F-NEXT: vpabsd %zmm7, %zmm4 -; AVX512F-NEXT: vpaddd %zmm3, %zmm4, %zmm3 ; AVX512F-NEXT: addq $4, %rax ; AVX512F-NEXT: jne .LBB2_1 ; AVX512F-NEXT: # BB#2: # %middle.block @@ -1154,54 +1154,59 @@ define i32 @sad_nonloop_32i8(<32 x i8>* nocapture readonly %p, i64, <32 x i8>* n ; SSE2-LABEL: sad_nonloop_32i8: ; SSE2: # BB#0: ; SSE2-NEXT: movdqu (%rdi), %xmm0 -; SSE2-NEXT: movdqu 16(%rdi), 
%xmm12 -; SSE2-NEXT: pxor %xmm1, %xmm1 -; SSE2-NEXT: movdqa %xmm12, %xmm8 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3],xmm8[4],xmm1[4],xmm8[5],xmm1[5],xmm8[6],xmm1[6],xmm8[7],xmm1[7] -; SSE2-NEXT: movdqa %xmm8, %xmm10 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm1[4],xmm10[5],xmm1[5],xmm10[6],xmm1[6],xmm10[7],xmm1[7] -; SSE2-NEXT: movdqa %xmm0, %xmm9 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1],xmm9[2],xmm1[2],xmm9[3],xmm1[3],xmm9[4],xmm1[4],xmm9[5],xmm1[5],xmm9[6],xmm1[6],xmm9[7],xmm1[7] -; SSE2-NEXT: movdqa %xmm9, %xmm11 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm1[4],xmm11[5],xmm1[5],xmm11[6],xmm1[6],xmm11[7],xmm1[7] -; SSE2-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm1[8],xmm12[9],xmm1[9],xmm12[10],xmm1[10],xmm12[11],xmm1[11],xmm12[12],xmm1[12],xmm12[13],xmm1[13],xmm12[14],xmm1[14],xmm12[15],xmm1[15] -; SSE2-NEXT: movdqa %xmm12, %xmm13 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm1[4],xmm13[5],xmm1[5],xmm13[6],xmm1[6],xmm13[7],xmm1[7] -; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] -; SSE2-NEXT: movdqa %xmm0, %xmm4 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1],xmm9[2],xmm1[2],xmm9[3],xmm1[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm1[0],xmm12[1],xmm1[1],xmm12[2],xmm1[2],xmm12[3],xmm1[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; SSE2-NEXT: movdqu (%rdx), %xmm7 -; SSE2-NEXT: movdqu 16(%rdx), %xmm3 -; SSE2-NEXT: movdqa %xmm3, %xmm6 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3],xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7] -; SSE2-NEXT: movdqa %xmm6, %xmm5 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7] -; SSE2-NEXT: psubd %xmm5, %xmm10 -; SSE2-NEXT: movdqa %xmm7, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-NEXT: movdqa %xmm2, %xmm5 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7] -; SSE2-NEXT: psubd %xmm5, %xmm11 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15] -; SSE2-NEXT: movdqa %xmm3, %xmm5 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7] -; SSE2-NEXT: psubd %xmm5, %xmm13 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm1[8],xmm7[9],xmm1[9],xmm7[10],xmm1[10],xmm7[11],xmm1[11],xmm7[12],xmm1[12],xmm7[13],xmm1[13],xmm7[14],xmm1[14],xmm7[15],xmm1[15] -; SSE2-NEXT: movdqa %xmm7, %xmm5 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7] -; SSE2-NEXT: psubd %xmm5, %xmm4 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3] -; SSE2-NEXT: psubd %xmm6, %xmm8 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = 
xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE2-NEXT: psubd %xmm2, %xmm9 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3] -; SSE2-NEXT: psubd %xmm3, %xmm12 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3] -; SSE2-NEXT: psubd %xmm7, %xmm0 +; SSE2-NEXT: movdqu 16(%rdi), %xmm3 +; SSE2-NEXT: pxor %xmm4, %xmm4 +; SSE2-NEXT: movdqa %xmm3, %xmm12 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3],xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7] +; SSE2-NEXT: movdqa %xmm12, %xmm9 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7] +; SSE2-NEXT: movdqa %xmm0, %xmm13 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm4[0],xmm13[1],xmm4[1],xmm13[2],xmm4[2],xmm13[3],xmm4[3],xmm13[4],xmm4[4],xmm13[5],xmm4[5],xmm13[6],xmm4[6],xmm13[7],xmm4[7] +; SSE2-NEXT: movdqa %xmm13, %xmm10 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15] +; SSE2-NEXT: movdqa %xmm3, %xmm11 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15] +; SSE2-NEXT: movdqa %xmm0, %xmm6 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm4[0],xmm13[1],xmm4[1],xmm13[2],xmm4[2],xmm13[3],xmm4[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3] +; SSE2-NEXT: movdqu (%rdx), %xmm5 +; SSE2-NEXT: movdqu 16(%rdx), %xmm7 +; SSE2-NEXT: movdqa %xmm7, %xmm1 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] +; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm5, %xmm2 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] +; SSE2-NEXT: movdqa %xmm2, %xmm14 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm4[4],xmm14[5],xmm4[5],xmm14[6],xmm4[6],xmm14[7],xmm4[7] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15] +; SSE2-NEXT: movdqa %xmm7, %xmm15 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm4[4],xmm15[5],xmm4[5],xmm15[6],xmm4[6],xmm15[7],xmm4[7] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15] +; SSE2-NEXT: 
movdqa %xmm5, %xmm8 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3] +; SSE2-NEXT: psubd %xmm5, %xmm0 +; SSE2-NEXT: psubd %xmm7, %xmm3 +; SSE2-NEXT: psubd %xmm2, %xmm13 +; SSE2-NEXT: psubd %xmm1, %xmm12 +; SSE2-NEXT: psubd %xmm8, %xmm6 +; SSE2-NEXT: psubd %xmm15, %xmm11 +; SSE2-NEXT: psubd %xmm14, %xmm10 +; SSE2-NEXT: psubd -{{[0-9]+}}(%rsp), %xmm9 # 16-byte Folded Reload +; SSE2-NEXT: movdqa %xmm9, %xmm1 +; SSE2-NEXT: psrad $31, %xmm1 +; SSE2-NEXT: paddd %xmm1, %xmm9 +; SSE2-NEXT: pxor %xmm1, %xmm9 ; SSE2-NEXT: movdqa %xmm10, %xmm1 ; SSE2-NEXT: psrad $31, %xmm1 ; SSE2-NEXT: paddd %xmm1, %xmm10 @@ -1210,37 +1215,33 @@ define i32 @sad_nonloop_32i8(<32 x i8>* nocapture readonly %p, i64, <32 x i8>* n ; SSE2-NEXT: psrad $31, %xmm1 ; SSE2-NEXT: paddd %xmm1, %xmm11 ; SSE2-NEXT: pxor %xmm1, %xmm11 -; SSE2-NEXT: movdqa %xmm13, %xmm1 -; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: paddd %xmm1, %xmm13 -; SSE2-NEXT: pxor %xmm1, %xmm13 -; SSE2-NEXT: movdqa %xmm4, %xmm1 -; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: paddd %xmm1, %xmm4 -; SSE2-NEXT: pxor %xmm1, %xmm4 -; SSE2-NEXT: paddd %xmm13, %xmm4 -; SSE2-NEXT: paddd %xmm10, %xmm4 -; SSE2-NEXT: paddd %xmm11, %xmm4 -; SSE2-NEXT: movdqa %xmm8, %xmm1 -; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: paddd %xmm1, %xmm8 -; SSE2-NEXT: pxor %xmm1, %xmm8 -; SSE2-NEXT: movdqa %xmm9, %xmm1 +; SSE2-NEXT: movdqa %xmm6, %xmm1 ; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: paddd %xmm1, %xmm9 -; SSE2-NEXT: pxor %xmm1, %xmm9 +; SSE2-NEXT: paddd %xmm1, %xmm6 +; SSE2-NEXT: pxor %xmm1, %xmm6 ; SSE2-NEXT: movdqa %xmm12, %xmm1 ; SSE2-NEXT: psrad $31, %xmm1 ; SSE2-NEXT: paddd %xmm1, %xmm12 ; SSE2-NEXT: pxor %xmm1, %xmm12 +; SSE2-NEXT: movdqa %xmm13, %xmm1 +; SSE2-NEXT: psrad $31, %xmm1 +; SSE2-NEXT: paddd %xmm1, %xmm13 +; SSE2-NEXT: pxor %xmm1, %xmm13 +; SSE2-NEXT: movdqa %xmm3, %xmm1 +; SSE2-NEXT: psrad $31, %xmm1 +; SSE2-NEXT: paddd %xmm1, %xmm3 +; SSE2-NEXT: pxor %xmm1, %xmm3 ; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: psrad $31, %xmm1 ; SSE2-NEXT: paddd %xmm1, %xmm0 ; SSE2-NEXT: pxor %xmm1, %xmm0 +; SSE2-NEXT: paddd %xmm3, %xmm0 +; SSE2-NEXT: paddd %xmm11, %xmm6 +; SSE2-NEXT: paddd %xmm9, %xmm6 +; SSE2-NEXT: paddd %xmm10, %xmm6 ; SSE2-NEXT: paddd %xmm12, %xmm0 -; SSE2-NEXT: paddd %xmm8, %xmm0 -; SSE2-NEXT: paddd %xmm4, %xmm0 -; SSE2-NEXT: paddd %xmm9, %xmm0 +; SSE2-NEXT: paddd %xmm6, %xmm0 +; SSE2-NEXT: paddd %xmm13, %xmm0 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; SSE2-NEXT: paddd %xmm0, %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3] diff --git a/test/CodeGen/X86/select.ll b/test/CodeGen/X86/select.ll index 1afef86a5f11..ce42d0d643e8 100644 --- a/test/CodeGen/X86/select.ll +++ b/test/CodeGen/X86/select.ll @@ -299,21 +299,20 @@ define void @test8(i1 %c, <6 x i32>* %dst.addr, <6 x i32> %src1,<6 x i32> %src2) ; GENERIC-NEXT: testb %dil, %dil ; GENERIC-NEXT: jne LBB7_4 ; GENERIC-NEXT: ## BB#5: -; GENERIC-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero ; GENERIC-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero -; GENERIC-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] ; GENERIC-NEXT: 
movd {{.*#+}} xmm3 = mem[0],zero,zero,zero +; GENERIC-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero ; GENERIC-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero ; GENERIC-NEXT: jmp LBB7_6 ; GENERIC-NEXT: LBB7_4: -; GENERIC-NEXT: movd %r9d, %xmm1 -; GENERIC-NEXT: movd %ecx, %xmm2 -; GENERIC-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; GENERIC-NEXT: movd %r8d, %xmm3 +; GENERIC-NEXT: movd %r9d, %xmm2 +; GENERIC-NEXT: movd %ecx, %xmm3 +; GENERIC-NEXT: movd %r8d, %xmm4 ; GENERIC-NEXT: movd %edx, %xmm1 ; GENERIC-NEXT: LBB7_6: +; GENERIC-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] ; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] -; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; GENERIC-NEXT: psubd {{.*}}(%rip), %xmm1 ; GENERIC-NEXT: psubd {{.*}}(%rip), %xmm0 ; GENERIC-NEXT: movq %xmm0, 16(%rsi) @@ -340,19 +339,16 @@ define void @test8(i1 %c, <6 x i32>* %dst.addr, <6 x i32> %src1,<6 x i32> %src2) ; ATOM-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero ; ATOM-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero ; ATOM-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero -; ATOM-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] -; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] -; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] ; ATOM-NEXT: jmp LBB7_6 ; ATOM-NEXT: LBB7_4: -; ATOM-NEXT: movd %r9d, %xmm1 -; ATOM-NEXT: movd %ecx, %xmm2 -; ATOM-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; ATOM-NEXT: movd %r8d, %xmm3 +; ATOM-NEXT: movd %r9d, %xmm2 +; ATOM-NEXT: movd %ecx, %xmm3 +; ATOM-NEXT: movd %r8d, %xmm4 ; ATOM-NEXT: movd %edx, %xmm1 -; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] -; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; ATOM-NEXT: LBB7_6: +; ATOM-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] +; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] ; ATOM-NEXT: psubd {{.*}}(%rip), %xmm0 ; ATOM-NEXT: psubd {{.*}}(%rip), %xmm1 ; ATOM-NEXT: movq %xmm0, 16(%rsi) diff --git a/test/CodeGen/X86/setcc-wide-types.ll b/test/CodeGen/X86/setcc-wide-types.ll index 332bf2887fb0..2996edaec3e0 100644 --- a/test/CodeGen/X86/setcc-wide-types.ll +++ b/test/CodeGen/X86/setcc-wide-types.ll @@ -58,25 +58,25 @@ define i32 @ne_i256(<4 x i64> %x, <4 x i64> %y) { ; SSE2-LABEL: ne_i256: ; SSE2: # BB#0: ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1] -; SSE2-NEXT: movq %xmm4, %rax +; SSE2-NEXT: movq %xmm4, %r8 ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] -; SSE2-NEXT: movq %xmm4, %rcx -; SSE2-NEXT: movq %xmm0, %rdx -; SSE2-NEXT: movq %xmm1, %r8 +; SSE2-NEXT: movq %xmm4, %r9 +; SSE2-NEXT: movq %xmm0, %r10 +; SSE2-NEXT: movq %xmm1, %rsi ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1] ; SSE2-NEXT: movq %xmm0, %rdi -; SSE2-NEXT: xorq %rax, %rdi ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1] -; SSE2-NEXT: movq %xmm0, %rsi -; SSE2-NEXT: xorq %rcx, %rsi -; SSE2-NEXT: orq %rdi, %rsi -; SSE2-NEXT: movq %xmm2, %rax -; SSE2-NEXT: xorq %rdx, %rax -; SSE2-NEXT: movq %xmm3, %rcx -; SSE2-NEXT: xorq %r8, %rcx -; SSE2-NEXT: orq %rax, %rcx +; SSE2-NEXT: movq %xmm0, %rax +; SSE2-NEXT: movq %xmm2, %rcx +; SSE2-NEXT: movq %xmm3, %rdx +; SSE2-NEXT: xorq %rsi, %rdx +; SSE2-NEXT: xorq %r10, %rcx +; SSE2-NEXT: orq %rdx, %rcx +; SSE2-NEXT: xorq 
%r9, %rax +; SSE2-NEXT: xorq %r8, %rdi +; SSE2-NEXT: orq %rax, %rdi ; SSE2-NEXT: xorl %eax, %eax -; SSE2-NEXT: orq %rsi, %rcx +; SSE2-NEXT: orq %rcx, %rdi ; SSE2-NEXT: setne %al ; SSE2-NEXT: retq ; @@ -100,25 +100,25 @@ define i32 @eq_i256(<4 x i64> %x, <4 x i64> %y) { ; SSE2-LABEL: eq_i256: ; SSE2: # BB#0: ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1] -; SSE2-NEXT: movq %xmm4, %rax +; SSE2-NEXT: movq %xmm4, %r8 ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] -; SSE2-NEXT: movq %xmm4, %rcx -; SSE2-NEXT: movq %xmm0, %rdx -; SSE2-NEXT: movq %xmm1, %r8 +; SSE2-NEXT: movq %xmm4, %r9 +; SSE2-NEXT: movq %xmm0, %r10 +; SSE2-NEXT: movq %xmm1, %rsi ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1] ; SSE2-NEXT: movq %xmm0, %rdi -; SSE2-NEXT: xorq %rax, %rdi ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1] -; SSE2-NEXT: movq %xmm0, %rsi -; SSE2-NEXT: xorq %rcx, %rsi -; SSE2-NEXT: orq %rdi, %rsi -; SSE2-NEXT: movq %xmm2, %rax -; SSE2-NEXT: xorq %rdx, %rax -; SSE2-NEXT: movq %xmm3, %rcx -; SSE2-NEXT: xorq %r8, %rcx -; SSE2-NEXT: orq %rax, %rcx +; SSE2-NEXT: movq %xmm0, %rax +; SSE2-NEXT: movq %xmm2, %rcx +; SSE2-NEXT: movq %xmm3, %rdx +; SSE2-NEXT: xorq %rsi, %rdx +; SSE2-NEXT: xorq %r10, %rcx +; SSE2-NEXT: orq %rdx, %rcx +; SSE2-NEXT: xorq %r9, %rax +; SSE2-NEXT: xorq %r8, %rdi +; SSE2-NEXT: orq %rax, %rdi ; SSE2-NEXT: xorl %eax, %eax -; SSE2-NEXT: orq %rsi, %rcx +; SSE2-NEXT: orq %rcx, %rdi ; SSE2-NEXT: sete %al ; SSE2-NEXT: retq ; diff --git a/test/CodeGen/X86/shrink_vmul_sse.ll b/test/CodeGen/X86/shrink_vmul_sse.ll index 6701c247e6fc..c869dff9e642 100644 --- a/test/CodeGen/X86/shrink_vmul_sse.ll +++ b/test/CodeGen/X86/shrink_vmul_sse.ll @@ -20,9 +20,9 @@ define void @mul_2xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 ; CHECK-NEXT: movzbl 1(%edx,%ecx), %edi ; CHECK-NEXT: movzbl (%edx,%ecx), %edx ; CHECK-NEXT: movzbl 1(%eax,%ecx), %ebx -; CHECK-NEXT: imull %edi, %ebx ; CHECK-NEXT: movzbl (%eax,%ecx), %eax ; CHECK-NEXT: imull %edx, %eax +; CHECK-NEXT: imull %edi, %ebx ; CHECK-NEXT: movl %ebx, 4(%esi,%ecx,4) ; CHECK-NEXT: movl %eax, (%esi,%ecx,4) ; CHECK-NEXT: popl %esi diff --git a/test/CodeGen/X86/sse-intrinsics-fast-isel.ll b/test/CodeGen/X86/sse-intrinsics-fast-isel.ll index d99cfaf535de..0b03dffe99b5 100644 --- a/test/CodeGen/X86/sse-intrinsics-fast-isel.ll +++ b/test/CodeGen/X86/sse-intrinsics-fast-isel.ll @@ -1537,9 +1537,9 @@ define <4 x float> @test_mm_set_ps(float %a0, float %a1, float %a2, float %a3) n ; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero ; X32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; X32-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; X32-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] ; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; X32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero -; X32-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; X32-NEXT: retl ; @@ -1673,13 +1673,13 @@ define void @test_mm_setcsr(i32 %a0) nounwind { define <4 x float> @test_mm_setr_ps(float %a0, float %a1, float %a2, float %a3) nounwind { ; X32-LABEL: test_mm_setr_ps: ; X32: # BB#0: -; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero ; X32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero -; X32-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; X32-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero ; 
X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero -; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; X32-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] ; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] ; X32-NEXT: retl ; ; X64-LABEL: test_mm_setr_ps: diff --git a/test/CodeGen/X86/sse-scalar-fp-arith.ll b/test/CodeGen/X86/sse-scalar-fp-arith.ll index f711dc615742..4b2af6fce8de 100644 --- a/test/CodeGen/X86/sse-scalar-fp-arith.ll +++ b/test/CodeGen/X86/sse-scalar-fp-arith.ll @@ -1119,9 +1119,9 @@ define <4 x float> @add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, ; ; AVX512-LABEL: add_ss_mask: ; AVX512: # BB#0: -; AVX512-NEXT: andl $1, %edi +; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm1 ; AVX512-NEXT: kmovw %edi, %k1 -; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm2 {%k1} +; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm2 {%k1} ; AVX512-NEXT: vmovaps %xmm2, %xmm0 ; AVX512-NEXT: retq %1 = extractelement <4 x float> %a, i64 0 @@ -1174,9 +1174,9 @@ define <2 x double> @add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> ; ; AVX512-LABEL: add_sd_mask: ; AVX512: # BB#0: -; AVX512-NEXT: andl $1, %edi +; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm1 ; AVX512-NEXT: kmovw %edi, %k1 -; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm2 {%k1} +; AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm2 {%k1} ; AVX512-NEXT: vmovapd %xmm2, %xmm0 ; AVX512-NEXT: retq %1 = extractelement <2 x double> %a, i64 0 diff --git a/test/CodeGen/X86/sse1.ll b/test/CodeGen/X86/sse1.ll index 68ab3f9f3205..f4964b5a6f66 100644 --- a/test/CodeGen/X86/sse1.ll +++ b/test/CodeGen/X86/sse1.ll @@ -66,8 +66,8 @@ define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) { ; X32-NEXT: jne .LBB1_8 ; X32-NEXT: .LBB1_7: ; X32-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero -; X32-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp) +; X32-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; X32-NEXT: je .LBB1_10 ; X32-NEXT: jmp .LBB1_11 ; X32-NEXT: .LBB1_1: @@ -80,8 +80,8 @@ define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) { ; X32-NEXT: je .LBB1_7 ; X32-NEXT: .LBB1_8: # %entry ; X32-NEXT: xorps %xmm3, %xmm3 -; X32-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp) +; X32-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; X32-NEXT: jne .LBB1_11 ; X32-NEXT: .LBB1_10: ; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero @@ -105,8 +105,8 @@ define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) { ; X64-NEXT: jne .LBB1_8 ; X64-NEXT: .LBB1_7: ; X64-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero -; X64-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; X64-NEXT: testl %esi, %esi +; X64-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; X64-NEXT: je .LBB1_10 ; X64-NEXT: jmp .LBB1_11 ; X64-NEXT: .LBB1_1: @@ -119,8 +119,8 @@ define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) { ; X64-NEXT: je .LBB1_7 ; X64-NEXT: .LBB1_8: # %entry ; X64-NEXT: xorps %xmm3, %xmm3 -; X64-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; X64-NEXT: testl %esi, %esi +; X64-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; X64-NEXT: jne .LBB1_11 ; X64-NEXT: .LBB1_10: ; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero diff --git a/test/CodeGen/X86/sse3-avx-addsub-2.ll b/test/CodeGen/X86/sse3-avx-addsub-2.ll index aed5e0d1c32e..4d895ea264c5 100644 --- 
a/test/CodeGen/X86/sse3-avx-addsub-2.ll +++ b/test/CodeGen/X86/sse3-avx-addsub-2.ll @@ -412,14 +412,14 @@ define <4 x float> @test16(<4 x float> %A, <4 x float> %B) { ; SSE-NEXT: movaps %xmm1, %xmm4 ; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1] ; SSE-NEXT: subss %xmm4, %xmm3 -; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] -; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3] -; SSE-NEXT: addss %xmm0, %xmm3 +; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3] +; SSE-NEXT: addss %xmm0, %xmm4 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3] ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3] ; SSE-NEXT: addss %xmm0, %xmm1 -; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] +; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1] ; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] ; SSE-NEXT: movaps %xmm2, %xmm0 ; SSE-NEXT: retq ; @@ -431,12 +431,12 @@ define <4 x float> @test16(<4 x float> %A, <4 x float> %B) { ; AVX-NEXT: vsubss %xmm4, %xmm3, %xmm3 ; AVX-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3] ; AVX-NEXT: vaddss %xmm0, %xmm4, %xmm4 -; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[2,3] -; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3] ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] ; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3] ; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0 -; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0] +; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm4[0],xmm2[2,3] +; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3] +; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] ; AVX-NEXT: retq %1 = extractelement <4 x float> %A, i32 0 %2 = extractelement <4 x float> %B, i32 0 diff --git a/test/CodeGen/X86/sse41.ll b/test/CodeGen/X86/sse41.ll index 4a0dc9c1eb17..503b9416c8d3 100644 --- a/test/CodeGen/X86/sse41.ll +++ b/test/CodeGen/X86/sse41.ll @@ -273,8 +273,8 @@ define <2 x float> @buildvector(<2 x float> %A, <2 x float> %B) nounwind { ; X32: ## BB#0: ## %entry ; X32-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3] ; X32-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3] -; X32-NEXT: addss %xmm2, %xmm3 ; X32-NEXT: addss %xmm1, %xmm0 +; X32-NEXT: addss %xmm2, %xmm3 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3] ; X32-NEXT: retl ; @@ -282,8 +282,8 @@ define <2 x float> @buildvector(<2 x float> %A, <2 x float> %B) nounwind { ; X64: ## BB#0: ## %entry ; X64-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3] ; X64-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3] -; X64-NEXT: addss %xmm2, %xmm3 ; X64-NEXT: addss %xmm1, %xmm0 +; X64-NEXT: addss %xmm2, %xmm3 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3] ; X64-NEXT: retq entry: @@ -896,9 +896,9 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl ; X32-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0] ; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0] -; X32-NEXT: addps %xmm1, %xmm0 ; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[0] ; X32-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0] +; X32-NEXT: addps %xmm1, %xmm0 ; X32-NEXT: addps %xmm2, %xmm3 ; X32-NEXT: addps %xmm3, %xmm0 ; X32-NEXT: retl @@ -908,9 +908,9 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl ; X64-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0] ; X64-NEXT: insertps 
{{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0] -; X64-NEXT: addps %xmm1, %xmm0 ; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[0] ; X64-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0] +; X64-NEXT: addps %xmm1, %xmm0 ; X64-NEXT: addps %xmm2, %xmm3 ; X64-NEXT: addps %xmm3, %xmm0 ; X64-NEXT: retq diff --git a/test/CodeGen/X86/subcarry.ll b/test/CodeGen/X86/subcarry.ll new file mode 100644 index 000000000000..df676328f682 --- /dev/null +++ b/test/CodeGen/X86/subcarry.ll @@ -0,0 +1,137 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s + +%S = type { [4 x i64] } + +define %S @negate(%S* nocapture readonly %this) { +; CHECK-LABEL: negate: +; CHECK: # BB#0: # %entry +; CHECK-NEXT: movq (%rsi), %rax +; CHECK-NEXT: movq 8(%rsi), %rcx +; CHECK-NEXT: notq %rax +; CHECK-NEXT: addq $1, %rax +; CHECK-NEXT: notq %rcx +; CHECK-NEXT: adcq $0, %rcx +; CHECK-NEXT: movq 16(%rsi), %rdx +; CHECK-NEXT: notq %rdx +; CHECK-NEXT: adcq $0, %rdx +; CHECK-NEXT: movq 24(%rsi), %rsi +; CHECK-NEXT: notq %rsi +; CHECK-NEXT: adcq $0, %rsi +; CHECK-NEXT: movq %rax, (%rdi) +; CHECK-NEXT: movq %rcx, 8(%rdi) +; CHECK-NEXT: movq %rdx, 16(%rdi) +; CHECK-NEXT: movq %rsi, 24(%rdi) +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: retq +entry: + %0 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 0 + %1 = load i64, i64* %0, align 8 + %2 = xor i64 %1, -1 + %3 = zext i64 %2 to i128 + %4 = add nuw nsw i128 %3, 1 + %5 = trunc i128 %4 to i64 + %6 = lshr i128 %4, 64 + %7 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 1 + %8 = load i64, i64* %7, align 8 + %9 = xor i64 %8, -1 + %10 = zext i64 %9 to i128 + %11 = add nuw nsw i128 %6, %10 + %12 = trunc i128 %11 to i64 + %13 = lshr i128 %11, 64 + %14 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 2 + %15 = load i64, i64* %14, align 8 + %16 = xor i64 %15, -1 + %17 = zext i64 %16 to i128 + %18 = add nuw nsw i128 %13, %17 + %19 = lshr i128 %18, 64 + %20 = trunc i128 %18 to i64 + %21 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 3 + %22 = load i64, i64* %21, align 8 + %23 = xor i64 %22, -1 + %24 = zext i64 %23 to i128 + %25 = add nuw nsw i128 %19, %24 + %26 = trunc i128 %25 to i64 + %27 = insertvalue [4 x i64] undef, i64 %5, 0 + %28 = insertvalue [4 x i64] %27, i64 %12, 1 + %29 = insertvalue [4 x i64] %28, i64 %20, 2 + %30 = insertvalue [4 x i64] %29, i64 %26, 3 + %31 = insertvalue %S undef, [4 x i64] %30, 0 + ret %S %31 +} + +define %S @sub(%S* nocapture readonly %this, %S %arg.b) local_unnamed_addr { +; CHECK-LABEL: sub: +; CHECK: # BB#0: # %entry +; CHECK-NEXT: notq %rdx +; CHECK-NEXT: xorl %r10d, %r10d +; CHECK-NEXT: addq (%rsi), %rdx +; CHECK-NEXT: setb %r10b +; CHECK-NEXT: addq $1, %rdx +; CHECK-NEXT: adcq 8(%rsi), %r10 +; CHECK-NEXT: setb %al +; CHECK-NEXT: movzbl %al, %r11d +; CHECK-NEXT: notq %rcx +; CHECK-NEXT: addq %r10, %rcx +; CHECK-NEXT: adcq 16(%rsi), %r11 +; CHECK-NEXT: setb %al +; CHECK-NEXT: movzbl %al, %eax +; CHECK-NEXT: notq %r8 +; CHECK-NEXT: addq %r11, %r8 +; CHECK-NEXT: adcq 24(%rsi), %rax +; CHECK-NEXT: notq %r9 +; CHECK-NEXT: addq %rax, %r9 +; CHECK-NEXT: movq %rdx, (%rdi) +; CHECK-NEXT: movq %rcx, 8(%rdi) +; CHECK-NEXT: movq %r8, 16(%rdi) +; CHECK-NEXT: movq %r9, 24(%rdi) +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: retq +entry: + %0 = extractvalue %S %arg.b, 0 + %.elt6 = extractvalue [4 x i64] %0, 1 + %.elt8 = extractvalue [4 x i64] %0, 2 + %.elt10 = extractvalue [4 x i64] %0, 3 + %.elt = extractvalue [4 x i64] %0, 0 + %1 = getelementptr inbounds 
%S, %S* %this, i64 0, i32 0, i64 0 + %2 = load i64, i64* %1, align 8 + %3 = zext i64 %2 to i128 + %4 = add nuw nsw i128 %3, 1 + %5 = xor i64 %.elt, -1 + %6 = zext i64 %5 to i128 + %7 = add nuw nsw i128 %4, %6 + %8 = trunc i128 %7 to i64 + %9 = lshr i128 %7, 64 + %10 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 1 + %11 = load i64, i64* %10, align 8 + %12 = zext i64 %11 to i128 + %13 = add nuw nsw i128 %9, %12 + %14 = xor i64 %.elt6, -1 + %15 = zext i64 %14 to i128 + %16 = add nuw nsw i128 %13, %15 + %17 = trunc i128 %16 to i64 + %18 = lshr i128 %16, 64 + %19 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 2 + %20 = load i64, i64* %19, align 8 + %21 = zext i64 %20 to i128 + %22 = add nuw nsw i128 %18, %21 + %23 = xor i64 %.elt8, -1 + %24 = zext i64 %23 to i128 + %25 = add nuw nsw i128 %22, %24 + %26 = lshr i128 %25, 64 + %27 = trunc i128 %25 to i64 + %28 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 3 + %29 = load i64, i64* %28, align 8 + %30 = zext i64 %29 to i128 + %31 = add nuw nsw i128 %26, %30 + %32 = xor i64 %.elt10, -1 + %33 = zext i64 %32 to i128 + %34 = add nuw nsw i128 %31, %33 + %35 = trunc i128 %34 to i64 + %36 = insertvalue [4 x i64] undef, i64 %8, 0 + %37 = insertvalue [4 x i64] %36, i64 %17, 1 + %38 = insertvalue [4 x i64] %37, i64 %27, 2 + %39 = insertvalue [4 x i64] %38, i64 %35, 3 + %40 = insertvalue %S undef, [4 x i64] %39, 0 + ret %S %40 +} diff --git a/test/CodeGen/X86/vec_int_to_fp.ll b/test/CodeGen/X86/vec_int_to_fp.ll index 1eef67764ab9..a42b3c96c3ae 100644 --- a/test/CodeGen/X86/vec_int_to_fp.ll +++ b/test/CodeGen/X86/vec_int_to_fp.ll @@ -4344,7 +4344,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) { ; AVX1-NEXT: testq %rax, %rax ; AVX1-NEXT: js .LBB80_4 ; AVX1-NEXT: # BB#5: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm4 +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3 ; AVX1-NEXT: jmp .LBB80_6 ; AVX1-NEXT: .LBB80_4: ; AVX1-NEXT: movq %rax, %rcx @@ -4352,22 +4352,22 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) { ; AVX1-NEXT: andl $1, %eax ; AVX1-NEXT: orq %rcx, %rax ; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3 -; AVX1-NEXT: vaddss %xmm3, %xmm3, %xmm4 +; AVX1-NEXT: vaddss %xmm3, %xmm3, %xmm3 ; AVX1-NEXT: .LBB80_6: ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2 ; AVX1-NEXT: vmovq %xmm2, %rax ; AVX1-NEXT: testq %rax, %rax ; AVX1-NEXT: js .LBB80_7 ; AVX1-NEXT: # BB#8: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm3 +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm4 ; AVX1-NEXT: jmp .LBB80_9 ; AVX1-NEXT: .LBB80_7: ; AVX1-NEXT: movq %rax, %rcx ; AVX1-NEXT: shrq %rcx ; AVX1-NEXT: andl $1, %eax ; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm3 -; AVX1-NEXT: vaddss %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm4 +; AVX1-NEXT: vaddss %xmm4, %xmm4, %xmm4 ; AVX1-NEXT: .LBB80_9: ; AVX1-NEXT: vpextrq $1, %xmm2, %rax ; AVX1-NEXT: testq %rax, %rax @@ -4397,29 +4397,29 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) { ; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm5 ; AVX1-NEXT: vaddss %xmm5, %xmm5, %xmm5 ; AVX1-NEXT: .LBB80_15: -; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[2,3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[2,3] ; AVX1-NEXT: vmovq %xmm0, %rax ; AVX1-NEXT: testq %rax, %rax ; AVX1-NEXT: js .LBB80_16 ; AVX1-NEXT: # BB#17: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm4 +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm3 ; AVX1-NEXT: jmp .LBB80_18 ; AVX1-NEXT: .LBB80_16: ; AVX1-NEXT: movq %rax, %rcx ; AVX1-NEXT: shrq %rcx ; AVX1-NEXT: andl $1, %eax ; AVX1-NEXT: 
orq %rcx, %rax -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm4 -; AVX1-NEXT: vaddss %xmm4, %xmm4, %xmm4 +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm3 +; AVX1-NEXT: vaddss %xmm3, %xmm3, %xmm3 ; AVX1-NEXT: .LBB80_18: -; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3] -; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3] -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vmovq %xmm3, %rax +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0],xmm1[3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4 +; AVX1-NEXT: vmovq %xmm4, %rax ; AVX1-NEXT: testq %rax, %rax ; AVX1-NEXT: js .LBB80_19 ; AVX1-NEXT: # BB#20: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm0 +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm5 ; AVX1-NEXT: jmp .LBB80_21 ; AVX1-NEXT: .LBB80_19: ; AVX1-NEXT: movq %rax, %rcx @@ -4427,25 +4427,25 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) { ; AVX1-NEXT: andl $1, %eax ; AVX1-NEXT: orq %rcx, %rax ; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm0 -; AVX1-NEXT: vaddss %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vaddss %xmm0, %xmm0, %xmm5 ; AVX1-NEXT: .LBB80_21: -; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm0[0],xmm4[3] ; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm2[0] -; AVX1-NEXT: vpextrq $1, %xmm3, %rax +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0,1],xmm5[0],xmm3[3] +; AVX1-NEXT: vpextrq $1, %xmm4, %rax ; AVX1-NEXT: testq %rax, %rax ; AVX1-NEXT: js .LBB80_22 ; AVX1-NEXT: # BB#23: -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm1 +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm2 ; AVX1-NEXT: jmp .LBB80_24 ; AVX1-NEXT: .LBB80_22: ; AVX1-NEXT: movq %rax, %rcx ; AVX1-NEXT: shrq %rcx ; AVX1-NEXT: andl $1, %eax ; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm1 -; AVX1-NEXT: vaddss %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm2 +; AVX1-NEXT: vaddss %xmm2, %xmm2, %xmm2 ; AVX1-NEXT: .LBB80_24: -; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[0] +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0] ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; AVX1-NEXT: retq ; @@ -4471,7 +4471,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) { ; AVX2-NEXT: testq %rax, %rax ; AVX2-NEXT: js .LBB80_4 ; AVX2-NEXT: # BB#5: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm4 +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3 ; AVX2-NEXT: jmp .LBB80_6 ; AVX2-NEXT: .LBB80_4: ; AVX2-NEXT: movq %rax, %rcx @@ -4479,22 +4479,22 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) { ; AVX2-NEXT: andl $1, %eax ; AVX2-NEXT: orq %rcx, %rax ; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3 -; AVX2-NEXT: vaddss %xmm3, %xmm3, %xmm4 +; AVX2-NEXT: vaddss %xmm3, %xmm3, %xmm3 ; AVX2-NEXT: .LBB80_6: ; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2 ; AVX2-NEXT: vmovq %xmm2, %rax ; AVX2-NEXT: testq %rax, %rax ; AVX2-NEXT: js .LBB80_7 ; AVX2-NEXT: # BB#8: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm3 +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm4 ; AVX2-NEXT: jmp .LBB80_9 ; AVX2-NEXT: .LBB80_7: ; AVX2-NEXT: movq %rax, %rcx ; AVX2-NEXT: shrq %rcx ; AVX2-NEXT: andl $1, %eax ; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm3 -; AVX2-NEXT: vaddss %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm4 +; AVX2-NEXT: vaddss %xmm4, %xmm4, %xmm4 ; AVX2-NEXT: .LBB80_9: ; AVX2-NEXT: vpextrq $1, %xmm2, %rax ; AVX2-NEXT: testq %rax, %rax @@ -4524,29 +4524,29 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) { ; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm5 ; AVX2-NEXT: vaddss 
%xmm5, %xmm5, %xmm5 ; AVX2-NEXT: .LBB80_15: -; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[2,3] +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[2,3] ; AVX2-NEXT: vmovq %xmm0, %rax ; AVX2-NEXT: testq %rax, %rax ; AVX2-NEXT: js .LBB80_16 ; AVX2-NEXT: # BB#17: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm4 +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm3 ; AVX2-NEXT: jmp .LBB80_18 ; AVX2-NEXT: .LBB80_16: ; AVX2-NEXT: movq %rax, %rcx ; AVX2-NEXT: shrq %rcx ; AVX2-NEXT: andl $1, %eax ; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm4 -; AVX2-NEXT: vaddss %xmm4, %xmm4, %xmm4 +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm3 +; AVX2-NEXT: vaddss %xmm3, %xmm3, %xmm3 ; AVX2-NEXT: .LBB80_18: -; AVX2-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3] -; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3] -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3 -; AVX2-NEXT: vmovq %xmm3, %rax +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0],xmm1[3] +; AVX2-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3] +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4 +; AVX2-NEXT: vmovq %xmm4, %rax ; AVX2-NEXT: testq %rax, %rax ; AVX2-NEXT: js .LBB80_19 ; AVX2-NEXT: # BB#20: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm0 +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm5 ; AVX2-NEXT: jmp .LBB80_21 ; AVX2-NEXT: .LBB80_19: ; AVX2-NEXT: movq %rax, %rcx @@ -4554,25 +4554,25 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) { ; AVX2-NEXT: andl $1, %eax ; AVX2-NEXT: orq %rcx, %rax ; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm0 -; AVX2-NEXT: vaddss %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vaddss %xmm0, %xmm0, %xmm5 ; AVX2-NEXT: .LBB80_21: -; AVX2-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm0[0],xmm4[3] ; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm2[0] -; AVX2-NEXT: vpextrq $1, %xmm3, %rax +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0,1],xmm5[0],xmm3[3] +; AVX2-NEXT: vpextrq $1, %xmm4, %rax ; AVX2-NEXT: testq %rax, %rax ; AVX2-NEXT: js .LBB80_22 ; AVX2-NEXT: # BB#23: -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm1 +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm2 ; AVX2-NEXT: jmp .LBB80_24 ; AVX2-NEXT: .LBB80_22: ; AVX2-NEXT: movq %rax, %rcx ; AVX2-NEXT: shrq %rcx ; AVX2-NEXT: andl $1, %eax ; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm1 -; AVX2-NEXT: vaddss %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm2 +; AVX2-NEXT: vaddss %xmm2, %xmm2, %xmm2 ; AVX2-NEXT: .LBB80_24: -; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[0] +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0] ; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; AVX2-NEXT: retq ; diff --git a/test/CodeGen/X86/vector-bitreverse.ll b/test/CodeGen/X86/vector-bitreverse.ll index 2fb821555dba..226c0adbaf3c 100644 --- a/test/CodeGen/X86/vector-bitreverse.ll +++ b/test/CodeGen/X86/vector-bitreverse.ll @@ -2372,10 +2372,10 @@ define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind { ; AVX512F-NEXT: vporq %zmm1, %zmm2, %zmm1 ; AVX512F-NEXT: vpsrlq $24, %zmm0, %zmm2 ; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm2, %zmm2 +; AVX512F-NEXT: vpsrlq $8, %zmm0, %zmm3 +; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm3, %zmm3 ; AVX512F-NEXT: vporq %zmm1, %zmm2, %zmm1 -; AVX512F-NEXT: vpsrlq $8, %zmm0, %zmm2 -; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm2, %zmm2 -; AVX512F-NEXT: vporq %zmm1, %zmm2, %zmm1 +; AVX512F-NEXT: vporq %zmm1, %zmm3, %zmm1 ; AVX512F-NEXT: vpsllq $8, %zmm0, %zmm2 ; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm2, %zmm2 ; AVX512F-NEXT: vpsllq $24, %zmm0, 
%zmm3 diff --git a/test/CodeGen/X86/vector-blend.ll b/test/CodeGen/X86/vector-blend.ll index f0a5fe1dbfff..a05a981daa1f 100644 --- a/test/CodeGen/X86/vector-blend.ll +++ b/test/CodeGen/X86/vector-blend.ll @@ -848,10 +848,10 @@ define <8 x i32> @blend_logic_v8i32(<8 x i32> %b, <8 x i32> %a, <8 x i32> %c) { ; SSE2-NEXT: psrad $31, %xmm1 ; SSE2-NEXT: pand %xmm1, %xmm3 ; SSE2-NEXT: pandn %xmm5, %xmm1 -; SSE2-NEXT: por %xmm3, %xmm1 ; SSE2-NEXT: pand %xmm0, %xmm2 ; SSE2-NEXT: pandn %xmm4, %xmm0 ; SSE2-NEXT: por %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm1 ; SSE2-NEXT: retq ; ; SSSE3-LABEL: blend_logic_v8i32: @@ -860,10 +860,10 @@ define <8 x i32> @blend_logic_v8i32(<8 x i32> %b, <8 x i32> %a, <8 x i32> %c) { ; SSSE3-NEXT: psrad $31, %xmm1 ; SSSE3-NEXT: pand %xmm1, %xmm3 ; SSSE3-NEXT: pandn %xmm5, %xmm1 -; SSSE3-NEXT: por %xmm3, %xmm1 ; SSSE3-NEXT: pand %xmm0, %xmm2 ; SSSE3-NEXT: pandn %xmm4, %xmm0 ; SSSE3-NEXT: por %xmm2, %xmm0 +; SSSE3-NEXT: por %xmm3, %xmm1 ; SSSE3-NEXT: retq ; ; SSE41-LABEL: blend_logic_v8i32: diff --git a/test/CodeGen/X86/vector-sqrt.ll b/test/CodeGen/X86/vector-sqrt.ll index 8081e9482d67..c5ac4466b5fa 100644 --- a/test/CodeGen/X86/vector-sqrt.ll +++ b/test/CodeGen/X86/vector-sqrt.ll @@ -29,11 +29,11 @@ define <4 x float> @sqrtf4(float* nocapture readonly %v) local_unnamed_addr #0 { ; CHECK: # BB#0: # %entry ; CHECK-NEXT: vsqrtss (%rdi), %xmm0, %xmm0 ; CHECK-NEXT: vsqrtss 4(%rdi), %xmm1, %xmm1 +; CHECK-NEXT: vsqrtss 8(%rdi), %xmm2, %xmm2 +; CHECK-NEXT: vsqrtss 12(%rdi), %xmm3, %xmm3 ; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] -; CHECK-NEXT: vsqrtss 8(%rdi), %xmm2, %xmm1 -; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3] -; CHECK-NEXT: vsqrtss 12(%rdi), %xmm2, %xmm1 -; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0] +; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3] +; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0] ; CHECK-NEXT: retq entry: %0 = load float, float* %v, align 4 diff --git a/test/CodeGen/X86/x86-interleaved-access.ll b/test/CodeGen/X86/x86-interleaved-access.ll index 450e255313b3..6fbec91e77a3 100644 --- a/test/CodeGen/X86/x86-interleaved-access.ll +++ b/test/CodeGen/X86/x86-interleaved-access.ll @@ -11,13 +11,13 @@ define <4 x double> @load_factorf64_4(<16 x double>* %ptr) { ; AVX-NEXT: vmovupd 96(%rdi), %ymm3 ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4 ; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5 -; AVX-NEXT: vhaddpd %ymm5, %ymm4, %ymm4 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3] ; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3] ; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] -; AVX-NEXT: vaddpd %ymm2, %ymm4, %ymm2 ; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] -; AVX-NEXT: vaddpd %ymm0, %ymm2, %ymm0 +; AVX-NEXT: vhaddpd %ymm5, %ymm4, %ymm1 +; AVX-NEXT: vaddpd %ymm2, %ymm1, %ymm1 +; AVX-NEXT: vaddpd %ymm0, %ymm1, %ymm0 ; AVX-NEXT: retq %wide.vec = load <16 x double>, <16 x double>* %ptr, align 16 %strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12> @@ -39,11 +39,11 @@ define <4 x double> @load_factorf64_2(<16 x double>* %ptr) { ; AVX-NEXT: vmovupd 96(%rdi), %ymm3 ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4 ; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5 -; AVX-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2] ; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3] ; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3] +; 
AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2] ; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] -; AVX-NEXT: vmulpd %ymm0, %ymm4, %ymm0 +; AVX-NEXT: vmulpd %ymm0, %ymm2, %ymm0 ; AVX-NEXT: retq %wide.vec = load <16 x double>, <16 x double>* %ptr, align 16 %strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12> @@ -124,9 +124,9 @@ define <4 x i64> @load_factori64_4(<16 x i64>* %ptr) { ; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2] ; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] ; AVX2-NEXT: vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3] -; AVX2-NEXT: vpaddq %ymm3, %ymm4, %ymm3 ; AVX2-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] -; AVX2-NEXT: vpaddq %ymm0, %ymm3, %ymm0 +; AVX2-NEXT: vpaddq %ymm3, %ymm4, %ymm1 +; AVX2-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0 ; AVX2-NEXT: retq %wide.vec = load <16 x i64>, <16 x i64>* %ptr, align 16 diff --git a/test/CodeGen/X86/xmulo.ll b/test/CodeGen/X86/xmulo.ll index aed305058f0b..03f284d87a66 100644 --- a/test/CodeGen/X86/xmulo.ll +++ b/test/CodeGen/X86/xmulo.ll @@ -712,17 +712,11 @@ define i1 @bug27873(i64 %c1, i1 %c2) { ; ; KNL-LABEL: bug27873: ; KNL: ## BB#0: -; KNL-NEXT: andl $1, %esi ; KNL-NEXT: movl $160, %ecx ; KNL-NEXT: movq %rdi, %rax ; KNL-NEXT: mulq %rcx -; KNL-NEXT: kmovw %esi, %k0 ; KNL-NEXT: seto %al -; KNL-NEXT: andl $1, %eax -; KNL-NEXT: kmovw %eax, %k1 -; KNL-NEXT: korw %k1, %k0, %k0 -; KNL-NEXT: kmovw %k0, %eax -; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill> +; KNL-NEXT: orb %sil, %al ; KNL-NEXT: retq %mul = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %c1, i64 160) %mul.overflow = extractvalue { i64, i1 } %mul, 1 diff --git a/test/CodeGen/X86/xor-select-i1-combine.ll b/test/CodeGen/X86/xor-select-i1-combine.ll index 6507ddcc7697..c9383282a0cc 100644 --- a/test/CodeGen/X86/xor-select-i1-combine.ll +++ b/test/CodeGen/X86/xor-select-i1-combine.ll @@ -7,10 +7,10 @@ define i32 @main(i8 %small) { ; CHECK-LABEL: main: ; CHECK: # BB#0: # %entry -; CHECK-NEXT: movl $n, %eax -; CHECK-NEXT: movl $m, %ecx ; CHECK-NEXT: testb $1, %dil -; CHECK-NEXT: cmovneq %rax, %rcx +; CHECK-NEXT: movl $m, %eax +; CHECK-NEXT: movl $n, %ecx +; CHECK-NEXT: cmoveq %rax, %rcx ; CHECK-NEXT: movl (%rcx), %eax ; CHECK-NEXT: retq entry: diff --git a/test/DebugInfo/Inputs/split-dwarf-addr-object-relocation.dwo b/test/DebugInfo/Inputs/split-dwarf-addr-object-relocation.dwo Binary files differ new file mode 100644 index 000000000000..2a3bc57caa6d --- /dev/null +++ b/test/DebugInfo/Inputs/split-dwarf-addr-object-relocation.dwo diff --git a/test/DebugInfo/Inputs/split-dwarf-addr-object-relocation.o b/test/DebugInfo/Inputs/split-dwarf-addr-object-relocation.o Binary files differ new file mode 100644 index 000000000000..b6993c6cae20 --- /dev/null +++ b/test/DebugInfo/Inputs/split-dwarf-addr-object-relocation.o diff --git a/test/DebugInfo/Inputs/split-dwarf-multiple-cu.dwo b/test/DebugInfo/Inputs/split-dwarf-multiple-cu.dwo Binary files differ new file mode 100644 index 000000000000..4df9894b089a --- /dev/null +++ b/test/DebugInfo/Inputs/split-dwarf-multiple-cu.dwo diff --git a/test/DebugInfo/Inputs/split-dwarf-multiple-cu.o b/test/DebugInfo/Inputs/split-dwarf-multiple-cu.o Binary files differ new file mode 100644 index 000000000000..aa4ab4bc76f7 --- /dev/null +++ b/test/DebugInfo/Inputs/split-dwarf-multiple-cu.o diff --git a/test/DebugInfo/PDB/Inputs/merge1.yaml 
b/test/DebugInfo/PDB/Inputs/merge1.yaml new file mode 100644 index 000000000000..89d471e3343d --- /dev/null +++ b/test/DebugInfo/PDB/Inputs/merge1.yaml @@ -0,0 +1,52 @@ +---
+TpiStream:
+ Records:
+ # uint32_t* [Index: 0x1000]
+ - Kind: LF_POINTER
+ Pointer:
+ ReferentType: 117
+ Attrs: 32778
+ # int64_t* [Index: 0x1001]
+ - Kind: LF_POINTER
+ Pointer:
+ ReferentType: 118
+ Attrs: 32778
+ # struct OnlyInMerge1 [Index: 0x1002]
+ - Kind: LF_STRUCTURE
+ Class:
+ MemberCount: 0
+ Options: [ None, ForwardReference, HasUniqueName ]
+ FieldList: 0
+ Name: 'OnlyInMerge1'
+ UniqueName: 'OnlyInMerge1'
+ DerivationList: 0
+ VTableShape: 0
+ Size: 0
+ # uint32_t** [Index: 0x1003]
+ - Kind: LF_POINTER
+ Pointer:
+ ReferentType: 4096
+ Attrs: 32778
+ # uint32_t*** [Index: 0x1004]
+ - Kind: LF_POINTER
+ Pointer:
+ ReferentType: 4099
+ Attrs: 32778
+ # int64_t** [Index: 0x1005]
+ - Kind: LF_POINTER
+ Pointer:
+ ReferentType: 4097
+ Attrs: 32778
+ # [uint32_t, uint32_t*, uint32_t**] [Index: 0x1006]
+ - Kind: LF_ARGLIST
+ ArgList:
+ ArgIndices: [ 117, 4096, 4099 ]
+ # uint32_t (uint32_t, uint32_t*, uint32_t**) [Index: 0x1007]
+ - Kind: LF_PROCEDURE
+ Procedure:
+ ReturnType: 117
+ CallConv: NearC
+ Options: [ None ]
+ ParameterCount: 0
+ ArgumentList: 4102
+...
diff --git a/test/DebugInfo/PDB/Inputs/merge2.yaml b/test/DebugInfo/PDB/Inputs/merge2.yaml new file mode 100644 index 000000000000..b6cbdb98f0ca --- /dev/null +++ b/test/DebugInfo/PDB/Inputs/merge2.yaml @@ -0,0 +1,52 @@ +---
+TpiStream:
+ Records:
+ # uint32_t* [Index: 0x1000]
+ - Kind: LF_POINTER
+ Pointer:
+ ReferentType: 117
+ Attrs: 32778
+ # uint32_t** [Index: 0x1001]
+ - Kind: LF_POINTER
+ Pointer:
+ ReferentType: 4096
+ Attrs: 32778
+ # uint32_t*** [Index: 0x1002]
+ - Kind: LF_POINTER
+ Pointer:
+ ReferentType: 4097
+ Attrs: 32778
+ # [uint32_t, uint32_t*, uint32_t**] [Index: 0x1003]
+ - Kind: LF_ARGLIST
+ ArgList:
+ ArgIndices: [ 117, 4096, 4097 ]
+ # uint32_t (uint32_t, uint32_t*, uint32_t**) [Index: 0x1004]
+ - Kind: LF_PROCEDURE
+ Procedure:
+ ReturnType: 117
+ CallConv: NearC
+ Options: [ None ]
+ ParameterCount: 0
+ ArgumentList: 4099
+ # int64_t* [Index: 0x1005]
+ - Kind: LF_POINTER
+ Pointer:
+ ReferentType: 118
+ Attrs: 32778
+ # int64_t** [Index: 0x1006]
+ - Kind: LF_POINTER
+ Pointer:
+ ReferentType: 4101
+ Attrs: 32778
+ # struct OnlyInMerge2 [Index: 0x1007]
+ - Kind: LF_STRUCTURE
+ Class:
+ MemberCount: 0
+ Options: [ None, ForwardReference, HasUniqueName ]
+ FieldList: 0
+ Name: 'OnlyInMerge2'
+ UniqueName: 'OnlyInMerge2'
+ DerivationList: 0
+ VTableShape: 0
+ Size: 0
+...
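A note on the type indices in the two inputs above: in a TPI stream, indices below 0x1000 denote CodeView built-in types (117, i.e. 0x75, is `unsigned`; 118, i.e. 0x76, is `__int64`, which is how the merged dump in the merge test below prints them), while indices from 0x1000 upward refer back to earlier records in the same stream, in order of appearance. Resolving merge1.yaml under that scheme gives, for example:
# 0x1000 -> unsigned*   (ReferentType 117)
# 0x1001 -> __int64*    (ReferentType 118)
# 0x1003 -> unsigned**  (ReferentType 4096 = 0x1000)
# 0x1004 -> unsigned*** (ReferentType 4099 = 0x1003)
# 0x1005 -> __int64**   (ReferentType 4097 = 0x1001)
The same logical type can therefore sit at different raw indices in the two inputs, so a merge has to remap indices rather than copy records verbatim; the test below checks exactly that.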
diff --git a/test/DebugInfo/PDB/pdbdump-headers.test b/test/DebugInfo/PDB/pdbdump-headers.test index d67743efd707..4e6bb75f8b8d 100644 --- a/test/DebugInfo/PDB/pdbdump-headers.test +++ b/test/DebugInfo/PDB/pdbdump-headers.test @@ -326,7 +326,7 @@ ; EMPTY-NEXT: TypeLeafKind: LF_SUBSTR_LIST (0x1604) ; EMPTY-NEXT: NumStrings: 1 ; EMPTY-NEXT: Strings [ -; EMPTY-NEXT: String: __vc_attributes::threadingAttribute (0x100B) +; EMPTY-NEXT: String: -Zi -MT -I"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\INCLUDE" -I"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\ATLMFC\INCLUDE" -I"C:\Program Files (x86)\Windows Kits\8.1\include\shared" -I"C:\Program Files (x86)\Windows ; EMPTY-NEXT: ] ; EMPTY-NEXT: } ; EMPTY-NEXT: Bytes ( @@ -1253,7 +1253,7 @@ ; ALL: TypeLeafKind: LF_SUBSTR_LIST (0x1604) ; ALL: NumStrings: 1 ; ALL: Strings [ -; ALL: String: __vc_attributes::threadingAttribute (0x100B) +; ALL: String: -Zi -MT -I"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\INCLUDE" -I"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\ATLMFC\INCLUDE" -I"C:\Program Files (x86)\Windows Kits\8.1\include\shared" -I"C:\Program Files (x86)\Windows (0x100B) ; ALL: ] ; ALL: } ; ALL: } diff --git a/test/DebugInfo/PDB/pdbdump-mergetypes.test b/test/DebugInfo/PDB/pdbdump-mergetypes.test new file mode 100644 index 000000000000..96f6316d4766 --- /dev/null +++ b/test/DebugInfo/PDB/pdbdump-mergetypes.test @@ -0,0 +1,24 @@ +; RUN: llvm-pdbdump yaml2pdb -pdb=%t.1.pdb %p/Inputs/merge1.yaml
+; RUN: llvm-pdbdump yaml2pdb -pdb=%t.2.pdb %p/Inputs/merge2.yaml
+; RUN: llvm-pdbdump merge -pdb=%t.3.pdb %t.1.pdb %t.2.pdb
+; RUN: llvm-pdbdump raw -tpi-records %t.3.pdb | FileCheck -check-prefix=MERGED %s
+; RUN: llvm-pdbdump raw -tpi-records %t.3.pdb | FileCheck -check-prefix=ARGLIST %s
+
+
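+; A quick tally of why nine records are expected: each input above carries
+; eight records, and seven of them (the unsigned pointer chain, the two
+; __int64 pointers, the argument list, and the procedure) describe the same
+; types under different raw indices. After remapping, the merged stream
+; should therefore hold those 7 shared records plus the two OnlyInMerge
+; structs.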
+MERGED: Type Info Stream (TPI)
+MERGED: Record count: 9
+MERGED-DAG: PointeeType: unsigned
+MERGED-DAG: PointeeType: unsigned*
+MERGED-DAG: PointeeType: unsigned**
+MERGED-DAG: PointeeType: __int64
+MERGED-DAG: PointeeType: __int64*
+MERGED-DAG: Name: OnlyInMerge1
+MERGED-DAG: Name: OnlyInMerge2
+MERGED-DAG: TypeLeafKind: LF_ARGLIST
+
+ARGLIST: TypeLeafKind: LF_ARGLIST
+ARGLIST-NEXT: NumArgs: 3
+ARGLIST-NEXT: Arguments [
+ARGLIST-NEXT: ArgType: unsigned
+ARGLIST-NEXT: ArgType: unsigned*
+ARGLIST-NEXT: ArgType: unsigned**
diff --git a/test/DebugInfo/llvm-symbolizer.test b/test/DebugInfo/llvm-symbolizer.test index 7ea062e6c9e7..f0db8f4b921f 100644 --- a/test/DebugInfo/llvm-symbolizer.test +++ b/test/DebugInfo/llvm-symbolizer.test @@ -23,6 +23,10 @@ RUN: cp %p/Inputs/split-dwarf-test.dwo %T RUN: echo "%p/Inputs/split-dwarf-test 0x4005d4" >> %t.input RUN: echo "%p/Inputs/split-dwarf-test 0x4005c4" >> %t.input RUN: echo "%p/Inputs/cross-cu-inlining.x86_64-macho.o 0x17" >> %t.input +RUN: cp %p/Inputs/split-dwarf-multiple-cu.dwo %T +RUN: echo "%p/Inputs/split-dwarf-multiple-cu.o 0x4" >> %t.input +RUN: cp %p/Inputs/split-dwarf-addr-object-relocation.dwo %T +RUN: echo "%p/Inputs/split-dwarf-addr-object-relocation.o 0x14" >> %t.input RUN: llvm-symbolizer --functions=linkage --inlining --demangle=false \ RUN: --default-arch=i386 < %t.input | FileCheck --check-prefix=CHECK --check-prefix=SPLIT --check-prefix=DWO %s @@ -133,6 +137,16 @@ CHECK-NEXT: /tmp{{[/\\]}}cross-cu-inlining.c:16:3 CHECK-NEXT: main CHECK-NEXT: /tmp{{[/\\]}}cross-cu-inlining.c:11:0 +CHECK: f2 +CHECK-NEXT: b.cpp:3:3 +CHECK-NEXT: f3 +CHECK-NEXT: b.cpp:6:0 + +CHECK: f2 +CHECK-NEXT: split-dwarf-addr-object-relocation.cpp:3:3 +CHECK-NEXT: f3 +CHECK-NEXT: split-dwarf-addr-object-relocation.cpp:6:0 + RUN: echo "unexisting-file 0x1234" > %t.input2 RUN: llvm-symbolizer < %t.input2 2>&1 | FileCheck %s --check-prefix=MISSING-FILE diff --git a/test/Instrumentation/MemorySanitizer/csr.ll b/test/Instrumentation/MemorySanitizer/csr.ll index c4e3a3f73920..c288f93241b9 100644 --- a/test/Instrumentation/MemorySanitizer/csr.ll +++ b/test/Instrumentation/MemorySanitizer/csr.ll @@ -1,5 +1,6 @@ ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s ; RUN: opt < %s -msan -msan-check-access-address=1 -S | FileCheck %s --check-prefix=ADDR +; REQUIRES: x86 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" diff --git a/test/LTO/X86/Inputs/strip-debug-info-bar.ll b/test/LTO/X86/Inputs/strip-debug-info-bar.ll new file mode 100644 index 000000000000..4269886676b3 --- /dev/null +++ b/test/LTO/X86/Inputs/strip-debug-info-bar.ll @@ -0,0 +1,15 @@ +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.12" + +define void @bar() !dbg !3 { + ret void +} + +!llvm.module.flags = !{!0} +!llvm.dbg.cu = !{!1} + +!0 = !{i32 2, !"Debug Info Version", i32 3} +!1 = distinct !DICompileUnit(language: DW_LANG_C99, file: !2) +!2 = !DIFile(filename: "broken", directory: "") +!3 = distinct !DISubprogram(line: 1000, isDefinition: true) + diff --git a/test/LTO/X86/Inputs/strip-debug-info.bc b/test/LTO/X86/Inputs/strip-debug-info.bc Binary files differdeleted file mode 100644 index c83195ff9caf..000000000000 --- a/test/LTO/X86/Inputs/strip-debug-info.bc +++ /dev/null diff --git a/test/LTO/X86/strip-debug-info.ll b/test/LTO/X86/strip-debug-info.ll index ff45ca15243e..6b7745164446 100644 --- a/test/LTO/X86/strip-debug-info.ll +++ b/test/LTO/X86/strip-debug-info.ll @@ -1,16 +1,61 @@ +; RUN: llvm-as -disable-verify %s -o %t.bc +; ---- Full LTO --------------------------------------------- ; RUN: not llvm-lto -lto-strip-invalid-debug-info=false \ -; RUN: -o %t.o %S/Inputs/strip-debug-info.bc 2>&1 | \ +; RUN: -o %t.o %t.bc 2>&1 | \ ; RUN: FileCheck %s -allow-empty -check-prefix=CHECK-ERR ; RUN: llvm-lto -lto-strip-invalid-debug-info=true \ ; RUN: -exported-symbol foo -exported-symbol _foo \ -; 
RUN: -o %t.o %S/Inputs/strip-debug-info.bc 2>&1 | \ +; RUN: -o %t.o %t.bc 2>&1 | \ ; RUN: FileCheck %s -allow-empty -check-prefix=CHECK-WARN ; RUN: llvm-nm %t.o | FileCheck %s +; ---- Thin LTO (codegen only) ------------------------------ +; RUN: not llvm-lto -thinlto -thinlto-action=codegen \ +; RUN: -lto-strip-invalid-debug-info=false \ +; RUN: %t.bc -disable-verify 2>&1 | \ +; RUN: FileCheck %s -allow-empty -check-prefix=CHECK-ERR +; RUN: llvm-lto -thinlto -thinlto-action=codegen \ +; RUN: -lto-strip-invalid-debug-info=true \ +; RUN: %t.bc -disable-verify 2>&1 | \ +; RUN: FileCheck %s -allow-empty -check-prefix=CHECK-WARN +; ---- Thin LTO (optimize, strip main file) ----------------- +; RUN: opt -disable-verify -module-summary %s -o %t.bc +; RUN: opt -disable-verify -module-summary %S/Inputs/strip-debug-info-bar.ll \ +; RUN: -o %t2.bc +; RUN: not llvm-lto -thinlto -thinlto-action=run \ +; RUN: -lto-strip-invalid-debug-info=false \ +; RUN: %t.bc -disable-verify 2>&1 | \ +; RUN: FileCheck %s -allow-empty -check-prefix=CHECK-ERR +; RUN: llvm-lto -thinlto -thinlto-action=run \ +; RUN: -lto-strip-invalid-debug-info=true \ +; RUN: %t.bc -disable-verify 2>&1 | \ +; RUN: FileCheck %s -allow-empty -check-prefix=CHECK-WARN +; ---- Thin LTO (optimize, strip imported file) ------------- +; RUN: opt -disable-verify -strip-debug -module-summary %t.bc -o %t-stripped.bc +; RUN: llvm-lto -thinlto-action=thinlink -o %t.index.bc %t-stripped.bc %t2.bc +; RUN: not llvm-lto -thinlto -thinlto-action=import \ +; RUN: -thinlto-index=%t.index.bc \ +; RUN: -lto-strip-invalid-debug-info=false \ +; RUN: -exported-symbol foo -exported-symbol _foo \ +; RUN: %t-stripped.bc -disable-verify 2>&1 | \ +; RUN: FileCheck %s -allow-empty -check-prefix=CHECK-ERR +; RUN: llvm-lto -thinlto -thinlto-action=import \ +; RUN: -lto-strip-invalid-debug-info=true \ +; RUN: -thinlto-index=%t.index.bc \ +; RUN: -exported-symbol foo -exported-symbol _foo \ +; RUN: %t-stripped.bc -disable-verify 2>&1 | \ +; RUN: FileCheck %s -allow-empty -check-prefix=CHECK-WARN ; CHECK-ERR: Broken module found, compilation aborted ; CHECK-WARN: Invalid debug info found, debug info will be stripped +; CHECK-WARN-NOT: Broken module found ; CHECK: foo +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.12" + +declare void @bar() + define void @foo() { + call void @bar() ret void } diff --git a/test/MC/AMDGPU/exp.s b/test/MC/AMDGPU/exp.s index 710a777ab217..fab89e48dcfa 100644 --- a/test/MC/AMDGPU/exp.s +++ b/test/MC/AMDGPU/exp.s @@ -112,3 +112,15 @@ exp mrt0 v4, v3, v2, v1 vm exp mrt0 v4, v3, v2, v1 done vm // SI: exp mrt0 v4, v3, v2, v1 done vm ; encoding: [0x0f,0x18,0x00,0xf8,0x04,0x03,0x02,0x01] // VI: exp mrt0 v4, v3, v2, v1 done vm ; encoding: [0x0f,0x18,0x00,0xc4,0x04,0x03,0x02,0x01] + +exp mrtz, v3, v3, v7, v7 compr +// SI: exp mrtz v3, v3, v7, v7 compr ; encoding: [0x8f,0x04,0x00,0xf8,0x03,0x07,0x00,0x00] +// VI: exp mrtz v3, v3, v7, v7 compr ; encoding: [0x8f,0x04,0x00,0xc4,0x03,0x07,0x00,0x00] + +exp mrtz, off, off, v7, v7 compr +// SI: exp mrtz off, off, v7, v7 compr ; encoding: [0x8c,0x04,0x00,0xf8,0x00,0x07,0x00,0x00] +// VI: exp mrtz off, off, v7, v7 compr ; encoding: [0x8c,0x04,0x00,0xc4,0x00,0x07,0x00,0x00] + +exp mrtz, v3, v3, off, off compr +// SI: exp mrtz v3, v3, off, off compr ; encoding: [0x83,0x04,0x00,0xf8,0x03,0x00,0x00,0x00] +// VI: exp mrtz v3, v3, off, off compr ; encoding: [0x83,0x04,0x00,0xc4,0x03,0x00,0x00,0x00] diff --git a/test/MC/Disassembler/AMDGPU/exp_vi.txt 
b/test/MC/Disassembler/AMDGPU/exp_vi.txt new file mode 100644 index 000000000000..9291fb807839 --- /dev/null +++ b/test/MC/Disassembler/AMDGPU/exp_vi.txt @@ -0,0 +1,40 @@ +# RUN: llvm-mc -arch=amdgcn -mcpu=tonga -disassemble -show-encoding < %s | FileCheck %s -check-prefix=VI + +# VI: exp mrt0 v1, v2, v3, v4 ; encoding: [0x0f,0x00,0x00,0xc4,0x01,0x02,0x03,0x04] +0x0f,0x00,0x00,0xc4,0x01,0x02,0x03,0x04 + +# VI: exp mrt0 v1, v2, v3, v4 vm ; encoding: [0x0f,0x10,0x00,0xc4,0x01,0x02,0x03,0x04] +0x0f,0x10,0x00,0xc4,0x01,0x02,0x03,0x04 + +# VI: exp mrt0 v1, v1, v3, v3 compr ; encoding: [0x0f,0x04,0x00,0xc4,0x01,0x03,0x00,0x00] +0x0f,0x04,0x00,0xc4,0x01,0x03,0x00,0x00 + +# VI: exp mrt0 v1, v2, v3, v4 done ; encoding: [0x0f,0x08,0x00,0xc4,0x01,0x02,0x03,0x04] +0x0f,0x08,0x00,0xc4,0x01,0x02,0x03,0x04 + +# VI: exp mrt0 v2, v2, v4, v4 done compr vm ; encoding: [0x0f,0x1c,0x00,0xc4,0x02,0x04,0x00,0x00] +0x0f,0x1c,0x00,0xc4,0x02,0x04,0x00,0x00 + +# VI: exp mrt0 v7, off, off, off vm ; encoding: [0x01,0x10,0x00,0xc4,0x07,0x00,0x00,0x00] +0x01,0x10,0x00,0xc4,0x07,0x00,0x00,0x00 + +# VI: exp mrt0 off, off, v1, v2 ; encoding: [0x0c,0x00,0x00,0xc4,0x00,0x00,0x01,0x02] +0x0c,0x00,0x00,0xc4,0x00,0x00,0x01,0x02 + +# VI: exp mrt0 off, off, v8, v8 done compr ; encoding: [0x0c,0x0c,0x00,0xc4,0x00,0x08,0x00,0x00] +0x0c,0x0c,0x00,0xc4,0x00,0x08,0x00,0x00 + +# VI: exp mrt0 v1, v1, off, off compr ; encoding: [0x03,0x04,0x00,0xc4,0x01,0x00,0x00,0x00] +0x03,0x04,0x00,0xc4,0x01,0x00,0x00,0x00 + +# VI: exp param0 off, off, off, off compr ; encoding: [0x00,0x06,0x00,0xc4,0x00,0x00,0x00,0x00] +0x00,0x06,0x00,0xc4,0x00,0x00,0x00,0x00 + +# VI: exp mrtz v0, off, off, off done vm ; encoding: [0x81,0x18,0x00,0xc4,0x00,0x00,0x00,0x00] +0x81,0x18,0x00,0xc4,0x00,0x00,0x00,0x00 + +# VI: exp null v255, v0, v255, v0 ; encoding: [0x9f,0x00,0x00,0xc4,0xff,0x00,0xff,0x00] +0x9f,0x00,0x00,0xc4,0xff,0x00,0xff,0x00 + +# VI: exp pos0 v1, off, off, off ; encoding: [0xc1,0x00,0x00,0xc4,0x01,0x00,0x00,0x00] +0xc1,0x00,0x00,0xc4,0x01,0x00,0x00,0x00 diff --git a/test/MC/Disassembler/AMDGPU/sopc_vi.txt b/test/MC/Disassembler/AMDGPU/sopc_vi.txt index 026dcbafed42..2c2dc07efd65 100644 --- a/test/MC/Disassembler/AMDGPU/sopc_vi.txt +++ b/test/MC/Disassembler/AMDGPU/sopc_vi.txt @@ -50,3 +50,6 @@ # GCN: s_setvskip s3, s5 ; encoding: [0x03,0x05,0x10,0xbf] 0x03 0x05 0x10 0xbf + +# GCN: s_bitcmp0_b32 0xafaaffff, 0xafaaffff ; encoding: [0xff,0xff,0x0c,0xbf,0xff,0xff,0xaa,0xaf] +0xff 0xff 0x0c 0xbf 0xff 0xff 0xaa 0xaf diff --git a/test/TableGen/GlobalISelEmitter.td b/test/TableGen/GlobalISelEmitter.td index 2784e937954a..aeac85962f63 100644 --- a/test/TableGen/GlobalISelEmitter.td +++ b/test/TableGen/GlobalISelEmitter.td @@ -7,10 +7,6 @@ include "llvm/Target/Target.td" def MyTargetISA : InstrInfo; def MyTarget : Target { let InstructionSet = MyTargetISA; } -let TargetPrefix = "mytarget" in { -def int_mytarget_nop : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>; -} - def R0 : Register<"r0"> { let Namespace = "MyTarget"; } def GPR32 : RegisterClass<"MyTarget", [i32], 32, (add R0)>; def GPR32Op : RegisterOperand<GPR32>; @@ -131,37 +127,6 @@ def : Pat<(select GPR32:$src1, complex:$src2, complex:$src3), def ADD : I<(outs GPR32:$dst), (ins GPR32:$src1, GPR32:$src2), [(set GPR32:$dst, (add GPR32:$src1, GPR32:$src2))]>; -//===- Test a simple pattern with an intrinsic. 
---------------------------===// -// - -// CHECK-LABEL: if ([&]() { -// CHECK-NEXT: MachineInstr &MI0 = I; -// CHECK-NEXT: if (MI0.getNumOperands() < 3) -// CHECK-NEXT: return false; -// CHECK-NEXT: if ((MI0.getOpcode() == TargetOpcode::G_INTRINSIC) && -// CHECK-NEXT: ((/* dst */ (MRI.getType(MI0.getOperand(0).getReg()) == (LLT::scalar(32))) && -// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(0).getReg(), MRI, TRI))))) && -// CHECK-NEXT: ((/* Operand 1 */ (isOperandImmEqual(MI0.getOperand(1), [[ID:[0-9]+]], MRI)))) && -// CHECK-NEXT: ((/* src1 */ (MRI.getType(MI0.getOperand(2).getReg()) == (LLT::scalar(32))) && -// CHECK-NEXT: ((&RBI.getRegBankFromRegClass(MyTarget::GPR32RegClass) == RBI.getRegBank(MI0.getOperand(2).getReg(), MRI, TRI)))))) { -// CHECK-NEXT: // (intrinsic_wo_chain:i32 [[ID]]:iPTR, GPR32:i32:$src1) => (MOV:i32 GPR32:i32:$src1) -// CHECK-NEXT: MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(MyTarget::MOV)); -// CHECK-NEXT: MIB.add(MI0.getOperand(0)/*dst*/); -// CHECK-NEXT: MIB.add(MI0.getOperand(2)/*src1*/); -// CHECK-NEXT: for (const auto *FromMI : {&MI0, }) -// CHECK-NEXT: for (const auto &MMO : FromMI->memoperands()) -// CHECK-NEXT: MIB.addMemOperand(MMO); -// CHECK-NEXT: I.eraseFromParent(); -// CHECK-NEXT: MachineInstr &NewI = *MIB; -// CHECK-NEXT: constrainSelectedInstRegOperands(NewI, TII, TRI, RBI); -// CHECK-NEXT: return true; -// CHECK-NEXT: } -// CHECK-NEXT: return false; -// CHECK-NEXT: }()) { return true; } - -def MOV : I<(outs GPR32:$dst), (ins GPR32:$src1), - [(set GPR32:$dst, (int_mytarget_nop GPR32:$src1))]>; - //===- Test a nested instruction match. -----------------------------------===// // CHECK-LABEL: if ([&]() { diff --git a/test/TableGen/intrinsic-varargs.td b/test/TableGen/intrinsic-varargs.td index 1e2378550855..b4ce10c64e22 100644 --- a/test/TableGen/intrinsic-varargs.td +++ b/test/TableGen/intrinsic-varargs.td @@ -23,7 +23,7 @@ class Intrinsic<string name, list<LLVMType> param_types = []> { } // isVoid needs to match the definition in ValueTypes.td -def isVoid : ValueType<0, 108>; // Produces no value +def isVoid : ValueType<0, 110>; // Produces no value def llvm_vararg_ty : LLVMType<isVoid>; // this means vararg here // CHECK: /* 0 */ 0, 29, 0, diff --git a/test/ThinLTO/X86/Inputs/merge-triple.ll b/test/ThinLTO/X86/Inputs/merge-triple.ll new file mode 100644 index 000000000000..ea644f5497b9 --- /dev/null +++ b/test/ThinLTO/X86/Inputs/merge-triple.ll @@ -0,0 +1 @@ +target triple = "x86_64-apple-macosx10.11.0" diff --git a/test/ThinLTO/X86/merge-triple.ll b/test/ThinLTO/X86/merge-triple.ll new file mode 100644 index 000000000000..8f099d12a23b --- /dev/null +++ b/test/ThinLTO/X86/merge-triple.ll @@ -0,0 +1,10 @@ +; RUN: opt -module-summary %s -o %t1.bc +; RUN: opt -module-summary %p/Inputs/merge-triple.ll -o %t2.bc +; RUN: llvm-lto -thinlto-action=optimize %t1.bc %t2.bc +; RUN: llvm-dis < %t1.bc.thinlto.imported.bc | FileCheck %s --check-prefix=CHECK1 +; RUN: llvm-dis < %t2.bc.thinlto.imported.bc | FileCheck %s --check-prefix=CHECK2 + +target triple = "x86_64-apple-macosx10.12.0" + +; CHECK1: target triple = "x86_64-apple-macosx10.12.0" +; CHECK2: target triple = "x86_64-apple-macosx10.11.0" diff --git a/test/Transforms/InstCombine/2008-09-29-FoldingOr.ll b/test/Transforms/InstCombine/2008-09-29-FoldingOr.ll deleted file mode 100644 index 4d00d495a07f..000000000000 --- a/test/Transforms/InstCombine/2008-09-29-FoldingOr.ll +++ /dev/null @@ -1,10 +0,0 @@ -; RUN: opt < 
%s -instcombine -S | grep "or i1" -; PR2844 - -define i32 @test(i32 %p_74) { - %A = icmp eq i32 %p_74, 0 ; <i1> [#uses=1] - %B = icmp slt i32 %p_74, -638208501 ; <i1> [#uses=1] - %or.cond = or i1 %A, %B ; <i1> [#uses=1] - %iftmp.10.0 = select i1 %or.cond, i32 0, i32 1 ; <i32> [#uses=1] - ret i32 %iftmp.10.0 -} diff --git a/test/Transforms/InstCombine/amdgcn-demanded-vector-elts.ll b/test/Transforms/InstCombine/AMDGPU/amdgcn-demanded-vector-elts.ll index 0c4842c15988..0c4842c15988 100644 --- a/test/Transforms/InstCombine/amdgcn-demanded-vector-elts.ll +++ b/test/Transforms/InstCombine/AMDGPU/amdgcn-demanded-vector-elts.ll diff --git a/test/Transforms/InstCombine/NVPTX/lit.local.cfg b/test/Transforms/InstCombine/NVPTX/lit.local.cfg new file mode 100644 index 000000000000..2cb98eb371b2 --- /dev/null +++ b/test/Transforms/InstCombine/NVPTX/lit.local.cfg @@ -0,0 +1,2 @@ +if 'NVPTX' not in config.root.targets: + config.unsupported = True diff --git a/test/Transforms/InstCombine/nvvm-intrins.ll b/test/Transforms/InstCombine/NVPTX/nvvm-intrins.ll index cb65b8fdc547..cb65b8fdc547 100644 --- a/test/Transforms/InstCombine/nvvm-intrins.ll +++ b/test/Transforms/InstCombine/NVPTX/nvvm-intrins.ll diff --git a/test/Transforms/InstCombine/cast.ll b/test/Transforms/InstCombine/cast.ll index a4375a5cd57e..486a617097e1 100644 --- a/test/Transforms/InstCombine/cast.ll +++ b/test/Transforms/InstCombine/cast.ll @@ -1470,3 +1470,55 @@ define i32 @test93(i32 %A) { %D = trunc i96 %C to i32 ret i32 %D } + +; The following four tests check sext + lshr + trunc patterns. +; PR33078 + +define i8 @pr33078_1(i8 %A) { +; CHECK-LABEL: @pr33078_1( +; CHECK-NEXT: [[C:%.*]] = ashr i8 [[A:%.*]], 7 +; CHECK-NEXT: ret i8 [[C]] +; + %B = sext i8 %A to i16 + %C = lshr i16 %B, 8 + %D = trunc i16 %C to i8 + ret i8 %D +} + +define i12 @pr33078_2(i8 %A) { +; CHECK-LABEL: @pr33078_2( +; CHECK-NEXT: [[C:%.*]] = ashr i8 [[A:%.*]], 4 +; CHECK-NEXT: [[D:%.*]] = sext i8 [[C]] to i12 +; CHECK-NEXT: ret i12 [[D]] +; + %B = sext i8 %A to i16 + %C = lshr i16 %B, 4 + %D = trunc i16 %C to i12 + ret i12 %D +} + +define i4 @pr33078_3(i8 %A) { +; CHECK-LABEL: @pr33078_3( +; CHECK-NEXT: [[B:%.*]] = sext i8 [[A:%.*]] to i16 +; CHECK-NEXT: [[C:%.*]] = lshr i16 [[B]], 12 +; CHECK-NEXT: [[D:%.*]] = trunc i16 [[C]] to i4 +; CHECK-NEXT: ret i4 [[D]] +; + %B = sext i8 %A to i16 + %C = lshr i16 %B, 12 + %D = trunc i16 %C to i4 + ret i4 %D +} + +define i8 @pr33078_4(i3 %x) { +; Don't turn this into an `ashr`; this was getting miscompiled. +; CHECK-LABEL: @pr33078_4( +; CHECK-NEXT: [[B:%.*]] = sext i3 %x to i16 +; CHECK-NEXT: [[C:%.*]] = lshr i16 [[B]], 13 +; CHECK-NEXT: [[D:%.*]] = trunc i16 [[C]] to i8 +; CHECK-NEXT: ret i8 [[D]] + %B = sext i3 %x to i16 + %C = lshr i16 %B, 13 + %D = trunc i16 %C to i8 + ret i8 %D +} diff --git a/test/Transforms/InstCombine/lshr.ll b/test/Transforms/InstCombine/lshr.ll index b81371b03042..0cad7f833ab6 100644 --- a/test/Transforms/InstCombine/lshr.ll +++ b/test/Transforms/InstCombine/lshr.ll @@ -100,3 +100,75 @@ define <2 x i8> @lshr_exact_splat_vec(<2 x i8> %x) { ret <2 x i8> %lshr } +; FIXME: The bool bit got smeared across a wide value, but then we zeroed out those bits. This is just a zext. 
+ +define i16 @bool_zext(i1 %x) { +; CHECK-LABEL: @bool_zext( +; CHECK-NEXT: [[SEXT:%.*]] = sext i1 %x to i16 +; CHECK-NEXT: [[HIBIT:%.*]] = lshr i16 [[SEXT]], 15 +; CHECK-NEXT: ret i16 [[HIBIT]] +; + %sext = sext i1 %x to i16 + %hibit = lshr i16 %sext, 15 + ret i16 %hibit +} + +define <2 x i8> @bool_zext_splat(<2 x i1> %x) { +; CHECK-LABEL: @bool_zext_splat( +; CHECK-NEXT: [[SEXT:%.*]] = sext <2 x i1> %x to <2 x i8> +; CHECK-NEXT: [[HIBIT:%.*]] = lshr <2 x i8> [[SEXT]], <i8 7, i8 7> +; CHECK-NEXT: ret <2 x i8> [[HIBIT]] +; + %sext = sext <2 x i1> %x to <2 x i8> + %hibit = lshr <2 x i8> %sext, <i8 7, i8 7> + ret <2 x i8> %hibit +} + +; FIXME: The replicated sign bits are all that's left. This could be ashr+zext. + +define i16 @smear_sign_and_widen(i4 %x) { +; CHECK-LABEL: @smear_sign_and_widen( +; CHECK-NEXT: [[SEXT:%.*]] = sext i4 %x to i16 +; CHECK-NEXT: [[HIBIT:%.*]] = lshr i16 [[SEXT]], 12 +; CHECK-NEXT: ret i16 [[HIBIT]] +; + %sext = sext i4 %x to i16 + %hibit = lshr i16 %sext, 12 + ret i16 %hibit +} + +define <2 x i8> @smear_sign_and_widen_splat(<2 x i6> %x) { +; CHECK-LABEL: @smear_sign_and_widen_splat( +; CHECK-NEXT: [[SEXT:%.*]] = sext <2 x i6> %x to <2 x i8> +; CHECK-NEXT: [[HIBIT:%.*]] = lshr <2 x i8> [[SEXT]], <i8 2, i8 2> +; CHECK-NEXT: ret <2 x i8> [[HIBIT]] +; + %sext = sext <2 x i6> %x to <2 x i8> + %hibit = lshr <2 x i8> %sext, <i8 2, i8 2> + ret <2 x i8> %hibit +} + +; FIXME: All of the replicated sign bits are wiped out by the lshr. This could be lshr+zext. + +define i16 @fake_sext(i3 %x) { +; CHECK-LABEL: @fake_sext( +; CHECK-NEXT: [[SEXT:%.*]] = sext i3 %x to i16 +; CHECK-NEXT: [[SH:%.*]] = lshr i16 [[SEXT]], 15 +; CHECK-NEXT: ret i16 [[SH]] +; + %sext = sext i3 %x to i16 + %sh = lshr i16 %sext, 15 + ret i16 %sh +} + +define <2 x i8> @fake_sext_splat(<2 x i3> %x) { +; CHECK-LABEL: @fake_sext_splat( +; CHECK-NEXT: [[SEXT:%.*]] = sext <2 x i3> %x to <2 x i8> +; CHECK-NEXT: [[SH:%.*]] = lshr <2 x i8> [[SEXT]], <i8 7, i8 7> +; CHECK-NEXT: ret <2 x i8> [[SH]] +; + %sext = sext <2 x i3> %x to <2 x i8> + %sh = lshr <2 x i8> %sext, <i8 7, i8 7> + ret <2 x i8> %sh +} + diff --git a/test/Transforms/InstCombine/memchr.ll b/test/Transforms/InstCombine/memchr.ll index b0573567bf60..5a081c222fb0 100644 --- a/test/Transforms/InstCombine/memchr.ll +++ b/test/Transforms/InstCombine/memchr.ll @@ -190,3 +190,12 @@ define i1 @test15(i32 %C) { %cmp = icmp ne i8* %dst, null ret i1 %cmp } + +@s = internal constant [1 x i8] [i8 0], align 1 +define i8* @pr32124() { +; CHECK-LABEL: @pr32124( +; CHECK-NEXT: ret i8* getelementptr inbounds ([1 x i8], [1 x i8]* @s, i32 0, i32 0) +; + %res = tail call i8* @memchr(i8* getelementptr ([1 x i8], [1 x i8]* @s, i64 0, i64 0), i32 0, i32 1) + ret i8* %res +} diff --git a/test/Transforms/InstCombine/set.ll b/test/Transforms/InstCombine/set.ll index 494a60379011..db2b4c3558e8 100644 --- a/test/Transforms/InstCombine/set.ll +++ b/test/Transforms/InstCombine/set.ll @@ -110,8 +110,8 @@ define i1 @test12(i1 %A) { define i1 @test13(i1 %A, i1 %B) { ; CHECK-LABEL: @test13( -; CHECK-NEXT: [[CTMP:%.*]] = xor i1 %B, true -; CHECK-NEXT: [[C:%.*]] = or i1 [[CTMP]], %A +; CHECK-NEXT: [[TMP1:%.*]] = xor i1 %B, true +; CHECK-NEXT: [[C:%.*]] = or i1 [[TMP1]], %A ; CHECK-NEXT: ret i1 [[C]] ; %C = icmp uge i1 %A, %B @@ -120,8 +120,8 @@ define i1 @test13(i1 %A, i1 %B) { define <2 x i1> @test13vec(<2 x i1> %A, <2 x i1> %B) { ; CHECK-LABEL: @test13vec( -; CHECK-NEXT: [[CTMP:%.*]] = xor <2 x i1> %B, <i1 true, i1 true> -; CHECK-NEXT: [[C:%.*]] = or <2 x i1> [[CTMP]], %A +; 
CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i1> %B, <i1 true, i1 true> +; CHECK-NEXT: [[C:%.*]] = or <2 x i1> [[TMP1]], %A ; CHECK-NEXT: ret <2 x i1> [[C]] ; %C = icmp uge <2 x i1> %A, %B @@ -130,8 +130,8 @@ define <2 x i1> @test13vec(<2 x i1> %A, <2 x i1> %B) { define i1 @test14(i1 %A, i1 %B) { ; CHECK-LABEL: @test14( -; CHECK-NEXT: [[CTMP:%.*]] = xor i1 %A, %B -; CHECK-NEXT: [[C:%.*]] = xor i1 [[CTMP]], true +; CHECK-NEXT: [[TMP1:%.*]] = xor i1 %A, %B +; CHECK-NEXT: [[C:%.*]] = xor i1 [[TMP1]], true ; CHECK-NEXT: ret i1 [[C]] ; %C = icmp eq i1 %A, %B @@ -140,14 +140,88 @@ define i1 @test14(i1 %A, i1 %B) { define <3 x i1> @test14vec(<3 x i1> %A, <3 x i1> %B) { ; CHECK-LABEL: @test14vec( -; CHECK-NEXT: [[CTMP:%.*]] = xor <3 x i1> %A, %B -; CHECK-NEXT: [[C:%.*]] = xor <3 x i1> [[CTMP]], <i1 true, i1 true, i1 true> +; CHECK-NEXT: [[TMP1:%.*]] = xor <3 x i1> %A, %B +; CHECK-NEXT: [[C:%.*]] = xor <3 x i1> [[TMP1]], <i1 true, i1 true, i1 true> ; CHECK-NEXT: ret <3 x i1> [[C]] ; %C = icmp eq <3 x i1> %A, %B ret <3 x i1> %C } +define i1 @bool_eq0(i64 %a) { +; CHECK-LABEL: @bool_eq0( +; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i64 %a, 1 +; CHECK-NEXT: ret i1 [[TMP1]] +; + %b = icmp sgt i64 %a, 0 + %c = icmp eq i64 %a, 1 + %notc = icmp eq i1 %c, false + %and = and i1 %b, %notc + ret i1 %and +} + +; FIXME: This is equivalent to the previous test. + +define i1 @xor_of_icmps(i64 %a) { +; CHECK-LABEL: @xor_of_icmps( +; CHECK-NEXT: [[B:%.*]] = icmp sgt i64 %a, 0 +; CHECK-NEXT: [[C:%.*]] = icmp eq i64 %a, 1 +; CHECK-NEXT: [[XOR:%.*]] = xor i1 [[C]], [[B]] +; CHECK-NEXT: ret i1 [[XOR]] +; + %b = icmp sgt i64 %a, 0 + %c = icmp eq i64 %a, 1 + %xor = xor i1 %c, %b + ret i1 %xor +} + +; FIXME: This is also equivalent to the previous test. + +define i1 @xor_of_icmps_commute(i64 %a) { +; CHECK-LABEL: @xor_of_icmps_commute( +; CHECK-NEXT: [[B:%.*]] = icmp sgt i64 %a, 0 +; CHECK-NEXT: [[C:%.*]] = icmp eq i64 %a, 1 +; CHECK-NEXT: [[XOR:%.*]] = xor i1 [[B]], [[C]] +; CHECK-NEXT: ret i1 [[XOR]] +; + %b = icmp sgt i64 %a, 0 + %c = icmp eq i64 %a, 1 + %xor = xor i1 %b, %c + ret i1 %xor +} + +; FIXME: This is (a != 5). 
+ +define i1 @xor_of_icmps_folds_more(i64 %a) { +; CHECK-LABEL: @xor_of_icmps_folds_more( +; CHECK-NEXT: [[B:%.*]] = icmp sgt i64 %a, 4 +; CHECK-NEXT: [[C:%.*]] = icmp slt i64 %a, 6 +; CHECK-NEXT: [[XOR:%.*]] = xor i1 [[B]], [[C]] +; CHECK-NEXT: ret i1 [[XOR]] +; + %b = icmp sgt i64 %a, 4 + %c = icmp slt i64 %a, 6 + %xor = xor i1 %b, %c + ret i1 %xor +} + +; https://bugs.llvm.org/show_bug.cgi?id=2844 + +define i32 @PR2844(i32 %x) { +; CHECK-LABEL: @PR2844( +; CHECK-NEXT: [[A:%.*]] = icmp eq i32 %x, 0 +; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 %x, -638208502 +; CHECK-NEXT: [[NOT_OR:%.*]] = xor i1 [[A]], [[B]] +; CHECK-NEXT: [[SEL:%.*]] = zext i1 [[NOT_OR]] to i32 +; CHECK-NEXT: ret i32 [[SEL]] +; + %A = icmp eq i32 %x, 0 + %B = icmp slt i32 %x, -638208501 + %or = or i1 %A, %B + %sel = select i1 %or, i32 0, i32 1 + ret i32 %sel +} + define i1 @test16(i32 %A) { ; CHECK-LABEL: @test16( ; CHECK-NEXT: ret i1 false @@ -191,8 +265,8 @@ endif: define i1 @test19(i1 %A, i1 %B) { ; CHECK-LABEL: @test19( -; CHECK-NEXT: [[CTMP:%.*]] = xor i1 %A, %B -; CHECK-NEXT: [[C:%.*]] = xor i1 [[CTMP]], true +; CHECK-NEXT: [[TMP1:%.*]] = xor i1 %A, %B +; CHECK-NEXT: [[C:%.*]] = xor i1 [[TMP1]], true ; CHECK-NEXT: ret i1 [[C]] ; %a = zext i1 %A to i32 diff --git a/test/Transforms/InstCombine/wcslen-1.ll b/test/Transforms/InstCombine/wcslen-1.ll new file mode 100644 index 000000000000..d4e51750f6da --- /dev/null +++ b/test/Transforms/InstCombine/wcslen-1.ll @@ -0,0 +1,191 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; Test that the wcslen library call simplifier works correctly. +; +; RUN: opt < %s -instcombine -S | FileCheck %s + +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" + +declare i64 @wcslen(i32*) + +@hello = constant [6 x i32] [i32 104, i32 101, i32 108, i32 108, i32 111, i32 0] +@longer = constant [7 x i32] [i32 108, i32 111, i32 110, i32 103, i32 101, i32 114, i32 0] +@null = constant [1 x i32] zeroinitializer +@null_hello = constant [7 x i32] [i32 0, i32 104, i32 101, i32 108, i32 108, i32 111, i32 0] +@nullstring = constant i32 0 +@a = common global [32 x i32] zeroinitializer, align 1 +@null_hello_mid = constant [13 x i32] [i32 104, i32 101, i32 108, i32 108, i32 111, i32 32, i32 119, i32 111, i32 114, i32 0, i32 108, i32 100, i32 0] + +define i64 @test_simplify1() { +; CHECK-LABEL: @test_simplify1( +; CHECK-NEXT: ret i64 5 +; + %hello_p = getelementptr [6 x i32], [6 x i32]* @hello, i64 0, i64 0 + %hello_l = call i64 @wcslen(i32* %hello_p) + ret i64 %hello_l +} + +define i64 @test_simplify2() { +; CHECK-LABEL: @test_simplify2( +; CHECK-NEXT: ret i64 0 +; + %null_p = getelementptr [1 x i32], [1 x i32]* @null, i64 0, i64 0 + %null_l = call i64 @wcslen(i32* %null_p) + ret i64 %null_l +} + +define i64 @test_simplify3() { +; CHECK-LABEL: @test_simplify3( +; CHECK-NEXT: ret i64 0 +; + %null_hello_p = getelementptr [7 x i32], [7 x i32]* @null_hello, i64 0, i64 0 + %null_hello_l = call i64 @wcslen(i32* %null_hello_p) + ret i64 %null_hello_l +} + +define i64 @test_simplify4() { +; CHECK-LABEL: @test_simplify4( +; CHECK-NEXT: ret i64 0 +; + %len = tail call i64 @wcslen(i32* @nullstring) nounwind + ret i64 %len +} + +; Check wcslen(x) == 0 --> *x == 0. 
+ +define i1 @test_simplify5() { +; CHECK-LABEL: @test_simplify5( +; CHECK-NEXT: ret i1 false +; + %hello_p = getelementptr [6 x i32], [6 x i32]* @hello, i64 0, i64 0 + %hello_l = call i64 @wcslen(i32* %hello_p) + %eq_hello = icmp eq i64 %hello_l, 0 + ret i1 %eq_hello +} + +define i1 @test_simplify6(i32* %str_p) { +; CHECK-LABEL: @test_simplify6( +; CHECK-NEXT: [[STRLENFIRST:%.*]] = load i32, i32* [[STR_P:%.*]], align 4 +; CHECK-NEXT: [[EQ_NULL:%.*]] = icmp eq i32 [[STRLENFIRST]], 0 +; CHECK-NEXT: ret i1 [[EQ_NULL]] +; + %str_l = call i64 @wcslen(i32* %str_p) + %eq_null = icmp eq i64 %str_l, 0 + ret i1 %eq_null +} + +; Check wcslen(x) != 0 --> *x != 0. + +define i1 @test_simplify7() { +; CHECK-LABEL: @test_simplify7( +; CHECK-NEXT: ret i1 true +; + %hello_p = getelementptr [6 x i32], [6 x i32]* @hello, i64 0, i64 0 + %hello_l = call i64 @wcslen(i32* %hello_p) + %ne_hello = icmp ne i64 %hello_l, 0 + ret i1 %ne_hello +} + +define i1 @test_simplify8(i32* %str_p) { +; CHECK-LABEL: @test_simplify8( +; CHECK-NEXT: [[STRLENFIRST:%.*]] = load i32, i32* [[STR_P:%.*]], align 4 +; CHECK-NEXT: [[NE_NULL:%.*]] = icmp ne i32 [[STRLENFIRST]], 0 +; CHECK-NEXT: ret i1 [[NE_NULL]] +; + %str_l = call i64 @wcslen(i32* %str_p) + %ne_null = icmp ne i64 %str_l, 0 + ret i1 %ne_null +} + +define i64 @test_simplify9(i1 %x) { +; CHECK-LABEL: @test_simplify9( +; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[X:%.*]], i64 5, i64 6 +; CHECK-NEXT: ret i64 [[TMP1]] +; + %hello = getelementptr [6 x i32], [6 x i32]* @hello, i64 0, i64 0 + %longer = getelementptr [7 x i32], [7 x i32]* @longer, i64 0, i64 0 + %s = select i1 %x, i32* %hello, i32* %longer + %l = call i64 @wcslen(i32* %s) + ret i64 %l +} + +; Check the case that should be simplified to a sub instruction. +; wcslen(@hello + x) --> 5 - x + +define i64 @test_simplify10(i32 %x) { +; CHECK-LABEL: @test_simplify10( +; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[X:%.*]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = sub nsw i64 5, [[TMP1]] +; CHECK-NEXT: ret i64 [[TMP2]] +; + %hello_p = getelementptr inbounds [6 x i32], [6 x i32]* @hello, i32 0, i32 %x + %hello_l = call i64 @wcslen(i32* %hello_p) + ret i64 %hello_l +} + +; wcslen(@null_hello_mid + (x & 7)) --> 9 - (x & 7) + +define i64 @test_simplify11(i32 %x) { +; CHECK-LABEL: @test_simplify11( +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 7 +; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[AND]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = sub nsw i64 9, [[TMP1]] +; CHECK-NEXT: ret i64 [[TMP2]] +; + %and = and i32 %x, 7 + %hello_p = getelementptr inbounds [13 x i32], [13 x i32]* @null_hello_mid, i32 0, i32 %and + %hello_l = call i64 @wcslen(i32* %hello_p) + ret i64 %hello_l +} + +; Check cases that shouldn't be simplified. + +define i64 @test_no_simplify1() { +; CHECK-LABEL: @test_no_simplify1( +; CHECK-NEXT: [[A_L:%.*]] = call i64 @wcslen(i32* getelementptr inbounds ([32 x i32], [32 x i32]* @a, i64 0, i64 0)) +; CHECK-NEXT: ret i64 [[A_L]] +; + %a_p = getelementptr [32 x i32], [32 x i32]* @a, i64 0, i64 0 + %a_l = call i64 @wcslen(i32* %a_p) + ret i64 %a_l +} + +; wcslen(@null_hello + x) should not be simplified to a sub instruction. 
+ +define i64 @test_no_simplify2(i32 %x) { +; CHECK-LABEL: @test_no_simplify2( +; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[X:%.*]] to i64 +; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [7 x i32], [7 x i32]* @null_hello, i64 0, i64 [[TMP1]] +; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(i32* [[HELLO_P]]) +; CHECK-NEXT: ret i64 [[HELLO_L]] +; + %hello_p = getelementptr inbounds [7 x i32], [7 x i32]* @null_hello, i32 0, i32 %x + %hello_l = call i64 @wcslen(i32* %hello_p) + ret i64 %hello_l +} + +; wcslen(@null_hello_mid + (x & 15)) should not be simplified to a sub instruction. + +define i64 @test_no_simplify3(i32 %x) { +; CHECK-LABEL: @test_no_simplify3( +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 15 +; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[AND]] to i64 +; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [13 x i32], [13 x i32]* @null_hello_mid, i64 0, i64 [[TMP1]] +; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(i32* [[HELLO_P]]) +; CHECK-NEXT: ret i64 [[HELLO_L]] +; + %and = and i32 %x, 15 + %hello_p = getelementptr inbounds [13 x i32], [13 x i32]* @null_hello_mid, i32 0, i32 %and + %hello_l = call i64 @wcslen(i32* %hello_p) + ret i64 %hello_l +} + +@str16 = constant [1 x i16] [i16 0] + +define i64 @test_no_simplify4() { +; CHECK-LABEL: @test_no_simplify4( +; CHECK-NEXT: [[L:%.*]] = call i64 @wcslen(i32* bitcast ([1 x i16]* @str16 to i32*)) +; CHECK-NEXT: ret i64 [[L]] +; + %l = call i64 @wcslen(i32* bitcast ([1 x i16]* @str16 to i32*)) + ret i64 %l +} diff --git a/test/Transforms/InstCombine/wcslen-2.ll b/test/Transforms/InstCombine/wcslen-2.ll new file mode 100644 index 000000000000..c1a70312a2b3 --- /dev/null +++ b/test/Transforms/InstCombine/wcslen-2.ll @@ -0,0 +1,18 @@ +; Test that the wcslen library call simplifier works correctly. +; +; RUN: opt < %s -instcombine -S | FileCheck %s + +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" + +@hello = constant [6 x i32] [i32 104, i32 101, i32 108, i32 108, i32 111, i32 0] + +declare i64 @wcslen(i32*, i32) + +define i64 @test_no_simplify1() { +; CHECK-LABEL: @test_no_simplify1( + %hello_p = getelementptr [6 x i32], [6 x i32]* @hello, i64 0, i64 0 + %hello_l = call i64 @wcslen(i32* %hello_p, i32 187) +; CHECK-NEXT: %hello_l = call i64 @wcslen + ret i64 %hello_l +; CHECK-NEXT: ret i64 %hello_l +} diff --git a/test/Transforms/InstCombine/wcslen-3.ll b/test/Transforms/InstCombine/wcslen-3.ll new file mode 100644 index 000000000000..c766ff21412d --- /dev/null +++ b/test/Transforms/InstCombine/wcslen-3.ll @@ -0,0 +1,197 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; Test that the wcslen library call simplifier works correctly. 
+; +; RUN: opt < %s -instcombine -S | FileCheck %s + +; Test behavior for wchar_size==2 +!llvm.module.flags = !{!0} +!0 = !{i32 1, !"wchar_size", i32 2} + +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" + +declare i64 @wcslen(i16*) + +@hello = constant [6 x i16] [i16 104, i16 101, i16 108, i16 108, i16 111, i16 0] +@longer = constant [7 x i16] [i16 108, i16 111, i16 110, i16 103, i16 101, i16 114, i16 0] +@null = constant [1 x i16] zeroinitializer +@null_hello = constant [7 x i16] [i16 0, i16 104, i16 101, i16 108, i16 108, i16 111, i16 0] +@nullstring = constant i16 0 +@a = common global [32 x i16] zeroinitializer, align 1 +@null_hello_mid = constant [13 x i16] [i16 104, i16 101, i16 108, i16 108, i16 111, i16 32, i16 119, i16 111, i16 114, i16 0, i16 108, i16 100, i16 0] + +define i64 @test_simplify1() { +; CHECK-LABEL: @test_simplify1( +; CHECK-NEXT: ret i64 5 +; + %hello_p = getelementptr [6 x i16], [6 x i16]* @hello, i64 0, i64 0 + %hello_l = call i64 @wcslen(i16* %hello_p) + ret i64 %hello_l +} + +define i64 @test_simplify2() { +; CHECK-LABEL: @test_simplify2( +; CHECK-NEXT: ret i64 0 +; + %null_p = getelementptr [1 x i16], [1 x i16]* @null, i64 0, i64 0 + %null_l = call i64 @wcslen(i16* %null_p) + ret i64 %null_l +} + +define i64 @test_simplify3() { +; CHECK-LABEL: @test_simplify3( +; CHECK-NEXT: ret i64 0 +; + %null_hello_p = getelementptr [7 x i16], [7 x i16]* @null_hello, i64 0, i64 0 + %null_hello_l = call i64 @wcslen(i16* %null_hello_p) + ret i64 %null_hello_l +} + +define i64 @test_simplify4() { +; CHECK-LABEL: @test_simplify4( +; CHECK-NEXT: ret i64 0 +; + %len = tail call i64 @wcslen(i16* @nullstring) nounwind + ret i64 %len +} + +; Check wcslen(x) == 0 --> *x == 0. + +define i1 @test_simplify5() { +; CHECK-LABEL: @test_simplify5( +; CHECK-NEXT: ret i1 false +; + %hello_p = getelementptr [6 x i16], [6 x i16]* @hello, i64 0, i64 0 + %hello_l = call i64 @wcslen(i16* %hello_p) + %eq_hello = icmp eq i64 %hello_l, 0 + ret i1 %eq_hello +} + +define i1 @test_simplify6(i16* %str_p) { +; CHECK-LABEL: @test_simplify6( +; CHECK-NEXT: [[STRLENFIRST:%.*]] = load i16, i16* [[STR_P:%.*]], align 2 +; CHECK-NEXT: [[EQ_NULL:%.*]] = icmp eq i16 [[STRLENFIRST]], 0 +; CHECK-NEXT: ret i1 [[EQ_NULL]] +; + %str_l = call i64 @wcslen(i16* %str_p) + %eq_null = icmp eq i64 %str_l, 0 + ret i1 %eq_null +} + +; Check wcslen(x) != 0 --> *x != 0. + +define i1 @test_simplify7() { +; CHECK-LABEL: @test_simplify7( +; CHECK-NEXT: ret i1 true +; + %hello_p = getelementptr [6 x i16], [6 x i16]* @hello, i64 0, i64 0 + %hello_l = call i64 @wcslen(i16* %hello_p) + %ne_hello = icmp ne i64 %hello_l, 0 + ret i1 %ne_hello +} + +define i1 @test_simplify8(i16* %str_p) { +; CHECK-LABEL: @test_simplify8( +; CHECK-NEXT: [[STRLENFIRST:%.*]] = load i16, i16* [[STR_P:%.*]], align 2 +; CHECK-NEXT: [[NE_NULL:%.*]] = icmp ne i16 [[STRLENFIRST]], 0 +; CHECK-NEXT: ret i1 [[NE_NULL]] +; + %str_l = call i64 @wcslen(i16* %str_p) + %ne_null = icmp ne i64 %str_l, 0 + ret i1 %ne_null +} + +define i64 @test_simplify9(i1 %x) { +; CHECK-LABEL: @test_simplify9( +; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[X:%.*]], i64 5, i64 6 +; CHECK-NEXT: ret i64 [[TMP1]] +; + %hello = getelementptr [6 x i16], [6 x i16]* @hello, i64 0, i64 0 + %longer = getelementptr [7 x i16], [7 x i16]* @longer, i64 0, i64 0 + %s = select i1 %x, i16* %hello, i16* %longer + %l = call i64 @wcslen(i16* %s) + ret i64 %l +} + +; Check the case that should be simplified to a sub instruction. 
+; wcslen(@hello + x) --> 5 - x + +define i64 @test_simplify10(i16 %x) { +; CHECK-LABEL: @test_simplify10( +; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[X:%.*]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = sub nsw i64 5, [[TMP1]] +; CHECK-NEXT: ret i64 [[TMP2]] +; + %hello_p = getelementptr inbounds [6 x i16], [6 x i16]* @hello, i16 0, i16 %x + %hello_l = call i64 @wcslen(i16* %hello_p) + ret i64 %hello_l +} + +; wcslen(@null_hello_mid + (x & 7)) --> 9 - (x & 7) + +define i64 @test_simplify11(i16 %x) { +; CHECK-LABEL: @test_simplify11( +; CHECK-NEXT: [[AND:%.*]] = and i16 [[X:%.*]], 7 +; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[AND]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = sub nsw i64 9, [[TMP1]] +; CHECK-NEXT: ret i64 [[TMP2]] +; + %and = and i16 %x, 7 + %hello_p = getelementptr inbounds [13 x i16], [13 x i16]* @null_hello_mid, i16 0, i16 %and + %hello_l = call i64 @wcslen(i16* %hello_p) + ret i64 %hello_l +} + +; Check cases that shouldn't be simplified. + +define i64 @test_no_simplify1() { +; CHECK-LABEL: @test_no_simplify1( +; CHECK-NEXT: [[A_L:%.*]] = call i64 @wcslen(i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a, i64 0, i64 0)) +; CHECK-NEXT: ret i64 [[A_L]] +; + %a_p = getelementptr [32 x i16], [32 x i16]* @a, i64 0, i64 0 + %a_l = call i64 @wcslen(i16* %a_p) + ret i64 %a_l +} + +; wcslen(@null_hello + x) should not be simplified to a sub instruction. + +define i64 @test_no_simplify2(i16 %x) { +; CHECK-LABEL: @test_no_simplify2( +; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[X:%.*]] to i64 +; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [7 x i16], [7 x i16]* @null_hello, i64 0, i64 [[TMP1]] +; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(i16* [[HELLO_P]]) +; CHECK-NEXT: ret i64 [[HELLO_L]] +; + %hello_p = getelementptr inbounds [7 x i16], [7 x i16]* @null_hello, i16 0, i16 %x + %hello_l = call i64 @wcslen(i16* %hello_p) + ret i64 %hello_l +} + +; wcslen(@null_hello_mid + (x & 15)) should not be simplified to a sub instruction. + +define i64 @test_no_simplify3(i16 %x) { +; CHECK-LABEL: @test_no_simplify3( +; CHECK-NEXT: [[AND:%.*]] = and i16 [[X:%.*]], 15 +; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[AND]] to i64 +; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [13 x i16], [13 x i16]* @null_hello_mid, i64 0, i64 [[TMP1]] +; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(i16* [[HELLO_P]]) +; CHECK-NEXT: ret i64 [[HELLO_L]] +; + %and = and i16 %x, 15 + %hello_p = getelementptr inbounds [13 x i16], [13 x i16]* @null_hello_mid, i16 0, i16 %and + %hello_l = call i64 @wcslen(i16* %hello_p) + ret i64 %hello_l +} + +@str32 = constant [1 x i32] [i32 0] + +; This could in principle be simplified, but the current implementation bails on +; type mismatches. 
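Note (editorial, not part of the patch): the mismatch here is that @str32 holds i32 elements while wchar_size == 2 makes the wide-character type i16, so the initializer is not accepted as a wide-string constant even though its bytes would decode to an empty string. For contrast, a matching element type does fold; @str16.sketch and @sketch_matching_type are illustrative names reusing the @wcslen declaration above:

  @str16.sketch = constant [2 x i16] [i16 65, i16 0]

  define i64 @sketch_matching_type() {
    %p = getelementptr [2 x i16], [2 x i16]* @str16.sketch, i64 0, i64 0
    %l = call i64 @wcslen(i16* %p)   ; expected to fold to ret i64 1
    ret i64 %l
  }

The test below pins down the current bail-out behaviour.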
+define i64 @test_no_simplify4() {
+; CHECK-LABEL: @test_no_simplify4(
+; CHECK-NEXT: [[L:%.*]] = call i64 @wcslen(i16* bitcast ([1 x i32]* @str32 to i16*))
+; CHECK-NEXT: ret i64 [[L]]
+;
+ %l = call i64 @wcslen(i16* bitcast ([1 x i32]* @str32 to i16*))
+ ret i64 %l
+} diff --git a/test/Transforms/InstSimplify/AndOrXor.ll b/test/Transforms/InstSimplify/AndOrXor.ll index a9b4e4e5cfcc..a027c7e18280 100644 --- a/test/Transforms/InstSimplify/AndOrXor.ll +++ b/test/Transforms/InstSimplify/AndOrXor.ll @@ -735,6 +735,74 @@ define i32 @test54(i32 %a, i32 %b) {
 ret i32 %or
 }
+; (A & B) | ~(A ^ B) -> ~(A ^ B)
+
+define i32 @test55(i32 %a, i32 %b) {
+; CHECK-LABEL: @test55(
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[A]], [[B]]
+; CHECK-NEXT: [[XNOR:%.*]] = xor i32 [[XOR]], -1
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[AND]], [[XNOR]]
+; CHECK-NEXT: ret i32 [[OR]]
+;
+ %and = and i32 %a, %b
+ %xor = xor i32 %a, %b
+ %xnor = xor i32 %xor, -1
+ %or = or i32 %and, %xnor
+ ret i32 %or
+}
+
+; ~(A ^ B) | (A & B) -> ~(A ^ B)
+
+define i32 @test56(i32 %a, i32 %b) {
+; CHECK-LABEL: @test56(
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[A]], [[B]]
+; CHECK-NEXT: [[XNOR:%.*]] = xor i32 [[XOR]], -1
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[XNOR]], [[AND]]
+; CHECK-NEXT: ret i32 [[OR]]
+;
+ %and = and i32 %a, %b
+ %xor = xor i32 %a, %b
+ %xnor = xor i32 %xor, -1
+ %or = or i32 %xnor, %and
+ ret i32 %or
+}
+
+; (B & A) | ~(A ^ B) -> ~(A ^ B)
+
+define i32 @test57(i32 %a, i32 %b) {
+; CHECK-LABEL: @test57(
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[B:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[A]], [[B]]
+; CHECK-NEXT: [[XNOR:%.*]] = xor i32 [[XOR]], -1
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[AND]], [[XNOR]]
+; CHECK-NEXT: ret i32 [[OR]]
+;
+ %and = and i32 %b, %a
+ %xor = xor i32 %a, %b
+ %xnor = xor i32 %xor, -1
+ %or = or i32 %and, %xnor
+ ret i32 %or
+}
+
+; ~(A ^ B) | (B & A) -> ~(A ^ B)
+
+define i32 @test58(i32 %a, i32 %b) {
+; CHECK-LABEL: @test58(
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[B:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[A]], [[B]]
+; CHECK-NEXT: [[XNOR:%.*]] = xor i32 [[XOR]], -1
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[XNOR]], [[AND]]
+; CHECK-NEXT: ret i32 [[OR]]
+;
+ %and = and i32 %b, %a
+ %xor = xor i32 %a, %b
+ %xnor = xor i32 %xor, -1
+ %or = or i32 %xnor, %and
+ ret i32 %or
+}
+
 define i8 @lshr_perfect_mask(i8 %x) {
 ; CHECK-LABEL: @lshr_perfect_mask(
 ; CHECK-NEXT: [[SH:%.*]] = lshr i8 %x, 5
@@ -797,3 +865,11 @@ define <2 x i8> @shl_undersized_mask_splat(<2 x i8> %x) {
 ret <2 x i8> %mask
 }
+define i32 @reversed_not(i32 %a) {
+; CHECK-LABEL: @reversed_not(
+; CHECK-NEXT: ret i32 -1
+;
+ %nega = xor i32 -1, %a
+ %or = or i32 %a, %nega
+ ret i32 %or
+} diff --git a/test/Transforms/InstSimplify/icmp-bool-constant.ll b/test/Transforms/InstSimplify/icmp-bool-constant.ll new file mode 100644 index 000000000000..f711fae0a857 --- /dev/null +++ b/test/Transforms/InstSimplify/icmp-bool-constant.ll @@ -0,0 +1,171 @@ +; RUN: opt < %s -instsimplify -S | FileCheck %s
+
+; Test all integer predicates with bool types and true/false constants.
+; Use vectors to provide test coverage that is not duplicated in other folds.
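Note (editorial, not part of the patch): since i1 has only the values true and false, each of these compares must simplify to a constant, to %a itself, or to the negation of %a. InstSimplify handles the first two cases but may not create new instructions, so the negation cases (for example @ne_t and @sgt_t below) are left alone; InstCombine would finish those. A scalar sketch of the signed reading, in which true is -1 (@sketch_sgt_true is an illustrative name):

  define i1 @sketch_sgt_true(i1 %a) {
    ; "%a sgt true" reads as "%a > -1", which holds only for %a == false;
    ; the fold would need a new "xor i1 %a, true" instruction.
    %r = icmp sgt i1 %a, true
    ret i1 %r
  }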
+ +define <2 x i1> @eq_t(<2 x i1> %a) { +; CHECK-LABEL: @eq_t( +; CHECK-NEXT: ret <2 x i1> %a +; + %r = icmp eq <2 x i1> %a, <i1 true, i1 true> + ret <2 x i1> %r +} + +define <2 x i1> @eq_f(<2 x i1> %a) { +; CHECK-LABEL: @eq_f( +; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i1> %a, zeroinitializer +; CHECK-NEXT: ret <2 x i1> [[R]] +; + %r = icmp eq <2 x i1> %a, <i1 false, i1 false> + ret <2 x i1> %r +} + +define <2 x i1> @ne_t(<2 x i1> %a) { +; CHECK-LABEL: @ne_t( +; CHECK-NEXT: [[R:%.*]] = icmp ne <2 x i1> %a, <i1 true, i1 true> +; CHECK-NEXT: ret <2 x i1> [[R]] +; + %r = icmp ne <2 x i1> %a, <i1 true, i1 true> + ret <2 x i1> %r +} + +define <2 x i1> @ne_f(<2 x i1> %a) { +; CHECK-LABEL: @ne_f( +; CHECK-NEXT: ret <2 x i1> %a +; + %r = icmp ne <2 x i1> %a, <i1 false, i1 false> + ret <2 x i1> %r +} + +define <2 x i1> @ugt_t(<2 x i1> %a) { +; CHECK-LABEL: @ugt_t( +; CHECK-NEXT: ret <2 x i1> zeroinitializer +; + %r = icmp ugt <2 x i1> %a, <i1 true, i1 true> + ret <2 x i1> %r +} + +define <2 x i1> @ugt_f(<2 x i1> %a) { +; CHECK-LABEL: @ugt_f( +; CHECK-NEXT: ret <2 x i1> %a +; + %r = icmp ugt <2 x i1> %a, <i1 false, i1 false> + ret <2 x i1> %r +} + +define <2 x i1> @ult_t(<2 x i1> %a) { +; CHECK-LABEL: @ult_t( +; CHECK-NEXT: [[R:%.*]] = icmp ult <2 x i1> %a, <i1 true, i1 true> +; CHECK-NEXT: ret <2 x i1> [[R]] +; + %r = icmp ult <2 x i1> %a, <i1 true, i1 true> + ret <2 x i1> %r +} + +define <2 x i1> @ult_f(<2 x i1> %a) { +; CHECK-LABEL: @ult_f( +; CHECK-NEXT: ret <2 x i1> zeroinitializer +; + %r = icmp ult <2 x i1> %a, <i1 false, i1 false> + ret <2 x i1> %r +} + +define <2 x i1> @sgt_t(<2 x i1> %a) { +; CHECK-LABEL: @sgt_t( +; CHECK-NEXT: [[R:%.*]] = icmp sgt <2 x i1> %a, <i1 true, i1 true> +; CHECK-NEXT: ret <2 x i1> [[R]] +; + %r = icmp sgt <2 x i1> %a, <i1 true, i1 true> + ret <2 x i1> %r +} + +define <2 x i1> @sgt_f(<2 x i1> %a) { +; CHECK-LABEL: @sgt_f( +; CHECK-NEXT: ret <2 x i1> zeroinitializer +; + %r = icmp sgt <2 x i1> %a, <i1 false, i1 false> + ret <2 x i1> %r +} + +define <2 x i1> @slt_t(<2 x i1> %a) { +; CHECK-LABEL: @slt_t( +; CHECK-NEXT: ret <2 x i1> zeroinitializer +; + %r = icmp slt <2 x i1> %a, <i1 true, i1 true> + ret <2 x i1> %r +} + +define <2 x i1> @slt_f(<2 x i1> %a) { +; CHECK-LABEL: @slt_f( +; CHECK-NEXT: ret <2 x i1> %a +; + %r = icmp slt <2 x i1> %a, <i1 false, i1 false> + ret <2 x i1> %r +} + +define <2 x i1> @uge_t(<2 x i1> %a) { +; CHECK-LABEL: @uge_t( +; CHECK-NEXT: ret <2 x i1> %a +; + %r = icmp uge <2 x i1> %a, <i1 true, i1 true> + ret <2 x i1> %r +} + +define <2 x i1> @uge_f(<2 x i1> %a) { +; CHECK-LABEL: @uge_f( +; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true> +; + %r = icmp uge <2 x i1> %a, <i1 false, i1 false> + ret <2 x i1> %r +} + +define <2 x i1> @ule_t(<2 x i1> %a) { +; CHECK-LABEL: @ule_t( +; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true> +; + %r = icmp ule <2 x i1> %a, <i1 true, i1 true> + ret <2 x i1> %r +} + +define <2 x i1> @ule_f(<2 x i1> %a) { +; CHECK-LABEL: @ule_f( +; CHECK-NEXT: [[R:%.*]] = icmp ule <2 x i1> %a, zeroinitializer +; CHECK-NEXT: ret <2 x i1> [[R]] +; + %r = icmp ule <2 x i1> %a, <i1 false, i1 false> + ret <2 x i1> %r +} + +define <2 x i1> @sge_t(<2 x i1> %a) { +; CHECK-LABEL: @sge_t( +; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true> +; + %r = icmp sge <2 x i1> %a, <i1 true, i1 true> + ret <2 x i1> %r +} + +define <2 x i1> @sge_f(<2 x i1> %a) { +; CHECK-LABEL: @sge_f( +; CHECK-NEXT: [[R:%.*]] = icmp sge <2 x i1> %a, zeroinitializer +; CHECK-NEXT: ret <2 x i1> [[R]] +; + %r = icmp sge <2 x i1> %a, <i1 false, i1 false> + ret <2 x i1> %r +} + +define 
<2 x i1> @sle_t(<2 x i1> %a) { +; CHECK-LABEL: @sle_t( +; CHECK-NEXT: ret <2 x i1> %a +; + %r = icmp sle <2 x i1> %a, <i1 true, i1 true> + ret <2 x i1> %r +} + +define <2 x i1> @sle_f(<2 x i1> %a) { +; CHECK-LABEL: @sle_f( +; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true> +; + %r = icmp sle <2 x i1> %a, <i1 false, i1 false> + ret <2 x i1> %r +} + diff --git a/test/Transforms/JumpThreading/assume.ll b/test/Transforms/JumpThreading/assume.ll index 53010b71c728..3a039676e172 100644 --- a/test/Transforms/JumpThreading/assume.ll +++ b/test/Transforms/JumpThreading/assume.ll @@ -56,6 +56,50 @@ return: ; preds = %entry, %if.then ret i32 %retval.0 } +@g = external global i32 + +; Check that we do prove a fact using an assume within the block. +; FIXME: We can fold the assume based on the semantics of assume. +; CHECK-LABEL: @can_fold_assume +; CHECK: %notnull = icmp ne i32* %array, null +; CHECK-NEXT: call void @llvm.assume(i1 %notnull) +; CHECK-NEXT: ret void +define void @can_fold_assume(i32* %array) { + %notnull = icmp ne i32* %array, null + call void @llvm.assume(i1 %notnull) + br i1 %notnull, label %normal, label %error + +normal: + ret void + +error: + store atomic i32 0, i32* @g unordered, align 4 + ret void +} + +declare void @f(i1) +declare void @exit() +; We can fold the assume but not the uses before the assume. +define void @dont_fold_incorrectly(i32* %array) { +; CHECK-LABEL:@dont_fold_incorrectly +; CHECK: @f(i1 %notnull) +; CHECK-NEXT: exit() +; CHECK-NEXT: assume(i1 %notnull) +; CHECK-NEXT: ret void + %notnull = icmp ne i32* %array, null + call void @f(i1 %notnull) + call void @exit() + call void @llvm.assume(i1 %notnull) + br i1 %notnull, label %normal, label %error + +normal: + ret void + +error: + store atomic i32 0, i32* @g unordered, align 4 + ret void +} + ; Function Attrs: nounwind declare void @llvm.assume(i1) #1 diff --git a/test/Transforms/JumpThreading/fold-not-thread.ll b/test/Transforms/JumpThreading/fold-not-thread.ll index 06ddc10e02b6..f05169b31bc8 100644 --- a/test/Transforms/JumpThreading/fold-not-thread.ll +++ b/test/Transforms/JumpThreading/fold-not-thread.ll @@ -133,10 +133,10 @@ L3: ret void } -; Make sure we can do the RAUW for %add... +; FIXME: Make sure we can do the RAUW for %add... ; ; CHECK-LABEL: @rauw_if_possible( -; CHECK: call void @f4(i32 96) +; CHECK: call void @f4(i32 %add) define void @rauw_if_possible(i32 %value) nounwind { entry: %cmp = icmp eq i32 %value, 32 diff --git a/test/Transforms/JumpThreading/guards.ll b/test/Transforms/JumpThreading/guards.ll index eac2b5dcd85f..c5f72b113efc 100644 --- a/test/Transforms/JumpThreading/guards.ll +++ b/test/Transforms/JumpThreading/guards.ll @@ -181,3 +181,97 @@ Exit: ; CHECK-NEXT: ret void ret void } + +declare void @never_called() + +; Assume the guard is always taken and we deoptimize, so we never reach the +; branch below that guard. We should *never* change the behaviour of a guard from +; `must deoptimize` to `may deoptimize`, since this affects the program +; semantics. +define void @dont_fold_guard(i8* %addr, i32 %i, i32 %length) { +; CHECK-LABEL: dont_fold_guard +; CHECK: experimental.guard(i1 %wide.chk) + +entry: + br label %BBPred + +BBPred: + %cond = icmp eq i8* %addr, null + br i1 %cond, label %zero, label %not_zero + +zero: + unreachable + +not_zero: + %c1 = icmp ult i32 %i, %length + %c2 = icmp eq i32 %i, 0 + %wide.chk = and i1 %c1, %c2 + call void(i1, ...) 
@llvm.experimental.guard(i1 %wide.chk) [ "deopt"() ]
+ br i1 %c2, label %unreachedBB2, label %unreachedBB1
+
+unreachedBB2:
+ call void @never_called()
+ ret void
+
+unreachedBB1:
+ ret void
+}
+
+
+; Same as @dont_fold_guard, but here the condition %cmp is not an instruction.
+; We cannot fold the guard under any circumstance.
+; FIXME: We can merge unreachedBB2 into not_zero.
+define void @dont_fold_guard2(i8* %addr, i1 %cmp, i32 %i, i32 %length) {
+; CHECK-LABEL: dont_fold_guard2
+; CHECK: guard(i1 %cmp)
+
+entry:
+ br label %BBPred
+
+BBPred:
+ %cond = icmp eq i8* %addr, null
+ br i1 %cond, label %zero, label %not_zero
+
+zero:
+ unreachable
+
+not_zero:
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
+ br i1 %cmp, label %unreachedBB2, label %unreachedBB1
+
+unreachedBB2:
+ call void @never_called()
+ ret void
+
+unreachedBB1:
+ ret void
+}
+
+; Same as @dont_fold_guard, but using a switch instead of a branch; this
+; exercises ProcessThreadableEdges in the JumpThreading source.
+declare void @f(i1)
+define void @dont_fold_guard3(i1 %cmp1, i32 %i) nounwind {
+; CHECK-LABEL: dont_fold_guard3
+; CHECK-LABEL: L2:
+; CHECK-NEXT: %cmp = icmp eq i32 %i, 0
+; CHECK-NEXT: guard(i1 %cmp)
+; CHECK-NEXT: @f(i1 %cmp)
+; CHECK-NEXT: ret void
+entry:
+ br i1 %cmp1, label %L0, label %L3
+L0:
+ %cmp = icmp eq i32 %i, 0
+ call void(i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
+ switch i1 %cmp, label %L3 [
+ i1 false, label %L1
+ i1 true, label %L2
+ ]
+
+L1:
+ ret void
+L2:
+ call void @f(i1 %cmp)
+ ret void
+L3:
+ ret void
+} diff --git a/test/Transforms/LoopStrengthReduce/X86/canonical-2.ll b/test/Transforms/LoopStrengthReduce/X86/canonical-2.ll new file mode 100644 index 000000000000..69bae3a51159 --- /dev/null +++ b/test/Transforms/LoopStrengthReduce/X86/canonical-2.ll @@ -0,0 +1,36 @@ +; REQUIRES: asserts
+; RUN: opt -mtriple=x86_64-unknown-linux-gnu -loop-reduce -S < %s
+; PR33077. Check that the LSR Use formula to be inserted is already canonicalized
+; and will not trigger an assertion.
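Note (editorial, not part of the patch): LSR keeps each candidate Formula canonical (informally, when a formula carries more than one register, one of them must be held as the scaled register) and asserts that invariant on insertion, which is what PR33077 tripped; hence the test only needs -loop-reduce to run to completion under an asserts build, with no CHECK lines. For orientation, a generic sketch of the loop shape whose address arithmetic LSR strength-reduces (@lsr_shape and its values are illustrative only):

  define void @lsr_shape(i32* %p, i64 %n) {
  entry:
    br label %loop

  loop:
    %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
    %addr = getelementptr i32, i32* %p, i64 %i   ; candidate for a reused, incremented pointer
    store i32 0, i32* %addr
    %i.next = add i64 %i, 1
    %done = icmp eq i64 %i.next, %n
    br i1 %done, label %exit, label %loop

  exit:
    ret void
  }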
+ +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" + +; Function Attrs: uwtable +define void @foo() { +cHeapLvb.exit: + br label %not_zero48.us + +not_zero48.us: ; preds = %not_zero48.us, %cHeapLvb.exit + %indvars.iv.us = phi i64 [ %indvars.iv.next.us.7, %not_zero48.us ], [ undef, %cHeapLvb.exit ] + %0 = phi i32 [ %13, %not_zero48.us ], [ undef, %cHeapLvb.exit ] + %indvars.iv.next.us = add nuw nsw i64 %indvars.iv.us, 1 + %1 = add i32 %0, 2 + %2 = getelementptr inbounds i32, i32 addrspace(1)* undef, i64 %indvars.iv.next.us + %3 = load i32, i32 addrspace(1)* %2, align 4 + %4 = add i32 %0, 3 + %5 = load i32, i32 addrspace(1)* undef, align 4 + %6 = sub i32 undef, %5 + %factor.us.2 = shl i32 %6, 1 + %7 = add i32 %factor.us.2, %1 + %8 = load i32, i32 addrspace(1)* undef, align 4 + %9 = sub i32 %7, %8 + %factor.us.3 = shl i32 %9, 1 + %10 = add i32 %factor.us.3, %4 + %11 = load i32, i32 addrspace(1)* undef, align 4 + %12 = sub i32 %10, %11 + %factor.us.4 = shl i32 %12, 1 + %13 = add i32 %0, 8 + %indvars.iv.next.us.7 = add nsw i64 %indvars.iv.us, 8 + br label %not_zero48.us +} + diff --git a/test/Transforms/NewGVN/completeness.ll b/test/Transforms/NewGVN/completeness.ll new file mode 100644 index 000000000000..bafe5f966d22 --- /dev/null +++ b/test/Transforms/NewGVN/completeness.ll @@ -0,0 +1,415 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -basicaa -newgvn -S | FileCheck %s +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" + +define i32 @test1(i32, i8**) { +; CHECK-LABEL: @test1( +; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP0:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP5:%.*]] +; CHECK: br label [[TMP6:%.*]] +; CHECK: br label [[TMP6]] +; CHECK: [[TMP7:%.*]] = phi i32 [ 75, [[TMP4]] ], [ 105, [[TMP5]] ] +; CHECK-NEXT: [[DOT0:%.*]] = phi i32 [ 5, [[TMP4]] ], [ 7, [[TMP5]] ] +; CHECK-NEXT: ret i32 [[TMP7]] +; + %3 = icmp ne i32 %0, 0 + br i1 %3, label %4, label %5 + +; <label>:4: ; preds = %2 + br label %6 + +; <label>:5: ; preds = %2 + br label %6 + +; <label>:6: ; preds = %5, %4 + %.0 = phi i32 [ 5, %4 ], [ 7, %5 ] + %7 = mul nsw i32 %.0, 15 + ret i32 %7 +} + +define i32 @test2(i32) { +; CHECK-LABEL: @test2( +; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP0:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP4:%.*]] +; CHECK: br label [[TMP5:%.*]] +; CHECK: br label [[TMP5]] +; CHECK: [[DOT01:%.*]] = phi i32 [ 3, [[TMP3]] ], [ 2, [[TMP4]] ] +; CHECK-NEXT: [[DOT0:%.*]] = phi i32 [ 2, [[TMP3]] ], [ 3, [[TMP4]] ] +; CHECK-NEXT: ret i32 5 +; + %2 = icmp ne i32 %0, 0 + br i1 %2, label %3, label %4 + +; <label>:3: ; preds = %1 + br label %5 + +; <label>:4: ; preds = %1 + br label %5 + +; <label>:5: ; preds = %4, %3 + %.01 = phi i32 [ 3, %3 ], [ 2, %4 ] + %.0 = phi i32 [ 2, %3 ], [ 3, %4 ] + %6 = add nsw i32 %.01, %.0 + ret i32 %6 +} +define i32 @test3(i1 %which) { +; CHECK-LABEL: @test3( +; CHECK-NEXT: entry: +; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]] +; CHECK: delay: +; CHECK-NEXT: br label [[FINAL]] +; CHECK: final: +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ -877, [[ENTRY:%.*]] ], [ 113, [[DELAY]] ] +; CHECK-NEXT: [[A:%.*]] = phi i32 [ 1000, [[ENTRY]] ], [ 10, [[DELAY]] ] +; CHECK-NEXT: ret i32 [[TMP0]] +; + +entry: + br i1 %which, label %final, label %delay + +delay: + br label %final + +final: + %A = phi i32 [ 1000, %entry ], [ 10, %delay ] + %value = sub i32 123, %A + ret i32 %value +} + +define <2 x i32> @test3vec(i1 %which) { +; CHECK-LABEL: 
@test3vec( +; CHECK-NEXT: entry: +; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]] +; CHECK: delay: +; CHECK-NEXT: br label [[FINAL]] +; CHECK: final: +; CHECK-NEXT: [[TMP0:%.*]] = phi <2 x i32> [ <i32 -877, i32 -877>, [[ENTRY:%.*]] ], [ <i32 113, i32 113>, [[DELAY]] ] +; CHECK-NEXT: [[A:%.*]] = phi <2 x i32> [ <i32 1000, i32 1000>, [[ENTRY]] ], [ <i32 10, i32 10>, [[DELAY]] ] +; CHECK-NEXT: ret <2 x i32> [[TMP0]] +; + +entry: + br i1 %which, label %final, label %delay + +delay: + br label %final + +final: + %A = phi <2 x i32> [ <i32 1000, i32 1000>, %entry ], [ <i32 10, i32 10>, %delay ] + %value = sub <2 x i32> <i32 123, i32 123>, %A + ret <2 x i32> %value +} + +define <2 x i32> @test3vec2(i1 %which) { +; CHECK-LABEL: @test3vec2( +; CHECK-NEXT: entry: +; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]] +; CHECK: delay: +; CHECK-NEXT: br label [[FINAL]] +; CHECK: final: +; CHECK-NEXT: [[TMP0:%.*]] = phi <2 x i32> [ <i32 -877, i32 -2167>, [[ENTRY:%.*]] ], [ <i32 113, i32 303>, [[DELAY]] ] +; CHECK-NEXT: [[A:%.*]] = phi <2 x i32> [ <i32 1000, i32 2500>, [[ENTRY]] ], [ <i32 10, i32 30>, [[DELAY]] ] +; CHECK-NEXT: ret <2 x i32> [[TMP0]] +; + +entry: + br i1 %which, label %final, label %delay + +delay: + br label %final + +final: + %A = phi <2 x i32> [ <i32 1000, i32 2500>, %entry ], [ <i32 10, i32 30>, %delay ] + %value = sub <2 x i32> <i32 123, i32 333>, %A + ret <2 x i32> %value +} + +;; This example is a bit contrived because we can't create fake memoryuses, so we use two loads in the if blocks +define i32 @test4(i32, i8**, i32* noalias, i32* noalias) { +; CHECK-LABEL: @test4( +; CHECK-NEXT: store i32 5, i32* [[TMP2:%.*]], align 4 +; CHECK-NEXT: store i32 7, i32* [[TMP3:%.*]], align 4 +; CHECK-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP0:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP5]], label [[TMP6:%.*]], label [[TMP7:%.*]] +; CHECK: br label [[TMP8:%.*]] +; CHECK: br label [[TMP8]] +; CHECK: [[DOT01:%.*]] = phi i32 [ 5, [[TMP6]] ], [ 7, [[TMP7]] ] +; CHECK-NEXT: [[DOT0:%.*]] = phi i32* [ [[TMP2]], [[TMP6]] ], [ [[TMP3]], [[TMP7]] ] +; CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOT0]], align 4 +; CHECK-NEXT: [[TMP10:%.*]] = mul nsw i32 [[TMP9]], 15 +; CHECK-NEXT: [[TMP11:%.*]] = mul nsw i32 [[TMP10]], [[DOT01]] +; CHECK-NEXT: ret i32 [[TMP11]] +; + store i32 5, i32* %2, align 4 + store i32 7, i32* %3, align 4 + %5 = icmp ne i32 %0, 0 + br i1 %5, label %6, label %8 + +; <label>:6: ; preds = %4 + %7 = load i32, i32* %2, align 4 + br label %10 + +; <label>:8: ; preds = %4 + %9 = load i32, i32* %3, align 4 + br label %10 + +; <label>:10: ; preds = %8, %6 + %.01 = phi i32 [ %7, %6 ], [ %9, %8 ] + %.0 = phi i32* [ %2, %6 ], [ %3, %8 ] + %11 = load i32, i32* %.0, align 4 + %12 = mul nsw i32 %11, 15 + %13 = mul nsw i32 %12, %.01 + ret i32 %13 +} + +@global = common global [100 x i64] zeroinitializer, align 16 +@global.1 = common global [100 x i64] zeroinitializer, align 16 +define i64 @test5(i64 %arg) { +; CHECK-LABEL: @test5( +; CHECK-NEXT: bb: +; CHECK-NEXT: [[TMP:%.*]] = alloca i64, align 8 +; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 [[ARG:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP1]], label [[BB28:%.*]], label [[BB2:%.*]] +; CHECK: bb2: +; CHECK-NEXT: br label [[BB7:%.*]] +; CHECK: bb4: +; CHECK-NEXT: br label [[BB5:%.*]] +; CHECK: bb5: +; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP9:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP6]], label [[BB27:%.*]], label [[BB7]] +; CHECK: bb7: +; CHECK-NEXT: [[TMP8:%.*]] = phi i64 [ [[ARG]], [[BB2]] ], [ [[TMP9]], [[BB5]] ] +; 
CHECK-NEXT: [[TMP9]] = add nsw i64 [[TMP8]], -1 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @global, i64 0, i64 0), align 16 +; CHECK-NEXT: [[TMP11:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @global.1, i64 0, i64 0), align 16 +; CHECK-NEXT: [[TMP12:%.*]] = mul nsw i64 [[TMP11]], [[TMP10]] +; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[TMP12]], 0 +; CHECK-NEXT: br i1 [[TMP13]], label [[BB5]], label [[BB14:%.*]] +; CHECK: bb14: +; CHECK-NEXT: br label [[BB15:%.*]] +; CHECK: bb15: +; CHECK-NEXT: [[TMP0:%.*]] = phi i64 [ [[TMP25:%.*]], [[BB15]] ], [ [[TMP12]], [[BB14]] ] +; CHECK-NEXT: [[TMP16:%.*]] = phi i64 [ [[TMP24:%.*]], [[BB15]] ], [ [[TMP11]], [[BB14]] ] +; CHECK-NEXT: [[TMP17:%.*]] = phi i64 [ [[TMP22:%.*]], [[BB15]] ], [ [[TMP10]], [[BB14]] ] +; CHECK-NEXT: [[TMP18:%.*]] = phi i64 [ [[TMP20:%.*]], [[BB15]] ], [ 0, [[BB14]] ] +; CHECK-NEXT: store i64 [[TMP0]], i64* [[TMP]], align 8 +; CHECK-NEXT: [[TMP20]] = add nuw nsw i64 [[TMP18]], 1 +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [100 x i64], [100 x i64]* @global, i64 0, i64 [[TMP20]] +; CHECK-NEXT: [[TMP22]] = load i64, i64* [[TMP21]], align 8 +; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds [100 x i64], [100 x i64]* @global.1, i64 0, i64 [[TMP20]] +; CHECK-NEXT: [[TMP24]] = load i64, i64* [[TMP23]], align 8 +; CHECK-NEXT: [[TMP25]] = mul nsw i64 [[TMP24]], [[TMP22]] +; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[TMP20]], [[TMP25]] +; CHECK-NEXT: br i1 [[TMP26]], label [[BB4:%.*]], label [[BB15]] +; CHECK: bb27: +; CHECK-NEXT: br label [[BB28]] +; CHECK: bb28: +; CHECK-NEXT: ret i64 0 +; +bb: + %tmp = alloca i64, align 8 + %tmp1 = icmp eq i64 %arg, 0 + br i1 %tmp1, label %bb28, label %bb2 + +bb2: ; preds = %bb + %tmp3 = bitcast i64* %tmp to i8* + br label %bb7 + +bb4: ; preds = %bb15 + br label %bb5 + +bb5: ; preds = %bb7, %bb4 + %tmp6 = icmp eq i64 %tmp9, 0 + br i1 %tmp6, label %bb27, label %bb7 + +bb7: ; preds = %bb5, %bb2 + %tmp8 = phi i64 [ %arg, %bb2 ], [ %tmp9, %bb5 ] + %tmp9 = add nsw i64 %tmp8, -1 + %tmp10 = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @global, i64 0, i64 0), align 16 + %tmp11 = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @global.1, i64 0, i64 0), align 16 + %tmp12 = mul nsw i64 %tmp11, %tmp10 + %tmp13 = icmp eq i64 %tmp12, 0 + br i1 %tmp13, label %bb5, label %bb14 + +bb14: ; preds = %bb7 + br label %bb15 + +bb15: ; preds = %bb15, %bb14 + %tmp16 = phi i64 [ %tmp24, %bb15 ], [ %tmp11, %bb14 ] + %tmp17 = phi i64 [ %tmp22, %bb15 ], [ %tmp10, %bb14 ] + %tmp18 = phi i64 [ %tmp20, %bb15 ], [ 0, %bb14 ] +;; This multiply is an op of phis which is really equivalent to phi(tmp25, tmp12) + %tmp19 = mul nsw i64 %tmp16, %tmp17 + store i64 %tmp19, i64* %tmp, align 8 + %tmp20 = add nuw nsw i64 %tmp18, 1 + %tmp21 = getelementptr inbounds [100 x i64], [100 x i64]* @global, i64 0, i64 %tmp20 + %tmp22 = load i64, i64* %tmp21, align 8 + %tmp23 = getelementptr inbounds [100 x i64], [100 x i64]* @global.1, i64 0, i64 %tmp20 + %tmp24 = load i64, i64* %tmp23, align 8 + %tmp25 = mul nsw i64 %tmp24, %tmp22 + %tmp26 = icmp eq i64 %tmp20, %tmp25 + br i1 %tmp26, label %bb4, label %bb15 + +bb27: ; preds = %bb5 + br label %bb28 + +bb28: ; preds = %bb27, %bb + ret i64 0 +} + +;; These icmps are all equivalent to phis of constants +define i8 @test6(i8* %addr) { +; CHECK-LABEL: @test6( +; CHECK-NEXT: entry-block: +; CHECK-NEXT: br label %main-loop +; CHECK: main-loop: +; CHECK-NEXT: [[TMP0:%.*]] = phi i1 [ 
true, %entry-block ], [ false, [[CORE:%.*]] ] +; CHECK-NEXT: [[TMP1:%.*]] = phi i1 [ false, %entry-block ], [ true, [[CORE]] ] +; CHECK-NEXT: [[PHI:%.*]] = phi i8 [ 0, %entry-block ], [ 1, [[CORE]] ] +; CHECK-NEXT: store volatile i8 0, i8* [[ADDR:%.*]] +; CHECK-NEXT: br i1 [[TMP0]], label %busy-wait-phi-0, label [[EXIT:%.*]] +; CHECK: busy-wait-phi-0: +; CHECK-NEXT: [[LOAD:%.*]] = load volatile i8, i8* [[ADDR]] +; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i8 [[LOAD]], 0 +; CHECK-NEXT: br i1 [[ICMP]], label %busy-wait-phi-0, label [[CORE]] +; CHECK: core: +; CHECK-NEXT: br i1 [[TMP1]], label [[TRAP:%.*]], label %main-loop +; CHECK: trap: +; CHECK-NEXT: ret i8 1 +; CHECK: exit: +; CHECK-NEXT: ret i8 0 +; +entry-block: + br label %main-loop + +main-loop: + %phi = phi i8 [ 0, %entry-block ], [ 1, %core ] + %switch_0 = icmp eq i8 %phi, 0 + store volatile i8 0, i8* %addr + br i1 %switch_0, label %busy-wait-phi-0, label %exit + +busy-wait-phi-0: + %load = load volatile i8, i8* %addr + %icmp = icmp eq i8 %load, 0 + br i1 %icmp, label %busy-wait-phi-0, label %core + +core: + %switch_1 = icmp eq i8 %phi, 1 + br i1 %switch_1, label %trap, label %main-loop + +trap: + ret i8 1 + +exit: + ret i8 0 +} + +; Test that we don't infinite loop simplifying +; an undefined value that can go both ways. +define void @test7() { +; CHECK-LABEL: @test7( +; CHECK-NEXT: bb: +; CHECK-NEXT: br label [[BB1:%.*]] +; CHECK: bb1: +; CHECK-NEXT: br label [[BB1]] +; +bb: + br label %bb1 + +bb1: ; preds = %bb1, %bb + %tmp = phi i32 [ undef, %bb ], [ %tmp3, %bb1 ] + %tmp2 = icmp eq i32 %tmp, 0 + %tmp3 = select i1 %tmp2, i32 1, i32 %tmp + br label %bb1 +} + + + +; Test that we get a consistent answer about what the +; value of this undefined select is. +define void @test8() { +; CHECK-LABEL: @test8( +; CHECK-NEXT: bb: +; CHECK-NEXT: br label [[BB1:%.*]] +; CHECK: bb1: +; CHECK-NEXT: br label [[BB1]] +; +bb: + %tmp = select i1 undef, i8 0, i8 1 + br label %bb1 + +bb1: ; preds = %bb1, %bb + %tmp2 = phi i8 [ %tmp4, %bb1 ], [ %tmp, %bb ] + %tmp3 = icmp eq i8 %tmp2, 0 + %tmp4 = select i1 %tmp3, i8 1, i8 %tmp2 + br label %bb1 +} + + +;; Make sure we handle the case where we later come up with an expression that we need +;; for a phi of ops. +define void @test9() { +; CHECK-LABEL: @test9( +; CHECK-NEXT: bb: +; CHECK-NEXT: br label [[BB1:%.*]] +; CHECK: bb1: +; CHECK-NEXT: br i1 undef, label [[BB1]], label [[BB2:%.*]] +; CHECK: bb2: +; CHECK-NEXT: br label [[BB6:%.*]] +; CHECK: bb6: +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ -13, [[BB2]] ], [ [[TMP11:%.*]], [[BB6]] ] +; CHECK-NEXT: [[TMP7:%.*]] = phi i32 [ 1, [[BB2]] ], [ [[TMP8:%.*]], [[BB6]] ] +; CHECK-NEXT: [[TMP8]] = add nuw nsw i32 [[TMP7]], 1 +; CHECK-NEXT: [[TMP11]] = add i32 -14, [[TMP8]] +; CHECK-NEXT: br label [[BB6]] +; +bb: + br label %bb1 + +bb1: ; preds = %bb1, %bb + br i1 undef, label %bb1, label %bb2 + +bb2: ; preds = %bb1 + %tmp = select i1 true, i32 -14, i32 -10 + %tmp3 = add i32 %tmp, 0 + %tmp4 = select i1 true, i32 -14, i32 -10 + %tmp5 = add i32 %tmp4, 0 + br label %bb6 + +bb6: ; preds = %bb6, %bb2 + %tmp7 = phi i32 [ 1, %bb2 ], [ %tmp13, %bb6 ] + %tmp8 = add nuw nsw i32 %tmp7, 1 + %tmp9 = add i32 %tmp3, %tmp7 + %tmp10 = select i1 false, i32 undef, i32 %tmp9 + %tmp11 = add i32 %tmp5, %tmp8 + %tmp12 = select i1 undef, i32 undef, i32 %tmp11 + %tmp13 = add nuw nsw i32 %tmp7, 1 + br label %bb6 +} + +;; Ensure that we revisit predicateinfo operands at the right points in time. 
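Note (editorial, not part of the patch): NewGVN runs over PredicateInfo, which encodes facts implied by branches by inserting llvm.ssa.copy intrinsics on the controlled paths and redirecting dominated uses through them; the test below stresses the order in which those operands are revisited. A sketch of the idea, with illustrative names and the usual overload suffix assumed for the intrinsic:

  define i1 @sketch(i32* %h, i32* %m) {
  entry:
    %j = icmp eq i32* %h, %m
    br i1 %j, label %t, label %f

  t:
    ; PredicateInfo conceptually inserts
    ;   %h.t = call i32* @llvm.ssa.copy.p0i32(i32* %h)
    ; here and repoints %l at %h.t, letting NewGVN prove %l is true.
    %l = icmp eq i32* %h, %m
    ret i1 %l

  f:
    ret i1 false
  }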
+define void @test10() { +b: + %m = getelementptr i32, i32* null, i64 8 + br label %g + +g: ; preds = %i, %b + %n = phi i32* [ %h, %i ], [ null, %b ] + %h = getelementptr i32, i32* %n, i64 1 + %j = icmp eq i32* %h, %m + br i1 %j, label %c, label %i + +i: ; preds = %g + br i1 undef, label %k, label %g + +k: ; preds = %i + %l = icmp eq i32* %n, %m + br i1 %l, label %c, label %o + +o: ; preds = %k + br label %c + +c: ; preds = %o, %k, %g + %0 = phi i32* [ undef, %o ], [ %m, %k ], [ %m, %g ] + ret void +} diff --git a/test/Transforms/NewGVN/pr32838.ll b/test/Transforms/NewGVN/pr32838.ll new file mode 100644 index 000000000000..b6b7b0d19b86 --- /dev/null +++ b/test/Transforms/NewGVN/pr32838.ll @@ -0,0 +1,157 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +;RUN: opt -newgvn -S < %s | FileCheck %s +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.12.0" +;; Ensure we don't infinite loop when all phi arguments are really unreachable or self-defined +define void @fn1(i64 %arg) { +; CHECK-LABEL: @fn1( +; CHECK-NEXT: entry: +; CHECK-NEXT: br i1 undef, label [[IF_THEN:%.*]], label [[COND_TRUE:%.*]] +; CHECK: if.then: +; CHECK-NEXT: br i1 false, label [[FIRSTPHIBLOCK:%.*]], label [[TEMP:%.*]] +; CHECK: firstphiblock: +; CHECK-NEXT: br i1 undef, label %for.cond17thread-pre-split, label [[SECONDPHIBLOCK:%.*]] +; CHECK: secondphiblock: +; CHECK-NEXT: [[SECONDPHI:%.*]] = phi i64 [ [[THIRDPHI:%.*]], [[THIRDPHIBLOCK:%.*]] ], [ undef, [[FIRSTPHIBLOCK]] ] +; CHECK-NEXT: br i1 undef, label [[FIRSTPHIBLOCK]], label [[THIRDPHIBLOCK]] +; CHECK: thirdphiblock: +; CHECK-NEXT: [[THIRDPHI]] = phi i64 [ [[SECONDPHI]], [[SECONDPHIBLOCK]] ], [ [[DIV:%.*]], [[COND_TRUE]] ] +; CHECK-NEXT: br label [[SECONDPHIBLOCK]] +; CHECK: for.cond17thread-pre-split: +; CHECK-NEXT: br label [[COND_TRUE]] +; CHECK: cond.true: +; CHECK-NEXT: [[DIV]] = sdiv i64 [[ARG:%.*]], 4 +; CHECK-NEXT: br label [[THIRDPHIBLOCK]] +; CHECK: temp: +; CHECK-NEXT: ret void +; +entry: + br i1 undef, label %if.then, label %cond.true +if.then: + br i1 false, label %firstphiblock, label %temp +firstphiblock: + %firstphi = phi i64 [ %arg, %if.then ], [ undef, %secondphiblock ] + br i1 undef, label %for.cond17thread-pre-split, label %secondphiblock +secondphiblock: + %secondphi = phi i64 [ %thirdphi, %thirdphiblock ], [ %firstphi, %firstphiblock ] + br i1 undef, label %firstphiblock, label %thirdphiblock +thirdphiblock: + %thirdphi = phi i64 [ %secondphi, %secondphiblock ], [ %div, %cond.true ] + br label %secondphiblock +for.cond17thread-pre-split: + br label %cond.true +cond.true: + %fourthphi = phi i64 [ %arg, %entry ], [ %firstphi, %for.cond17thread-pre-split ] + %div = sdiv i64 %fourthphi, 4 + br label %thirdphiblock +temp: + ret void +} +define void @fn2(i64 %arg) { +; CHECK-LABEL: @fn2( +; CHECK-NEXT: entry: +; CHECK-NEXT: br i1 undef, label [[IF_THEN:%.*]], label [[COND_TRUE:%.*]] +; CHECK: if.then: +; CHECK-NEXT: br i1 false, label [[FIRSTPHIBLOCK:%.*]], label [[TEMP:%.*]] +; CHECK: firstphiblock: +; CHECK-NEXT: [[FIRSTPHI:%.*]] = phi i64 [ undef, [[IF_THEN]] ], [ [[SECONDPHI:%.*]], [[SECONDPHIBLOCK:%.*]] ] +; CHECK-NEXT: br i1 undef, label %for.cond17thread-pre-split, label [[SECONDPHIBLOCK]] +; CHECK: secondphiblock: +; CHECK-NEXT: [[SECONDPHI]] = phi i64 [ [[THIRDPHI:%.*]], [[THIRDPHIBLOCK:%.*]] ], [ [[FIRSTPHI]], [[FIRSTPHIBLOCK]] ] +; CHECK-NEXT: br i1 undef, label [[FIRSTPHIBLOCK]], label [[THIRDPHIBLOCK]] +; CHECK: thirdphiblock: +; CHECK-NEXT: [[THIRDPHI]] = phi i64 
[ [[SECONDPHI]], [[SECONDPHIBLOCK]] ], [ [[DIV:%.*]], [[COND_TRUE]] ] +; CHECK-NEXT: br label [[SECONDPHIBLOCK]] +; CHECK: for.cond17thread-pre-split: +; CHECK-NEXT: br label [[COND_TRUE]] +; CHECK: cond.true: +; CHECK-NEXT: [[FOURTHPHI:%.*]] = phi i64 [ [[ARG:%.*]], [[ENTRY:%.*]] ], [ [[FIRSTPHI]], %for.cond17thread-pre-split ] +; CHECK-NEXT: [[DIV]] = sdiv i64 [[FOURTHPHI]], 4 +; CHECK-NEXT: br label [[THIRDPHIBLOCK]] +; CHECK: temp: +; CHECK-NEXT: ret void +; +entry: + br i1 undef, label %if.then, label %cond.true +if.then: + br i1 false, label %firstphiblock, label %temp +firstphiblock: + %firstphi = phi i64 [ %arg, %if.then ], [ %secondphi, %secondphiblock ] + br i1 undef, label %for.cond17thread-pre-split, label %secondphiblock +secondphiblock: + %secondphi = phi i64 [ %thirdphi, %thirdphiblock ], [ %firstphi, %firstphiblock ] + br i1 undef, label %firstphiblock, label %thirdphiblock +thirdphiblock: + %thirdphi = phi i64 [ %secondphi, %secondphiblock ], [ %div, %cond.true ] + br label %secondphiblock +for.cond17thread-pre-split: + br label %cond.true +cond.true: + %fourthphi = phi i64 [ %arg, %entry ], [ %firstphi, %for.cond17thread-pre-split ] + %div = sdiv i64 %fourthphi, 4 + br label %thirdphiblock +temp: + ret void +} +@b = external global i32, align 4 +@a = external global i32, align 4 +define void @fn3() { +; CHECK-LABEL: @fn3( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[L1:%.*]] +; CHECK: l1.loopexit: +; CHECK-NEXT: br label [[L1]] +; CHECK: l1: +; CHECK-NEXT: [[F_0:%.*]] = phi i32* [ @b, [[ENTRY:%.*]] ], [ @a, [[L1_LOOPEXIT:%.*]] ] +; CHECK-NEXT: br label [[FOR_COND:%.*]] +; CHECK: for.cond.loopexit: +; CHECK-NEXT: store i8 undef, i8* null +; CHECK-NEXT: br label [[FOR_COND]] +; CHECK: for.cond: +; CHECK-NEXT: br i1 undef, label [[FOR_END14:%.*]], label [[FOR_COND1_PREHEADER:%.*]] +; CHECK: for.cond1.preheader: +; CHECK-NEXT: br label [[FOR_BODY3:%.*]] +; CHECK: for.cond1: +; CHECK-NEXT: br label [[L2:%.*]] +; CHECK: for.body3: +; CHECK-NEXT: br i1 undef, label [[FOR_COND1:%.*]], label [[L1_LOOPEXIT]] +; CHECK: l2: +; CHECK-NEXT: [[G_4:%.*]] = phi i32* [ @b, [[FOR_END14]] ], [ @a, [[FOR_COND1]] ] +; CHECK-NEXT: [[F_2:%.*]] = phi i32* [ [[F_0]], [[FOR_END14]] ], [ @a, [[FOR_COND1]] ] +; CHECK-NEXT: br label [[FOR_INC:%.*]] +; CHECK: for.inc: +; CHECK-NEXT: br i1 false, label [[FOR_COND_LOOPEXIT:%.*]], label [[FOR_INC]] +; CHECK: for.end14: +; CHECK-NEXT: br label [[L2]] +; +entry: + br label %l1 +l1.loopexit: + %g.223.lcssa = phi i32* [ @b, %for.body3 ] + br label %l1 +l1: + %g.0 = phi i32* [ undef, %entry ], [ %g.223.lcssa, %l1.loopexit ] + %f.0 = phi i32* [ @b, %entry ], [ @a, %l1.loopexit ] + br label %for.cond +for.cond.loopexit: + br label %for.cond +for.cond: + %g.1 = phi i32* [ %g.0, %l1 ], [ %g.4, %for.cond.loopexit ] + %f.1 = phi i32* [ %f.0, %l1 ], [ %f.2, %for.cond.loopexit ] + br i1 undef, label %for.end14, label %for.cond1.preheader +for.cond1.preheader: + br label %for.body3 +for.cond1: + br label %l2 +for.body3: + br i1 undef, label %for.cond1, label %l1.loopexit +l2: + %g.4 = phi i32* [ %g.1, %for.end14 ], [ @a, %for.cond1 ] + %f.2 = phi i32* [ %f.1, %for.end14 ], [ @a, %for.cond1 ] + br label %for.inc +for.inc: + br i1 false, label %for.cond.loopexit, label %for.inc +for.end14: + br label %l2 +} + diff --git a/test/Transforms/NewGVN/pr32845.ll b/test/Transforms/NewGVN/pr32845.ll new file mode 100644 index 000000000000..beba3363b303 --- /dev/null +++ b/test/Transforms/NewGVN/pr32845.ll @@ -0,0 +1,64 @@ +; NOTE: Assertions have been autogenerated by 
utils/update_test_checks.py +; RUN: opt -newgvn %s -S | FileCheck %s + +@b = external global i32, align 4 +@a = external global i32, align 4 +define void @tinkywinky() { +; CHECK-LABEL: @tinkywinky( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[L1:%.*]] +; CHECK: l1.loopexit: +; CHECK-NEXT: br label [[L1]] +; CHECK: l1: +; CHECK-NEXT: [[F_0:%.*]] = phi i32* [ @b, [[ENTRY:%.*]] ], [ @a, [[L1_LOOPEXIT:%.*]] ] +; CHECK-NEXT: br label [[FOR_COND:%.*]] +; CHECK: for.cond.loopexit: +; CHECK-NEXT: store i8 undef, i8* null +; CHECK-NEXT: br label [[FOR_COND]] +; CHECK: for.cond: +; CHECK-NEXT: br i1 undef, label [[FOR_END14:%.*]], label [[FOR_COND1_PREHEADER:%.*]] +; CHECK: for.cond1.preheader: +; CHECK-NEXT: br label [[FOR_BODY3:%.*]] +; CHECK: for.cond1: +; CHECK-NEXT: br label [[L2:%.*]] +; CHECK: for.body3: +; CHECK-NEXT: br i1 undef, label [[FOR_COND1:%.*]], label [[L1_LOOPEXIT]] +; CHECK: l2: +; CHECK-NEXT: [[G_4:%.*]] = phi i32* [ @b, [[FOR_END14]] ], [ @a, [[FOR_COND1]] ] +; CHECK-NEXT: [[F_2:%.*]] = phi i32* [ [[F_0]], [[FOR_END14]] ], [ @a, [[FOR_COND1]] ] +; CHECK-NEXT: br label [[FOR_INC:%.*]] +; CHECK: for.inc: +; CHECK-NEXT: br i1 false, label [[FOR_COND_LOOPEXIT:%.*]], label [[FOR_INC]] +; CHECK: for.end14: +; CHECK-NEXT: br label [[L2]] +; +entry: + br label %l1 +l1.loopexit: + %g.223.lcssa = phi i32* [ @b, %for.body3 ] + br label %l1 +l1: + %g.0 = phi i32* [ undef, %entry ], [ %g.223.lcssa, %l1.loopexit ] + %f.0 = phi i32* [ @b, %entry ], [ @a, %l1.loopexit ] + br label %for.cond +for.cond.loopexit: + br label %for.cond +for.cond: + %g.1 = phi i32* [ %g.0, %l1 ], [ %g.4, %for.cond.loopexit ] + %f.1 = phi i32* [ %f.0, %l1 ], [ %f.2, %for.cond.loopexit ] + br i1 undef, label %for.end14, label %for.cond1.preheader +for.cond1.preheader: + br label %for.body3 +for.cond1: + br label %l2 +for.body3: + br i1 undef, label %for.cond1, label %l1.loopexit +l2: + %g.4 = phi i32* [ %g.1, %for.end14 ], [ @a, %for.cond1 ] + %f.2 = phi i32* [ %f.1, %for.end14 ], [ @a, %for.cond1 ] + br label %for.inc +for.inc: + br i1 false, label %for.cond.loopexit, label %for.inc +for.end14: + br label %l2 +} diff --git a/test/Transforms/NewGVN/pr32897.ll b/test/Transforms/NewGVN/pr32897.ll new file mode 100644 index 000000000000..eb19aa367b72 --- /dev/null +++ b/test/Transforms/NewGVN/pr32897.ll @@ -0,0 +1,26 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -S -newgvn %s | FileCheck %s + +define void @tinkywinky(i64* %b) { +; CHECK-LABEL: @tinkywinky( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[BODY:%.*]] +; CHECK: body: +; CHECK-NEXT: store i64 undef, i64* [[B:%.*]] +; CHECK-NEXT: [[B2:%.*]] = load i64, i64* [[B]] +; CHECK-NEXT: br i1 undef, label [[BODY]], label [[END:%.*]] +; CHECK: end: +; CHECK-NEXT: br label [[BODY]] +; +entry: + br label %body +body: + %d.1 = phi i64* [ undef, %entry ], [ %d.1, %body ], [ %b, %end ] + store i64 undef, i64* %d.1 + %b2 = load i64, i64* %b + %or = or i64 %b2, 0 + store i64 %or, i64* %b + br i1 undef, label %body, label %end +end: + br label %body +} diff --git a/test/Transforms/NewGVN/pr32945.ll b/test/Transforms/NewGVN/pr32945.ll new file mode 100644 index 000000000000..553ba4bd4aaa --- /dev/null +++ b/test/Transforms/NewGVN/pr32945.ll @@ -0,0 +1,24 @@ +; RUN: opt -S -newgvn %s | FileCheck %s +; CHECK-NOT: call i32 @llvm.ssa.copy + +@d = external global i32 +@e = external global i32 +define void @tinkywinky() { + br i1 true, label %lor.lhs.false, label %cond.true +lor.lhs.false: + %tmp = load i32, i32* @d, align 4 + %patatino 
= load i32, i32* null, align 4 + %or = or i32 %tmp, %patatino + store i32 %or, i32* @d, align 4 + br label %cond.true +cond.true: + %tmp1 = load i32, i32* @e, align 4 + %tmp2 = load i32, i32* @d, align 4 + %cmp = icmp eq i32 %tmp1, %tmp2 + br i1 %cmp, label %cond.true6, label %cond.false +cond.true6: + %cmp7 = icmp slt i32 %tmp1, 0 + br i1 %cmp7, label %cond.false, label %cond.false +cond.false: + ret void +} diff --git a/test/Transforms/NewGVN/pr33014.ll b/test/Transforms/NewGVN/pr33014.ll new file mode 100644 index 000000000000..4157178e4f0c --- /dev/null +++ b/test/Transforms/NewGVN/pr33014.ll @@ -0,0 +1,54 @@ +; Make sure we don't end up in an infinite recursion in singleReachablePHIPath(). +; REQUIRES: asserts +; RUN: opt -newgvn -S %s | FileCheck %s + +@c = external global i64, align 8 + +; CHECK-LABEL: define void @tinkywinky() { +; CHECK: entry: +; CHECK-NEXT: br i1 undef, label %l2, label %if.then +; CHECK: if.then: ; preds = %entry +; CHECK-NEXT: br label %for.body +; CHECK: ph: ; preds = %back, %ontrue +; CHECK-NEXT: br label %for.body +; CHECK: for.body: ; preds = %ph, %if.then +; CHECK-NEXT: br i1 undef, label %ontrue, label %onfalse +; CHECK: onfalse: ; preds = %for.body +; CHECK-NEXT: %patatino = load i64, i64* @c +; CHECK-NEXT: ret void +; CHECK: ontrue: ; preds = %for.body +; CHECK-NEXT: %dipsy = load i64, i64* @c +; CHECK-NEXT: br label %ph +; CHECK: back: ; preds = %l2 +; CHECK-NEXT: store i8 undef, i8* null +; CHECK-NEXT: br label %ph +; CHECK: end: ; preds = %l2 +; CHECK-NEXT: ret void +; CHECK: l2: ; preds = %entry +; CHECK-NEXT: br i1 false, label %back, label %end +; CHECK-NEXT: } + +define void @tinkywinky() { +entry: + br i1 undef, label %l2, label %if.then +if.then: + br label %for.body +ph: + br label %for.body +for.body: + br i1 undef, label %ontrue, label %onfalse +onfalse: + %patatino = load i64, i64* @c + store i64 %patatino, i64* @c + ret void +ontrue: + %dipsy = load i64, i64* @c + store i64 %dipsy, i64* @c + br label %ph +back: + br label %ph +end: + ret void +l2: + br i1 false, label %back, label %end +} diff --git a/test/Transforms/NewGVN/pr33086.ll b/test/Transforms/NewGVN/pr33086.ll new file mode 100644 index 000000000000..6117ef35e6de --- /dev/null +++ b/test/Transforms/NewGVN/pr33086.ll @@ -0,0 +1,59 @@ +; RUN: opt -newgvn -S %s | FileCheck %s +; REQUIRES: asserts + +; CHECK-LABEL: define void @tinkywinky() { +; CHECK: entry: +; CHECK-NEXT: br i1 undef, label %for.cond18, label %for.cond.preheader +; CHECK: for.cond.preheader: +; CHECK-NEXT: br label %for.cond2thread-pre-split +; CHECK: for.cond2thread-pre-split: +; CHECK-NEXT: %conv24 = phi i32 [ 0, %for.cond.preheader ], [ %conv, %for.inc.split ] +; CHECK-NEXT: br label %for.inc.split +; CHECK: for.inc.split: +; CHECK-NEXT: %add = shl nsw i32 %conv24, 16 +; CHECK-NEXT: %sext23 = add i32 %add, 65536 +; CHECK-NEXT: %conv = ashr exact i32 %sext23, 16 +; CHECK-NEXT: %cmp = icmp slt i32 %sext23, 3604480 +; CHECK-NEXT: br i1 %cmp, label %for.cond2thread-pre-split, label %l1.loopexit +; CHECK: l1.loopexit: +; CHECK-NEXT: br label %l1 +; CHECK: l1: +; CHECK-NEXT: %0 = load i16, i16* null, align 2 +; CHECK-NEXT: %g.0.g.0..pr = load i16, i16* null, align 2 +; CHECK-NEXT: ret void +; CHECK: for.cond18: +; CHECK-NEXT: br label %l1 +; CHECK-NEXT: } + +define void @tinkywinky() { +entry: + br i1 undef, label %for.cond18, label %for.cond.preheader + +for.cond.preheader: + br label %for.cond2thread-pre-split + +for.cond2thread-pre-split: + %conv24 = phi i32 [ 0, %for.cond.preheader ], [ %conv, %for.inc.split ] + 
br label %for.inc.split + +for.inc.split: + %add = shl nsw i32 %conv24, 16 + %sext23 = add i32 %add, 65536 + %conv = ashr exact i32 %sext23, 16 + %cmp = icmp slt i32 %sext23, 3604480 + br i1 %cmp, label %for.cond2thread-pre-split, label %l1.loopexit + +l1.loopexit: + br label %l1 + +l1: + %h.0 = phi i16* [ undef, %for.cond18 ], [ null, %l1.loopexit ] + %0 = load i16, i16* %h.0, align 2 + store i16 %0, i16* null, align 2 + %g.0.g.0..pr = load i16, i16* null, align 2 + %tobool15 = icmp eq i16 %g.0.g.0..pr, 0 + ret void + +for.cond18: + br label %l1 +} diff --git a/test/Transforms/NewGVN/pr33116.ll b/test/Transforms/NewGVN/pr33116.ll new file mode 100644 index 000000000000..9bf6bb1ff6ef --- /dev/null +++ b/test/Transforms/NewGVN/pr33116.ll @@ -0,0 +1,39 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -S -newgvn %s | FileCheck %s + +@a = external global i32 + +define void @b() { +; CHECK-LABEL: @b( +; CHECK-NEXT: br i1 false, label [[C:%.*]], label [[WHILE_D:%.*]] +; CHECK: while.d: +; CHECK-NEXT: br label [[F:%.*]] +; CHECK: f: +; CHECK-NEXT: br i1 undef, label [[IF_E:%.*]], label [[C]] +; CHECK: c: +; CHECK-NEXT: br i1 undef, label [[IF_G:%.*]], label [[IF_E]] +; CHECK: if.g: +; CHECK-NEXT: store i32 undef, i32* @a +; CHECK-NEXT: br label [[WHILE_D]] +; CHECK: if.e: +; CHECK-NEXT: br label [[F]] +; + br i1 false, label %c, label %while.d + +while.d: ; preds = %if.g, %0 + br label %f + +f: ; preds = %if.e, %while.d + br i1 undef, label %if.e, label %c + +c: ; preds = %f, %0 + br i1 undef, label %if.g, label %if.e + +if.g: ; preds = %c + store i32 undef, i32* @a + br label %while.d + +if.e: ; preds = %c, %f + br label %f +} + diff --git a/test/Transforms/NewGVN/storeoverstore.ll b/test/Transforms/NewGVN/storeoverstore.ll index 49b55d430dc7..28f5eea03ced 100644 --- a/test/Transforms/NewGVN/storeoverstore.ll +++ b/test/Transforms/NewGVN/storeoverstore.ll @@ -13,11 +13,11 @@ define i32 @foo(i32*, i32) { ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP1:%.*]], 0 ; CHECK-NEXT: br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP5:%.*]] ; CHECK: br label [[TMP5]] -; CHECK: [[DOT0:%.*]] = phi i32 [ 10, [[TMP4]] ], [ 5, [[TMP2:%.*]] ] -; CHECK-NEXT: br i1 [[TMP3]], label [[TMP6:%.*]], label [[TMP8:%.*]] -; CHECK: [[TMP7:%.*]] = add nsw i32 [[DOT0]], 5 -; CHECK-NEXT: br label [[TMP8]] -; CHECK: [[DOT1:%.*]] = phi i32 [ [[TMP7]], [[TMP6]] ], [ [[DOT0]], [[TMP5]] ] +; CHECK: [[TMP6:%.*]] = phi i32 [ 15, [[TMP4]] ], [ 10, [[TMP2:%.*]] ] +; CHECK-NEXT: [[DOT0:%.*]] = phi i32 [ 10, [[TMP4]] ], [ 5, [[TMP2]] ] +; CHECK-NEXT: br i1 [[TMP3]], label [[TMP7:%.*]], label [[TMP8:%.*]] +; CHECK: br label [[TMP8]] +; CHECK: [[DOT1:%.*]] = phi i32 [ [[TMP6]], [[TMP7]] ], [ [[DOT0]], [[TMP5]] ] ; CHECK-NEXT: ret i32 [[DOT1]] ; store i32 5, i32* %0, align 4 @@ -54,11 +54,11 @@ define i32 @foo2(i32*, i32) { ; CHECK-NEXT: br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP5:%.*]] ; CHECK: br label [[TMP6:%.*]] ; CHECK: br label [[TMP6]] -; CHECK: [[DOT0:%.*]] = phi i32 [ 10, [[TMP4]] ], [ 5, [[TMP5]] ] -; CHECK-NEXT: br i1 [[TMP3]], label [[TMP7:%.*]], label [[TMP9:%.*]] -; CHECK: [[TMP8:%.*]] = add nsw i32 [[DOT0]], 5 -; CHECK-NEXT: br label [[TMP9]] -; CHECK: [[DOT1:%.*]] = phi i32 [ [[TMP8]], [[TMP7]] ], [ [[DOT0]], [[TMP6]] ] +; CHECK: [[TMP7:%.*]] = phi i32 [ 15, [[TMP4]] ], [ 10, [[TMP5]] ] +; CHECK-NEXT: [[DOT0:%.*]] = phi i32 [ 10, [[TMP4]] ], [ 5, [[TMP5]] ] +; CHECK-NEXT: br i1 [[TMP3]], label [[TMP8:%.*]], label [[TMP9:%.*]] +; CHECK: br label [[TMP9]] +; CHECK: [[DOT1:%.*]] = 
phi i32 [ [[TMP7]], [[TMP8]] ], [ [[DOT0]], [[TMP6]] ]
; CHECK-NEXT: ret i32 [[DOT1]]
;
 store i32 5, i32* %0, align 4
diff --git a/test/Transforms/SafeStack/X86/coloring-ssp.ll b/test/Transforms/SafeStack/X86/coloring-ssp.ll index 3b04fdf13fbc..040632e7526d 100644 --- a/test/Transforms/SafeStack/X86/coloring-ssp.ll +++ b/test/Transforms/SafeStack/X86/coloring-ssp.ll @@ -1,4 +1,4 @@ -; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
+; RUN: opt -safe-stack -safe-stack-coloring=1 -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
 ; %x and %y share a stack slot between them, but not with the stack guard.
 define void @f() safestack sspreq {
diff --git a/test/Transforms/SafeStack/X86/coloring.ll b/test/Transforms/SafeStack/X86/coloring.ll index 76bdf37dbf4e..60e960e693d5 100644 --- a/test/Transforms/SafeStack/X86/coloring.ll +++ b/test/Transforms/SafeStack/X86/coloring.ll @@ -1,5 +1,5 @@ -; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
-; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
+; RUN: opt -safe-stack -safe-stack-coloring=1 -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
+; RUN: opt -safe-stack -safe-stack-coloring=1 -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
 define void @f() safestack {
 entry:
diff --git a/test/Transforms/SafeStack/X86/coloring2.ll b/test/Transforms/SafeStack/X86/coloring2.ll index 2a8f871945ff..ef00d9b54715 100644 --- a/test/Transforms/SafeStack/X86/coloring2.ll +++ b/test/Transforms/SafeStack/X86/coloring2.ll @@ -1,5 +1,5 @@ -; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
-; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
+; RUN: opt -safe-stack -safe-stack-coloring=1 -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
+; RUN: opt -safe-stack -safe-stack-coloring=1 -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
 ; x and y share the stack slot.
 define void @f() safestack {
diff --git a/test/Transforms/SafeStack/X86/layout-frag.ll b/test/Transforms/SafeStack/X86/layout-frag.ll index b127defc2c5d..b9831c26b74c 100644 --- a/test/Transforms/SafeStack/X86/layout-frag.ll +++ b/test/Transforms/SafeStack/X86/layout-frag.ll @@ -1,5 +1,5 @@ ; Test that safestack layout reuses a region w/o fragmentation.
-; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
+; RUN: opt -safe-stack -safe-stack-coloring=1 -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
 define void @f() safestack {
 ; CHECK-LABEL: define void @f
diff --git a/test/tools/llvm-cvtres/Inputs/cursor_small.bmp b/test/tools/llvm-cvtres/Inputs/cursor_small.bmp Binary files differ new file mode 100644 index 000000000000..ce513261bc2c --- /dev/null +++ b/test/tools/llvm-cvtres/Inputs/cursor_small.bmp diff --git a/test/tools/llvm-cvtres/Inputs/okay_small.bmp b/test/tools/llvm-cvtres/Inputs/okay_small.bmp Binary files differ new file mode 100644 index 000000000000..e4005bf5ef97 --- /dev/null +++ b/test/tools/llvm-cvtres/Inputs/okay_small.bmp diff --git a/test/tools/llvm-cvtres/Inputs/test_resource.rc b/test/tools/llvm-cvtres/Inputs/test_resource.rc new file mode 100644 index 000000000000..fd616520dbe1 --- /dev/null +++ b/test/tools/llvm-cvtres/Inputs/test_resource.rc @@ -0,0 +1,44 @@
+
+LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
+
+myaccelerators ACCELERATORS
+{
+ "^C", 999, VIRTKEY, ALT
+ "D", 1100, VIRTKEY, CONTROL, SHIFT
+ "^R", 444, ASCII, NOINVERT
+}
+
+cursor BITMAP "cursor_small.bmp"
+okay BITMAP "okay_small.bmp"
+
+14432 MENU
+LANGUAGE LANG_CHINESE, SUBLANG_CHINESE_SIMPLIFIED
+{
+ MENUITEM "yu", 100
+ MENUITEM "shala", 101
+ MENUITEM "kaoya", 102
+}
+
+testdialog DIALOG 10, 10, 200, 300
+STYLE WS_POPUP | WS_BORDER
+CAPTION "Test"
+{
+ CTEXT "Continue:", 1, 10, 10, 230, 14
+ PUSHBUTTON "&OK", 2, 66, 134, 161, 13
+}
+
+12 ACCELERATORS
+{
+ "X", 164, VIRTKEY, ALT
+ "H", 5678, VIRTKEY, CONTROL, SHIFT
+ "^R", 444, ASCII, NOINVERT
+}
+
+"eat" MENU
+LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_AUS
+{
+ MENUITEM "fish", 100
+ MENUITEM "salad", 101
+ MENUITEM "duck", 102
+}
diff --git a/test/tools/llvm-cvtres/Inputs/test_resource.res b/test/tools/llvm-cvtres/Inputs/test_resource.res Binary files differnew file mode 100644 index 000000000000..c577ecc3d633 --- /dev/null +++ b/test/tools/llvm-cvtres/Inputs/test_resource.res diff --git a/test/tools/llvm-cvtres/resource.test b/test/tools/llvm-cvtres/resource.test new file mode 100644 index 000000000000..16970343c60d --- /dev/null +++ b/test/tools/llvm-cvtres/resource.test @@ -0,0 +1,7 @@ +// The input was generated with the following command, using the original Windows +// rc.exe: +// > rc /fo test_resource.res /nologo test_resource.rc + +RUN: llvm-cvtres %p/Inputs/test_resource.res | FileCheck %s + +CHECK: Number of resources: 7 |