| author | Dimitry Andric <dim@FreeBSD.org> | 2017-04-26 19:45:00 +0000 |
|---|---|---|
| committer | Dimitry Andric <dim@FreeBSD.org> | 2017-04-26 19:45:00 +0000 |
| commit | 12f3ca4cdb95b193af905a00e722a4dcb40b3de3 (patch) | |
| tree | ae1a7fcfc24a8d4b23206c57121c3f361d4b7f84 /test/CodeGen/X86/vector-trunc-math.ll | |
| parent | d99dafe2e4a385dd2a6c76da6d8258deb100657b (diff) | |
Vendor import of llvm trunk r301441 (tag: vendor/llvm/llvm-trunk-r301441)
Notes:
svn path=/vendor/llvm/dist/; revision=317461
svn path=/vendor/llvm/llvm-trunk-r301441/; revision=317462; tag=vendor/llvm/llvm-trunk-r301441
Diffstat (limited to 'test/CodeGen/X86/vector-trunc-math.ll')
-rw-r--r-- | test/CodeGen/X86/vector-trunc-math.ll | 10 |
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/test/CodeGen/X86/vector-trunc-math.ll b/test/CodeGen/X86/vector-trunc-math.ll
index ab34ad6a613c..a5fac9ac6a41 100644
--- a/test/CodeGen/X86/vector-trunc-math.ll
+++ b/test/CodeGen/X86/vector-trunc-math.ll
@@ -1257,7 +1257,7 @@ define <4 x i32> @trunc_sub_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_sub_const_v4i64_v4i32:
 ; SSE: # BB#0:
 ; SSE-NEXT: movl $1, %eax
-; SSE-NEXT: movd %rax, %xmm2
+; SSE-NEXT: movq %rax, %xmm2
 ; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
 ; SSE-NEXT: psubq %xmm2, %xmm0
 ; SSE-NEXT: psubq {{.*}}(%rip), %xmm1
@@ -1301,7 +1301,7 @@ define <8 x i16> @trunc_sub_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_sub_const_v8i64_v8i16:
 ; SSE: # BB#0:
 ; SSE-NEXT: movl $1, %eax
-; SSE-NEXT: movd %rax, %xmm4
+; SSE-NEXT: movq %rax, %xmm4
 ; SSE-NEXT: pslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7]
 ; SSE-NEXT: psubq %xmm4, %xmm0
 ; SSE-NEXT: psubq {{.*}}(%rip), %xmm1
@@ -1418,7 +1418,7 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_sub_const_v16i64_v16i8:
 ; SSE: # BB#0:
 ; SSE-NEXT: movl $1, %eax
-; SSE-NEXT: movd %rax, %xmm8
+; SSE-NEXT: movq %rax, %xmm8
 ; SSE-NEXT: pslldq {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,xmm8[0,1,2,3,4,5,6,7]
 ; SSE-NEXT: psubq %xmm8, %xmm0
 ; SSE-NEXT: psubq {{.*}}(%rip), %xmm1
@@ -2411,7 +2411,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
 ; SSE-NEXT: psllq $32, %xmm1
 ; SSE-NEXT: paddq %xmm3, %xmm1
 ; SSE-NEXT: movl $1, %eax
-; SSE-NEXT: movd %rax, %xmm2
+; SSE-NEXT: movq %rax, %xmm2
 ; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
 ; SSE-NEXT: movdqa %xmm0, %xmm3
 ; SSE-NEXT: pmuludq %xmm2, %xmm3
@@ -2554,7 +2554,7 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
 ; SSE-LABEL: trunc_mul_const_v16i64_v16i8:
 ; SSE: # BB#0:
 ; SSE-NEXT: movl $1, %eax
-; SSE-NEXT: movd %rax, %xmm8
+; SSE-NEXT: movq %rax, %xmm8
 ; SSE-NEXT: pslldq {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,xmm8[0,1,2,3,4,5,6,7]
 ; SSE-NEXT: movdqa %xmm0, %xmm9
 ; SSE-NEXT: pmuludq %xmm8, %xmm9
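Note on the updated CHECK lines (summary, not part of the imported commit): with a 64-bit general-purpose source register, `movd` and `movq` are two spellings of the same GPR-to-XMM move, and the imported trunk revision updates these SSE tests to expect the `movq` spelling. A minimal AT&T-syntax sketch of the equivalence (hypothetical standalone snippet, not taken from the test file):

    # With a 64-bit GPR source these two spellings assemble to the same
    # instruction (66 REX.W 0F 6E /r); newer LLVM prints the movq form.
    movl $1, %eax        # writing %eax zero-extends the value into %rax
    movq %rax, %xmm2     # spelling now expected by the SSE CHECK lines
    movd %rax, %xmm2     # older spelling of the same 64-bit GPR-to-XMM move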