; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32-SSE
; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=X32-AVX
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64-SSE
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64-AVX

; PR29078

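; Masking both elements to at most 16 bits guarantees each value fits in an
; i32, so the sitofp can be lowered as a packed 32-bit cvtdq2pd on the low
; dwords rather than two scalar 64-bit conversions.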
define <2 x double> @mask_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
; X32-SSE-LABEL: mask_sitofp_2i64_2f64:
; X32-SSE:       # BB#0:
; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
; X32-SSE-NEXT:    retl
;
; X32-AVX-LABEL: mask_sitofp_2i64_2f64:
; X32-AVX:       # BB#0:
; X32-AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[8,9],zero,zero,xmm0[u,u,u,u,u,u,u,u]
; X32-AVX-NEXT:    vcvtdq2pd %xmm0, %xmm0
; X32-AVX-NEXT:    retl
;
; X64-SSE-LABEL: mask_sitofp_2i64_2f64:
; X64-SSE:       # BB#0:
; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: mask_sitofp_2i64_2f64:
; X64-AVX:       # BB#0:
; X64-AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[8,9],zero,zero,xmm0[u,u,u,u,u,u,u,u]
; X64-AVX-NEXT:    vcvtdq2pd %xmm0, %xmm0
; X64-AVX-NEXT:    retq
  %and = and <2 x i64> %a, <i64 255, i64 65535>
  %cvt = sitofp <2 x i64> %and to <2 x double>
  ret <2 x double> %cvt
}

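; The unsigned variant lowers to exactly the same sequence: the masked values
; are non-negative and well inside i32 range, so uitofp and sitofp coincide.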
define <2 x double> @mask_uitofp_2i64_2f64(<2 x i64> %a) nounwind {
; X32-SSE-LABEL: mask_uitofp_2i64_2f64:
; X32-SSE:       # BB#0:
; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
; X32-SSE-NEXT:    retl
;
; X32-AVX-LABEL: mask_uitofp_2i64_2f64:
; X32-AVX:       # BB#0:
; X32-AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[8,9],zero,zero,xmm0[u,u,u,u,u,u,u,u]
; X32-AVX-NEXT:    vcvtdq2pd %xmm0, %xmm0
; X32-AVX-NEXT:    retl
;
; X64-SSE-LABEL: mask_uitofp_2i64_2f64:
; X64-SSE:       # BB#0:
; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; X64-SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: mask_uitofp_2i64_2f64:
; X64-AVX:       # BB#0:
; X64-AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[8,9],zero,zero,xmm0[u,u,u,u,u,u,u,u]
; X64-AVX-NEXT:    vcvtdq2pd %xmm0, %xmm0
; X64-AVX-NEXT:    retq
  %and = and <2 x i64> %a, <i64 255, i64 65535>
  %cvt = uitofp <2 x i64> %and to <2 x double>
  ret <2 x double> %cvt
}

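; For 4 x i64, the two 128-bit halves are shuffled down to a single vector of
; low dwords before the mask, then converted with a packed cvtdq2ps.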
define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X32-SSE-LABEL: mask_sitofp_4i64_4f32:
; X32-SSE:       # BB#0:
; X32-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-SSE-NEXT:    andps {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
; X32-SSE-NEXT:    retl
;
; X32-AVX-LABEL: mask_sitofp_4i64_4f32:
; X32-AVX:       # BB#0:
; X32-AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X32-AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-AVX-NEXT:    vandps {{\.LCPI.*}}, %xmm0, %xmm0
; X32-AVX-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X32-AVX-NEXT:    vzeroupper
; X32-AVX-NEXT:    retl
;
; X64-SSE-LABEL: mask_sitofp_4i64_4f32:
; X64-SSE:       # BB#0:
; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: mask_sitofp_4i64_4f32:
; X64-AVX:       # BB#0:
; X64-AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X64-AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X64-AVX-NEXT:    vzeroupper
; X64-AVX-NEXT:    retq
  %and = and <4 x i64> %a, <i64 127, i64 255, i64 4095, i64 65535>
  %cvt = sitofp <4 x i64> %and to <4 x float>
  ret <4 x float> %cvt
}

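; As with the 2 x i64 case, the unsigned conversion matches the signed codegen
; because the masked values cannot be negative.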
define <4 x float> @mask_uitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X32-SSE-LABEL: mask_uitofp_4i64_4f32:
; X32-SSE:       # BB#0:
; X32-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-SSE-NEXT:    andps {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
; X32-SSE-NEXT:    retl
;
; X32-AVX-LABEL: mask_uitofp_4i64_4f32:
; X32-AVX:       # BB#0:
; X32-AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X32-AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-AVX-NEXT:    vandps {{\.LCPI.*}}, %xmm0, %xmm0
; X32-AVX-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X32-AVX-NEXT:    vzeroupper
; X32-AVX-NEXT:    retl
;
; X64-SSE-LABEL: mask_uitofp_4i64_4f32:
; X64-SSE:       # BB#0:
; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: mask_uitofp_4i64_4f32:
; X64-AVX:       # BB#0:
; X64-AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X64-AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X64-AVX-NEXT:    vzeroupper
; X64-AVX-NEXT:    retq
  %and = and <4 x i64> %a, <i64 127, i64 255, i64 4095, i64 65535>
  %cvt = uitofp <4 x i64> %and to <4 x float>
  ret <4 x float> %cvt
}

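; Clamping to [-255, 255] also bounds each element to i32 range, but the
; conversion below is still scalarized (x87 fildll on i686, cvtsi2sdq on
; x86-64); presumably this is the missed narrowing that PR29078 reports.
; A rough C equivalent of the scalar pattern (hypothetical, not taken from
; the original source):
;
;   double clamp_sitofp(long long x) {
;     if (x < -255) x = -255;   /* %clo / %lo */
;     if (x >  255) x =  255;   /* %chi / %hi */
;     return (double)x;         /* sitofp     */
;   }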
define <2 x double> @clamp_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
; X32-SSE-LABEL: clamp_sitofp_2i64_2f64:
; X32-SSE:       # BB#0:
; X32-SSE-NEXT:    pushl %ebp
; X32-SSE-NEXT:    movl %esp, %ebp
; X32-SSE-NEXT:    andl $-8, %esp
; X32-SSE-NEXT:    subl $32, %esp
; X32-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [2147483648,0,2147483648,0]
; X32-SSE-NEXT:    movdqa %xmm0, %xmm2
; X32-SSE-NEXT:    pxor %xmm1, %xmm2
; X32-SSE-NEXT:    movdqa {{.*#+}} xmm3 = [2147483393,4294967295,2147483393,4294967295]
; X32-SSE-NEXT:    movdqa %xmm3, %xmm4
; X32-SSE-NEXT:    pcmpgtd %xmm2, %xmm4
; X32-SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; X32-SSE-NEXT:    pcmpeqd %xmm3, %xmm2
; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; X32-SSE-NEXT:    pand %xmm5, %xmm2
; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; X32-SSE-NEXT:    por %xmm2, %xmm3
; X32-SSE-NEXT:    movdqa %xmm3, %xmm2
; X32-SSE-NEXT:    pandn %xmm0, %xmm2
; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm3
; X32-SSE-NEXT:    por %xmm2, %xmm3
; X32-SSE-NEXT:    pxor %xmm3, %xmm1
; X32-SSE-NEXT:    movdqa {{.*#+}} xmm0 = [2147483903,0,2147483903,0]
; X32-SSE-NEXT:    movdqa %xmm1, %xmm2
; X32-SSE-NEXT:    pcmpgtd %xmm0, %xmm2
; X32-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
; X32-SSE-NEXT:    pcmpeqd %xmm0, %xmm1
; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; X32-SSE-NEXT:    pand %xmm4, %xmm0
; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; X32-SSE-NEXT:    por %xmm0, %xmm1
; X32-SSE-NEXT:    movdqa %xmm1, %xmm0
; X32-SSE-NEXT:    pandn %xmm3, %xmm0
; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm1
; X32-SSE-NEXT:    por %xmm0, %xmm1
; X32-SSE-NEXT:    movq {{.*#+}} xmm0 = xmm1[0],zero
; X32-SSE-NEXT:    movq %xmm0, {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; X32-SSE-NEXT:    movq %xmm0, {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    fildll {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    fstpl {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    fildll {{[0-9]+}}(%esp)
; X32-SSE-NEXT:    fstpl (%esp)
; X32-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X32-SSE-NEXT:    movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; X32-SSE-NEXT:    movl %ebp, %esp
; X32-SSE-NEXT:    popl %ebp
; X32-SSE-NEXT:    retl
;
; X32-AVX-LABEL: clamp_sitofp_2i64_2f64:
; X32-AVX:       # BB#0:
; X32-AVX-NEXT:    pushl %ebp
; X32-AVX-NEXT:    movl %esp, %ebp
; X32-AVX-NEXT:    andl $-8, %esp
; X32-AVX-NEXT:    subl $32, %esp
; X32-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [4294967041,4294967295,4294967041,4294967295]
; X32-AVX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
; X32-AVX-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; X32-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [255,0,255,0]
; X32-AVX-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
; X32-AVX-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; X32-AVX-NEXT:    vmovq {{.*#+}} xmm1 = xmm0[0],zero
; X32-AVX-NEXT:    vmovq %xmm1, {{[0-9]+}}(%esp)
; X32-AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X32-AVX-NEXT:    vmovq %xmm0, {{[0-9]+}}(%esp)
; X32-AVX-NEXT:    fildll {{[0-9]+}}(%esp)
; X32-AVX-NEXT:    fstpl {{[0-9]+}}(%esp)
; X32-AVX-NEXT:    fildll {{[0-9]+}}(%esp)
; X32-AVX-NEXT:    fstpl (%esp)
; X32-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; X32-AVX-NEXT:    movl %ebp, %esp
; X32-AVX-NEXT:    popl %ebp
; X32-AVX-NEXT:    retl
;
; X64-SSE-LABEL: clamp_sitofp_2i64_2f64:
; X64-SSE:       # BB#0:
; X64-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [2147483648,0,2147483648,0]
; X64-SSE-NEXT:    movdqa %xmm0, %xmm2
; X64-SSE-NEXT:    pxor %xmm1, %xmm2
; X64-SSE-NEXT:    movdqa {{.*#+}} xmm3 = [18446744073709551361,18446744073709551361]
; X64-SSE-NEXT:    movdqa %xmm1, %xmm4
; X64-SSE-NEXT:    pxor %xmm3, %xmm4
; X64-SSE-NEXT:    movdqa %xmm4, %xmm5
; X64-SSE-NEXT:    pcmpgtd %xmm2, %xmm5
; X64-SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; X64-SSE-NEXT:    pcmpeqd %xmm2, %xmm4
; X64-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
; X64-SSE-NEXT:    pand %xmm6, %xmm2
; X64-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
; X64-SSE-NEXT:    por %xmm2, %xmm4
; X64-SSE-NEXT:    movdqa %xmm4, %xmm2
; X64-SSE-NEXT:    pandn %xmm0, %xmm2
; X64-SSE-NEXT:    pand %xmm3, %xmm4
; X64-SSE-NEXT:    por %xmm2, %xmm4
; X64-SSE-NEXT:    movdqa %xmm4, %xmm0
; X64-SSE-NEXT:    pxor %xmm1, %xmm0
; X64-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255]
; X64-SSE-NEXT:    pxor %xmm2, %xmm1
; X64-SSE-NEXT:    movdqa %xmm0, %xmm3
; X64-SSE-NEXT:    pcmpgtd %xmm1, %xmm3
; X64-SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
; X64-SSE-NEXT:    pcmpeqd %xmm0, %xmm1
; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; X64-SSE-NEXT:    pand %xmm5, %xmm0
; X64-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
; X64-SSE-NEXT:    por %xmm0, %xmm1
; X64-SSE-NEXT:    movdqa %xmm1, %xmm0
; X64-SSE-NEXT:    pandn %xmm4, %xmm0
; X64-SSE-NEXT:    pand %xmm2, %xmm1
; X64-SSE-NEXT:    por %xmm0, %xmm1
; X64-SSE-NEXT:    movd %xmm1, %rax
; X64-SSE-NEXT:    xorps %xmm0, %xmm0
; X64-SSE-NEXT:    cvtsi2sdq %rax, %xmm0
; X64-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X64-SSE-NEXT:    movd %xmm1, %rax
; X64-SSE-NEXT:    xorps %xmm1, %xmm1
; X64-SSE-NEXT:    cvtsi2sdq %rax, %xmm1
; X64-SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: clamp_sitofp_2i64_2f64:
; X64-AVX:       # BB#0:
; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [18446744073709551361,18446744073709551361]
; X64-AVX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
; X64-AVX-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [255,255]
; X64-AVX-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
; X64-AVX-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT:    vpextrq $1, %xmm0, %rax
; X64-AVX-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
; X64-AVX-NEXT:    vmovq %xmm0, %rax
; X64-AVX-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm0
; X64-AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X64-AVX-NEXT:    retq
  %clo = icmp slt <2 x i64> %a, <i64 -255, i64 -255>
  %lo = select <2 x i1> %clo, <2 x i64> <i64 -255, i64 -255>, <2 x i64> %a
  %chi = icmp sgt <2 x i64> %lo, <i64 255, i64 255>
  %hi = select <2 x i1> %chi, <2 x i64> <i64 255, i64 255>, <2 x i64> %lo
  %cvt = sitofp <2 x i64> %hi to <2 x double>
  ret <2 x double> %cvt
}