1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=i686-linux-gnu -mattr=avx512bf16,avx512fp16,avx512vl | FileCheck %s --check-prefixes=X86
3 ; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK,SSE2
4 ; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avx512bf16,avx512vl | FileCheck %s --check-prefixes=CHECK,AVX,F16,BF16
5 ; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avx512bf16,avx512fp16,avx512vl | FileCheck %s --check-prefixes=CHECK,AVX,F16,FP16
6 ; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avxneconvert,f16c | FileCheck %s --check-prefixes=CHECK,AVX,BF16,AVXNC
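; Scalar bfloat arithmetic has no native x86 instruction: operands are widened
; to f32 by shifting the raw 16 bits left by 16 (bf16 is the high half of an
; f32), the f32 operation is performed, and the result is narrowed back with
; vcvtneps2bf16 where AVX512BF16/AVXNECONVERT is available, or via the
; __truncsfbf2 libcall otherwise.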
8 define void @add(ptr %pa, ptr %pb, ptr %pc) nounwind {
11 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
12 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
13 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
14 ; X86-NEXT: movzwl (%edx), %edx
15 ; X86-NEXT: shll $16, %edx
16 ; X86-NEXT: vmovd %edx, %xmm0
17 ; X86-NEXT: movzwl (%ecx), %ecx
18 ; X86-NEXT: shll $16, %ecx
19 ; X86-NEXT: vmovd %ecx, %xmm1
20 ; X86-NEXT: vaddss %xmm0, %xmm1, %xmm0
21 ; X86-NEXT: vcvtneps2bf16 %xmm0, %xmm0
22 ; X86-NEXT: vpextrw $0, %xmm0, (%eax)
27 ; SSE2-NEXT: pushq %rbx
28 ; SSE2-NEXT: movq %rdx, %rbx
29 ; SSE2-NEXT: movzwl (%rsi), %eax
30 ; SSE2-NEXT: shll $16, %eax
31 ; SSE2-NEXT: movd %eax, %xmm1
32 ; SSE2-NEXT: movzwl (%rdi), %eax
33 ; SSE2-NEXT: shll $16, %eax
34 ; SSE2-NEXT: movd %eax, %xmm0
35 ; SSE2-NEXT: addss %xmm1, %xmm0
36 ; SSE2-NEXT: callq __truncsfbf2@PLT
37 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
38 ; SSE2-NEXT: movw %ax, (%rbx)
39 ; SSE2-NEXT: popq %rbx
44 ; F16-NEXT: movzwl (%rsi), %eax
45 ; F16-NEXT: shll $16, %eax
46 ; F16-NEXT: vmovd %eax, %xmm0
47 ; F16-NEXT: movzwl (%rdi), %eax
48 ; F16-NEXT: shll $16, %eax
49 ; F16-NEXT: vmovd %eax, %xmm1
50 ; F16-NEXT: vaddss %xmm0, %xmm1, %xmm0
51 ; F16-NEXT: vcvtneps2bf16 %xmm0, %xmm0
52 ; F16-NEXT: vpextrw $0, %xmm0, (%rdx)
57 ; AVXNC-NEXT: movzwl (%rsi), %eax
58 ; AVXNC-NEXT: shll $16, %eax
59 ; AVXNC-NEXT: vmovd %eax, %xmm0
60 ; AVXNC-NEXT: movzwl (%rdi), %eax
61 ; AVXNC-NEXT: shll $16, %eax
62 ; AVXNC-NEXT: vmovd %eax, %xmm1
63 ; AVXNC-NEXT: vaddss %xmm0, %xmm1, %xmm0
64 ; AVXNC-NEXT: {vex} vcvtneps2bf16 %xmm0, %xmm0
65 ; AVXNC-NEXT: vpextrw $0, %xmm0, (%rdx)
67 %a = load bfloat, ptr %pa
68 %b = load bfloat, ptr %pb
69 %add = fadd bfloat %a, %b
70 store bfloat %add, ptr %pc
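; Same operation as @add, but with the bfloat operands and result passed in
; XMM registers instead of through memory.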
74 define bfloat @add2(bfloat %a, bfloat %b) nounwind {
77 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
78 ; X86-NEXT: shll $16, %eax
79 ; X86-NEXT: vmovd %eax, %xmm0
80 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
81 ; X86-NEXT: shll $16, %eax
82 ; X86-NEXT: vmovd %eax, %xmm1
83 ; X86-NEXT: vaddss %xmm0, %xmm1, %xmm0
84 ; X86-NEXT: vcvtneps2bf16 %xmm0, %xmm0
85 ; X86-NEXT: vmovw %xmm0, %eax
86 ; X86-NEXT: vmovw %eax, %xmm0
91 ; SSE2-NEXT: pushq %rax
92 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
93 ; SSE2-NEXT: pextrw $0, %xmm1, %ecx
94 ; SSE2-NEXT: shll $16, %ecx
95 ; SSE2-NEXT: movd %ecx, %xmm1
96 ; SSE2-NEXT: shll $16, %eax
97 ; SSE2-NEXT: movd %eax, %xmm0
98 ; SSE2-NEXT: addss %xmm1, %xmm0
99 ; SSE2-NEXT: callq __truncsfbf2@PLT
100 ; SSE2-NEXT: popq %rax
105 ; FP16-NEXT: vmovw %xmm0, %eax
106 ; FP16-NEXT: vmovw %xmm1, %ecx
107 ; FP16-NEXT: shll $16, %ecx
108 ; FP16-NEXT: vmovd %ecx, %xmm0
109 ; FP16-NEXT: shll $16, %eax
110 ; FP16-NEXT: vmovd %eax, %xmm1
111 ; FP16-NEXT: vaddss %xmm0, %xmm1, %xmm0
112 ; FP16-NEXT: vcvtneps2bf16 %xmm0, %xmm0
113 ; FP16-NEXT: vmovw %xmm0, %eax
114 ; FP16-NEXT: vmovw %eax, %xmm0
119 ; AVXNC-NEXT: vpextrw $0, %xmm0, %eax
120 ; AVXNC-NEXT: vpextrw $0, %xmm1, %ecx
121 ; AVXNC-NEXT: shll $16, %ecx
122 ; AVXNC-NEXT: vmovd %ecx, %xmm0
123 ; AVXNC-NEXT: shll $16, %eax
124 ; AVXNC-NEXT: vmovd %eax, %xmm1
125 ; AVXNC-NEXT: vaddss %xmm0, %xmm1, %xmm0
126 ; AVXNC-NEXT: {vex} vcvtneps2bf16 %xmm0, %xmm0
127 ; AVXNC-NEXT: vmovd %xmm0, %eax
128 ; AVXNC-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
130 %add = fadd bfloat %a, %b
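; Truncating double to bfloat always goes through the __truncdfbf2 libcall,
; even on targets with native bf16 conversions.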
134 define void @add_double(ptr %pa, ptr %pb, ptr %pc) nounwind {
135 ; X86-LABEL: add_double:
137 ; X86-NEXT: pushl %ebx
138 ; X86-NEXT: pushl %edi
139 ; X86-NEXT: pushl %esi
140 ; X86-NEXT: subl $16, %esp
141 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
142 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
143 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
144 ; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
145 ; X86-NEXT: vmovsd %xmm0, (%esp)
146 ; X86-NEXT: calll __truncdfbf2
147 ; X86-NEXT: vmovw %xmm0, %edi
148 ; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
149 ; X86-NEXT: vmovsd %xmm0, (%esp)
150 ; X86-NEXT: calll __truncdfbf2
151 ; X86-NEXT: vmovw %xmm0, %eax
152 ; X86-NEXT: shll $16, %eax
153 ; X86-NEXT: vmovd %eax, %xmm0
154 ; X86-NEXT: shll $16, %edi
155 ; X86-NEXT: vmovd %edi, %xmm1
156 ; X86-NEXT: vaddss %xmm0, %xmm1, %xmm0
157 ; X86-NEXT: vcvtneps2bf16 %xmm0, %xmm0
158 ; X86-NEXT: vmovw %xmm0, %eax
159 ; X86-NEXT: shll $16, %eax
160 ; X86-NEXT: vmovd %eax, %xmm0
161 ; X86-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
162 ; X86-NEXT: vmovsd %xmm0, (%esi)
163 ; X86-NEXT: addl $16, %esp
164 ; X86-NEXT: popl %esi
165 ; X86-NEXT: popl %edi
166 ; X86-NEXT: popl %ebx
169 ; SSE2-LABEL: add_double:
171 ; SSE2-NEXT: pushq %rbp
172 ; SSE2-NEXT: pushq %r14
173 ; SSE2-NEXT: pushq %rbx
174 ; SSE2-NEXT: movq %rdx, %rbx
175 ; SSE2-NEXT: movq %rsi, %r14
176 ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
177 ; SSE2-NEXT: callq __truncdfbf2@PLT
178 ; SSE2-NEXT: pextrw $0, %xmm0, %ebp
179 ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
180 ; SSE2-NEXT: callq __truncdfbf2@PLT
181 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
182 ; SSE2-NEXT: shll $16, %eax
183 ; SSE2-NEXT: movd %eax, %xmm1
184 ; SSE2-NEXT: shll $16, %ebp
185 ; SSE2-NEXT: movd %ebp, %xmm0
186 ; SSE2-NEXT: addss %xmm1, %xmm0
187 ; SSE2-NEXT: callq __truncsfbf2@PLT
188 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
189 ; SSE2-NEXT: shll $16, %eax
190 ; SSE2-NEXT: movd %eax, %xmm0
191 ; SSE2-NEXT: cvtss2sd %xmm0, %xmm0
192 ; SSE2-NEXT: movsd %xmm0, (%rbx)
193 ; SSE2-NEXT: popq %rbx
194 ; SSE2-NEXT: popq %r14
195 ; SSE2-NEXT: popq %rbp
198 ; FP16-LABEL: add_double:
200 ; FP16-NEXT: pushq %rbp
201 ; FP16-NEXT: pushq %r14
202 ; FP16-NEXT: pushq %rbx
203 ; FP16-NEXT: movq %rdx, %rbx
204 ; FP16-NEXT: movq %rsi, %r14
205 ; FP16-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
206 ; FP16-NEXT: callq __truncdfbf2@PLT
207 ; FP16-NEXT: vmovw %xmm0, %ebp
208 ; FP16-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
209 ; FP16-NEXT: callq __truncdfbf2@PLT
210 ; FP16-NEXT: vmovw %xmm0, %eax
211 ; FP16-NEXT: shll $16, %eax
212 ; FP16-NEXT: vmovd %eax, %xmm0
213 ; FP16-NEXT: shll $16, %ebp
214 ; FP16-NEXT: vmovd %ebp, %xmm1
215 ; FP16-NEXT: vaddss %xmm0, %xmm1, %xmm0
216 ; FP16-NEXT: vcvtneps2bf16 %xmm0, %xmm0
217 ; FP16-NEXT: vmovw %xmm0, %eax
218 ; FP16-NEXT: shll $16, %eax
219 ; FP16-NEXT: vmovd %eax, %xmm0
220 ; FP16-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
221 ; FP16-NEXT: vmovsd %xmm0, (%rbx)
222 ; FP16-NEXT: popq %rbx
223 ; FP16-NEXT: popq %r14
224 ; FP16-NEXT: popq %rbp
227 ; AVXNC-LABEL: add_double:
229 ; AVXNC-NEXT: pushq %rbp
230 ; AVXNC-NEXT: pushq %r14
231 ; AVXNC-NEXT: pushq %rbx
232 ; AVXNC-NEXT: movq %rdx, %rbx
233 ; AVXNC-NEXT: movq %rsi, %r14
234 ; AVXNC-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
235 ; AVXNC-NEXT: callq __truncdfbf2@PLT
236 ; AVXNC-NEXT: vpextrw $0, %xmm0, %ebp
237 ; AVXNC-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
238 ; AVXNC-NEXT: callq __truncdfbf2@PLT
239 ; AVXNC-NEXT: vpextrw $0, %xmm0, %eax
240 ; AVXNC-NEXT: shll $16, %eax
241 ; AVXNC-NEXT: vmovd %eax, %xmm0
242 ; AVXNC-NEXT: shll $16, %ebp
243 ; AVXNC-NEXT: vmovd %ebp, %xmm1
244 ; AVXNC-NEXT: vaddss %xmm0, %xmm1, %xmm0
245 ; AVXNC-NEXT: {vex} vcvtneps2bf16 %xmm0, %xmm0
246 ; AVXNC-NEXT: vmovd %xmm0, %eax
247 ; AVXNC-NEXT: shll $16, %eax
248 ; AVXNC-NEXT: vmovd %eax, %xmm0
249 ; AVXNC-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
250 ; AVXNC-NEXT: vmovsd %xmm0, (%rbx)
251 ; AVXNC-NEXT: popq %rbx
252 ; AVXNC-NEXT: popq %r14
253 ; AVXNC-NEXT: popq %rbp
255 %la = load double, ptr %pa
256 %a = fptrunc double %la to bfloat
257 %lb = load double, ptr %pb
258 %b = fptrunc double %lb to bfloat
259 %add = fadd bfloat %a, %b
260 %dadd = fpext bfloat %add to double
261 store double %dadd, ptr %pc
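; As @add_double, but the doubles arrive in registers, so the second argument
; has to be spilled across the first __truncdfbf2 call.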
265 define double @add_double2(double %da, double %db) nounwind {
266 ; X86-LABEL: add_double2:
268 ; X86-NEXT: pushl %esi
269 ; X86-NEXT: subl $24, %esp
270 ; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
271 ; X86-NEXT: vmovsd %xmm0, (%esp)
272 ; X86-NEXT: calll __truncdfbf2
273 ; X86-NEXT: vmovw %xmm0, %esi
274 ; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
275 ; X86-NEXT: vmovsd %xmm0, (%esp)
276 ; X86-NEXT: calll __truncdfbf2
277 ; X86-NEXT: vmovw %xmm0, %eax
278 ; X86-NEXT: shll $16, %eax
279 ; X86-NEXT: vmovd %eax, %xmm0
280 ; X86-NEXT: shll $16, %esi
281 ; X86-NEXT: vmovd %esi, %xmm1
282 ; X86-NEXT: vaddss %xmm0, %xmm1, %xmm0
283 ; X86-NEXT: vcvtneps2bf16 %xmm0, %xmm0
284 ; X86-NEXT: vmovw %xmm0, %eax
285 ; X86-NEXT: shll $16, %eax
286 ; X86-NEXT: vmovd %eax, %xmm0
287 ; X86-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
288 ; X86-NEXT: vmovsd %xmm0, {{[0-9]+}}(%esp)
289 ; X86-NEXT: fldl {{[0-9]+}}(%esp)
290 ; X86-NEXT: addl $24, %esp
291 ; X86-NEXT: popl %esi
294 ; SSE2-LABEL: add_double2:
296 ; SSE2-NEXT: pushq %rbx
297 ; SSE2-NEXT: subq $16, %rsp
298 ; SSE2-NEXT: movsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
299 ; SSE2-NEXT: callq __truncdfbf2@PLT
300 ; SSE2-NEXT: pextrw $0, %xmm0, %ebx
301 ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
302 ; SSE2-NEXT: # xmm0 = mem[0],zero
303 ; SSE2-NEXT: callq __truncdfbf2@PLT
304 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
305 ; SSE2-NEXT: shll $16, %eax
306 ; SSE2-NEXT: movd %eax, %xmm1
307 ; SSE2-NEXT: shll $16, %ebx
308 ; SSE2-NEXT: movd %ebx, %xmm0
309 ; SSE2-NEXT: addss %xmm1, %xmm0
310 ; SSE2-NEXT: callq __truncsfbf2@PLT
311 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
312 ; SSE2-NEXT: shll $16, %eax
313 ; SSE2-NEXT: movd %eax, %xmm0
314 ; SSE2-NEXT: cvtss2sd %xmm0, %xmm0
315 ; SSE2-NEXT: addq $16, %rsp
316 ; SSE2-NEXT: popq %rbx
319 ; FP16-LABEL: add_double2:
321 ; FP16-NEXT: pushq %rbx
322 ; FP16-NEXT: subq $16, %rsp
323 ; FP16-NEXT: vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
324 ; FP16-NEXT: callq __truncdfbf2@PLT
325 ; FP16-NEXT: vmovw %xmm0, %ebx
326 ; FP16-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
327 ; FP16-NEXT: # xmm0 = mem[0],zero
328 ; FP16-NEXT: callq __truncdfbf2@PLT
329 ; FP16-NEXT: vmovw %xmm0, %eax
330 ; FP16-NEXT: shll $16, %eax
331 ; FP16-NEXT: vmovd %eax, %xmm0
332 ; FP16-NEXT: shll $16, %ebx
333 ; FP16-NEXT: vmovd %ebx, %xmm1
334 ; FP16-NEXT: vaddss %xmm0, %xmm1, %xmm0
335 ; FP16-NEXT: vcvtneps2bf16 %xmm0, %xmm0
336 ; FP16-NEXT: vmovw %xmm0, %eax
337 ; FP16-NEXT: shll $16, %eax
338 ; FP16-NEXT: vmovd %eax, %xmm0
339 ; FP16-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
340 ; FP16-NEXT: addq $16, %rsp
341 ; FP16-NEXT: popq %rbx
344 ; AVXNC-LABEL: add_double2:
346 ; AVXNC-NEXT: pushq %rbx
347 ; AVXNC-NEXT: subq $16, %rsp
348 ; AVXNC-NEXT: vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
349 ; AVXNC-NEXT: callq __truncdfbf2@PLT
350 ; AVXNC-NEXT: vpextrw $0, %xmm0, %ebx
351 ; AVXNC-NEXT: vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
352 ; AVXNC-NEXT: # xmm0 = mem[0],zero
353 ; AVXNC-NEXT: callq __truncdfbf2@PLT
354 ; AVXNC-NEXT: vpextrw $0, %xmm0, %eax
355 ; AVXNC-NEXT: shll $16, %eax
356 ; AVXNC-NEXT: vmovd %eax, %xmm0
357 ; AVXNC-NEXT: shll $16, %ebx
358 ; AVXNC-NEXT: vmovd %ebx, %xmm1
359 ; AVXNC-NEXT: vaddss %xmm0, %xmm1, %xmm0
360 ; AVXNC-NEXT: {vex} vcvtneps2bf16 %xmm0, %xmm0
361 ; AVXNC-NEXT: vmovd %xmm0, %eax
362 ; AVXNC-NEXT: shll $16, %eax
363 ; AVXNC-NEXT: vmovd %eax, %xmm0
364 ; AVXNC-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
365 ; AVXNC-NEXT: addq $16, %rsp
366 ; AVXNC-NEXT: popq %rbx
368 %a = fptrunc double %da to bfloat
369 %b = fptrunc double %db to bfloat
370 %add = fadd bfloat %a, %b
371 %dadd = fpext bfloat %add to double
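; A constant bfloat operand is widened at compile time and folded into the
; f32 add as a constant-pool memory operand.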
375 define void @add_constant(ptr %pa, ptr %pc) nounwind {
376 ; X86-LABEL: add_constant:
378 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
379 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
380 ; X86-NEXT: movzwl (%ecx), %ecx
381 ; X86-NEXT: shll $16, %ecx
382 ; X86-NEXT: vmovd %ecx, %xmm0
383 ; X86-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
384 ; X86-NEXT: vcvtneps2bf16 %xmm0, %xmm0
385 ; X86-NEXT: vpextrw $0, %xmm0, (%eax)
388 ; SSE2-LABEL: add_constant:
390 ; SSE2-NEXT: pushq %rbx
391 ; SSE2-NEXT: movq %rsi, %rbx
392 ; SSE2-NEXT: movzwl (%rdi), %eax
393 ; SSE2-NEXT: shll $16, %eax
394 ; SSE2-NEXT: movd %eax, %xmm0
395 ; SSE2-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
396 ; SSE2-NEXT: callq __truncsfbf2@PLT
397 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
398 ; SSE2-NEXT: movw %ax, (%rbx)
399 ; SSE2-NEXT: popq %rbx
402 ; F16-LABEL: add_constant:
404 ; F16-NEXT: movzwl (%rdi), %eax
405 ; F16-NEXT: shll $16, %eax
406 ; F16-NEXT: vmovd %eax, %xmm0
407 ; F16-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
408 ; F16-NEXT: vcvtneps2bf16 %xmm0, %xmm0
409 ; F16-NEXT: vpextrw $0, %xmm0, (%rsi)
412 ; AVXNC-LABEL: add_constant:
414 ; AVXNC-NEXT: movzwl (%rdi), %eax
415 ; AVXNC-NEXT: shll $16, %eax
416 ; AVXNC-NEXT: vmovd %eax, %xmm0
417 ; AVXNC-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
418 ; AVXNC-NEXT: {vex} vcvtneps2bf16 %xmm0, %xmm0
419 ; AVXNC-NEXT: vpextrw $0, %xmm0, (%rsi)
421 %a = load bfloat, ptr %pa
422 %add = fadd bfloat %a, 1.0
423 store bfloat %add, ptr %pc
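; Register variant of @add_constant.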
427 define bfloat @add_constant2(bfloat %a) nounwind {
428 ; X86-LABEL: add_constant2:
430 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
431 ; X86-NEXT: shll $16, %eax
432 ; X86-NEXT: vmovd %eax, %xmm0
433 ; X86-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
434 ; X86-NEXT: vcvtneps2bf16 %xmm0, %xmm0
435 ; X86-NEXT: vmovw %xmm0, %eax
436 ; X86-NEXT: vmovw %eax, %xmm0
439 ; SSE2-LABEL: add_constant2:
441 ; SSE2-NEXT: pushq %rax
442 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
443 ; SSE2-NEXT: shll $16, %eax
444 ; SSE2-NEXT: movd %eax, %xmm0
445 ; SSE2-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
446 ; SSE2-NEXT: callq __truncsfbf2@PLT
447 ; SSE2-NEXT: popq %rax
450 ; FP16-LABEL: add_constant2:
452 ; FP16-NEXT: vmovw %xmm0, %eax
453 ; FP16-NEXT: shll $16, %eax
454 ; FP16-NEXT: vmovd %eax, %xmm0
455 ; FP16-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
456 ; FP16-NEXT: vcvtneps2bf16 %xmm0, %xmm0
457 ; FP16-NEXT: vmovw %xmm0, %eax
458 ; FP16-NEXT: vmovw %eax, %xmm0
461 ; AVXNC-LABEL: add_constant2:
463 ; AVXNC-NEXT: vpextrw $0, %xmm0, %eax
464 ; AVXNC-NEXT: shll $16, %eax
465 ; AVXNC-NEXT: vmovd %eax, %xmm0
466 ; AVXNC-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
467 ; AVXNC-NEXT: {vex} vcvtneps2bf16 %xmm0, %xmm0
468 ; AVXNC-NEXT: vmovd %xmm0, %eax
469 ; AVXNC-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
471 %add = fadd bfloat %a, 1.0
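; Storing a bfloat constant needs no conversion code at all: it becomes an
; immediate 16-bit store of the bit pattern (1.0 == 0x3F80).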
475 define void @store_constant(ptr %pc) nounwind {
476 ; X86-LABEL: store_constant:
478 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
479 ; X86-NEXT: movw $16256, (%eax) # imm = 0x3F80
482 ; CHECK-LABEL: store_constant:
484 ; CHECK-NEXT: movw $16256, (%rdi) # imm = 0x3F80
486 store bfloat 1.0, ptr %pc
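; An fpext immediately followed by an fptrunc back to bfloat folds away,
; leaving a plain 16-bit load/store pair.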
490 define void @fold_ext_trunc(ptr %pa, ptr %pc) nounwind {
491 ; X86-LABEL: fold_ext_trunc:
493 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
494 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
495 ; X86-NEXT: movzwl (%ecx), %ecx
496 ; X86-NEXT: movw %cx, (%eax)
499 ; CHECK-LABEL: fold_ext_trunc:
501 ; CHECK-NEXT: movzwl (%rdi), %eax
502 ; CHECK-NEXT: movw %ax, (%rsi)
504 %a = load bfloat, ptr %pa
505 %ext = fpext bfloat %a to float
506 %trunc = fptrunc float %ext to bfloat
507 store bfloat %trunc, ptr %pc
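; The same fold with the value in a register: nothing remains but returning
; the argument.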
511 define bfloat @fold_ext_trunc2(bfloat %a) nounwind {
512 ; X86-LABEL: fold_ext_trunc2:
514 ; X86-NEXT: vmovsh {{[0-9]+}}(%esp), %xmm0
517 ; CHECK-LABEL: fold_ext_trunc2:
520 %ext = fpext bfloat %a to float
521 %trunc = fptrunc float %ext to bfloat
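; Vector bfloat fadd is fully scalarized: every lane is extracted, widened to
; f32, added, narrowed, and the lanes are reassembled with unpck/pinsrw
; sequences (on SSE2, via eight __truncsfbf2 calls plus integer shifts/ors).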
525 define <8 x bfloat> @addv(<8 x bfloat> %a, <8 x bfloat> %b) nounwind {
528 ; X86-NEXT: pushl %ebp
529 ; X86-NEXT: pushl %ebx
530 ; X86-NEXT: pushl %edi
531 ; X86-NEXT: pushl %esi
532 ; X86-NEXT: vmovw %xmm1, %eax
533 ; X86-NEXT: shll $16, %eax
534 ; X86-NEXT: vmovd %eax, %xmm2
535 ; X86-NEXT: vmovw %xmm0, %eax
536 ; X86-NEXT: shll $16, %eax
537 ; X86-NEXT: vmovd %eax, %xmm3
538 ; X86-NEXT: vaddss %xmm2, %xmm3, %xmm2
539 ; X86-NEXT: vcvtneps2bf16 %xmm2, %xmm2
540 ; X86-NEXT: vmovw %xmm2, %ecx
541 ; X86-NEXT: vpextrw $1, %xmm1, %eax
542 ; X86-NEXT: shll $16, %eax
543 ; X86-NEXT: vmovd %eax, %xmm2
544 ; X86-NEXT: vpextrw $1, %xmm0, %eax
545 ; X86-NEXT: shll $16, %eax
546 ; X86-NEXT: vmovd %eax, %xmm3
547 ; X86-NEXT: vaddss %xmm2, %xmm3, %xmm2
548 ; X86-NEXT: vcvtneps2bf16 %xmm2, %xmm2
549 ; X86-NEXT: vmovw %xmm2, %eax
550 ; X86-NEXT: vpextrw $2, %xmm1, %edx
551 ; X86-NEXT: shll $16, %edx
552 ; X86-NEXT: vmovd %edx, %xmm2
553 ; X86-NEXT: vpextrw $2, %xmm0, %edx
554 ; X86-NEXT: shll $16, %edx
555 ; X86-NEXT: vmovd %edx, %xmm3
556 ; X86-NEXT: vaddss %xmm2, %xmm3, %xmm2
557 ; X86-NEXT: vcvtneps2bf16 %xmm2, %xmm2
558 ; X86-NEXT: vmovw %xmm2, %edx
559 ; X86-NEXT: vpextrw $3, %xmm1, %esi
560 ; X86-NEXT: shll $16, %esi
561 ; X86-NEXT: vmovd %esi, %xmm2
562 ; X86-NEXT: vpextrw $3, %xmm0, %esi
563 ; X86-NEXT: shll $16, %esi
564 ; X86-NEXT: vmovd %esi, %xmm3
565 ; X86-NEXT: vaddss %xmm2, %xmm3, %xmm2
566 ; X86-NEXT: vcvtneps2bf16 %xmm2, %xmm2
567 ; X86-NEXT: vmovw %xmm2, %esi
568 ; X86-NEXT: vpextrw $4, %xmm1, %edi
569 ; X86-NEXT: shll $16, %edi
570 ; X86-NEXT: vmovd %edi, %xmm2
571 ; X86-NEXT: vpextrw $4, %xmm0, %edi
572 ; X86-NEXT: shll $16, %edi
573 ; X86-NEXT: vmovd %edi, %xmm3
574 ; X86-NEXT: vaddss %xmm2, %xmm3, %xmm2
575 ; X86-NEXT: vcvtneps2bf16 %xmm2, %xmm2
576 ; X86-NEXT: vmovw %xmm2, %ebx
577 ; X86-NEXT: vpextrw $5, %xmm1, %edi
578 ; X86-NEXT: shll $16, %edi
579 ; X86-NEXT: vmovd %edi, %xmm2
580 ; X86-NEXT: vpextrw $5, %xmm0, %edi
581 ; X86-NEXT: shll $16, %edi
582 ; X86-NEXT: vmovd %edi, %xmm3
583 ; X86-NEXT: vaddss %xmm2, %xmm3, %xmm2
584 ; X86-NEXT: vcvtneps2bf16 %xmm2, %xmm2
585 ; X86-NEXT: vmovw %xmm2, %edi
586 ; X86-NEXT: vpextrw $6, %xmm1, %ebp
587 ; X86-NEXT: shll $16, %ebp
588 ; X86-NEXT: vmovd %ebp, %xmm2
589 ; X86-NEXT: vpextrw $6, %xmm0, %ebp
590 ; X86-NEXT: shll $16, %ebp
591 ; X86-NEXT: vmovd %ebp, %xmm3
592 ; X86-NEXT: vaddss %xmm2, %xmm3, %xmm3
593 ; X86-NEXT: vmovw %ecx, %xmm2
594 ; X86-NEXT: vcvtneps2bf16 %xmm3, %xmm3
595 ; X86-NEXT: vmovw %xmm3, %ecx
596 ; X86-NEXT: vmovw %ebx, %xmm3
597 ; X86-NEXT: vpextrw $7, %xmm1, %ebx
598 ; X86-NEXT: shll $16, %ebx
599 ; X86-NEXT: vmovd %ebx, %xmm1
600 ; X86-NEXT: vpextrw $7, %xmm0, %ebx
601 ; X86-NEXT: shll $16, %ebx
602 ; X86-NEXT: vmovd %ebx, %xmm0
603 ; X86-NEXT: vaddss %xmm1, %xmm0, %xmm0
604 ; X86-NEXT: vmovw %ecx, %xmm1
605 ; X86-NEXT: vcvtneps2bf16 %xmm0, %xmm0
606 ; X86-NEXT: vmovw %xmm0, %ecx
607 ; X86-NEXT: vmovw %ecx, %xmm0
608 ; X86-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
609 ; X86-NEXT: vmovw %edi, %xmm1
610 ; X86-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
611 ; X86-NEXT: vmovw %edx, %xmm3
612 ; X86-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
613 ; X86-NEXT: vmovw %esi, %xmm1
614 ; X86-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
615 ; X86-NEXT: vmovw %eax, %xmm3
616 ; X86-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
617 ; X86-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
618 ; X86-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
619 ; X86-NEXT: popl %esi
620 ; X86-NEXT: popl %edi
621 ; X86-NEXT: popl %ebx
622 ; X86-NEXT: popl %ebp
627 ; SSE2-NEXT: pushq %rbp
628 ; SSE2-NEXT: pushq %r15
629 ; SSE2-NEXT: pushq %r14
630 ; SSE2-NEXT: pushq %r13
631 ; SSE2-NEXT: pushq %r12
632 ; SSE2-NEXT: pushq %rbx
633 ; SSE2-NEXT: subq $56, %rsp
634 ; SSE2-NEXT: movq %xmm0, %rcx
635 ; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
636 ; SSE2-NEXT: movq %rcx, %rax
637 ; SSE2-NEXT: shrq $48, %rax
638 ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
639 ; SSE2-NEXT: movq %xmm1, %rdx
640 ; SSE2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
641 ; SSE2-NEXT: movq %rdx, %rax
642 ; SSE2-NEXT: shrq $48, %rax
643 ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
644 ; SSE2-NEXT: movq %rcx, %rax
645 ; SSE2-NEXT: shrq $32, %rax
646 ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
647 ; SSE2-NEXT: movq %rdx, %rax
648 ; SSE2-NEXT: shrq $32, %rax
649 ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
650 ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
651 ; SSE2-NEXT: movq %xmm0, %r15
652 ; SSE2-NEXT: movq %r15, %rbx
653 ; SSE2-NEXT: shrq $48, %rbx
654 ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1,1]
655 ; SSE2-NEXT: movq %xmm1, %r14
656 ; SSE2-NEXT: movq %r14, %rbp
657 ; SSE2-NEXT: shrq $48, %rbp
658 ; SSE2-NEXT: movq %r15, %r12
659 ; SSE2-NEXT: shrq $32, %r12
660 ; SSE2-NEXT: movq %r14, %r13
661 ; SSE2-NEXT: shrq $32, %r13
662 ; SSE2-NEXT: movl %r14d, %eax
663 ; SSE2-NEXT: shll $16, %eax
664 ; SSE2-NEXT: movd %eax, %xmm1
665 ; SSE2-NEXT: movl %r15d, %eax
666 ; SSE2-NEXT: shll $16, %eax
667 ; SSE2-NEXT: movd %eax, %xmm0
668 ; SSE2-NEXT: addss %xmm1, %xmm0
669 ; SSE2-NEXT: callq __truncsfbf2@PLT
670 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
671 ; SSE2-NEXT: movzwl %ax, %eax
672 ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
673 ; SSE2-NEXT: andl $-65536, %r14d # imm = 0xFFFF0000
674 ; SSE2-NEXT: movd %r14d, %xmm1
675 ; SSE2-NEXT: andl $-65536, %r15d # imm = 0xFFFF0000
676 ; SSE2-NEXT: movd %r15d, %xmm0
677 ; SSE2-NEXT: addss %xmm1, %xmm0
678 ; SSE2-NEXT: callq __truncsfbf2@PLT
679 ; SSE2-NEXT: pextrw $0, %xmm0, %r15d
680 ; SSE2-NEXT: shll $16, %r15d
681 ; SSE2-NEXT: addl {{[-0-9]+}}(%r{{[sb]}}p), %r15d # 4-byte Folded Reload
682 ; SSE2-NEXT: shll $16, %r13d
683 ; SSE2-NEXT: movd %r13d, %xmm1
684 ; SSE2-NEXT: shll $16, %r12d
685 ; SSE2-NEXT: movd %r12d, %xmm0
686 ; SSE2-NEXT: addss %xmm1, %xmm0
687 ; SSE2-NEXT: callq __truncsfbf2@PLT
688 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
689 ; SSE2-NEXT: movzwl %ax, %r14d
690 ; SSE2-NEXT: shll $16, %ebp
691 ; SSE2-NEXT: movd %ebp, %xmm1
692 ; SSE2-NEXT: shll $16, %ebx
693 ; SSE2-NEXT: movd %ebx, %xmm0
694 ; SSE2-NEXT: addss %xmm1, %xmm0
695 ; SSE2-NEXT: callq __truncsfbf2@PLT
696 ; SSE2-NEXT: pextrw $0, %xmm0, %ebx
697 ; SSE2-NEXT: shll $16, %ebx
698 ; SSE2-NEXT: orl %r14d, %ebx
699 ; SSE2-NEXT: shlq $32, %rbx
700 ; SSE2-NEXT: orq %r15, %rbx
701 ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
702 ; SSE2-NEXT: movl %r15d, %eax
703 ; SSE2-NEXT: shll $16, %eax
704 ; SSE2-NEXT: movd %eax, %xmm1
705 ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
706 ; SSE2-NEXT: movl %r14d, %eax
707 ; SSE2-NEXT: shll $16, %eax
708 ; SSE2-NEXT: movd %eax, %xmm0
709 ; SSE2-NEXT: addss %xmm1, %xmm0
710 ; SSE2-NEXT: callq __truncsfbf2@PLT
711 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
712 ; SSE2-NEXT: movzwl %ax, %ebp
713 ; SSE2-NEXT: movq %r15, %rax
714 ; SSE2-NEXT: andl $-65536, %eax # imm = 0xFFFF0000
715 ; SSE2-NEXT: movd %eax, %xmm1
716 ; SSE2-NEXT: movq %r14, %rax
717 ; SSE2-NEXT: andl $-65536, %eax # imm = 0xFFFF0000
718 ; SSE2-NEXT: movd %eax, %xmm0
719 ; SSE2-NEXT: addss %xmm1, %xmm0
720 ; SSE2-NEXT: callq __truncsfbf2@PLT
721 ; SSE2-NEXT: pextrw $0, %xmm0, %r14d
722 ; SSE2-NEXT: shll $16, %r14d
723 ; SSE2-NEXT: orl %ebp, %r14d
724 ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
725 ; SSE2-NEXT: shll $16, %eax
726 ; SSE2-NEXT: movd %eax, %xmm1
727 ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
728 ; SSE2-NEXT: shll $16, %eax
729 ; SSE2-NEXT: movd %eax, %xmm0
730 ; SSE2-NEXT: addss %xmm1, %xmm0
731 ; SSE2-NEXT: callq __truncsfbf2@PLT
732 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
733 ; SSE2-NEXT: movzwl %ax, %ebp
734 ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
735 ; SSE2-NEXT: shll $16, %eax
736 ; SSE2-NEXT: movd %eax, %xmm1
737 ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
738 ; SSE2-NEXT: shll $16, %eax
739 ; SSE2-NEXT: movd %eax, %xmm0
740 ; SSE2-NEXT: addss %xmm1, %xmm0
741 ; SSE2-NEXT: callq __truncsfbf2@PLT
742 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
743 ; SSE2-NEXT: shll $16, %eax
744 ; SSE2-NEXT: orl %ebp, %eax
745 ; SSE2-NEXT: shlq $32, %rax
746 ; SSE2-NEXT: orq %r14, %rax
747 ; SSE2-NEXT: movq %rax, %xmm0
748 ; SSE2-NEXT: movq %rbx, %xmm1
749 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
750 ; SSE2-NEXT: addq $56, %rsp
751 ; SSE2-NEXT: popq %rbx
752 ; SSE2-NEXT: popq %r12
753 ; SSE2-NEXT: popq %r13
754 ; SSE2-NEXT: popq %r14
755 ; SSE2-NEXT: popq %r15
756 ; SSE2-NEXT: popq %rbp
761 ; FP16-NEXT: vmovw %xmm1, %eax
762 ; FP16-NEXT: shll $16, %eax
763 ; FP16-NEXT: vmovd %eax, %xmm2
764 ; FP16-NEXT: vmovw %xmm0, %eax
765 ; FP16-NEXT: shll $16, %eax
766 ; FP16-NEXT: vmovd %eax, %xmm3
767 ; FP16-NEXT: vaddss %xmm2, %xmm3, %xmm2
768 ; FP16-NEXT: vcvtneps2bf16 %xmm2, %xmm2
769 ; FP16-NEXT: vmovw %xmm2, %eax
770 ; FP16-NEXT: vmovw %eax, %xmm2
771 ; FP16-NEXT: vpextrw $1, %xmm1, %eax
772 ; FP16-NEXT: shll $16, %eax
773 ; FP16-NEXT: vmovd %eax, %xmm3
774 ; FP16-NEXT: vpextrw $1, %xmm0, %eax
775 ; FP16-NEXT: shll $16, %eax
776 ; FP16-NEXT: vmovd %eax, %xmm4
777 ; FP16-NEXT: vaddss %xmm3, %xmm4, %xmm3
778 ; FP16-NEXT: vcvtneps2bf16 %xmm3, %xmm3
779 ; FP16-NEXT: vmovw %xmm3, %eax
780 ; FP16-NEXT: vmovw %eax, %xmm3
781 ; FP16-NEXT: vpextrw $2, %xmm1, %eax
782 ; FP16-NEXT: shll $16, %eax
783 ; FP16-NEXT: vmovd %eax, %xmm4
784 ; FP16-NEXT: vpextrw $2, %xmm0, %eax
785 ; FP16-NEXT: shll $16, %eax
786 ; FP16-NEXT: vmovd %eax, %xmm5
787 ; FP16-NEXT: vaddss %xmm4, %xmm5, %xmm4
788 ; FP16-NEXT: vcvtneps2bf16 %xmm4, %xmm4
789 ; FP16-NEXT: vmovw %xmm4, %eax
790 ; FP16-NEXT: vmovw %eax, %xmm4
791 ; FP16-NEXT: vpextrw $3, %xmm1, %eax
792 ; FP16-NEXT: shll $16, %eax
793 ; FP16-NEXT: vmovd %eax, %xmm5
794 ; FP16-NEXT: vpextrw $3, %xmm0, %eax
795 ; FP16-NEXT: shll $16, %eax
796 ; FP16-NEXT: vmovd %eax, %xmm6
797 ; FP16-NEXT: vaddss %xmm5, %xmm6, %xmm5
798 ; FP16-NEXT: vcvtneps2bf16 %xmm5, %xmm5
799 ; FP16-NEXT: vmovw %xmm5, %eax
800 ; FP16-NEXT: vmovw %eax, %xmm5
801 ; FP16-NEXT: vpextrw $4, %xmm1, %eax
802 ; FP16-NEXT: shll $16, %eax
803 ; FP16-NEXT: vmovd %eax, %xmm6
804 ; FP16-NEXT: vpextrw $4, %xmm0, %eax
805 ; FP16-NEXT: shll $16, %eax
806 ; FP16-NEXT: vmovd %eax, %xmm7
807 ; FP16-NEXT: vaddss %xmm6, %xmm7, %xmm6
808 ; FP16-NEXT: vcvtneps2bf16 %xmm6, %xmm6
809 ; FP16-NEXT: vmovw %xmm6, %eax
810 ; FP16-NEXT: vmovw %eax, %xmm6
811 ; FP16-NEXT: vpextrw $5, %xmm1, %eax
812 ; FP16-NEXT: shll $16, %eax
813 ; FP16-NEXT: vmovd %eax, %xmm7
814 ; FP16-NEXT: vpextrw $5, %xmm0, %eax
815 ; FP16-NEXT: shll $16, %eax
816 ; FP16-NEXT: vmovd %eax, %xmm8
817 ; FP16-NEXT: vaddss %xmm7, %xmm8, %xmm7
818 ; FP16-NEXT: vcvtneps2bf16 %xmm7, %xmm7
819 ; FP16-NEXT: vmovw %xmm7, %eax
820 ; FP16-NEXT: vmovw %eax, %xmm7
821 ; FP16-NEXT: vpextrw $6, %xmm1, %eax
822 ; FP16-NEXT: shll $16, %eax
823 ; FP16-NEXT: vmovd %eax, %xmm8
824 ; FP16-NEXT: vpextrw $6, %xmm0, %eax
825 ; FP16-NEXT: shll $16, %eax
826 ; FP16-NEXT: vmovd %eax, %xmm9
827 ; FP16-NEXT: vaddss %xmm8, %xmm9, %xmm8
828 ; FP16-NEXT: vcvtneps2bf16 %xmm8, %xmm8
829 ; FP16-NEXT: vmovw %xmm8, %eax
830 ; FP16-NEXT: vmovw %eax, %xmm8
831 ; FP16-NEXT: vpextrw $7, %xmm1, %eax
832 ; FP16-NEXT: shll $16, %eax
833 ; FP16-NEXT: vmovd %eax, %xmm1
834 ; FP16-NEXT: vpextrw $7, %xmm0, %eax
835 ; FP16-NEXT: shll $16, %eax
836 ; FP16-NEXT: vmovd %eax, %xmm0
837 ; FP16-NEXT: vaddss %xmm1, %xmm0, %xmm0
838 ; FP16-NEXT: vcvtneps2bf16 %xmm0, %xmm0
839 ; FP16-NEXT: vmovw %xmm0, %eax
840 ; FP16-NEXT: vmovw %eax, %xmm0
841 ; FP16-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
842 ; FP16-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
843 ; FP16-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
844 ; FP16-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
845 ; FP16-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
846 ; FP16-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
847 ; FP16-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
852 ; AVXNC-NEXT: vpextrw $7, %xmm1, %eax
853 ; AVXNC-NEXT: shll $16, %eax
854 ; AVXNC-NEXT: vmovd %eax, %xmm2
855 ; AVXNC-NEXT: vpextrw $7, %xmm0, %eax
856 ; AVXNC-NEXT: shll $16, %eax
857 ; AVXNC-NEXT: vmovd %eax, %xmm3
858 ; AVXNC-NEXT: vaddss %xmm2, %xmm3, %xmm2
859 ; AVXNC-NEXT: {vex} vcvtneps2bf16 %xmm2, %xmm2
860 ; AVXNC-NEXT: vmovd %xmm2, %eax
861 ; AVXNC-NEXT: vpextrw $6, %xmm1, %ecx
862 ; AVXNC-NEXT: shll $16, %ecx
863 ; AVXNC-NEXT: vmovd %ecx, %xmm2
864 ; AVXNC-NEXT: vpextrw $6, %xmm0, %ecx
865 ; AVXNC-NEXT: shll $16, %ecx
866 ; AVXNC-NEXT: vmovd %ecx, %xmm3
867 ; AVXNC-NEXT: vaddss %xmm2, %xmm3, %xmm2
868 ; AVXNC-NEXT: {vex} vcvtneps2bf16 %xmm2, %xmm2
869 ; AVXNC-NEXT: vmovd %xmm2, %ecx
870 ; AVXNC-NEXT: vpextrw $5, %xmm1, %edx
871 ; AVXNC-NEXT: shll $16, %edx
872 ; AVXNC-NEXT: vmovd %edx, %xmm2
873 ; AVXNC-NEXT: vpextrw $5, %xmm0, %edx
874 ; AVXNC-NEXT: shll $16, %edx
875 ; AVXNC-NEXT: vmovd %edx, %xmm3
876 ; AVXNC-NEXT: vaddss %xmm2, %xmm3, %xmm2
877 ; AVXNC-NEXT: {vex} vcvtneps2bf16 %xmm2, %xmm2
878 ; AVXNC-NEXT: vmovd %xmm2, %edx
879 ; AVXNC-NEXT: vpextrw $4, %xmm1, %esi
880 ; AVXNC-NEXT: shll $16, %esi
881 ; AVXNC-NEXT: vmovd %esi, %xmm2
882 ; AVXNC-NEXT: vpextrw $4, %xmm0, %esi
883 ; AVXNC-NEXT: shll $16, %esi
884 ; AVXNC-NEXT: vmovd %esi, %xmm3
885 ; AVXNC-NEXT: vaddss %xmm2, %xmm3, %xmm2
886 ; AVXNC-NEXT: {vex} vcvtneps2bf16 %xmm2, %xmm2
887 ; AVXNC-NEXT: vmovd %xmm2, %esi
888 ; AVXNC-NEXT: vpextrw $3, %xmm1, %edi
889 ; AVXNC-NEXT: shll $16, %edi
890 ; AVXNC-NEXT: vmovd %edi, %xmm2
891 ; AVXNC-NEXT: vpextrw $3, %xmm0, %edi
892 ; AVXNC-NEXT: shll $16, %edi
893 ; AVXNC-NEXT: vmovd %edi, %xmm3
894 ; AVXNC-NEXT: vaddss %xmm2, %xmm3, %xmm2
895 ; AVXNC-NEXT: {vex} vcvtneps2bf16 %xmm2, %xmm2
896 ; AVXNC-NEXT: vmovd %xmm2, %edi
897 ; AVXNC-NEXT: vpextrw $2, %xmm1, %r8d
898 ; AVXNC-NEXT: shll $16, %r8d
899 ; AVXNC-NEXT: vmovd %r8d, %xmm2
900 ; AVXNC-NEXT: vpextrw $2, %xmm0, %r8d
901 ; AVXNC-NEXT: shll $16, %r8d
902 ; AVXNC-NEXT: vmovd %r8d, %xmm3
903 ; AVXNC-NEXT: vaddss %xmm2, %xmm3, %xmm2
904 ; AVXNC-NEXT: {vex} vcvtneps2bf16 %xmm2, %xmm2
905 ; AVXNC-NEXT: vmovd %xmm2, %r8d
906 ; AVXNC-NEXT: vpextrw $1, %xmm1, %r9d
907 ; AVXNC-NEXT: shll $16, %r9d
908 ; AVXNC-NEXT: vmovd %r9d, %xmm2
909 ; AVXNC-NEXT: vpextrw $1, %xmm0, %r9d
910 ; AVXNC-NEXT: shll $16, %r9d
911 ; AVXNC-NEXT: vmovd %r9d, %xmm3
912 ; AVXNC-NEXT: vaddss %xmm2, %xmm3, %xmm2
913 ; AVXNC-NEXT: {vex} vcvtneps2bf16 %xmm2, %xmm2
914 ; AVXNC-NEXT: vmovd %xmm1, %r9d
915 ; AVXNC-NEXT: shll $16, %r9d
916 ; AVXNC-NEXT: vmovd %r9d, %xmm1
917 ; AVXNC-NEXT: vmovd %xmm0, %r9d
918 ; AVXNC-NEXT: shll $16, %r9d
919 ; AVXNC-NEXT: vmovd %r9d, %xmm0
920 ; AVXNC-NEXT: vaddss %xmm1, %xmm0, %xmm0
921 ; AVXNC-NEXT: {vex} vcvtneps2bf16 %xmm0, %xmm0
922 ; AVXNC-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
923 ; AVXNC-NEXT: vpinsrw $2, %r8d, %xmm0, %xmm0
924 ; AVXNC-NEXT: vpinsrw $3, %edi, %xmm0, %xmm0
925 ; AVXNC-NEXT: vpinsrw $4, %esi, %xmm0, %xmm0
926 ; AVXNC-NEXT: vpinsrw $5, %edx, %xmm0, %xmm0
927 ; AVXNC-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
928 ; AVXNC-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
930 %add = fadd <8 x bfloat> %a, %b
931 ret <8 x bfloat> %add
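; pr62997: building a <2 x bfloat> from two scalar arguments lowers to a
; simple word interleave (punpcklwd) or pextrw/pinsrw, with no f32 round-trip.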
934 define <2 x bfloat> @pr62997(bfloat %a, bfloat %b) {
935 ; X86-LABEL: pr62997:
937 ; X86-NEXT: vmovsh {{[0-9]+}}(%esp), %xmm0
938 ; X86-NEXT: vmovsh {{[0-9]+}}(%esp), %xmm1
939 ; X86-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
942 ; SSE2-LABEL: pr62997:
944 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
947 ; BF16-LABEL: pr62997:
949 ; BF16-NEXT: vpextrw $0, %xmm0, %eax
950 ; BF16-NEXT: vpextrw $0, %xmm1, %ecx
951 ; BF16-NEXT: vmovd %eax, %xmm0
952 ; BF16-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
955 ; FP16-LABEL: pr62997:
957 ; FP16-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
959 %1 = insertelement <2 x bfloat> undef, bfloat %a, i64 0
960 %2 = insertelement <2 x bfloat> %1, bfloat %b, i64 1
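; pr63017: returning a zero <32 x bfloat> is plain register zeroing, split
; across however many vector registers the target needs.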
964 define <32 x bfloat> @pr63017() {
965 ; X86-LABEL: pr63017:
967 ; X86-NEXT: vxorps %xmm0, %xmm0, %xmm0
970 ; SSE2-LABEL: pr63017:
972 ; SSE2-NEXT: xorps %xmm0, %xmm0
973 ; SSE2-NEXT: xorps %xmm1, %xmm1
974 ; SSE2-NEXT: xorps %xmm2, %xmm2
975 ; SSE2-NEXT: xorps %xmm3, %xmm3
978 ; F16-LABEL: pr63017:
980 ; F16-NEXT: vxorps %xmm0, %xmm0, %xmm0
983 ; AVXNC-LABEL: pr63017:
985 ; AVXNC-NEXT: vxorps %xmm0, %xmm0, %xmm0
986 ; AVXNC-NEXT: vxorps %xmm1, %xmm1, %xmm1
988 ret <32 x bfloat> zeroinitializer
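; pr63017_2 exercises a masked load of <32 x bfloat> with a splatted constant
; passthru vector. AVX512 targets select vmovdqu16 with a mask register, while
; SSE2 and AVXNC expand into per-element conditional-load blocks.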
991 define <32 x bfloat> @pr63017_2() nounwind {
992 ; X86-LABEL: pr63017_2:
994 ; X86-NEXT: vpbroadcastw {{.*#+}} zmm0 = [-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0]
995 ; X86-NEXT: vmovdqu16 (%eax), %zmm0 {%k1}
998 ; SSE2-LABEL: pr63017_2:
1000 ; SSE2-NEXT: pushq %r14
1001 ; SSE2-NEXT: pushq %rbx
1002 ; SSE2-NEXT: subq $200, %rsp
1003 ; SSE2-NEXT: xorl %eax, %eax
1004 ; SSE2-NEXT: testb %al, %al
1005 ; SSE2-NEXT: jne .LBB12_1
1006 ; SSE2-NEXT: # %bb.2: # %cond.load
1007 ; SSE2-NEXT: movzwl (%rax), %eax
1008 ; SSE2-NEXT: shll $16, %eax
1009 ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
1010 ; SSE2-NEXT: movd {{.*#+}} xmm0 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
1011 ; SSE2-NEXT: movdqa %xmm0, %xmm1
1012 ; SSE2-NEXT: jmp .LBB12_3
1013 ; SSE2-NEXT: .LBB12_1:
1014 ; SSE2-NEXT: movd {{.*#+}} xmm1 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
1015 ; SSE2-NEXT: movdqa %xmm1, %xmm0
1016 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1017 ; SSE2-NEXT: .LBB12_3: # %else
1018 ; SSE2-NEXT: xorl %eax, %eax
1019 ; SSE2-NEXT: testb %al, %al
1020 ; SSE2-NEXT: jne .LBB12_5
1021 ; SSE2-NEXT: # %bb.4: # %cond.load1
1022 ; SSE2-NEXT: movzwl (%rax), %eax
1023 ; SSE2-NEXT: shll $16, %eax
1024 ; SSE2-NEXT: movd %eax, %xmm0
1025 ; SSE2-NEXT: .LBB12_5: # %else2
1026 ; SSE2-NEXT: xorl %eax, %eax
1027 ; SSE2-NEXT: testb %al, %al
1028 ; SSE2-NEXT: jne .LBB12_6
1029 ; SSE2-NEXT: # %bb.7: # %cond.load4
1030 ; SSE2-NEXT: movzwl (%rax), %eax
1031 ; SSE2-NEXT: shll $16, %eax
1032 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1033 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1034 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1035 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1036 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1037 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1038 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1039 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1040 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1041 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1042 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1043 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1044 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1045 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1046 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1047 ; SSE2-NEXT: movdqa %xmm1, %xmm14
1048 ; SSE2-NEXT: movdqa %xmm1, %xmm15
1049 ; SSE2-NEXT: movdqa %xmm1, %xmm12
1050 ; SSE2-NEXT: movdqa %xmm1, %xmm13
1051 ; SSE2-NEXT: movdqa %xmm1, %xmm10
1052 ; SSE2-NEXT: movdqa %xmm1, %xmm11
1053 ; SSE2-NEXT: movdqa %xmm1, %xmm8
1054 ; SSE2-NEXT: movdqa %xmm1, %xmm9
1055 ; SSE2-NEXT: movdqa %xmm1, %xmm6
1056 ; SSE2-NEXT: movdqa %xmm1, %xmm7
1057 ; SSE2-NEXT: movdqa %xmm1, %xmm4
1058 ; SSE2-NEXT: movdqa %xmm1, %xmm5
1059 ; SSE2-NEXT: movdqa %xmm1, %xmm2
1060 ; SSE2-NEXT: movdqa %xmm1, %xmm3
1061 ; SSE2-NEXT: movd %eax, %xmm1
1062 ; SSE2-NEXT: jmp .LBB12_8
1063 ; SSE2-NEXT: .LBB12_6:
1064 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1065 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1066 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1067 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1068 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1069 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1070 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1071 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1072 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1073 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1074 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1075 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1076 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1077 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1078 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1079 ; SSE2-NEXT: movdqa %xmm1, %xmm14
1080 ; SSE2-NEXT: movdqa %xmm1, %xmm15
1081 ; SSE2-NEXT: movdqa %xmm1, %xmm12
1082 ; SSE2-NEXT: movdqa %xmm1, %xmm13
1083 ; SSE2-NEXT: movdqa %xmm1, %xmm10
1084 ; SSE2-NEXT: movdqa %xmm1, %xmm11
1085 ; SSE2-NEXT: movdqa %xmm1, %xmm8
1086 ; SSE2-NEXT: movdqa %xmm1, %xmm9
1087 ; SSE2-NEXT: movdqa %xmm1, %xmm6
1088 ; SSE2-NEXT: movdqa %xmm1, %xmm7
1089 ; SSE2-NEXT: movdqa %xmm1, %xmm4
1090 ; SSE2-NEXT: movdqa %xmm1, %xmm5
1091 ; SSE2-NEXT: movdqa %xmm1, %xmm2
1092 ; SSE2-NEXT: movdqa %xmm1, %xmm3
1093 ; SSE2-NEXT: .LBB12_8: # %else5
1094 ; SSE2-NEXT: xorl %eax, %eax
1095 ; SSE2-NEXT: testb %al, %al
1096 ; SSE2-NEXT: jne .LBB12_10
1097 ; SSE2-NEXT: # %bb.9: # %cond.load7
1098 ; SSE2-NEXT: movzwl (%rax), %eax
1099 ; SSE2-NEXT: shll $16, %eax
1100 ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
1101 ; SSE2-NEXT: .LBB12_10: # %else8
1102 ; SSE2-NEXT: xorl %eax, %eax
1103 ; SSE2-NEXT: testb %al, %al
1104 ; SSE2-NEXT: jne .LBB12_12
1105 ; SSE2-NEXT: # %bb.11: # %cond.load10
1106 ; SSE2-NEXT: movzwl (%rax), %eax
1107 ; SSE2-NEXT: shll $16, %eax
1108 ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
1109 ; SSE2-NEXT: .LBB12_12: # %else11
1110 ; SSE2-NEXT: xorl %eax, %eax
1111 ; SSE2-NEXT: testb %al, %al
1112 ; SSE2-NEXT: jne .LBB12_14
1113 ; SSE2-NEXT: # %bb.13: # %cond.load13
1114 ; SSE2-NEXT: movzwl (%rax), %eax
1115 ; SSE2-NEXT: shll $16, %eax
1116 ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
1117 ; SSE2-NEXT: .LBB12_14: # %else14
1118 ; SSE2-NEXT: xorl %eax, %eax
1119 ; SSE2-NEXT: testb %al, %al
1120 ; SSE2-NEXT: jne .LBB12_16
1121 ; SSE2-NEXT: # %bb.15: # %cond.load16
1122 ; SSE2-NEXT: movzwl (%rax), %eax
1123 ; SSE2-NEXT: shll $16, %eax
1124 ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
1125 ; SSE2-NEXT: .LBB12_16: # %else17
1126 ; SSE2-NEXT: xorl %eax, %eax
1127 ; SSE2-NEXT: testb %al, %al
1128 ; SSE2-NEXT: jne .LBB12_18
1129 ; SSE2-NEXT: # %bb.17: # %cond.load19
1130 ; SSE2-NEXT: movzwl (%rax), %eax
1131 ; SSE2-NEXT: shll $16, %eax
1132 ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
1133 ; SSE2-NEXT: .LBB12_18: # %else20
1134 ; SSE2-NEXT: xorl %eax, %eax
1135 ; SSE2-NEXT: testb %al, %al
1136 ; SSE2-NEXT: jne .LBB12_20
1137 ; SSE2-NEXT: # %bb.19: # %cond.load22
1138 ; SSE2-NEXT: movzwl (%rax), %eax
1139 ; SSE2-NEXT: shll $16, %eax
1140 ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
1141 ; SSE2-NEXT: .LBB12_20: # %else23
1142 ; SSE2-NEXT: xorl %eax, %eax
1143 ; SSE2-NEXT: testb %al, %al
1144 ; SSE2-NEXT: jne .LBB12_22
1145 ; SSE2-NEXT: # %bb.21: # %cond.load25
1146 ; SSE2-NEXT: movzwl (%rax), %eax
1147 ; SSE2-NEXT: shll $16, %eax
1148 ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
1149 ; SSE2-NEXT: .LBB12_22: # %else26
1150 ; SSE2-NEXT: xorl %eax, %eax
1151 ; SSE2-NEXT: testb %al, %al
1152 ; SSE2-NEXT: jne .LBB12_24
1153 ; SSE2-NEXT: # %bb.23: # %cond.load28
1154 ; SSE2-NEXT: movzwl (%rax), %eax
1155 ; SSE2-NEXT: shll $16, %eax
1156 ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
1157 ; SSE2-NEXT: .LBB12_24: # %else29
1158 ; SSE2-NEXT: xorl %eax, %eax
1159 ; SSE2-NEXT: testb %al, %al
1160 ; SSE2-NEXT: jne .LBB12_26
1161 ; SSE2-NEXT: # %bb.25: # %cond.load31
1162 ; SSE2-NEXT: movzwl (%rax), %eax
1163 ; SSE2-NEXT: shll $16, %eax
1164 ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
1165 ; SSE2-NEXT: .LBB12_26: # %else32
1166 ; SSE2-NEXT: xorl %eax, %eax
1167 ; SSE2-NEXT: testb %al, %al
1168 ; SSE2-NEXT: jne .LBB12_28
1169 ; SSE2-NEXT: # %bb.27: # %cond.load34
1170 ; SSE2-NEXT: movzwl (%rax), %eax
1171 ; SSE2-NEXT: shll $16, %eax
1172 ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
1173 ; SSE2-NEXT: .LBB12_28: # %else35
1174 ; SSE2-NEXT: xorl %eax, %eax
1175 ; SSE2-NEXT: testb %al, %al
1176 ; SSE2-NEXT: jne .LBB12_30
1177 ; SSE2-NEXT: # %bb.29: # %cond.load37
1178 ; SSE2-NEXT: movzwl (%rax), %eax
1179 ; SSE2-NEXT: shll $16, %eax
1180 ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
1181 ; SSE2-NEXT: .LBB12_30: # %else38
1182 ; SSE2-NEXT: xorl %eax, %eax
1183 ; SSE2-NEXT: testb %al, %al
1184 ; SSE2-NEXT: jne .LBB12_32
1185 ; SSE2-NEXT: # %bb.31: # %cond.load40
1186 ; SSE2-NEXT: movzwl (%rax), %eax
1187 ; SSE2-NEXT: shll $16, %eax
1188 ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
1189 ; SSE2-NEXT: .LBB12_32: # %else41
1190 ; SSE2-NEXT: xorl %eax, %eax
1191 ; SSE2-NEXT: testb %al, %al
1192 ; SSE2-NEXT: jne .LBB12_34
1193 ; SSE2-NEXT: # %bb.33: # %cond.load43
1194 ; SSE2-NEXT: movzwl (%rax), %eax
1195 ; SSE2-NEXT: shll $16, %eax
1196 ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
1197 ; SSE2-NEXT: .LBB12_34: # %else44
1198 ; SSE2-NEXT: xorl %eax, %eax
1199 ; SSE2-NEXT: testb %al, %al
1200 ; SSE2-NEXT: jne .LBB12_36
1201 ; SSE2-NEXT: # %bb.35: # %cond.load46
1202 ; SSE2-NEXT: movzwl (%rax), %eax
1203 ; SSE2-NEXT: shll $16, %eax
1204 ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
1205 ; SSE2-NEXT: .LBB12_36: # %else47
1206 ; SSE2-NEXT: xorl %eax, %eax
1207 ; SSE2-NEXT: testb %al, %al
1208 ; SSE2-NEXT: jne .LBB12_38
1209 ; SSE2-NEXT: # %bb.37: # %cond.load49
1210 ; SSE2-NEXT: movzwl (%rax), %eax
1211 ; SSE2-NEXT: shll $16, %eax
1212 ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
1213 ; SSE2-NEXT: .LBB12_38: # %else50
1214 ; SSE2-NEXT: xorl %eax, %eax
1215 ; SSE2-NEXT: testb %al, %al
1216 ; SSE2-NEXT: jne .LBB12_40
1217 ; SSE2-NEXT: # %bb.39: # %cond.load52
1218 ; SSE2-NEXT: movzwl (%rax), %eax
1219 ; SSE2-NEXT: shll $16, %eax
1220 ; SSE2-NEXT: movd %eax, %xmm14
1221 ; SSE2-NEXT: .LBB12_40: # %else53
1222 ; SSE2-NEXT: xorl %eax, %eax
1223 ; SSE2-NEXT: testb %al, %al
1224 ; SSE2-NEXT: jne .LBB12_42
1225 ; SSE2-NEXT: # %bb.41: # %cond.load55
1226 ; SSE2-NEXT: movzwl (%rax), %eax
1227 ; SSE2-NEXT: shll $16, %eax
1228 ; SSE2-NEXT: movd %eax, %xmm15
1229 ; SSE2-NEXT: .LBB12_42: # %else56
1230 ; SSE2-NEXT: xorl %eax, %eax
1231 ; SSE2-NEXT: testb %al, %al
1232 ; SSE2-NEXT: jne .LBB12_44
1233 ; SSE2-NEXT: # %bb.43: # %cond.load58
1234 ; SSE2-NEXT: movzwl (%rax), %eax
1235 ; SSE2-NEXT: shll $16, %eax
1236 ; SSE2-NEXT: movd %eax, %xmm12
1237 ; SSE2-NEXT: .LBB12_44: # %else59
1238 ; SSE2-NEXT: xorl %eax, %eax
1239 ; SSE2-NEXT: testb %al, %al
1240 ; SSE2-NEXT: jne .LBB12_46
1241 ; SSE2-NEXT: # %bb.45: # %cond.load61
1242 ; SSE2-NEXT: movzwl (%rax), %eax
1243 ; SSE2-NEXT: shll $16, %eax
1244 ; SSE2-NEXT: movd %eax, %xmm13
1245 ; SSE2-NEXT: .LBB12_46: # %else62
1246 ; SSE2-NEXT: xorl %eax, %eax
1247 ; SSE2-NEXT: testb %al, %al
1248 ; SSE2-NEXT: jne .LBB12_48
1249 ; SSE2-NEXT: # %bb.47: # %cond.load64
1250 ; SSE2-NEXT: movzwl (%rax), %eax
1251 ; SSE2-NEXT: shll $16, %eax
1252 ; SSE2-NEXT: movd %eax, %xmm10
1253 ; SSE2-NEXT: .LBB12_48: # %else65
1254 ; SSE2-NEXT: xorl %eax, %eax
1255 ; SSE2-NEXT: testb %al, %al
1256 ; SSE2-NEXT: jne .LBB12_50
1257 ; SSE2-NEXT: # %bb.49: # %cond.load67
1258 ; SSE2-NEXT: movzwl (%rax), %eax
1259 ; SSE2-NEXT: shll $16, %eax
1260 ; SSE2-NEXT: movd %eax, %xmm11
1261 ; SSE2-NEXT: .LBB12_50: # %else68
1262 ; SSE2-NEXT: xorl %eax, %eax
1263 ; SSE2-NEXT: testb %al, %al
1264 ; SSE2-NEXT: jne .LBB12_52
1265 ; SSE2-NEXT: # %bb.51: # %cond.load70
1266 ; SSE2-NEXT: movzwl (%rax), %eax
1267 ; SSE2-NEXT: shll $16, %eax
1268 ; SSE2-NEXT: movd %eax, %xmm8
1269 ; SSE2-NEXT: .LBB12_52: # %else71
1270 ; SSE2-NEXT: xorl %eax, %eax
1271 ; SSE2-NEXT: testb %al, %al
1272 ; SSE2-NEXT: jne .LBB12_54
1273 ; SSE2-NEXT: # %bb.53: # %cond.load73
1274 ; SSE2-NEXT: movzwl (%rax), %eax
1275 ; SSE2-NEXT: shll $16, %eax
1276 ; SSE2-NEXT: movd %eax, %xmm9
1277 ; SSE2-NEXT: .LBB12_54: # %else74
1278 ; SSE2-NEXT: xorl %eax, %eax
1279 ; SSE2-NEXT: testb %al, %al
1280 ; SSE2-NEXT: jne .LBB12_56
1281 ; SSE2-NEXT: # %bb.55: # %cond.load76
1282 ; SSE2-NEXT: movzwl (%rax), %eax
1283 ; SSE2-NEXT: shll $16, %eax
1284 ; SSE2-NEXT: movd %eax, %xmm6
1285 ; SSE2-NEXT: .LBB12_56: # %else77
1286 ; SSE2-NEXT: xorl %eax, %eax
1287 ; SSE2-NEXT: testb %al, %al
1288 ; SSE2-NEXT: jne .LBB12_58
1289 ; SSE2-NEXT: # %bb.57: # %cond.load79
1290 ; SSE2-NEXT: movzwl (%rax), %eax
1291 ; SSE2-NEXT: shll $16, %eax
1292 ; SSE2-NEXT: movd %eax, %xmm7
1293 ; SSE2-NEXT: .LBB12_58: # %else80
1294 ; SSE2-NEXT: xorl %eax, %eax
1295 ; SSE2-NEXT: testb %al, %al
1296 ; SSE2-NEXT: jne .LBB12_60
1297 ; SSE2-NEXT: # %bb.59: # %cond.load82
1298 ; SSE2-NEXT: movzwl (%rax), %eax
1299 ; SSE2-NEXT: shll $16, %eax
1300 ; SSE2-NEXT: movd %eax, %xmm4
1301 ; SSE2-NEXT: .LBB12_60: # %else83
1302 ; SSE2-NEXT: xorl %eax, %eax
1303 ; SSE2-NEXT: testb %al, %al
1304 ; SSE2-NEXT: jne .LBB12_62
1305 ; SSE2-NEXT: # %bb.61: # %cond.load85
1306 ; SSE2-NEXT: movzwl (%rax), %eax
1307 ; SSE2-NEXT: shll $16, %eax
1308 ; SSE2-NEXT: movd %eax, %xmm5
1309 ; SSE2-NEXT: .LBB12_62: # %else86
1310 ; SSE2-NEXT: xorl %eax, %eax
1311 ; SSE2-NEXT: testb %al, %al
1312 ; SSE2-NEXT: jne .LBB12_64
1313 ; SSE2-NEXT: # %bb.63: # %cond.load88
1314 ; SSE2-NEXT: movzwl (%rax), %eax
1315 ; SSE2-NEXT: shll $16, %eax
1316 ; SSE2-NEXT: movd %eax, %xmm2
1317 ; SSE2-NEXT: .LBB12_64: # %else89
1318 ; SSE2-NEXT: xorl %eax, %eax
1319 ; SSE2-NEXT: testb %al, %al
1320 ; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1321 ; SSE2-NEXT: movd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1322 ; SSE2-NEXT: movd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1323 ; SSE2-NEXT: movd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1324 ; SSE2-NEXT: movd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1325 ; SSE2-NEXT: movd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1326 ; SSE2-NEXT: movd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1327 ; SSE2-NEXT: movd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1328 ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1329 ; SSE2-NEXT: movd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1330 ; SSE2-NEXT: movd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1331 ; SSE2-NEXT: movd %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1332 ; SSE2-NEXT: movd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1333 ; SSE2-NEXT: movd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1334 ; SSE2-NEXT: jne .LBB12_65
1335 ; SSE2-NEXT: # %bb.66: # %cond.load91
1336 ; SSE2-NEXT: movzwl (%rax), %eax
1337 ; SSE2-NEXT: shll $16, %eax
1338 ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
1339 ; SSE2-NEXT: jmp .LBB12_67
1340 ; SSE2-NEXT: .LBB12_65:
1341 ; SSE2-NEXT: movd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
1342 ; SSE2-NEXT: .LBB12_67: # %else92
1343 ; SSE2-NEXT: callq __truncsfbf2@PLT
1344 ; SSE2-NEXT: pextrw $0, %xmm0, %ebx
1345 ; SSE2-NEXT: shll $16, %ebx
1346 ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
1347 ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
1348 ; SSE2-NEXT: callq __truncsfbf2@PLT
1349 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
1350 ; SSE2-NEXT: movzwl %ax, %r14d
1351 ; SSE2-NEXT: orl %ebx, %r14d
1352 ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
1353 ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
1354 ; SSE2-NEXT: callq __truncsfbf2@PLT
1355 ; SSE2-NEXT: pextrw $0, %xmm0, %ebx
1356 ; SSE2-NEXT: shll $16, %ebx
1357 ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
1358 ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
1359 ; SSE2-NEXT: callq __truncsfbf2@PLT
1360 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
1361 ; SSE2-NEXT: movzwl %ax, %eax
1362 ; SSE2-NEXT: orl %ebx, %eax
1363 ; SSE2-NEXT: shlq $32, %rax
1364 ; SSE2-NEXT: orq %r14, %rax
1365 ; SSE2-NEXT: movq %rax, %xmm0
1366 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1367 ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
1368 ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
1369 ; SSE2-NEXT: callq __truncsfbf2@PLT
1370 ; SSE2-NEXT: pextrw $0, %xmm0, %ebx
1371 ; SSE2-NEXT: shll $16, %ebx
1372 ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
1373 ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
1374 ; SSE2-NEXT: callq __truncsfbf2@PLT
1375 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
1376 ; SSE2-NEXT: movzwl %ax, %r14d
1377 ; SSE2-NEXT: orl %ebx, %r14d
1378 ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
1379 ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
1380 ; SSE2-NEXT: callq __truncsfbf2@PLT
1381 ; SSE2-NEXT: pextrw $0, %xmm0, %ebx
1382 ; SSE2-NEXT: shll $16, %ebx
1383 ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
1384 ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
1385 ; SSE2-NEXT: callq __truncsfbf2@PLT
1386 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
1387 ; SSE2-NEXT: movzwl %ax, %eax
1388 ; SSE2-NEXT: orl %ebx, %eax
1389 ; SSE2-NEXT: shlq $32, %rax
1390 ; SSE2-NEXT: orq %r14, %rax
1391 ; SSE2-NEXT: movq %rax, %xmm0
1392 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1393 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
1394 ; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1395 ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
1396 ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
1397 ; SSE2-NEXT: callq __truncsfbf2@PLT
1398 ; SSE2-NEXT: pextrw $0, %xmm0, %ebx
1399 ; SSE2-NEXT: shll $16, %ebx
1400 ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
1401 ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
1402 ; SSE2-NEXT: callq __truncsfbf2@PLT
1403 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
1404 ; SSE2-NEXT: movzwl %ax, %r14d
1405 ; SSE2-NEXT: orl %ebx, %r14d
1406 ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
1407 ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
1408 ; SSE2-NEXT: callq __truncsfbf2@PLT
1409 ; SSE2-NEXT: pextrw $0, %xmm0, %ebx
1410 ; SSE2-NEXT: shll $16, %ebx
1411 ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
1412 ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
1413 ; SSE2-NEXT: callq __truncsfbf2@PLT
1414 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
1415 ; SSE2-NEXT: movzwl %ax, %eax
1416 ; SSE2-NEXT: orl %ebx, %eax
1417 ; SSE2-NEXT: shlq $32, %rax
1418 ; SSE2-NEXT: orq %r14, %rax
1419 ; SSE2-NEXT: movq %rax, %xmm0
1420 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1421 ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
1422 ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
1423 ; SSE2-NEXT: callq __truncsfbf2@PLT
1424 ; SSE2-NEXT: pextrw $0, %xmm0, %ebx
1425 ; SSE2-NEXT: shll $16, %ebx
1426 ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
1427 ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
1428 ; SSE2-NEXT: callq __truncsfbf2@PLT
1429 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
1430 ; SSE2-NEXT: movzwl %ax, %r14d
1431 ; SSE2-NEXT: orl %ebx, %r14d
1432 ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
1433 ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
1434 ; SSE2-NEXT: callq __truncsfbf2@PLT
1435 ; SSE2-NEXT: pextrw $0, %xmm0, %ebx
1436 ; SSE2-NEXT: shll $16, %ebx
1437 ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
1438 ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
1439 ; SSE2-NEXT: callq __truncsfbf2@PLT
1440 ; SSE2-NEXT: pextrw $0, %xmm0, %eax
1441 ; SSE2-NEXT: movzwl %ax, %eax
1442 ; SSE2-NEXT: orl %ebx, %eax
1443 ; SSE2-NEXT: shlq $32, %rax
1444 ; SSE2-NEXT: orq %r14, %rax
1445 ; SSE2-NEXT: movq %rax, %xmm0
1446 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1447 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
1448 ; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1449 ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
1450 ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebx
; SSE2-NEXT: shll $16, %ebx
; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %r14d
; SSE2-NEXT: orl %ebx, %r14d
; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebx
; SSE2-NEXT: shll $16, %ebx
; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %eax
; SSE2-NEXT: orl %ebx, %eax
; SSE2-NEXT: shlq $32, %rax
; SSE2-NEXT: orq %r14, %rax
; SSE2-NEXT: movq %rax, %xmm0
; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebx
; SSE2-NEXT: shll $16, %ebx
; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %r14d
; SSE2-NEXT: orl %ebx, %r14d
; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebx
; SSE2-NEXT: shll $16, %ebx
; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %eax
; SSE2-NEXT: orl %ebx, %eax
; SSE2-NEXT: shlq $32, %rax
; SSE2-NEXT: orq %r14, %rax
; SSE2-NEXT: movq %rax, %xmm0
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebx
; SSE2-NEXT: shll $16, %ebx
; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %r14d
; SSE2-NEXT: orl %ebx, %r14d
; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebx
; SSE2-NEXT: shll $16, %ebx
; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %eax
; SSE2-NEXT: orl %ebx, %eax
; SSE2-NEXT: shlq $32, %rax
; SSE2-NEXT: orq %r14, %rax
; SSE2-NEXT: movq %rax, %xmm0
; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebx
; SSE2-NEXT: shll $16, %ebx
; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %r14d
; SSE2-NEXT: orl %ebx, %r14d
; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebx
; SSE2-NEXT: shll $16, %ebx
; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %eax
; SSE2-NEXT: orl %ebx, %eax
; SSE2-NEXT: shlq $32, %rax
; SSE2-NEXT: orq %r14, %rax
; SSE2-NEXT: movq %rax, %xmm0
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE2-NEXT: addq $200, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %r14
; SSE2-NEXT: retq
;
; FP16-LABEL: pr63017_2:
; FP16: # %bb.0:
; FP16-NEXT: vpbroadcastw {{.*#+}} zmm0 = [-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0,-1.875E+0]
; FP16-NEXT: vmovdqu16 (%rax), %zmm0 {%k1}
; FP16-NEXT: retq
;
; AVXNC-LABEL: pr63017_2:
; AVXNC: # %bb.0:
; AVXNC-NEXT: vpbroadcastw {{.*#+}} ymm0 = [49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024]
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: vmovdqa %ymm0, %ymm1
; AVXNC-NEXT: jne .LBB12_2
; AVXNC-NEXT: # %bb.1: # %cond.load
; AVXNC-NEXT: vpbroadcastw {{.*#+}} ymm1 = [49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024]
; AVXNC-NEXT: vpbroadcastw {{.*#+}} ymm0 = [49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024]
; AVXNC-NEXT: vpinsrw $0, (%rax), %xmm0, %xmm2
; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVXNC-NEXT: .LBB12_2: # %else
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_4
; AVXNC-NEXT: # %bb.3: # %cond.load1
; AVXNC-NEXT: vpinsrw $1, (%rax), %xmm0, %xmm2
; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVXNC-NEXT: .LBB12_4: # %else2
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_6
; AVXNC-NEXT: # %bb.5: # %cond.load4
; AVXNC-NEXT: vpinsrw $2, (%rax), %xmm0, %xmm2
; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVXNC-NEXT: .LBB12_6: # %else5
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_8
; AVXNC-NEXT: # %bb.7: # %cond.load7
; AVXNC-NEXT: vpinsrw $3, (%rax), %xmm0, %xmm2
; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVXNC-NEXT: .LBB12_8: # %else8
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_10
; AVXNC-NEXT: # %bb.9: # %cond.load10
; AVXNC-NEXT: vpinsrw $4, (%rax), %xmm0, %xmm2
; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVXNC-NEXT: .LBB12_10: # %else11
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_12
; AVXNC-NEXT: # %bb.11: # %cond.load13
; AVXNC-NEXT: vpinsrw $5, (%rax), %xmm0, %xmm2
; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVXNC-NEXT: .LBB12_12: # %else14
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_14
; AVXNC-NEXT: # %bb.13: # %cond.load16
; AVXNC-NEXT: vpinsrw $6, (%rax), %xmm0, %xmm2
; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVXNC-NEXT: .LBB12_14: # %else17
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_16
; AVXNC-NEXT: # %bb.15: # %cond.load19
; AVXNC-NEXT: vpinsrw $7, (%rax), %xmm0, %xmm2
; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVXNC-NEXT: .LBB12_16: # %else20
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_18
; AVXNC-NEXT: # %bb.17: # %cond.load22
; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2
; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm0[1,2,3,4,5,6,7],ymm2[8],ymm0[9,10,11,12,13,14,15]
; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
; AVXNC-NEXT: .LBB12_18: # %else23
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_20
; AVXNC-NEXT: # %bb.19: # %cond.load25
; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2
; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3,4,5,6,7,8],ymm2[9],ymm0[10,11,12,13,14,15]
; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
; AVXNC-NEXT: .LBB12_20: # %else26
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_22
; AVXNC-NEXT: # %bb.21: # %cond.load28
; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2
; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm2[2],ymm0[3,4,5,6,7,8,9],ymm2[10],ymm0[11,12,13,14,15]
; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
; AVXNC-NEXT: .LBB12_22: # %else29
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_24
; AVXNC-NEXT: # %bb.23: # %cond.load31
; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2
; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1,2],ymm2[3],ymm0[4,5,6,7,8,9,10],ymm2[11],ymm0[12,13,14,15]
; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
; AVXNC-NEXT: .LBB12_24: # %else32
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_26
; AVXNC-NEXT: # %bb.25: # %cond.load34
; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2
; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm2[4],ymm0[5,6,7,8,9,10,11],ymm2[12],ymm0[13,14,15]
; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
; AVXNC-NEXT: .LBB12_26: # %else35
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_28
; AVXNC-NEXT: # %bb.27: # %cond.load37
; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2
; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7,8,9,10,11,12],ymm2[13],ymm0[14,15]
; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
; AVXNC-NEXT: .LBB12_28: # %else38
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_30
; AVXNC-NEXT: # %bb.29: # %cond.load40
; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2
; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm2[6],ymm0[7,8,9,10,11,12,13],ymm2[14],ymm0[15]
; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
; AVXNC-NEXT: .LBB12_30: # %else41
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_32
; AVXNC-NEXT: # %bb.31: # %cond.load43
; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2
; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5,6],ymm2[7],ymm0[8,9,10,11,12,13,14],ymm2[15]
; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
; AVXNC-NEXT: .LBB12_32: # %else44
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_34
; AVXNC-NEXT: # %bb.33: # %cond.load46
; AVXNC-NEXT: vpinsrw $0, (%rax), %xmm1, %xmm2
; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVXNC-NEXT: .LBB12_34: # %else47
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_36
; AVXNC-NEXT: # %bb.35: # %cond.load49
; AVXNC-NEXT: vpinsrw $1, (%rax), %xmm1, %xmm2
; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVXNC-NEXT: .LBB12_36: # %else50
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_38
; AVXNC-NEXT: # %bb.37: # %cond.load52
; AVXNC-NEXT: vpinsrw $2, (%rax), %xmm1, %xmm2
; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVXNC-NEXT: .LBB12_38: # %else53
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_40
; AVXNC-NEXT: # %bb.39: # %cond.load55
; AVXNC-NEXT: vpinsrw $3, (%rax), %xmm1, %xmm2
; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVXNC-NEXT: .LBB12_40: # %else56
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_42
; AVXNC-NEXT: # %bb.41: # %cond.load58
; AVXNC-NEXT: vpinsrw $4, (%rax), %xmm1, %xmm2
; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVXNC-NEXT: .LBB12_42: # %else59
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_44
; AVXNC-NEXT: # %bb.43: # %cond.load61
; AVXNC-NEXT: vpinsrw $5, (%rax), %xmm1, %xmm2
; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVXNC-NEXT: .LBB12_44: # %else62
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_46
; AVXNC-NEXT: # %bb.45: # %cond.load64
; AVXNC-NEXT: vpinsrw $6, (%rax), %xmm1, %xmm2
; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVXNC-NEXT: .LBB12_46: # %else65
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_48
; AVXNC-NEXT: # %bb.47: # %cond.load67
; AVXNC-NEXT: vpinsrw $7, (%rax), %xmm1, %xmm2
; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVXNC-NEXT: .LBB12_48: # %else68
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_50
; AVXNC-NEXT: # %bb.49: # %cond.load70
; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2
; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm1[1,2,3,4,5,6,7],ymm2[8],ymm1[9,10,11,12,13,14,15]
; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVXNC-NEXT: .LBB12_50: # %else71
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_52
; AVXNC-NEXT: # %bb.51: # %cond.load73
; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2
; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7,8],ymm2[9],ymm1[10,11,12,13,14,15]
; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVXNC-NEXT: .LBB12_52: # %else74
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_54
; AVXNC-NEXT: # %bb.53: # %cond.load76
; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2
; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1],ymm2[2],ymm1[3,4,5,6,7,8,9],ymm2[10],ymm1[11,12,13,14,15]
; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVXNC-NEXT: .LBB12_54: # %else77
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_56
; AVXNC-NEXT: # %bb.55: # %cond.load79
; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2
; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3],ymm1[4,5,6,7,8,9,10],ymm2[11],ymm1[12,13,14,15]
; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVXNC-NEXT: .LBB12_56: # %else80
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_58
; AVXNC-NEXT: # %bb.57: # %cond.load82
; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2
; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm2[4],ymm1[5,6,7,8,9,10,11],ymm2[12],ymm1[13,14,15]
; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVXNC-NEXT: .LBB12_58: # %else83
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_60
; AVXNC-NEXT: # %bb.59: # %cond.load85
; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2
; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7,8,9,10,11,12],ymm2[13],ymm1[14,15]
; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVXNC-NEXT: .LBB12_60: # %else86
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_62
; AVXNC-NEXT: # %bb.61: # %cond.load88
; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2
; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5],ymm2[6],ymm1[7,8,9,10,11,12,13],ymm2[14],ymm1[15]
; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVXNC-NEXT: .LBB12_62: # %else89
; AVXNC-NEXT: xorl %eax, %eax
; AVXNC-NEXT: testb %al, %al
; AVXNC-NEXT: jne .LBB12_64
; AVXNC-NEXT: # %bb.63: # %cond.load91
; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2
; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5,6],ymm2[7],ymm1[8,9,10,11,12,13,14],ymm2[15]
; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVXNC-NEXT: .LBB12_64: # %else92
; AVXNC-NEXT: retq
%1 = call <32 x bfloat> @llvm.masked.load.v32bf16.p0(ptr poison, i32 2, <32 x i1> poison, <32 x bfloat> <bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80>)
ret <32 x bfloat> %1
}

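; pr62997_3: insertelement of the scalar bfloat argument into lane 1 of a
; <32 x bfloat> argument; only the low 128-bit lane should be rewritten.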
define <32 x bfloat> @pr62997_3(<32 x bfloat> %0, bfloat %1) {
; X86-LABEL: pr62997_3:
; X86: # %bb.0:
; X86-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm1
; X86-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0
; X86-NEXT: retl
;
; SSE2-LABEL: pr62997_3:
; SSE2: # %bb.0:
; SSE2-NEXT: movq %xmm0, %rax
; SSE2-NEXT: movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
; SSE2-NEXT: andq %rax, %rcx
; SSE2-NEXT: movzwl %ax, %eax
; SSE2-NEXT: pextrw $0, %xmm4, %edx
; SSE2-NEXT: shll $16, %edx
; SSE2-NEXT: orl %eax, %edx
; SSE2-NEXT: orq %rcx, %rdx
; SSE2-NEXT: movq %rdx, %xmm4
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
; SSE2-NEXT: retq
;
; FP16-LABEL: pr62997_3:
; FP16: # %bb.0:
; FP16-NEXT: vmovw %xmm1, %eax
; FP16-NEXT: vpinsrw $1, %eax, %xmm0, %xmm1
; FP16-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0
; FP16-NEXT: retq
;
; AVXNC-LABEL: pr62997_3:
; AVXNC: # %bb.0:
; AVXNC-NEXT: vpextrw $0, %xmm2, %eax
; AVXNC-NEXT: vpinsrw $1, %eax, %xmm0, %xmm2
; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVXNC-NEXT: retq
%3 = insertelement <32 x bfloat> %0, bfloat %1, i64 1
ret <32 x bfloat> %3
}

declare <32 x bfloat> @llvm.masked.load.v32bf16.p0(ptr, i32, <32 x i1>, <32 x bfloat>)

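; The pr64460_* tests exercise fpext from bfloat vectors. A bfloat widens to
; float by placing its 16-bit payload in the high half of a 32-bit lane, so
; the expected lowering is a 16-bit left shift (shll/vpslld $16) per element.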
define <4 x float> @pr64460_1(<4 x bfloat> %a) {
; X86-LABEL: pr64460_1:
; X86: # %bb.0:
; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X86-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X86-NEXT: retl
;
; SSE2-LABEL: pr64460_1:
; SSE2: # %bb.0:
; SSE2-NEXT: pextrw $1, %xmm0, %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movd %eax, %xmm2
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: pextrw $3, %xmm0, %eax
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movd %eax, %xmm2
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: pr64460_1:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: retq
%b = fpext <4 x bfloat> %a to <4 x float>
ret <4 x float> %b
}

define <8 x float> @pr64460_2(<8 x bfloat> %a) {
; X86-LABEL: pr64460_2:
; X86: # %bb.0:
; X86-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X86-NEXT: vpslld $16, %ymm0, %ymm0
; X86-NEXT: retl
;
; SSE2-LABEL: pr64460_2:
; SSE2: # %bb.0:
; SSE2-NEXT: movq %xmm0, %rdx
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: movq %xmm0, %rcx
; SSE2-NEXT: movq %rcx, %rax
; SSE2-NEXT: shrq $32, %rax
; SSE2-NEXT: movq %rdx, %rsi
; SSE2-NEXT: shrq $32, %rsi
; SSE2-NEXT: movl %edx, %edi
; SSE2-NEXT: andl $-65536, %edi # imm = 0xFFFF0000
; SSE2-NEXT: movd %edi, %xmm1
; SSE2-NEXT: movl %edx, %edi
; SSE2-NEXT: shll $16, %edi
; SSE2-NEXT: movd %edi, %xmm0
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: shrq $48, %rdx
; SSE2-NEXT: shll $16, %edx
; SSE2-NEXT: movd %edx, %xmm1
; SSE2-NEXT: shll $16, %esi
; SSE2-NEXT: movd %esi, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE2-NEXT: movl %ecx, %edx
; SSE2-NEXT: andl $-65536, %edx # imm = 0xFFFF0000
; SSE2-NEXT: movd %edx, %xmm2
; SSE2-NEXT: movl %ecx, %edx
; SSE2-NEXT: shll $16, %edx
; SSE2-NEXT: movd %edx, %xmm1
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: shrq $48, %rcx
; SSE2-NEXT: shll $16, %ecx
; SSE2-NEXT: movd %ecx, %xmm2
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; SSE2-NEXT: retq
;
; AVX-LABEL: pr64460_2:
; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT: vpslld $16, %ymm0, %ymm0
; AVX-NEXT: retq
%b = fpext <8 x bfloat> %a to <8 x float>
ret <8 x float> %b
}

define <16 x float> @pr64460_3(<16 x bfloat> %a) {
; X86-LABEL: pr64460_3:
; X86: # %bb.0:
; X86-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; X86-NEXT: vpslld $16, %zmm0, %zmm0
; X86-NEXT: retl
;
; SSE2-LABEL: pr64460_3:
; SSE2: # %bb.0:
; SSE2-NEXT: movq %xmm1, %rdi
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1,1]
; SSE2-NEXT: movq %xmm1, %rcx
; SSE2-NEXT: movq %rcx, %rax
; SSE2-NEXT: shrq $32, %rax
; SSE2-NEXT: movq %xmm0, %r9
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: movq %xmm0, %rsi
; SSE2-NEXT: movq %rsi, %rdx
; SSE2-NEXT: shrq $32, %rdx
; SSE2-NEXT: movq %rdi, %r8
; SSE2-NEXT: shrq $32, %r8
; SSE2-NEXT: movq %r9, %r10
; SSE2-NEXT: shrq $32, %r10
; SSE2-NEXT: movl %r9d, %r11d
; SSE2-NEXT: andl $-65536, %r11d # imm = 0xFFFF0000
; SSE2-NEXT: movd %r11d, %xmm1
; SSE2-NEXT: movl %r9d, %r11d
; SSE2-NEXT: shll $16, %r11d
; SSE2-NEXT: movd %r11d, %xmm0
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: shrq $48, %r9
; SSE2-NEXT: shll $16, %r9d
; SSE2-NEXT: movd %r9d, %xmm1
; SSE2-NEXT: shll $16, %r10d
; SSE2-NEXT: movd %r10d, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE2-NEXT: movl %edi, %r9d
; SSE2-NEXT: andl $-65536, %r9d # imm = 0xFFFF0000
; SSE2-NEXT: movd %r9d, %xmm1
; SSE2-NEXT: movl %edi, %r9d
; SSE2-NEXT: shll $16, %r9d
; SSE2-NEXT: movd %r9d, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: shrq $48, %rdi
; SSE2-NEXT: shll $16, %edi
; SSE2-NEXT: movd %edi, %xmm1
; SSE2-NEXT: shll $16, %r8d
; SSE2-NEXT: movd %r8d, %xmm3
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE2-NEXT: movl %esi, %edi
; SSE2-NEXT: andl $-65536, %edi # imm = 0xFFFF0000
; SSE2-NEXT: movd %edi, %xmm3
; SSE2-NEXT: movl %esi, %edi
; SSE2-NEXT: shll $16, %edi
; SSE2-NEXT: movd %edi, %xmm1
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; SSE2-NEXT: shrq $48, %rsi
; SSE2-NEXT: shll $16, %esi
; SSE2-NEXT: movd %esi, %xmm3
; SSE2-NEXT: shll $16, %edx
; SSE2-NEXT: movd %edx, %xmm4
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
; SSE2-NEXT: movl %ecx, %edx
; SSE2-NEXT: andl $-65536, %edx # imm = 0xFFFF0000
; SSE2-NEXT: movd %edx, %xmm4
; SSE2-NEXT: movl %ecx, %edx
; SSE2-NEXT: shll $16, %edx
; SSE2-NEXT: movd %edx, %xmm3
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; SSE2-NEXT: shrq $48, %rcx
; SSE2-NEXT: shll $16, %ecx
; SSE2-NEXT: movd %ecx, %xmm4
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movd %eax, %xmm5
; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
; SSE2-NEXT: retq
;
; F16-LABEL: pr64460_3:
; F16: # %bb.0:
; F16-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; F16-NEXT: vpslld $16, %zmm0, %zmm0
; F16-NEXT: retq
;
; AVXNC-LABEL: pr64460_3:
; AVXNC: # %bb.0:
; AVXNC-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVXNC-NEXT: vpslld $16, %ymm1, %ymm2
; AVXNC-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVXNC-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVXNC-NEXT: vpslld $16, %ymm0, %ymm1
; AVXNC-NEXT: vmovdqa %ymm2, %ymm0
; AVXNC-NEXT: retq
%b = fpext <16 x bfloat> %a to <16 x float>
ret <16 x float> %b
}

define <8 x double> @pr64460_4(<8 x bfloat> %a) {
; X86-LABEL: pr64460_4:
; X86: # %bb.0:
; X86-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X86-NEXT: vpslld $16, %ymm0, %ymm0
; X86-NEXT: vcvtps2pd %ymm0, %zmm0
; X86-NEXT: retl
;
; SSE2-LABEL: pr64460_4:
; SSE2: # %bb.0:
; SSE2-NEXT: movq %xmm0, %rsi
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: movq %xmm0, %rdx
; SSE2-NEXT: movq %rdx, %rax
; SSE2-NEXT: shrq $32, %rax
; SSE2-NEXT: movq %rdx, %rcx
; SSE2-NEXT: shrq $48, %rcx
; SSE2-NEXT: movq %rsi, %rdi
; SSE2-NEXT: shrq $32, %rdi
; SSE2-NEXT: movq %rsi, %r8
; SSE2-NEXT: shrq $48, %r8
; SSE2-NEXT: movl %esi, %r9d
; SSE2-NEXT: andl $-65536, %r9d # imm = 0xFFFF0000
; SSE2-NEXT: movd %r9d, %xmm0
; SSE2-NEXT: cvtss2sd %xmm0, %xmm1
; SSE2-NEXT: shll $16, %esi
; SSE2-NEXT: movd %esi, %xmm0
; SSE2-NEXT: cvtss2sd %xmm0, %xmm0
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: shll $16, %r8d
; SSE2-NEXT: movd %r8d, %xmm1
; SSE2-NEXT: cvtss2sd %xmm1, %xmm2
; SSE2-NEXT: shll $16, %edi
; SSE2-NEXT: movd %edi, %xmm1
; SSE2-NEXT: cvtss2sd %xmm1, %xmm1
; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: movl %edx, %esi
; SSE2-NEXT: andl $-65536, %esi # imm = 0xFFFF0000
; SSE2-NEXT: movd %esi, %xmm2
; SSE2-NEXT: cvtss2sd %xmm2, %xmm3
; SSE2-NEXT: shll $16, %edx
; SSE2-NEXT: movd %edx, %xmm2
; SSE2-NEXT: cvtss2sd %xmm2, %xmm2
; SSE2-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE2-NEXT: shll $16, %ecx
; SSE2-NEXT: movd %ecx, %xmm3
; SSE2-NEXT: cvtss2sd %xmm3, %xmm4
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: cvtss2sd %xmm3, %xmm3
; SSE2-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm4[0]
; SSE2-NEXT: retq
;
; F16-LABEL: pr64460_4:
; F16: # %bb.0:
; F16-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; F16-NEXT: vpslld $16, %ymm0, %ymm0
; F16-NEXT: vcvtps2pd %ymm0, %zmm0
; F16-NEXT: retq
;
; AVXNC-LABEL: pr64460_4:
; AVXNC: # %bb.0:
; AVXNC-NEXT: vpextrw $3, %xmm0, %eax
; AVXNC-NEXT: shll $16, %eax
; AVXNC-NEXT: vmovd %eax, %xmm1
; AVXNC-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; AVXNC-NEXT: vpextrw $2, %xmm0, %eax
; AVXNC-NEXT: shll $16, %eax
; AVXNC-NEXT: vmovd %eax, %xmm2
; AVXNC-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
; AVXNC-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVXNC-NEXT: vpextrw $1, %xmm0, %eax
; AVXNC-NEXT: shll $16, %eax
; AVXNC-NEXT: vmovd %eax, %xmm2
; AVXNC-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
; AVXNC-NEXT: vmovd %xmm0, %eax
; AVXNC-NEXT: shll $16, %eax
; AVXNC-NEXT: vmovd %eax, %xmm3
; AVXNC-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
; AVXNC-NEXT: vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVXNC-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
; AVXNC-NEXT: vpextrw $7, %xmm0, %eax
; AVXNC-NEXT: shll $16, %eax
; AVXNC-NEXT: vmovd %eax, %xmm1
; AVXNC-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; AVXNC-NEXT: vpextrw $6, %xmm0, %eax
; AVXNC-NEXT: shll $16, %eax
; AVXNC-NEXT: vmovd %eax, %xmm3
; AVXNC-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
; AVXNC-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm1[0]
; AVXNC-NEXT: vpextrw $5, %xmm0, %eax
; AVXNC-NEXT: shll $16, %eax
; AVXNC-NEXT: vmovd %eax, %xmm3
; AVXNC-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
; AVXNC-NEXT: vpextrw $4, %xmm0, %eax
; AVXNC-NEXT: shll $16, %eax
; AVXNC-NEXT: vmovd %eax, %xmm0
; AVXNC-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVXNC-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
; AVXNC-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVXNC-NEXT: vmovaps %ymm2, %ymm0
; AVXNC-NEXT: retq
%b = fpext <8 x bfloat> %a to <8 x double>
ret <8 x double> %b
}

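; The fptrunc_* tests exercise truncation to bfloat. Single-precision sources
; convert with vcvtneps2bf16 when AVX512BF16/AVXNECONVERT is available and
; otherwise fall back to one __truncsfbf2 libcall per element; double-precision
; sources always go element by element through the __truncdfbf2 libcall.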
define <4 x bfloat> @fptrunc_v4f32(<4 x float> %a) nounwind {
; X86-LABEL: fptrunc_v4f32:
; X86: # %bb.0:
; X86-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X86-NEXT: vcvtneps2bf16 %ymm0, %xmm0
; X86-NEXT: vzeroupper
; X86-NEXT: retl
;
; SSE2-LABEL: fptrunc_v4f32:
; SSE2: # %bb.0:
; SSE2-NEXT: subq $72, %rsp
; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: addq $72, %rsp
; SSE2-NEXT: retq
;
; F16-LABEL: fptrunc_v4f32:
; F16: # %bb.0:
; F16-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; F16-NEXT: vcvtneps2bf16 %ymm0, %xmm0
; F16-NEXT: vzeroupper
; F16-NEXT: retq
;
; AVXNC-LABEL: fptrunc_v4f32:
; AVXNC: # %bb.0:
; AVXNC-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVXNC-NEXT: {vex} vcvtneps2bf16 %ymm0, %xmm0
; AVXNC-NEXT: vzeroupper
; AVXNC-NEXT: retq
%b = fptrunc <4 x float> %a to <4 x bfloat>
ret <4 x bfloat> %b
}

define <8 x bfloat> @fptrunc_v8f32(<8 x float> %a) nounwind {
; X86-LABEL: fptrunc_v8f32:
; X86: # %bb.0:
; X86-NEXT: vcvtneps2bf16 %ymm0, %xmm0
; X86-NEXT: vzeroupper
; X86-NEXT: retl
;
; SSE2-LABEL: fptrunc_v8f32:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %r14
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: subq $32, %rsp
; SSE2-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebx
; SSE2-NEXT: shll $16, %ebx
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %r14d
; SSE2-NEXT: orl %ebx, %r14d
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebp
; SSE2-NEXT: shll $16, %ebp
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %ebx
; SSE2-NEXT: orl %ebp, %ebx
; SSE2-NEXT: shlq $32, %rbx
; SSE2-NEXT: orq %r14, %rbx
; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebp
; SSE2-NEXT: shll $16, %ebp
; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %r14d
; SSE2-NEXT: orl %ebp, %r14d
; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebp
; SSE2-NEXT: shll $16, %ebp
; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %eax
; SSE2-NEXT: orl %ebp, %eax
; SSE2-NEXT: shlq $32, %rax
; SSE2-NEXT: orq %r14, %rax
; SSE2-NEXT: movq %rax, %xmm1
; SSE2-NEXT: movq %rbx, %xmm0
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: addq $32, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %r14
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; F16-LABEL: fptrunc_v8f32:
; F16: # %bb.0:
; F16-NEXT: vcvtneps2bf16 %ymm0, %xmm0
; F16-NEXT: vzeroupper
; F16-NEXT: retq
;
; AVXNC-LABEL: fptrunc_v8f32:
; AVXNC: # %bb.0:
; AVXNC-NEXT: {vex} vcvtneps2bf16 %ymm0, %xmm0
; AVXNC-NEXT: vzeroupper
; AVXNC-NEXT: retq
%b = fptrunc <8 x float> %a to <8 x bfloat>
ret <8 x bfloat> %b
}

define <16 x bfloat> @fptrunc_v16f32(<16 x float> %a) nounwind {
; X86-LABEL: fptrunc_v16f32:
; X86: # %bb.0:
; X86-NEXT: vcvtneps2bf16 %zmm0, %ymm0
; X86-NEXT: retl
;
; SSE2-LABEL: fptrunc_v16f32:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %r15
; SSE2-NEXT: pushq %r14
; SSE2-NEXT: pushq %r12
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: subq $64, %rsp
; SSE2-NEXT: movaps %xmm3, (%rsp) # 16-byte Spill
; SSE2-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm2[1,1]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebx
; SSE2-NEXT: shll $16, %ebx
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %r14d
; SSE2-NEXT: orl %ebx, %r14d
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebp
; SSE2-NEXT: shll $16, %ebp
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %ebx
; SSE2-NEXT: orl %ebp, %ebx
; SSE2-NEXT: shlq $32, %rbx
; SSE2-NEXT: orq %r14, %rbx
; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebp
; SSE2-NEXT: shll $16, %ebp
; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %r15d
; SSE2-NEXT: orl %ebp, %r15d
; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebp
; SSE2-NEXT: shll $16, %ebp
; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %r14d
; SSE2-NEXT: orl %ebp, %r14d
; SSE2-NEXT: shlq $32, %r14
; SSE2-NEXT: orq %r15, %r14
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebp
; SSE2-NEXT: shll $16, %ebp
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %r12d
; SSE2-NEXT: orl %ebp, %r12d
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebp
; SSE2-NEXT: shll $16, %ebp
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %r15d
; SSE2-NEXT: orl %ebp, %r15d
; SSE2-NEXT: shlq $32, %r15
; SSE2-NEXT: orq %r12, %r15
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebp
; SSE2-NEXT: shll $16, %ebp
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %r12d
; SSE2-NEXT: orl %ebp, %r12d
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebp
; SSE2-NEXT: shll $16, %ebp
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %eax
; SSE2-NEXT: orl %ebp, %eax
; SSE2-NEXT: shlq $32, %rax
; SSE2-NEXT: orq %r12, %rax
; SSE2-NEXT: movq %rax, %xmm1
; SSE2-NEXT: movq %r15, %xmm0
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: movq %r14, %xmm2
; SSE2-NEXT: movq %rbx, %xmm1
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: addq $64, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %r12
; SSE2-NEXT: popq %r14
; SSE2-NEXT: popq %r15
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; F16-LABEL: fptrunc_v16f32:
; F16: # %bb.0:
; F16-NEXT: vcvtneps2bf16 %zmm0, %ymm0
; F16-NEXT: retq
;
; AVXNC-LABEL: fptrunc_v16f32:
; AVXNC: # %bb.0:
; AVXNC-NEXT: {vex} vcvtneps2bf16 %ymm0, %xmm0
; AVXNC-NEXT: vinsertf128 $0, %xmm0, %ymm0, %ymm0
; AVXNC-NEXT: {vex} vcvtneps2bf16 %ymm1, %xmm1
; AVXNC-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVXNC-NEXT: retq
%b = fptrunc <16 x float> %a to <16 x bfloat>
ret <16 x bfloat> %b
}

define <8 x bfloat> @fptrunc_v8f64(<8 x double> %a) nounwind {
; X86-LABEL: fptrunc_v8f64:
; X86: # %bb.0:
; X86-NEXT: subl $204, %esp
; X86-NEXT: vmovups %zmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 64-byte Spill
; X86-NEXT: vextractf128 $1, %ymm0, %xmm0
; X86-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: vmovlps %xmm0, (%esp)
; X86-NEXT: vzeroupper
; X86-NEXT: calll __truncdfbf2
; X86-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT: vmovhps %xmm0, (%esp)
; X86-NEXT: calll __truncdfbf2
; X86-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %zmm0 # 64-byte Reload
; X86-NEXT: vmovlps %xmm0, (%esp)
; X86-NEXT: vzeroupper
; X86-NEXT: calll __truncdfbf2
; X86-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %zmm0 # 64-byte Reload
; X86-NEXT: vmovhps %xmm0, (%esp)
; X86-NEXT: vzeroupper
; X86-NEXT: calll __truncdfbf2
; X86-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %zmm0 # 64-byte Reload
; X86-NEXT: vextractf32x4 $2, %zmm0, %xmm0
; X86-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: vmovlps %xmm0, (%esp)
; X86-NEXT: vzeroupper
; X86-NEXT: calll __truncdfbf2
; X86-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT: vmovhps %xmm0, (%esp)
; X86-NEXT: calll __truncdfbf2
; X86-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %zmm0 # 64-byte Reload
; X86-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; X86-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: vmovlps %xmm0, (%esp)
; X86-NEXT: vzeroupper
; X86-NEXT: calll __truncdfbf2
; X86-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT: vmovhps %xmm0, (%esp)
; X86-NEXT: calll __truncdfbf2
; X86-NEXT: vmovdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
; X86-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X86-NEXT: vmovdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
; X86-NEXT: vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; X86-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
; X86-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X86-NEXT: vmovdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
; X86-NEXT: vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; X86-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
; X86-NEXT: vmovdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
; X86-NEXT: vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
; X86-NEXT: # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
; X86-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X86-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; X86-NEXT: addl $204, %esp
; X86-NEXT: retl
;
; SSE2-LABEL: fptrunc_v8f64:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %r14
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: subq $64, %rsp
; SSE2-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq __truncdfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebx
; SSE2-NEXT: shll $16, %ebx
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __truncdfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %r14d
; SSE2-NEXT: orl %ebx, %r14d
; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq __truncdfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebp
; SSE2-NEXT: shll $16, %ebp
; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __truncdfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %ebx
; SSE2-NEXT: orl %ebp, %ebx
; SSE2-NEXT: shlq $32, %rbx
; SSE2-NEXT: orq %r14, %rbx
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq __truncdfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebp
; SSE2-NEXT: shll $16, %ebp
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __truncdfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %r14d
; SSE2-NEXT: orl %ebp, %r14d
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq __truncdfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %ebp
; SSE2-NEXT: shll $16, %ebp
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __truncdfbf2@PLT
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %eax
; SSE2-NEXT: orl %ebp, %eax
; SSE2-NEXT: shlq $32, %rax
; SSE2-NEXT: orq %r14, %rax
; SSE2-NEXT: movq %rax, %xmm1
; SSE2-NEXT: movq %rbx, %xmm0
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: addq $64, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %r14
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; FP16-LABEL: fptrunc_v8f64:
; FP16: # %bb.0:
; FP16-NEXT: subq $184, %rsp
; FP16-NEXT: vmovupd %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; FP16-NEXT: vextractf128 $1, %ymm0, %xmm0
; FP16-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; FP16-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; FP16-NEXT: vzeroupper
; FP16-NEXT: callq __truncdfbf2@PLT
; FP16-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; FP16-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; FP16-NEXT: callq __truncdfbf2@PLT
; FP16-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; FP16-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; FP16-NEXT: # xmm0 = mem[1,0]
; FP16-NEXT: callq __truncdfbf2@PLT
; FP16-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; FP16-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; FP16-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; FP16-NEXT: vzeroupper
; FP16-NEXT: callq __truncdfbf2@PLT
; FP16-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; FP16-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; FP16-NEXT: vextractf32x4 $2, %zmm0, %xmm0
; FP16-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; FP16-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; FP16-NEXT: vzeroupper
; FP16-NEXT: callq __truncdfbf2@PLT
; FP16-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; FP16-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; FP16-NEXT: callq __truncdfbf2@PLT
; FP16-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; FP16-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; FP16-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; FP16-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; FP16-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; FP16-NEXT: vzeroupper
; FP16-NEXT: callq __truncdfbf2@PLT
; FP16-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; FP16-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; FP16-NEXT: callq __truncdfbf2@PLT
; FP16-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; FP16-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; FP16-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; FP16-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; FP16-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
; FP16-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; FP16-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; FP16-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; FP16-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
; FP16-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; FP16-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
; FP16-NEXT: # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
; FP16-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; FP16-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; FP16-NEXT: addq $184, %rsp
; FP16-NEXT: retq
;
; AVXNC-LABEL: fptrunc_v8f64:
; AVXNC: # %bb.0:
; AVXNC-NEXT: pushq %rbp
; AVXNC-NEXT: pushq %r15
; AVXNC-NEXT: pushq %r14
; AVXNC-NEXT: pushq %r13
; AVXNC-NEXT: pushq %r12
; AVXNC-NEXT: pushq %rbx
; AVXNC-NEXT: subq $168, %rsp
; AVXNC-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVXNC-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVXNC-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVXNC-NEXT: vzeroupper
; AVXNC-NEXT: callq __truncdfbf2@PLT
; AVXNC-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVXNC-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVXNC-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVXNC-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVXNC-NEXT: vzeroupper
; AVXNC-NEXT: callq __truncdfbf2@PLT
; AVXNC-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVXNC-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVXNC-NEXT: # xmm0 = mem[1,0]
; AVXNC-NEXT: callq __truncdfbf2@PLT
; AVXNC-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVXNC-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVXNC-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVXNC-NEXT: vzeroupper
; AVXNC-NEXT: callq __truncdfbf2@PLT
; AVXNC-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVXNC-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVXNC-NEXT: # xmm0 = mem[1,0]
; AVXNC-NEXT: callq __truncdfbf2@PLT
; AVXNC-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVXNC-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVXNC-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVXNC-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVXNC-NEXT: vzeroupper
; AVXNC-NEXT: callq __truncdfbf2@PLT
; AVXNC-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVXNC-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVXNC-NEXT: # xmm0 = mem[1,0]
; AVXNC-NEXT: callq __truncdfbf2@PLT
; AVXNC-NEXT: vpextrw $0, %xmm0, %eax
; AVXNC-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; AVXNC-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVXNC-NEXT: vpextrw $0, %xmm0, %ebp
; AVXNC-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVXNC-NEXT: vpextrw $0, %xmm0, %r14d
; AVXNC-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVXNC-NEXT: vpextrw $0, %xmm0, %r15d
; AVXNC-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVXNC-NEXT: vpextrw $0, %xmm0, %r12d
; AVXNC-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVXNC-NEXT: vpextrw $0, %xmm0, %r13d
; AVXNC-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVXNC-NEXT: vpextrw $0, %xmm0, %ebx
; AVXNC-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVXNC-NEXT: # xmm0 = mem[1,0]
; AVXNC-NEXT: callq __truncdfbf2@PLT
; AVXNC-NEXT: vpextrw $0, %xmm0, %eax
; AVXNC-NEXT: vmovd %ebx, %xmm0
; AVXNC-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
; AVXNC-NEXT: vpinsrw $2, %r13d, %xmm0, %xmm0
; AVXNC-NEXT: vpinsrw $3, %r12d, %xmm0, %xmm0
; AVXNC-NEXT: vpinsrw $4, %r15d, %xmm0, %xmm0
; AVXNC-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0
; AVXNC-NEXT: vpinsrw $6, %ebp, %xmm0, %xmm0
; AVXNC-NEXT: vpinsrw $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
; AVXNC-NEXT: addq $168, %rsp
; AVXNC-NEXT: popq %rbx
; AVXNC-NEXT: popq %r12
; AVXNC-NEXT: popq %r13
; AVXNC-NEXT: popq %r14
; AVXNC-NEXT: popq %r15
; AVXNC-NEXT: popq %rbp
; AVXNC-NEXT: retq
%b = fptrunc <8 x double> %a to <8 x bfloat>
ret <8 x bfloat> %b
}

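; The remaining tests cover pure data movement on bfloat vectors (splat,
; concat, extract, concat-with-zero); these should lower to shuffles and
; moves with no conversion instructions at all.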
define <32 x bfloat> @test_v8bf16_v32bf16(ptr %0) {
; X86-LABEL: test_v8bf16_v32bf16:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; X86-NEXT: retl
;
; SSE2-LABEL: test_v8bf16_v32bf16:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: movaps %xmm0, %xmm3
; SSE2-NEXT: retq
;
; F16-LABEL: test_v8bf16_v32bf16:
; F16: # %bb.0:
; F16-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; F16-NEXT: retq
;
; AVXNC-LABEL: test_v8bf16_v32bf16:
; AVXNC: # %bb.0:
; AVXNC-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; AVXNC-NEXT: vmovaps %ymm0, %ymm1
; AVXNC-NEXT: retq
%2 = load <8 x bfloat>, ptr %0, align 16
%3 = shufflevector <8 x bfloat> %2, <8 x bfloat> %2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
ret <32 x bfloat> %3
}

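; concat_v8bf16: a plain vinsertf128 on AVX targets; on SSE2 the two source
; registers already sit in the xmm0/xmm1 return slots, so nothing is emitted.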
define <16 x bfloat> @concat_v8bf16(<8 x bfloat> %x, <8 x bfloat> %y) {
; X86-LABEL: concat_v8bf16:
; X86: # %bb.0:
; X86-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X86-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X86-NEXT: retl
;
; SSE2-LABEL: concat_v8bf16:
; SSE2: # %bb.0:
; SSE2-NEXT: retq
;
; AVX-LABEL: concat_v8bf16:
; AVX: # %bb.0:
; AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: retq
%a = shufflevector <8 x bfloat> %x, <8 x bfloat> %y, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
ret <16 x bfloat> %a
}

define <8 x bfloat> @extract_v32bf16_v8bf16(<32 x bfloat> %x) {
; X86-LABEL: extract_v32bf16_v8bf16:
; X86: # %bb.0:
; X86-NEXT: vextractf128 $1, %ymm0, %xmm0
; X86-NEXT: vzeroupper
; X86-NEXT: retl
;
; SSE2-LABEL: extract_v32bf16_v8bf16:
; SSE2: # %bb.0:
; SSE2-NEXT: pextrw $0, %xmm1, %eax
; SSE2-NEXT: pextrw $1, %xmm1, %ecx
; SSE2-NEXT: shll $16, %ecx
; SSE2-NEXT: orl %eax, %ecx
; SSE2-NEXT: pextrw $2, %xmm1, %eax
; SSE2-NEXT: pextrw $3, %xmm1, %edx
; SSE2-NEXT: shll $16, %edx
; SSE2-NEXT: orl %eax, %edx
; SSE2-NEXT: shlq $32, %rdx
; SSE2-NEXT: orq %rcx, %rdx
; SSE2-NEXT: pextrw $4, %xmm1, %eax
; SSE2-NEXT: pextrw $5, %xmm1, %ecx
; SSE2-NEXT: shll $16, %ecx
; SSE2-NEXT: orl %eax, %ecx
; SSE2-NEXT: pextrw $6, %xmm1, %eax
; SSE2-NEXT: pextrw $7, %xmm1, %esi
; SSE2-NEXT: shll $16, %esi
; SSE2-NEXT: orl %eax, %esi
; SSE2-NEXT: shlq $32, %rsi
; SSE2-NEXT: orq %rcx, %rsi
; SSE2-NEXT: movq %rsi, %xmm1
; SSE2-NEXT: movq %rdx, %xmm0
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; AVX-LABEL: extract_v32bf16_v8bf16:
; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%a = shufflevector <32 x bfloat> %x, <32 x bfloat> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
ret <8 x bfloat> %a
}

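; concat_zero_v8bf16: on AVX a VEX-encoded xmm-to-xmm move implicitly zeroes
; the upper ymm bits, so one vmovaps suffices; SSE2 zeroes the second return
; register instead.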
define <16 x bfloat> @concat_zero_v8bf16(<8 x bfloat> %x, <8 x bfloat> %y) {
; X86-LABEL: concat_zero_v8bf16:
; X86: # %bb.0:
; X86-NEXT: vmovaps %xmm0, %xmm0
; X86-NEXT: retl
;
; SSE2-LABEL: concat_zero_v8bf16:
; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: retq
;
; AVX-LABEL: concat_zero_v8bf16:
; AVX: # %bb.0:
; AVX-NEXT: vmovaps %xmm0, %xmm0
; AVX-NEXT: retq
%a = shufflevector <8 x bfloat> %x, <8 x bfloat> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
ret <16 x bfloat> %a