; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW

define <16 x i32> @_inreg16xi32(i32 %a) {
; ALL-LABEL: _inreg16xi32:
; ALL:       # BB#0:
; ALL-NEXT:    vpbroadcastd %edi, %zmm0
; ALL-NEXT:    retq
  %b = insertelement <16 x i32> undef, i32 %a, i32 0
  %c = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
  ret <16 x i32> %c
}

define <8 x i64> @_inreg8xi64(i64 %a) {
; ALL-LABEL: _inreg8xi64:
; ALL:       # BB#0:
; ALL-NEXT:    vpbroadcastq %rdi, %zmm0
; ALL-NEXT:    retq
  %b = insertelement <8 x i64> undef, i64 %a, i32 0
  %c = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
  ret <8 x i64> %c
}

define <16 x float> @_ss16xfloat_v4(<4 x float> %a) {
; ALL-LABEL: _ss16xfloat_v4:
; ALL:       # BB#0:
; ALL-NEXT:    vbroadcastss %xmm0, %zmm0
; ALL-NEXT:    retq
  %b = shufflevector <4 x float> %a, <4 x float> undef, <16 x i32> zeroinitializer
  ret <16 x float> %b
}

define <16 x float> @_inreg16xfloat(float %a) {
; ALL-LABEL: _inreg16xfloat:
; ALL:       # BB#0:
; ALL-NEXT:    vbroadcastss %xmm0, %zmm0
; ALL-NEXT:    retq
  %b = insertelement <16 x float> undef, float %a, i32 0
  %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
  ret <16 x float> %c
}

define <16 x float> @_ss16xfloat_mask(float %a, <16 x float> %i, <16 x i32> %mask1) {
; ALL-LABEL: _ss16xfloat_mask:
; ALL:       # BB#0:
; ALL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; ALL-NEXT:    vpcmpneqd %zmm3, %zmm2, %k1
; ALL-NEXT:    vbroadcastss %xmm0, %zmm1 {%k1}
; ALL-NEXT:    vmovaps %zmm1, %zmm0
; ALL-NEXT:    retq
  %mask = icmp ne <16 x i32> %mask1, zeroinitializer
  %b = insertelement <16 x float> undef, float %a, i32 0
  %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
  %r = select <16 x i1> %mask, <16 x float> %c, <16 x float> %i
  ret <16 x float> %r
}

define <16 x float> @_ss16xfloat_maskz(float %a, <16 x i32> %mask1) {
; ALL-LABEL: _ss16xfloat_maskz:
; ALL:       # BB#0:
; ALL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; ALL-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
; ALL-NEXT:    vbroadcastss %xmm0, %zmm0 {%k1} {z}
; ALL-NEXT:    retq
  %mask = icmp ne <16 x i32> %mask1, zeroinitializer
  %b = insertelement <16 x float> undef, float %a, i32 0
  %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
  %r = select <16 x i1> %mask, <16 x float> %c, <16 x float> zeroinitializer
  ret <16 x float> %r
}

define <16 x float> @_ss16xfloat_load(float* %a.ptr) {
; ALL-LABEL: _ss16xfloat_load:
; ALL:       # BB#0:
; ALL-NEXT:    vbroadcastss (%rdi), %zmm0
; ALL-NEXT:    retq
  %a = load float, float* %a.ptr
  %b = insertelement <16 x float> undef, float %a, i32 0
  %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
  ret <16 x float> %c
}

define <16 x float> @_ss16xfloat_mask_load(float* %a.ptr, <16 x float> %i, <16 x i32> %mask1) {
; ALL-LABEL: _ss16xfloat_mask_load:
; ALL:       # BB#0:
; ALL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; ALL-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
; ALL-NEXT:    vbroadcastss (%rdi), %zmm0 {%k1}
; ALL-NEXT:    retq
  %a = load float, float* %a.ptr
  %mask = icmp ne <16 x i32> %mask1, zeroinitializer
  %b = insertelement <16 x float> undef, float %a, i32 0
  %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
  %r = select <16 x i1> %mask, <16 x float> %c, <16 x float> %i
  ret <16 x float> %r
}

define <16 x float> @_ss16xfloat_maskz_load(float* %a.ptr, <16 x i32> %mask1) {
; ALL-LABEL: _ss16xfloat_maskz_load:
; ALL:       # BB#0:
; ALL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; ALL-NEXT:    vpcmpneqd %zmm1, %zmm0, %k1
; ALL-NEXT:    vbroadcastss (%rdi), %zmm0 {%k1} {z}
; ALL-NEXT:    retq
  %a = load float, float* %a.ptr
  %mask = icmp ne <16 x i32> %mask1, zeroinitializer
  %b = insertelement <16 x float> undef, float %a, i32 0
  %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
  %r = select <16 x i1> %mask, <16 x float> %c, <16 x float> zeroinitializer
  ret <16 x float> %r
}

define <8 x double> @_inreg8xdouble(double %a) {
; ALL-LABEL: _inreg8xdouble:
; ALL:       # BB#0:
; ALL-NEXT:    vbroadcastsd %xmm0, %zmm0
; ALL-NEXT:    retq
  %b = insertelement <8 x double> undef, double %a, i32 0
  %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
  ret <8 x double> %c
}

define <8 x double> @_sd8xdouble_mask(double %a, <8 x double> %i, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_mask:
; ALL:       # BB#0:
; ALL-NEXT:    # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
; ALL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; ALL-NEXT:    vpcmpneqd %zmm3, %zmm2, %k1
; ALL-NEXT:    vbroadcastsd %xmm0, %zmm1 {%k1}
; ALL-NEXT:    vmovapd %zmm1, %zmm0
; ALL-NEXT:    retq
  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
  %b = insertelement <8 x double> undef, double %a, i32 0
  %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
  %r = select <8 x i1> %mask, <8 x double> %c, <8 x double> %i
  ret <8 x double> %r
}

define <8 x double> @_sd8xdouble_maskz(double %a, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_maskz:
; ALL:       # BB#0:
; ALL-NEXT:    # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; ALL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; ALL-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
; ALL-NEXT:    vbroadcastsd %xmm0, %zmm0 {%k1} {z}
; ALL-NEXT:    retq
  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
  %b = insertelement <8 x double> undef, double %a, i32 0
  %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
  %r = select <8 x i1> %mask, <8 x double> %c, <8 x double> zeroinitializer
  ret <8 x double> %r
}

define <8 x double> @_sd8xdouble_load(double* %a.ptr) {
; ALL-LABEL: _sd8xdouble_load:
; ALL:       # BB#0:
; ALL-NEXT:    vbroadcastsd (%rdi), %zmm0
; ALL-NEXT:    retq
  %a = load double, double* %a.ptr
  %b = insertelement <8 x double> undef, double %a, i32 0
  %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
  ret <8 x double> %c
}

define <8 x double> @_sd8xdouble_mask_load(double* %a.ptr, <8 x double> %i, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_mask_load:
; ALL:       # BB#0:
; ALL-NEXT:    # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; ALL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; ALL-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
; ALL-NEXT:    vbroadcastsd (%rdi), %zmm0 {%k1}
; ALL-NEXT:    retq
  %a = load double, double* %a.ptr
  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
  %b = insertelement <8 x double> undef, double %a, i32 0
  %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
  %r = select <8 x i1> %mask, <8 x double> %c, <8 x double> %i
  ret <8 x double> %r
}

define <8 x double> @_sd8xdouble_maskz_load(double* %a.ptr, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_maskz_load:
; ALL:       # BB#0:
; ALL-NEXT:    # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; ALL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; ALL-NEXT:    vpcmpneqd %zmm1, %zmm0, %k1
; ALL-NEXT:    vbroadcastsd (%rdi), %zmm0 {%k1} {z}
; ALL-NEXT:    retq
  %a = load double, double* %a.ptr
  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
  %b = insertelement <8 x double> undef, double %a, i32 0
  %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
  %r = select <8 x i1> %mask, <8 x double> %c, <8 x double> zeroinitializer
  ret <8 x double> %r
}

define <16 x i32> @_xmm16xi32(<16 x i32> %a) {
; ALL-LABEL: _xmm16xi32:
; ALL:       # BB#0:
; ALL-NEXT:    vbroadcastss %xmm0, %zmm0
; ALL-NEXT:    retq
  %b = shufflevector <16 x i32> %a, <16 x i32> undef, <16 x i32> zeroinitializer
  ret <16 x i32> %b
}

define <16 x float> @_xmm16xfloat(<16 x float> %a) {
; ALL-LABEL: _xmm16xfloat:
; ALL:       # BB#0:
; ALL-NEXT:    vbroadcastss %xmm0, %zmm0
; ALL-NEXT:    retq
  %b = shufflevector <16 x float> %a, <16 x float> undef, <16 x i32> zeroinitializer
  ret <16 x float> %b
}

define <16 x i32> @test_vbroadcast() {
; ALL-LABEL: test_vbroadcast:
; ALL:       # BB#0: # %entry
; ALL-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; ALL-NEXT:    vcmpunordps %zmm0, %zmm0, %k1
; ALL-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; ALL-NEXT:    knotw %k1, %k1
; ALL-NEXT:    vmovdqa32 %zmm0, %zmm0 {%k1} {z}
; ALL-NEXT:    retq
entry:
  %0 = sext <16 x i1> zeroinitializer to <16 x i32>
  %1 = fcmp uno <16 x float> undef, zeroinitializer
  %2 = sext <16 x i1> %1 to <16 x i32>
  %3 = select <16 x i1> %1, <16 x i32> %0, <16 x i32> %2
  ret <16 x i32> %3
}

; We implement the set1 intrinsics with vector initializers. Verify that
; codegen collapses the element-by-element initializer IR below into a
; single broadcast.
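;
; For reference only (not exercised by FileCheck): C source along these
; lines, built with -mavx512f, is what front ends typically lower into the
; vector-initializer IR below. The wrapper names are illustrative and the
; exact IR varies by compiler version.
;   #include <immintrin.h>
;   __m512d splat_pd(double d)       { return _mm512_set1_pd(d); }
;   __m512i splat_epi64(long long d) { return _mm512_set1_epi64(d); }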
define <8 x double> @test_set1_pd(double %d) #2 {
; ALL-LABEL: test_set1_pd:
; ALL:       # BB#0: # %entry
; ALL-NEXT:    vbroadcastsd %xmm0, %zmm0
; ALL-NEXT:    retq
entry:
  %vecinit.i = insertelement <8 x double> undef, double %d, i32 0
  %vecinit1.i = insertelement <8 x double> %vecinit.i, double %d, i32 1
  %vecinit2.i = insertelement <8 x double> %vecinit1.i, double %d, i32 2
  %vecinit3.i = insertelement <8 x double> %vecinit2.i, double %d, i32 3
  %vecinit4.i = insertelement <8 x double> %vecinit3.i, double %d, i32 4
  %vecinit5.i = insertelement <8 x double> %vecinit4.i, double %d, i32 5
  %vecinit6.i = insertelement <8 x double> %vecinit5.i, double %d, i32 6
  %vecinit7.i = insertelement <8 x double> %vecinit6.i, double %d, i32 7
  ret <8 x double> %vecinit7.i
}

define <8 x i64> @test_set1_epi64(i64 %d) #2 {
; ALL-LABEL: test_set1_epi64:
; ALL:       # BB#0: # %entry
; ALL-NEXT:    vpbroadcastq %rdi, %zmm0
; ALL-NEXT:    retq
entry:
  %vecinit.i = insertelement <8 x i64> undef, i64 %d, i32 0
  %vecinit1.i = insertelement <8 x i64> %vecinit.i, i64 %d, i32 1
  %vecinit2.i = insertelement <8 x i64> %vecinit1.i, i64 %d, i32 2
  %vecinit3.i = insertelement <8 x i64> %vecinit2.i, i64 %d, i32 3
  %vecinit4.i = insertelement <8 x i64> %vecinit3.i, i64 %d, i32 4
  %vecinit5.i = insertelement <8 x i64> %vecinit4.i, i64 %d, i32 5
  %vecinit6.i = insertelement <8 x i64> %vecinit5.i, i64 %d, i32 6
  %vecinit7.i = insertelement <8 x i64> %vecinit6.i, i64 %d, i32 7
  ret <8 x i64> %vecinit7.i
}

define <16 x float> @test_set1_ps(float %f) #2 {
; ALL-LABEL: test_set1_ps:
; ALL:       # BB#0: # %entry
; ALL-NEXT:    vbroadcastss %xmm0, %zmm0
; ALL-NEXT:    retq
entry:
  %vecinit.i = insertelement <16 x float> undef, float %f, i32 0
  %vecinit1.i = insertelement <16 x float> %vecinit.i, float %f, i32 1
  %vecinit2.i = insertelement <16 x float> %vecinit1.i, float %f, i32 2
  %vecinit3.i = insertelement <16 x float> %vecinit2.i, float %f, i32 3
  %vecinit4.i = insertelement <16 x float> %vecinit3.i, float %f, i32 4
  %vecinit5.i = insertelement <16 x float> %vecinit4.i, float %f, i32 5
  %vecinit6.i = insertelement <16 x float> %vecinit5.i, float %f, i32 6
  %vecinit7.i = insertelement <16 x float> %vecinit6.i, float %f, i32 7
  %vecinit8.i = insertelement <16 x float> %vecinit7.i, float %f, i32 8
  %vecinit9.i = insertelement <16 x float> %vecinit8.i, float %f, i32 9
  %vecinit10.i = insertelement <16 x float> %vecinit9.i, float %f, i32 10
  %vecinit11.i = insertelement <16 x float> %vecinit10.i, float %f, i32 11
  %vecinit12.i = insertelement <16 x float> %vecinit11.i, float %f, i32 12
  %vecinit13.i = insertelement <16 x float> %vecinit12.i, float %f, i32 13
  %vecinit14.i = insertelement <16 x float> %vecinit13.i, float %f, i32 14
  %vecinit15.i = insertelement <16 x float> %vecinit14.i, float %f, i32 15
  ret <16 x float> %vecinit15.i
}

define <16 x i32> @test_set1_epi32(i32 %f) #2 {
; ALL-LABEL: test_set1_epi32:
; ALL:       # BB#0: # %entry
; ALL-NEXT:    vpbroadcastd %edi, %zmm0
; ALL-NEXT:    retq
entry:
  %vecinit.i = insertelement <16 x i32> undef, i32 %f, i32 0
  %vecinit1.i = insertelement <16 x i32> %vecinit.i, i32 %f, i32 1
  %vecinit2.i = insertelement <16 x i32> %vecinit1.i, i32 %f, i32 2
  %vecinit3.i = insertelement <16 x i32> %vecinit2.i, i32 %f, i32 3
  %vecinit4.i = insertelement <16 x i32> %vecinit3.i, i32 %f, i32 4
  %vecinit5.i = insertelement <16 x i32> %vecinit4.i, i32 %f, i32 5
  %vecinit6.i = insertelement <16 x i32> %vecinit5.i, i32 %f, i32 6
  %vecinit7.i = insertelement <16 x i32> %vecinit6.i, i32 %f, i32 7
  %vecinit8.i = insertelement <16 x i32> %vecinit7.i, i32 %f, i32 8
  %vecinit9.i = insertelement <16 x i32> %vecinit8.i, i32 %f, i32 9
  %vecinit10.i = insertelement <16 x i32> %vecinit9.i, i32 %f, i32 10
  %vecinit11.i = insertelement <16 x i32> %vecinit10.i, i32 %f, i32 11
  %vecinit12.i = insertelement <16 x i32> %vecinit11.i, i32 %f, i32 12
  %vecinit13.i = insertelement <16 x i32> %vecinit12.i, i32 %f, i32 13
  %vecinit14.i = insertelement <16 x i32> %vecinit13.i, i32 %f, i32 14
  %vecinit15.i = insertelement <16 x i32> %vecinit14.i, i32 %f, i32 15
  ret <16 x i32> %vecinit15.i
}

; We implement the scalar broadcast intrinsics with vector initializers.
; Verify that codegen collapses the initializer IR below into a single
; broadcast of element 0.
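;
; A minimal sketch, assuming the standard immintrin.h intrinsics: C source
; such as
;   #include <immintrin.h>
;   __m512d splat_lane(__m128d a) { return _mm512_broadcastsd_pd(a); }
; is what typically lowers to the extractelement-plus-insertelement chain
; tested below; the wrapper name is illustrative only.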
define <8 x double> @test_mm512_broadcastsd_pd(<2 x double> %a) {
; ALL-LABEL: test_mm512_broadcastsd_pd:
; ALL:       # BB#0: # %entry
; ALL-NEXT:    vbroadcastsd %xmm0, %zmm0
; ALL-NEXT:    retq
entry:
  %0 = extractelement <2 x double> %a, i32 0
  %vecinit.i = insertelement <8 x double> undef, double %0, i32 0
  %vecinit1.i = insertelement <8 x double> %vecinit.i, double %0, i32 1
  %vecinit2.i = insertelement <8 x double> %vecinit1.i, double %0, i32 2
  %vecinit3.i = insertelement <8 x double> %vecinit2.i, double %0, i32 3
  %vecinit4.i = insertelement <8 x double> %vecinit3.i, double %0, i32 4
  %vecinit5.i = insertelement <8 x double> %vecinit4.i, double %0, i32 5
  %vecinit6.i = insertelement <8 x double> %vecinit5.i, double %0, i32 6
  %vecinit7.i = insertelement <8 x double> %vecinit6.i, double %0, i32 7
  ret <8 x double> %vecinit7.i
}

define <16 x float> @test1(<8 x float> %a) {
; ALL-LABEL: test1:
; ALL:       # BB#0:
; ALL-NEXT:    vbroadcastss %xmm0, %zmm0
; ALL-NEXT:    retq
  %res = shufflevector <8 x float> %a, <8 x float> undef, <16 x i32> zeroinitializer
  ret <16 x float> %res
}

define <8 x double> @test2(<4 x double> %a) {
; ALL-LABEL: test2:
; ALL:       # BB#0:
; ALL-NEXT:    vbroadcastsd %xmm0, %zmm0
; ALL-NEXT:    retq
  %res = shufflevector <4 x double> %a, <4 x double> undef, <8 x i32> zeroinitializer
  ret <8 x double> %res
}

define <64 x i8> @_invec32xi8(<32 x i8> %a) {
; AVX512F-LABEL: _invec32xi8:
; AVX512F:       # BB#0:
; AVX512F-NEXT:    vpbroadcastb %xmm0, %ymm0
; AVX512F-NEXT:    vmovdqa %ymm0, %ymm1
; AVX512F-NEXT:    retq
;
; AVX512BW-LABEL: _invec32xi8:
; AVX512BW:       # BB#0:
; AVX512BW-NEXT:    vpbroadcastb %xmm0, %zmm0
; AVX512BW-NEXT:    retq
  %res = shufflevector <32 x i8> %a, <32 x i8> undef, <64 x i32> zeroinitializer
  ret <64 x i8> %res
}

define <32 x i16> @_invec16xi16(<16 x i16> %a) {
; AVX512F-LABEL: _invec16xi16:
; AVX512F:       # BB#0:
; AVX512F-NEXT:    vpbroadcastw %xmm0, %ymm0
; AVX512F-NEXT:    vmovdqa %ymm0, %ymm1
; AVX512F-NEXT:    retq
;
; AVX512BW-LABEL: _invec16xi16:
; AVX512BW:       # BB#0:
; AVX512BW-NEXT:    vpbroadcastw %xmm0, %zmm0
; AVX512BW-NEXT:    retq
  %res = shufflevector <16 x i16> %a, <16 x i16> undef, <32 x i32> zeroinitializer
  ret <32 x i16> %res
}

define <16 x i32> @_invec8xi32(<8 x i32> %a) {
; ALL-LABEL: _invec8xi32:
; ALL:       # BB#0:
; ALL-NEXT:    vbroadcastss %xmm0, %zmm0
; ALL-NEXT:    retq
  %res = shufflevector <8 x i32> %a, <8 x i32> undef, <16 x i32> zeroinitializer
  ret <16 x i32> %res
}

define <8 x i64> @_invec4xi64(<4 x i64> %a) {
; ALL-LABEL: _invec4xi64:
; ALL:       # BB#0:
; ALL-NEXT:    vbroadcastsd %xmm0, %zmm0
; ALL-NEXT:    retq
  %res = shufflevector <4 x i64> %a, <4 x i64> undef, <8 x i32> zeroinitializer
  ret <8 x i64> %res
}

declare void @func_f32(float)
define <16 x float> @broadcast_ss_spill(float %x) {
; ALL-LABEL: broadcast_ss_spill:
; ALL:       # BB#0:
; ALL-NEXT:    subq $24, %rsp
; ALL-NEXT:    .cfi_def_cfa_offset 32
; ALL-NEXT:    vaddss %xmm0, %xmm0, %xmm0
; ALL-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; ALL-NEXT:    callq func_f32
; ALL-NEXT:    vbroadcastss (%rsp), %zmm0 # 16-byte Folded Reload
; ALL-NEXT:    addq $24, %rsp
; ALL-NEXT:    retq
  %a = fadd float %x, %x
  call void @func_f32(float %a)
  %b = insertelement <16 x float> undef, float %a, i32 0
  %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
  ret <16 x float> %c
}

declare void @func_f64(double)
define <8 x double> @broadcast_sd_spill(double %x) {
; ALL-LABEL: broadcast_sd_spill:
; ALL:       # BB#0:
; ALL-NEXT:    subq $24, %rsp
; ALL-NEXT:    .cfi_def_cfa_offset 32
; ALL-NEXT:    vaddsd %xmm0, %xmm0, %xmm0
; ALL-NEXT:    vmovapd %xmm0, (%rsp) # 16-byte Spill
; ALL-NEXT:    callq func_f64
; ALL-NEXT:    vbroadcastsd (%rsp), %zmm0 # 16-byte Folded Reload
; ALL-NEXT:    addq $24, %rsp
; ALL-NEXT:    retq
  %a = fadd double %x, %x
  call void @func_f64(double %a)
  %b = insertelement <8 x double> undef, double %a, i32 0
  %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
  ret <8 x double> %c
}