; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512cd,+avx512vl | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512cd,+avx512vl | FileCheck %s --check-prefixes=CHECK,X64
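; Check that the generic llvm.ctlz.* intrinsics and the llvm.x86.avx512.conflict.*
; intrinsics lower to vplzcnt/vpconflict, including merge- and zero-masked forms,
; on both 32-bit and 64-bit targets.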

define <4 x i32> @test_int_x86_avx512_mask_vplzcnt_d_128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2) {
; X86-LABEL: test_int_x86_avx512_mask_vplzcnt_d_128:
; X86:       # %bb.0:
; X86-NEXT:    vplzcntd %xmm0, %xmm2
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vplzcntd %xmm0, %xmm1 {%k1}
; X86-NEXT:    vplzcntd %xmm0, %xmm0 {%k1} {z}
; X86-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
; X86-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: test_int_x86_avx512_mask_vplzcnt_d_128:
; X64:       # %bb.0:
; X64-NEXT:    vplzcntd %xmm0, %xmm2
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vplzcntd %xmm0, %xmm1 {%k1}
; X64-NEXT:    vplzcntd %xmm0, %xmm0 {%k1} {z}
; X64-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
; X64-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
; X64-NEXT:    retq
  %1 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %x0, i1 false)
  %2 = bitcast i8 %x2 to <8 x i1>
  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %3 = select <4 x i1> %extract1, <4 x i32> %1, <4 x i32> %x1
  %4 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %x0, i1 false)
  %5 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %x0, i1 false)
  %6 = bitcast i8 %x2 to <8 x i1>
  %extract = shufflevector <8 x i1> %6, <8 x i1> %6, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %7 = select <4 x i1> %extract, <4 x i32> %5, <4 x i32> zeroinitializer
  %res2 = add <4 x i32> %3, %4
  %res4 = add <4 x i32> %res2, %7
  ret <4 x i32> %res4
}

declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) #0

define <8 x i32> @test_int_x86_avx512_mask_vplzcnt_d_256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2) {
; X86-LABEL: test_int_x86_avx512_mask_vplzcnt_d_256:
; X86:       # %bb.0:
; X86-NEXT:    vplzcntd %ymm0, %ymm2
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vplzcntd %ymm0, %ymm1 {%k1}
; X86-NEXT:    vpaddd %ymm2, %ymm1, %ymm0
; X86-NEXT:    retl
;
; X64-LABEL: test_int_x86_avx512_mask_vplzcnt_d_256:
; X64:       # %bb.0:
; X64-NEXT:    vplzcntd %ymm0, %ymm2
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vplzcntd %ymm0, %ymm1 {%k1}
; X64-NEXT:    vpaddd %ymm2, %ymm1, %ymm0
; X64-NEXT:    retq
  %1 = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %x0, i1 false)
  %2 = bitcast i8 %x2 to <8 x i1>
  %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x1
  %4 = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %x0, i1 false)
  %res2 = add <8 x i32> %3, %4
  ret <8 x i32> %res2
}

declare <8 x i32> @llvm.ctlz.v8i32(<8 x i32>, i1) #0

define <2 x i64> @test_int_x86_avx512_mask_vplzcnt_q_128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2) {
; X86-LABEL: test_int_x86_avx512_mask_vplzcnt_q_128:
; X86:       # %bb.0:
; X86-NEXT:    vplzcntq %xmm0, %xmm2
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vplzcntq %xmm0, %xmm1 {%k1}
; X86-NEXT:    vpaddq %xmm2, %xmm1, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: test_int_x86_avx512_mask_vplzcnt_q_128:
; X64:       # %bb.0:
; X64-NEXT:    vplzcntq %xmm0, %xmm2
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vplzcntq %xmm0, %xmm1 {%k1}
; X64-NEXT:    vpaddq %xmm2, %xmm1, %xmm0
; X64-NEXT:    retq
  %1 = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %x0, i1 false)
  %2 = bitcast i8 %x2 to <8 x i1>
  %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
  %3 = select <2 x i1> %extract, <2 x i64> %1, <2 x i64> %x1
  %4 = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %x0, i1 false)
  %res2 = add <2 x i64> %3, %4
  ret <2 x i64> %res2
}

declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1) #0

define <4 x i64> @test_int_x86_avx512_mask_vplzcnt_q_256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2) {
; X86-LABEL: test_int_x86_avx512_mask_vplzcnt_q_256:
; X86:       # %bb.0:
; X86-NEXT:    vplzcntq %ymm0, %ymm2
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vplzcntq %ymm0, %ymm1 {%k1}
; X86-NEXT:    vpaddq %ymm2, %ymm1, %ymm0
; X86-NEXT:    retl
;
; X64-LABEL: test_int_x86_avx512_mask_vplzcnt_q_256:
; X64:       # %bb.0:
; X64-NEXT:    vplzcntq %ymm0, %ymm2
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vplzcntq %ymm0, %ymm1 {%k1}
; X64-NEXT:    vpaddq %ymm2, %ymm1, %ymm0
; X64-NEXT:    retq
  %1 = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %x0, i1 false)
  %2 = bitcast i8 %x2 to <8 x i1>
  %extract = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %3 = select <4 x i1> %extract, <4 x i64> %1, <4 x i64> %x1
  %4 = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %x0, i1 false)
  %res2 = add <4 x i64> %3, %4
  ret <4 x i64> %res2
}

declare <4 x i64> @llvm.ctlz.v4i64(<4 x i64>, i1) #0

define <4 x i32> @test_int_x86_avx512_mask_vpconflict_d_128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2) {
; X86-LABEL: test_int_x86_avx512_mask_vpconflict_d_128:
; X86:       # %bb.0:
; X86-NEXT:    vpconflictd %xmm0, %xmm2
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpconflictd %xmm0, %xmm1 {%k1}
; X86-NEXT:    vpconflictd %xmm0, %xmm0 {%k1} {z}
; X86-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
; X86-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: test_int_x86_avx512_mask_vpconflict_d_128:
; X64:       # %bb.0:
; X64-NEXT:    vpconflictd %xmm0, %xmm2
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpconflictd %xmm0, %xmm1 {%k1}
; X64-NEXT:    vpconflictd %xmm0, %xmm0 {%k1} {z}
; X64-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
; X64-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
; X64-NEXT:    retq
  %1 = call <4 x i32> @llvm.x86.avx512.conflict.d.128(<4 x i32> %x0)
  %2 = bitcast i8 %x2 to <8 x i1>
  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %3 = select <4 x i1> %extract1, <4 x i32> %1, <4 x i32> %x1
  %4 = call <4 x i32> @llvm.x86.avx512.conflict.d.128(<4 x i32> %x0)
  %5 = call <4 x i32> @llvm.x86.avx512.conflict.d.128(<4 x i32> %x0)
  %6 = bitcast i8 %x2 to <8 x i1>
  %extract = shufflevector <8 x i1> %6, <8 x i1> %6, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %7 = select <4 x i1> %extract, <4 x i32> %5, <4 x i32> zeroinitializer
  %res2 = add <4 x i32> %3, %4
  %res4 = add <4 x i32> %res2, %7
  ret <4 x i32> %res4
}

define <8 x i32> @test_int_x86_avx512_mask_vpconflict_d_256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2) {
; X86-LABEL: test_int_x86_avx512_mask_vpconflict_d_256:
; X86:       # %bb.0:
; X86-NEXT:    vpconflictd %ymm0, %ymm2
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpconflictd %ymm0, %ymm1 {%k1}
; X86-NEXT:    vpconflictd %ymm0, %ymm0 {%k1} {z}
; X86-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
; X86-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
; X86-NEXT:    retl
;
; X64-LABEL: test_int_x86_avx512_mask_vpconflict_d_256:
; X64:       # %bb.0:
; X64-NEXT:    vpconflictd %ymm0, %ymm2
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpconflictd %ymm0, %ymm1 {%k1}
; X64-NEXT:    vpconflictd %ymm0, %ymm0 {%k1} {z}
; X64-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
; X64-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
; X64-NEXT:    retq
  %1 = call <8 x i32> @llvm.x86.avx512.conflict.d.256(<8 x i32> %x0)
  %2 = bitcast i8 %x2 to <8 x i1>
  %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x1
  %4 = call <8 x i32> @llvm.x86.avx512.conflict.d.256(<8 x i32> %x0)
  %5 = call <8 x i32> @llvm.x86.avx512.conflict.d.256(<8 x i32> %x0)
  %6 = bitcast i8 %x2 to <8 x i1>
  %7 = select <8 x i1> %6, <8 x i32> %5, <8 x i32> zeroinitializer
  %res3 = add <8 x i32> %3, %4
  %res4 = add <8 x i32> %7, %res3
  ret <8 x i32> %res4
}

define <2 x i64> @test_int_x86_avx512_mask_vpconflict_q_128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2) {
; X86-LABEL: test_int_x86_avx512_mask_vpconflict_q_128:
; X86:       # %bb.0:
; X86-NEXT:    vpconflictq %xmm0, %xmm2
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpconflictq %xmm0, %xmm1 {%k1}
; X86-NEXT:    vpconflictq %xmm0, %xmm0 {%k1} {z}
; X86-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
; X86-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: test_int_x86_avx512_mask_vpconflict_q_128:
; X64:       # %bb.0:
; X64-NEXT:    vpconflictq %xmm0, %xmm2
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpconflictq %xmm0, %xmm1 {%k1}
; X64-NEXT:    vpconflictq %xmm0, %xmm0 {%k1} {z}
; X64-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
; X64-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
; X64-NEXT:    retq
  %1 = call <2 x i64> @llvm.x86.avx512.conflict.q.128(<2 x i64> %x0)
  %2 = bitcast i8 %x2 to <8 x i1>
  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
  %3 = select <2 x i1> %extract1, <2 x i64> %1, <2 x i64> %x1
  %4 = call <2 x i64> @llvm.x86.avx512.conflict.q.128(<2 x i64> %x0)
  %5 = call <2 x i64> @llvm.x86.avx512.conflict.q.128(<2 x i64> %x0)
  %6 = bitcast i8 %x2 to <8 x i1>
  %extract = shufflevector <8 x i1> %6, <8 x i1> %6, <2 x i32> <i32 0, i32 1>
  %7 = select <2 x i1> %extract, <2 x i64> %5, <2 x i64> zeroinitializer
  %res3 = add <2 x i64> %3, %4
  %res4 = add <2 x i64> %7, %res3
  ret <2 x i64> %res4
}

define <4 x i64> @test_int_x86_avx512_mask_vpconflict_q_256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2) {
; X86-LABEL: test_int_x86_avx512_mask_vpconflict_q_256:
; X86:       # %bb.0:
; X86-NEXT:    vpconflictq %ymm0, %ymm2
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpconflictq %ymm0, %ymm1 {%k1}
; X86-NEXT:    vpconflictq %ymm0, %ymm0 {%k1} {z}
; X86-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
; X86-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
; X86-NEXT:    retl
;
; X64-LABEL: test_int_x86_avx512_mask_vpconflict_q_256:
; X64:       # %bb.0:
; X64-NEXT:    vpconflictq %ymm0, %ymm2
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpconflictq %ymm0, %ymm1 {%k1}
; X64-NEXT:    vpconflictq %ymm0, %ymm0 {%k1} {z}
; X64-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
; X64-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
; X64-NEXT:    retq
  %1 = call <4 x i64> @llvm.x86.avx512.conflict.q.256(<4 x i64> %x0)
  %2 = bitcast i8 %x2 to <8 x i1>
  %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %3 = select <4 x i1> %extract1, <4 x i64> %1, <4 x i64> %x1
  %4 = call <4 x i64> @llvm.x86.avx512.conflict.q.256(<4 x i64> %x0)
  %5 = call <4 x i64> @llvm.x86.avx512.conflict.q.256(<4 x i64> %x0)
  %6 = bitcast i8 %x2 to <8 x i1>
  %extract = shufflevector <8 x i1> %6, <8 x i1> %6, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %7 = select <4 x i1> %extract, <4 x i64> %5, <4 x i64> zeroinitializer
  %res3 = add <4 x i64> %3, %4
  %res4 = add <4 x i64> %7, %res3
  ret <4 x i64> %res4
}

declare <4 x i32> @llvm.x86.avx512.conflict.d.128(<4 x i32>)
declare <8 x i32> @llvm.x86.avx512.conflict.d.256(<8 x i32>)
declare <2 x i64> @llvm.x86.avx512.conflict.q.128(<2 x i64>)
declare <4 x i64> @llvm.x86.avx512.conflict.q.256(<4 x i64>)