; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX512DQVL
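
; Even-element shuffles feeding sign-extended operands of a 64-bit multiply
; should combine into a single PMULDQ.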
define <2 x i64> @combine_shuffle_sext_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_shuffle_sext_pmuldq:
; SSE:       # %bb.0:
; SSE-NEXT:    pmuldq %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_shuffle_sext_pmuldq:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmuldq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
  %2 = shufflevector <4 x i32> %a1, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
  %3 = sext <2 x i32> %1 to <2 x i64>
  %4 = sext <2 x i32> %2 to <2 x i64>
  %5 = mul nuw <2 x i64> %3, %4
  ret <2 x i64> %5
}
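
; Unsigned version: the same even-element shuffles feeding zero-extended
; operands should combine into a single PMULUDQ.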
define <2 x i64> @combine_shuffle_zext_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_shuffle_zext_pmuludq:
; SSE:       # %bb.0:
; SSE-NEXT:    pmuludq %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_shuffle_zext_pmuludq:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
  %2 = shufflevector <4 x i32> %a1, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
  %3 = zext <2 x i32> %1 to <2 x i64>
  %4 = zext <2 x i32> %2 to <2 x i64>
  %5 = mul nuw <2 x i64> %3, %4
  ret <2 x i64> %5
}
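
; Zeroing the odd elements with a shuffle before bitcasting to <2 x i64>
; leaves only the low 32 bits of each lane live, so the multiply can use
; PMULUDQ directly.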
define <2 x i64> @combine_shuffle_zero_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_shuffle_zero_pmuludq:
; SSE:       # %bb.0:
; SSE-NEXT:    pmuludq %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_shuffle_zero_pmuludq:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  %2 = shufflevector <4 x i32> %a1, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  %3 = bitcast <4 x i32> %1 to <2 x i64>
  %4 = bitcast <4 x i32> %2 to <2 x i64>
  %5 = mul <2 x i64> %3, %4
  ret <2 x i64> %5
}
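
; 256-bit version of the zero-shuffle pattern; AVX2 and AVX512 keep it as a
; single 256-bit VPMULUDQ, while SSE/AVX1 work on 128-bit halves.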
define <4 x i64> @combine_shuffle_zero_pmuludq_256(<8 x i32> %a0, <8 x i32> %a1) {
; SSE-LABEL: combine_shuffle_zero_pmuludq_256:
; SSE:       # %bb.0:
; SSE-NEXT:    pmuludq %xmm2, %xmm0
; SSE-NEXT:    pmuludq %xmm3, %xmm1
; SSE-NEXT:    retq
;
; AVX1-LABEL: combine_shuffle_zero_pmuludq_256:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT:    vpmuludq %xmm2, %xmm3, %xmm2
; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: combine_shuffle_zero_pmuludq_256:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    retq
;
; AVX512VL-LABEL: combine_shuffle_zero_pmuludq_256:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT:    retq
;
; AVX512DQVL-LABEL: combine_shuffle_zero_pmuludq_256:
; AVX512DQVL:       # %bb.0:
; AVX512DQVL-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT:    retq
  %1 = shufflevector <8 x i32> %a0, <8 x i32> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
  %2 = shufflevector <8 x i32> %a1, <8 x i32> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
  %3 = bitcast <8 x i32> %1 to <4 x i64>
  %4 = bitcast <8 x i32> %2 to <4 x i64>
  %5 = mul <4 x i64> %3, %4
  ret <4 x i64> %5
}
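
; Zero-extended multiply by a splatted constant that fits in 32 bits; each
; extended half should still be handled with PMULUDQ.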
define <8 x i64> @combine_zext_pmuludq_256(<8 x i32> %a) {
; SSE-LABEL: combine_zext_pmuludq_256:
; SSE:       # %bb.0:
; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,1,3,3]
; SSE-NEXT:    pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,1,3,3]
; SSE-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE-NEXT:    pmovsxdq {{.*#+}} xmm4 = [715827883,715827883]
; SSE-NEXT:    pmuludq %xmm4, %xmm0
; SSE-NEXT:    pmuludq %xmm4, %xmm1
; SSE-NEXT:    pmuludq %xmm4, %xmm2
; SSE-NEXT:    pmuludq %xmm4, %xmm3
; SSE-NEXT:    retq
;
; AVX1-LABEL: combine_zext_pmuludq_256:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,2,3,3]
; AVX1-NEXT:    vmovddup {{.*#+}} xmm4 = [715827883,715827883]
; AVX1-NEXT:    # xmm4 = mem[0,0]
; AVX1-NEXT:    vpmuludq %xmm4, %xmm3, %xmm3
; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT:    vpmuludq %xmm4, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
; AVX1-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT:    retq
;
; AVX2-LABEL: combine_zext_pmuludq_256:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [715827883,715827883,715827883,715827883]
; AVX2-NEXT:    vpmuludq %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vpmuludq %ymm2, %ymm1, %ymm1
; AVX2-NEXT:    retq
;
; AVX512VL-LABEL: combine_zext_pmuludq_256:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
; AVX512VL-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
; AVX512VL-NEXT:    retq
;
; AVX512DQVL-LABEL: combine_zext_pmuludq_256:
; AVX512DQVL:       # %bb.0:
; AVX512DQVL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
; AVX512DQVL-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
; AVX512DQVL-NEXT:    retq
  %1 = zext <8 x i32> %a to <8 x i64>
  %2 = mul nuw nsw <8 x i64> %1, <i64 715827883, i64 715827883, i64 715827883, i64 715827883, i64 715827883, i64 715827883, i64 715827883, i64 715827883>
  ret <8 x i64> %2
}
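
; Regression test for PR39398: the vector multiplies are all dead, so only
; the scalar compare-and-branch loop should survive.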
define void @PR39398(i32 %a0) {
; SSE-LABEL: PR39398:
; SSE:       # %bb.0: # %bb
; SSE-NEXT:    .p2align 4, 0x90
; SSE-NEXT:  .LBB5_1: # %bb10
; SSE-NEXT:    # =>This Inner Loop Header: Depth=1
; SSE-NEXT:    cmpl $232, %edi
; SSE-NEXT:    jne .LBB5_1
; SSE-NEXT:  # %bb.2: # %bb34
; SSE-NEXT:    retq
;
; AVX-LABEL: PR39398:
; AVX:       # %bb.0: # %bb
; AVX-NEXT:    .p2align 4, 0x90
; AVX-NEXT:  .LBB5_1: # %bb10
; AVX-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX-NEXT:    cmpl $232, %edi
; AVX-NEXT:    jne .LBB5_1
; AVX-NEXT:  # %bb.2: # %bb34
; AVX-NEXT:    retq
bb:
  %tmp9 = shufflevector <4 x i64> undef, <4 x i64> undef, <4 x i32> zeroinitializer
  br label %bb10

bb10:                                             ; preds = %bb10, %bb
  %tmp12 = phi <4 x i32> [ <i32 9, i32 8, i32 7, i32 6>, %bb ], [ zeroinitializer, %bb10 ]
  %tmp16 = add <4 x i32> %tmp12, <i32 -4, i32 -4, i32 -4, i32 -4>
  %tmp18 = zext <4 x i32> %tmp12 to <4 x i64>
  %tmp19 = zext <4 x i32> %tmp16 to <4 x i64>
  %tmp20 = xor <4 x i64> %tmp18, <i64 -1, i64 -1, i64 -1, i64 -1>
  %tmp21 = xor <4 x i64> %tmp19, <i64 -1, i64 -1, i64 -1, i64 -1>
  %tmp24 = mul <4 x i64> %tmp9, %tmp20
  %tmp25 = mul <4 x i64> %tmp9, %tmp21
  %tmp26 = select <4 x i1> undef, <4 x i64> zeroinitializer, <4 x i64> %tmp24
  %tmp27 = select <4 x i1> undef, <4 x i64> zeroinitializer, <4 x i64> %tmp25
  %tmp28 = add <4 x i64> zeroinitializer, %tmp26
  %tmp29 = add <4 x i64> zeroinitializer, %tmp27
  %tmp33 = icmp eq i32 %a0, 232
  br i1 %tmp33, label %bb34, label %bb10

bb34:                                             ; preds = %bb10
  %tmp35 = add <4 x i64> %tmp29, %tmp28
  ret void
}
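
; Regression test for PR43159: udiv by non-uniform constants expands to a
; PMULUDQ-based multiply-high sequence feeding the tail call to @foo.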
define i32 @PR43159(ptr %a0) {
; SSE-LABEL: PR43159:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    movdqa (%rdi), %xmm0
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrld $1, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; SSE-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE-NEXT:    psubd %xmm2, %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
; SSE-NEXT:    paddd %xmm1, %xmm2
; SSE-NEXT:    movdqa %xmm2, %xmm0
; SSE-NEXT:    psrld $7, %xmm0
; SSE-NEXT:    psrld $6, %xmm2
; SSE-NEXT:    movd %xmm2, %edi
; SSE-NEXT:    pextrd $1, %xmm0, %esi
; SSE-NEXT:    pextrd $2, %xmm2, %edx
; SSE-NEXT:    pextrd $3, %xmm0, %ecx
; SSE-NEXT:    jmp foo # TAILCALL
;
; AVX1-LABEL: PR43159:
; AVX1:       # %bb.0: # %entry
; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
; AVX1-NEXT:    vpsrld $1, %xmm0, %xmm1
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; AVX1-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT:    vpsubd %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX1-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpsrld $7, %xmm0, %xmm1
; AVX1-NEXT:    vpsrld $6, %xmm0, %xmm0
; AVX1-NEXT:    vmovd %xmm0, %edi
; AVX1-NEXT:    vpextrd $1, %xmm1, %esi
; AVX1-NEXT:    vpextrd $2, %xmm0, %edx
; AVX1-NEXT:    vpextrd $3, %xmm1, %ecx
; AVX1-NEXT:    jmp foo # TAILCALL
;
; AVX2-LABEL: PR43159:
; AVX2:       # %bb.0: # %entry
; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
; AVX2-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX2-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; AVX2-NEXT:    vpsubd %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX2-NEXT:    vpmuludq %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %edi
; AVX2-NEXT:    vpextrd $1, %xmm0, %esi
; AVX2-NEXT:    vpextrd $2, %xmm0, %edx
; AVX2-NEXT:    vpextrd $3, %xmm0, %ecx
; AVX2-NEXT:    jmp foo # TAILCALL
;
; AVX512VL-LABEL: PR43159:
; AVX512VL:       # %bb.0: # %entry
; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512VL-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512VL-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; AVX512VL-NEXT:    vpsubd %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX512VL-NEXT:    vpmuludq %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
; AVX512VL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT:    vmovd %xmm0, %edi
; AVX512VL-NEXT:    vpextrd $1, %xmm0, %esi
; AVX512VL-NEXT:    vpextrd $2, %xmm0, %edx
; AVX512VL-NEXT:    vpextrd $3, %xmm0, %ecx
; AVX512VL-NEXT:    jmp foo # TAILCALL
;
; AVX512DQVL-LABEL: PR43159:
; AVX512DQVL:       # %bb.0: # %entry
; AVX512DQVL-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512DQVL-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX512DQVL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512DQVL-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512DQVL-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512DQVL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX512DQVL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; AVX512DQVL-NEXT:    vpsubd %xmm2, %xmm0, %xmm0
; AVX512DQVL-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX512DQVL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX512DQVL-NEXT:    vpmuludq %xmm2, %xmm0, %xmm0
; AVX512DQVL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512DQVL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
; AVX512DQVL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512DQVL-NEXT:    vmovd %xmm0, %edi
; AVX512DQVL-NEXT:    vpextrd $1, %xmm0, %esi
; AVX512DQVL-NEXT:    vpextrd $2, %xmm0, %edx
; AVX512DQVL-NEXT:    vpextrd $3, %xmm0, %ecx
; AVX512DQVL-NEXT:    jmp foo # TAILCALL
entry:
  %0 = load <4 x i32>, ptr %a0, align 16
  %div = udiv <4 x i32> %0, <i32 167, i32 237, i32 254, i32 177>
  %ext0 = extractelement <4 x i32> %div, i32 0
  %ext1 = extractelement <4 x i32> %div, i32 1
  %ext2 = extractelement <4 x i32> %div, i32 2
  %ext3 = extractelement <4 x i32> %div, i32 3
  %call = tail call i32 @foo(i32 %ext0, i32 %ext1, i32 %ext2, i32 %ext3)
  ret i32 %call
}

declare dso_local i32 @foo(i32, i32, i32, i32)
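
; PR49658: accumulate the high 32 bits of an unsigned 64-bit product; the
; multiply-high should stay on PMULUDQ.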
define <8 x i32> @PR49658_zext(ptr %ptr, i32 %mul) {
; SSE-LABEL: PR49658_zext:
; SSE:       # %bb.0: # %start
; SSE-NEXT:    movd %esi, %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
; SSE-NEXT:    pxor %xmm0, %xmm0
; SSE-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    .p2align 4, 0x90
; SSE-NEXT:  .LBB7_1: # %loop
; SSE-NEXT:    # =>This Inner Loop Header: Depth=1
; SSE-NEXT:    pmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
; SSE-NEXT:    pmovzxdq {{.*#+}} xmm4 = mem[0],zero,mem[1],zero
; SSE-NEXT:    pmovzxdq {{.*#+}} xmm5 = mem[0],zero,mem[1],zero
; SSE-NEXT:    pmovzxdq {{.*#+}} xmm6 = mem[0],zero,mem[1],zero
; SSE-NEXT:    pmuludq %xmm2, %xmm6
; SSE-NEXT:    pmuludq %xmm2, %xmm5
; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,3],xmm6[1,3]
; SSE-NEXT:    paddd %xmm5, %xmm0
; SSE-NEXT:    pmuludq %xmm2, %xmm4
; SSE-NEXT:    pmuludq %xmm2, %xmm3
; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,3],xmm3[1,3]
; SSE-NEXT:    paddd %xmm4, %xmm1
; SSE-NEXT:    subq $-128, %rax
; SSE-NEXT:    jne .LBB7_1
; SSE-NEXT:  # %bb.2: # %end
; SSE-NEXT:    retq
;
; AVX1-LABEL: PR49658_zext:
; AVX1:       # %bb.0: # %start
; AVX1-NEXT:    movl %esi, %eax
; AVX1-NEXT:    vmovq %rax, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm1
; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX1-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT:    .p2align 4, 0x90
; AVX1-NEXT:  .LBB7_1: # %loop
; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm4 = mem[0],zero,mem[1],zero
; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm5 = mem[0],zero,mem[1],zero
; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm6 = mem[0],zero,mem[1],zero
; AVX1-NEXT:    vpmuludq %xmm6, %xmm2, %xmm6
; AVX1-NEXT:    vpmuludq %xmm5, %xmm1, %xmm5
; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm5[1,3],xmm6[1,3]
; AVX1-NEXT:    vpmuludq %xmm4, %xmm2, %xmm4
; AVX1-NEXT:    vpmuludq %xmm3, %xmm1, %xmm3
; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[1,3],xmm4[1,3]
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT:    vpaddd %xmm4, %xmm5, %xmm4
; AVX1-NEXT:    vpaddd %xmm0, %xmm3, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
; AVX1-NEXT:    subq $-128, %rax
; AVX1-NEXT:    jne .LBB7_1
; AVX1-NEXT:  # %bb.2: # %end
; AVX1-NEXT:    retq
;
; AVX2-LABEL: PR49658_zext:
; AVX2:       # %bb.0: # %start
; AVX2-NEXT:    movl %esi, %eax
; AVX2-NEXT:    vmovq %rax, %xmm0
; AVX2-NEXT:    vpbroadcastq %xmm0, %ymm1
; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
; AVX2-NEXT:    .p2align 4, 0x90
; AVX2-NEXT:  .LBB7_1: # %loop
; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX2-NEXT:    vpmuludq %ymm3, %ymm1, %ymm3
; AVX2-NEXT:    vpmuludq %ymm2, %ymm1, %ymm2
; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm3[2,3],ymm2[2,3]
; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
; AVX2-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[1,3],ymm4[1,3],ymm2[5,7],ymm4[5,7]
; AVX2-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    subq $-128, %rax
; AVX2-NEXT:    jne .LBB7_1
; AVX2-NEXT:  # %bb.2: # %end
; AVX2-NEXT:    retq
;
; AVX512VL-LABEL: PR49658_zext:
; AVX512VL:       # %bb.0: # %start
; AVX512VL-NEXT:    movl %esi, %eax
; AVX512VL-NEXT:    vpbroadcastq %rax, %zmm1
; AVX512VL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX512VL-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
; AVX512VL-NEXT:    .p2align 4, 0x90
; AVX512VL-NEXT:  .LBB7_1: # %loop
; AVX512VL-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX512VL-NEXT:    vpmuludq %zmm2, %zmm1, %zmm2
; AVX512VL-NEXT:    vpsrlq $32, %zmm2, %zmm2
; AVX512VL-NEXT:    vpmovqd %zmm2, %ymm2
; AVX512VL-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
; AVX512VL-NEXT:    subq $-128, %rax
; AVX512VL-NEXT:    jne .LBB7_1
; AVX512VL-NEXT:  # %bb.2: # %end
; AVX512VL-NEXT:    retq
;
; AVX512DQVL-LABEL: PR49658_zext:
; AVX512DQVL:       # %bb.0: # %start
; AVX512DQVL-NEXT:    movl %esi, %eax
; AVX512DQVL-NEXT:    vpbroadcastq %rax, %zmm1
; AVX512DQVL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX512DQVL-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
; AVX512DQVL-NEXT:    .p2align 4, 0x90
; AVX512DQVL-NEXT:  .LBB7_1: # %loop
; AVX512DQVL-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX512DQVL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX512DQVL-NEXT:    vpmuludq %zmm2, %zmm1, %zmm2
; AVX512DQVL-NEXT:    vpsrlq $32, %zmm2, %zmm2
; AVX512DQVL-NEXT:    vpmovqd %zmm2, %ymm2
; AVX512DQVL-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
; AVX512DQVL-NEXT:    subq $-128, %rax
; AVX512DQVL-NEXT:    jne .LBB7_1
; AVX512DQVL-NEXT:  # %bb.2: # %end
; AVX512DQVL-NEXT:    retq
start:
  %t1 = zext i32 %mul to i64
  %t2 = insertelement <8 x i64> undef, i64 %t1, i32 0
  %mulvec = shufflevector <8 x i64> %t2, <8 x i64> undef, <8 x i32> zeroinitializer
  br label %loop
loop:
  %loopcnt = phi i64 [ 0, %start ], [ %nextcnt, %loop ]
  %sum = phi <8 x i32> [ zeroinitializer, %start ], [ %nextsum, %loop ]
  %ptroff = getelementptr inbounds i32, ptr %ptr, i64 %loopcnt
  %v = load <8 x i32>, ptr %ptroff, align 4
  %v64 = zext <8 x i32> %v to <8 x i64>
  %vmul = mul nuw <8 x i64> %mulvec, %v64
  %vmulhi = lshr <8 x i64> %vmul, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
  %vtrunc = trunc <8 x i64> %vmulhi to <8 x i32>
  %nextsum = add <8 x i32> %vtrunc, %sum
  %nextcnt = add i64 %loopcnt, 32
  %isdone = icmp eq i64 %nextcnt, 524288
  br i1 %isdone, label %end, label %loop
end:
  ret <8 x i32> %nextsum
}
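
; Signed version of PR49658: AVX targets can use PMULDQ for the
; multiply-high, while SSE falls back to a full 64-bit multiply sequence.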
define <8 x i32> @PR49658_sext(ptr %ptr, i32 %mul) {
; SSE-LABEL: PR49658_sext:
; SSE:       # %bb.0: # %start
; SSE-NEXT:    movslq %esi, %rax
; SSE-NEXT:    movq %rax, %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
; SSE-NEXT:    pxor %xmm0, %xmm0
; SSE-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
; SSE-NEXT:    movdqa %xmm2, %xmm3
; SSE-NEXT:    psrlq $32, %xmm3
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    .p2align 4, 0x90
; SSE-NEXT:  .LBB8_1: # %loop
; SSE-NEXT:    # =>This Inner Loop Header: Depth=1
; SSE-NEXT:    pmovsxdq 2097176(%rdi,%rax), %xmm5
; SSE-NEXT:    pmovsxdq 2097168(%rdi,%rax), %xmm4
; SSE-NEXT:    pmovsxdq 2097152(%rdi,%rax), %xmm6
; SSE-NEXT:    pmovsxdq 2097160(%rdi,%rax), %xmm7
; SSE-NEXT:    movdqa %xmm3, %xmm8
; SSE-NEXT:    pmuludq %xmm7, %xmm8
; SSE-NEXT:    movdqa %xmm2, %xmm9
; SSE-NEXT:    pmuludq %xmm7, %xmm9
; SSE-NEXT:    psrlq $32, %xmm7
; SSE-NEXT:    pmuludq %xmm2, %xmm7
; SSE-NEXT:    paddq %xmm8, %xmm7
; SSE-NEXT:    psllq $32, %xmm7
; SSE-NEXT:    paddq %xmm9, %xmm7
; SSE-NEXT:    movdqa %xmm3, %xmm8
; SSE-NEXT:    pmuludq %xmm6, %xmm8
; SSE-NEXT:    movdqa %xmm2, %xmm9
; SSE-NEXT:    pmuludq %xmm6, %xmm9
; SSE-NEXT:    psrlq $32, %xmm6
; SSE-NEXT:    pmuludq %xmm2, %xmm6
; SSE-NEXT:    paddq %xmm8, %xmm6
; SSE-NEXT:    psllq $32, %xmm6
; SSE-NEXT:    paddq %xmm9, %xmm6
; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[1,3],xmm7[1,3]
; SSE-NEXT:    paddd %xmm6, %xmm0
; SSE-NEXT:    movdqa %xmm4, %xmm6
; SSE-NEXT:    psrlq $32, %xmm6
; SSE-NEXT:    pmuludq %xmm2, %xmm6
; SSE-NEXT:    movdqa %xmm3, %xmm7
; SSE-NEXT:    pmuludq %xmm4, %xmm7
; SSE-NEXT:    paddq %xmm6, %xmm7
; SSE-NEXT:    psllq $32, %xmm7
; SSE-NEXT:    pmuludq %xmm2, %xmm4
; SSE-NEXT:    paddq %xmm7, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm6
; SSE-NEXT:    psrlq $32, %xmm6
; SSE-NEXT:    pmuludq %xmm2, %xmm6
; SSE-NEXT:    movdqa %xmm3, %xmm7
; SSE-NEXT:    pmuludq %xmm5, %xmm7
; SSE-NEXT:    paddq %xmm6, %xmm7
; SSE-NEXT:    psllq $32, %xmm7
; SSE-NEXT:    pmuludq %xmm2, %xmm5
; SSE-NEXT:    paddq %xmm7, %xmm5
; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,3],xmm5[1,3]
; SSE-NEXT:    paddd %xmm4, %xmm1
; SSE-NEXT:    subq $-128, %rax
; SSE-NEXT:    jne .LBB8_1
; SSE-NEXT:  # %bb.2: # %end
; SSE-NEXT:    retq
;
; AVX1-LABEL: PR49658_sext:
; AVX1:       # %bb.0: # %start
; AVX1-NEXT:    movslq %esi, %rax
; AVX1-NEXT:    vmovq %rax, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm1
; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX1-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT:    .p2align 4, 0x90
; AVX1-NEXT:  .LBB8_1: # %loop
; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX1-NEXT:    vpmovsxdq 2097152(%rdi,%rax), %xmm3
; AVX1-NEXT:    vpmovsxdq 2097160(%rdi,%rax), %xmm4
; AVX1-NEXT:    vpmovsxdq 2097168(%rdi,%rax), %xmm5
; AVX1-NEXT:    vpmovsxdq 2097176(%rdi,%rax), %xmm6
; AVX1-NEXT:    vpmuldq %xmm6, %xmm2, %xmm6
; AVX1-NEXT:    vpmuldq %xmm5, %xmm1, %xmm5
; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm5[1,3],xmm6[1,3]
; AVX1-NEXT:    vpmuldq %xmm4, %xmm2, %xmm4
; AVX1-NEXT:    vpmuldq %xmm3, %xmm1, %xmm3
; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[1,3],xmm4[1,3]
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT:    vpaddd %xmm4, %xmm5, %xmm4
; AVX1-NEXT:    vpaddd %xmm0, %xmm3, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
; AVX1-NEXT:    subq $-128, %rax
; AVX1-NEXT:    jne .LBB8_1
; AVX1-NEXT:  # %bb.2: # %end
; AVX1-NEXT:    retq
;
; AVX2-LABEL: PR49658_sext:
; AVX2:       # %bb.0: # %start
; AVX2-NEXT:    movslq %esi, %rax
; AVX2-NEXT:    vmovq %rax, %xmm0
; AVX2-NEXT:    vpbroadcastq %xmm0, %ymm1
; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
; AVX2-NEXT:    .p2align 4, 0x90
; AVX2-NEXT:  .LBB8_1: # %loop
; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX2-NEXT:    vpmovsxdq 2097168(%rdi,%rax), %ymm2
; AVX2-NEXT:    vpmovsxdq 2097152(%rdi,%rax), %ymm3
; AVX2-NEXT:    vpmuldq %ymm3, %ymm1, %ymm3
; AVX2-NEXT:    vpmuldq %ymm2, %ymm1, %ymm2
; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm3[2,3],ymm2[2,3]
; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
; AVX2-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[1,3],ymm4[1,3],ymm2[5,7],ymm4[5,7]
; AVX2-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    subq $-128, %rax
; AVX2-NEXT:    jne .LBB8_1
; AVX2-NEXT:  # %bb.2: # %end
; AVX2-NEXT:    retq
;
; AVX512VL-LABEL: PR49658_sext:
; AVX512VL:       # %bb.0: # %start
; AVX512VL-NEXT:    movslq %esi, %rax
; AVX512VL-NEXT:    vpbroadcastq %rax, %zmm1
; AVX512VL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX512VL-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
; AVX512VL-NEXT:    .p2align 4, 0x90
; AVX512VL-NEXT:  .LBB8_1: # %loop
; AVX512VL-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX512VL-NEXT:    vpmuldq %zmm2, %zmm1, %zmm2
; AVX512VL-NEXT:    vpsrlq $32, %zmm2, %zmm2
; AVX512VL-NEXT:    vpmovqd %zmm2, %ymm2
; AVX512VL-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
; AVX512VL-NEXT:    subq $-128, %rax
; AVX512VL-NEXT:    jne .LBB8_1
; AVX512VL-NEXT:  # %bb.2: # %end
; AVX512VL-NEXT:    retq
;
; AVX512DQVL-LABEL: PR49658_sext:
; AVX512DQVL:       # %bb.0: # %start
; AVX512DQVL-NEXT:    movslq %esi, %rax
; AVX512DQVL-NEXT:    vpbroadcastq %rax, %zmm1
; AVX512DQVL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX512DQVL-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
; AVX512DQVL-NEXT:    .p2align 4, 0x90
; AVX512DQVL-NEXT:  .LBB8_1: # %loop
; AVX512DQVL-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX512DQVL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX512DQVL-NEXT:    vpmuldq %zmm2, %zmm1, %zmm2
; AVX512DQVL-NEXT:    vpsrlq $32, %zmm2, %zmm2
; AVX512DQVL-NEXT:    vpmovqd %zmm2, %ymm2
; AVX512DQVL-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
; AVX512DQVL-NEXT:    subq $-128, %rax
; AVX512DQVL-NEXT:    jne .LBB8_1
; AVX512DQVL-NEXT:  # %bb.2: # %end
; AVX512DQVL-NEXT:    retq
start:
  %t1 = sext i32 %mul to i64
  %t2 = insertelement <8 x i64> undef, i64 %t1, i32 0
  %mulvec = shufflevector <8 x i64> %t2, <8 x i64> undef, <8 x i32> zeroinitializer
  br label %loop
loop:
  %loopcnt = phi i64 [ 0, %start ], [ %nextcnt, %loop ]
  %sum = phi <8 x i32> [ zeroinitializer, %start ], [ %nextsum, %loop ]
  %ptroff = getelementptr inbounds i32, ptr %ptr, i64 %loopcnt
  %v = load <8 x i32>, ptr %ptroff, align 4
  %v64 = sext <8 x i32> %v to <8 x i64>
  %vmul = mul <8 x i64> %mulvec, %v64
  %vmulhi = ashr <8 x i64> %vmul, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
  %vtrunc = trunc <8 x i64> %vmulhi to <8 x i32>
  %nextsum = add <8 x i32> %vtrunc, %sum
  %nextcnt = add i64 %loopcnt, 32
  %isdone = icmp eq i64 %nextcnt, 524288
  br i1 %isdone, label %end, label %loop
end:
  ret <8 x i32> %nextsum
}