; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE42
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2

define <2 x i32> @_mul2xi32a(<2 x i32>, <2 x i32>) {
; SSE2-LABEL: _mul2xi32a:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE42-LABEL: _mul2xi32a:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pmulld %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: _mul2xi32a:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %r = mul <2 x i32> %0, %1
  ret <2 x i32> %r
}

define <2 x i32> @_mul2xi32b(<2 x i32>, <2 x i32>) {
; SSE-LABEL: _mul2xi32b:
; SSE:       # %bb.0:
; SSE-NEXT:    pmuludq %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _mul2xi32b:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %factor0 = shufflevector <2 x i32> %0, <2 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 1, i32 undef>
  %factor1 = shufflevector <2 x i32> %1, <2 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 1, i32 undef>
  %product64 = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %factor0, <4 x i32> %factor1) readnone
  %product = bitcast <2 x i64> %product64 to <4 x i32>
  %r = shufflevector <4 x i32> %product, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
  ret <2 x i32> %r
}

define <4 x i32> @_mul4xi32a(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: _mul4xi32a:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE42-LABEL: _mul4xi32a:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pmulld %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: _mul4xi32a:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %r = mul <4 x i32> %0, %1
  ret <4 x i32> %r
}

define <4 x i32> @_mul4xi32b(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: _mul4xi32b:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE42-LABEL: _mul4xi32b:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE42-NEXT:    pmuludq %xmm1, %xmm0
; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE42-NEXT:    pmuludq %xmm2, %xmm1
; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; SSE42-NEXT:    retq
;
; AVX1-LABEL: _mul4xi32b:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
; AVX1-NEXT:    retq
;
; AVX2-LABEL: _mul4xi32b:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX2-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
; AVX2-NEXT:    retq
  %even0 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 2, i32 undef>
  %even1 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 2, i32 undef>
  %evenMul64 = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %even0, <4 x i32> %even1) readnone
  %evenMul = bitcast <2 x i64> %evenMul64 to <4 x i32>
  %odd0 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
  %odd1 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
  %oddMul64 = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %odd0, <4 x i32> %odd1) readnone
  %oddMul = bitcast <2 x i64> %oddMul64 to <4 x i32>
  %r = shufflevector <4 x i32> %evenMul, <4 x i32> %oddMul, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
  ret <4 x i32> %r
}

; the following extractelements and insertelements
; are just an unrolled 'zext' on a vector:
; %ext0 = zext <4 x i32> %0 to <4 x i64>
; %ext1 = zext <4 x i32> %1 to <4 x i64>
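; together with the multiply, the compact vector form of the whole body
; would then roughly be (a sketch for reference, not exercised by this test):
; %r = mul <4 x i64> %ext0, %ext1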
define <4 x i64> @_mul4xi32toi64a(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: _mul4xi32toi64a:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,1,3,3]
; SSE2-NEXT:    pmuludq %xmm3, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    movdqa %xmm2, %xmm1
; SSE2-NEXT:    retq
;
; SSE42-LABEL: _mul4xi32toi64a:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
; SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,1,3,3]
; SSE42-NEXT:    pmuludq %xmm3, %xmm2
; SSE42-NEXT:    pmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; SSE42-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE42-NEXT:    pmuludq %xmm1, %xmm0
; SSE42-NEXT:    movdqa %xmm2, %xmm1
; SSE42-NEXT:    retq
;
; AVX1-LABEL: _mul4xi32toi64a:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,2,3,3]
; AVX1-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: _mul4xi32toi64a:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX2-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    retq
  %f00 = extractelement <4 x i32> %0, i32 0
  %f01 = extractelement <4 x i32> %0, i32 1
  %f02 = extractelement <4 x i32> %0, i32 2
  %f03 = extractelement <4 x i32> %0, i32 3
  %f10 = extractelement <4 x i32> %1, i32 0
  %f11 = extractelement <4 x i32> %1, i32 1
  %f12 = extractelement <4 x i32> %1, i32 2
  %f13 = extractelement <4 x i32> %1, i32 3
  %ext00 = zext i32 %f00 to i64
  %ext01 = zext i32 %f01 to i64
  %ext02 = zext i32 %f02 to i64
  %ext03 = zext i32 %f03 to i64
  %ext10 = zext i32 %f10 to i64
  %ext11 = zext i32 %f11 to i64
  %ext12 = zext i32 %f12 to i64
  %ext13 = zext i32 %f13 to i64
  %extv00 = insertelement <4 x i64> undef, i64 %ext00, i32 0
  %extv01 = insertelement <4 x i64> %extv00, i64 %ext01, i32 1
  %extv02 = insertelement <4 x i64> %extv01, i64 %ext02, i32 2
  %extv03 = insertelement <4 x i64> %extv02, i64 %ext03, i32 3
  %extv10 = insertelement <4 x i64> undef, i64 %ext10, i32 0
  %extv11 = insertelement <4 x i64> %extv10, i64 %ext11, i32 1
  %extv12 = insertelement <4 x i64> %extv11, i64 %ext12, i32 2
  %extv13 = insertelement <4 x i64> %extv12, i64 %ext13, i32 3
  %r = mul <4 x i64> %extv03, %extv13
  ret <4 x i64> %r
}

; very similar to _mul4xi32b above, except that
; there is no bitcast of the 64-bit products and the final shuffle is a little different
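; for comparison, the two result shuffles (both taken verbatim from the IR
; in this file) are:
;   _mul4xi32b:      %r = shufflevector <4 x i32> %evenMul, <4 x i32> %oddMul, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
;   _mul4xi32toi64b: %r = shufflevector <2 x i64> %evenMul, <2 x i64> %oddMul, <4 x i32> <i32 0, i32 2, i32 1, i32 3>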
define <4 x i64> @_mul4xi32toi64b(<4 x i32>, <4 x i32>) {
; SSE-LABEL: _mul4xi32toi64b:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT:    pmuludq %xmm1, %xmm2
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT:    pmuludq %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm2, %xmm0
; SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT:    punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movdqa %xmm2, %xmm1
; SSE-NEXT:    retq
;
; AVX1-LABEL: _mul4xi32toi64b:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm1 = xmm2[1],xmm0[1]
; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: _mul4xi32toi64b:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX2-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpunpckhqdq {{.*#+}} xmm1 = xmm2[1],xmm0[1]
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT:    retq
  %even0 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 2, i32 undef>
  %even1 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 2, i32 undef>
  %evenMul = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %even0, <4 x i32> %even1) readnone
  %odd0 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
  %odd1 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
  %oddMul = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %odd0, <4 x i32> %odd1) readnone
  %r = shufflevector <2 x i64> %evenMul, <2 x i64> %oddMul, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x i64> %r
}

; Here we do not split into even- and odd-indexed elements
; but into the lower and the upper half of the factor vectors.
; This makes the initial shuffle more complicated,
; but the final shuffle is a no-op.
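; concretely, the final mask below, <i32 0, i32 1, i32 2, i32 3>, is the
; identity: it merely concatenates %lowerMul and %upperMul, so the backend
; does not have to emit any shuffle instruction for it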
define <4 x i64> @_mul4xi32toi64c(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: _mul4xi32toi64c:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[0,1,1,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,1,1,3]
; SSE2-NEXT:    pmuludq %xmm3, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
; SSE2-NEXT:    pmuludq %xmm0, %xmm1
; SSE2-NEXT:    movdqa %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE42-LABEL: _mul4xi32toi64c:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
; SSE42-NEXT:    pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; SSE42-NEXT:    pmuludq %xmm3, %xmm2
; SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; SSE42-NEXT:    pmuludq %xmm0, %xmm1
; SSE42-NEXT:    movdqa %xmm2, %xmm0
; SSE42-NEXT:    retq
;
; AVX1-LABEL: _mul4xi32toi64c:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: _mul4xi32toi64c:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm3, %ymm1
; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm2, %ymm0
; AVX2-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    retq
  %lower0 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 1, i32 undef>
  %lower1 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 1, i32 undef>
  %lowerMul = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %lower0, <4 x i32> %lower1) readnone
  %upper0 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 3, i32 undef>
  %upper1 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 3, i32 undef>
  %upperMul = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %upper0, <4 x i32> %upper1) readnone
  %r = shufflevector <2 x i64> %lowerMul, <2 x i64> %upperMul, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i64> %r
}

; If we know that the most significant halves of the i64 elements are zero,
; then the multiplication can be simplified drastically.
; In the following example I assert a zero upper half
; by 'trunc' followed by 'zext'.
;
; the following extractelements and insertelements
; are just an unrolled 'trunc' plus 'zext' on a vector:
; %trunc0 = trunc <2 x i64> %0 to <2 x i32>
; %trunc1 = trunc <2 x i64> %1 to <2 x i32>
; %ext0 = zext <2 x i32> %trunc0 to <2 x i64>
; %ext1 = zext <2 x i32> %trunc1 to <2 x i64>
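; a quick sanity check of the arithmetic: writing %a = 2^32*aHi + aLo and
; %b = 2^32*bHi + bLo, the full product is
;   %a * %b = 2^64*aHi*bHi + 2^32*(aHi*bLo + aLo*bHi) + aLo*bLo
; when aHi = bHi = 0 every term except aLo*bLo vanishes, which is exactly
; the 32x32->64 unsigned multiply that a single pmuludq performs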
define <2 x i64> @_mul2xi64toi64a(<2 x i64>, <2 x i64>) {
; SSE-LABEL: _mul2xi64toi64a:
; SSE:       # %bb.0:
; SSE-NEXT:    pmuludq %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _mul2xi64toi64a:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %f00 = extractelement <2 x i64> %0, i32 0
  %f01 = extractelement <2 x i64> %0, i32 1
  %f10 = extractelement <2 x i64> %1, i32 0
  %f11 = extractelement <2 x i64> %1, i32 1
  %trunc00 = trunc i64 %f00 to i32
  %trunc01 = trunc i64 %f01 to i32
  %ext00 = zext i32 %trunc00 to i64
  %ext01 = zext i32 %trunc01 to i64
  %trunc10 = trunc i64 %f10 to i32
  %trunc11 = trunc i64 %f11 to i32
  %ext10 = zext i32 %trunc10 to i64
  %ext11 = zext i32 %trunc11 to i64
  %extv00 = insertelement <2 x i64> undef, i64 %ext00, i32 0
  %extv01 = insertelement <2 x i64> %extv00, i64 %ext01, i32 1
  %extv10 = insertelement <2 x i64> undef, i64 %ext10, i32 0
  %extv11 = insertelement <2 x i64> %extv10, i64 %ext11, i32 1
  %r = mul <2 x i64> %extv01, %extv11
  ret <2 x i64> %r
}

define <2 x i64> @_mul2xi64toi64b(<2 x i64>, <2 x i64>) {
; SSE-LABEL: _mul2xi64toi64b:
; SSE:       # %bb.0:
; SSE-NEXT:    pmuludq %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _mul2xi64toi64b:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %f0 = bitcast <2 x i64> %0 to <4 x i32>
  %f1 = bitcast <2 x i64> %1 to <4 x i32>
  %r = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %f0, <4 x i32> %f1) readnone
  ret <2 x i64> %r
}

declare <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32>, <4 x i32>) nounwind readnone