; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX

; fold (mul x, 0) -> 0
define <4 x i32> @combine_vec_mul_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, zeroinitializer
  ret <4 x i32> %1
}

; fold (mul x, 1) -> x
define <4 x i32> @combine_vec_mul_one(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_one:
; SSE:       # %bb.0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_one:
; AVX:       # %bb.0:
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %1
}

; fold (mul x, -1) -> 0-x
define <4 x i32> @combine_vec_mul_negone(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_negone:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    psubd %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_negone:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %1
}

; fold (mul x, (1 << c)) -> x << c
define <4 x i32> @combine_vec_mul_pow2a(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_pow2a:
; SSE:       # %bb.0:
; SSE-NEXT:    paddd %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_pow2a:
; AVX:       # %bb.0:
; AVX-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_mul_pow2b(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_pow2b:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_pow2b:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, <i32 1, i32 2, i32 4, i32 16>
  ret <4 x i32> %1
}
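; Non-uniform (1 << c) constants: SSE4.1 has no per-element variable shift, so
; the multiply above stays a pmulld, while AVX2 lowers it to a single vpsllvd.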

define <4 x i64> @combine_vec_mul_pow2c(<4 x i64> %x) {
; SSE-LABEL: combine_vec_mul_pow2c:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    paddq %xmm0, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $4, %xmm2
; SSE-NEXT:    psllq $2, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_pow2c:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsllvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    retq
  %1 = mul <4 x i64> %x, <i64 1, i64 2, i64 4, i64 16>
  ret <4 x i64> %1
}
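; For v4i64 the SSE lowering shifts each lane by its own constant amount and
; recombines the halves with pblendw; AVX2 again needs only one vpsllvq.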

; fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c
define <4 x i32> @combine_vec_mul_negpow2a(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_negpow2a:
; SSE:       # %bb.0:
; SSE-NEXT:    paddd %xmm0, %xmm0
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    psubd %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_negpow2a:
; AVX:       # %bb.0:
; AVX-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, <i32 -2, i32 -2, i32 -2, i32 -2>
  ret <4 x i32> %1
}
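; The uniform multiply by -2 above becomes the shift (paddd doubles %x)
; followed by the negation (pxor/psubd computes 0 - (x + x)).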

define <4 x i32> @combine_vec_mul_negpow2b(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_negpow2b:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_negpow2b:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, <i32 -1, i32 -2, i32 -4, i32 -16>
  ret <4 x i32> %1
}
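; With mixed negated powers of 2, neither target splits the multiply back into
; shift + negate; the constant multiply stays a single pmulld/vpmulld.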

define <4 x i64> @combine_vec_mul_negpow2c(<4 x i64> %x) {
; SSE-LABEL: combine_vec_mul_negpow2c:
; SSE:       # %bb.0:
; SSE-NEXT:    pmovsxbd {{.*#+}} xmm2 = [4294967295,0,4294967295,0]
; SSE-NEXT:    movdqa %xmm0, %xmm3
; SSE-NEXT:    pmuludq %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm0, %xmm4
; SSE-NEXT:    psrlq $32, %xmm4
; SSE-NEXT:    pmovsxbq {{.*#+}} xmm5 = [18446744073709551615,18446744073709551614]
; SSE-NEXT:    pmuludq %xmm5, %xmm4
; SSE-NEXT:    paddq %xmm3, %xmm4
; SSE-NEXT:    psllq $32, %xmm4
; SSE-NEXT:    pmuludq %xmm5, %xmm0
; SSE-NEXT:    paddq %xmm4, %xmm0
; SSE-NEXT:    pmuludq %xmm1, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrlq $32, %xmm3
; SSE-NEXT:    pmovsxbq {{.*#+}} xmm4 = [18446744073709551612,18446744073709551600]
; SSE-NEXT:    pmuludq %xmm4, %xmm3
; SSE-NEXT:    paddq %xmm2, %xmm3
; SSE-NEXT:    psllq $32, %xmm3
; SSE-NEXT:    pmuludq %xmm4, %xmm1
; SSE-NEXT:    paddq %xmm3, %xmm1
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_negpow2c:
; AVX:       # %bb.0:
; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
; AVX-NEXT:    vpmuludq %ymm1, %ymm0, %ymm1
; AVX-NEXT:    vpsrlq $32, %ymm0, %ymm2
; AVX-NEXT:    vpmovsxbq {{.*#+}} ymm3 = [18446744073709551615,18446744073709551614,18446744073709551612,18446744073709551600]
; AVX-NEXT:    vpmuludq %ymm3, %ymm2, %ymm2
; AVX-NEXT:    vpaddq %ymm2, %ymm1, %ymm1
; AVX-NEXT:    vpsllq $32, %ymm1, %ymm1
; AVX-NEXT:    vpmuludq %ymm3, %ymm0, %ymm0
; AVX-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
; AVX-NEXT:    retq
  %1 = mul <4 x i64> %x, <i64 -1, i64 -2, i64 -4, i64 -16>
  ret <4 x i64> %1
}
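; There is no 64-bit vector multiply before AVX-512DQ's vpmullq, so the v4i64
; multiply above is expanded into 32-bit pmuludq partial products.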

; (mul (shl X, c1), c2) -> (mul X, c2 << c1)
define <4 x i32> @combine_vec_mul_shl_const(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_shl_const:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_shl_const:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
  %2 = mul <4 x i32> %1, <i32 1, i32 3, i32 5, i32 7>
  ret <4 x i32> %2
}

; (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one use.
define <4 x i32> @combine_vec_mul_shl_oneuse0(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_mul_shl_oneuse0:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulld %xmm1, %xmm0
; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_shl_oneuse0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
  %2 = mul <4 x i32> %1, %y
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_mul_shl_oneuse1(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_mul_shl_oneuse1:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulld %xmm1, %xmm0
; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_shl_oneuse1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
  %2 = mul <4 x i32> %y, %1
  ret <4 x i32> %2
}
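; In both oneuse tests the shift is sunk below the multiply; SSE then has to
; re-expand the moved shl as a second pmulld, while AVX2 can use vpsllvd.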

define <4 x i32> @combine_vec_mul_shl_multiuse0(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_mul_shl_multiuse0:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    pmulld %xmm0, %xmm1
; SSE-NEXT:    paddd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_shl_multiuse0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm1
; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
  %2 = mul <4 x i32> %1, %y
  %3 = add <4 x i32> %1, %2
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_mul_shl_multiuse1(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_mul_shl_multiuse1:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    pmulld %xmm0, %xmm1
; SSE-NEXT:    paddd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_shl_multiuse1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpmulld %xmm0, %xmm1, %xmm1
; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
  %2 = mul <4 x i32> %y, %1
  %3 = add <4 x i32> %1, %2
  ret <4 x i32> %3
}
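; In the multiuse tests the extra use of the shl blocks the transform: the
; shifted value is materialized once and feeds both the multiply and the add.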

; fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2)

define <4 x i32> @combine_vec_mul_add(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_add:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_add:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = add <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
  %2 = mul <4 x i32> %1, <i32 4, i32 6, i32 2, i32 0>
  ret <4 x i32> %2
}
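; (x + c1) * c2 is reassociated to x*c2 + c1*c2 above, with both folded
; constant vectors loaded straight from memory.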

; fold Y = sra (X, size(X)-1); mul (or (Y, 1), X) -> (abs X)

define <16 x i8> @combine_mul_to_abs_v16i8(<16 x i8> %x) {
; SSE-LABEL: combine_mul_to_abs_v16i8:
; SSE:       # %bb.0:
; SSE-NEXT:    pabsb %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_to_abs_v16i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpabsb %xmm0, %xmm0
; AVX-NEXT:    retq
  %s = ashr <16 x i8> %x, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  %o = or <16 x i8> %s, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %m = mul <16 x i8> %o, %x
  ret <16 x i8> %m
}

define <2 x i64> @combine_mul_to_abs_v2i64(<2 x i64> %x) {
; SSE-LABEL: combine_mul_to_abs_v2i64:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    psubq %xmm0, %xmm1
; SSE-NEXT:    blendvpd %xmm0, %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_to_abs_v2i64:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %s = ashr <2 x i64> %x, <i64 63, i64 63>
  %o = or <2 x i64> %s, <i64 1, i64 1>
  %m = mul <2 x i64> %x, %o
  ret <2 x i64> %m
}
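; v16i8 matches pabsb directly; v2i64 has no pabsq before AVX-512, so abs is
; kept as negate-and-blend, with blendvpd selecting on the sign bit in %xmm0.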

; 'Quadratic Reciprocity' - and(mul(x,x),2) -> 0

define i64 @combine_mul_self_knownbits(i64 %x) {
; SSE-LABEL: combine_mul_self_knownbits:
; SSE:       # %bb.0:
; SSE-NEXT:    xorl %eax, %eax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_self_knownbits:
; AVX:       # %bb.0:
; AVX-NEXT:    xorl %eax, %eax
; AVX-NEXT:    retq
  %1 = mul i64 %x, %x
  %2 = and i64 %1, 2
  ret i64 %2
}

define <4 x i32> @combine_mul_self_knownbits_vector(<4 x i32> %x) {
; SSE-LABEL: combine_mul_self_knownbits_vector:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_self_knownbits_vector:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, %x
  %2 = and <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}
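; A square is 0 or 1 modulo 4, so bit 1 of mul(x,x) is known zero and the
; 'and' with 2 folds to zero in both the scalar and vector cases.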

; mul(x,x) - bit[1] is 0, but if demanding the other bits the source must not be undef

define i64 @combine_mul_self_demandedbits(i64 %x) {
; SSE-LABEL: combine_mul_self_demandedbits:
; SSE:       # %bb.0:
; SSE-NEXT:    movq %rdi, %rax
; SSE-NEXT:    imulq %rdi, %rax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_self_demandedbits:
; AVX:       # %bb.0:
; AVX-NEXT:    movq %rdi, %rax
; AVX-NEXT:    imulq %rdi, %rax
; AVX-NEXT:    retq
  %1 = mul i64 %x, %x
  %2 = and i64 %1, -3
  ret i64 %2
}

define <4 x i32> @combine_mul_self_demandedbits_vector(<4 x i32> %x) {
; SSE-LABEL: combine_mul_self_demandedbits_vector:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulld %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_self_demandedbits_vector:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulld %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = freeze <4 x i32> %x
  %2 = mul <4 x i32> %1, %1
  %3 = and <4 x i32> %2, <i32 -3, i32 -3, i32 -3, i32 -3>
  ret <4 x i32> %3
}
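; Demanding the other bits keeps the multiply alive, but the 'and' with -3 is
; dropped, since bit 1 of the square is already known zero.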

; PR59217 - Reuse umul_lohi/smul_lohi node

define i64 @combine_mul_umul_lohi_i64(i64 %a, i64 %b) {
; SSE-LABEL: combine_mul_umul_lohi_i64:
; SSE:       # %bb.0:
; SSE-NEXT:    movq %rdi, %rax
; SSE-NEXT:    mulq %rsi
; SSE-NEXT:    xorq %rdx, %rax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_umul_lohi_i64:
; AVX:       # %bb.0:
; AVX-NEXT:    movq %rdi, %rax
; AVX-NEXT:    mulq %rsi
; AVX-NEXT:    xorq %rdx, %rax
; AVX-NEXT:    retq
  %a128 = zext i64 %a to i128
  %b128 = zext i64 %b to i128
  %m128 = mul nuw i128 %a128, %b128
  %hi128 = lshr i128 %m128, 64
  %hi = trunc i128 %hi128 to i64
  %lo = mul i64 %a, %b
  %r = xor i64 %lo, %hi
  ret i64 %r
}
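; mulq leaves the full product in RDX:RAX, so the separate IR multiply for the
; low half reuses the same instruction instead of emitting a second mul.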

define i64 @combine_mul_smul_lohi_commute_i64(i64 %a, i64 %b) {
; SSE-LABEL: combine_mul_smul_lohi_commute_i64:
; SSE:       # %bb.0:
; SSE-NEXT:    movq %rdi, %rax
; SSE-NEXT:    imulq %rsi
; SSE-NEXT:    xorq %rdx, %rax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_smul_lohi_commute_i64:
; AVX:       # %bb.0:
; AVX-NEXT:    movq %rdi, %rax
; AVX-NEXT:    imulq %rsi
; AVX-NEXT:    xorq %rdx, %rax
; AVX-NEXT:    retq
  %a128 = sext i64 %a to i128
  %b128 = sext i64 %b to i128
  %m128 = mul nsw i128 %a128, %b128
  %hi128 = lshr i128 %m128, 64
  %hi = trunc i128 %hi128 to i64
  %lo = mul i64 %b, %a
  %r = xor i64 %lo, %hi
  ret i64 %r
}

define i64 @combine_mul_umul_lohi_const_i64(i64 %h) {
; SSE-LABEL: combine_mul_umul_lohi_const_i64:
; SSE:       # %bb.0:
; SSE-NEXT:    movq %rdi, %rax
; SSE-NEXT:    movabsq $-4265267296055464877, %rcx # imm = 0xC4CEB9FE1A85EC53
; SSE-NEXT:    mulq %rcx
; SSE-NEXT:    xorq %rdx, %rax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_umul_lohi_const_i64:
; AVX:       # %bb.0:
; AVX-NEXT:    movq %rdi, %rax
; AVX-NEXT:    movabsq $-4265267296055464877, %rcx # imm = 0xC4CEB9FE1A85EC53
; AVX-NEXT:    mulq %rcx
; AVX-NEXT:    xorq %rdx, %rax
; AVX-NEXT:    retq
  %h128 = zext i64 %h to i128
  %m128 = mul nuw i128 %h128, 14181476777654086739
  %hi128 = lshr i128 %m128, 64
  %hi = trunc i128 %hi128 to i64
  %lo = mul i64 %h, 14181476777654086739
  %r = xor i64 %lo, %hi
  ret i64 %r
}

define i64 @combine_mul_smul_lohi_const_i64(i64 %h) {
; SSE-LABEL: combine_mul_smul_lohi_const_i64:
; SSE:       # %bb.0:
; SSE-NEXT:    movq %rdi, %rax
; SSE-NEXT:    movq %rdi, %rcx
; SSE-NEXT:    sarq $63, %rcx
; SSE-NEXT:    movabsq $-4265267296055464877, %rsi # imm = 0xC4CEB9FE1A85EC53
; SSE-NEXT:    mulq %rsi
; SSE-NEXT:    imulq %rsi, %rcx
; SSE-NEXT:    addq %rdx, %rcx
; SSE-NEXT:    xorq %rcx, %rax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_smul_lohi_const_i64:
; AVX:       # %bb.0:
; AVX-NEXT:    movq %rdi, %rax
; AVX-NEXT:    movq %rdi, %rcx
; AVX-NEXT:    sarq $63, %rcx
; AVX-NEXT:    movabsq $-4265267296055464877, %rsi # imm = 0xC4CEB9FE1A85EC53
; AVX-NEXT:    mulq %rsi
; AVX-NEXT:    imulq %rsi, %rcx
; AVX-NEXT:    addq %rdx, %rcx
; AVX-NEXT:    xorq %rcx, %rax
; AVX-NEXT:    retq
  %h128 = sext i64 %h to i128
  %m128 = mul nsw i128 %h128, 14181476777654086739
  %hi128 = lshr i128 %m128, 64
  %hi = trunc i128 %hi128 to i64
  %lo = mul i64 %h, 14181476777654086739
  %r = xor i64 %lo, %hi
  ret i64 %r
}
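; The i128 constant here is positive, so only %h is sign extended: the high
; half is the unsigned mulq high part plus a single correction term,
; sign_mask(%h) * constant, computed with sarq/imulq and added to RDX.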

; This would infinite loop because DAGCombiner wants to turn this into a shift,
; but x86 lowering wants to avoid non-uniform vector shift amounts.

define <16 x i8> @PR35579(<16 x i8> %x) {
; SSE-LABEL: PR35579:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; SSE-NEXT:    psllw $8, %xmm1
; SSE-NEXT:    pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,0,2,0,4,0,2,0,8,0,2,0,4,0,2,0]
; SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    por %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: PR35579:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX-NEXT:    vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [0,1,2,1,4,1,2,1,8,1,2,1,4,1,2,1]
; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %r = mul <16 x i8> %x, <i8 0, i8 1, i8 2, i8 1, i8 4, i8 1, i8 2, i8 1, i8 8, i8 1, i8 2, i8 1, i8 4, i8 1, i8 2, i8 1>
  ret <16 x i8> %r
}
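; There is no byte-wide multiply, so SSE splits the v16i8 multiply across two
; pmaddubsw ops (odd and even bytes), while AVX2 widens to v16i16, does one
; vpmullw, and packs the result back down to bytes.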

; OSS Fuzz: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=15429
define <4 x i64> @fuzz15429(<4 x i64> %InVec) {
; SSE-LABEL: fuzz15429:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $3, %xmm2
; SSE-NEXT:    psllq $2, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    paddq %xmm0, %xmm0
; SSE-NEXT:    movabsq $9223372036854775807, %rax # imm = 0x7FFFFFFFFFFFFFFF
; SSE-NEXT:    pinsrq $0, %rax, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: fuzz15429:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsllvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    movabsq $9223372036854775807, %rax # imm = 0x7FFFFFFFFFFFFFFF
; AVX-NEXT:    vmovq %rax, %xmm1
; AVX-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX-NEXT:    retq
  %mul = mul <4 x i64> %InVec, <i64 1, i64 2, i64 4, i64 8>
  %I = insertelement <4 x i64> %mul, i64 9223372036854775807, i64 0
  ret <4 x i64> %I
}
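; The power-of-2 multiply is lowered to shifts, and the INT64_MAX insert into
; lane 0 is folded into a pinsrq (SSE) / vmovq + vpblendd (AVX).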