; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX

; Combine tests involving SSE41 target shuffles (BLEND,INSERTPS,MOVZX)

declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)
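
; Mask bytes with the high bit set (-1) zero the corresponding output byte,
; and undef bytes are free to match anything, so this pshufb is a dword-to-qword
; zero-extend of the low two elements and should combine to a single (v)pmovzxdq.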
define <16 x i8> @combine_vpshufb_as_movzx(<16 x i8> %a0) {
; SSE-LABEL: combine_vpshufb_as_movzx:
; SSE:       # %bb.0:
; SSE-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vpshufb_as_movzx:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT:    retq
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 undef, i8 undef, i8 undef, i8 undef, i8 -1, i8 -1, i8 -1, i8 -1>)
  ret <16 x i8> %res0
}
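
; PR50049: multiply of two stride-3 selections from 48-byte loads. The v16i8
; mul is legalized by widening to v8i16, doing two pmullw halves, masking each
; result back to the low bytes, and repacking with packuswb.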
define <16 x i8> @PR50049(ptr %p1, ptr %p2) {
; SSE-LABEL: PR50049:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa (%rdi), %xmm2
; SSE-NEXT:    movdqa 16(%rdi), %xmm0
; SSE-NEXT:    movdqa 32(%rdi), %xmm1
; SSE-NEXT:    movdqa (%rsi), %xmm4
; SSE-NEXT:    movdqa 16(%rsi), %xmm5
; SSE-NEXT:    movdqa 32(%rsi), %xmm3
; SSE-NEXT:    movdqa {{.*#+}} xmm6 = <128,128,128,128,128,128,2,5,8,11,14,u,u,u,u,u>
; SSE-NEXT:    pshufb %xmm6, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm7 = <0,3,6,9,12,15,128,128,128,128,128,u,u,u,u,u>
; SSE-NEXT:    pshufb %xmm7, %xmm2
; SSE-NEXT:    por %xmm0, %xmm2
; SSE-NEXT:    pshufb %xmm6, %xmm5
; SSE-NEXT:    pshufb %xmm7, %xmm4
; SSE-NEXT:    por %xmm5, %xmm4
; SSE-NEXT:    pmovzxbw {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
; SSE-NEXT:    pmovzxbw {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; SSE-NEXT:    pmullw %xmm5, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255]
; SSE-NEXT:    pand %xmm5, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm6 = <8,u,9,u,10,u,128,u,128,u,128,u,128,u,128,u>
; SSE-NEXT:    pshufb %xmm6, %xmm4
; SSE-NEXT:    movdqa {{.*#+}} xmm7 = <128,u,128,u,128,u,1,u,4,u,7,u,10,u,13,u>
; SSE-NEXT:    pshufb %xmm7, %xmm3
; SSE-NEXT:    por %xmm4, %xmm3
; SSE-NEXT:    pshufb %xmm6, %xmm2
; SSE-NEXT:    pshufb %xmm7, %xmm1
; SSE-NEXT:    por %xmm2, %xmm1
; SSE-NEXT:    pmullw %xmm3, %xmm1
; SSE-NEXT:    pand %xmm5, %xmm1
; SSE-NEXT:    packuswb %xmm1, %xmm0
; SSE-NEXT:    retq
  %x1 = load <48 x i8>, ptr %p1, align 16
  %x2 = load <48 x i8>, ptr %p2, align 16
  %s1 = shufflevector <48 x i8> %x1, <48 x i8> poison, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
  %s2 = shufflevector <48 x i8> %x2, <48 x i8> poison, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
  %r = mul <16 x i8> %s1, %s2
  ret <16 x i8> %r
}