1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=CHECK,X86
3 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=CHECK,X64
5 declare <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
6 declare <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
; Two chained full-mask (i16 -1) vpermt2var permutes whose index vectors
; compose to the identity on %x0: the first reverses the 16 lanes of %x0
; (indices 15..0 all < 16 select the first source), the second undoes it
; across both copies of %res0 (indices 16..31 alias the second copy, which
; is also %res0).  The visible CHECK body is just the return, i.e. the
; whole sequence is expected to fold away.
8 define <16 x i16> @combine_vpermt2var_16i16_identity(<16 x i16> %x0, <16 x i16> %x1) {
9 ; CHECK-LABEL: combine_vpermt2var_16i16_identity:
11 ; CHECK-NEXT: ret{{[l|q]}}
12 %res0 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> <i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <16 x i16> %x0, <16 x i16> %x1, i16 -1)
13 %res1 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> <i16 15, i16 30, i16 13, i16 28, i16 11, i16 26, i16 9, i16 24, i16 7, i16 22, i16 5, i16 20, i16 3, i16 18, i16 1, i16 16>, <16 x i16> %res0, <16 x i16> %res0, i16 -1)
; Same inverse-permutation pair as the unmasked test above, but with a live
; zeroing mask %m on both calls: the permutes cannot be elided because
; masked-off lanes must still be zeroed ({%k1} {z}), so the expected output
; keeps two maskz vpermi2w instructions with the original index vectors.
; The X86 and X64 bodies differ only in how %m reaches %k1: X86 loads it
; from the stack with kmovw, X64 takes it in %edi via kmovd.
16 define <16 x i16> @combine_vpermt2var_16i16_identity_mask(<16 x i16> %x0, <16 x i16> %x1, i16 %m) {
17 ; X86-LABEL: combine_vpermt2var_16i16_identity_mask:
19 ; X86-NEXT: vmovdqa {{.*#+}} ymm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
20 ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
21 ; X86-NEXT: vpermi2w %ymm0, %ymm0, %ymm1 {%k1} {z}
22 ; X86-NEXT: vmovdqa {{.*#+}} ymm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
23 ; X86-NEXT: vpermi2w %ymm1, %ymm1, %ymm0 {%k1} {z}
26 ; X64-LABEL: combine_vpermt2var_16i16_identity_mask:
28 ; X64-NEXT: vmovdqa {{.*#+}} ymm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
29 ; X64-NEXT: kmovd %edi, %k1
30 ; X64-NEXT: vpermi2w %ymm0, %ymm0, %ymm1 {%k1} {z}
31 ; X64-NEXT: vmovdqa {{.*#+}} ymm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
32 ; X64-NEXT: vpermi2w %ymm1, %ymm1, %ymm0 {%k1} {z}
34 %res0 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> <i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <16 x i16> %x0, <16 x i16> %x1, i16 %m)
35 %res1 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> <i16 15, i16 30, i16 13, i16 28, i16 11, i16 26, i16 9, i16 24, i16 7, i16 22, i16 5, i16 20, i16 3, i16 18, i16 1, i16 16>, <16 x i16> %res0, <16 x i16> %res0, i16 %m)
; Two full-mask vpermi2var calls whose composition only ever reads lanes of
; %x0 (every surviving index resolves into the first source), so the pair
; should combine into a single one-source vpermw with the composed index
; vector [15,0,14,1,...] shown in the CHECK lines.
39 define <16 x i16> @combine_vpermi2var_16i16_as_permw(<16 x i16> %x0, <16 x i16> %x1) {
40 ; CHECK-LABEL: combine_vpermi2var_16i16_as_permw:
42 ; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [15,0,14,1,13,2,12,3,11,4,10,5,9,6,8,7]
43 ; CHECK-NEXT: vpermw %ymm0, %ymm1, %ymm0
44 ; CHECK-NEXT: ret{{[l|q]}}
45 %res0 = call <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16> %x0, <16 x i16> <i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <16 x i16> %x1, i16 -1)
46 %res1 = call <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16> %res0, <16 x i16> <i16 0, i16 15, i16 1, i16 14, i16 2, i16 13, i16 3, i16 12, i16 4, i16 11, i16 5, i16 10, i16 6, i16 9, i16 7, i16 8>, <16 x i16> %res0, i16 -1)
; A full-mask vpermi2var followed by a full-mask vpermt2var: both still need
; two sources (%x0 and %x1), so the pair combines into ONE two-source
; vpermt2w with the composed index vector.  E.g. lane 3 of the second
; permute reads index 18 -> lane 2 of %res0 -> lane 2 of %x0, which is why
; the composed mask is [0,31,2,2,...] with lane 2 repeated.
50 define <16 x i16> @combine_vpermt2var_vpermi2var_16i16_as_vperm2(<16 x i16> %x0, <16 x i16> %x1) {
51 ; CHECK-LABEL: combine_vpermt2var_vpermi2var_16i16_as_vperm2:
53 ; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [0,31,2,2,4,29,6,27,8,25,10,23,12,21,14,19]
54 ; CHECK-NEXT: vpermt2w %ymm1, %ymm2, %ymm0
55 ; CHECK-NEXT: ret{{[l|q]}}
56 %res0 = call <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16> %x0, <16 x i16> <i16 0, i16 31, i16 2, i16 29, i16 4, i16 27, i16 6, i16 25, i16 8, i16 23, i16 10, i16 21, i16 12, i16 19, i16 14, i16 17>, <16 x i16> %x1, i16 -1)
57 %res1 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> <i16 0, i16 17, i16 2, i16 18, i16 4, i16 19, i16 6, i16 21, i16 8, i16 23, i16 10, i16 25, i16 12, i16 27, i16 14, i16 29>, <16 x i16> %res0, <16 x i16> %res0, i16 -1)
; A single full-mask vpermi2var whose indices interleave the high words of
; each 128-bit half (20,4,21,5,... / 28,12,...) is recognized as a plain
; vpunpckhwd; note the CHECK line has the register operands swapped
; (ymm1 lanes first) to match the index pattern starting with source 2.
61 define <16 x i16> @combine_vpermt2var_vpermi2var_16i16_as_unpckhwd(<16 x i16> %a0, <16 x i16> %a1) {
62 ; CHECK-LABEL: combine_vpermt2var_vpermi2var_16i16_as_unpckhwd:
64 ; CHECK-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
65 ; CHECK-NEXT: ret{{[l|q]}}
66 %res0 = call <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16> %a0, <16 x i16> <i16 20, i16 4, i16 21, i16 5, i16 22, i16 6, i16 23, i16 7, i16 28, i16 12, i16 29, i16 13, i16 30, i16 14, i16 31, i16 15>, <16 x i16> %a1, i16 -1)
; Counterpart of the unpckhwd test: a full-mask vpermt2var whose indices
; interleave the LOW words of each 128-bit half (0,16,1,17,... / 8,24,...)
; is recognized as vpunpcklwd, this time with %a0 lanes first.
70 define <16 x i16> @combine_vpermt2var_vpermi2var_16i16_as_unpcklwd(<16 x i16> %a0, <16 x i16> %a1) {
71 ; CHECK-LABEL: combine_vpermt2var_vpermi2var_16i16_as_unpcklwd:
73 ; CHECK-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
74 ; CHECK-NEXT: ret{{[l|q]}}
75 %res0 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> <i16 0, i16 16, i16 1, i16 17, i16 2, i16 18, i16 3, i16 19, i16 8, i16 24, i16 9, i16 25, i16 10, i16 26, i16 11, i16 27>, <16 x i16> %a0, <16 x i16> %a1, i16 -1)
; fshr(x, x, 48) is a 64-bit rotate-right by 48; composed with the
; byte-reversing shufflevector, both operations are byte-granular and are
; expected to fold into a single vpshufb with the combined byte mask shown
; in the CHECK line.
79 define <16 x i8> @combine_shuffle_vrotri_v2i64(<2 x i64> %a0) {
80 ; CHECK-LABEL: combine_shuffle_vrotri_v2i64:
82 ; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[13,12,11,10,9,8,15,14,5,4,3,2,1,0,7,6]
83 ; CHECK-NEXT: ret{{[l|q]}}
84 %1 = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %a0, <2 x i64> %a0, <2 x i64> <i64 48, i64 48>)
85 %2 = bitcast <2 x i64> %1 to <16 x i8>
86 %3 = shufflevector <16 x i8> %2, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
89 declare <2 x i64> @llvm.fshr.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
; fshl(x, x, 8) is a 32-bit rotate-left by 8; like the v2i64 test above,
; rotate-by-a-multiple-of-8 plus the byte-reverse shuffle folds into one
; vpshufb with the combined byte mask.
91 define <16 x i8> @combine_shuffle_vrotli_v4i32(<4 x i32> %a0) {
92 ; CHECK-LABEL: combine_shuffle_vrotli_v4i32:
94 ; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[14,13,12,15,10,9,8,11,6,5,4,7,2,1,0,3]
95 ; CHECK-NEXT: ret{{[l|q]}}
96 %1 = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %a0, <4 x i32> %a0, <4 x i32> <i32 8, i32 8, i32 8, i32 8>)
97 %2 = bitcast <4 x i32> %1 to <16 x i8>
98 %3 = shufflevector <16 x i8> %2, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
101 declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
; Reduced reproducer for PR46178.  The loads from null and undef pointers
; are intentional artifacts of the reduction (hence the literal
; 'vmovdqu 0' address in the CHECK lines).  Each i64 vector is truncated
; to <4 x i16>, then shl-8 / ashr-8 sign-extends the low byte of every
; element; results go to the first two <4 x i16> slots at %0 and zeros to
; the next two.  X86 and X64 recombine the halves differently (vshufpd vs
; vinserti128 + vpermq), which is what the two check prefixes pin down.
103 define void @PR46178(i16* %0) {
104 ; X86-LABEL: PR46178:
106 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
107 ; X86-NEXT: vmovdqu 0, %ymm0
108 ; X86-NEXT: vmovdqu (%eax), %ymm1
109 ; X86-NEXT: vpmovqw %ymm0, %xmm0
110 ; X86-NEXT: vpmovqw %ymm1, %xmm1
111 ; X86-NEXT: vpsllw $8, %xmm1, %xmm1
112 ; X86-NEXT: vpsraw $8, %xmm1, %xmm1
113 ; X86-NEXT: vpsllw $8, %xmm0, %xmm0
114 ; X86-NEXT: vpsraw $8, %xmm0, %xmm0
115 ; X86-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[3]
116 ; X86-NEXT: vmovupd %ymm0, (%eax)
117 ; X86-NEXT: vzeroupper
120 ; X64-LABEL: PR46178:
122 ; X64-NEXT: vmovdqu 0, %ymm0
123 ; X64-NEXT: vmovdqu (%rax), %ymm1
124 ; X64-NEXT: vpmovqw %ymm0, %xmm0
125 ; X64-NEXT: vpmovqw %ymm1, %xmm1
126 ; X64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
127 ; X64-NEXT: vpsllw $8, %ymm0, %ymm0
128 ; X64-NEXT: vpsraw $8, %ymm0, %ymm0
129 ; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
130 ; X64-NEXT: vmovdqa %xmm0, %xmm0
131 ; X64-NEXT: vmovdqu %ymm0, (%rdi)
132 ; X64-NEXT: vzeroupper
134 %2 = load <4 x i64>, <4 x i64>* null, align 8
135 %3 = load <4 x i64>, <4 x i64>* undef, align 8
136 %4 = trunc <4 x i64> %2 to <4 x i16>
137 %5 = trunc <4 x i64> %3 to <4 x i16>
138 %6 = shl <4 x i16> %4, <i16 8, i16 8, i16 8, i16 8>
139 %7 = shl <4 x i16> %5, <i16 8, i16 8, i16 8, i16 8>
140 %8 = ashr exact <4 x i16> %6, <i16 8, i16 8, i16 8, i16 8>
141 %9 = ashr exact <4 x i16> %7, <i16 8, i16 8, i16 8, i16 8>
142 %10 = bitcast i16* %0 to <4 x i16>*
143 %11 = getelementptr inbounds i16, i16* %0, i64 4
144 %12 = bitcast i16* %11 to <4 x i16>*
145 %13 = getelementptr inbounds i16, i16* %0, i64 8
146 %14 = bitcast i16* %13 to <4 x i16>*
147 %15 = getelementptr inbounds i16, i16* %0, i64 12
148 %16 = bitcast i16* %15 to <4 x i16>*
149 store <4 x i16> %8, <4 x i16>* %10, align 2
150 store <4 x i16> %9, <4 x i16>* %12, align 2
151 store <4 x i16> zeroinitializer, <4 x i16>* %14, align 2
152 store <4 x i16> zeroinitializer, <4 x i16>* %16, align 2
156 define <8 x i32> @PR46393(<8 x i16> %a0, i8 %a1) {
157 ; X86-LABEL: PR46393:
159 ; X86-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
160 ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
161 ; X86-NEXT: kmovd %eax, %k1
162 ; X86-NEXT: vpslld $16, %ymm0, %ymm0 {%k1} {z}
165 ; X64-LABEL: PR46393:
167 ; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
168 ; X64-NEXT: kmovd %edi, %k1
169 ; X64-NEXT: vpslld $16, %ymm0, %ymm0 {%k1} {z}
171 %zext = sext <8 x i16> %a0 to <8 x i32>
172 %mask = bitcast i8 %a1 to <8 x i1>
173 %shl = shl nuw <8 x i32> %zext, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
174 %sel = select <8 x i1> %mask, <8 x i32> %shl, <8 x i32> zeroinitializer