; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck --check-prefixes=AVX2,AVX2-SLOW %s
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck --check-prefixes=AVX2,AVX2-FAST %s
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck --check-prefixes=AVX2,AVX2-FAST %s

; These patterns are produced by LoopVectorizer for interleaved loads.
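;
; For reference, the kind of scalar loop that the LoopVectorizer turns into
; this stride-2 pattern might look like the following (hypothetical C sketch,
; not taken from the original source):
;
;   void deinterleave2(const short *in, short *even, short *odd, int n) {
;     for (int i = 0; i < n; ++i) {
;       even[i] = in[2 * i];     // becomes %strided.vec0 (even lanes)
;       odd[i]  = in[2 * i + 1]; // becomes %strided.vec1 (odd lanes)
;     }
;   }
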
define void @vf2(<4 x i16>* %in.vec, <2 x i16>* %out.vec0, <2 x i16>* %out.vec1) nounwind {
; AVX2-LABEL: vf2:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
; AVX2-NEXT: vmovd %xmm1, (%rsi)
; AVX2-NEXT: vmovd %xmm0, (%rdx)
; AVX2-NEXT: retq
  %wide.vec = load <4 x i16>, <4 x i16>* %in.vec, align 32
  %strided.vec0 = shufflevector <4 x i16> %wide.vec, <4 x i16> poison, <2 x i32> <i32 0, i32 2>
  %strided.vec1 = shufflevector <4 x i16> %wide.vec, <4 x i16> poison, <2 x i32> <i32 1, i32 3>
  store <2 x i16> %strided.vec0, <2 x i16>* %out.vec0, align 32
  store <2 x i16> %strided.vec1, <2 x i16>* %out.vec1, align 32
  ret void
}

define void @vf4(<8 x i16>* %in.vec, <4 x i16>* %out.vec0, <4 x i16>* %out.vec1) nounwind {
; AVX2-LABEL: vf4:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vmovq %xmm1, (%rsi)
; AVX2-NEXT: vmovq %xmm0, (%rdx)
; AVX2-NEXT: retq
  %wide.vec = load <8 x i16>, <8 x i16>* %in.vec, align 32
  %strided.vec0 = shufflevector <8 x i16> %wide.vec, <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %strided.vec1 = shufflevector <8 x i16> %wide.vec, <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  store <4 x i16> %strided.vec0, <4 x i16>* %out.vec0, align 32
  store <4 x i16> %strided.vec1, <4 x i16>* %out.vec1, align 32
  ret void
}

define void @vf8(<16 x i16>* %in.vec, <8 x i16>* %out.vec0, <8 x i16>* %out.vec1) nounwind {
; AVX2-LABEL: vf8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-NEXT: vmovdqa 16(%rdi), %xmm2
; AVX2-NEXT: vpblendw {{.*#+}} xmm3 = xmm2[0],xmm0[1],xmm2[2],xmm0[3],xmm2[4],xmm0[5],xmm2[6],xmm0[7]
; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
; AVX2-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; AVX2-NEXT: vmovdqa %xmm0, (%rsi)
; AVX2-NEXT: vmovdqa %xmm1, (%rdx)
; AVX2-NEXT: retq
  %wide.vec = load <16 x i16>, <16 x i16>* %in.vec, align 32
  %strided.vec0 = shufflevector <16 x i16> %wide.vec, <16 x i16> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %strided.vec1 = shufflevector <16 x i16> %wide.vec, <16 x i16> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  store <8 x i16> %strided.vec0, <8 x i16>* %out.vec0, align 32
  store <8 x i16> %strided.vec1, <8 x i16>* %out.vec1, align 32
  ret void
}

define void @vf16(<32 x i16>* %in.vec, <16 x i16>* %out.vec0, <16 x i16>* %out.vec1) nounwind {
; AVX2-SLOW-LABEL: vf16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm2 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm3 = ymm0[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm3[0,2],ymm2[0,2],ymm3[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-SLOW-NEXT: vmovaps %ymm2, (%rsi)
; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rdx)
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: vf16:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = <0,1,4,5,u,u,u,u,8,9,12,13,u,u,u,u,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u>
; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm1, %ymm3
; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm0, %ymm2
; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm3[0,2],ymm2[4,6],ymm3[4,6]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31]
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-FAST-NEXT: vmovaps %ymm2, (%rsi)
; AVX2-FAST-NEXT: vmovdqa %ymm0, (%rdx)
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
  %wide.vec = load <32 x i16>, <32 x i16>* %in.vec, align 32
  %strided.vec0 = shufflevector <32 x i16> %wide.vec, <32 x i16> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
  %strided.vec1 = shufflevector <32 x i16> %wide.vec, <32 x i16> poison, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
  store <16 x i16> %strided.vec0, <16 x i16>* %out.vec0, align 32
  store <16 x i16> %strided.vec1, <16 x i16>* %out.vec1, align 32
  ret void
}

define void @vf32(<64 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.vec1) nounwind {
; AVX2-SLOW-LABEL: vf32:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-SLOW-NEXT: vmovdqa 64(%rdi), %ymm2
; AVX2-SLOW-NEXT: vmovdqa 96(%rdi), %ymm3
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm4 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm5 = ymm0[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm5[0,2],ymm4[0,2],ymm5[4,6],ymm4[4,6]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,1,3]
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm5 = ymm3[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm6 = ymm2[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm6[0,2],ymm5[0,2],ymm6[4,6],ymm5[4,6]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,1,3]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31>
; AVX2-SLOW-NEXT: vpshufb %ymm6, %ymm1, %ymm1
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm7 = <2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u>
; AVX2-SLOW-NEXT: vpshufb %ymm7, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-SLOW-NEXT: vpshufb %ymm6, %ymm3, %ymm1
; AVX2-SLOW-NEXT: vpshufb %ymm7, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
; AVX2-SLOW-NEXT: vmovaps %ymm5, 32(%rsi)
; AVX2-SLOW-NEXT: vmovaps %ymm4, (%rsi)
; AVX2-SLOW-NEXT: vmovdqa %ymm1, 32(%rdx)
; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rdx)
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: vf32:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-FAST-NEXT: vmovdqa 64(%rdi), %ymm2
; AVX2-FAST-NEXT: vmovdqa 96(%rdi), %ymm3
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = <0,1,4,5,u,u,u,u,8,9,12,13,u,u,u,u,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u>
; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm1, %ymm5
; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm0, %ymm6
; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm5 = ymm6[0,2],ymm5[0,2],ymm6[4,6],ymm5[4,6]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,1,3]
; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm3, %ymm6
; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm2, %ymm4
; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,2],ymm6[0,2],ymm4[4,6],ymm6[4,6]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,1,3]
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31>
; AVX2-FAST-NEXT: vpshufb %ymm6, %ymm1, %ymm1
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = <2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u>
; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-FAST-NEXT: vpshufb %ymm6, %ymm3, %ymm1
; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm2, %ymm2
; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
; AVX2-FAST-NEXT: vmovaps %ymm4, 32(%rsi)
; AVX2-FAST-NEXT: vmovaps %ymm5, (%rsi)
; AVX2-FAST-NEXT: vmovdqa %ymm1, 32(%rdx)
; AVX2-FAST-NEXT: vmovdqa %ymm0, (%rdx)
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
  %wide.vec = load <64 x i16>, <64 x i16>* %in.vec, align 32
  %strided.vec0 = shufflevector <64 x i16> %wide.vec, <64 x i16> poison, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
  %strided.vec1 = shufflevector <64 x i16> %wide.vec, <64 x i16> poison, <32 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
  store <32 x i16> %strided.vec0, <32 x i16>* %out.vec0, align 32
  store <32 x i16> %strided.vec1, <32 x i16>* %out.vec1, align 32
  ret void
}