; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -disable-peephole -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86,AVX,AVX1,X86-AVX1
; RUN: llc < %s -disable-peephole -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X86,AVX,AVX2,X86-AVX2
; RUN: llc < %s -disable-peephole -mtriple=i686-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X86,AVX512,X86-AVX512
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64,AVX,AVX1,X64-AVX1
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X64,AVX,AVX2,X64-AVX2
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X64,AVX512,X64-AVX512

; Combine tests involving AVX target shuffles
declare <4 x float> @llvm.x86.avx.vpermil.ps(<4 x float>, i8)
declare <8 x float> @llvm.x86.avx.vpermil.ps.256(<8 x float>, i8)
declare <2 x double> @llvm.x86.avx.vpermil.pd(<2 x double>, i8)
declare <4 x double> @llvm.x86.avx.vpermil.pd.256(<4 x double>, i8)

declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>)
declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>)
declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>)
declare <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>, <4 x i64>)

declare <8 x i32> @llvm.x86.avx.vperm2f128.si.256(<8 x i32>, <8 x i32>, i8)
declare <8 x float> @llvm.x86.avx.vperm2f128.ps.256(<8 x float>, <8 x float>, i8)
declare <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double>, <4 x double>, i8)
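; Applying the same reversing vpermilvar mask twice returns the original
; vector, so the pair of shuffles below should fold away completely.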
define <4 x float> @combine_vpermilvar_4f32_identity(<4 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f32_identity:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
  %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
  ret <4 x float> %2
}

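; A <0,1,0,1> float permute duplicates the low 64 bits and is matched as
; vmovddup; the load variant folds the memory operand into the instruction.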
define <4 x float> @combine_vpermilvar_4f32_movddup(<4 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f32_movddup:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 1, i32 0, i32 1>)
  ret <4 x float> %1
}

define <4 x float> @combine_vpermilvar_4f32_movddup_load(ptr %a0) {
; X86-LABEL: combine_vpermilvar_4f32_movddup_load:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; X86-NEXT:    retl
;
; X64-LABEL: combine_vpermilvar_4f32_movddup_load:
; X64:       # %bb.0:
; X64-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT:    retq
  %1 = load <4 x float>, ptr %a0
  %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> <i32 0, i32 1, i32 0, i32 1>)
  ret <4 x float> %2
}

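; Undef mask elements (as in the movshdup/movsldup tests) do not block the
; single-instruction combines below.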
define <4 x float> @combine_vpermilvar_4f32_movshdup(<4 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f32_movshdup:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 undef, i32 1, i32 3, i32 3>)
  ret <4 x float> %1
}

define <4 x float> @combine_vpermilvar_4f32_movsldup(<4 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f32_movsldup:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 0, i32 2, i32 undef>)
  ret <4 x float> %1
}

define <4 x float> @combine_vpermilvar_4f32_unpckh(<4 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f32_unpckh:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 2, i32 2, i32 3, i32 3>)
  ret <4 x float> %1
}

define <4 x float> @combine_vpermilvar_4f32_unpckl(<4 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f32_unpckl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,0,1,1]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 0, i32 1, i32 1>)
  ret <4 x float> %1
}

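; 256-bit versions of the same combines; vpermilvar permutes each 128-bit
; lane independently, so the per-lane logic carries over directly.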
define <8 x float> @combine_vpermilvar_8f32_identity(<8 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_8f32_identity:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 undef>)
  %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 1>)
  ret <8 x float> %2
}

define <8 x float> @combine_vpermilvar_8f32_10326u4u(<8 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_8f32_10326u4u:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 0, i32 1, i32 2, i32 undef>)
  %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 undef>)
  ret <8 x float> %2
}

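; A per-lane vpermilvar either side of a 128-bit lane swap merges into one
; cross-lane permute: vperm2f128 on AVX1, vpermpd on AVX2 and AVX512.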
define <8 x float> @combine_vpermilvar_vperm2f128_8f32(<8 x float> %a0) {
; AVX1-LABEL: combine_vpermilvar_vperm2f128_8f32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: combine_vpermilvar_vperm2f128_8f32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX2-NEXT:    ret{{[l|q]}}
;
; AVX512-LABEL: combine_vpermilvar_vperm2f128_8f32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX512-NEXT:    ret{{[l|q]}}
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
  %2 = shufflevector <8 x float> %1, <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
  %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %2, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
  ret <8 x float> %3
}

define <8 x float> @combine_vpermilvar_vperm2f128_zero_8f32(<8 x float> %a0) {
; AVX-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; AVX-NEXT:    ret{{[l|q]}}
;
; AVX512-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT:    vmovaps {{.*#+}} ymm1 = [16,17,18,19,3,2,1,0]
; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; AVX512-NEXT:    vpermt2ps %zmm2, %zmm1, %zmm0
; AVX512-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX512-NEXT:    ret{{[l|q]}}
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
  %2 = shufflevector <8 x float> %1, <8 x float> zeroinitializer, <8 x i32> <i32 8, i32 8, i32 8, i32 8, i32 0, i32 1, i32 2, i32 3>
  %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %2, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
  ret <8 x float> %3
}

define <4 x double> @combine_vperm2f128_vpermilvar_as_vperm2f128(<4 x double> %a0) {
; CHECK-LABEL: combine_vperm2f128_vpermilvar_as_vperm2f128:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
  %2 = shufflevector <4 x double> %1, <4 x double> zeroinitializer, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
  %3 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %2, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
  ret <4 x double> %3
}

define <4 x double> @combine_vperm2f128_vpermilvar_as_vmovaps(<4 x double> %a0) {
; CHECK-LABEL: combine_vperm2f128_vpermilvar_as_vmovaps:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovaps %xmm0, %xmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
  %2 = shufflevector <4 x double> %1, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  %3 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %2, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
  ret <4 x double> %3
}

define <8 x float> @combine_vpermilvar_8f32_movddup(<8 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_8f32_movddup:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>)
  ret <8 x float> %1
}

define <8 x float> @combine_vpermilvar_8f32_movddup_load(ptr %a0) {
; X86-LABEL: combine_vpermilvar_8f32_movddup_load:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
; X86-NEXT:    retl
;
; X64-LABEL: combine_vpermilvar_8f32_movddup_load:
; X64:       # %bb.0:
; X64-NEXT:    vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
; X64-NEXT:    retq
  %1 = load <8 x float>, ptr %a0
  %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>)
  ret <8 x float> %2
}

define <8 x float> @combine_vpermilvar_8f32_movshdup(<8 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_8f32_movshdup:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 undef, i32 5, i32 7, i32 7>)
  ret <8 x float> %1
}

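; Demanded-elements tests: mask lanes that only feed unused result elements
; are ignored, so the variable index inserted below cannot change the
; lowering away from vmovshdup/vmovsldup.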
define <8 x float> @demandedelts_vpermilvar_8f32_movshdup(<8 x float> %a0, i32 %a1) {
; CHECK-LABEL: demandedelts_vpermilvar_8f32_movshdup:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = insertelement <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 undef, i32 5, i32 7, i32 7>, i32 %a1, i32 7
  %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %1)
  %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 6>
  ret <8 x float> %3
}

define <8 x float> @combine_vpermilvar_8f32_movsldup(<8 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_8f32_movsldup:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>)
  ret <8 x float> %1
}

define <8 x float> @demandedelts_vpermilvar_8f32_movsldup(<8 x float> %a0, i32 %a1) {
; CHECK-LABEL: demandedelts_vpermilvar_8f32_movsldup:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = insertelement <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>, i32 %a1, i32 0
  %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %1)
  %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x float> %3
}

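; The variable vpermilvar.pd mask selects with bit 1 of each i64 element,
; hence the <2, 0> masks below encode an element swap (and <0, 0> a splat).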
define <2 x double> @combine_vpermilvar_2f64_identity(<2 x double> %a0) {
; CHECK-LABEL: combine_vpermilvar_2f64_identity:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> <i64 2, i64 0>)
  %2 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %1, <2 x i64> <i64 2, i64 0>)
  ret <2 x double> %2
}

define <2 x double> @combine_vpermilvar_2f64_movddup(<2 x double> %a0) {
; CHECK-LABEL: combine_vpermilvar_2f64_movddup:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> <i64 0, i64 0>)
  ret <2 x double> %1
}

define <4 x double> @combine_vpermilvar_4f64_identity(<4 x double> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f64_identity:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
  %2 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %1, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
  ret <4 x double> %2
}

define <4 x double> @combine_vpermilvar_4f64_movddup(<4 x double> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f64_movddup:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 0, i64 0, i64 4, i64 4>)
  ret <4 x double> %1
}

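; Chains of four vpermilvar shuffles should collapse to a single vshufps.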
define <4 x float> @combine_vpermilvar_4f32_4stage(<4 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f32_4stage:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,0,3,1]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
  %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> <i32 2, i32 3, i32 0, i32 1>)
  %3 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>)
  %4 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %3, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
  ret <4 x float> %4
}

define <8 x float> @combine_vpermilvar_8f32_4stage(<8 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_8f32_4stage:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
  %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>)
  %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %2, <8 x i32> <i32 0, i32 2, i32 1, i32 3, i32 0, i32 2, i32 1, i32 3>)
  %4 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %3, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
  ret <8 x float> %4
}

define <4 x float> @combine_vpermilvar_4f32_as_insertps(<4 x float> %a0) {
; CHECK-LABEL: combine_vpermilvar_4f32_as_insertps:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
  %2 = shufflevector <4 x float> %1, <4 x float> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 1, i32 4>
  ret <4 x float> %2
}

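; A blend of two identically permuted vectors is re-expressed as a blend of
; the originals followed by one permute (or a single vpermt2d on AVX512).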
define <8 x i32> @combine_blend_of_permutes_v8i32(<4 x i64> %a0, <4 x i64> %a1) {
; AVX1-LABEL: combine_blend_of_permutes_v8i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: combine_blend_of_permutes_v8i32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX2-NEXT:    ret{{[l|q]}}
;
; AVX512-LABEL: combine_blend_of_permutes_v8i32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT:    vpmovsxbd {{.*#+}} ymm2 = [4,21,6,23,16,1,2,19]
; AVX512-NEXT:    vpermt2d %zmm1, %zmm2, %zmm0
; AVX512-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT:    ret{{[l|q]}}
  %s0 = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
  %s1 = shufflevector <4 x i64> %a1, <4 x i64> undef, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
  %x0 = bitcast <4 x i64> %s0 to <8 x i32>
  %x1 = bitcast <4 x i64> %s1 to <8 x i32>
  %r = shufflevector <8 x i32> %x0, <8 x i32> %x1, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 12, i32 5, i32 6, i32 15>
  ret <8 x i32> %r
}

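; With constant inputs and constant masks the shuffles constant-fold to a
; single load from the constant pool.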
define <2 x double> @constant_fold_vpermilvar_pd() {
; CHECK-LABEL: constant_fold_vpermilvar_pd:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovaps {{.*#+}} xmm0 = [2.0E+0,1.0E+0]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> <double 1.0, double 2.0>, <2 x i64> <i64 2, i64 0>)
  ret <2 x double> %1
}

define <4 x double> @constant_fold_vpermilvar_pd_256() {
; CHECK-LABEL: constant_fold_vpermilvar_pd_256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovaps {{.*#+}} ymm0 = [2.0E+0,1.0E+0,3.0E+0,4.0E+0]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> <double 1.0, double 2.0, double 3.0, double 4.0>, <4 x i64> <i64 2, i64 0, i64 0, i64 2>)
  ret <4 x double> %1
}

define <4 x float> @constant_fold_vpermilvar_ps() {
; CHECK-LABEL: constant_fold_vpermilvar_ps:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovaps {{.*#+}} xmm0 = [4.0E+0,1.0E+0,3.0E+0,2.0E+0]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, <4 x i32> <i32 3, i32 0, i32 2, i32 1>)
  ret <4 x float> %1
}

define <8 x float> @constant_fold_vpermilvar_ps_256() {
; CHECK-LABEL: constant_fold_vpermilvar_ps_256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovaps {{.*#+}} ymm0 = [1.0E+0,1.0E+0,3.0E+0,2.0E+0,5.0E+0,6.0E+0,6.0E+0,6.0E+0]
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, <8 x i32> <i32 4, i32 0, i32 2, i32 1, i32 0, i32 1, i32 1, i32 1>)
  ret <8 x float> %1
}

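; PR39483: a stride-3 element extraction from a <24 x float> load, feeding
; an fmul/fadd chain.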
define void @PR39483() {
; X86-AVX1-LABEL: PR39483:
; X86-AVX1:       # %bb.0: # %entry
; X86-AVX1-NEXT:    vmovups 32, %ymm0
; X86-AVX1-NEXT:    vmovups 64, %ymm1
; X86-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm1[2,3,0,1]
; X86-AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm2[0,1],ymm1[0,3],ymm2[4,5],ymm1[4,7]
; X86-AVX1-NEXT:    vmovups 16, %xmm2
; X86-AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
; X86-AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[1,0],ymm3[2,0],ymm2[5,4],ymm3[6,4]
; X86-AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[0,3],ymm2[6,4],ymm0[4,7]
; X86-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; X86-AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X86-AVX1-NEXT:    vmulps %ymm1, %ymm0, %ymm0
; X86-AVX1-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; X86-AVX1-NEXT:    vmovups %ymm0, (%eax)
;
; X86-AVX2-LABEL: PR39483:
; X86-AVX2:       # %bb.0: # %entry
; X86-AVX2-NEXT:    vmovups 32, %ymm0
; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
; X86-AVX2-NEXT:    vmovaps {{.*#+}} ymm1 = [2,5,0,3,6,u,u,u]
; X86-AVX2-NEXT:    vpermps %ymm0, %ymm1, %ymm0
; X86-AVX2-NEXT:    vpermilps {{.*#+}} ymm1 = mem[0,1,0,3,4,5,4,7]
; X86-AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; X86-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X86-AVX2-NEXT:    vmulps %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT:    vmovups %ymm0, (%eax)
;
; X86-AVX512-LABEL: PR39483:
; X86-AVX512:       # %bb.0: # %entry
; X86-AVX512-NEXT:    vmovups 0, %zmm0
; X86-AVX512-NEXT:    vmovups 64, %ymm1
; X86-AVX512-NEXT:    vmovaps {{.*#+}} ymm2 = [2,5,8,11,14,17,20,23]
; X86-AVX512-NEXT:    vpermi2ps %zmm1, %zmm0, %zmm2
; X86-AVX512-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X86-AVX512-NEXT:    vmulps %ymm0, %ymm2, %ymm1
; X86-AVX512-NEXT:    vaddps %ymm0, %ymm1, %ymm0
; X86-AVX512-NEXT:    vmovups %ymm0, (%eax)
;
; X64-AVX1-LABEL: PR39483:
; X64-AVX1:       # %bb.0: # %entry
; X64-AVX1-NEXT:    vmovups 32, %ymm0
; X64-AVX1-NEXT:    vmovups 64, %ymm1
; X64-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm1[2,3,0,1]
; X64-AVX1-NEXT:    vshufps {{.*#+}} ymm1 = ymm2[0,1],ymm1[0,3],ymm2[4,5],ymm1[4,7]
; X64-AVX1-NEXT:    vmovups 16, %xmm2
; X64-AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
; X64-AVX1-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[1,0],ymm3[2,0],ymm2[5,4],ymm3[6,4]
; X64-AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[0,3],ymm2[6,4],ymm0[4,7]
; X64-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; X64-AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X64-AVX1-NEXT:    vmulps %ymm1, %ymm0, %ymm0
; X64-AVX1-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; X64-AVX1-NEXT:    vmovups %ymm0, (%rax)
;
; X64-AVX2-LABEL: PR39483:
; X64-AVX2:       # %bb.0: # %entry
; X64-AVX2-NEXT:    vmovups 32, %ymm0
; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
; X64-AVX2-NEXT:    vmovaps {{.*#+}} ymm1 = [2,5,0,3,6,u,u,u]
; X64-AVX2-NEXT:    vpermps %ymm0, %ymm1, %ymm0
; X64-AVX2-NEXT:    vpermilps {{.*#+}} ymm1 = mem[0,1,0,3,4,5,4,7]
; X64-AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; X64-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X64-AVX2-NEXT:    vmulps %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT:    vmovups %ymm0, (%rax)
;
; X64-AVX512-LABEL: PR39483:
; X64-AVX512:       # %bb.0: # %entry
; X64-AVX512-NEXT:    vmovups 0, %zmm0
; X64-AVX512-NEXT:    vmovups 64, %ymm1
; X64-AVX512-NEXT:    vmovaps {{.*#+}} ymm2 = [2,5,8,11,14,17,20,23]
; X64-AVX512-NEXT:    vpermi2ps %zmm1, %zmm0, %zmm2
; X64-AVX512-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X64-AVX512-NEXT:    vmulps %ymm0, %ymm2, %ymm1
; X64-AVX512-NEXT:    vaddps %ymm0, %ymm1, %ymm0
; X64-AVX512-NEXT:    vmovups %ymm0, (%rax)
entry:
  %wide.vec = load <24 x float>, ptr null, align 4
  %strided.vec18 = shufflevector <24 x float> %wide.vec, <24 x float> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
  %0 = fmul <8 x float> %strided.vec18, zeroinitializer
  %1 = fadd <8 x float> zeroinitializer, %0
  store <8 x float> %1, ptr undef, align 16
  unreachable
}

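; PR48908: three <4 x double> results, each built from the three inputs
; through two layers of shufflevectors and stored to separate outputs.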
define void @PR48908(<4 x double> %v0, <4 x double> %v1, <4 x double> %v2, ptr noalias %out0, ptr noalias %out1, ptr noalias %out2) {
; X86-AVX1-LABEL: PR48908:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm3
; X86-AVX1-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[0,1,2,2]
; X86-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm1[2,3],ymm2[0,1]
; X86-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm5
; X86-AVX1-NEXT:    vshufpd {{.*#+}} ymm4 = ymm5[1],ymm4[0],ymm5[2],ymm4[3]
; X86-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm5
; X86-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,0,1]
; X86-AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2],ymm3[3]
; X86-AVX1-NEXT:    vmovapd %ymm3, (%edx)
; X86-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm4[2,3,0,1]
; X86-AVX1-NEXT:    vblendpd {{.*#+}} ymm4 = ymm4[0,1],ymm0[2,3]
; X86-AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3]
; X86-AVX1-NEXT:    vmovapd %ymm3, (%ecx)
; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
; X86-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
; X86-AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
; X86-AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[3]
; X86-AVX1-NEXT:    vmovapd %ymm0, (%eax)
; X86-AVX1-NEXT:    vzeroupper
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: PR48908:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm3
; X86-AVX2-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm1[2,3],ymm2[0,1]
; X86-AVX2-NEXT:    vshufpd {{.*#+}} xmm5 = xmm1[1,0]
; X86-AVX2-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm0[0,1],ymm2[0,1]
; X86-AVX2-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,2,2,1]
; X86-AVX2-NEXT:    vblendpd {{.*#+}} ymm3 = ymm6[0],ymm3[1],ymm6[2],ymm3[3]
; X86-AVX2-NEXT:    vmovapd %ymm3, (%edx)
; X86-AVX2-NEXT:    vblendpd {{.*#+}} ymm3 = ymm5[0,1],ymm0[2,3]
; X86-AVX2-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,3,2,0]
; X86-AVX2-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3]
; X86-AVX2-NEXT:    vmovapd %ymm3, (%ecx)
; X86-AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm0
; X86-AVX2-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
; X86-AVX2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
; X86-AVX2-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[3]
; X86-AVX2-NEXT:    vmovapd %ymm0, (%eax)
; X86-AVX2-NEXT:    vzeroupper
; X86-AVX2-NEXT:    retl
;
; X86-AVX512-LABEL: PR48908:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
; X86-AVX512-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
; X86-AVX512-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-AVX512-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm4
; X86-AVX512-NEXT:    vmovapd {{.*#+}} ymm3 = [1,0,2,0,8,0,9,0]
; X86-AVX512-NEXT:    vpermi2pd %zmm2, %zmm1, %zmm3
; X86-AVX512-NEXT:    vmovapd {{.*#+}} ymm5 = [0,0,10,0,2,0,9,0]
; X86-AVX512-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm0[0,1],ymm2[0,1]
; X86-AVX512-NEXT:    vpermt2pd %zmm4, %zmm5, %zmm6
; X86-AVX512-NEXT:    vmovapd %ymm6, (%edx)
; X86-AVX512-NEXT:    vmovapd {{.*#+}} ymm4 = [0,0,3,0,10,0,1,0]
; X86-AVX512-NEXT:    vpermi2pd %zmm0, %zmm3, %zmm4
; X86-AVX512-NEXT:    vmovapd %ymm4, (%ecx)
; X86-AVX512-NEXT:    vbroadcastf128 {{.*#+}} ymm3 = [3,0,11,0,3,0,11,0]
; X86-AVX512-NEXT:    # ymm3 = mem[0,1,0,1]
; X86-AVX512-NEXT:    vpermi2pd %zmm1, %zmm0, %zmm3
; X86-AVX512-NEXT:    vmovapd {{.*#+}} ymm0 = [2,0,8,0,9,0,3,0]
; X86-AVX512-NEXT:    vpermi2pd %zmm3, %zmm2, %zmm0
; X86-AVX512-NEXT:    vmovapd %ymm0, (%eax)
; X86-AVX512-NEXT:    vzeroupper
; X86-AVX512-NEXT:    retl
;
; X64-AVX1-LABEL: PR48908:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm3
; X64-AVX1-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[0,1,2,2]
; X64-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm1[2,3],ymm2[0,1]
; X64-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm5
; X64-AVX1-NEXT:    vshufpd {{.*#+}} ymm4 = ymm5[1],ymm4[0],ymm5[2],ymm4[3]
; X64-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm5
; X64-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,0,1]
; X64-AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2],ymm3[3]
; X64-AVX1-NEXT:    vmovapd %ymm3, (%rdi)
; X64-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm4[2,3,0,1]
; X64-AVX1-NEXT:    vblendpd {{.*#+}} ymm4 = ymm4[0,1],ymm0[2,3]
; X64-AVX1-NEXT:    vblendpd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3]
; X64-AVX1-NEXT:    vmovapd %ymm3, (%rsi)
; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
; X64-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
; X64-AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
; X64-AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[3]
; X64-AVX1-NEXT:    vmovapd %ymm0, (%rdx)
; X64-AVX1-NEXT:    vzeroupper
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: PR48908:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm3
; X64-AVX2-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm1[2,3],ymm2[0,1]
; X64-AVX2-NEXT:    vshufpd {{.*#+}} xmm5 = xmm1[1,0]
; X64-AVX2-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm0[0,1],ymm2[0,1]
; X64-AVX2-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,2,2,1]
; X64-AVX2-NEXT:    vblendpd {{.*#+}} ymm3 = ymm6[0],ymm3[1],ymm6[2],ymm3[3]
; X64-AVX2-NEXT:    vmovapd %ymm3, (%rdi)
; X64-AVX2-NEXT:    vblendpd {{.*#+}} ymm3 = ymm5[0,1],ymm0[2,3]
; X64-AVX2-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,3,2,0]
; X64-AVX2-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3]
; X64-AVX2-NEXT:    vmovapd %ymm3, (%rsi)
; X64-AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm0
; X64-AVX2-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
; X64-AVX2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
; X64-AVX2-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[3]
; X64-AVX2-NEXT:    vmovapd %ymm0, (%rdx)
; X64-AVX2-NEXT:    vzeroupper
; X64-AVX2-NEXT:    retq
;
; X64-AVX512-LABEL: PR48908:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
; X64-AVX512-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
; X64-AVX512-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
; X64-AVX512-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm3
; X64-AVX512-NEXT:    vmovapd {{.*#+}} ymm4 = [1,2,8,9]
; X64-AVX512-NEXT:    vpermi2pd %zmm2, %zmm1, %zmm4
; X64-AVX512-NEXT:    vmovapd {{.*#+}} ymm5 = [0,10,2,9]
; X64-AVX512-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm0[0,1],ymm2[0,1]
; X64-AVX512-NEXT:    vpermt2pd %zmm3, %zmm5, %zmm6
; X64-AVX512-NEXT:    vmovapd %ymm6, (%rdi)
; X64-AVX512-NEXT:    vmovapd {{.*#+}} ymm3 = [0,3,10,1]
; X64-AVX512-NEXT:    vpermi2pd %zmm0, %zmm4, %zmm3
; X64-AVX512-NEXT:    vmovapd %ymm3, (%rsi)
; X64-AVX512-NEXT:    vbroadcastf128 {{.*#+}} ymm3 = [3,11,3,11]
; X64-AVX512-NEXT:    # ymm3 = mem[0,1,0,1]
; X64-AVX512-NEXT:    vpermi2pd %zmm1, %zmm0, %zmm3
; X64-AVX512-NEXT:    vmovapd {{.*#+}} ymm0 = [2,8,9,3]
; X64-AVX512-NEXT:    vpermi2pd %zmm3, %zmm2, %zmm0
; X64-AVX512-NEXT:    vmovapd %ymm0, (%rdx)
; X64-AVX512-NEXT:    vzeroupper
; X64-AVX512-NEXT:    retq
  %t0 = shufflevector <4 x double> %v0, <4 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
  %t1 = shufflevector <4 x double> %v1, <4 x double> %v2, <4 x i32> <i32 1, i32 2, i32 4, i32 5>
  %r0 = shufflevector <4 x double> %t0, <4 x double> %t1, <4 x i32> <i32 0, i32 3, i32 6, i32 1>
  store <4 x double> %r0, ptr %out0, align 32
  %r1 = shufflevector <4 x double> %t0, <4 x double> %t1, <4 x i32> <i32 4, i32 7, i32 2, i32 5>
  store <4 x double> %r1, ptr %out1, align 32
  %t2 = shufflevector <4 x double> %v0, <4 x double> %v1, <4 x i32> <i32 3, i32 7, i32 undef, i32 undef>
  %r2 = shufflevector <4 x double> %t2, <4 x double> %v2, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
  store <4 x double> %r2, ptr %out2, align 32
  ret void
}

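; Shuffles of a vector concatenated with itself should simplify to a single
; permute of the widened register.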
define <4 x i64> @concat_self_v4i64(<2 x i64> %x) {
; AVX1-LABEL: concat_self_v4i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0,0,3,3]
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: concat_self_v4i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,1]
; AVX2-NEXT:    ret{{[l|q]}}
;
; AVX512-LABEL: concat_self_v4i64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,1]
; AVX512-NEXT:    ret{{[l|q]}}
  %cat = shufflevector <2 x i64> %x, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
  %s = shufflevector <4 x i64> %cat, <4 x i64> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x i64> %s
}

define <8 x i32> @concat_self_v8i32(<4 x i32> %x) {
; AVX1-LABEL: concat_self_v8i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[3,2,1,0]
; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[0,2,1,3]
; AVX1-NEXT:    vpaddd %xmm0, %xmm2, %xmm2
; AVX1-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: concat_self_v8i32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm1
; AVX2-NEXT:    vpmovsxbd {{.*#+}} ymm2 = [3,2,1,0,0,2,1,3]
; AVX2-NEXT:    vpermd %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    ret{{[l|q]}}
;
; AVX512-LABEL: concat_self_v8i32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm1
; AVX512-NEXT:    vpmovsxbd {{.*#+}} ymm2 = [3,2,1,0,0,2,1,3]
; AVX512-NEXT:    vpermd %ymm0, %ymm2, %ymm0
; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX512-NEXT:    ret{{[l|q]}}
  %cat = shufflevector <4 x i32> %x, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
  %s = shufflevector <8 x i32> %cat, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 0, i32 2, i32 1, i32 3>
  %a = add <8 x i32> %s, %cat
  ret <8 x i32> %a
}

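; Bit-reversal reordering of 16 i64 elements; AVX512 handles the whole
; permutation with just two vpermi2q instructions.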
define <16 x i64> @bit_reversal_permutation(<16 x i64> %a0) nounwind {
; X86-AVX1-LABEL: bit_reversal_permutation:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    pushl %ebp
; X86-AVX1-NEXT:    movl %esp, %ebp
; X86-AVX1-NEXT:    andl $-32, %esp
; X86-AVX1-NEXT:    subl $32, %esp
; X86-AVX1-NEXT:    vmovaps 8(%ebp), %ymm5
; X86-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm2[2,3],ymm5[2,3]
; X86-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm0[2,3],ymm1[2,3]
; X86-AVX1-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm6[0],ymm3[0],ymm6[2],ymm3[2]
; X86-AVX1-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm6[1],ymm3[1],ymm6[3],ymm3[3]
; X86-AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
; X86-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; X86-AVX1-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; X86-AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
; X86-AVX1-NEXT:    vmovaps %ymm4, %ymm1
; X86-AVX1-NEXT:    movl %ebp, %esp
; X86-AVX1-NEXT:    popl %ebp
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: bit_reversal_permutation:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    pushl %ebp
; X86-AVX2-NEXT:    movl %esp, %ebp
; X86-AVX2-NEXT:    andl $-32, %esp
; X86-AVX2-NEXT:    subl $32, %esp
; X86-AVX2-NEXT:    vmovaps 8(%ebp), %ymm6
; X86-AVX2-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm0[2,3],ymm1[2,3]
; X86-AVX2-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm2[0],ymm6[0],ymm2[2],ymm6[2]
; X86-AVX2-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,2,2,3]
; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm3[0,1],ymm4[2,3],ymm3[4,5],ymm4[6,7]
; X86-AVX2-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm2[2,3],ymm6[2,3]
; X86-AVX2-NEXT:    vunpckhpd {{.*#+}} ymm5 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; X86-AVX2-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[2,1,3,3]
; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3],ymm5[4,5],ymm3[6,7]
; X86-AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm5
; X86-AVX2-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm2[0],xmm6[0]
; X86-AVX2-NEXT:    vpermpd {{.*#+}} ymm7 = ymm7[0,0,2,1]
; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm7[2,3],ymm5[4,5],ymm7[6,7]
; X86-AVX2-NEXT:    vinsertf128 $1, %xmm6, %ymm2, %ymm2
; X86-AVX2-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; X86-AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
; X86-AVX2-NEXT:    vmovaps %ymm5, %ymm0
; X86-AVX2-NEXT:    vmovaps %ymm4, %ymm1
; X86-AVX2-NEXT:    movl %ebp, %esp
; X86-AVX2-NEXT:    popl %ebp
; X86-AVX2-NEXT:    retl
;
; AVX512-LABEL: bit_reversal_permutation:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm2 = [0,8,4,12,2,10,6,14]
; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [1,9,5,13,3,11,7,15]
; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm3
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm0
; AVX512-NEXT:    vmovdqa64 %zmm3, %zmm1
; AVX512-NEXT:    ret{{[l|q]}}
;
; X64-AVX1-LABEL: bit_reversal_permutation:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm2[2,3],ymm3[2,3]
; X64-AVX1-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm0[2,3],ymm1[2,3]
; X64-AVX1-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm6[0],ymm5[0],ymm6[2],ymm5[2]
; X64-AVX1-NEXT:    vunpckhpd {{.*#+}} ymm5 = ymm6[1],ymm5[1],ymm6[3],ymm5[3]
; X64-AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
; X64-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; X64-AVX1-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; X64-AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
; X64-AVX1-NEXT:    vmovaps %ymm4, %ymm1
; X64-AVX1-NEXT:    vmovaps %ymm5, %ymm3
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: bit_reversal_permutation:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[2,3],ymm1[2,3]
; X64-AVX2-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
; X64-AVX2-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[0,2,2,3]
; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5],ymm5[6,7]
; X64-AVX2-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm2[2,3],ymm3[2,3]
; X64-AVX2-NEXT:    vunpckhpd {{.*#+}} ymm6 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; X64-AVX2-NEXT:    vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
; X64-AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm6
; X64-AVX2-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm2[0],xmm3[0]
; X64-AVX2-NEXT:    vpermpd {{.*#+}} ymm7 = ymm7[0,0,2,1]
; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5],ymm7[6,7]
; X64-AVX2-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
; X64-AVX2-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; X64-AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
; X64-AVX2-NEXT:    vmovaps %ymm6, %ymm0
; X64-AVX2-NEXT:    vmovaps %ymm4, %ymm1
; X64-AVX2-NEXT:    vmovaps %ymm5, %ymm3
; X64-AVX2-NEXT:    retq
  %v0 = shufflevector <16 x i64> %a0, <16 x i64> undef, <16 x i32> <i32 0, i32 1, i32 4, i32 5, i32 2, i32 3, i32 6, i32 7, i32 8, i32 9, i32 12, i32 13, i32 10, i32 11, i32 14, i32 15>
  %v1 = shufflevector <16 x i64> %v0, <16 x i64> undef, <16 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14, i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
  ret <16 x i64> %v1
}