; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12

; These patterns are produced by the LoopVectorizer for interleaved stores.
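;
; As a rough sketch, the scalar source that the LoopVectorizer would turn
; into these shuffle patterns looks like the following (illustrative C only;
; the function and parameter names are hypothetical, not part of this test):
;
;   void store_stride2(const char *in0, const char *in1, char *out, int n) {
;     for (int i = 0; i != n; ++i) {
;       out[2 * i + 0] = in0[i]; // even output bytes from the first source
;       out[2 * i + 1] = in1[i]; // odd output bytes from the second source
;     }
;   }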

define void @store_i8_stride2_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %out.vec) nounwind {
; SSE-LABEL: store_i8_stride2_vf2:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa (%rdi), %xmm0
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
; SSE-NEXT:    movd %xmm0, (%rdx)
; SSE-NEXT:    retq
;
; AVX-LABEL: store_i8_stride2_vf2:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovdqa (%rdi), %xmm0
; AVX-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
; AVX-NEXT:    vmovd %xmm0, (%rdx)
; AVX-NEXT:    retq
  %in.vec0 = load <2 x i8>, ptr %in.vecptr0, align 64
  %in.vec1 = load <2 x i8>, ptr %in.vecptr1, align 64
  %1 = shufflevector <2 x i8> %in.vec0, <2 x i8> %in.vec1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %interleaved.vec = shufflevector <4 x i8> %1, <4 x i8> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  store <4 x i8> %interleaved.vec, ptr %out.vec, align 64
  ret void
}
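
; vf4 follows the same single-punpcklbw pattern as vf2, but now the low 8
; interleaved bytes are live, so the result is stored with movq rather than
; movd.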
define void @store_i8_stride2_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %out.vec) nounwind {
; SSE-LABEL: store_i8_stride2_vf4:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa (%rdi), %xmm0
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
; SSE-NEXT:    movq %xmm0, (%rdx)
; SSE-NEXT:    retq
;
; AVX-LABEL: store_i8_stride2_vf4:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovdqa (%rdi), %xmm0
; AVX-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
; AVX-NEXT:    vmovq %xmm0, (%rdx)
; AVX-NEXT:    retq
  %in.vec0 = load <4 x i8>, ptr %in.vecptr0, align 64
  %in.vec1 = load <4 x i8>, ptr %in.vecptr1, align 64
  %1 = shufflevector <4 x i8> %in.vec0, <4 x i8> %in.vec1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %interleaved.vec = shufflevector <8 x i8> %1, <8 x i8> poison, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
  store <8 x i8> %interleaved.vec, ptr %out.vec, align 64
  ret void
}
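
; At vf8 the two 8-byte inputs fill a whole 16-byte result: both sources are
; loaded with movq and one punpcklbw yields a single full xmm store.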
define void @store_i8_stride2_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %out.vec) nounwind {
; SSE-LABEL: store_i8_stride2_vf8:
; SSE:       # %bb.0:
; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT:    movdqa %xmm1, (%rdx)
; SSE-NEXT:    retq
;
; AVX-LABEL: store_i8_stride2_vf8:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX-NEXT:    vmovdqa %xmm0, (%rdx)
; AVX-NEXT:    retq
  %in.vec0 = load <8 x i8>, ptr %in.vecptr0, align 64
  %in.vec1 = load <8 x i8>, ptr %in.vecptr1, align 64
  %1 = shufflevector <8 x i8> %in.vec0, <8 x i8> %in.vec1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %interleaved.vec = shufflevector <16 x i8> %1, <16 x i8> poison, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
  store <16 x i8> %interleaved.vec, ptr %out.vec, align 64
  ret void
}
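
; vf16 is the first width that needs both unpack halves: SSE and AVX1 emit
; punpcklbw/punpckhbw with two xmm stores, while AVX2 concatenates the inputs
; into one ymm and interleaves it with vpermq + vpshufb.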
define void @store_i8_stride2_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %out.vec) nounwind {
; SSE-LABEL: store_i8_stride2_vf16:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa (%rdi), %xmm0
; SSE-NEXT:    movdqa (%rsi), %xmm1
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; SSE-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; SSE-NEXT:    movdqa %xmm0, 16(%rdx)
; SSE-NEXT:    movdqa %xmm2, (%rdx)
; SSE-NEXT:    retq
;
; AVX1-ONLY-LABEL: store_i8_stride2_vf16:
; AVX1-ONLY:       # %bb.0:
; AVX1-ONLY-NEXT:    vmovdqa (%rdi), %xmm0
; AVX1-ONLY-NEXT:    vmovdqa (%rsi), %xmm1
; AVX1-ONLY-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-ONLY-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX1-ONLY-NEXT:    vmovdqa %xmm0, 16(%rdx)
; AVX1-ONLY-NEXT:    vmovdqa %xmm2, (%rdx)
; AVX1-ONLY-NEXT:    retq
;
; AVX2-LABEL: store_i8_stride2_vf16:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
; AVX2-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,8,1,9,2,10,3,11,4,12,5,13,6,14,7,15,16,24,17,25,18,26,19,27,20,28,21,29,22,30,23,31]
; AVX2-NEXT:    vmovdqa %ymm0, (%rdx)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
  %in.vec0 = load <16 x i8>, ptr %in.vecptr0, align 64
  %in.vec1 = load <16 x i8>, ptr %in.vecptr1, align 64
  %1 = shufflevector <16 x i8> %in.vec0, <16 x i8> %in.vec1, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  %interleaved.vec = shufflevector <32 x i8> %1, <32 x i8> poison, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
  store <32 x i8> %interleaved.vec, ptr %out.vec, align 64
  ret void
}
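
; For vf32, AVX2 unpacks within 128-bit lanes and fixes up the lane order with
; vperm2i128; AVX512F (without BW) falls back to four xmm unpack/store pairs,
; and AVX512BW interleaves a full zmm in one go with vpermq + vpshufb.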
define void @store_i8_stride2_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %out.vec) nounwind {
; SSE-LABEL: store_i8_stride2_vf32:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa (%rdi), %xmm0
; SSE-NEXT:    movdqa 16(%rdi), %xmm1
; SSE-NEXT:    movdqa (%rsi), %xmm2
; SSE-NEXT:    movdqa 16(%rsi), %xmm3
; SSE-NEXT:    movdqa %xmm0, %xmm4
; SSE-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; SSE-NEXT:    movdqa %xmm1, 32(%rdx)
; SSE-NEXT:    movdqa %xmm2, 48(%rdx)
; SSE-NEXT:    movdqa %xmm0, (%rdx)
; SSE-NEXT:    movdqa %xmm4, 16(%rdx)
; SSE-NEXT:    retq
;
; AVX1-ONLY-LABEL: store_i8_stride2_vf32:
; AVX1-ONLY:       # %bb.0:
; AVX1-ONLY-NEXT:    vmovdqa (%rsi), %xmm0
; AVX1-ONLY-NEXT:    vmovdqa 16(%rsi), %xmm1
; AVX1-ONLY-NEXT:    vmovdqa (%rdi), %xmm2
; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm3
; AVX1-ONLY-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; AVX1-ONLY-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; AVX1-ONLY-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
; AVX1-ONLY-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
; AVX1-ONLY-NEXT:    vmovdqa %xmm1, 48(%rdx)
; AVX1-ONLY-NEXT:    vmovdqa %xmm2, 32(%rdx)
; AVX1-ONLY-NEXT:    vmovdqa %xmm0, (%rdx)
; AVX1-ONLY-NEXT:    vmovdqa %xmm4, 16(%rdx)
; AVX1-ONLY-NEXT:    retq
;
; AVX2-ONLY-LABEL: store_i8_stride2_vf32:
; AVX2-ONLY:       # %bb.0:
; AVX2-ONLY-NEXT:    vmovdqa (%rdi), %ymm0
; AVX2-ONLY-NEXT:    vmovdqa (%rsi), %ymm1
; AVX2-ONLY-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX2-ONLY-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm0[0,1],ymm2[0,1]
; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX2-ONLY-NEXT:    vmovdqa %ymm0, 32(%rdx)
; AVX2-ONLY-NEXT:    vmovdqa %ymm1, (%rdx)
; AVX2-ONLY-NEXT:    vzeroupper
; AVX2-ONLY-NEXT:    retq
;
; AVX512F-LABEL: store_i8_stride2_vf32:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vmovdqa (%rsi), %xmm0
; AVX512F-NEXT:    vmovdqa 16(%rsi), %xmm1
; AVX512F-NEXT:    vmovdqa (%rdi), %xmm2
; AVX512F-NEXT:    vmovdqa 16(%rdi), %xmm3
; AVX512F-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; AVX512F-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; AVX512F-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
; AVX512F-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
; AVX512F-NEXT:    vmovdqa %xmm1, 32(%rdx)
; AVX512F-NEXT:    vmovdqa %xmm2, 48(%rdx)
; AVX512F-NEXT:    vmovdqa %xmm0, (%rdx)
; AVX512F-NEXT:    vmovdqa %xmm4, 16(%rdx)
; AVX512F-NEXT:    retq
;
; AVX512BW-LABEL: store_i8_stride2_vf32:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT:    vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,4,1,5,2,6,3,7]
; AVX512BW-NEXT:    vpermq %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[0,8,1,9,2,10,3,11,4,12,5,13,6,14,7,15,16,24,17,25,18,26,19,27,20,28,21,29,22,30,23,31,32,40,33,41,34,42,35,43,36,44,37,45,38,46,39,47,48,56,49,57,50,58,51,59,52,60,53,61,54,62,55,63]
; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rdx)
; AVX512BW-NEXT:    vzeroupper
; AVX512BW-NEXT:    retq
  %in.vec0 = load <32 x i8>, ptr %in.vecptr0, align 64
  %in.vec1 = load <32 x i8>, ptr %in.vecptr1, align 64
  %1 = shufflevector <32 x i8> %in.vec0, <32 x i8> %in.vec1, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
  %interleaved.vec = shufflevector <64 x i8> %1, <64 x i8> poison, <64 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
  store <64 x i8> %interleaved.vec, ptr %out.vec, align 64
  ret void
}
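
; vf64 doubles the vf32 strategies: SSE, AVX1 and AVX512F each produce eight
; xmm unpack/store pairs, AVX2 runs two ymm rounds, and AVX512BW assembles the
; xmm unpacks into two zmm stores with vinserti128 + vinserti64x4.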
define void @store_i8_stride2_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %out.vec) nounwind {
; SSE-LABEL: store_i8_stride2_vf64:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa (%rdi), %xmm0
; SSE-NEXT:    movdqa 16(%rdi), %xmm1
; SSE-NEXT:    movdqa 32(%rdi), %xmm2
; SSE-NEXT:    movdqa 48(%rdi), %xmm3
; SSE-NEXT:    movdqa (%rsi), %xmm4
; SSE-NEXT:    movdqa 16(%rsi), %xmm5
; SSE-NEXT:    movdqa 32(%rsi), %xmm6
; SSE-NEXT:    movdqa 48(%rsi), %xmm7
; SSE-NEXT:    movdqa %xmm0, %xmm8
; SSE-NEXT:    punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm4[8],xmm8[9],xmm4[9],xmm8[10],xmm4[10],xmm8[11],xmm4[11],xmm8[12],xmm4[12],xmm8[13],xmm4[13],xmm8[14],xmm4[14],xmm8[15],xmm4[15]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSE-NEXT:    movdqa %xmm1, %xmm4
; SSE-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
; SSE-NEXT:    movdqa %xmm2, %xmm5
; SSE-NEXT:    punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm6[8],xmm5[9],xmm6[9],xmm5[10],xmm6[10],xmm5[11],xmm6[11],xmm5[12],xmm6[12],xmm5[13],xmm6[13],xmm5[14],xmm6[14],xmm5[15],xmm6[15]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
; SSE-NEXT:    movdqa %xmm3, %xmm6
; SSE-NEXT:    punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3],xmm3[4],xmm7[4],xmm3[5],xmm7[5],xmm3[6],xmm7[6],xmm3[7],xmm7[7]
; SSE-NEXT:    movdqa %xmm3, 96(%rdx)
; SSE-NEXT:    movdqa %xmm6, 112(%rdx)
; SSE-NEXT:    movdqa %xmm2, 64(%rdx)
; SSE-NEXT:    movdqa %xmm5, 80(%rdx)
; SSE-NEXT:    movdqa %xmm1, 32(%rdx)
; SSE-NEXT:    movdqa %xmm4, 48(%rdx)
; SSE-NEXT:    movdqa %xmm0, (%rdx)
; SSE-NEXT:    movdqa %xmm8, 16(%rdx)
; SSE-NEXT:    retq
;
; AVX1-ONLY-LABEL: store_i8_stride2_vf64:
; AVX1-ONLY:       # %bb.0:
; AVX1-ONLY-NEXT:    vmovdqa (%rsi), %xmm0
; AVX1-ONLY-NEXT:    vmovdqa 16(%rsi), %xmm1
; AVX1-ONLY-NEXT:    vmovdqa 32(%rsi), %xmm2
; AVX1-ONLY-NEXT:    vmovdqa 48(%rsi), %xmm3
; AVX1-ONLY-NEXT:    vmovdqa (%rdi), %xmm4
; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm5
; AVX1-ONLY-NEXT:    vmovdqa 32(%rdi), %xmm6
; AVX1-ONLY-NEXT:    vmovdqa 48(%rdi), %xmm7
; AVX1-ONLY-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
; AVX1-ONLY-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
; AVX1-ONLY-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm7[8],xmm3[8],xmm7[9],xmm3[9],xmm7[10],xmm3[10],xmm7[11],xmm3[11],xmm7[12],xmm3[12],xmm7[13],xmm3[13],xmm7[14],xmm3[14],xmm7[15],xmm3[15]
; AVX1-ONLY-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3],xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
; AVX1-ONLY-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
; AVX1-ONLY-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
; AVX1-ONLY-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
; AVX1-ONLY-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
; AVX1-ONLY-NEXT:    vmovdqa %xmm0, (%rdx)
; AVX1-ONLY-NEXT:    vmovdqa %xmm5, 16(%rdx)
; AVX1-ONLY-NEXT:    vmovdqa %xmm1, 32(%rdx)
; AVX1-ONLY-NEXT:    vmovdqa %xmm7, 48(%rdx)
; AVX1-ONLY-NEXT:    vmovdqa %xmm3, 96(%rdx)
; AVX1-ONLY-NEXT:    vmovdqa %xmm6, 112(%rdx)
; AVX1-ONLY-NEXT:    vmovdqa %xmm2, 64(%rdx)
; AVX1-ONLY-NEXT:    vmovdqa %xmm8, 80(%rdx)
; AVX1-ONLY-NEXT:    retq
;
; AVX2-ONLY-LABEL: store_i8_stride2_vf64:
; AVX2-ONLY:       # %bb.0:
; AVX2-ONLY-NEXT:    vmovdqa (%rdi), %ymm0
; AVX2-ONLY-NEXT:    vmovdqa 32(%rdi), %ymm1
; AVX2-ONLY-NEXT:    vmovdqa (%rsi), %ymm2
; AVX2-ONLY-NEXT:    vmovdqa 32(%rsi), %ymm3
; AVX2-ONLY-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
; AVX2-ONLY-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm4[2,3]
; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[0,1],ymm4[0,1]
; AVX2-ONLY-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm1[8],ymm3[8],ymm1[9],ymm3[9],ymm1[10],ymm3[10],ymm1[11],ymm3[11],ymm1[12],ymm3[12],ymm1[13],ymm3[13],ymm1[14],ymm3[14],ymm1[15],ymm3[15],ymm1[24],ymm3[24],ymm1[25],ymm3[25],ymm1[26],ymm3[26],ymm1[27],ymm3[27],ymm1[28],ymm3[28],ymm1[29],ymm3[29],ymm1[30],ymm3[30],ymm1[31],ymm3[31]
; AVX2-ONLY-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[4],ymm3[4],ymm1[5],ymm3[5],ymm1[6],ymm3[6],ymm1[7],ymm3[7],ymm1[16],ymm3[16],ymm1[17],ymm3[17],ymm1[18],ymm3[18],ymm1[19],ymm3[19],ymm1[20],ymm3[20],ymm1[21],ymm3[21],ymm1[22],ymm3[22],ymm1[23],ymm3[23]
; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm4[2,3]
; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm1[0,1],ymm4[0,1]
; AVX2-ONLY-NEXT:    vmovdqa %ymm1, 64(%rdx)
; AVX2-ONLY-NEXT:    vmovdqa %ymm3, 96(%rdx)
; AVX2-ONLY-NEXT:    vmovdqa %ymm0, (%rdx)
; AVX2-ONLY-NEXT:    vmovdqa %ymm2, 32(%rdx)
; AVX2-ONLY-NEXT:    vzeroupper
; AVX2-ONLY-NEXT:    retq
;
; AVX512F-LABEL: store_i8_stride2_vf64:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vmovdqa (%rsi), %xmm0
; AVX512F-NEXT:    vmovdqa 16(%rsi), %xmm1
; AVX512F-NEXT:    vmovdqa 32(%rsi), %xmm2
; AVX512F-NEXT:    vmovdqa 48(%rsi), %xmm3
; AVX512F-NEXT:    vmovdqa (%rdi), %xmm4
; AVX512F-NEXT:    vmovdqa 16(%rdi), %xmm5
; AVX512F-NEXT:    vmovdqa 32(%rdi), %xmm6
; AVX512F-NEXT:    vmovdqa 48(%rdi), %xmm7
; AVX512F-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
; AVX512F-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
; AVX512F-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
; AVX512F-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
; AVX512F-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
; AVX512F-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
; AVX512F-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm7[8],xmm3[8],xmm7[9],xmm3[9],xmm7[10],xmm3[10],xmm7[11],xmm3[11],xmm7[12],xmm3[12],xmm7[13],xmm3[13],xmm7[14],xmm3[14],xmm7[15],xmm3[15]
; AVX512F-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3],xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
; AVX512F-NEXT:    vmovdqa %xmm3, 96(%rdx)
; AVX512F-NEXT:    vmovdqa %xmm6, 112(%rdx)
; AVX512F-NEXT:    vmovdqa %xmm2, 64(%rdx)
; AVX512F-NEXT:    vmovdqa %xmm5, 80(%rdx)
; AVX512F-NEXT:    vmovdqa %xmm1, 32(%rdx)
; AVX512F-NEXT:    vmovdqa %xmm4, 48(%rdx)
; AVX512F-NEXT:    vmovdqa %xmm0, (%rdx)
; AVX512F-NEXT:    vmovdqa %xmm8, 16(%rdx)
; AVX512F-NEXT:    retq
;
; AVX512BW-LABEL: store_i8_stride2_vf64:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vmovdqa (%rsi), %xmm0
; AVX512BW-NEXT:    vmovdqa 16(%rsi), %xmm1
; AVX512BW-NEXT:    vmovdqa 32(%rsi), %xmm2
; AVX512BW-NEXT:    vmovdqa 48(%rsi), %xmm3
; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm4
; AVX512BW-NEXT:    vmovdqa 16(%rdi), %xmm5
; AVX512BW-NEXT:    vmovdqa 32(%rdi), %xmm6
; AVX512BW-NEXT:    vmovdqa 48(%rdi), %xmm7
; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm7[8],xmm3[8],xmm7[9],xmm3[9],xmm7[10],xmm3[10],xmm7[11],xmm3[11],xmm7[12],xmm3[12],xmm7[13],xmm3[13],xmm7[14],xmm3[14],xmm7[15],xmm3[15]
; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3],xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
; AVX512BW-NEXT:    vinserti128 $1, %xmm8, %ymm3, %ymm3
; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
; AVX512BW-NEXT:    vinserti128 $1, %xmm7, %ymm2, %ymm2
; AVX512BW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
; AVX512BW-NEXT:    vinserti128 $1, %xmm3, %ymm1, %ymm1
; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
; AVX512BW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm0
; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rdx)
; AVX512BW-NEXT:    vmovdqa64 %zmm2, 64(%rdx)
; AVX512BW-NEXT:    vzeroupper
; AVX512BW-NEXT:    retq
  %in.vec0 = load <64 x i8>, ptr %in.vecptr0, align 64
  %in.vec1 = load <64 x i8>, ptr %in.vecptr1, align 64
  %1 = shufflevector <64 x i8> %in.vec0, <64 x i8> %in.vec1, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
  %interleaved.vec = shufflevector <128 x i8> %1, <128 x i8> poison, <128 x i32> <i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
  store <128 x i8> %interleaved.vec, ptr %out.vec, align 64
  ret void
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; AVX1: {{.*}}
; AVX2-FAST: {{.*}}
; AVX2-FAST-PERLANE: {{.*}}
; AVX2-SLOW: {{.*}}
; AVX512: {{.*}}
; AVX512-FAST: {{.*}}
; AVX512-SLOW: {{.*}}
; AVX512BW-FAST: {{.*}}
; AVX512BW-ONLY-FAST: {{.*}}
; AVX512BW-ONLY-SLOW: {{.*}}
; AVX512BW-SLOW: {{.*}}
; AVX512DQ-FAST: {{.*}}
; AVX512DQ-SLOW: {{.*}}
; AVX512DQBW-FAST: {{.*}}
; AVX512DQBW-SLOW: {{.*}}
; AVX512F-FAST: {{.*}}
; AVX512F-ONLY-FAST: {{.*}}
; AVX512F-ONLY-SLOW: {{.*}}
; AVX512F-SLOW: {{.*}}