1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0
3 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1
4 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
5 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
6 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
7 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
8 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
9 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
10 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
11 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
12 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
13 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
14 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
16 ; These patterns are produced by the LoopVectorizer for interleaved loads.
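;
; A minimal sketch of that shape (illustrative only; %in, %wide and %lane0..2
; are made-up names, and this block is a comment, not an extra input to the
; RUN lines above): one wide load, then one shufflevector per output lane,
; each mask selecting every third byte starting at a different offset.
;
;   %wide = load <12 x i8>, ptr %in, align 64
;   %lane0 = shufflevector <12 x i8> %wide, <12 x i8> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
;   %lane1 = shufflevector <12 x i8> %wide, <12 x i8> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
;   %lane2 = shufflevector <12 x i8> %wide, <12 x i8> poison, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
;
; The functions below scale this shape from vf2 (<6 x i8>) up to vf64
; (<192 x i8>) and check the code generated for each feature set in the RUN lines.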
18 define void @load_i8_stride3_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
19 ; SSE-LABEL: load_i8_stride3_vf2:
; SSE: # %bb.0:
21 ; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
22 ; SSE-NEXT: pxor %xmm1, %xmm1
23 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
24 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,3,2,3,4,5,6,7]
25 ; SSE-NEXT: packuswb %xmm1, %xmm1
26 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
27 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,2,3,4,5,6,7]
28 ; SSE-NEXT: packuswb %xmm2, %xmm2
29 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
30 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
31 ; SSE-NEXT: packuswb %xmm0, %xmm0
32 ; SSE-NEXT: movd %xmm1, %eax
33 ; SSE-NEXT: movw %ax, (%rsi)
34 ; SSE-NEXT: movd %xmm2, %eax
35 ; SSE-NEXT: movw %ax, (%rdx)
36 ; SSE-NEXT: movd %xmm0, %eax
37 ; SSE-NEXT: movw %ax, (%rcx)
; SSE-NEXT: retq
40 ; AVX-LABEL: load_i8_stride3_vf2:
; AVX: # %bb.0:
42 ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
43 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,3,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
44 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,4,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
45 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
46 ; AVX-NEXT: vpextrw $0, %xmm1, (%rsi)
47 ; AVX-NEXT: vpextrw $0, %xmm2, (%rdx)
48 ; AVX-NEXT: vpextrw $0, %xmm0, (%rcx)
; AVX-NEXT: retq
50 %wide.vec = load <6 x i8>, ptr %in.vec, align 64
51 %strided.vec0 = shufflevector <6 x i8> %wide.vec, <6 x i8> poison, <2 x i32> <i32 0, i32 3>
52 %strided.vec1 = shufflevector <6 x i8> %wide.vec, <6 x i8> poison, <2 x i32> <i32 1, i32 4>
53 %strided.vec2 = shufflevector <6 x i8> %wide.vec, <6 x i8> poison, <2 x i32> <i32 2, i32 5>
54 store <2 x i8> %strided.vec0, ptr %out.vec0, align 64
55 store <2 x i8> %strided.vec1, ptr %out.vec1, align 64
56 store <2 x i8> %strided.vec2, ptr %out.vec2, align 64
ret void
}
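; vf4: a <12 x i8> load split into three <4 x i8> results. SSE2 has no byte
; shuffle, so it unpacks to words, shuffles, and packs back down; AVX uses one
; vpshufb mask per result and stores each with vmovd.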
60 define void @load_i8_stride3_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
61 ; SSE-LABEL: load_i8_stride3_vf4:
; SSE: # %bb.0:
63 ; SSE-NEXT: movdqa (%rdi), %xmm0
64 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,65535,65535,65535,65535]
65 ; SSE-NEXT: pxor %xmm2, %xmm2
66 ; SSE-NEXT: movdqa %xmm0, %xmm3
67 ; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
68 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
69 ; SSE-NEXT: movdqa %xmm0, %xmm2
70 ; SSE-NEXT: pand %xmm1, %xmm2
71 ; SSE-NEXT: pandn %xmm3, %xmm1
72 ; SSE-NEXT: por %xmm2, %xmm1
73 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
74 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
75 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
76 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
77 ; SSE-NEXT: packuswb %xmm1, %xmm1
78 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[0,3,2,3,4,5,6,7]
79 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm0[0,0]
80 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm0[2,3]
81 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[2,1,2,3,4,5,6,7]
82 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,7]
83 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
84 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,2,3,0,4,5,6,7]
85 ; SSE-NEXT: packuswb %xmm3, %xmm3
86 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
87 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
88 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
89 ; SSE-NEXT: packuswb %xmm0, %xmm0
90 ; SSE-NEXT: movd %xmm1, (%rsi)
91 ; SSE-NEXT: movd %xmm3, (%rdx)
92 ; SSE-NEXT: movd %xmm0, (%rcx)
; SSE-NEXT: retq
95 ; AVX-LABEL: load_i8_stride3_vf4:
; AVX: # %bb.0:
97 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
98 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,3,6,9,u,u,u,u,u,u,u,u,u,u,u,u]
99 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,4,7,10,u,u,u,u,u,u,u,u,u,u,u,u]
100 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,5,8,11,u,u,u,u,u,u,u,u,u,u,u,u]
101 ; AVX-NEXT: vmovd %xmm1, (%rsi)
102 ; AVX-NEXT: vmovd %xmm2, (%rdx)
103 ; AVX-NEXT: vmovd %xmm0, (%rcx)
; AVX-NEXT: retq
105 %wide.vec = load <12 x i8>, ptr %in.vec, align 64
106 %strided.vec0 = shufflevector <12 x i8> %wide.vec, <12 x i8> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
107 %strided.vec1 = shufflevector <12 x i8> %wide.vec, <12 x i8> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
108 %strided.vec2 = shufflevector <12 x i8> %wide.vec, <12 x i8> poison, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
109 store <4 x i8> %strided.vec0, ptr %out.vec0, align 64
110 store <4 x i8> %strided.vec1, ptr %out.vec1, align 64
111 store <4 x i8> %strided.vec2, ptr %out.vec2, align 64
ret void
}
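; vf8: a <24 x i8> load split into three <8 x i8> results. The AVX lowering
; combines two vpshufb results per output with vpor, since the bytes for each
; result now span both 16-byte input registers.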
115 define void @load_i8_stride3_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
116 ; SSE-LABEL: load_i8_stride3_vf8:
; SSE: # %bb.0:
118 ; SSE-NEXT: movdqa (%rdi), %xmm0
119 ; SSE-NEXT: movdqa 16(%rdi), %xmm1
120 ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,0,255,255,0,255,255,255,255,255,255,255,255,255,255]
121 ; SSE-NEXT: movdqa %xmm0, %xmm2
122 ; SSE-NEXT: pand %xmm4, %xmm2
123 ; SSE-NEXT: pandn %xmm1, %xmm4
124 ; SSE-NEXT: por %xmm2, %xmm4
125 ; SSE-NEXT: pxor %xmm2, %xmm2
126 ; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
127 ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,0]
128 ; SSE-NEXT: pand %xmm5, %xmm4
129 ; SSE-NEXT: movdqa %xmm0, %xmm3
130 ; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
131 ; SSE-NEXT: pandn %xmm3, %xmm5
132 ; SSE-NEXT: por %xmm4, %xmm5
133 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,2,1,3]
134 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,5]
135 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
136 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,3,2,1,4,5,6,7]
137 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,5,4,7]
138 ; SSE-NEXT: packuswb %xmm4, %xmm4
139 ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [0,255,255,0,255,255,0,255,255,255,255,255,255,255,255,255]
140 ; SSE-NEXT: movdqa %xmm0, %xmm6
141 ; SSE-NEXT: pand %xmm5, %xmm6
142 ; SSE-NEXT: pandn %xmm1, %xmm5
143 ; SSE-NEXT: por %xmm6, %xmm5
144 ; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3],xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
145 ; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,0,65535,65535,0,65535,65535]
146 ; SSE-NEXT: pand %xmm6, %xmm5
147 ; SSE-NEXT: pandn %xmm3, %xmm6
148 ; SSE-NEXT: por %xmm5, %xmm6
149 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[2,1,0,3,4,5,6,7]
150 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,4,7]
151 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,1]
152 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,2,3,0,4,5,6,7]
153 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,6,7,4]
154 ; SSE-NEXT: packuswb %xmm5, %xmm5
155 ; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,0,255,255,0,255,255,0,255,255,255,255,255,255,255,255]
156 ; SSE-NEXT: pand %xmm6, %xmm0
157 ; SSE-NEXT: pandn %xmm1, %xmm6
158 ; SSE-NEXT: por %xmm0, %xmm6
159 ; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
160 ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,65535,65535,0,65535,65535,0,65535]
161 ; SSE-NEXT: pand %xmm0, %xmm6
162 ; SSE-NEXT: pandn %xmm3, %xmm0
163 ; SSE-NEXT: por %xmm6, %xmm0
164 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
165 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
166 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
167 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,0,3,4,5,6,7]
168 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
169 ; SSE-NEXT: packuswb %xmm0, %xmm0
170 ; SSE-NEXT: movq %xmm4, (%rsi)
171 ; SSE-NEXT: movq %xmm5, (%rdx)
172 ; SSE-NEXT: movq %xmm0, (%rcx)
; SSE-NEXT: retq
175 ; AVX-LABEL: load_i8_stride3_vf8:
; AVX: # %bb.0:
177 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
178 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
179 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,xmm1[2,5,u,u,u,u,u,u,u,u]
180 ; AVX-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,3,6,9,12,15],zero,zero,xmm0[u,u,u,u,u,u,u,u]
181 ; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2
182 ; AVX-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,xmm1[0,3,6,u,u,u,u,u,u,u,u]
183 ; AVX-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[1,4,7,10,13],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
184 ; AVX-NEXT: vpor %xmm3, %xmm4, %xmm3
185 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,xmm1[1,4,7,u,u,u,u,u,u,u,u]
186 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,5,8,11,14],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
187 ; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
188 ; AVX-NEXT: vmovq %xmm2, (%rsi)
189 ; AVX-NEXT: vmovq %xmm3, (%rdx)
190 ; AVX-NEXT: vmovq %xmm0, (%rcx)
; AVX-NEXT: retq
192 %wide.vec = load <24 x i8>, ptr %in.vec, align 64
193 %strided.vec0 = shufflevector <24 x i8> %wide.vec, <24 x i8> poison, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
194 %strided.vec1 = shufflevector <24 x i8> %wide.vec, <24 x i8> poison, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
195 %strided.vec2 = shufflevector <24 x i8> %wide.vec, <24 x i8> poison, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
196 store <8 x i8> %strided.vec0, ptr %out.vec0, align 64
197 store <8 x i8> %strided.vec1, ptr %out.vec1, align 64
198 store <8 x i8> %strided.vec2, ptr %out.vec2, align 64
ret void
}
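; vf16: a <48 x i8> load split into three <16 x i8> results. The AVX lowerings
; share one vpshufb mask and rotate with vpalignr; the middle result is blended
; with vpblendvb (AVX1), vpternlogq (AVX512F), or a k-mask vpblendmb (AVX512BW).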
202 define void @load_i8_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
203 ; SSE-LABEL: load_i8_stride3_vf16:
; SSE: # %bb.0:
205 ; SSE-NEXT: movdqa (%rdi), %xmm5
206 ; SSE-NEXT: movdqa 16(%rdi), %xmm4
207 ; SSE-NEXT: movdqa 32(%rdi), %xmm0
208 ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
209 ; SSE-NEXT: movdqa %xmm3, %xmm1
210 ; SSE-NEXT: pandn %xmm4, %xmm1
211 ; SSE-NEXT: movdqa %xmm5, %xmm2
212 ; SSE-NEXT: pand %xmm3, %xmm2
213 ; SSE-NEXT: por %xmm1, %xmm2
214 ; SSE-NEXT: pxor %xmm6, %xmm6
215 ; SSE-NEXT: movdqa %xmm2, %xmm7
216 ; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
217 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,0,65535,65535,0]
218 ; SSE-NEXT: movdqa %xmm1, %xmm8
219 ; SSE-NEXT: pandn %xmm7, %xmm8
220 ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
221 ; SSE-NEXT: pand %xmm1, %xmm2
222 ; SSE-NEXT: por %xmm8, %xmm2
223 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
224 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,5]
225 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
226 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,3,2,1,4,5,6,7]
227 ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm2[0,1,2,3,6,5,4,7]
228 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm7[0,3,2,3,4,5,6,7]
229 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
230 ; SSE-NEXT: packuswb %xmm2, %xmm10
231 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
232 ; SSE-NEXT: pand %xmm2, %xmm10
233 ; SSE-NEXT: movdqa %xmm0, %xmm7
234 ; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
235 ; SSE-NEXT: movdqa {{.*#+}} xmm9 = [65535,65535,0,65535,65535,0,65535,65535]
236 ; SSE-NEXT: movdqa %xmm9, %xmm8
237 ; SSE-NEXT: pandn %xmm7, %xmm8
238 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
239 ; SSE-NEXT: movdqa %xmm0, %xmm11
240 ; SSE-NEXT: pand %xmm9, %xmm11
241 ; SSE-NEXT: por %xmm8, %xmm11
242 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm11[3,1,2,0]
243 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[2,1,2,3,4,5,6,7]
244 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,3,2,0]
245 ; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,7,6,5]
246 ; SSE-NEXT: packuswb %xmm8, %xmm11
247 ; SSE-NEXT: movdqa %xmm2, %xmm8
248 ; SSE-NEXT: pandn %xmm11, %xmm8
249 ; SSE-NEXT: por %xmm10, %xmm8
250 ; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
251 ; SSE-NEXT: movdqa %xmm4, %xmm11
252 ; SSE-NEXT: pand %xmm10, %xmm11
253 ; SSE-NEXT: pandn %xmm5, %xmm10
254 ; SSE-NEXT: por %xmm11, %xmm10
255 ; SSE-NEXT: movdqa %xmm10, %xmm11
256 ; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm6[8],xmm11[9],xmm6[9],xmm11[10],xmm6[10],xmm11[11],xmm6[11],xmm11[12],xmm6[12],xmm11[13],xmm6[13],xmm11[14],xmm6[14],xmm11[15],xmm6[15]
257 ; SSE-NEXT: movdqa %xmm9, %xmm12
258 ; SSE-NEXT: pandn %xmm11, %xmm12
259 ; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm6[0],xmm10[1],xmm6[1],xmm10[2],xmm6[2],xmm10[3],xmm6[3],xmm10[4],xmm6[4],xmm10[5],xmm6[5],xmm10[6],xmm6[6],xmm10[7],xmm6[7]
260 ; SSE-NEXT: pand %xmm9, %xmm10
261 ; SSE-NEXT: por %xmm12, %xmm10
262 ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm10[2,1,0,3,4,5,6,7]
263 ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,5,4,7]
264 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,3,2,1]
265 ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm10[1,2,3,0,4,5,6,7]
266 ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,6,7,4]
267 ; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,7,6,7]
268 ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[0,2,2,3]
269 ; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm11[1,2,3,3,4,5,6,7]
270 ; SSE-NEXT: packuswb %xmm11, %xmm10
271 ; SSE-NEXT: pand %xmm2, %xmm10
272 ; SSE-NEXT: movdqa %xmm7, %xmm11
273 ; SSE-NEXT: pand %xmm9, %xmm11
274 ; SSE-NEXT: pandn %xmm0, %xmm9
275 ; SSE-NEXT: por %xmm11, %xmm9
276 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[0,3,2,3,4,5,6,7]
277 ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,6,5,6,7]
278 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,1,2,0]
279 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[2,2,2,2,4,5,6,7]
280 ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,5,6,7,4]
281 ; SSE-NEXT: packuswb %xmm9, %xmm9
282 ; SSE-NEXT: pandn %xmm9, %xmm2
283 ; SSE-NEXT: por %xmm10, %xmm2
284 ; SSE-NEXT: pand %xmm3, %xmm4
285 ; SSE-NEXT: pandn %xmm5, %xmm3
286 ; SSE-NEXT: por %xmm4, %xmm3
287 ; SSE-NEXT: movdqa %xmm3, %xmm4
288 ; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
289 ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [0,65535,65535,0,65535,65535,0,65535]
290 ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
291 ; SSE-NEXT: pand %xmm5, %xmm3
292 ; SSE-NEXT: pandn %xmm4, %xmm5
293 ; SSE-NEXT: por %xmm3, %xmm5
294 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm5[3,1,2,0]
295 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,5]
296 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,2,0]
297 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[2,1,0,3,4,5,6,7]
298 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,4,7]
299 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,1,2,3]
300 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[2,1,2,3,4,5,6,7]
301 ; SSE-NEXT: packuswb %xmm4, %xmm3
302 ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535,65535,0,0,0]
303 ; SSE-NEXT: pand %xmm4, %xmm3
304 ; SSE-NEXT: pand %xmm1, %xmm0
305 ; SSE-NEXT: pandn %xmm7, %xmm1
306 ; SSE-NEXT: por %xmm0, %xmm1
307 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,1,3]
308 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7]
309 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
310 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,3,4,5,6,7]
311 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
312 ; SSE-NEXT: packuswb %xmm0, %xmm0
313 ; SSE-NEXT: pandn %xmm0, %xmm4
314 ; SSE-NEXT: por %xmm3, %xmm4
315 ; SSE-NEXT: movdqa %xmm8, (%rsi)
316 ; SSE-NEXT: movdqa %xmm2, (%rdx)
317 ; SSE-NEXT: movdqa %xmm4, (%rcx)
; SSE-NEXT: retq
320 ; AVX1-LABEL: load_i8_stride3_vf16:
; AVX1: # %bb.0:
322 ; AVX1-NEXT: vmovdqa (%rdi), %xmm0
323 ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1
324 ; AVX1-NEXT: vmovdqa 32(%rdi), %xmm2
325 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
326 ; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
327 ; AVX1-NEXT: vpshufb %xmm3, %xmm1, %xmm1
328 ; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
329 ; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm2[11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10]
330 ; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
331 ; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10]
332 ; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm3[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
333 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
334 ; AVX1-NEXT: vpblendvb %xmm4, %xmm0, %xmm1, %xmm1
335 ; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10]
336 ; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
337 ; AVX1-NEXT: vmovdqa %xmm0, (%rsi)
338 ; AVX1-NEXT: vmovdqa %xmm1, (%rdx)
339 ; AVX1-NEXT: vmovdqa %xmm2, (%rcx)
; AVX1-NEXT: retq
342 ; AVX512F-LABEL: load_i8_stride3_vf16:
; AVX512F: # %bb.0:
344 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
345 ; AVX512F-NEXT: vmovdqa 16(%rdi), %xmm1
346 ; AVX512F-NEXT: vmovdqa 32(%rdi), %xmm2
347 ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
348 ; AVX512F-NEXT: vpshufb %xmm3, %xmm0, %xmm0
349 ; AVX512F-NEXT: vpshufb %xmm3, %xmm1, %xmm1
350 ; AVX512F-NEXT: vpshufb %xmm3, %xmm2, %xmm2
351 ; AVX512F-NEXT: vpalignr {{.*#+}} xmm3 = xmm2[11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10]
352 ; AVX512F-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
353 ; AVX512F-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10]
354 ; AVX512F-NEXT: vpalignr {{.*#+}} xmm2 = xmm3[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
355 ; AVX512F-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
356 ; AVX512F-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10]
357 ; AVX512F-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
358 ; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
359 ; AVX512F-NEXT: vmovdqa %xmm1, (%rdx)
360 ; AVX512F-NEXT: vmovdqa %xmm2, (%rcx)
; AVX512F-NEXT: retq
363 ; AVX512BW-LABEL: load_i8_stride3_vf16:
; AVX512BW: # %bb.0:
365 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
366 ; AVX512BW-NEXT: vmovdqa 16(%rdi), %xmm1
367 ; AVX512BW-NEXT: vmovdqa 32(%rdi), %xmm2
368 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
369 ; AVX512BW-NEXT: vpshufb %xmm3, %xmm0, %xmm0
370 ; AVX512BW-NEXT: vpshufb %xmm3, %xmm1, %xmm1
371 ; AVX512BW-NEXT: vpshufb %xmm3, %xmm2, %xmm2
372 ; AVX512BW-NEXT: vpalignr {{.*#+}} xmm3 = xmm2[11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10]
373 ; AVX512BW-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
374 ; AVX512BW-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10]
375 ; AVX512BW-NEXT: movw $-2048, %ax # imm = 0xF800
376 ; AVX512BW-NEXT: kmovd %eax, %k1
377 ; AVX512BW-NEXT: vpblendmb %xmm1, %xmm0, %xmm2 {%k1}
378 ; AVX512BW-NEXT: vpalignr {{.*#+}} xmm1 = xmm3[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
379 ; AVX512BW-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10]
380 ; AVX512BW-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
381 ; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
382 ; AVX512BW-NEXT: vmovdqa %xmm2, (%rdx)
383 ; AVX512BW-NEXT: vmovdqa %xmm1, (%rcx)
384 ; AVX512BW-NEXT: retq
385 %wide.vec = load <48 x i8>, ptr %in.vec, align 64
386 %strided.vec0 = shufflevector <48 x i8> %wide.vec, <48 x i8> poison, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
387 %strided.vec1 = shufflevector <48 x i8> %wide.vec, <48 x i8> poison, <16 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46>
388 %strided.vec2 = shufflevector <48 x i8> %wide.vec, <48 x i8> poison, <16 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47>
389 store <16 x i8> %strided.vec0, ptr %out.vec0, align 64
390 store <16 x i8> %strided.vec1, ptr %out.vec1, align 64
391 store <16 x i8> %strided.vec2, ptr %out.vec2, align 64
ret void
}
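; vf32: a <96 x i8> load split into three <32 x i8> results. AVX2 and AVX512
; build ymm inputs with vinserti128 and widen the vf16 vpshufb/vpalignr scheme;
; AVX1 keeps most of the work in xmm registers and forms only the middle
; result as a ymm value.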
395 define void @load_i8_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
396 ; SSE-LABEL: load_i8_stride3_vf32:
; SSE: # %bb.0:
398 ; SSE-NEXT: movdqa 64(%rdi), %xmm2
399 ; SSE-NEXT: movdqa (%rdi), %xmm6
400 ; SSE-NEXT: movdqa 16(%rdi), %xmm4
401 ; SSE-NEXT: movdqa 32(%rdi), %xmm8
402 ; SSE-NEXT: movdqa 48(%rdi), %xmm12
403 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
404 ; SSE-NEXT: movdqa {{.*#+}} xmm13 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
405 ; SSE-NEXT: movdqa %xmm13, %xmm7
406 ; SSE-NEXT: pandn %xmm4, %xmm7
407 ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
408 ; SSE-NEXT: movdqa %xmm5, %xmm14
409 ; SSE-NEXT: pandn %xmm6, %xmm14
410 ; SSE-NEXT: movdqa %xmm13, %xmm0
411 ; SSE-NEXT: pandn %xmm6, %xmm0
412 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
413 ; SSE-NEXT: movdqa %xmm6, %xmm0
414 ; SSE-NEXT: pand %xmm13, %xmm0
415 ; SSE-NEXT: por %xmm7, %xmm0
416 ; SSE-NEXT: pxor %xmm9, %xmm9
417 ; SSE-NEXT: movdqa %xmm0, %xmm1
418 ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
419 ; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,0,65535,65535,0,65535,65535,0]
420 ; SSE-NEXT: movdqa %xmm6, %xmm3
421 ; SSE-NEXT: pandn %xmm1, %xmm3
422 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
423 ; SSE-NEXT: pand %xmm6, %xmm0
424 ; SSE-NEXT: por %xmm3, %xmm0
425 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
426 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
427 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
428 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
429 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
430 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
431 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
432 ; SSE-NEXT: packuswb %xmm1, %xmm0
433 ; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
434 ; SSE-NEXT: pand %xmm7, %xmm0
435 ; SSE-NEXT: movdqa %xmm8, %xmm3
436 ; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm9[8],xmm3[9],xmm9[9],xmm3[10],xmm9[10],xmm3[11],xmm9[11],xmm3[12],xmm9[12],xmm3[13],xmm9[13],xmm3[14],xmm9[14],xmm3[15],xmm9[15]
437 ; SSE-NEXT: movdqa {{.*#+}} xmm15 = [65535,65535,0,65535,65535,0,65535,65535]
438 ; SSE-NEXT: movdqa %xmm15, %xmm1
439 ; SSE-NEXT: pandn %xmm3, %xmm1
440 ; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3],xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7]
441 ; SSE-NEXT: movdqa %xmm8, %xmm10
442 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
443 ; SSE-NEXT: pand %xmm15, %xmm10
444 ; SSE-NEXT: por %xmm1, %xmm10
445 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[3,1,2,0]
446 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
447 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,0]
448 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
449 ; SSE-NEXT: packuswb %xmm1, %xmm1
450 ; SSE-NEXT: movdqa %xmm7, %xmm10
451 ; SSE-NEXT: pandn %xmm1, %xmm10
452 ; SSE-NEXT: por %xmm0, %xmm10
453 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
454 ; SSE-NEXT: movdqa %xmm13, %xmm0
455 ; SSE-NEXT: pandn %xmm2, %xmm0
456 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
457 ; SSE-NEXT: movdqa %xmm12, %xmm1
458 ; SSE-NEXT: pand %xmm13, %xmm1
459 ; SSE-NEXT: por %xmm0, %xmm1
460 ; SSE-NEXT: movdqa %xmm1, %xmm0
461 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm9[8],xmm0[9],xmm9[9],xmm0[10],xmm9[10],xmm0[11],xmm9[11],xmm0[12],xmm9[12],xmm0[13],xmm9[13],xmm0[14],xmm9[14],xmm0[15],xmm9[15]
462 ; SSE-NEXT: movdqa %xmm6, %xmm10
463 ; SSE-NEXT: pandn %xmm0, %xmm10
464 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3],xmm1[4],xmm9[4],xmm1[5],xmm9[5],xmm1[6],xmm9[6],xmm1[7],xmm9[7]
465 ; SSE-NEXT: pand %xmm6, %xmm1
466 ; SSE-NEXT: por %xmm10, %xmm1
467 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
468 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
469 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
470 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
471 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,4,7]
472 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
473 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
474 ; SSE-NEXT: packuswb %xmm0, %xmm1
475 ; SSE-NEXT: movdqa 80(%rdi), %xmm10
476 ; SSE-NEXT: movdqa %xmm10, %xmm12
477 ; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm9[8],xmm12[9],xmm9[9],xmm12[10],xmm9[10],xmm12[11],xmm9[11],xmm12[12],xmm9[12],xmm12[13],xmm9[13],xmm12[14],xmm9[14],xmm12[15],xmm9[15]
478 ; SSE-NEXT: movdqa %xmm15, %xmm0
479 ; SSE-NEXT: pandn %xmm12, %xmm0
480 ; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
481 ; SSE-NEXT: movdqa %xmm10, %xmm11
482 ; SSE-NEXT: pand %xmm15, %xmm11
483 ; SSE-NEXT: por %xmm0, %xmm11
484 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[3,1,2,0]
485 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
486 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,0]
487 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
488 ; SSE-NEXT: packuswb %xmm0, %xmm0
489 ; SSE-NEXT: movdqa %xmm7, %xmm11
490 ; SSE-NEXT: pandn %xmm0, %xmm11
491 ; SSE-NEXT: pand %xmm7, %xmm1
492 ; SSE-NEXT: por %xmm1, %xmm11
493 ; SSE-NEXT: movdqa %xmm4, %xmm0
494 ; SSE-NEXT: pand %xmm5, %xmm0
495 ; SSE-NEXT: por %xmm14, %xmm0
496 ; SSE-NEXT: movdqa %xmm0, %xmm1
497 ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
498 ; SSE-NEXT: movdqa %xmm15, %xmm14
499 ; SSE-NEXT: pandn %xmm1, %xmm14
500 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3],xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
501 ; SSE-NEXT: pand %xmm15, %xmm0
502 ; SSE-NEXT: por %xmm14, %xmm0
503 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,0,3,4,5,6,7]
504 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
505 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
506 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,0,4,5,6,7]
507 ; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm0[0,1,2,3,5,6,7,4]
508 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,7,6,7]
509 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
510 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,3,4,5,6,7]
511 ; SSE-NEXT: packuswb %xmm0, %xmm14
512 ; SSE-NEXT: movdqa %xmm15, %xmm0
513 ; SSE-NEXT: pandn %xmm8, %xmm0
514 ; SSE-NEXT: movdqa %xmm6, %xmm1
515 ; SSE-NEXT: pandn %xmm3, %xmm1
516 ; SSE-NEXT: pand %xmm15, %xmm3
517 ; SSE-NEXT: por %xmm0, %xmm3
518 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[0,3,2,3,4,5,6,7]
519 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
520 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
521 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7]
522 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4]
523 ; SSE-NEXT: packuswb %xmm0, %xmm0
524 ; SSE-NEXT: movdqa %xmm7, %xmm3
525 ; SSE-NEXT: pandn %xmm0, %xmm3
526 ; SSE-NEXT: pand %xmm7, %xmm14
527 ; SSE-NEXT: por %xmm14, %xmm3
528 ; SSE-NEXT: pand %xmm5, %xmm2
529 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
530 ; SSE-NEXT: pandn %xmm8, %xmm5
531 ; SSE-NEXT: por %xmm2, %xmm5
532 ; SSE-NEXT: movdqa %xmm5, %xmm0
533 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm9[8],xmm0[9],xmm9[9],xmm0[10],xmm9[10],xmm0[11],xmm9[11],xmm0[12],xmm9[12],xmm0[13],xmm9[13],xmm0[14],xmm9[14],xmm0[15],xmm9[15]
534 ; SSE-NEXT: movdqa %xmm15, %xmm14
535 ; SSE-NEXT: pandn %xmm0, %xmm14
536 ; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3],xmm5[4],xmm9[4],xmm5[5],xmm9[5],xmm5[6],xmm9[6],xmm5[7],xmm9[7]
537 ; SSE-NEXT: pand %xmm15, %xmm5
538 ; SSE-NEXT: por %xmm14, %xmm5
539 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,1,0,3,4,5,6,7]
540 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,4,7]
541 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,1]
542 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,2,3,0,4,5,6,7]
543 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,6,7,4]
544 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
545 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
546 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,3,4,5,6,7]
547 ; SSE-NEXT: packuswb %xmm0, %xmm5
548 ; SSE-NEXT: movdqa %xmm12, %xmm0
549 ; SSE-NEXT: pand %xmm15, %xmm0
550 ; SSE-NEXT: pandn %xmm10, %xmm15
551 ; SSE-NEXT: por %xmm0, %xmm15
552 ; SSE-NEXT: pand %xmm7, %xmm5
553 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm15[0,3,2,3,4,5,6,7]
554 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
555 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
556 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7]
557 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4]
558 ; SSE-NEXT: packuswb %xmm0, %xmm0
559 ; SSE-NEXT: pandn %xmm0, %xmm7
560 ; SSE-NEXT: por %xmm5, %xmm7
561 ; SSE-NEXT: pand %xmm13, %xmm4
562 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
563 ; SSE-NEXT: movdqa %xmm4, %xmm0
564 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm9[8],xmm0[9],xmm9[9],xmm0[10],xmm9[10],xmm0[11],xmm9[11],xmm0[12],xmm9[12],xmm0[13],xmm9[13],xmm0[14],xmm9[14],xmm0[15],xmm9[15]
565 ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [0,65535,65535,0,65535,65535,0,65535]
566 ; SSE-NEXT: movdqa %xmm5, %xmm2
567 ; SSE-NEXT: pandn %xmm0, %xmm2
568 ; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3],xmm4[4],xmm9[4],xmm4[5],xmm9[5],xmm4[6],xmm9[6],xmm4[7],xmm9[7]
569 ; SSE-NEXT: pand %xmm5, %xmm4
570 ; SSE-NEXT: por %xmm2, %xmm4
571 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[3,1,2,0]
572 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,5]
573 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,1,2,0]
574 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
575 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,6,5,4,7]
576 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
577 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
578 ; SSE-NEXT: packuswb %xmm0, %xmm4
579 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,65535,0,0,0]
580 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
581 ; SSE-NEXT: pand %xmm6, %xmm0
582 ; SSE-NEXT: por %xmm1, %xmm0
583 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
584 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7]
585 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
586 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,3,4,5,6,7]
587 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
588 ; SSE-NEXT: packuswb %xmm0, %xmm1
589 ; SSE-NEXT: movdqa %xmm2, %xmm0
590 ; SSE-NEXT: pandn %xmm1, %xmm0
591 ; SSE-NEXT: pand %xmm2, %xmm4
592 ; SSE-NEXT: por %xmm4, %xmm0
593 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
594 ; SSE-NEXT: pand %xmm13, %xmm1
595 ; SSE-NEXT: pandn %xmm8, %xmm13
596 ; SSE-NEXT: por %xmm1, %xmm13
597 ; SSE-NEXT: movdqa %xmm13, %xmm1
598 ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
599 ; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm9[0],xmm13[1],xmm9[1],xmm13[2],xmm9[2],xmm13[3],xmm9[3],xmm13[4],xmm9[4],xmm13[5],xmm9[5],xmm13[6],xmm9[6],xmm13[7],xmm9[7]
600 ; SSE-NEXT: pand %xmm5, %xmm13
601 ; SSE-NEXT: pandn %xmm1, %xmm5
602 ; SSE-NEXT: por %xmm13, %xmm5
603 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[3,1,2,0]
604 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,5]
605 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[3,1,2,0]
606 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[2,1,0,3,4,5,6,7]
607 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,5,4,7]
608 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
609 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
610 ; SSE-NEXT: packuswb %xmm1, %xmm4
611 ; SSE-NEXT: pand %xmm6, %xmm10
612 ; SSE-NEXT: pandn %xmm12, %xmm6
613 ; SSE-NEXT: por %xmm10, %xmm6
614 ; SSE-NEXT: pand %xmm2, %xmm4
615 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[0,2,1,3]
616 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,1,4,5,6,7]
617 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
618 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,3,4,5,6,7]
619 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,4,7]
620 ; SSE-NEXT: packuswb %xmm1, %xmm1
621 ; SSE-NEXT: pandn %xmm1, %xmm2
622 ; SSE-NEXT: por %xmm4, %xmm2
623 ; SSE-NEXT: movdqa %xmm11, 16(%rsi)
624 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
625 ; SSE-NEXT: movaps %xmm1, (%rsi)
626 ; SSE-NEXT: movdqa %xmm7, 16(%rdx)
627 ; SSE-NEXT: movdqa %xmm3, (%rdx)
628 ; SSE-NEXT: movdqa %xmm2, 16(%rcx)
629 ; SSE-NEXT: movdqa %xmm0, (%rcx)
; SSE-NEXT: retq
632 ; AVX1-ONLY-LABEL: load_i8_stride3_vf32:
633 ; AVX1-ONLY: # %bb.0:
634 ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm0
635 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm1
636 ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm2
637 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm3
638 ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm4
639 ; AVX1-ONLY-NEXT: vmovdqa 80(%rdi), %xmm5
640 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm6 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
641 ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm0, %xmm0
642 ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm3, %xmm3
643 ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm1, %xmm1
644 ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm4, %xmm4
645 ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm2, %xmm2
646 ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm5, %xmm5
647 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm6 = xmm5[11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10]
648 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm7 = xmm2[11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10]
649 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm3 = xmm3[11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7,8,9,10]
650 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
651 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm8
652 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm4 = xmm4[11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7,8,9,10]
653 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10]
654 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm2
655 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm7[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
656 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm4 = xmm6[11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7,8,9,10]
657 ; AVX1-ONLY-NEXT: vbroadcastf128 {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
658 ; AVX1-ONLY-NEXT: # ymm5 = mem[0,1,0,1]
659 ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm5, %ymm2
660 ; AVX1-ONLY-NEXT: vandps %ymm5, %ymm8, %ymm5
661 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm5, %ymm2
662 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7,8,9,10]
663 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
664 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm3 = xmm3[11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7,8,9,10]
665 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm3 = xmm3[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
666 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, 16(%rsi)
667 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, (%rsi)
668 ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%rdx)
669 ; AVX1-ONLY-NEXT: vmovdqa %xmm4, 16(%rcx)
670 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, (%rcx)
671 ; AVX1-ONLY-NEXT: vzeroupper
672 ; AVX1-ONLY-NEXT: retq
674 ; AVX2-ONLY-LABEL: load_i8_stride3_vf32:
675 ; AVX2-ONLY: # %bb.0:
676 ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm0
677 ; AVX2-ONLY-NEXT: vmovdqa 16(%rdi), %xmm1
678 ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %xmm2
679 ; AVX2-ONLY-NEXT: vinserti128 $1, 48(%rdi), %ymm0, %ymm0
680 ; AVX2-ONLY-NEXT: vinserti128 $1, 64(%rdi), %ymm1, %ymm1
681 ; AVX2-ONLY-NEXT: vinserti128 $1, 80(%rdi), %ymm2, %ymm2
682 ; AVX2-ONLY-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
683 ; AVX2-ONLY-NEXT: # ymm3 = mem[0,1,0,1]
684 ; AVX2-ONLY-NEXT: vpshufb %ymm3, %ymm0, %ymm0
685 ; AVX2-ONLY-NEXT: vpshufb %ymm3, %ymm1, %ymm1
686 ; AVX2-ONLY-NEXT: vpshufb %ymm3, %ymm2, %ymm2
687 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm3 = ymm2[11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9,10],ymm2[27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25,26]
688 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
689 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10],ymm1[27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26]
690 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
691 ; AVX2-ONLY-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
692 ; AVX2-ONLY-NEXT: # ymm4 = mem[0,1,0,1]
693 ; AVX2-ONLY-NEXT: vpblendvb %ymm4, %ymm0, %ymm1, %ymm1
694 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26]
695 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
696 ; AVX2-ONLY-NEXT: vmovdqa %ymm0, (%rsi)
697 ; AVX2-ONLY-NEXT: vmovdqa %ymm1, (%rdx)
698 ; AVX2-ONLY-NEXT: vmovdqa %ymm2, (%rcx)
699 ; AVX2-ONLY-NEXT: vzeroupper
700 ; AVX2-ONLY-NEXT: retq
702 ; AVX512F-LABEL: load_i8_stride3_vf32:
; AVX512F: # %bb.0:
704 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
705 ; AVX512F-NEXT: vmovdqa 16(%rdi), %xmm1
706 ; AVX512F-NEXT: vmovdqa 32(%rdi), %xmm2
707 ; AVX512F-NEXT: vinserti128 $1, 48(%rdi), %ymm0, %ymm0
708 ; AVX512F-NEXT: vinserti128 $1, 64(%rdi), %ymm1, %ymm1
709 ; AVX512F-NEXT: vinserti128 $1, 80(%rdi), %ymm2, %ymm2
710 ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
711 ; AVX512F-NEXT: # ymm3 = mem[0,1,0,1]
712 ; AVX512F-NEXT: vpshufb %ymm3, %ymm0, %ymm0
713 ; AVX512F-NEXT: vpshufb %ymm3, %ymm1, %ymm1
714 ; AVX512F-NEXT: vpshufb %ymm3, %ymm2, %ymm2
715 ; AVX512F-NEXT: vpalignr {{.*#+}} ymm3 = ymm2[11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9,10],ymm2[27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25,26]
716 ; AVX512F-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
717 ; AVX512F-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10],ymm1[27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26]
718 ; AVX512F-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
719 ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
720 ; AVX512F-NEXT: # ymm4 = mem[0,1,0,1]
721 ; AVX512F-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
722 ; AVX512F-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26]
723 ; AVX512F-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
724 ; AVX512F-NEXT: vmovdqa %ymm0, (%rsi)
725 ; AVX512F-NEXT: vmovdqa %ymm4, (%rdx)
726 ; AVX512F-NEXT: vmovdqa %ymm2, (%rcx)
727 ; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
730 ; AVX512BW-LABEL: load_i8_stride3_vf32:
; AVX512BW: # %bb.0:
732 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
733 ; AVX512BW-NEXT: vmovdqa 16(%rdi), %xmm1
734 ; AVX512BW-NEXT: vmovdqa 32(%rdi), %xmm2
735 ; AVX512BW-NEXT: vinserti128 $1, 48(%rdi), %ymm0, %ymm0
736 ; AVX512BW-NEXT: vinserti128 $1, 64(%rdi), %ymm1, %ymm1
737 ; AVX512BW-NEXT: vinserti128 $1, 80(%rdi), %ymm2, %ymm2
738 ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
739 ; AVX512BW-NEXT: # ymm3 = mem[0,1,0,1]
740 ; AVX512BW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
741 ; AVX512BW-NEXT: vpshufb %ymm3, %ymm1, %ymm1
742 ; AVX512BW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
743 ; AVX512BW-NEXT: vpalignr {{.*#+}} ymm3 = ymm2[11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9,10],ymm2[27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25,26]
744 ; AVX512BW-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
745 ; AVX512BW-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10],ymm1[27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26]
746 ; AVX512BW-NEXT: movl $-134154240, %eax # imm = 0xF800F800
747 ; AVX512BW-NEXT: kmovd %eax, %k1
748 ; AVX512BW-NEXT: vpblendmb %ymm1, %ymm0, %ymm2 {%k1}
749 ; AVX512BW-NEXT: vpalignr {{.*#+}} ymm1 = ymm3[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
750 ; AVX512BW-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26]
751 ; AVX512BW-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
752 ; AVX512BW-NEXT: vmovdqa %ymm0, (%rsi)
753 ; AVX512BW-NEXT: vmovdqa %ymm2, (%rdx)
754 ; AVX512BW-NEXT: vmovdqa %ymm1, (%rcx)
755 ; AVX512BW-NEXT: vzeroupper
756 ; AVX512BW-NEXT: retq
757 %wide.vec = load <96 x i8>, ptr %in.vec, align 64
758 %strided.vec0 = shufflevector <96 x i8> %wide.vec, <96 x i8> poison, <32 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45, i32 48, i32 51, i32 54, i32 57, i32 60, i32 63, i32 66, i32 69, i32 72, i32 75, i32 78, i32 81, i32 84, i32 87, i32 90, i32 93>
759 %strided.vec1 = shufflevector <96 x i8> %wide.vec, <96 x i8> poison, <32 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46, i32 49, i32 52, i32 55, i32 58, i32 61, i32 64, i32 67, i32 70, i32 73, i32 76, i32 79, i32 82, i32 85, i32 88, i32 91, i32 94>
760 %strided.vec2 = shufflevector <96 x i8> %wide.vec, <96 x i8> poison, <32 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47, i32 50, i32 53, i32 56, i32 59, i32 62, i32 65, i32 68, i32 71, i32 74, i32 77, i32 80, i32 83, i32 86, i32 89, i32 92, i32 95>
761 store <32 x i8> %strided.vec0, ptr %out.vec0, align 64
762 store <32 x i8> %strided.vec1, ptr %out.vec1, align 64
763 store <32 x i8> %strided.vec2, ptr %out.vec2, align 64
ret void
}
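; vf64: a <192 x i8> load split into three <64 x i8> results. The SSE2
; lowering below runs out of XMM registers and spills intermediates to the
; stack (subq $168, %rsp).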
767 define void @load_i8_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
768 ; SSE-LABEL: load_i8_stride3_vf64:
; SSE: # %bb.0:
770 ; SSE-NEXT: subq $168, %rsp
771 ; SSE-NEXT: movdqa 80(%rdi), %xmm9
772 ; SSE-NEXT: movdqa (%rdi), %xmm0
773 ; SSE-NEXT: movdqa 16(%rdi), %xmm10
774 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
775 ; SSE-NEXT: movdqa 48(%rdi), %xmm5
776 ; SSE-NEXT: movdqa 64(%rdi), %xmm1
777 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
778 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
779 ; SSE-NEXT: movdqa %xmm2, %xmm6
780 ; SSE-NEXT: pandn %xmm1, %xmm6
781 ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
782 ; SSE-NEXT: movdqa %xmm4, %xmm1
783 ; SSE-NEXT: pandn %xmm5, %xmm1
784 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
785 ; SSE-NEXT: movdqa %xmm2, %xmm1
786 ; SSE-NEXT: pandn %xmm5, %xmm1
787 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
788 ; SSE-NEXT: pand %xmm2, %xmm5
789 ; SSE-NEXT: movdqa %xmm2, %xmm11
790 ; SSE-NEXT: por %xmm6, %xmm5
791 ; SSE-NEXT: pxor %xmm8, %xmm8
792 ; SSE-NEXT: movdqa %xmm5, %xmm1
793 ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm8[8],xmm1[9],xmm8[9],xmm1[10],xmm8[10],xmm1[11],xmm8[11],xmm1[12],xmm8[12],xmm1[13],xmm8[13],xmm1[14],xmm8[14],xmm1[15],xmm8[15]
794 ; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,0,65535,65535,0,65535,65535,0]
795 ; SSE-NEXT: movdqa %xmm6, %xmm2
796 ; SSE-NEXT: pandn %xmm1, %xmm2
797 ; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3],xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
798 ; SSE-NEXT: pand %xmm6, %xmm5
799 ; SSE-NEXT: por %xmm2, %xmm5
800 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm5[0,2,1,3]
801 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,5]
802 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
803 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,3,2,1,4,5,6,7]
804 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,4,7]
805 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
806 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
807 ; SSE-NEXT: packuswb %xmm1, %xmm2
808 ; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
809 ; SSE-NEXT: pand %xmm7, %xmm2
810 ; SSE-NEXT: movdqa %xmm9, %xmm13
811 ; SSE-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm8[8],xmm13[9],xmm8[9],xmm13[10],xmm8[10],xmm13[11],xmm8[11],xmm13[12],xmm8[12],xmm13[13],xmm8[13],xmm13[14],xmm8[14],xmm13[15],xmm8[15]
812 ; SSE-NEXT: movdqa {{.*#+}} xmm12 = [65535,65535,0,65535,65535,0,65535,65535]
813 ; SSE-NEXT: movdqa %xmm12, %xmm1
814 ; SSE-NEXT: pandn %xmm13, %xmm1
815 ; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
816 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
817 ; SSE-NEXT: pand %xmm12, %xmm9
818 ; SSE-NEXT: por %xmm1, %xmm9
819 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[3,1,2,0]
820 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
821 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,0]
822 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
823 ; SSE-NEXT: packuswb %xmm1, %xmm1
824 ; SSE-NEXT: movdqa %xmm7, %xmm3
825 ; SSE-NEXT: pandn %xmm1, %xmm3
826 ; SSE-NEXT: por %xmm2, %xmm3
827 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
828 ; SSE-NEXT: movdqa %xmm11, %xmm1
829 ; SSE-NEXT: pandn %xmm10, %xmm1
830 ; SSE-NEXT: movdqa %xmm4, %xmm2
831 ; SSE-NEXT: pandn %xmm0, %xmm2
832 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
833 ; SSE-NEXT: movdqa %xmm11, %xmm2
834 ; SSE-NEXT: pandn %xmm0, %xmm2
835 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
836 ; SSE-NEXT: pand %xmm11, %xmm0
837 ; SSE-NEXT: por %xmm1, %xmm0
838 ; SSE-NEXT: movdqa %xmm0, %xmm1
839 ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm8[8],xmm1[9],xmm8[9],xmm1[10],xmm8[10],xmm1[11],xmm8[11],xmm1[12],xmm8[12],xmm1[13],xmm8[13],xmm1[14],xmm8[14],xmm1[15],xmm8[15]
840 ; SSE-NEXT: movdqa %xmm6, %xmm2
841 ; SSE-NEXT: pandn %xmm1, %xmm2
842 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
843 ; SSE-NEXT: pand %xmm6, %xmm0
844 ; SSE-NEXT: por %xmm2, %xmm0
845 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
846 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
847 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
848 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
849 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
850 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
851 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
852 ; SSE-NEXT: packuswb %xmm1, %xmm0
853 ; SSE-NEXT: movdqa 32(%rdi), %xmm15
854 ; SSE-NEXT: movdqa %xmm15, %xmm10
855 ; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
856 ; SSE-NEXT: movdqa %xmm12, %xmm1
857 ; SSE-NEXT: pandn %xmm10, %xmm1
858 ; SSE-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
859 ; SSE-NEXT: movdqa %xmm15, %xmm2
860 ; SSE-NEXT: pand %xmm12, %xmm2
861 ; SSE-NEXT: por %xmm1, %xmm2
862 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[3,1,2,0]
863 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
864 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,0]
865 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
866 ; SSE-NEXT: packuswb %xmm1, %xmm1
867 ; SSE-NEXT: movdqa %xmm7, %xmm2
868 ; SSE-NEXT: pandn %xmm1, %xmm2
869 ; SSE-NEXT: pand %xmm7, %xmm0
870 ; SSE-NEXT: por %xmm0, %xmm2
871 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
872 ; SSE-NEXT: movdqa 160(%rdi), %xmm14
873 ; SSE-NEXT: movdqa %xmm11, %xmm0
874 ; SSE-NEXT: pandn %xmm14, %xmm0
875 ; SSE-NEXT: movdqa 144(%rdi), %xmm1
876 ; SSE-NEXT: movdqa %xmm4, %xmm2
877 ; SSE-NEXT: pandn %xmm1, %xmm2
878 ; SSE-NEXT: movdqa %xmm2, (%rsp) # 16-byte Spill
879 ; SSE-NEXT: movdqa %xmm11, %xmm5
880 ; SSE-NEXT: movdqa %xmm11, %xmm2
881 ; SSE-NEXT: pandn %xmm1, %xmm2
882 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
883 ; SSE-NEXT: pand %xmm11, %xmm1
884 ; SSE-NEXT: por %xmm0, %xmm1
885 ; SSE-NEXT: movdqa %xmm1, %xmm0
886 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
887 ; SSE-NEXT: movdqa %xmm6, %xmm2
888 ; SSE-NEXT: pandn %xmm0, %xmm2
889 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
890 ; SSE-NEXT: pand %xmm6, %xmm1
891 ; SSE-NEXT: por %xmm2, %xmm1
892 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
893 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
894 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
895 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
896 ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm1[0,1,2,3,6,5,4,7]
897 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
898 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
899 ; SSE-NEXT: packuswb %xmm0, %xmm9
900 ; SSE-NEXT: movdqa 176(%rdi), %xmm3
901 ; SSE-NEXT: movdqa %xmm3, %xmm1
902 ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm8[8],xmm1[9],xmm8[9],xmm1[10],xmm8[10],xmm1[11],xmm8[11],xmm1[12],xmm8[12],xmm1[13],xmm8[13],xmm1[14],xmm8[14],xmm1[15],xmm8[15]
903 ; SSE-NEXT: movdqa %xmm12, %xmm0
904 ; SSE-NEXT: pandn %xmm1, %xmm0
905 ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3],xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
906 ; SSE-NEXT: movdqa %xmm3, %xmm11
907 ; SSE-NEXT: pand %xmm12, %xmm11
908 ; SSE-NEXT: por %xmm0, %xmm11
909 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[3,1,2,0]
910 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
911 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,0]
912 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
913 ; SSE-NEXT: packuswb %xmm0, %xmm0
914 ; SSE-NEXT: movdqa %xmm7, %xmm2
915 ; SSE-NEXT: pandn %xmm0, %xmm2
916 ; SSE-NEXT: pand %xmm7, %xmm9
917 ; SSE-NEXT: por %xmm9, %xmm2
918 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
919 ; SSE-NEXT: movdqa 112(%rdi), %xmm9
920 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
921 ; SSE-NEXT: movdqa %xmm5, %xmm0
922 ; SSE-NEXT: pandn %xmm9, %xmm0
923 ; SSE-NEXT: movdqa 96(%rdi), %xmm9
924 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
925 ; SSE-NEXT: pand %xmm5, %xmm9
926 ; SSE-NEXT: por %xmm0, %xmm9
927 ; SSE-NEXT: movdqa %xmm9, %xmm0
928 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
929 ; SSE-NEXT: movdqa %xmm6, %xmm11
930 ; SSE-NEXT: pandn %xmm0, %xmm11
931 ; SSE-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
932 ; SSE-NEXT: pand %xmm6, %xmm9
933 ; SSE-NEXT: por %xmm11, %xmm9
934 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,2,1,3]
935 ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,7,6,5]
936 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,3,2,1]
937 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[0,3,2,1,4,5,6,7]
938 ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,6,5,4,7]
939 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
940 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
941 ; SSE-NEXT: packuswb %xmm0, %xmm9
942 ; SSE-NEXT: movdqa 128(%rdi), %xmm2
943 ; SSE-NEXT: movdqa %xmm2, %xmm11
944 ; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm8[8],xmm11[9],xmm8[9],xmm11[10],xmm8[10],xmm11[11],xmm8[11],xmm11[12],xmm8[12],xmm11[13],xmm8[13],xmm11[14],xmm8[14],xmm11[15],xmm8[15]
945 ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
946 ; SSE-NEXT: movdqa %xmm12, %xmm0
947 ; SSE-NEXT: pandn %xmm11, %xmm0
948 ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
949 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
950 ; SSE-NEXT: movdqa %xmm2, %xmm11
951 ; SSE-NEXT: pand %xmm12, %xmm11
952 ; SSE-NEXT: por %xmm0, %xmm11
953 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[3,1,2,0]
954 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
955 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,0]
956 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
957 ; SSE-NEXT: packuswb %xmm0, %xmm0
958 ; SSE-NEXT: movdqa %xmm7, %xmm2
959 ; SSE-NEXT: pandn %xmm0, %xmm2
960 ; SSE-NEXT: pand %xmm7, %xmm9
961 ; SSE-NEXT: por %xmm9, %xmm2
962 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
963 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
964 ; SSE-NEXT: pand %xmm4, %xmm0
965 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
966 ; SSE-NEXT: movdqa %xmm0, %xmm9
967 ; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm8[8],xmm9[9],xmm8[9],xmm9[10],xmm8[10],xmm9[11],xmm8[11],xmm9[12],xmm8[12],xmm9[13],xmm8[13],xmm9[14],xmm8[14],xmm9[15],xmm8[15]
968 ; SSE-NEXT: movdqa %xmm12, %xmm11
969 ; SSE-NEXT: pandn %xmm9, %xmm11
970 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
971 ; SSE-NEXT: pand %xmm12, %xmm0
972 ; SSE-NEXT: por %xmm11, %xmm0
973 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,0,3,4,5,6,7]
974 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
975 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
976 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,0,4,5,6,7]
977 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4]
978 ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,7,6,7]
979 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,2,2,3]
980 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[1,2,3,3,4,5,6,7]
981 ; SSE-NEXT: packuswb %xmm9, %xmm0
982 ; SSE-NEXT: movdqa %xmm12, %xmm9
983 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
984 ; SSE-NEXT: pandn %xmm5, %xmm9
985 ; SSE-NEXT: movdqa %xmm6, %xmm2
986 ; SSE-NEXT: pandn %xmm13, %xmm2
987 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
988 ; SSE-NEXT: pand %xmm12, %xmm13
989 ; SSE-NEXT: por %xmm9, %xmm13
990 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm13[0,3,2,3,4,5,6,7]
991 ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,6,5,6,7]
992 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,1,2,0]
993 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[2,2,2,2,4,5,6,7]
994 ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,5,6,7,4]
995 ; SSE-NEXT: packuswb %xmm9, %xmm9
996 ; SSE-NEXT: movdqa %xmm7, %xmm2
997 ; SSE-NEXT: pandn %xmm9, %xmm2
998 ; SSE-NEXT: pand %xmm7, %xmm0
999 ; SSE-NEXT: por %xmm0, %xmm2
1000 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1001 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1002 ; SSE-NEXT: pand %xmm4, %xmm0
1003 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1004 ; SSE-NEXT: movdqa %xmm0, %xmm9
1005 ; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm8[8],xmm9[9],xmm8[9],xmm9[10],xmm8[10],xmm9[11],xmm8[11],xmm9[12],xmm8[12],xmm9[13],xmm8[13],xmm9[14],xmm8[14],xmm9[15],xmm8[15]
1006 ; SSE-NEXT: movdqa %xmm12, %xmm11
1007 ; SSE-NEXT: pandn %xmm9, %xmm11
1008 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
1009 ; SSE-NEXT: pand %xmm12, %xmm0
1010 ; SSE-NEXT: por %xmm11, %xmm0
1011 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,0,3,4,5,6,7]
1012 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
1013 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
1014 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,0,4,5,6,7]
1015 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4]
1016 ; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,7,6,7]
1017 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,2,2,3]
1018 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[1,2,3,3,4,5,6,7]
1019 ; SSE-NEXT: packuswb %xmm9, %xmm0
1020 ; SSE-NEXT: movdqa %xmm12, %xmm11
1021 ; SSE-NEXT: pandn %xmm15, %xmm11
1022 ; SSE-NEXT: movdqa %xmm6, %xmm9
1023 ; SSE-NEXT: pandn %xmm10, %xmm9
1024 ; SSE-NEXT: pand %xmm12, %xmm10
1025 ; SSE-NEXT: por %xmm11, %xmm10
1026 ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm10[0,3,2,3,4,5,6,7]
1027 ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,5,6,7]
1028 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,2,0]
1029 ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm10[2,2,2,2,4,5,6,7]
1030 ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,6,7,4]
1031 ; SSE-NEXT: packuswb %xmm10, %xmm11
1032 ; SSE-NEXT: movdqa %xmm7, %xmm10
1033 ; SSE-NEXT: pandn %xmm11, %xmm10
1034 ; SSE-NEXT: pand %xmm7, %xmm0
1035 ; SSE-NEXT: por %xmm0, %xmm10
1036 ; SSE-NEXT: movdqa %xmm14, %xmm0
1037 ; SSE-NEXT: pand %xmm4, %xmm0
1038 ; SSE-NEXT: por (%rsp), %xmm0 # 16-byte Folded Reload
1039 ; SSE-NEXT: movdqa %xmm0, %xmm11
1040 ; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm8[8],xmm11[9],xmm8[9],xmm11[10],xmm8[10],xmm11[11],xmm8[11],xmm11[12],xmm8[12],xmm11[13],xmm8[13],xmm11[14],xmm8[14],xmm11[15],xmm8[15]
1041 ; SSE-NEXT: movdqa %xmm12, %xmm13
1042 ; SSE-NEXT: pandn %xmm11, %xmm13
1043 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
1044 ; SSE-NEXT: pand %xmm12, %xmm0
1045 ; SSE-NEXT: por %xmm13, %xmm0
1046 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,0,3,4,5,6,7]
1047 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
1048 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
1049 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,0,4,5,6,7]
1050 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4]
1051 ; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,7,6,7]
1052 ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[0,2,2,3]
1053 ; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm11[1,2,3,3,4,5,6,7]
1054 ; SSE-NEXT: packuswb %xmm11, %xmm0
1055 ; SSE-NEXT: movdqa %xmm12, %xmm13
1056 ; SSE-NEXT: pandn %xmm3, %xmm13
1057 ; SSE-NEXT: movdqa %xmm6, %xmm11
1058 ; SSE-NEXT: pandn %xmm1, %xmm11
1059 ; SSE-NEXT: pand %xmm12, %xmm1
1060 ; SSE-NEXT: por %xmm13, %xmm1
1061 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
1062 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
1063 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
1064 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,2,2,2,4,5,6,7]
1065 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,7,4]
1066 ; SSE-NEXT: packuswb %xmm1, %xmm1
1067 ; SSE-NEXT: movdqa %xmm7, %xmm13
1068 ; SSE-NEXT: pandn %xmm1, %xmm13
1069 ; SSE-NEXT: pand %xmm7, %xmm0
1070 ; SSE-NEXT: por %xmm0, %xmm13
1071 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1072 ; SSE-NEXT: pand %xmm4, %xmm0
1073 ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
1074 ; SSE-NEXT: por %xmm0, %xmm4
1075 ; SSE-NEXT: movdqa %xmm4, %xmm0
1076 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
1077 ; SSE-NEXT: movdqa %xmm12, %xmm1
1078 ; SSE-NEXT: pandn %xmm0, %xmm1
1079 ; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3],xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
1080 ; SSE-NEXT: pand %xmm12, %xmm4
1081 ; SSE-NEXT: por %xmm1, %xmm4
1082 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm4[2,1,0,3,4,5,6,7]
1083 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,4,7]
1084 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
1085 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,0,4,5,6,7]
1086 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,7,4]
1087 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
1088 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
1089 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,3,4,5,6,7]
1090 ; SSE-NEXT: packuswb %xmm0, %xmm1
1091 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1092 ; SSE-NEXT: pand %xmm12, %xmm0
1093 ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
1094 ; SSE-NEXT: por %xmm0, %xmm12
1095 ; SSE-NEXT: pand %xmm7, %xmm1
1096 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm12[0,3,2,3,4,5,6,7]
1097 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
1098 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
1099 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7]
1100 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,6,7,4]
1101 ; SSE-NEXT: packuswb %xmm0, %xmm0
1102 ; SSE-NEXT: pandn %xmm0, %xmm7
1103 ; SSE-NEXT: por %xmm1, %xmm7
1104 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1105 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
1106 ; SSE-NEXT: pand %xmm2, %xmm0
1107 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1108 ; SSE-NEXT: movdqa %xmm0, %xmm4
1109 ; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm8[8],xmm4[9],xmm8[9],xmm4[10],xmm8[10],xmm4[11],xmm8[11],xmm4[12],xmm8[12],xmm4[13],xmm8[13],xmm4[14],xmm8[14],xmm4[15],xmm8[15]
1110 ; SSE-NEXT: movdqa {{.*#+}} xmm12 = [0,65535,65535,0,65535,65535,0,65535]
1111 ; SSE-NEXT: movdqa %xmm12, %xmm1
1112 ; SSE-NEXT: pandn %xmm4, %xmm1
1113 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
1114 ; SSE-NEXT: pand %xmm12, %xmm0
1115 ; SSE-NEXT: por %xmm1, %xmm0
1116 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,0]
1117 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
1118 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,1,2,0]
1119 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,0,3,4,5,6,7]
1120 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm1[0,1,2,3,6,5,4,7]
1121 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,1,2,3]
1122 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
1123 ; SSE-NEXT: packuswb %xmm1, %xmm0
1124 ; SSE-NEXT: movdqa %xmm5, %xmm1
1125 ; SSE-NEXT: pand %xmm6, %xmm1
1126 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
1127 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
1128 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,1,4,5,6,7]
1129 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
1130 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,3,4,5,6,7]
1131 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,4,7]
1132 ; SSE-NEXT: packuswb %xmm1, %xmm1
1133 ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535,65535,0,0,0]
1134 ; SSE-NEXT: movdqa %xmm4, %xmm5
1135 ; SSE-NEXT: pandn %xmm1, %xmm5
1136 ; SSE-NEXT: pand %xmm4, %xmm0
1137 ; SSE-NEXT: por %xmm0, %xmm5
1138 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1139 ; SSE-NEXT: pand %xmm2, %xmm0
1140 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1141 ; SSE-NEXT: movdqa %xmm0, %xmm2
1142 ; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm8[8],xmm2[9],xmm8[9],xmm2[10],xmm8[10],xmm2[11],xmm8[11],xmm2[12],xmm8[12],xmm2[13],xmm8[13],xmm2[14],xmm8[14],xmm2[15],xmm8[15]
1143 ; SSE-NEXT: movdqa %xmm12, %xmm1
1144 ; SSE-NEXT: pandn %xmm2, %xmm1
1145 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
1146 ; SSE-NEXT: pand %xmm12, %xmm0
1147 ; SSE-NEXT: por %xmm1, %xmm0
1148 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,0]
1149 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
1150 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,1,2,0]
1151 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,0,3,4,5,6,7]
1152 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,4,7]
1153 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,1,2,3]
1154 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
1155 ; SSE-NEXT: packuswb %xmm0, %xmm1
1156 ; SSE-NEXT: pand %xmm6, %xmm15
1157 ; SSE-NEXT: por %xmm9, %xmm15
1158 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[0,2,1,3]
1159 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7]
1160 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
1161 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,3,4,5,6,7]
1162 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
1163 ; SSE-NEXT: packuswb %xmm0, %xmm0
1164 ; SSE-NEXT: movdqa %xmm4, %xmm9
1165 ; SSE-NEXT: pandn %xmm0, %xmm9
1166 ; SSE-NEXT: pand %xmm4, %xmm1
1167 ; SSE-NEXT: por %xmm1, %xmm9
1168 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
1169 ; SSE-NEXT: pand %xmm2, %xmm14
1170 ; SSE-NEXT: por {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
1171 ; SSE-NEXT: movdqa %xmm14, %xmm0
1172 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
1173 ; SSE-NEXT: movdqa %xmm12, %xmm1
1174 ; SSE-NEXT: pandn %xmm0, %xmm1
1175 ; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm8[0],xmm14[1],xmm8[1],xmm14[2],xmm8[2],xmm14[3],xmm8[3],xmm14[4],xmm8[4],xmm14[5],xmm8[5],xmm14[6],xmm8[6],xmm14[7],xmm8[7]
1176 ; SSE-NEXT: pand %xmm12, %xmm14
1177 ; SSE-NEXT: por %xmm1, %xmm14
1178 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[3,1,2,0]
1179 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
1180 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,1,2,0]
1181 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,0,3,4,5,6,7]
1182 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,4,7]
1183 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
1184 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
1185 ; SSE-NEXT: packuswb %xmm0, %xmm1
1186 ; SSE-NEXT: pand %xmm6, %xmm3
1187 ; SSE-NEXT: por %xmm11, %xmm3
1188 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,1,3]
1189 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7]
1190 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
1191 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,3,4,5,6,7]
1192 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
1193 ; SSE-NEXT: packuswb %xmm0, %xmm3
1194 ; SSE-NEXT: movdqa %xmm4, %xmm0
1195 ; SSE-NEXT: pandn %xmm3, %xmm0
1196 ; SSE-NEXT: pand %xmm4, %xmm1
1197 ; SSE-NEXT: por %xmm1, %xmm0
1198 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1199 ; SSE-NEXT: pand %xmm2, %xmm1
1200 ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
1201 ; SSE-NEXT: por %xmm1, %xmm2
1202 ; SSE-NEXT: movdqa %xmm2, %xmm1
1203 ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm8[8],xmm1[9],xmm8[9],xmm1[10],xmm8[10],xmm1[11],xmm8[11],xmm1[12],xmm8[12],xmm1[13],xmm8[13],xmm1[14],xmm8[14],xmm1[15],xmm8[15]
1204 ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
1205 ; SSE-NEXT: pand %xmm12, %xmm2
1206 ; SSE-NEXT: pandn %xmm1, %xmm12
1207 ; SSE-NEXT: por %xmm2, %xmm12
1208 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm12[3,1,2,0]
1209 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,5]
1210 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,2,0]
1211 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[2,1,0,3,4,5,6,7]
1212 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,4,7]
1213 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
1214 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
1215 ; SSE-NEXT: packuswb %xmm1, %xmm3
1216 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1217 ; SSE-NEXT: pand %xmm6, %xmm1
1218 ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
1219 ; SSE-NEXT: por %xmm1, %xmm6
1220 ; SSE-NEXT: pand %xmm4, %xmm3
1221 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[0,2,1,3]
1222 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,1,4,5,6,7]
1223 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
1224 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,3,4,5,6,7]
1225 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,4,7]
1226 ; SSE-NEXT: packuswb %xmm1, %xmm1
1227 ; SSE-NEXT: pandn %xmm1, %xmm4
1228 ; SSE-NEXT: por %xmm3, %xmm4
1229 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1230 ; SSE-NEXT: movaps %xmm1, 32(%rsi)
1231 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1232 ; SSE-NEXT: movaps %xmm1, 48(%rsi)
1233 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1234 ; SSE-NEXT: movaps %xmm1, (%rsi)
1235 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1236 ; SSE-NEXT: movaps %xmm1, 16(%rsi)
1237 ; SSE-NEXT: movdqa %xmm7, 32(%rdx)
1238 ; SSE-NEXT: movdqa %xmm13, 48(%rdx)
1239 ; SSE-NEXT: movdqa %xmm10, (%rdx)
1240 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1241 ; SSE-NEXT: movaps %xmm1, 16(%rdx)
1242 ; SSE-NEXT: movdqa %xmm4, 32(%rcx)
1243 ; SSE-NEXT: movdqa %xmm0, 48(%rcx)
1244 ; SSE-NEXT: movdqa %xmm9, (%rcx)
1245 ; SSE-NEXT: movdqa %xmm5, 16(%rcx)
1246 ; SSE-NEXT: addq $168, %rsp
1247 ; SSE-NEXT: retq
1249 ; AVX1-ONLY-LABEL: load_i8_stride3_vf64:
1250 ; AVX1-ONLY: # %bb.0:
1251 ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm6
1252 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm0
1253 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1254 ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm4
1255 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm7
1256 ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm2
1257 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1258 ; AVX1-ONLY-NEXT: vmovdqa 96(%rdi), %xmm11
1259 ; AVX1-ONLY-NEXT: vmovdqa 112(%rdi), %xmm3
1260 ; AVX1-ONLY-NEXT: vmovdqa 144(%rdi), %xmm10
1261 ; AVX1-ONLY-NEXT: vmovdqa 160(%rdi), %xmm1
1262 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1263 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm12 = [128,128,128,128,128,0,3,6,9,12,15,2,5,8,11,14]
1264 ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm6, %xmm5
1265 ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm10, %xmm8
1266 ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm11, %xmm9
1267 ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm7, %xmm13
1268 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm14 = <1,4,7,10,13,128,128,128,128,128,128,u,u,u,u,u>
1269 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm6, %xmm6
1270 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm15 = <128,128,128,128,128,0,3,6,9,12,15,u,u,u,u,u>
1271 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm0, %xmm12
1272 ; AVX1-ONLY-NEXT: vpor %xmm6, %xmm12, %xmm6
1273 ; AVX1-ONLY-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1274 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm10, %xmm10
1275 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm1, %xmm12
1276 ; AVX1-ONLY-NEXT: vpor %xmm10, %xmm12, %xmm6
1277 ; AVX1-ONLY-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1278 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm11, %xmm11
1279 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm3, %xmm12
1280 ; AVX1-ONLY-NEXT: vpor %xmm11, %xmm12, %xmm6
1281 ; AVX1-ONLY-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1282 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm7, %xmm7
1283 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm2, %xmm12
1284 ; AVX1-ONLY-NEXT: vpor %xmm7, %xmm12, %xmm6
1285 ; AVX1-ONLY-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1286 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm0, %xmm12
1287 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm4, %xmm6
1288 ; AVX1-ONLY-NEXT: vpor %xmm6, %xmm12, %xmm6
1289 ; AVX1-ONLY-NEXT: vmovdqa 176(%rdi), %xmm12
1290 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm1, %xmm0
1291 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm12, %xmm10
1292 ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm10, %xmm1
1293 ; AVX1-ONLY-NEXT: vmovdqa 128(%rdi), %xmm10
1294 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm3, %xmm0
1295 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm10, %xmm11
1296 ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm11, %xmm0
1297 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm2, %xmm11
1298 ; AVX1-ONLY-NEXT: vmovdqa 80(%rdi), %xmm14
1299 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm14, %xmm15
1300 ; AVX1-ONLY-NEXT: vpor %xmm11, %xmm15, %xmm11
1301 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm15 = [1,4,7,10,13,128,128,128,128,128,128,128,128,128,128,128]
1302 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm14, %xmm7
1303 ; AVX1-ONLY-NEXT: vpor %xmm7, %xmm13, %xmm7
1304 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm13 = xmm13[11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7,8,9,10]
1305 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm10, %xmm11
1306 ; AVX1-ONLY-NEXT: vpor %xmm11, %xmm9, %xmm11
1307 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm9 = xmm9[11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10]
1308 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm12, %xmm0
1309 ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm8, %xmm2
1310 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm8 = xmm8[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
1311 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm4, %xmm1
1312 ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm5, %xmm1
1313 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7,8,9,10]
1314 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,2,3,4,5,6,7,8,9,10,128,128,128,128,128]
1315 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1316 ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm0, %xmm15
1317 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm0 = [128,128,128,128,128,128,128,128,128,128,128,2,5,8,11,14]
1318 ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm14, %xmm14
1319 ; AVX1-ONLY-NEXT: vpor %xmm14, %xmm15, %xmm14
1320 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1321 ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm15, %xmm15
1322 ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm10, %xmm10
1323 ; AVX1-ONLY-NEXT: vpor %xmm10, %xmm15, %xmm10
1324 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1325 ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm15, %xmm15
1326 ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm12, %xmm12
1327 ; AVX1-ONLY-NEXT: vpor %xmm12, %xmm15, %xmm12
1328 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1329 ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm15, %xmm6
1330 ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm4, %xmm0
1331 ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm6, %xmm0
1332 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm4 = [5,6,7,8,9,10,128,128,128,128,128,0,1,2,3,4]
1333 ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm7, %xmm6
1334 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm7 = [128,128,128,128,128,128,2,5,8,11,14,128,128,128,128,128]
1335 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1336 ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm15, %xmm15
1337 ; AVX1-ONLY-NEXT: vpor %xmm6, %xmm15, %xmm6
1338 ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm11, %xmm11
1339 ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm3, %xmm15
1340 ; AVX1-ONLY-NEXT: vpor %xmm15, %xmm11, %xmm11
1341 ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm2, %xmm2
1342 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
1343 ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm3, %xmm3
1344 ; AVX1-ONLY-NEXT: vpor %xmm3, %xmm2, %xmm2
1345 ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm1, %xmm1
1346 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
1347 ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm3, %xmm3
1348 ; AVX1-ONLY-NEXT: vpor %xmm3, %xmm1, %xmm1
1349 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, (%rsi)
1350 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, 48(%rsi)
1351 ; AVX1-ONLY-NEXT: vmovdqa %xmm11, 32(%rsi)
1352 ; AVX1-ONLY-NEXT: vmovdqa %xmm6, 16(%rsi)
1353 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, (%rdx)
1354 ; AVX1-ONLY-NEXT: vmovdqa %xmm12, 48(%rdx)
1355 ; AVX1-ONLY-NEXT: vmovdqa %xmm10, 32(%rdx)
1356 ; AVX1-ONLY-NEXT: vmovdqa %xmm14, 16(%rdx)
1357 ; AVX1-ONLY-NEXT: vmovdqa %xmm5, (%rcx)
1358 ; AVX1-ONLY-NEXT: vmovdqa %xmm8, 48(%rcx)
1359 ; AVX1-ONLY-NEXT: vmovdqa %xmm9, 32(%rcx)
1360 ; AVX1-ONLY-NEXT: vmovdqa %xmm13, 16(%rcx)
1361 ; AVX1-ONLY-NEXT: retq
1363 ; AVX2-ONLY-LABEL: load_i8_stride3_vf64:
1364 ; AVX2-ONLY: # %bb.0:
1365 ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm0
1366 ; AVX2-ONLY-NEXT: vmovdqa 16(%rdi), %xmm1
1367 ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %xmm2
1368 ; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %xmm3
1369 ; AVX2-ONLY-NEXT: vmovdqa 112(%rdi), %xmm4
1370 ; AVX2-ONLY-NEXT: vmovdqa 128(%rdi), %xmm5
1371 ; AVX2-ONLY-NEXT: vinserti128 $1, 48(%rdi), %ymm0, %ymm6
1372 ; AVX2-ONLY-NEXT: vinserti128 $1, 64(%rdi), %ymm1, %ymm0
1373 ; AVX2-ONLY-NEXT: vinserti128 $1, 80(%rdi), %ymm2, %ymm1
1374 ; AVX2-ONLY-NEXT: vinserti128 $1, 144(%rdi), %ymm3, %ymm2
1375 ; AVX2-ONLY-NEXT: vinserti128 $1, 160(%rdi), %ymm4, %ymm3
1376 ; AVX2-ONLY-NEXT: vinserti128 $1, 176(%rdi), %ymm5, %ymm4
1377 ; AVX2-ONLY-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
1378 ; AVX2-ONLY-NEXT: # ymm5 = mem[0,1,0,1]
1379 ; AVX2-ONLY-NEXT: vpblendvb %ymm5, %ymm6, %ymm1, %ymm7
1380 ; AVX2-ONLY-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14]
1381 ; AVX2-ONLY-NEXT: # ymm8 = mem[0,1,0,1]
1382 ; AVX2-ONLY-NEXT: vpshufb %ymm8, %ymm7, %ymm7
1383 ; AVX2-ONLY-NEXT: vpblendvb %ymm5, %ymm2, %ymm4, %ymm5
1384 ; AVX2-ONLY-NEXT: vpshufb %ymm8, %ymm5, %ymm5
1385 ; AVX2-ONLY-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [255,0,0,255,0,0,255,0,0,255,0,0,255,0,0,255,255,0,0,255,0,0,255,0,0,255,0,0,255,0,0,255]
1386 ; AVX2-ONLY-NEXT: # ymm8 = mem[0,1,0,1]
1387 ; AVX2-ONLY-NEXT: vpblendvb %ymm8, %ymm0, %ymm6, %ymm6
1388 ; AVX2-ONLY-NEXT: vpblendvb %ymm8, %ymm3, %ymm2, %ymm2
1389 ; AVX2-ONLY-NEXT: vpblendvb %ymm8, %ymm1, %ymm0, %ymm9
1390 ; AVX2-ONLY-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [1,4,7,10,13,0,3,6,9,12,15,0,0,0,0,0,1,4,7,10,13,0,3,6,9,12,15,0,0,0,0,0]
1391 ; AVX2-ONLY-NEXT: # ymm10 = mem[0,1,0,1]
1392 ; AVX2-ONLY-NEXT: vpshufb %ymm10, %ymm9, %ymm9
1393 ; AVX2-ONLY-NEXT: vpblendvb %ymm8, %ymm4, %ymm3, %ymm8
1394 ; AVX2-ONLY-NEXT: vpshufb %ymm10, %ymm8, %ymm8
1395 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm8 = ymm5[11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7,8,9,10],ymm5[27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23,24,25,26]
1396 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm9 = ymm7[11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7,8,9,10],ymm7[27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23,24,25,26]
1397 ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm10 = [1,4,7,10,13,0,3,6,9,12,15,128,128,128,128,128,17,20,23,26,29,16,19,22,25,28,31,128,128,128,128,128]
1398 ; AVX2-ONLY-NEXT: vpshufb %ymm10, %ymm2, %ymm2
1399 ; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,2,5,8,11,14,128,128,128,128,128,128,128,128,128,128,128,18,21,24,27,30]
1400 ; AVX2-ONLY-NEXT: vpshufb %ymm11, %ymm4, %ymm4
1401 ; AVX2-ONLY-NEXT: vpor %ymm4, %ymm2, %ymm2
1402 ; AVX2-ONLY-NEXT: vpshufb %ymm10, %ymm6, %ymm4
1403 ; AVX2-ONLY-NEXT: vpshufb %ymm11, %ymm1, %ymm1
1404 ; AVX2-ONLY-NEXT: vpor %ymm1, %ymm4, %ymm1
1405 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm4 = ymm5[5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,21,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20]
1406 ; AVX2-ONLY-NEXT: vpbroadcastq {{.*#+}} ymm5 = [8,11,14,0,0,0,2,5,8,11,14,0,0,0,2,5,8,11,14,0,0,0,2,5,8,11,14,0,0,0,2,5]
1407 ; AVX2-ONLY-NEXT: vpshufb %ymm5, %ymm3, %ymm3
1408 ; AVX2-ONLY-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255]
1409 ; AVX2-ONLY-NEXT: # ymm6 = mem[0,1,0,1]
1410 ; AVX2-ONLY-NEXT: vpblendvb %ymm6, %ymm4, %ymm3, %ymm3
1411 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm4 = ymm7[5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,21,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20]
1412 ; AVX2-ONLY-NEXT: vpshufb %ymm5, %ymm0, %ymm0
1413 ; AVX2-ONLY-NEXT: vpblendvb %ymm6, %ymm4, %ymm0, %ymm0
1414 ; AVX2-ONLY-NEXT: vmovdqa %ymm0, (%rsi)
1415 ; AVX2-ONLY-NEXT: vmovdqa %ymm3, 32(%rsi)
1416 ; AVX2-ONLY-NEXT: vmovdqa %ymm1, (%rdx)
1417 ; AVX2-ONLY-NEXT: vmovdqa %ymm2, 32(%rdx)
1418 ; AVX2-ONLY-NEXT: vmovdqa %ymm9, (%rcx)
1419 ; AVX2-ONLY-NEXT: vmovdqa %ymm8, 32(%rcx)
1420 ; AVX2-ONLY-NEXT: vzeroupper
1421 ; AVX2-ONLY-NEXT: retq
1423 ; AVX512F-LABEL: load_i8_stride3_vf64:
1424 ; AVX512F: # %bb.0:
1425 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
1426 ; AVX512F-NEXT: vmovdqa 16(%rdi), %xmm1
1427 ; AVX512F-NEXT: vmovdqa 32(%rdi), %xmm2
1428 ; AVX512F-NEXT: vmovdqa 96(%rdi), %xmm3
1429 ; AVX512F-NEXT: vmovdqa 112(%rdi), %xmm4
1430 ; AVX512F-NEXT: vmovdqa 128(%rdi), %xmm5
1431 ; AVX512F-NEXT: vinserti128 $1, 144(%rdi), %ymm3, %ymm3
1432 ; AVX512F-NEXT: vinserti128 $1, 48(%rdi), %ymm0, %ymm0
1433 ; AVX512F-NEXT: vinserti128 $1, 160(%rdi), %ymm4, %ymm4
1434 ; AVX512F-NEXT: vinserti128 $1, 64(%rdi), %ymm1, %ymm1
1435 ; AVX512F-NEXT: vinserti128 $1, 176(%rdi), %ymm5, %ymm5
1436 ; AVX512F-NEXT: vinserti128 $1, 80(%rdi), %ymm2, %ymm2
1437 ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
1438 ; AVX512F-NEXT: # ymm6 = mem[0,1,0,1]
1439 ; AVX512F-NEXT: vpshufb %ymm6, %ymm0, %ymm0
1440 ; AVX512F-NEXT: vpshufb %ymm6, %ymm3, %ymm3
1441 ; AVX512F-NEXT: vpshufb %ymm6, %ymm1, %ymm1
1442 ; AVX512F-NEXT: vpshufb %ymm6, %ymm4, %ymm4
1443 ; AVX512F-NEXT: vpshufb %ymm6, %ymm2, %ymm2
1444 ; AVX512F-NEXT: vpshufb %ymm6, %ymm5, %ymm5
1445 ; AVX512F-NEXT: vpalignr {{.*#+}} ymm6 = ymm5[11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10],ymm5[27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26]
1446 ; AVX512F-NEXT: vpalignr {{.*#+}} ymm7 = ymm2[11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9,10],ymm2[27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25,26]
1447 ; AVX512F-NEXT: vpalignr {{.*#+}} ymm3 = ymm3[11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23,24,25,26]
1448 ; AVX512F-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
1449 ; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm8
1450 ; AVX512F-NEXT: vpalignr {{.*#+}} ymm4 = ymm4[11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7,8,9,10],ymm4[27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23,24,25,26]
1451 ; AVX512F-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10],ymm1[27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26]
1452 ; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm1, %zmm2
1453 ; AVX512F-NEXT: vpalignr {{.*#+}} ymm1 = ymm7[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm7[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
1454 ; AVX512F-NEXT: vpalignr {{.*#+}} ymm4 = ymm6[11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7,8,9,10],ymm6[27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23,24,25,26]
1455 ; AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm5 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
1456 ; AVX512F-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
1457 ; AVX512F-NEXT: vpternlogq $202, %zmm2, %zmm8, %zmm5
1458 ; AVX512F-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23,24,25,26]
1459 ; AVX512F-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
1460 ; AVX512F-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23,24,25,26]
1461 ; AVX512F-NEXT: vpalignr {{.*#+}} ymm2 = ymm2[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
1462 ; AVX512F-NEXT: vmovdqa %ymm2, 32(%rsi)
1463 ; AVX512F-NEXT: vmovdqa %ymm0, (%rsi)
1464 ; AVX512F-NEXT: vmovdqa64 %zmm5, (%rdx)
1465 ; AVX512F-NEXT: vmovdqa %ymm4, 32(%rcx)
1466 ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx)
1467 ; AVX512F-NEXT: vzeroupper
1468 ; AVX512F-NEXT: retq
1470 ; AVX512BW-ONLY-SLOW-LABEL: load_i8_stride3_vf64:
1471 ; AVX512BW-ONLY-SLOW: # %bb.0:
1472 ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa (%rdi), %xmm0
1473 ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 16(%rdi), %xmm1
1474 ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 32(%rdi), %xmm2
1475 ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 96(%rdi), %xmm3
1476 ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 112(%rdi), %xmm4
1477 ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa 128(%rdi), %xmm5
1478 ; AVX512BW-ONLY-SLOW-NEXT: vinserti128 $1, 144(%rdi), %ymm3, %ymm3
1479 ; AVX512BW-ONLY-SLOW-NEXT: vinserti128 $1, 48(%rdi), %ymm0, %ymm0
1480 ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
1481 ; AVX512BW-ONLY-SLOW-NEXT: vinserti128 $1, 160(%rdi), %ymm4, %ymm3
1482 ; AVX512BW-ONLY-SLOW-NEXT: vinserti128 $1, 64(%rdi), %ymm1, %ymm1
1483 ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
1484 ; AVX512BW-ONLY-SLOW-NEXT: vinserti128 $1, 176(%rdi), %ymm5, %ymm3
1485 ; AVX512BW-ONLY-SLOW-NEXT: vinserti128 $1, 80(%rdi), %ymm2, %ymm2
1486 ; AVX512BW-ONLY-SLOW-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
1487 ; AVX512BW-ONLY-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
1488 ; AVX512BW-ONLY-SLOW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
1489 ; AVX512BW-ONLY-SLOW-NEXT: vpshufb %zmm3, %zmm0, %zmm0
1490 ; AVX512BW-ONLY-SLOW-NEXT: vpshufb %zmm3, %zmm1, %zmm1
1491 ; AVX512BW-ONLY-SLOW-NEXT: vpshufb %zmm3, %zmm2, %zmm2
1492 ; AVX512BW-ONLY-SLOW-NEXT: vpalignr {{.*#+}} zmm3 = zmm2[11,12,13,14,15],zmm0[0,1,2,3,4,5,6,7,8,9,10],zmm2[27,28,29,30,31],zmm0[16,17,18,19,20,21,22,23,24,25,26],zmm2[43,44,45,46,47],zmm0[32,33,34,35,36,37,38,39,40,41,42],zmm2[59,60,61,62,63],zmm0[48,49,50,51,52,53,54,55,56,57,58]
1493 ; AVX512BW-ONLY-SLOW-NEXT: vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
1494 ; AVX512BW-ONLY-SLOW-NEXT: vpalignr {{.*#+}} zmm1 = zmm1[11,12,13,14,15],zmm2[0,1,2,3,4,5,6,7,8,9,10],zmm1[27,28,29,30,31],zmm2[16,17,18,19,20,21,22,23,24,25,26],zmm1[43,44,45,46,47],zmm2[32,33,34,35,36,37,38,39,40,41,42],zmm1[59,60,61,62,63],zmm2[48,49,50,51,52,53,54,55,56,57,58]
1495 ; AVX512BW-ONLY-SLOW-NEXT: movabsq $-576188069258921984, %rax # imm = 0xF800F800F800F800
1496 ; AVX512BW-ONLY-SLOW-NEXT: kmovq %rax, %k1
1497 ; AVX512BW-ONLY-SLOW-NEXT: vpblendmb %zmm1, %zmm0, %zmm2 {%k1}
1498 ; AVX512BW-ONLY-SLOW-NEXT: vpalignr {{.*#+}} zmm1 = zmm3[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm3[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm3[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm3[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
1499 ; AVX512BW-ONLY-SLOW-NEXT: vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm3[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm3[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm3[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm3[48,49,50,51,52,53,54,55,56,57,58]
1500 ; AVX512BW-ONLY-SLOW-NEXT: vpalignr {{.*#+}} zmm0 = zmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,42,43,44,45,46,47,32,33,34,35,36,37,38,39,40,41,58,59,60,61,62,63,48,49,50,51,52,53,54,55,56,57]
1501 ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, (%rsi)
1502 ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm2, (%rdx)
1503 ; AVX512BW-ONLY-SLOW-NEXT: vmovdqa64 %zmm1, (%rcx)
1504 ; AVX512BW-ONLY-SLOW-NEXT: vzeroupper
1505 ; AVX512BW-ONLY-SLOW-NEXT: retq
1507 ; AVX512BW-ONLY-FAST-LABEL: load_i8_stride3_vf64:
1508 ; AVX512BW-ONLY-FAST: # %bb.0:
1509 ; AVX512BW-ONLY-FAST-NEXT: vmovdqa (%rdi), %xmm0
1510 ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 16(%rdi), %xmm1
1511 ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 32(%rdi), %xmm2
1512 ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 96(%rdi), %xmm3
1513 ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 112(%rdi), %xmm4
1514 ; AVX512BW-ONLY-FAST-NEXT: vmovdqa 128(%rdi), %xmm5
1515 ; AVX512BW-ONLY-FAST-NEXT: vinserti128 $1, 144(%rdi), %ymm3, %ymm3
1516 ; AVX512BW-ONLY-FAST-NEXT: vinserti128 $1, 48(%rdi), %ymm0, %ymm0
1517 ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
1518 ; AVX512BW-ONLY-FAST-NEXT: vinserti128 $1, 160(%rdi), %ymm4, %ymm3
1519 ; AVX512BW-ONLY-FAST-NEXT: vinserti128 $1, 64(%rdi), %ymm1, %ymm1
1520 ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
1521 ; AVX512BW-ONLY-FAST-NEXT: vinserti128 $1, 176(%rdi), %ymm5, %ymm3
1522 ; AVX512BW-ONLY-FAST-NEXT: vinserti128 $1, 80(%rdi), %ymm2, %ymm2
1523 ; AVX512BW-ONLY-FAST-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
1524 ; AVX512BW-ONLY-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
1525 ; AVX512BW-ONLY-FAST-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
1526 ; AVX512BW-ONLY-FAST-NEXT: vpshufb %zmm3, %zmm0, %zmm0
1527 ; AVX512BW-ONLY-FAST-NEXT: vpshufb %zmm3, %zmm1, %zmm1
1528 ; AVX512BW-ONLY-FAST-NEXT: vpshufb %zmm3, %zmm2, %zmm2
1529 ; AVX512BW-ONLY-FAST-NEXT: vpalignr {{.*#+}} zmm3 = zmm2[11,12,13,14,15],zmm0[0,1,2,3,4,5,6,7,8,9,10],zmm2[27,28,29,30,31],zmm0[16,17,18,19,20,21,22,23,24,25,26],zmm2[43,44,45,46,47],zmm0[32,33,34,35,36,37,38,39,40,41,42],zmm2[59,60,61,62,63],zmm0[48,49,50,51,52,53,54,55,56,57,58]
1530 ; AVX512BW-ONLY-FAST-NEXT: vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
1531 ; AVX512BW-ONLY-FAST-NEXT: vpalignr {{.*#+}} zmm1 = zmm1[11,12,13,14,15],zmm2[0,1,2,3,4,5,6,7,8,9,10],zmm1[27,28,29,30,31],zmm2[16,17,18,19,20,21,22,23,24,25,26],zmm1[43,44,45,46,47],zmm2[32,33,34,35,36,37,38,39,40,41,42],zmm1[59,60,61,62,63],zmm2[48,49,50,51,52,53,54,55,56,57,58]
1532 ; AVX512BW-ONLY-FAST-NEXT: movabsq $-576188069258921984, %rax # imm = 0xF800F800F800F800
1533 ; AVX512BW-ONLY-FAST-NEXT: kmovq %rax, %k1
1534 ; AVX512BW-ONLY-FAST-NEXT: vpblendmb %zmm1, %zmm0, %zmm2 {%k1}
1535 ; AVX512BW-ONLY-FAST-NEXT: vpalignr {{.*#+}} zmm1 = zmm3[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm3[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm3[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm3[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
1536 ; AVX512BW-ONLY-FAST-NEXT: vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm3[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm3[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm3[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm3[48,49,50,51,52,53,54,55,56,57,58]
1537 ; AVX512BW-ONLY-FAST-NEXT: vpalignr {{.*#+}} zmm0 = zmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,42,43,44,45,46,47,32,33,34,35,36,37,38,39,40,41,58,59,60,61,62,63,48,49,50,51,52,53,54,55,56,57]
1538 ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm0, (%rsi)
1539 ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm2, (%rdx)
1540 ; AVX512BW-ONLY-FAST-NEXT: vmovdqa64 %zmm1, (%rcx)
1541 ; AVX512BW-ONLY-FAST-NEXT: vzeroupper
1542 ; AVX512BW-ONLY-FAST-NEXT: retq
1544 ; AVX512DQBW-SLOW-LABEL: load_i8_stride3_vf64:
1545 ; AVX512DQBW-SLOW: # %bb.0:
1546 ; AVX512DQBW-SLOW-NEXT: vmovdqa (%rdi), %xmm0
1547 ; AVX512DQBW-SLOW-NEXT: vmovdqa 16(%rdi), %xmm1
1548 ; AVX512DQBW-SLOW-NEXT: vmovdqa 32(%rdi), %xmm2
1549 ; AVX512DQBW-SLOW-NEXT: vmovdqa 96(%rdi), %xmm3
1550 ; AVX512DQBW-SLOW-NEXT: vmovdqa 112(%rdi), %xmm4
1551 ; AVX512DQBW-SLOW-NEXT: vmovdqa 128(%rdi), %xmm5
1552 ; AVX512DQBW-SLOW-NEXT: vinserti128 $1, 144(%rdi), %ymm3, %ymm3
1553 ; AVX512DQBW-SLOW-NEXT: vinserti128 $1, 48(%rdi), %ymm0, %ymm0
1554 ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
1555 ; AVX512DQBW-SLOW-NEXT: vinserti128 $1, 160(%rdi), %ymm4, %ymm3
1556 ; AVX512DQBW-SLOW-NEXT: vinserti128 $1, 64(%rdi), %ymm1, %ymm1
1557 ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
1558 ; AVX512DQBW-SLOW-NEXT: vinserti128 $1, 176(%rdi), %ymm5, %ymm3
1559 ; AVX512DQBW-SLOW-NEXT: vinserti128 $1, 80(%rdi), %ymm2, %ymm2
1560 ; AVX512DQBW-SLOW-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
1561 ; AVX512DQBW-SLOW-NEXT: vbroadcasti64x2 {{.*#+}} zmm3 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
1562 ; AVX512DQBW-SLOW-NEXT: # zmm3 = mem[0,1,0,1,0,1,0,1]
1563 ; AVX512DQBW-SLOW-NEXT: vpshufb %zmm3, %zmm0, %zmm0
1564 ; AVX512DQBW-SLOW-NEXT: vpshufb %zmm3, %zmm1, %zmm1
1565 ; AVX512DQBW-SLOW-NEXT: vpshufb %zmm3, %zmm2, %zmm2
1566 ; AVX512DQBW-SLOW-NEXT: vpalignr {{.*#+}} zmm3 = zmm2[11,12,13,14,15],zmm0[0,1,2,3,4,5,6,7,8,9,10],zmm2[27,28,29,30,31],zmm0[16,17,18,19,20,21,22,23,24,25,26],zmm2[43,44,45,46,47],zmm0[32,33,34,35,36,37,38,39,40,41,42],zmm2[59,60,61,62,63],zmm0[48,49,50,51,52,53,54,55,56,57,58]
1567 ; AVX512DQBW-SLOW-NEXT: vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
1568 ; AVX512DQBW-SLOW-NEXT: vpalignr {{.*#+}} zmm1 = zmm1[11,12,13,14,15],zmm2[0,1,2,3,4,5,6,7,8,9,10],zmm1[27,28,29,30,31],zmm2[16,17,18,19,20,21,22,23,24,25,26],zmm1[43,44,45,46,47],zmm2[32,33,34,35,36,37,38,39,40,41,42],zmm1[59,60,61,62,63],zmm2[48,49,50,51,52,53,54,55,56,57,58]
1569 ; AVX512DQBW-SLOW-NEXT: movabsq $-576188069258921984, %rax # imm = 0xF800F800F800F800
1570 ; AVX512DQBW-SLOW-NEXT: kmovq %rax, %k1
1571 ; AVX512DQBW-SLOW-NEXT: vpblendmb %zmm1, %zmm0, %zmm2 {%k1}
1572 ; AVX512DQBW-SLOW-NEXT: vpalignr {{.*#+}} zmm1 = zmm3[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm3[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm3[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm3[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
1573 ; AVX512DQBW-SLOW-NEXT: vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm3[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm3[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm3[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm3[48,49,50,51,52,53,54,55,56,57,58]
1574 ; AVX512DQBW-SLOW-NEXT: vpalignr {{.*#+}} zmm0 = zmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,42,43,44,45,46,47,32,33,34,35,36,37,38,39,40,41,58,59,60,61,62,63,48,49,50,51,52,53,54,55,56,57]
1575 ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm0, (%rsi)
1576 ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm2, (%rdx)
1577 ; AVX512DQBW-SLOW-NEXT: vmovdqa64 %zmm1, (%rcx)
1578 ; AVX512DQBW-SLOW-NEXT: vzeroupper
1579 ; AVX512DQBW-SLOW-NEXT: retq
1581 ; AVX512DQBW-FAST-LABEL: load_i8_stride3_vf64:
1582 ; AVX512DQBW-FAST: # %bb.0:
1583 ; AVX512DQBW-FAST-NEXT: vmovdqa (%rdi), %xmm0
1584 ; AVX512DQBW-FAST-NEXT: vmovdqa 16(%rdi), %xmm1
1585 ; AVX512DQBW-FAST-NEXT: vmovdqa 32(%rdi), %xmm2
1586 ; AVX512DQBW-FAST-NEXT: vmovdqa 96(%rdi), %xmm3
1587 ; AVX512DQBW-FAST-NEXT: vmovdqa 112(%rdi), %xmm4
1588 ; AVX512DQBW-FAST-NEXT: vmovdqa 128(%rdi), %xmm5
1589 ; AVX512DQBW-FAST-NEXT: vinserti128 $1, 144(%rdi), %ymm3, %ymm3
1590 ; AVX512DQBW-FAST-NEXT: vinserti128 $1, 48(%rdi), %ymm0, %ymm0
1591 ; AVX512DQBW-FAST-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
1592 ; AVX512DQBW-FAST-NEXT: vinserti128 $1, 160(%rdi), %ymm4, %ymm3
1593 ; AVX512DQBW-FAST-NEXT: vinserti128 $1, 64(%rdi), %ymm1, %ymm1
1594 ; AVX512DQBW-FAST-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
1595 ; AVX512DQBW-FAST-NEXT: vinserti128 $1, 176(%rdi), %ymm5, %ymm3
1596 ; AVX512DQBW-FAST-NEXT: vinserti128 $1, 80(%rdi), %ymm2, %ymm2
1597 ; AVX512DQBW-FAST-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
1598 ; AVX512DQBW-FAST-NEXT: vbroadcasti64x2 {{.*#+}} zmm3 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
1599 ; AVX512DQBW-FAST-NEXT: # zmm3 = mem[0,1,0,1,0,1,0,1]
1600 ; AVX512DQBW-FAST-NEXT: vpshufb %zmm3, %zmm0, %zmm0
1601 ; AVX512DQBW-FAST-NEXT: vpshufb %zmm3, %zmm1, %zmm1
1602 ; AVX512DQBW-FAST-NEXT: vpshufb %zmm3, %zmm2, %zmm2
1603 ; AVX512DQBW-FAST-NEXT: vpalignr {{.*#+}} zmm3 = zmm2[11,12,13,14,15],zmm0[0,1,2,3,4,5,6,7,8,9,10],zmm2[27,28,29,30,31],zmm0[16,17,18,19,20,21,22,23,24,25,26],zmm2[43,44,45,46,47],zmm0[32,33,34,35,36,37,38,39,40,41,42],zmm2[59,60,61,62,63],zmm0[48,49,50,51,52,53,54,55,56,57,58]
1604 ; AVX512DQBW-FAST-NEXT: vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
1605 ; AVX512DQBW-FAST-NEXT: vpalignr {{.*#+}} zmm1 = zmm1[11,12,13,14,15],zmm2[0,1,2,3,4,5,6,7,8,9,10],zmm1[27,28,29,30,31],zmm2[16,17,18,19,20,21,22,23,24,25,26],zmm1[43,44,45,46,47],zmm2[32,33,34,35,36,37,38,39,40,41,42],zmm1[59,60,61,62,63],zmm2[48,49,50,51,52,53,54,55,56,57,58]
1606 ; AVX512DQBW-FAST-NEXT: movabsq $-576188069258921984, %rax # imm = 0xF800F800F800F800
1607 ; AVX512DQBW-FAST-NEXT: kmovq %rax, %k1
1608 ; AVX512DQBW-FAST-NEXT: vpblendmb %zmm1, %zmm0, %zmm2 {%k1}
1609 ; AVX512DQBW-FAST-NEXT: vpalignr {{.*#+}} zmm1 = zmm3[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm3[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm3[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm3[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
1610 ; AVX512DQBW-FAST-NEXT: vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm3[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm3[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm3[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm3[48,49,50,51,52,53,54,55,56,57,58]
1611 ; AVX512DQBW-FAST-NEXT: vpalignr {{.*#+}} zmm0 = zmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,42,43,44,45,46,47,32,33,34,35,36,37,38,39,40,41,58,59,60,61,62,63,48,49,50,51,52,53,54,55,56,57]
1612 ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm0, (%rsi)
1613 ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm2, (%rdx)
1614 ; AVX512DQBW-FAST-NEXT: vmovdqa64 %zmm1, (%rcx)
1615 ; AVX512DQBW-FAST-NEXT: vzeroupper
1616 ; AVX512DQBW-FAST-NEXT: retq
1617 %wide.vec = load <192 x i8>, ptr %in.vec, align 64
1618 %strided.vec0 = shufflevector <192 x i8> %wide.vec, <192 x i8> poison, <64 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45, i32 48, i32 51, i32 54, i32 57, i32 60, i32 63, i32 66, i32 69, i32 72, i32 75, i32 78, i32 81, i32 84, i32 87, i32 90, i32 93, i32 96, i32 99, i32 102, i32 105, i32 108, i32 111, i32 114, i32 117, i32 120, i32 123, i32 126, i32 129, i32 132, i32 135, i32 138, i32 141, i32 144, i32 147, i32 150, i32 153, i32 156, i32 159, i32 162, i32 165, i32 168, i32 171, i32 174, i32 177, i32 180, i32 183, i32 186, i32 189>
1619 %strided.vec1 = shufflevector <192 x i8> %wide.vec, <192 x i8> poison, <64 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46, i32 49, i32 52, i32 55, i32 58, i32 61, i32 64, i32 67, i32 70, i32 73, i32 76, i32 79, i32 82, i32 85, i32 88, i32 91, i32 94, i32 97, i32 100, i32 103, i32 106, i32 109, i32 112, i32 115, i32 118, i32 121, i32 124, i32 127, i32 130, i32 133, i32 136, i32 139, i32 142, i32 145, i32 148, i32 151, i32 154, i32 157, i32 160, i32 163, i32 166, i32 169, i32 172, i32 175, i32 178, i32 181, i32 184, i32 187, i32 190>
1620 %strided.vec2 = shufflevector <192 x i8> %wide.vec, <192 x i8> poison, <64 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47, i32 50, i32 53, i32 56, i32 59, i32 62, i32 65, i32 68, i32 71, i32 74, i32 77, i32 80, i32 83, i32 86, i32 89, i32 92, i32 95, i32 98, i32 101, i32 104, i32 107, i32 110, i32 113, i32 116, i32 119, i32 122, i32 125, i32 128, i32 131, i32 134, i32 137, i32 140, i32 143, i32 146, i32 149, i32 152, i32 155, i32 158, i32 161, i32 164, i32 167, i32 170, i32 173, i32 176, i32 179, i32 182, i32 185, i32 188, i32 191>
1621 store <64 x i8> %strided.vec0, ptr %out.vec0, align 64
1622 store <64 x i8> %strided.vec1, ptr %out.vec1, align 64
1623 store <64 x i8> %strided.vec2, ptr %out.vec2, align 64
1624 ret void
1625 }
1626 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
1629 ; AVX2-FAST-PERLANE: {{.*}}
1632 ; AVX512-FAST: {{.*}}
1633 ; AVX512-SLOW: {{.*}}
1634 ; AVX512BW-FAST: {{.*}}
1635 ; AVX512BW-SLOW: {{.*}}
1636 ; AVX512DQ-FAST: {{.*}}
1637 ; AVX512DQ-SLOW: {{.*}}
1638 ; AVX512F-FAST: {{.*}}
1639 ; AVX512F-ONLY-FAST: {{.*}}
1640 ; AVX512F-ONLY-SLOW: {{.*}}
1641 ; AVX512F-SLOW: {{.*}}
1644 ; FALLBACK10: {{.*}}
1645 ; FALLBACK11: {{.*}}
1646 ; FALLBACK12: {{.*}}