; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12

; These patterns are produced by LoopVectorizer for interleaved loads.
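; Each test below deinterleaves a packed stride-6 load of i16 elements into six
; separate output vectors. As a rough sketch (an illustrative assumption, not
; taken from any particular source), the scalar code that the LoopVectorizer
; turns into this shape looks like:
;
;   /* hypothetical C source; names are illustrative only */
;   struct S { short a, b, c, d, e, f; };
;   void split(struct S *in, short *o0, short *o1, short *o2,
;              short *o3, short *o4, short *o5, int n) {
;     for (int i = 0; i < n; i++) {          /* stride-6 accesses */
;       o0[i] = in[i].a; o1[i] = in[i].b; o2[i] = in[i].c;
;       o3[i] = in[i].d; o4[i] = in[i].e; o5[i] = in[i].f;
;     }
;   }
;
; The vectorizer emits one wide load per iteration group plus six shufflevectors
; (one per field), which is the IR each test body below contains.
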
define void @load_i16_stride6_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
; SSE-LABEL: load_i16_stride6_vf2:
; SSE: # %bb.0:
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: movdqa 16(%rdi), %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,1,1]
; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
; SSE-NEXT: movdqa %xmm0, %xmm5
; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[3,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm0[2,3,2,3]
; SSE-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
; SSE-NEXT: psrlq $48, %xmm1
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: movd %xmm3, (%rsi)
; SSE-NEXT: movd %xmm2, (%rdx)
; SSE-NEXT: movd %xmm4, (%rcx)
; SSE-NEXT: movd %xmm5, (%r8)
; SSE-NEXT: movd %xmm7, (%r9)
; SSE-NEXT: movd %xmm0, (%rax)
; SSE-NEXT: retq
;
; AVX1-ONLY-LABEL: load_i16_stride6_vf2:
; AVX1-ONLY: # %bb.0:
; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,3,2,3]
; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm3 = xmm2[0,2,2,3,4,5,6,7]
; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,1,1]
; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[3,1,2,3]
; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[0,3,2,3,4,5,6,7]
; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[1,1,1,1]
; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[2,3,2,3]
; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
; AVX1-ONLY-NEXT: vpsrlq $48, %xmm1, %xmm1
; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-ONLY-NEXT: vmovd %xmm3, (%rsi)
; AVX1-ONLY-NEXT: vmovd %xmm2, (%rdx)
; AVX1-ONLY-NEXT: vmovd %xmm4, (%rcx)
; AVX1-ONLY-NEXT: vmovd %xmm5, (%r8)
; AVX1-ONLY-NEXT: vmovd %xmm6, (%r9)
; AVX1-ONLY-NEXT: vmovd %xmm0, (%rax)
; AVX1-ONLY-NEXT: retq
;
; AVX2-SLOW-LABEL: load_i16_stride6_vf2:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-SLOW-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,3,2,3]
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm2[0,2,2,3,4,5,6,7]
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
; AVX2-SLOW-NEXT: vpbroadcastw 4(%rdi), %xmm4
; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[3,1,2,3]
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[0,3,2,3,4,5,6,7]
; AVX2-SLOW-NEXT: vpbroadcastw 20(%rdi), %xmm6
; AVX2-SLOW-NEXT: vpbroadcastw 8(%rdi), %xmm7
; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
; AVX2-SLOW-NEXT: vpsrlq $48, %xmm1, %xmm1
; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2-SLOW-NEXT: vmovd %xmm3, (%rsi)
; AVX2-SLOW-NEXT: vmovd %xmm2, (%rdx)
; AVX2-SLOW-NEXT: vmovd %xmm4, (%rcx)
; AVX2-SLOW-NEXT: vmovd %xmm5, (%r8)
; AVX2-SLOW-NEXT: vmovd %xmm6, (%r9)
; AVX2-SLOW-NEXT: vmovd %xmm0, (%rax)
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: load_i16_stride6_vf2:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-FAST-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,3,2,3]
; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm3 = xmm2[0,2,2,3,4,5,6,7]
; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
; AVX2-FAST-NEXT: vpbroadcastw 4(%rdi), %xmm4
; AVX2-FAST-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
; AVX2-FAST-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[12,13,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT: vpbroadcastw 20(%rdi), %xmm6
; AVX2-FAST-NEXT: vpbroadcastw 8(%rdi), %xmm7
; AVX2-FAST-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
; AVX2-FAST-NEXT: vpsrlq $48, %xmm1, %xmm1
; AVX2-FAST-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-FAST-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2-FAST-NEXT: vmovd %xmm3, (%rsi)
; AVX2-FAST-NEXT: vmovd %xmm2, (%rdx)
; AVX2-FAST-NEXT: vmovd %xmm4, (%rcx)
; AVX2-FAST-NEXT: vmovd %xmm5, (%r8)
; AVX2-FAST-NEXT: vmovd %xmm6, (%r9)
; AVX2-FAST-NEXT: vmovd %xmm0, (%rax)
; AVX2-FAST-NEXT: retq
;
; AVX2-FAST-PERLANE-LABEL: load_i16_stride6_vf2:
; AVX2-FAST-PERLANE: # %bb.0:
; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-FAST-PERLANE-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,3,2,3]
; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm3 = xmm2[0,2,2,3,4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vpbroadcastw 4(%rdi), %xmm4
; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[12,13,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-FAST-PERLANE-NEXT: vpbroadcastw 20(%rdi), %xmm6
; AVX2-FAST-PERLANE-NEXT: vpbroadcastw 8(%rdi), %xmm7
; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
; AVX2-FAST-PERLANE-NEXT: vpsrlq $48, %xmm1, %xmm1
; AVX2-FAST-PERLANE-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2-FAST-PERLANE-NEXT: vmovd %xmm3, (%rsi)
; AVX2-FAST-PERLANE-NEXT: vmovd %xmm2, (%rdx)
; AVX2-FAST-PERLANE-NEXT: vmovd %xmm4, (%rcx)
; AVX2-FAST-PERLANE-NEXT: vmovd %xmm5, (%r8)
; AVX2-FAST-PERLANE-NEXT: vmovd %xmm6, (%r9)
; AVX2-FAST-PERLANE-NEXT: vmovd %xmm0, (%rax)
; AVX2-FAST-PERLANE-NEXT: retq
;
; AVX512-SLOW-LABEL: load_i16_stride6_vf2:
; AVX512-SLOW: # %bb.0:
; AVX512-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-SLOW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512-SLOW-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,3,2,3]
; AVX512-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm2[0,2,2,3,4,5,6,7]
; AVX512-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
; AVX512-SLOW-NEXT: vpbroadcastw 4(%rdi), %xmm4
; AVX512-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
; AVX512-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[3,1,2,3]
; AVX512-SLOW-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[0,3,2,3,4,5,6,7]
; AVX512-SLOW-NEXT: vpbroadcastw 20(%rdi), %xmm6
; AVX512-SLOW-NEXT: vpbroadcastw 8(%rdi), %xmm7
; AVX512-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
; AVX512-SLOW-NEXT: vpsrlq $48, %xmm1, %xmm1
; AVX512-SLOW-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512-SLOW-NEXT: vmovd %xmm3, (%rsi)
; AVX512-SLOW-NEXT: vmovd %xmm2, (%rdx)
; AVX512-SLOW-NEXT: vmovd %xmm4, (%rcx)
; AVX512-SLOW-NEXT: vmovd %xmm5, (%r8)
; AVX512-SLOW-NEXT: vmovd %xmm6, (%r9)
; AVX512-SLOW-NEXT: vmovd %xmm0, (%rax)
; AVX512-SLOW-NEXT: retq
;
; AVX512F-FAST-LABEL: load_i16_stride6_vf2:
; AVX512F-FAST: # %bb.0:
; AVX512F-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512F-FAST-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-FAST-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512F-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,3,2,3]
; AVX512F-FAST-NEXT: vpshuflw {{.*#+}} xmm3 = xmm2[0,2,2,3,4,5,6,7]
; AVX512F-FAST-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
; AVX512F-FAST-NEXT: vpbroadcastw 4(%rdi), %xmm4
; AVX512F-FAST-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
; AVX512F-FAST-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[12,13,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-FAST-NEXT: vpbroadcastw 20(%rdi), %xmm6
; AVX512F-FAST-NEXT: vpbroadcastw 8(%rdi), %xmm7
; AVX512F-FAST-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
; AVX512F-FAST-NEXT: vpsrlq $48, %xmm1, %xmm1
; AVX512F-FAST-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512F-FAST-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512F-FAST-NEXT: vmovd %xmm3, (%rsi)
; AVX512F-FAST-NEXT: vmovd %xmm2, (%rdx)
; AVX512F-FAST-NEXT: vmovd %xmm4, (%rcx)
; AVX512F-FAST-NEXT: vmovd %xmm5, (%r8)
; AVX512F-FAST-NEXT: vmovd %xmm6, (%r9)
; AVX512F-FAST-NEXT: vmovd %xmm0, (%rax)
; AVX512F-FAST-NEXT: retq
;
; AVX512BW-FAST-LABEL: load_i16_stride6_vf2:
; AVX512BW-FAST: # %bb.0:
; AVX512BW-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512BW-FAST-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-FAST-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512BW-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,3,2,3]
; AVX512BW-FAST-NEXT: vpshuflw {{.*#+}} xmm3 = xmm2[0,2,2,3,4,5,6,7]
; AVX512BW-FAST-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
; AVX512BW-FAST-NEXT: vpbroadcastw 4(%rdi), %xmm4
; AVX512BW-FAST-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
; AVX512BW-FAST-NEXT: vpbroadcastd {{.*#+}} xmm5 = [3,9,3,9,3,9,3,9]
; AVX512BW-FAST-NEXT: vpermi2w %xmm1, %xmm0, %xmm5
; AVX512BW-FAST-NEXT: vpbroadcastw 20(%rdi), %xmm6
; AVX512BW-FAST-NEXT: vpbroadcastw 8(%rdi), %xmm7
; AVX512BW-FAST-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
; AVX512BW-FAST-NEXT: vpsrlq $48, %xmm1, %xmm1
; AVX512BW-FAST-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512BW-FAST-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512BW-FAST-NEXT: vmovd %xmm3, (%rsi)
; AVX512BW-FAST-NEXT: vmovd %xmm2, (%rdx)
; AVX512BW-FAST-NEXT: vmovd %xmm4, (%rcx)
; AVX512BW-FAST-NEXT: vmovd %xmm5, (%r8)
; AVX512BW-FAST-NEXT: vmovd %xmm6, (%r9)
; AVX512BW-FAST-NEXT: vmovd %xmm0, (%rax)
; AVX512BW-FAST-NEXT: retq
  %wide.vec = load <12 x i16>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <12 x i16> %wide.vec, <12 x i16> poison, <2 x i32> <i32 0, i32 6>
  %strided.vec1 = shufflevector <12 x i16> %wide.vec, <12 x i16> poison, <2 x i32> <i32 1, i32 7>
  %strided.vec2 = shufflevector <12 x i16> %wide.vec, <12 x i16> poison, <2 x i32> <i32 2, i32 8>
  %strided.vec3 = shufflevector <12 x i16> %wide.vec, <12 x i16> poison, <2 x i32> <i32 3, i32 9>
  %strided.vec4 = shufflevector <12 x i16> %wide.vec, <12 x i16> poison, <2 x i32> <i32 4, i32 10>
  %strided.vec5 = shufflevector <12 x i16> %wide.vec, <12 x i16> poison, <2 x i32> <i32 5, i32 11>
  store <2 x i16> %strided.vec0, ptr %out.vec0, align 64
  store <2 x i16> %strided.vec1, ptr %out.vec1, align 64
  store <2 x i16> %strided.vec2, ptr %out.vec2, align 64
  store <2 x i16> %strided.vec3, ptr %out.vec3, align 64
  store <2 x i16> %strided.vec4, ptr %out.vec4, align 64
  store <2 x i16> %strided.vec5, ptr %out.vec5, align 64
  ret void
}

define void @load_i16_stride6_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
; SSE-LABEL: load_i16_stride6_vf4:
; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: movdqa 16(%rdi), %xmm1
; SSE-NEXT: movdqa 32(%rdi), %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm3[0,1,2,3,4,6,6,7]
; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm2, %xmm6
; SSE-NEXT: pandn %xmm5, %xmm6
; SSE-NEXT: movdqa %xmm1, %xmm7
; SSE-NEXT: psrld $16, %xmm7
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,7,6,7]
; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm7[2],xmm3[3],xmm7[3]
; SSE-NEXT: pand %xmm2, %xmm3
; SSE-NEXT: por %xmm6, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[2,2,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,3,2,3]
; SSE-NEXT: psrldq {{.*#+}} xmm5 = xmm5[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE-NEXT: movdqa %xmm2, %xmm8
; SSE-NEXT: pandn %xmm5, %xmm8
; SSE-NEXT: movdqa %xmm0, %xmm5
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm1[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm1[2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm5[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm9[1,0,2,3,4,5,6,7]
; SSE-NEXT: pand %xmm2, %xmm9
; SSE-NEXT: por %xmm8, %xmm9
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,3,3,4,5,6,7]
; SSE-NEXT: pand %xmm2, %xmm5
; SSE-NEXT: pandn %xmm6, %xmm2
; SSE-NEXT: por %xmm5, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[2,3,2,3]
; SSE-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm7[0,2,2,3,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
; SSE-NEXT: psrlq $48, %xmm1
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm7[1,3,2,3,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movq %xmm4, (%rsi)
; SSE-NEXT: movq %xmm3, (%rdx)
; SSE-NEXT: movq %xmm9, (%rcx)
; SSE-NEXT: movq %xmm2, (%r8)
; SSE-NEXT: movq %xmm6, (%r9)
; SSE-NEXT: movq %xmm0, (%rax)
; SSE-NEXT: retq
;
; AVX1-ONLY-LABEL: load_i16_stride6_vf4:
; AVX1-ONLY: # %bb.0:
; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[0,1,0,3]
; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm4 = xmm3[0,1,2,3,4,6,6,7]
; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7]
; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; AVX1-ONLY-NEXT: vpsrld $16, %xmm1, %xmm5
; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,7,6,7]
; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm2[3],xmm3[4,5,6,7]
; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm5 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm6 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm7 = xmm6[4,5,0,1,12,13,u,u,u,u,u,u,u,u,u,u]
; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1,2],xmm5[3],xmm7[4,5,6,7]
; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm7 = xmm2[2,2,3,3]
; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[6,7,2,3,14,15,u,u,u,u,u,u,u,u,u,u]
; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm7[3],xmm6[4,5,6,7]
; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[1,1,1,1]
; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[2,3,2,3]
; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm8 = xmm2[0,2,2,3,4,5,6,7]
; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
; AVX1-ONLY-NEXT: vpsrlq $48, %xmm1, %xmm1
; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm1 = xmm2[1,3,2,3,4,5,6,7]
; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX1-ONLY-NEXT: vmovq %xmm4, (%rsi)
; AVX1-ONLY-NEXT: vmovq %xmm3, (%rdx)
; AVX1-ONLY-NEXT: vmovq %xmm5, (%rcx)
; AVX1-ONLY-NEXT: vmovq %xmm6, (%r8)
; AVX1-ONLY-NEXT: vmovq %xmm7, (%r9)
; AVX1-ONLY-NEXT: vmovq %xmm0, (%rax)
; AVX1-ONLY-NEXT: retq
;
; AVX2-SLOW-LABEL: load_i16_stride6_vf4:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-SLOW-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0],xmm2[1],xmm0[2,3]
; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm1[2],xmm3[3]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[0,1,12,13,8,9,4,5,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT: vpsrld $16, %xmm1, %xmm4
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[0,1,0,3]
; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,7,6,7]
; AVX2-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2],xmm2[3],xmm4[4,5,6,7]
; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm5 = xmm0[0,1],xmm2[2,3]
; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm5 = xmm1[0],xmm5[1,2],xmm1[3]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[4,5,0,1,12,13,8,9,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[6,7,2,3,14,15,10,11,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3]
; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[8,9,4,5,0,1,12,13,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[10,11,6,7,2,3,14,15,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT: vmovq %xmm3, (%rsi)
; AVX2-SLOW-NEXT: vmovq %xmm4, (%rdx)
; AVX2-SLOW-NEXT: vmovq %xmm6, (%rcx)
; AVX2-SLOW-NEXT: vmovq %xmm5, (%r8)
; AVX2-SLOW-NEXT: vmovq %xmm1, (%r9)
; AVX2-SLOW-NEXT: vmovq %xmm0, (%rax)
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: load_i16_stride6_vf4:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-FAST-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0],xmm2[1],xmm0[2,3]
; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm1[2],xmm3[3]
; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[0,1,12,13,8,9,4,5,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT: vpsrld $16, %xmm1, %xmm4
; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[u,u,u,u,u,u,u,u,2,3,14,15,12,13,14,15]
; AVX2-FAST-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2],xmm2[3],xmm4[4,5,6,7]
; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm5 = xmm0[0,1],xmm2[2,3]
; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm5 = xmm1[0],xmm5[1,2],xmm1[3]
; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[4,5,0,1,12,13,8,9,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[6,7,2,3,14,15,10,11,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3]
; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[8,9,4,5,0,1,12,13,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[10,11,6,7,2,3,14,15,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT: vmovq %xmm3, (%rsi)
; AVX2-FAST-NEXT: vmovq %xmm4, (%rdx)
; AVX2-FAST-NEXT: vmovq %xmm6, (%rcx)
; AVX2-FAST-NEXT: vmovq %xmm5, (%r8)
; AVX2-FAST-NEXT: vmovq %xmm1, (%r9)
; AVX2-FAST-NEXT: vmovq %xmm0, (%rax)
; AVX2-FAST-NEXT: retq
;
; AVX2-FAST-PERLANE-LABEL: load_i16_stride6_vf4:
; AVX2-FAST-PERLANE: # %bb.0:
; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-FAST-PERLANE-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0],xmm2[1],xmm0[2,3]
; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm1[2],xmm3[3]
; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[0,1,12,13,8,9,4,5,u,u,u,u,u,u,u,u]
; AVX2-FAST-PERLANE-NEXT: vpsrld $16, %xmm1, %xmm4
; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[u,u,u,u,u,u,u,u,2,3,14,15,12,13,14,15]
; AVX2-FAST-PERLANE-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2],xmm2[3],xmm4[4,5,6,7]
; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} xmm5 = xmm0[0,1],xmm2[2,3]
; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} xmm5 = xmm1[0],xmm5[1,2],xmm1[3]
; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[4,5,0,1,12,13,8,9,u,u,u,u,u,u,u,u]
; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[6,7,2,3,14,15,10,11,u,u,u,u,u,u,u,u]
; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3]
; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[8,9,4,5,0,1,12,13,u,u,u,u,u,u,u,u]
; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[10,11,6,7,2,3,14,15,u,u,u,u,u,u,u,u]
; AVX2-FAST-PERLANE-NEXT: vmovq %xmm3, (%rsi)
; AVX2-FAST-PERLANE-NEXT: vmovq %xmm4, (%rdx)
; AVX2-FAST-PERLANE-NEXT: vmovq %xmm6, (%rcx)
; AVX2-FAST-PERLANE-NEXT: vmovq %xmm5, (%r8)
; AVX2-FAST-PERLANE-NEXT: vmovq %xmm1, (%r9)
; AVX2-FAST-PERLANE-NEXT: vmovq %xmm0, (%rax)
; AVX2-FAST-PERLANE-NEXT: retq
;
; AVX512F-SLOW-LABEL: load_i16_stride6_vf4:
; AVX512F-SLOW: # %bb.0:
; AVX512F-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-SLOW-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512F-SLOW-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0],xmm2[1],xmm0[2,3]
; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm1[2],xmm3[3]
; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[0,1,12,13,8,9,4,5,u,u,u,u,u,u,u,u]
; AVX512F-SLOW-NEXT: vpsrld $16, %xmm1, %xmm1
; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; AVX512F-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,7,6,7]
; AVX512F-SLOW-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5,6,7]
; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} xmm1 = [4,1,10,7]
; AVX512F-SLOW-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %ymm4
; AVX512F-SLOW-NEXT: vpermi2d %ymm2, %ymm4, %ymm1
; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm5 = xmm1[4,5,0,1,12,13,8,9,u,u,u,u,u,u,u,u]
; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,7,2,3,14,15,10,11,u,u,u,u,u,u,u,u]
; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} xmm6 = [0,13,10,3]
; AVX512F-SLOW-NEXT: vpermi2d %ymm4, %ymm2, %ymm6
; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm2 = xmm6[8,9,4,5,0,1,12,13,u,u,u,u,u,u,u,u]
; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm4 = xmm6[10,11,6,7,2,3,14,15,u,u,u,u,u,u,u,u]
; AVX512F-SLOW-NEXT: vmovq %xmm3, (%rsi)
; AVX512F-SLOW-NEXT: vmovq %xmm0, (%rdx)
; AVX512F-SLOW-NEXT: vmovq %xmm5, (%rcx)
; AVX512F-SLOW-NEXT: vmovq %xmm1, (%r8)
; AVX512F-SLOW-NEXT: vmovq %xmm2, (%r9)
; AVX512F-SLOW-NEXT: vmovq %xmm4, (%rax)
; AVX512F-SLOW-NEXT: vzeroupper
; AVX512F-SLOW-NEXT: retq
;
; AVX512F-FAST-LABEL: load_i16_stride6_vf4:
; AVX512F-FAST: # %bb.0:
; AVX512F-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512F-FAST-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-FAST-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512F-FAST-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX512F-FAST-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0],xmm2[1],xmm0[2,3]
; AVX512F-FAST-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm1[2],xmm3[3]
; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[0,1,12,13,8,9,4,5,u,u,u,u,u,u,u,u]
; AVX512F-FAST-NEXT: vpsrld $16, %xmm1, %xmm1
; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,2,3,14,15,12,13,14,15]
; AVX512F-FAST-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5,6,7]
; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} xmm1 = [4,1,10,7]
; AVX512F-FAST-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX512F-FAST-NEXT: vmovdqa (%rdi), %ymm4
; AVX512F-FAST-NEXT: vpermi2d %ymm2, %ymm4, %ymm1
; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm5 = xmm1[4,5,0,1,12,13,8,9,u,u,u,u,u,u,u,u]
; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,7,2,3,14,15,10,11,u,u,u,u,u,u,u,u]
; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} xmm6 = [0,13,10,3]
; AVX512F-FAST-NEXT: vpermi2d %ymm4, %ymm2, %ymm6
; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm2 = xmm6[8,9,4,5,0,1,12,13,u,u,u,u,u,u,u,u]
; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm4 = xmm6[10,11,6,7,2,3,14,15,u,u,u,u,u,u,u,u]
; AVX512F-FAST-NEXT: vmovq %xmm3, (%rsi)
; AVX512F-FAST-NEXT: vmovq %xmm0, (%rdx)
; AVX512F-FAST-NEXT: vmovq %xmm5, (%rcx)
; AVX512F-FAST-NEXT: vmovq %xmm1, (%r8)
; AVX512F-FAST-NEXT: vmovq %xmm2, (%r9)
; AVX512F-FAST-NEXT: vmovq %xmm4, (%rax)
; AVX512F-FAST-NEXT: vzeroupper
; AVX512F-FAST-NEXT: retq
;
; AVX512BW-LABEL: load_i16_stride6_vf4:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512BW-NEXT: vpbroadcastq {{.*#+}} xmm0 = [0,6,12,18,0,6,12,18]
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm1
; AVX512BW-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
; AVX512BW-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,7,13,19,1,7,13,19]
; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm3
; AVX512BW-NEXT: vpbroadcastq {{.*#+}} xmm4 = [2,8,14,20,2,8,14,20]
; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm4
; AVX512BW-NEXT: vpbroadcastq {{.*#+}} xmm5 = [3,9,15,21,3,9,15,21]
; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm5
; AVX512BW-NEXT: vpbroadcastq {{.*#+}} xmm6 = [4,10,16,22,4,10,16,22]
; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm6
; AVX512BW-NEXT: vpbroadcastq {{.*#+}} xmm7 = [5,11,17,23,5,11,17,23]
; AVX512BW-NEXT: vpermi2w %ymm2, %ymm1, %ymm7
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
; AVX512BW-NEXT: vmovq %xmm3, (%rdx)
; AVX512BW-NEXT: vmovq %xmm4, (%rcx)
; AVX512BW-NEXT: vmovq %xmm5, (%r8)
; AVX512BW-NEXT: vmovq %xmm6, (%r9)
; AVX512BW-NEXT: vmovq %xmm7, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
  %wide.vec = load <24 x i16>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <24 x i16> %wide.vec, <24 x i16> poison, <4 x i32> <i32 0, i32 6, i32 12, i32 18>
  %strided.vec1 = shufflevector <24 x i16> %wide.vec, <24 x i16> poison, <4 x i32> <i32 1, i32 7, i32 13, i32 19>
  %strided.vec2 = shufflevector <24 x i16> %wide.vec, <24 x i16> poison, <4 x i32> <i32 2, i32 8, i32 14, i32 20>
  %strided.vec3 = shufflevector <24 x i16> %wide.vec, <24 x i16> poison, <4 x i32> <i32 3, i32 9, i32 15, i32 21>
  %strided.vec4 = shufflevector <24 x i16> %wide.vec, <24 x i16> poison, <4 x i32> <i32 4, i32 10, i32 16, i32 22>
  %strided.vec5 = shufflevector <24 x i16> %wide.vec, <24 x i16> poison, <4 x i32> <i32 5, i32 11, i32 17, i32 23>
  store <4 x i16> %strided.vec0, ptr %out.vec0, align 64
  store <4 x i16> %strided.vec1, ptr %out.vec1, align 64
  store <4 x i16> %strided.vec2, ptr %out.vec2, align 64
  store <4 x i16> %strided.vec3, ptr %out.vec3, align 64
  store <4 x i16> %strided.vec4, ptr %out.vec4, align 64
  store <4 x i16> %strided.vec5, ptr %out.vec5, align 64
  ret void
}

define void @load_i16_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
; SSE-LABEL: load_i16_stride6_vf8:
; SSE: # %bb.0:
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
; SSE-NEXT: movdqa 64(%rdi), %xmm1
; SSE-NEXT: movdqa 80(%rdi), %xmm8
; SSE-NEXT: movdqa (%rdi), %xmm3
; SSE-NEXT: movdqa 16(%rdi), %xmm5
; SSE-NEXT: movdqa 32(%rdi), %xmm6
; SSE-NEXT: movdqa 48(%rdi), %xmm4
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm6[0,1,1,2,4,5,6,7]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,0,0,65535,65535]
; SSE-NEXT: movdqa %xmm0, %xmm9
; SSE-NEXT: pandn %xmm2, %xmm9
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm7[0,1,2,3,4,6,6,7]
; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: por %xmm9, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm9
; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm1[2,2,3,3]
; SSE-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm8[0],xmm11[1],xmm8[1],xmm11[2],xmm8[2],xmm11[3],xmm8[3]
; SSE-NEXT: movdqa %xmm8, %xmm12
; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[2,0],xmm1[3,0]
; SSE-NEXT: movaps %xmm1, %xmm10
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm8[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm8[2,3]
; SSE-NEXT: pslld $16, %xmm8
; SSE-NEXT: psrldq {{.*#+}} xmm9 = xmm9[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm4[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm13 = xmm8[0,1,0,2,4,5,6,7]
; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[3,1],xmm13[1,3]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm9[2,0]
; SSE-NEXT: movdqa %xmm5, %xmm9
; SSE-NEXT: psrld $16, %xmm9
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,7,6,7]
; SSE-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm9[2],xmm7[3],xmm9[3]
; SSE-NEXT: pand %xmm0, %xmm7
; SSE-NEXT: pandn %xmm6, %xmm0
; SSE-NEXT: por %xmm7, %xmm0
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm8[0,1,1,3,4,5,6,7]
; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[3,1],xmm7[1,3]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm11[2,0]
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm4[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm6[2,2,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,1,0,3]
; SSE-NEXT: psrldq {{.*#+}} xmm6 = xmm6[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm8[0]
; SSE-NEXT: movdqa {{.*#+}} xmm11 = [65535,65535,65535,0,0,65535,65535,65535]
; SSE-NEXT: movdqa %xmm11, %xmm8
; SSE-NEXT: pandn %xmm6, %xmm8
; SSE-NEXT: movdqa %xmm3, %xmm13
; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[1,0],xmm5[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[2,0],xmm5[2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm13[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm6[1,0,2,3,4,5,6,7]
; SSE-NEXT: pand %xmm11, %xmm14
; SSE-NEXT: por %xmm8, %xmm14
; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535,65535,0,0,0]
; SSE-NEXT: pand %xmm6, %xmm14
; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,1],xmm12[0,2]
; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm10[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm8[0,1,2,3,4,6,5,4]
; SSE-NEXT: movdqa %xmm6, %xmm8
; SSE-NEXT: pandn %xmm12, %xmm8
; SSE-NEXT: por %xmm14, %xmm8
; SSE-NEXT: movdqa %xmm4, %xmm12
; SSE-NEXT: psrlq $48, %xmm12
; SSE-NEXT: punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm12[0]
; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm13[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm12[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm12[0,1,3,3,4,5,6,7]
; SSE-NEXT: pand %xmm11, %xmm12
; SSE-NEXT: pandn %xmm9, %xmm11
; SSE-NEXT: por %xmm12, %xmm11
; SSE-NEXT: pand %xmm6, %xmm11
; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm10[0,1,2,3,7,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm9[0,1,0,2]
; SSE-NEXT: movdqa %xmm6, %xmm9
; SSE-NEXT: pandn %xmm10, %xmm9
; SSE-NEXT: por %xmm11, %xmm9
; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm5[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm3[2,3,2,3]
; SSE-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm7[0,1,2,3,4,5,4,6]
; SSE-NEXT: punpckhqdq {{.*#+}} xmm10 = xmm10[1],xmm4[1]
; SSE-NEXT: movss {{.*#+}} xmm10 = xmm11[0],xmm10[1,2,3]
; SSE-NEXT: andps %xmm6, %xmm10
; SSE-NEXT: pshuflw {{.*#+}} xmm11 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,4,6]
; SSE-NEXT: movdqa %xmm6, %xmm12
; SSE-NEXT: pandn %xmm11, %xmm12
; SSE-NEXT: por %xmm10, %xmm12
; SSE-NEXT: psrlq $48, %xmm5
; SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
; SSE-NEXT: psrld $16, %xmm4
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm7[0,1,2,3,4,5,5,7]
; SSE-NEXT: punpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm4[1]
; SSE-NEXT: movss {{.*#+}} xmm5 = xmm3[0],xmm5[1,2,3]
; SSE-NEXT: andps %xmm6, %xmm5
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,7]
; SSE-NEXT: pandn %xmm1, %xmm6
; SSE-NEXT: por %xmm5, %xmm6
; SSE-NEXT: movaps %xmm2, (%rsi)
; SSE-NEXT: movaps %xmm0, (%rdx)
; SSE-NEXT: movdqa %xmm8, (%rcx)
; SSE-NEXT: movdqa %xmm9, (%r8)
; SSE-NEXT: movdqa %xmm12, (%r9)
; SSE-NEXT: movdqa %xmm6, (%rax)
; SSE-NEXT: retq
;
660 ; AVX1-ONLY-LABEL: load_i16_stride6_vf8:
661 ; AVX1-ONLY: # %bb.0:
662 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
663 ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm0
664 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm2
665 ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm4
666 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm1
667 ; AVX1-ONLY-NEXT: vpsrlq $16, %xmm4, %xmm3
668 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[0,3,2,3]
669 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm5 = xmm7[0,1,0,2,4,5,6,7]
670 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
671 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[0,1,0,3]
672 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm5 = xmm8[0,1,2,3,4,6,6,7]
673 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm5[2],xmm2[2],xmm5[3],xmm2[3]
674 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1,2],xmm3[3,4,5],xmm5[6,7]
675 ; AVX1-ONLY-NEXT: vmovdqa 80(%rdi), %xmm5
676 ; AVX1-ONLY-NEXT: vpslld $16, %xmm5, %xmm9
677 ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm6
678 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm10 = xmm6[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
679 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
680 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5],xmm9[6,7]
681 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm9 = xmm4[1,1,1,1]
682 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[0,1,1,3,4,5,6,7]
683 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
684 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm2, %xmm9
685 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,7,6,7]
686 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm8 = xmm8[2],xmm9[2],xmm8[3],xmm9[3]
687 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm7 = xmm8[0,1,2],xmm7[3,4,5],xmm8[6,7]
688 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm8 = xmm6[2,2,3,3]
689 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm5[0],xmm8[1],xmm5[1],xmm8[2],xmm5[2],xmm8[3],xmm5[3]
690 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5],xmm8[6,7]
691 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm8 = xmm1[1,1,1,1]
692 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm9 = xmm4[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
693 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm9[0],xmm8[0]
694 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm9 = xmm2[0,1],xmm0[2,3],xmm2[4,5,6,7]
695 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = xmm9[4,5,0,1,12,13,u,u,u,u,u,u,u,u,u,u]
696 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm8 = xmm10[0,1,2],xmm8[3,4],xmm10[5,6,7]
697 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm10 = xmm6[0,1,2,3],xmm5[4,5],xmm6[6,7]
698 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm11 = xmm10[u,u,u,u,u,u,u,u,u,u,0,1,12,13,8,9]
699 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4],xmm11[5,6,7]
700 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm1, %xmm11
701 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm12 = xmm4[2,2,3,3]
702 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm11 = xmm12[0],xmm11[0]
703 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[6,7,2,3,14,15,u,u,u,u,u,u,u,u,u,u]
704 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2],xmm11[3,4],xmm9[5,6,7]
705 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[u,u,u,u,u,u,u,u,u,u,2,3,14,15,10,11]
706 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm10[5,6,7]
707 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm10 = xmm2[1,1,1,1]
708 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm11 = xmm0[2,3,2,3]
709 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
710 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
711 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm11 = xmm4[0,1,2,3,4,5,4,6]
712 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm11 = xmm11[1],xmm1[1]
713 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1],xmm11[2,3,4,5,6,7]
714 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3],xmm5[4,5,6,7]
715 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[u,u,u,u,u,u,u,u,u,u,4,5,0,1,12,13]
716 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm6 = xmm10[0,1,2,3,4],xmm6[5,6,7]
717 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm2, %xmm2
718 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
719 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
720 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm1, %xmm1
721 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm2 = xmm4[0,1,2,3,4,5,5,7]
722 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm2[1],xmm1[1]
723 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
724 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = xmm5[u,u,u,u,u,u,u,u,u,u,6,7,2,3,14,15]
725 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
726 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, (%rsi)
727 ; AVX1-ONLY-NEXT: vmovdqa %xmm7, (%rdx)
728 ; AVX1-ONLY-NEXT: vmovdqa %xmm8, (%rcx)
729 ; AVX1-ONLY-NEXT: vmovdqa %xmm9, (%r8)
730 ; AVX1-ONLY-NEXT: vmovdqa %xmm6, (%r9)
731 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, (%rax)
732 ; AVX1-ONLY-NEXT: retq
734 ; AVX2-SLOW-LABEL: load_i16_stride6_vf8:
735 ; AVX2-SLOW: # %bb.0:
736 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
737 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm3
738 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm4
739 ; AVX2-SLOW-NEXT: vmovdqa 80(%rdi), %xmm0
740 ; AVX2-SLOW-NEXT: vpslld $16, %xmm0, %xmm2
741 ; AVX2-SLOW-NEXT: vmovdqa 64(%rdi), %xmm1
742 ; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} xmm5 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
743 ; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
744 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
745 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
746 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm5, %xmm7
747 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm8 = xmm7[0,2,0,3]
748 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,6,6,7]
749 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm8[2],xmm6[3],xmm8[4,5],xmm6[6,7]
750 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm2 = xmm6[0,1,2],xmm2[3]
751 ; AVX2-SLOW-NEXT: vpbroadcastw 74(%rdi), %xmm6
752 ; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
753 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u]
754 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
755 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm7[2],xmm5[3],xmm7[4,5],xmm5[6,7]
756 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
757 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
758 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[2,1,2,3]
759 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm8 = xmm7[2,1,2,0,4,5,6,7]
760 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm6, %xmm6
761 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,3,2,1]
762 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm9 = xmm6[0,0,2,3,4,5,6,7]
763 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,1,3,3]
764 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1,2],xmm8[3],xmm9[4,5,6,7]
765 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm9 = xmm1[0,1],xmm0[2],xmm1[3]
766 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,u,u,u,u,u,0,1,12,13,8,9]
767 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4],xmm10[5,6,7]
768 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
769 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[0,1,3,3,4,5,6,7]
770 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
771 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3],xmm6[4,5,6,7]
772 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u,u,u,u,u,u,2,3,14,15,10,11]
773 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm7[5,6,7]
774 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
775 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm3, %xmm4
776 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm7 = xmm4[2,2,2,2,4,5,6,7]
777 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm9 = xmm3[8,9,u,u,0,1,12,13,u,u,u,u,u,u,u,u]
778 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm7 = xmm9[0],xmm7[1],xmm9[2,3],xmm7[4],xmm9[5,6,7]
779 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
780 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,u,u,u,u,u,u,u,4,5,0,1,12,13]
781 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm7[0,1,2,3,4],xmm1[5,6,7]
782 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[10,11,u,u,2,3,14,15,u,u,u,u,u,u,u,u]
783 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
784 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
785 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
786 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,6,7,2,3,14,15]
787 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3,4],xmm0[5,6,7]
788 ; AVX2-SLOW-NEXT: vmovdqa %xmm2, (%rsi)
789 ; AVX2-SLOW-NEXT: vmovdqa %xmm5, (%rdx)
790 ; AVX2-SLOW-NEXT: vmovdqa %xmm8, (%rcx)
791 ; AVX2-SLOW-NEXT: vmovdqa %xmm6, (%r8)
792 ; AVX2-SLOW-NEXT: vmovdqa %xmm1, (%r9)
793 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, (%rax)
794 ; AVX2-SLOW-NEXT: vzeroupper
795 ; AVX2-SLOW-NEXT: retq
797 ; AVX2-FAST-LABEL: load_i16_stride6_vf8:
798 ; AVX2-FAST: # %bb.0:
799 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
800 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm2
801 ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %ymm4
802 ; AVX2-FAST-NEXT: vmovdqa 80(%rdi), %xmm0
803 ; AVX2-FAST-NEXT: vpslld $16, %xmm0, %xmm3
804 ; AVX2-FAST-NEXT: vmovdqa 64(%rdi), %xmm1
805 ; AVX2-FAST-NEXT: vpsrldq {{.*#+}} xmm5 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
806 ; AVX2-FAST-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
807 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6],ymm4[7]
808 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
809 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm5, %xmm7
810 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,1,0,3]
811 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
812 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm8[2],xmm6[3],xmm8[4,5],xmm6[6,7]
813 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm3 = xmm6[0,1,2],xmm3[3]
814 ; AVX2-FAST-NEXT: vpbroadcastw 74(%rdi), %xmm6
815 ; AVX2-FAST-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
816 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
817 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
818 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm7[2],xmm5[3],xmm7[4,5],xmm5[6,7]
819 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
820 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
821 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[2,1,2,3]
822 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm8 = xmm7[2,1,2,0,4,5,6,7]
823 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm6, %xmm6
824 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,3,2,1]
825 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm9 = xmm6[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
826 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1,2],xmm8[3],xmm9[4,5,6,7]
827 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm9 = xmm1[0,1],xmm0[2],xmm1[3]
828 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,u,u,u,u,u,0,1,12,13,8,9]
829 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4],xmm10[5,6,7]
830 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
831 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
832 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3],xmm6[4,5,6,7]
833 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u,u,u,u,u,u,2,3,14,15,10,11]
834 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm7[5,6,7]
835 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2],ymm4[3,4],ymm2[5],ymm4[6,7]
836 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm2, %xmm4
837 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm7 = xmm4[2,2,2,2,4,5,6,7]
838 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm9 = xmm2[8,9,u,u,0,1,12,13,u,u,u,u,u,u,u,u]
839 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm7 = xmm9[0],xmm7[1],xmm9[2,3],xmm7[4],xmm9[5,6,7]
840 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
841 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,u,u,u,u,u,u,u,4,5,0,1,12,13]
842 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm7[0,1,2,3,4],xmm1[5,6,7]
843 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
844 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[10,11,u,u,2,3,14,15,u,u,u,u,u,u,u,u]
845 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2,3],xmm4[4],xmm2[5,6,7]
846 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,6,7,2,3,14,15]
847 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3,4],xmm0[5,6,7]
848 ; AVX2-FAST-NEXT: vmovdqa %xmm3, (%rsi)
849 ; AVX2-FAST-NEXT: vmovdqa %xmm5, (%rdx)
850 ; AVX2-FAST-NEXT: vmovdqa %xmm8, (%rcx)
851 ; AVX2-FAST-NEXT: vmovdqa %xmm6, (%r8)
852 ; AVX2-FAST-NEXT: vmovdqa %xmm1, (%r9)
853 ; AVX2-FAST-NEXT: vmovdqa %xmm0, (%rax)
854 ; AVX2-FAST-NEXT: vzeroupper
855 ; AVX2-FAST-NEXT: retq
857 ; AVX2-FAST-PERLANE-LABEL: load_i16_stride6_vf8:
858 ; AVX2-FAST-PERLANE: # %bb.0:
859 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
860 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm2
861 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm4
862 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 80(%rdi), %xmm0
863 ; AVX2-FAST-PERLANE-NEXT: vpslld $16, %xmm0, %xmm3
864 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rdi), %xmm1
865 ; AVX2-FAST-PERLANE-NEXT: vpsrldq {{.*#+}} xmm5 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
866 ; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
867 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm5 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6],ymm4[7]
868 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
869 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm5, %xmm7
870 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,1,0,3]
871 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
872 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm8[2],xmm6[3],xmm8[4,5],xmm6[6,7]
873 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} xmm3 = xmm6[0,1,2],xmm3[3]
874 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastw 74(%rdi), %xmm6
875 ; AVX2-FAST-PERLANE-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
876 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
877 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
878 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm7[2],xmm5[3],xmm7[4,5],xmm5[6,7]
879 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
880 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
881 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[2,1,2,3]
882 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm8 = xmm7[2,1,2,0,4,5,6,7]
883 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm6, %xmm6
884 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,3,2,1]
885 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm9 = xmm6[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
886 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1,2],xmm8[3],xmm9[4,5,6,7]
887 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} xmm9 = xmm1[0,1],xmm0[2],xmm1[3]
888 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,u,u,u,u,u,0,1,12,13,8,9]
889 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4],xmm10[5,6,7]
890 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
891 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
892 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3],xmm6[4,5,6,7]
893 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u,u,u,u,u,u,2,3,14,15,10,11]
894 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm7[5,6,7]
895 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2],ymm4[3,4],ymm2[5],ymm4[6,7]
896 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm2, %xmm4
897 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm7 = xmm4[2,2,2,2,4,5,6,7]
898 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm9 = xmm2[8,9,u,u,0,1,12,13,u,u,u,u,u,u,u,u]
899 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm7 = xmm9[0],xmm7[1],xmm9[2,3],xmm7[4],xmm9[5,6,7]
900 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
901 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,u,u,u,u,u,u,u,4,5,0,1,12,13]
902 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm7[0,1,2,3,4],xmm1[5,6,7]
903 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
904 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[10,11,u,u,2,3,14,15,u,u,u,u,u,u,u,u]
905 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2,3],xmm4[4],xmm2[5,6,7]
906 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,6,7,2,3,14,15]
907 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3,4],xmm0[5,6,7]
908 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm3, (%rsi)
909 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm5, (%rdx)
910 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm8, (%rcx)
911 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm6, (%r8)
912 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm1, (%r9)
913 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, (%rax)
914 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
; AVX2-FAST-PERLANE-NEXT: retq
;
; AVX512F-SLOW-LABEL: load_i16_stride6_vf8:
918 ; AVX512F-SLOW: # %bb.0:
919 ; AVX512F-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
920 ; AVX512F-SLOW-NEXT: vmovdqa 80(%rdi), %xmm0
921 ; AVX512F-SLOW-NEXT: vpslld $16, %xmm0, %xmm2
922 ; AVX512F-SLOW-NEXT: vmovdqa 64(%rdi), %xmm1
923 ; AVX512F-SLOW-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
924 ; AVX512F-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
925 ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %ymm4
926 ; AVX512F-SLOW-NEXT: vmovdqa 32(%rdi), %ymm5
927 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6],ymm5[7]
928 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm6 = xmm3[0,1,12,13,u,u,4,5,u,u,u,u,u,u,u,u]
929 ; AVX512F-SLOW-NEXT: vextracti128 $1, %ymm3, %xmm7
930 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm8 = xmm7[0,2,0,3]
931 ; AVX512F-SLOW-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,6,6,7]
932 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm8[2],xmm6[3],xmm8[4,5],xmm6[6,7]
933 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} xmm2 = xmm6[0,1,2],xmm2[3]
934 ; AVX512F-SLOW-NEXT: vpbroadcastw 74(%rdi), %xmm6
935 ; AVX512F-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
936 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u]
937 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[2,3,14,15,u,u,6,7,u,u,u,u,u,u,u,u]
938 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm7[2],xmm3[3],xmm7[4,5],xmm3[6,7]
939 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1,2],xmm6[3]
940 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
941 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[2,1,2,3]
942 ; AVX512F-SLOW-NEXT: vpshuflw {{.*#+}} xmm8 = xmm7[2,1,2,0,4,5,6,7]
943 ; AVX512F-SLOW-NEXT: vextracti128 $1, %ymm6, %xmm6
944 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,3,2,1]
945 ; AVX512F-SLOW-NEXT: vpshuflw {{.*#+}} xmm9 = xmm6[0,0,2,3,4,5,6,7]
946 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,1,3,3]
947 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1,2],xmm8[3],xmm9[4,5,6,7]
948 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} xmm9 = xmm1[0,1],xmm0[2],xmm1[3]
949 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,u,u,u,u,u,0,1,12,13,8,9]
950 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4],xmm10[5,6,7]
951 ; AVX512F-SLOW-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
952 ; AVX512F-SLOW-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[0,1,3,3,4,5,6,7]
953 ; AVX512F-SLOW-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
954 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3],xmm6[4,5,6,7]
955 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u,u,u,u,u,u,2,3,14,15,10,11]
956 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm7[5,6,7]
957 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
958 ; AVX512F-SLOW-NEXT: vextracti128 $1, %ymm4, %xmm5
959 ; AVX512F-SLOW-NEXT: vpshuflw {{.*#+}} xmm7 = xmm5[2,2,2,2,4,5,6,7]
960 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm9 = xmm4[8,9,u,u,0,1,12,13,u,u,u,u,u,u,u,u]
961 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm7 = xmm9[0],xmm7[1],xmm9[2,3],xmm7[4],xmm9[5,6,7]
962 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
963 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,u,u,u,u,u,u,u,4,5,0,1,12,13]
964 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm7[0,1,2,3,4],xmm1[5,6,7]
965 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[10,11,u,u,2,3,14,15,u,u,u,u,u,u,u,u]
966 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
967 ; AVX512F-SLOW-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
968 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5,6,7]
969 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,6,7,2,3,14,15]
970 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm4[0,1,2,3,4],xmm0[5,6,7]
971 ; AVX512F-SLOW-NEXT: vmovdqa %xmm2, (%rsi)
972 ; AVX512F-SLOW-NEXT: vmovdqa %xmm3, (%rdx)
973 ; AVX512F-SLOW-NEXT: vmovdqa %xmm8, (%rcx)
974 ; AVX512F-SLOW-NEXT: vmovdqa %xmm6, (%r8)
975 ; AVX512F-SLOW-NEXT: vmovdqa %xmm1, (%r9)
976 ; AVX512F-SLOW-NEXT: vmovdqa %xmm0, (%rax)
977 ; AVX512F-SLOW-NEXT: vzeroupper
; AVX512F-SLOW-NEXT: retq
;
; AVX512F-FAST-LABEL: load_i16_stride6_vf8:
981 ; AVX512F-FAST: # %bb.0:
982 ; AVX512F-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
983 ; AVX512F-FAST-NEXT: vmovdqa 80(%rdi), %xmm0
984 ; AVX512F-FAST-NEXT: vpslld $16, %xmm0, %xmm2
985 ; AVX512F-FAST-NEXT: vmovdqa 64(%rdi), %xmm1
986 ; AVX512F-FAST-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
987 ; AVX512F-FAST-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
988 ; AVX512F-FAST-NEXT: vmovdqa (%rdi), %ymm3
989 ; AVX512F-FAST-NEXT: vmovdqa 32(%rdi), %ymm4
990 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
991 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[0,1,12,13,u,u,4,5,u,u,u,u,u,u,u,u]
992 ; AVX512F-FAST-NEXT: vextracti128 $1, %ymm5, %xmm7
993 ; AVX512F-FAST-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,1,0,3]
994 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
995 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm8[2],xmm6[3],xmm8[4,5],xmm6[6,7]
996 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} xmm2 = xmm6[0,1,2],xmm2[3]
997 ; AVX512F-FAST-NEXT: vpbroadcastw 74(%rdi), %xmm6
998 ; AVX512F-FAST-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
999 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[2,3,14,15,u,u,6,7,u,u,u,u,u,u,u,u]
1000 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
1001 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm7[2],xmm5[3],xmm7[4,5],xmm5[6,7]
1002 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
1003 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
1004 ; AVX512F-FAST-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[2,1,2,3]
1005 ; AVX512F-FAST-NEXT: vpshuflw {{.*#+}} xmm8 = xmm7[2,1,2,0,4,5,6,7]
1006 ; AVX512F-FAST-NEXT: vextracti128 $1, %ymm6, %xmm6
1007 ; AVX512F-FAST-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,3,2,1]
1008 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm9 = xmm6[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
1009 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1,2],xmm8[3],xmm9[4,5,6,7]
1010 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} xmm9 = xmm1[0,1],xmm0[2],xmm1[3]
1011 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,u,u,u,u,u,0,1,12,13,8,9]
1012 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4],xmm10[5,6,7]
1013 ; AVX512F-FAST-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
1014 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
1015 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3],xmm6[4,5,6,7]
1016 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u,u,u,u,u,u,2,3,14,15,10,11]
1017 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm7[5,6,7]
1018 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
1019 ; AVX512F-FAST-NEXT: vextracti128 $1, %ymm3, %xmm4
1020 ; AVX512F-FAST-NEXT: vpshuflw {{.*#+}} xmm7 = xmm4[2,2,2,2,4,5,6,7]
1021 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm9 = xmm3[8,9,u,u,0,1,12,13,u,u,u,u,u,u,u,u]
1022 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm7 = xmm9[0],xmm7[1],xmm9[2,3],xmm7[4],xmm9[5,6,7]
1023 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
1024 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,u,u,u,u,u,u,u,4,5,0,1,12,13]
1025 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm7[0,1,2,3,4],xmm1[5,6,7]
1026 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
1027 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[10,11,u,u,2,3,14,15,u,u,u,u,u,u,u,u]
1028 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
1029 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,6,7,2,3,14,15]
1030 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3,4],xmm0[5,6,7]
1031 ; AVX512F-FAST-NEXT: vmovdqa %xmm2, (%rsi)
1032 ; AVX512F-FAST-NEXT: vmovdqa %xmm5, (%rdx)
1033 ; AVX512F-FAST-NEXT: vmovdqa %xmm8, (%rcx)
1034 ; AVX512F-FAST-NEXT: vmovdqa %xmm6, (%r8)
1035 ; AVX512F-FAST-NEXT: vmovdqa %xmm1, (%r9)
1036 ; AVX512F-FAST-NEXT: vmovdqa %xmm0, (%rax)
1037 ; AVX512F-FAST-NEXT: vzeroupper
; AVX512F-FAST-NEXT: retq
;
; AVX512BW-LABEL: load_i16_stride6_vf8:
1041 ; AVX512BW: # %bb.0:
1042 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
1043 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
1044 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm1
1045 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [0,6,12,18,24,30,36,42]
1046 ; AVX512BW-NEXT: vpermi2w %zmm1, %zmm0, %zmm2
1047 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,7,13,19,25,31,37,43]
1048 ; AVX512BW-NEXT: vpermi2w %zmm1, %zmm0, %zmm3
1049 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm4 = [2,8,14,20,26,32,38,44]
1050 ; AVX512BW-NEXT: vpermi2w %zmm1, %zmm0, %zmm4
1051 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm5 = [3,9,15,21,27,33,39,45]
1052 ; AVX512BW-NEXT: vpermi2w %zmm1, %zmm0, %zmm5
1053 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm6 = [4,10,16,22,28,34,40,46]
1054 ; AVX512BW-NEXT: vpermi2w %zmm1, %zmm0, %zmm6
1055 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm7 = [5,11,17,23,29,35,41,47]
1056 ; AVX512BW-NEXT: vpermi2w %zmm1, %zmm0, %zmm7
1057 ; AVX512BW-NEXT: vmovdqa %xmm2, (%rsi)
1058 ; AVX512BW-NEXT: vmovdqa %xmm3, (%rdx)
1059 ; AVX512BW-NEXT: vmovdqa %xmm4, (%rcx)
1060 ; AVX512BW-NEXT: vmovdqa %xmm5, (%r8)
1061 ; AVX512BW-NEXT: vmovdqa %xmm6, (%r9)
1062 ; AVX512BW-NEXT: vmovdqa %xmm7, (%rax)
1063 ; AVX512BW-NEXT: vzeroupper
1064 ; AVX512BW-NEXT: retq
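; With AVX512BW available, the whole stride-6 deinterleave lowers to six
; vpermi2w cross-register word permutes (see the checks above): result j
; gathers words {j, j+6, j+12, ..., j+42} from the concatenation of the two
; input zmm registers, e.g. {5,11,17,23,29,35,41,47} for the last output.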
  %wide.vec = load <48 x i16>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <48 x i16> %wide.vec, <48 x i16> poison, <8 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42>
  %strided.vec1 = shufflevector <48 x i16> %wide.vec, <48 x i16> poison, <8 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43>
  %strided.vec2 = shufflevector <48 x i16> %wide.vec, <48 x i16> poison, <8 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44>
  %strided.vec3 = shufflevector <48 x i16> %wide.vec, <48 x i16> poison, <8 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45>
  %strided.vec4 = shufflevector <48 x i16> %wide.vec, <48 x i16> poison, <8 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46>
  %strided.vec5 = shufflevector <48 x i16> %wide.vec, <48 x i16> poison, <8 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47>
  store <8 x i16> %strided.vec0, ptr %out.vec0, align 64
  store <8 x i16> %strided.vec1, ptr %out.vec1, align 64
  store <8 x i16> %strided.vec2, ptr %out.vec2, align 64
  store <8 x i16> %strided.vec3, ptr %out.vec3, align 64
  store <8 x i16> %strided.vec4, ptr %out.vec4, align 64
  store <8 x i16> %strided.vec5, ptr %out.vec5, align 64
  ret void
}

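; The wider variants below exercise the same six-way deinterleave at larger
; vector factors, starting with vf16.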
define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
; SSE-LABEL: load_i16_stride6_vf16:
; SSE: # %bb.0:
; SSE-NEXT: subq $136, %rsp
1085 ; SSE-NEXT: movdqa 112(%rdi), %xmm9
1086 ; SSE-NEXT: movdqa 128(%rdi), %xmm7
1087 ; SSE-NEXT: movdqa 64(%rdi), %xmm2
1088 ; SSE-NEXT: movdqa 80(%rdi), %xmm11
1089 ; SSE-NEXT: movdqa (%rdi), %xmm3
1090 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1091 ; SSE-NEXT: movdqa 16(%rdi), %xmm6
1092 ; SSE-NEXT: movdqa 32(%rdi), %xmm0
1093 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1094 ; SSE-NEXT: movdqa 48(%rdi), %xmm8
1095 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,2,4,5,6,7]
1096 ; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,65535,0,0,0,65535,65535]
1097 ; SSE-NEXT: movdqa %xmm10, %xmm1
1098 ; SSE-NEXT: pandn %xmm0, %xmm1
1099 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,0,3]
1100 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1101 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
1102 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
1103 ; SSE-NEXT: pand %xmm10, %xmm0
1104 ; SSE-NEXT: por %xmm1, %xmm0
1105 ; SSE-NEXT: movdqa %xmm0, %xmm1
1106 ; SSE-NEXT: movdqa %xmm2, %xmm13
1107 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm2[2,2,3,3]
1108 ; SSE-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
1109 ; SSE-NEXT: movdqa %xmm11, %xmm0
1110 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[3,0]
1111 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1112 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1113 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm11[0,0]
1114 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm11[2,3]
1115 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1116 ; SSE-NEXT: pslld $16, %xmm11
1117 ; SSE-NEXT: psrldq {{.*#+}} xmm13 = xmm13[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
1118 ; SSE-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3]
1119 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm8[0,3,2,3]
1120 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm4[0,1,0,2,4,5,6,7]
1121 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[3,1],xmm0[1,3]
1122 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2,0]
1123 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1124 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm7[0,1,1,2,4,5,6,7]
1125 ; SSE-NEXT: movdqa %xmm10, %xmm3
1126 ; SSE-NEXT: pandn %xmm0, %xmm3
1127 ; SSE-NEXT: movdqa 96(%rdi), %xmm0
1128 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1129 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,1,0,3]
1130 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm5[0,1,2,3,4,6,6,7]
1131 ; SSE-NEXT: movdqa %xmm9, %xmm11
1132 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1133 ; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm9[2],xmm2[3],xmm9[3]
1134 ; SSE-NEXT: pand %xmm10, %xmm2
1135 ; SSE-NEXT: por %xmm3, %xmm2
1136 ; SSE-NEXT: movdqa 160(%rdi), %xmm14
1137 ; SSE-NEXT: movdqa 176(%rdi), %xmm3
1138 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,2,3,3]
1139 ; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
1140 ; SSE-NEXT: movdqa %xmm3, %xmm1
1141 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm14[3,0]
1142 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1143 ; SSE-NEXT: movdqa %xmm14, %xmm13
1144 ; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1145 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[1,0],xmm3[0,0]
1146 ; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[2,0],xmm3[2,3]
1147 ; SSE-NEXT: pslld $16, %xmm3
1148 ; SSE-NEXT: psrldq {{.*#+}} xmm13 = xmm13[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
1149 ; SSE-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm3[0],xmm13[1],xmm3[1],xmm13[2],xmm3[2],xmm13[3],xmm3[3]
1150 ; SSE-NEXT: movdqa 144(%rdi), %xmm1
1151 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1152 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
1153 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm1[0,1,0,2,4,5,6,7]
1154 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[3,1],xmm9[1,3]
1155 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm13[2,0]
1156 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1157 ; SSE-NEXT: movdqa %xmm6, %xmm13
1158 ; SSE-NEXT: psrld $16, %xmm13
1159 ; SSE-NEXT: pshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
1160 ; SSE-NEXT: # xmm9 = mem[0,1,2,3,5,7,6,7]
1161 ; SSE-NEXT: punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm13[2],xmm9[3],xmm13[3]
1162 ; SSE-NEXT: movdqa %xmm10, %xmm13
1163 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1164 ; SSE-NEXT: pandn %xmm15, %xmm13
1165 ; SSE-NEXT: pand %xmm10, %xmm9
1166 ; SSE-NEXT: por %xmm13, %xmm9
1167 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
1168 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[3,1],xmm4[1,3]
1169 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,1],xmm12[2,0]
1170 ; SSE-NEXT: movdqa %xmm11, %xmm4
1171 ; SSE-NEXT: psrld $16, %xmm4
1172 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm5[0,1,2,3,5,7,6,7]
1173 ; SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
1174 ; SSE-NEXT: pand %xmm10, %xmm2
1175 ; SSE-NEXT: movdqa %xmm7, %xmm5
1176 ; SSE-NEXT: pandn %xmm7, %xmm10
1177 ; SSE-NEXT: por %xmm2, %xmm10
1178 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
1179 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[1,3]
1180 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,1],xmm0[2,0]
1181 ; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1182 ; SSE-NEXT: movdqa %xmm15, %xmm1
1183 ; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
1184 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
1185 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
1186 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,0,65535,65535,65535]
1187 ; SSE-NEXT: movdqa %xmm2, %xmm4
1188 ; SSE-NEXT: pandn %xmm1, %xmm4
1189 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
1190 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1191 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm6[0,0]
1192 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm6[2,3]
1193 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[0,2,2,3,4,5,6,7]
1194 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
1195 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,2,3,4,5,6,7]
1196 ; SSE-NEXT: pand %xmm2, %xmm1
1197 ; SSE-NEXT: por %xmm4, %xmm1
1198 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1199 ; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1200 ; SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,2]
1201 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1202 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm0[0,1,2,3,4,6,6,7]
1203 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,2,0]
1204 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,5,4]
1205 ; SSE-NEXT: movdqa {{.*#+}} xmm12 = [65535,65535,65535,65535,65535,0,0,0]
1206 ; SSE-NEXT: movdqa %xmm12, %xmm0
1207 ; SSE-NEXT: pandn %xmm4, %xmm0
1208 ; SSE-NEXT: pand %xmm12, %xmm1
1209 ; SSE-NEXT: por %xmm1, %xmm0
1210 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1211 ; SSE-NEXT: movdqa %xmm7, %xmm1
1212 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1213 ; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
1214 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
1215 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm7[1,1,1,1]
1216 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
1217 ; SSE-NEXT: movdqa %xmm2, %xmm4
1218 ; SSE-NEXT: pandn %xmm1, %xmm4
1219 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
1220 ; SSE-NEXT: movaps %xmm10, %xmm13
1221 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1222 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[1,0],xmm15[0,0]
1223 ; SSE-NEXT: shufps {{.*#+}} xmm13 = xmm13[2,0],xmm15[2,3]
1224 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm13[0,2,2,3,4,5,6,7]
1225 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
1226 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[1,0,2,3,4,5,6,7]
1227 ; SSE-NEXT: pand %xmm2, %xmm0
1228 ; SSE-NEXT: por %xmm4, %xmm0
1229 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
1230 ; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
1231 ; SSE-NEXT: # xmm6 = xmm6[0,1],mem[0,2]
1232 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,4,6,6,7]
1233 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
1234 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm1[0,1,2,3,4,6,5,4]
1235 ; SSE-NEXT: movdqa %xmm12, %xmm1
1236 ; SSE-NEXT: pandn %xmm4, %xmm1
1237 ; SSE-NEXT: pand %xmm12, %xmm0
1238 ; SSE-NEXT: por %xmm0, %xmm1
1239 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1240 ; SSE-NEXT: movdqa %xmm8, %xmm1
1241 ; SSE-NEXT: movdqa %xmm8, (%rsp) # 16-byte Spill
1242 ; SSE-NEXT: movdqa %xmm8, %xmm0
1243 ; SSE-NEXT: psrlq $48, %xmm0
1244 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
1245 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm8[2,2,3,3]
1246 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0]
1247 ; SSE-NEXT: movdqa %xmm2, %xmm0
1248 ; SSE-NEXT: pandn %xmm4, %xmm0
1249 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
1250 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
1251 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,3,4,5,6,7]
1252 ; SSE-NEXT: pand %xmm2, %xmm3
1253 ; SSE-NEXT: por %xmm0, %xmm3
1254 ; SSE-NEXT: pshufhw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1255 ; SSE-NEXT: # xmm0 = mem[0,1,2,3,7,5,6,7]
1256 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
1257 ; SSE-NEXT: movdqa %xmm12, %xmm11
1258 ; SSE-NEXT: pandn %xmm0, %xmm11
1259 ; SSE-NEXT: pand %xmm12, %xmm3
1260 ; SSE-NEXT: por %xmm3, %xmm11
1261 ; SSE-NEXT: movdqa %xmm7, %xmm4
1262 ; SSE-NEXT: movdqa %xmm7, %xmm0
1263 ; SSE-NEXT: psrlq $48, %xmm0
1264 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm5[2,2,3,3]
1265 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
1266 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm13[3,1,2,3,4,5,6,7]
1267 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
1268 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
1269 ; SSE-NEXT: pand %xmm2, %xmm0
1270 ; SSE-NEXT: pandn %xmm3, %xmm2
1271 ; SSE-NEXT: por %xmm0, %xmm2
1272 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,7,5,6,7]
1273 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
1274 ; SSE-NEXT: movdqa %xmm12, %xmm7
1275 ; SSE-NEXT: pandn %xmm0, %xmm7
1276 ; SSE-NEXT: pand %xmm12, %xmm2
1277 ; SSE-NEXT: por %xmm2, %xmm7
1278 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
1279 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,1,1]
1280 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
1281 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm6[2,3,2,3]
1282 ; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
1283 ; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm8[0,1,0,3]
1284 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm13[0,1,2,3,4,5,4,6]
1285 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm1[1]
1286 ; SSE-NEXT: movss {{.*#+}} xmm3 = xmm2[0],xmm3[1,2,3]
1287 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
1288 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[0,2,2,3,4,5,6,7]
1289 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
1290 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,5,4,6]
1291 ; SSE-NEXT: movdqa %xmm12, %xmm1
1292 ; SSE-NEXT: pandn %xmm2, %xmm1
1293 ; SSE-NEXT: andps %xmm12, %xmm3
1294 ; SSE-NEXT: por %xmm3, %xmm1
1295 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm15[1,1,1,1]
1296 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm10[2,3,2,3]
1297 ; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
1298 ; SSE-NEXT: pshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
1299 ; SSE-NEXT: # xmm0 = mem[0,1,0,3]
1300 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1301 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,5,4,6]
1302 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm4[1]
1303 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm3[0],xmm2[1,2,3]
1304 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm14[0,2,2,3,4,5,6,7]
1305 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
1306 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,6]
1307 ; SSE-NEXT: movdqa %xmm12, %xmm4
1308 ; SSE-NEXT: pandn %xmm3, %xmm4
1309 ; SSE-NEXT: andps %xmm12, %xmm2
1310 ; SSE-NEXT: por %xmm2, %xmm4
1311 ; SSE-NEXT: psrlq $48, %xmm5
1312 ; SSE-NEXT: psrldq {{.*#+}} xmm6 = xmm6[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
1313 ; SSE-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
1314 ; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
1315 ; SSE-NEXT: psrld $16, %xmm0
1316 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm13[0,1,2,3,4,5,5,7]
1317 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm0[1]
1318 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm6[0],xmm2[1,2,3]
1319 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm8[3,1,2,3,4,5,6,7]
1320 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
1321 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,5,7]
1322 ; SSE-NEXT: movdqa %xmm12, %xmm10
1323 ; SSE-NEXT: pandn %xmm3, %xmm10
1324 ; SSE-NEXT: andps %xmm12, %xmm2
1325 ; SSE-NEXT: por %xmm2, %xmm10
1326 ; SSE-NEXT: psrlq $48, %xmm15
1327 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1328 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
1329 ; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
1330 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
1331 ; SSE-NEXT: psrld $16, %xmm3
1332 ; SSE-NEXT: pshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
1333 ; SSE-NEXT: # xmm2 = mem[0,1,2,3,4,5,5,7]
1334 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
1335 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
1336 ; SSE-NEXT: andps %xmm12, %xmm2
1337 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm14[3,1,2,3,4,5,6,7]
1338 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
1339 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,5,7]
1340 ; SSE-NEXT: pandn %xmm3, %xmm12
1341 ; SSE-NEXT: por %xmm2, %xmm12
1342 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1343 ; SSE-NEXT: movaps %xmm0, 16(%rsi)
1344 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1345 ; SSE-NEXT: movaps %xmm0, (%rsi)
1346 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1347 ; SSE-NEXT: movaps %xmm0, 16(%rdx)
1348 ; SSE-NEXT: movaps %xmm9, (%rdx)
1349 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1350 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
1351 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1352 ; SSE-NEXT: movaps %xmm0, (%rcx)
1353 ; SSE-NEXT: movdqa %xmm7, 16(%r8)
1354 ; SSE-NEXT: movdqa %xmm11, (%r8)
1355 ; SSE-NEXT: movdqa %xmm4, 16(%r9)
1356 ; SSE-NEXT: movdqa %xmm1, (%r9)
1357 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
1358 ; SSE-NEXT: movdqa %xmm12, 16(%rax)
1359 ; SSE-NEXT: movdqa %xmm10, (%rax)
; SSE-NEXT: addq $136, %rsp
; SSE-NEXT: retq
;
; AVX1-ONLY-LABEL: load_i16_stride6_vf16:
1364 ; AVX1-ONLY: # %bb.0:
1365 ; AVX1-ONLY-NEXT: subq $88, %rsp
1366 ; AVX1-ONLY-NEXT: vmovdqa 96(%rdi), %xmm0
1367 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1368 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm13 = xmm0[0,1,0,3]
1369 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm0 = xmm13[0,1,2,3,4,6,6,7]
1370 ; AVX1-ONLY-NEXT: vmovdqa 112(%rdi), %xmm1
1371 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1372 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, %xmm8
1373 ; AVX1-ONLY-NEXT: vmovdqa 80(%rdi), %xmm1
1374 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1375 ; AVX1-ONLY-NEXT: vpslld $16, %xmm1, %xmm2
1376 ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm4
1377 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm3 = xmm4[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
1378 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
1379 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm5
1380 ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm0
1381 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1382 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm2
1383 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1384 ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm7
1385 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm1
1386 ; AVX1-ONLY-NEXT: vpsrlq $16, %xmm7, %xmm10
1387 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[0,3,2,3]
1388 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, %xmm6
1389 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm11 = xmm3[0,1,0,2,4,5,6,7]
1390 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
1391 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm15 = xmm0[0,1,0,3]
1392 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm11 = xmm15[0,1,2,3,4,6,6,7]
1393 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm11 = xmm11[2],xmm2[2],xmm11[3],xmm2[3]
1394 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0,1,2],xmm10[3,4,5],xmm11[6,7]
1395 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm10[0,1,2],ymm5[3,4,5,6,7]
1396 ; AVX1-ONLY-NEXT: vmovdqa 176(%rdi), %xmm10
1397 ; AVX1-ONLY-NEXT: vpslld $16, %xmm10, %xmm5
1398 ; AVX1-ONLY-NEXT: vmovdqa 160(%rdi), %xmm11
1399 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm12 = xmm11[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
1400 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm14 = xmm12[0],xmm5[0],xmm12[1],xmm5[1],xmm12[2],xmm5[2],xmm12[3],xmm5[3]
1401 ; AVX1-ONLY-NEXT: vmovdqa 128(%rdi), %xmm12
1402 ; AVX1-ONLY-NEXT: vpsrlq $16, %xmm12, %xmm0
1403 ; AVX1-ONLY-NEXT: vmovdqa 144(%rdi), %xmm1
1404 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1405 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
1406 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm9 = xmm1[0,1,0,2,4,5,6,7]
1407 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm9[0],xmm0[0],xmm9[1],xmm0[1]
1408 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm14[6,7]
1409 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm14 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0]
1410 ; AVX1-ONLY-NEXT: vandps %ymm2, %ymm14, %ymm2
1411 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
1412 ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm14, %ymm0
1413 ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm2, %ymm0
1414 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1415 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm0 = xmm13[0,1,2,3,5,7,6,7]
1416 ; AVX1-ONLY-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1417 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm8, %xmm2
1418 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
1419 ; AVX1-ONLY-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1420 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm2 = xmm4[2,2,3,3]
1421 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
1422 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm13[0],xmm2[1],xmm13[1],xmm2[2],xmm13[2],xmm2[3],xmm13[3]
1423 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
1424 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[0,1,1,3,4,5,6,7]
1425 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm3 = xmm7[1,1,1,1]
1426 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
1427 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm3 = xmm15[0,1,2,3,5,7,6,7]
1428 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
1429 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm5, %xmm9
1430 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm3[2],xmm9[2],xmm3[3],xmm9[3]
1431 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3,4,5],xmm3[6,7]
1432 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
1433 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
1434 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm2 = xmm12[1,1,1,1]
1435 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
1436 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm2 = xmm11[2,2,3,3]
1437 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1],xmm2[2],xmm10[2],xmm2[3],xmm10[3]
1438 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5],xmm2[6,7]
1439 ; AVX1-ONLY-NEXT: vandps %ymm0, %ymm14, %ymm0
1440 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
1441 ; AVX1-ONLY-NEXT: vandnps %ymm1, %ymm14, %ymm1
1442 ; AVX1-ONLY-NEXT: vorps %ymm1, %ymm0, %ymm0
1443 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1444 ; AVX1-ONLY-NEXT: vmovdqa %xmm6, %xmm2
1445 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm0 = xmm6[1,1,1,1]
1446 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm1 = xmm7[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
1447 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
1448 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm1 = [4,5,0,1,12,13,14,15,8,9,10,11,12,13,14,15]
1449 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm6 # 16-byte Folded Reload
1450 ; AVX1-ONLY-NEXT: # xmm6 = xmm5[0,1],mem[2,3],xmm5[4,5,6,7]
1451 ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm6, %xmm3
1452 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[3,4],xmm3[5,6,7]
1453 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
1454 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm8[0,1],xmm5[2,3],xmm8[4,5,6,7]
1455 ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm3, %xmm1
1456 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,2,3,4,5,6,7,8,9,0,1,12,13,8,9]
1457 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm15 = xmm4[0,1,2,3],xmm13[4,5],xmm4[6,7]
1458 ; AVX1-ONLY-NEXT: vpshufb %xmm9, %xmm15, %xmm13
1459 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm13, %ymm1
1460 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm13 = [0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
1461 ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm13, %ymm0
1462 ; AVX1-ONLY-NEXT: vandps %ymm1, %ymm13, %ymm1
1463 ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm1, %ymm0
1464 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1465 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm1 = xmm4[1,1,1,1]
1466 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm8 = xmm12[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
1467 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm8[0],xmm1[0]
1468 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm8 = xmm11[0,1,2,3],xmm10[4,5],xmm11[6,7]
1469 ; AVX1-ONLY-NEXT: vpshufb %xmm9, %xmm8, %xmm9
1470 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm9[0,1,2],xmm1[3,4],xmm9[5,6,7]
1471 ; AVX1-ONLY-NEXT: vandps %ymm0, %ymm14, %ymm0
1472 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
1473 ; AVX1-ONLY-NEXT: vandnps %ymm1, %ymm14, %ymm1
1474 ; AVX1-ONLY-NEXT: vorps %ymm1, %ymm0, %ymm0
1475 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1476 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, %xmm0
1477 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm2, %xmm1
1478 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm9 = xmm7[2,2,3,3]
1479 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm9[0],xmm1[0]
1480 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm9 = [6,7,2,3,14,15,14,15,8,9,10,11,12,13,14,15]
1481 ; AVX1-ONLY-NEXT: vpshufb %xmm9, %xmm6, %xmm2
1482 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3,4],xmm2[5,6,7]
1483 ; AVX1-ONLY-NEXT: vpshufb %xmm9, %xmm3, %xmm2
1484 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,14,15,2,3,14,15,10,11]
1485 ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm15, %xmm9
1486 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm9, %ymm2
1487 ; AVX1-ONLY-NEXT: vandnps %ymm1, %ymm13, %ymm1
1488 ; AVX1-ONLY-NEXT: vandps %ymm2, %ymm13, %ymm2
1489 ; AVX1-ONLY-NEXT: vorps %ymm1, %ymm2, %ymm1
1490 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm4, %xmm2
1491 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm9 = xmm12[2,2,3,3]
1492 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm9[0],xmm2[0]
1493 ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm8, %xmm3
1494 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3,4],xmm3[5,6,7]
1495 ; AVX1-ONLY-NEXT: vandps %ymm1, %ymm14, %ymm1
1496 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
1497 ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm14, %ymm2
1498 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm1, %ymm14
1499 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
1500 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm1 = xmm6[1,1,1,1]
1501 ; AVX1-ONLY-NEXT: vmovdqa %xmm5, %xmm13
1502 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
1503 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
1504 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1505 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
1506 ; AVX1-ONLY-NEXT: # xmm2 = xmm2[0,1],mem[2,3],xmm2[4,5,6,7]
1507 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,4,5,6,7,0,1,4,5,0,1,12,13]
1508 ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm2, %xmm8
1509 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm8, %ymm1
1510 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
1511 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm8 = xmm4[1,1,1,1]
1512 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
1513 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm9 = xmm15[2,3,2,3]
1514 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
1515 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[0,1,0,3]
1516 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm9 = xmm7[0,1,2,3,4,5,4,6]
1517 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm9 = xmm9[1],xmm0[1]
1518 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1],xmm9[2,3,4,5,6,7]
1519 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm9 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
1520 ; AVX1-ONLY-NEXT: vandnps %ymm1, %ymm9, %ymm1
1521 ; AVX1-ONLY-NEXT: vandps %ymm9, %ymm8, %ymm8
1522 ; AVX1-ONLY-NEXT: vorps %ymm1, %ymm8, %ymm1
1523 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm8 = xmm10[0,1],xmm11[2,3],xmm10[4,5,6,7]
1524 ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm8, %xmm3
1525 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm10 = xmm12[0,1,0,3]
1526 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm11 = xmm10[0,1,2,3,4,5,4,6]
1527 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
1528 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm11 = xmm11[1],xmm5[1]
1529 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm11[0,1,2,3,4],xmm3[5,6,7]
1530 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
1531 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6,7]
1532 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm6, %xmm3
1533 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm11 = xmm13[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
1534 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm11[0],xmm3[0],xmm11[1],xmm3[1],xmm11[2],xmm3[2],xmm11[3],xmm3[3]
1535 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm11 = [6,7,2,3,4,5,6,7,6,7,6,7,2,3,14,15]
1536 ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm2, %xmm2
1537 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
1538 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm4, %xmm3
1539 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm4 = xmm15[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
1540 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
1541 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm0, %xmm4
1542 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm6 = xmm7[0,1,2,3,4,5,5,7]
1543 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm6[1],xmm4[1]
1544 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3,4,5,6,7]
1545 ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm9, %ymm2
1546 ; AVX1-ONLY-NEXT: vandps %ymm3, %ymm9, %ymm3
1547 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm3, %ymm2
1548 ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm8, %xmm3
1549 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm5, %xmm4
1550 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm5 = xmm10[0,1,2,3,4,5,5,7]
1551 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm5[1],xmm4[1]
1552 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3,4],xmm3[5,6,7]
1553 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
1554 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
1555 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
1556 ; AVX1-ONLY-NEXT: vmovaps %ymm3, (%rsi)
1557 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1558 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rdx)
1559 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1560 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rcx)
1561 ; AVX1-ONLY-NEXT: vmovaps %ymm14, (%r8)
1562 ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%r9)
1563 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
1564 ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%rax)
1565 ; AVX1-ONLY-NEXT: addq $88, %rsp
1566 ; AVX1-ONLY-NEXT: vzeroupper
; AVX1-ONLY-NEXT: retq
;
; AVX2-SLOW-LABEL: load_i16_stride6_vf16:
1570 ; AVX2-SLOW: # %bb.0:
1571 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm4
1572 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm5
1573 ; AVX2-SLOW-NEXT: vmovdqa 64(%rdi), %ymm0
1574 ; AVX2-SLOW-NEXT: vmovdqa 96(%rdi), %ymm3
1575 ; AVX2-SLOW-NEXT: vmovdqa 160(%rdi), %ymm1
1576 ; AVX2-SLOW-NEXT: vmovdqa 128(%rdi), %ymm2
1577 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
1578 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm8, %xmm9
1579 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm6 = xmm9[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
1580 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm7 = xmm8[2,2,2,2,4,5,6,7]
1581 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[0,1,2,2]
1582 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm7[3],xmm6[4,5],xmm7[6],xmm6[7]
1583 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm10
1584 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6],ymm5[7]
1585 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm6 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
1586 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm11, %xmm12
1587 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm12[0,2,0,3]
1588 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,6,7]
1589 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm13 = xmm6[0,1],xmm7[2],xmm6[3],xmm7[4,5],xmm6[6,7]
1590 ; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm0[2,3],ymm3[2,3]
1591 ; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm0[0,1],ymm3[0,1]
1592 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
1593 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
1594 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1,2],ymm0[3,4,5,6,7]
1595 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm10 = ymm0[0,1,2],ymm10[3,4,5,6,7],ymm0[8,9,10],ymm10[11,12,13,14,15]
1596 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
1597 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5]
1598 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
1599 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0,1,2],xmm8[3],xmm9[4,5],xmm8[6],xmm9[7]
1600 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
1601 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm9 = xmm12[u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u]
1602 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm10 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
1603 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0,1],xmm9[2],xmm10[3],xmm9[4,5],xmm10[6,7]
1604 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
1605 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm9[0,1,2],ymm3[3,4,5,6,7]
1606 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm8 = ymm3[0,1,2],ymm8[3,4,5,6,7],ymm3[8,9,10],ymm8[11,12,13,14,15]
1607 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm8[4,5,6,7]
1608 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
1609 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm8, %xmm9
1610 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
1611 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm10 = xmm9[0,1,2,3,6,5,6,4]
1612 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm11 = xmm8[2,1,0,3]
1613 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm8 = xmm11[0,0,0,0,4,5,6,7]
1614 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,6,7]
1615 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm10[4],xmm8[5,6],xmm10[7]
1616 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
1617 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm10 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7]
1618 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm12 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
1619 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm13 = xmm12[2,1,2,3]
1620 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm14 = xmm13[2,1,2,0,4,5,6,7]
1621 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm12, %xmm12
1622 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[0,3,2,1]
1623 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm15 = xmm12[0,0,2,3,4,5,6,7]
1624 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm15 = xmm15[0,1,3,3]
1625 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1,2],xmm14[3],xmm15[4,5,6,7]
1626 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm15 = ymm10[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
1627 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm8 = ymm15[0,1,2],ymm8[3,4,5,6,7],ymm15[8,9,10],ymm8[11,12,13,14,15]
1628 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,6,5,4]
1629 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3,4],xmm15[5,6,7]
1630 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
1631 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
1632 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm11 = xmm11[1,1,1,1,4,5,6,7]
1633 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,7,7]
1634 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4],xmm11[5,6],xmm9[7]
1635 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm11 = xmm13[3,1,2,1,4,5,6,7]
1636 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm12 = xmm12[0,1,3,3,4,5,6,7]
1637 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,7,7,7,7]
1638 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1,2],xmm11[3],xmm12[4,5,6,7]
1639 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
1640 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm10 = ymm10[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
1641 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7],ymm10[8,9,10],ymm9[11,12,13,14,15]
1642 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[0,1,3,2]
1643 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0,1,2,3,4],xmm10[5,6,7]
1644 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
1645 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
1646 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm4, %xmm5
1647 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm10 = xmm5[2,2,2,2,4,5,6,7]
1648 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm11 = xmm4[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
1649 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6,7]
1650 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5],ymm7[6],ymm6[7]
1651 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
1652 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
1653 ; AVX2-SLOW-NEXT: vpblendvb %ymm11, %ymm10, %ymm7, %ymm7
1654 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
1655 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm2 = xmm4[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
1656 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm5[1,1,2,3]
1657 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
1658 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2,3],xmm4[4],xmm2[5,6,7]
1659 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm4
1660 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm5 = ymm6[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
1661 ; AVX2-SLOW-NEXT: vpblendvb %ymm11, %ymm2, %ymm5, %ymm2
1662 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm5 = xmm4[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
1663 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
1664 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm6 = xmm1[0,1,0,2,4,5,6,7]
1665 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,6,6,6]
1666 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4],xmm6[5],xmm5[6,7]
1667 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
1668 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm7[0,1,2,3,4],ymm5[5,6,7]
1669 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
1670 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
1671 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,3]
1672 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4],xmm1[5],xmm4[6,7]
1673 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
1674 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
1675 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rsi)
1676 ; AVX2-SLOW-NEXT: vmovdqa %ymm3, (%rdx)
1677 ; AVX2-SLOW-NEXT: vmovdqa %ymm8, (%rcx)
1678 ; AVX2-SLOW-NEXT: vmovdqa %ymm9, (%r8)
1679 ; AVX2-SLOW-NEXT: vmovdqa %ymm5, (%r9)
1680 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
1681 ; AVX2-SLOW-NEXT: vmovdqa %ymm1, (%rax)
1682 ; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: load_i16_stride6_vf16:
1686 ; AVX2-FAST: # %bb.0:
1687 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm4
1688 ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %ymm5
1689 ; AVX2-FAST-NEXT: vmovdqa 64(%rdi), %ymm0
1690 ; AVX2-FAST-NEXT: vmovdqa 96(%rdi), %ymm3
1691 ; AVX2-FAST-NEXT: vmovdqa 160(%rdi), %ymm1
1692 ; AVX2-FAST-NEXT: vmovdqa 128(%rdi), %ymm2
1693 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm8 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
1694 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm6 = xmm8[u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u]
1695 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm8, %xmm9
1696 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm7 = xmm9[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
1697 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
1698 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm10
1699 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm11 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6],ymm5[7]
1700 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm6 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
1701 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm11, %xmm7
1702 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm12 = xmm7[2,1,0,3]
1703 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm7 = xmm12[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
1704 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm13 = xmm6[0,1],xmm7[2],xmm6[3],xmm7[4,5],xmm6[6,7]
1705 ; AVX2-FAST-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm0[2,3],ymm3[2,3]
1706 ; AVX2-FAST-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm0[0,1],ymm3[0,1]
1707 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
1708 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
1709 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1,2],ymm0[3,4,5,6,7]
1710 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm10 = ymm0[0,1,2],ymm10[3,4,5,6,7],ymm0[8,9,10],ymm10[11,12,13,14,15]
1711 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
1712 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5]
1713 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
1714 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0,1,2],xmm8[3],xmm9[4,5],xmm8[6],xmm9[7]
1715 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
1716 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm9 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
1717 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm10 = xmm12[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
1718 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm10[2],xmm9[3],xmm10[4,5],xmm9[6,7]
1719 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
1720 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm9[0,1,2],ymm3[3,4,5,6,7]
1721 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm8 = ymm3[0,1,2],ymm8[3,4,5,6,7],ymm3[8,9,10],ymm8[11,12,13,14,15]
1722 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm8[4,5,6,7]
1723 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
1724 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm8, %xmm9
1725 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
1726 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm10 = xmm9[0,1,2,3,6,5,6,4]
1727 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm11 = xmm8[2,1,0,3]
1728 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm8 = xmm11[0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u]
1729 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm10[4],xmm8[5,6],xmm10[7]
1730 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
1731 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm10 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7]
1732 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm12 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
1733 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm13 = xmm12[2,1,2,3]
1734 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm14 = xmm13[2,1,2,0,4,5,6,7]
1735 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm12, %xmm12
1736 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[0,3,2,1]
1737 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm15 = xmm12[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
1738 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1,2],xmm14[3],xmm15[4,5,6,7]
1739 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm15 = ymm10[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
1740 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm8 = ymm15[0,1,2],ymm8[3,4,5,6,7],ymm15[8,9,10],ymm8[11,12,13,14,15]
1741 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,6,5,4]
1742 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3,4],xmm15[5,6,7]
1743 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
1744 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
1745 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u]
1746 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4],xmm11[5,6],xmm9[7]
1747 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm11 = xmm13[3,1,2,1,4,5,6,7]
1748 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
1749 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1,2],xmm11[3],xmm12[4,5,6,7]
1750 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
1751 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm10 = ymm10[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
1752 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7],ymm10[8,9,10],ymm9[11,12,13,14,15]
1753 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[0,1,3,2]
1754 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0,1,2,3,4],xmm10[5,6,7]
1755 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
1756 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
1757 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm4, %xmm5
1758 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm10 = xmm5[2,2,2,2,4,5,6,7]
1759 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm11 = xmm4[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
1760 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6,7]
1761 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5],ymm7[6],ymm6[7]
1762 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
1763 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
1764 ; AVX2-FAST-NEXT: vpblendvb %ymm11, %ymm10, %ymm7, %ymm7
1765 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
1766 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm2 = xmm5[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
1767 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
1768 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0],xmm2[1],xmm4[2,3],xmm2[4],xmm4[5,6,7]
1769 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm1, %xmm4
1770 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm5 = ymm6[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
1771 ; AVX2-FAST-NEXT: vpblendvb %ymm11, %ymm2, %ymm5, %ymm2
1772 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm5 = xmm4[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
1773 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
1774 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
1775 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4],xmm6[5],xmm5[6,7]
1776 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
1777 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm7[0,1,2,3,4],ymm5[5,6,7]
1778 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
1779 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
1780 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4],xmm1[5],xmm4[6,7]
1781 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
1782 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
1783 ; AVX2-FAST-NEXT: vmovdqa %ymm0, (%rsi)
1784 ; AVX2-FAST-NEXT: vmovdqa %ymm3, (%rdx)
1785 ; AVX2-FAST-NEXT: vmovdqa %ymm8, (%rcx)
1786 ; AVX2-FAST-NEXT: vmovdqa %ymm9, (%r8)
1787 ; AVX2-FAST-NEXT: vmovdqa %ymm5, (%r9)
1788 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
1789 ; AVX2-FAST-NEXT: vmovdqa %ymm1, (%rax)
1790 ; AVX2-FAST-NEXT: vzeroupper
1791 ; AVX2-FAST-NEXT: retq
1792 ;
1793 ; AVX2-FAST-PERLANE-LABEL: load_i16_stride6_vf16:
1794 ; AVX2-FAST-PERLANE: # %bb.0:
1795 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm4
1796 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm5
1797 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rdi), %ymm0
1798 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rdi), %ymm3
1799 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 160(%rdi), %ymm1
1800 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 128(%rdi), %ymm2
1801 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
1802 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm6 = xmm8[u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u]
1803 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm8, %xmm9
1804 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm7 = xmm9[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
1805 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
1806 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm10
1807 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm11 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6],ymm5[7]
1808 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm6 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
1809 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm11, %xmm7
1810 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm12 = xmm7[2,1,0,3]
1811 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm7 = xmm12[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
1812 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm13 = xmm6[0,1],xmm7[2],xmm6[3],xmm7[4,5],xmm6[6,7]
1813 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm0[2,3],ymm3[2,3]
1814 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm0[0,1],ymm3[0,1]
1815 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm3 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
1816 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
1817 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1,2],ymm0[3,4,5,6,7]
1818 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm10 = ymm0[0,1,2],ymm10[3,4,5,6,7],ymm0[8,9,10],ymm10[11,12,13,14,15]
1819 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
1820 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5]
1821 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
1822 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0,1,2],xmm8[3],xmm9[4,5],xmm8[6],xmm9[7]
1823 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
1824 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm9 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
1825 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm10 = xmm12[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
1826 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm10[2],xmm9[3],xmm10[4,5],xmm9[6,7]
1827 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
1828 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm3 = ymm9[0,1,2],ymm3[3,4,5,6,7]
1829 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm8 = ymm3[0,1,2],ymm8[3,4,5,6,7],ymm3[8,9,10],ymm8[11,12,13,14,15]
1830 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm8[4,5,6,7]
1831 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
1832 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm8, %xmm9
1833 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
1834 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm10 = xmm9[0,1,2,3,6,5,6,4]
1835 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm11 = xmm8[2,1,0,3]
1836 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm8 = xmm11[0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u]
1837 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm10[4],xmm8[5,6],xmm10[7]
1838 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
1839 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm10 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7]
1840 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm12 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
1841 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm13 = xmm12[2,1,2,3]
1842 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm14 = xmm13[2,1,2,0,4,5,6,7]
1843 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm12, %xmm12
1844 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[0,3,2,1]
1845 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm15 = xmm12[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
1846 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1,2],xmm14[3],xmm15[4,5,6,7]
1847 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm15 = ymm10[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
1848 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm8 = ymm15[0,1,2],ymm8[3,4,5,6,7],ymm15[8,9,10],ymm8[11,12,13,14,15]
1849 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,6,5,4]
1850 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3,4],xmm15[5,6,7]
1851 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
1852 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
1853 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u]
1854 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4],xmm11[5,6],xmm9[7]
1855 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm11 = xmm13[3,1,2,1,4,5,6,7]
1856 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
1857 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1,2],xmm11[3],xmm12[4,5,6,7]
1858 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
1859 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm10 = ymm10[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
1860 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7],ymm10[8,9,10],ymm9[11,12,13,14,15]
1861 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[0,1,3,2]
1862 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0,1,2,3,4],xmm10[5,6,7]
1863 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
1864 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
1865 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm4, %xmm5
1866 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm10 = xmm5[2,2,2,2,4,5,6,7]
1867 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm11 = xmm4[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
1868 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6,7]
1869 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5],ymm7[6],ymm6[7]
1870 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
1871 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
1872 ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm11, %ymm10, %ymm7, %ymm7
1873 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
1874 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm2 = xmm5[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
1875 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
1876 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0],xmm2[1],xmm4[2,3],xmm2[4],xmm4[5,6,7]
1877 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm1, %xmm4
1878 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm5 = ymm6[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
1879 ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm11, %ymm2, %ymm5, %ymm2
1880 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm5 = xmm4[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
1881 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
1882 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
1883 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4],xmm6[5],xmm5[6,7]
1884 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
1885 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm5 = ymm7[0,1,2,3,4],ymm5[5,6,7]
1886 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
1887 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
1888 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4],xmm1[5],xmm4[6,7]
1889 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
1890 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
1891 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, (%rsi)
1892 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm3, (%rdx)
1893 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm8, (%rcx)
1894 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm9, (%r8)
1895 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm5, (%r9)
1896 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
1897 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, (%rax)
1898 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
1899 ; AVX2-FAST-PERLANE-NEXT: retq
1900 ;
1901 ; AVX512F-SLOW-LABEL: load_i16_stride6_vf16:
1902 ; AVX512F-SLOW: # %bb.0:
1903 ; AVX512F-SLOW-NEXT: vmovdqa 160(%rdi), %ymm0
1904 ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %ymm3
1905 ; AVX512F-SLOW-NEXT: vmovdqa 32(%rdi), %ymm4
1906 ; AVX512F-SLOW-NEXT: vmovdqa 64(%rdi), %ymm1
1907 ; AVX512F-SLOW-NEXT: vmovdqa 128(%rdi), %ymm2
1908 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
1909 ; AVX512F-SLOW-NEXT: vextracti128 $1, %ymm5, %xmm8
1910 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm6 = xmm8[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
1911 ; AVX512F-SLOW-NEXT: vpshuflw {{.*#+}} xmm7 = xmm5[2,2,2,2,4,5,6,7]
1912 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[0,1,2,2]
1913 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm7[3],xmm6[4,5],xmm7[6],xmm6[7]
1914 ; AVX512F-SLOW-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm9
1915 ; AVX512F-SLOW-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm1[2,3],mem[2,3]
1916 ; AVX512F-SLOW-NEXT: vinserti128 $1, 96(%rdi), %ymm1, %ymm7
1917 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm10 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
1918 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
1919 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
1920 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm12 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
1921 ; AVX512F-SLOW-NEXT: vextracti128 $1, %ymm11, %xmm13
1922 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm14 = xmm13[0,2,0,3]
1923 ; AVX512F-SLOW-NEXT: vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,6,6,7]
1924 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1],xmm14[2],xmm12[3],xmm14[4,5],xmm12[6,7]
1925 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm12[0,1,2],ymm1[3,4,5,6,7]
1926 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} ymm9 = ymm1[0,1,2],ymm9[3,4,5,6,7],ymm1[8,9,10],ymm9[11,12,13,14,15]
1927 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm9[4,5,6,7]
1928 ; AVX512F-SLOW-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
1929 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
1930 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3],xmm8[4,5],xmm5[6],xmm8[7]
1931 ; AVX512F-SLOW-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
1932 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm8 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
1933 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm9 = xmm13[u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u]
1934 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm10 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
1935 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0,1],xmm9[2],xmm10[3],xmm9[4,5],xmm10[6,7]
1936 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2],ymm8[3,4,5,6,7]
1937 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} ymm5 = ymm8[0,1,2],ymm5[3,4,5,6,7],ymm8[8,9,10],ymm5[11,12,13,14,15]
1938 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm5[4,5,6,7]
1939 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
1940 ; AVX512F-SLOW-NEXT: vextracti128 $1, %ymm8, %xmm9
1941 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
1942 ; AVX512F-SLOW-NEXT: vpshufhw {{.*#+}} xmm10 = xmm9[0,1,2,3,6,5,6,4]
1943 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm11 = xmm8[2,1,0,3]
1944 ; AVX512F-SLOW-NEXT: vpshuflw {{.*#+}} xmm8 = xmm11[0,0,0,0,4,5,6,7]
1945 ; AVX512F-SLOW-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,6,7]
1946 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm10[4],xmm8[5,6],xmm10[7]
1947 ; AVX512F-SLOW-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
1948 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm10 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7]
1949 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm12 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
1950 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm13 = xmm12[2,1,2,3]
1951 ; AVX512F-SLOW-NEXT: vpshuflw {{.*#+}} xmm14 = xmm13[2,1,2,0,4,5,6,7]
1952 ; AVX512F-SLOW-NEXT: vextracti128 $1, %ymm12, %xmm12
1953 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[0,3,2,1]
1954 ; AVX512F-SLOW-NEXT: vpshuflw {{.*#+}} xmm15 = xmm12[0,0,2,3,4,5,6,7]
1955 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm15 = xmm15[0,1,3,3]
1956 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1,2],xmm14[3],xmm15[4,5,6,7]
1957 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm15 = ymm10[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
1958 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} ymm8 = ymm15[0,1,2],ymm8[3,4,5,6,7],ymm15[8,9,10],ymm8[11,12,13,14,15]
1959 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3,4],xmm15[5,6,7]
1960 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
1961 ; AVX512F-SLOW-NEXT: vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
1962 ; AVX512F-SLOW-NEXT: vpshuflw {{.*#+}} xmm11 = xmm11[1,1,1,1,4,5,6,7]
1963 ; AVX512F-SLOW-NEXT: vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,7,7]
1964 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4],xmm11[5,6],xmm9[7]
1965 ; AVX512F-SLOW-NEXT: vpshuflw {{.*#+}} xmm11 = xmm13[3,1,2,1,4,5,6,7]
1966 ; AVX512F-SLOW-NEXT: vpshuflw {{.*#+}} xmm12 = xmm12[0,1,3,3,4,5,6,7]
1967 ; AVX512F-SLOW-NEXT: vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,7,7,7,7]
1968 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1,2],xmm11[3],xmm12[4,5,6,7]
1969 ; AVX512F-SLOW-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
1970 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm10 = ymm10[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
1971 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7],ymm10[8,9,10],ymm9[11,12,13,14,15]
1972 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0,1,2,3,4],xmm10[5,6,7]
1973 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
1974 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5],ymm7[6],ymm6[7]
1975 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
1976 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
1977 ; AVX512F-SLOW-NEXT: vextracti128 $1, %ymm3, %xmm4
1978 ; AVX512F-SLOW-NEXT: vpshuflw {{.*#+}} xmm10 = xmm4[2,2,2,2,4,5,6,7]
1979 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm11 = xmm3[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
1980 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6,7]
1981 ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm11 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
1982 ; AVX512F-SLOW-NEXT: vpternlogq $236, %ymm11, %ymm7, %ymm10
1983 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
1984 ; AVX512F-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm2
1985 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm7 = xmm2[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
1986 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
1987 ; AVX512F-SLOW-NEXT: vpshuflw {{.*#+}} xmm12 = xmm0[0,1,0,2,4,5,6,7]
1988 ; AVX512F-SLOW-NEXT: vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,6,6,6,6]
1989 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm7 = xmm12[0,1,2,3],xmm7[4],xmm12[5],xmm7[6,7]
1990 ; AVX512F-SLOW-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
1991 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm7 = ymm10[0,1,2,3,4],ymm7[5,6,7]
1992 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
1993 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
1994 ; AVX512F-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
1995 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
1996 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
1997 ; AVX512F-SLOW-NEXT: vpternlogq $236, %ymm11, %ymm4, %ymm3
1998 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
1999 ; AVX512F-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
2000 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
2001 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4],xmm0[5],xmm2[6,7]
2002 ; AVX512F-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
2003 ; AVX512F-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
2004 ; AVX512F-SLOW-NEXT: vmovdqa %ymm1, (%rsi)
2005 ; AVX512F-SLOW-NEXT: vmovdqa %ymm5, (%rdx)
2006 ; AVX512F-SLOW-NEXT: vmovdqa %ymm8, (%rcx)
2007 ; AVX512F-SLOW-NEXT: vmovdqa %ymm9, (%r8)
2008 ; AVX512F-SLOW-NEXT: vmovdqa %ymm7, (%r9)
2009 ; AVX512F-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
2010 ; AVX512F-SLOW-NEXT: vmovdqa %ymm0, (%rax)
2011 ; AVX512F-SLOW-NEXT: vzeroupper
2012 ; AVX512F-SLOW-NEXT: retq
2013 ;
2014 ; AVX512F-FAST-LABEL: load_i16_stride6_vf16:
2015 ; AVX512F-FAST: # %bb.0:
2016 ; AVX512F-FAST-NEXT: vmovdqa 160(%rdi), %ymm0
2017 ; AVX512F-FAST-NEXT: vmovdqa (%rdi), %ymm3
2018 ; AVX512F-FAST-NEXT: vmovdqa 32(%rdi), %ymm4
2019 ; AVX512F-FAST-NEXT: vmovdqa 64(%rdi), %ymm2
2020 ; AVX512F-FAST-NEXT: vmovdqa 128(%rdi), %ymm1
2021 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
2022 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u]
2023 ; AVX512F-FAST-NEXT: vextracti128 $1, %ymm5, %xmm8
2024 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm7 = xmm8[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
2025 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
2026 ; AVX512F-FAST-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm9
2027 ; AVX512F-FAST-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm2[2,3],mem[2,3]
2028 ; AVX512F-FAST-NEXT: vinserti128 $1, 96(%rdi), %ymm2, %ymm7
2029 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm10 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
2030 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm2 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
2031 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
2032 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm12 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
2033 ; AVX512F-FAST-NEXT: vextracti128 $1, %ymm11, %xmm13
2034 ; AVX512F-FAST-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[2,1,0,3]
2035 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm14 = xmm13[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
2036 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1],xmm14[2],xmm12[3],xmm14[4,5],xmm12[6,7]
2037 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm12[0,1,2],ymm2[3,4,5,6,7]
2038 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} ymm9 = ymm2[0,1,2],ymm9[3,4,5,6,7],ymm2[8,9,10],ymm9[11,12,13,14,15]
2039 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7]
2040 ; AVX512F-FAST-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
2041 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
2042 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3],xmm8[4,5],xmm5[6],xmm8[7]
2043 ; AVX512F-FAST-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
2044 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm8 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
2045 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm9 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
2046 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm10 = xmm13[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
2047 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm10[2],xmm9[3],xmm10[4,5],xmm9[6,7]
2048 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2],ymm8[3,4,5,6,7]
2049 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} ymm5 = ymm8[0,1,2],ymm5[3,4,5,6,7],ymm8[8,9,10],ymm5[11,12,13,14,15]
2050 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm5[4,5,6,7]
2051 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm8 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
2052 ; AVX512F-FAST-NEXT: vextracti128 $1, %ymm8, %xmm9
2053 ; AVX512F-FAST-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
2054 ; AVX512F-FAST-NEXT: vpshufhw {{.*#+}} xmm10 = xmm9[0,1,2,3,6,5,6,4]
2055 ; AVX512F-FAST-NEXT: vpshufd {{.*#+}} xmm11 = xmm8[2,1,0,3]
2056 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm8 = xmm11[0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u]
2057 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm10[4],xmm8[5,6],xmm10[7]
2058 ; AVX512F-FAST-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
2059 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm10 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7]
2060 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm12 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
2061 ; AVX512F-FAST-NEXT: vpshufd {{.*#+}} xmm13 = xmm12[2,1,2,3]
2062 ; AVX512F-FAST-NEXT: vpshuflw {{.*#+}} xmm14 = xmm13[2,1,2,0,4,5,6,7]
2063 ; AVX512F-FAST-NEXT: vextracti128 $1, %ymm12, %xmm12
2064 ; AVX512F-FAST-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[0,3,2,1]
2065 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm15 = xmm12[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
2066 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1,2],xmm14[3],xmm15[4,5,6,7]
2067 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm15 = ymm10[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
2068 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} ymm8 = ymm15[0,1,2],ymm8[3,4,5,6,7],ymm15[8,9,10],ymm8[11,12,13,14,15]
2069 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3,4],xmm15[5,6,7]
2070 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
2071 ; AVX512F-FAST-NEXT: vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
2072 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u]
2073 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4],xmm11[5,6],xmm9[7]
2074 ; AVX512F-FAST-NEXT: vpshuflw {{.*#+}} xmm11 = xmm13[3,1,2,1,4,5,6,7]
2075 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
2076 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1,2],xmm11[3],xmm12[4,5,6,7]
2077 ; AVX512F-FAST-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
2078 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm10 = ymm10[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
2079 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7],ymm10[8,9,10],ymm9[11,12,13,14,15]
2080 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0,1,2,3,4],xmm10[5,6,7]
2081 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
2082 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5],ymm7[6],ymm6[7]
2083 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
2084 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
2085 ; AVX512F-FAST-NEXT: vextracti128 $1, %ymm3, %xmm4
2086 ; AVX512F-FAST-NEXT: vpshuflw {{.*#+}} xmm10 = xmm4[2,2,2,2,4,5,6,7]
2087 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm11 = xmm3[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
2088 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6,7]
2089 ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm11 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
2090 ; AVX512F-FAST-NEXT: vpternlogq $236, %ymm11, %ymm7, %ymm10
2091 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
2092 ; AVX512F-FAST-NEXT: vextracti128 $1, %ymm0, %xmm1
2093 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
2094 ; AVX512F-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
2095 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm12 = xmm0[0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
2096 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm7 = xmm12[0,1,2,3],xmm7[4],xmm12[5],xmm7[6,7]
2097 ; AVX512F-FAST-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
2098 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm7 = ymm10[0,1,2,3,4],ymm7[5,6,7]
2099 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
2100 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
2101 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
2102 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
2103 ; AVX512F-FAST-NEXT: vpternlogq $236, %ymm11, %ymm4, %ymm3
2104 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
2105 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
2106 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6,7]
2107 ; AVX512F-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
2108 ; AVX512F-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
2109 ; AVX512F-FAST-NEXT: vmovdqa %ymm2, (%rsi)
2110 ; AVX512F-FAST-NEXT: vmovdqa %ymm5, (%rdx)
2111 ; AVX512F-FAST-NEXT: vmovdqa %ymm8, (%rcx)
2112 ; AVX512F-FAST-NEXT: vmovdqa %ymm9, (%r8)
2113 ; AVX512F-FAST-NEXT: vmovdqa %ymm7, (%r9)
2114 ; AVX512F-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
2115 ; AVX512F-FAST-NEXT: vmovdqa %ymm0, (%rax)
2116 ; AVX512F-FAST-NEXT: vzeroupper
2117 ; AVX512F-FAST-NEXT: retq
2118 ;
2119 ; AVX512BW-LABEL: load_i16_stride6_vf16:
2120 ; AVX512BW: # %bb.0:
2121 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
2122 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm2
2123 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm3
2124 ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [0,0,0,2,8,14,20,26,0,0,0,2,8,14,20,26]
2125 ; AVX512BW-NEXT: # ymm0 = mem[0,1,0,1]
2126 ; AVX512BW-NEXT: vmovdqa 160(%rdi), %ymm4
2127 ; AVX512BW-NEXT: vmovdqa 128(%rdi), %ymm5
2128 ; AVX512BW-NEXT: vpermi2w %ymm4, %ymm5, %ymm0
2129 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = <0,6,12,18,24,30,36,42,48,54,60,u,u,u,u,u>
2130 ; AVX512BW-NEXT: vpermi2w %zmm3, %zmm2, %zmm1
2131 ; AVX512BW-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
2132 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
2133 ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [0,0,0,3,9,15,21,27,0,0,0,3,9,15,21,27]
2134 ; AVX512BW-NEXT: # ymm1 = mem[0,1,0,1]
2135 ; AVX512BW-NEXT: vpermi2w %ymm4, %ymm5, %ymm1
2136 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm6 = <1,7,13,19,25,31,37,43,49,55,61,u,u,u,u,u>
2137 ; AVX512BW-NEXT: vpermi2w %zmm3, %zmm2, %zmm6
2138 ; AVX512BW-NEXT: vpblendw {{.*#+}} ymm1 = ymm6[0,1,2],ymm1[3,4,5,6,7],ymm6[8,9,10],ymm1[11,12,13,14,15]
2139 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm1[4,5,6,7]
2140 ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [0,0,0,20,26,0,6,12,0,0,0,20,26,0,6,12]
2141 ; AVX512BW-NEXT: # ymm6 = mem[0,1,0,1]
2142 ; AVX512BW-NEXT: vpermi2w %ymm5, %ymm4, %ymm6
2143 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm7 = <34,40,46,52,58,0,6,12,18,24,30,u,u,u,u,u>
2144 ; AVX512BW-NEXT: vpermi2w %zmm2, %zmm3, %zmm7
2145 ; AVX512BW-NEXT: vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7],ymm7[8,9,10],ymm6[11,12,13,14,15]
2146 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
2147 ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [0,0,0,21,27,1,7,13,0,0,0,21,27,1,7,13]
2148 ; AVX512BW-NEXT: # ymm7 = mem[0,1,0,1]
2149 ; AVX512BW-NEXT: vpermi2w %ymm5, %ymm4, %ymm7
2150 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm8 = <35,41,47,53,59,1,7,13,19,25,31,u,u,u,u,u>
2151 ; AVX512BW-NEXT: vpermi2w %zmm2, %zmm3, %zmm8
2152 ; AVX512BW-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5,6,7],ymm8[8,9,10],ymm7[11,12,13,14,15]
2153 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
2154 ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [0,0,0,6,12,18,24,30,0,0,0,6,12,18,24,30]
2155 ; AVX512BW-NEXT: # ymm8 = mem[0,1,0,1]
2156 ; AVX512BW-NEXT: vpermi2w %ymm4, %ymm5, %ymm8
2157 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm9 = <4,10,16,22,28,34,40,46,52,58,u,u,u,u,u,u>
2158 ; AVX512BW-NEXT: vpermi2w %zmm3, %zmm2, %zmm9
2159 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3,4],ymm8[5,6,7]
2160 ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [0,0,1,7,13,19,25,31,0,0,1,7,13,19,25,31]
2161 ; AVX512BW-NEXT: # ymm9 = mem[0,1,0,1]
2162 ; AVX512BW-NEXT: vpermi2w %ymm4, %ymm5, %ymm9
2163 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm4 = <5,11,17,23,29,35,41,47,53,59,u,u,u,u,u,u>
2164 ; AVX512BW-NEXT: vpermi2w %zmm3, %zmm2, %zmm4
2165 ; AVX512BW-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4],ymm9[5,6,7]
2166 ; AVX512BW-NEXT: vmovdqa %ymm0, (%rsi)
2167 ; AVX512BW-NEXT: vmovdqa %ymm1, (%rdx)
2168 ; AVX512BW-NEXT: vmovdqa %ymm6, (%rcx)
2169 ; AVX512BW-NEXT: vmovdqa %ymm7, (%r8)
2170 ; AVX512BW-NEXT: vmovdqa %ymm8, (%r9)
2171 ; AVX512BW-NEXT: vmovdqa %ymm2, (%rax)
2172 ; AVX512BW-NEXT: vzeroupper
2173 ; AVX512BW-NEXT: retq
2174 %wide.vec = load <96 x i16>, ptr %in.vec, align 64
2175 %strided.vec0 = shufflevector <96 x i16> %wide.vec, <96 x i16> poison, <16 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42, i32 48, i32 54, i32 60, i32 66, i32 72, i32 78, i32 84, i32 90>
2176 %strided.vec1 = shufflevector <96 x i16> %wide.vec, <96 x i16> poison, <16 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43, i32 49, i32 55, i32 61, i32 67, i32 73, i32 79, i32 85, i32 91>
2177 %strided.vec2 = shufflevector <96 x i16> %wide.vec, <96 x i16> poison, <16 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44, i32 50, i32 56, i32 62, i32 68, i32 74, i32 80, i32 86, i32 92>
2178 %strided.vec3 = shufflevector <96 x i16> %wide.vec, <96 x i16> poison, <16 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45, i32 51, i32 57, i32 63, i32 69, i32 75, i32 81, i32 87, i32 93>
2179 %strided.vec4 = shufflevector <96 x i16> %wide.vec, <96 x i16> poison, <16 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46, i32 52, i32 58, i32 64, i32 70, i32 76, i32 82, i32 88, i32 94>
2180 %strided.vec5 = shufflevector <96 x i16> %wide.vec, <96 x i16> poison, <16 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47, i32 53, i32 59, i32 65, i32 71, i32 77, i32 83, i32 89, i32 95>
2181 store <16 x i16> %strided.vec0, ptr %out.vec0, align 64
2182 store <16 x i16> %strided.vec1, ptr %out.vec1, align 64
2183 store <16 x i16> %strided.vec2, ptr %out.vec2, align 64
2184 store <16 x i16> %strided.vec3, ptr %out.vec3, align 64
2185 store <16 x i16> %strided.vec4, ptr %out.vec4, align 64
2186 store <16 x i16> %strided.vec5, ptr %out.vec5, align 64
2187 ret void
2188 }
2190 define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
2191 ; SSE-LABEL: load_i16_stride6_vf32:
2192 ; SSE: # %bb.0:
2193 ; SSE-NEXT: subq $456, %rsp # imm = 0x1C8
2194 ; SSE-NEXT: movdqa 304(%rdi), %xmm9
2195 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2196 ; SSE-NEXT: movdqa 320(%rdi), %xmm5
2197 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2198 ; SSE-NEXT: movdqa 64(%rdi), %xmm3
2199 ; SSE-NEXT: movdqa 80(%rdi), %xmm0
2200 ; SSE-NEXT: movdqa (%rdi), %xmm4
2201 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2202 ; SSE-NEXT: movdqa 16(%rdi), %xmm6
2203 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2204 ; SSE-NEXT: movdqa 32(%rdi), %xmm1
2205 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2206 ; SSE-NEXT: movdqa 48(%rdi), %xmm7
2207 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2208 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,2,4,5,6,7]
2209 ; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,65535,0,0,0,65535,65535]
2210 ; SSE-NEXT: movdqa %xmm10, %xmm2
2211 ; SSE-NEXT: pandn %xmm1, %xmm2
2212 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,1,0,3]
2213 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2214 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
2215 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
2216 ; SSE-NEXT: pand %xmm10, %xmm1
2217 ; SSE-NEXT: por %xmm2, %xmm1
2218 ; SSE-NEXT: movdqa %xmm1, %xmm2
2219 ; SSE-NEXT: movdqa %xmm3, %xmm1
2220 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,2,3,3]
2221 ; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
2222 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2223 ; SSE-NEXT: movdqa %xmm0, %xmm4
2224 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm3[3,0]
2225 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2226 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2227 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm0[0,0]
2228 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm0[2,3]
2229 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2230 ; SSE-NEXT: pslld $16, %xmm0
2231 ; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
2232 ; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
2233 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,3,2,3]
2234 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[0,1,0,2,4,5,6,7]
2235 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[1,3]
2236 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,0]
2237 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2238 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm5[0,1,1,2,4,5,6,7]
2239 ; SSE-NEXT: movdqa %xmm10, %xmm4
2240 ; SSE-NEXT: movdqa %xmm10, %xmm1
2241 ; SSE-NEXT: pandn %xmm0, %xmm1
2242 ; SSE-NEXT: movdqa 288(%rdi), %xmm0
2243 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2244 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
2245 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2246 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
2247 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm9[2],xmm0[3],xmm9[3]
2248 ; SSE-NEXT: pand %xmm10, %xmm0
2249 ; SSE-NEXT: por %xmm1, %xmm0
2250 ; SSE-NEXT: movdqa %xmm0, %xmm5
2251 ; SSE-NEXT: movdqa 352(%rdi), %xmm2
2252 ; SSE-NEXT: movdqa 368(%rdi), %xmm1
2253 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,2,3,3]
2254 ; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
2255 ; SSE-NEXT: movdqa %xmm1, %xmm0
2256 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[3,0]
2257 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2258 ; SSE-NEXT: movdqa %xmm2, %xmm0
2259 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2260 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm1[0,0]
2261 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[2,3]
2262 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2263 ; SSE-NEXT: pslld $16, %xmm1
2264 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
2265 ; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
2266 ; SSE-NEXT: movdqa 336(%rdi), %xmm1
2267 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2268 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
2269 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[0,1,0,2,4,5,6,7]
2270 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm2[1,3]
2271 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm0[2,0]
2272 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2273 ; SSE-NEXT: movdqa 224(%rdi), %xmm0
2274 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2275 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,2,4,5,6,7]
2276 ; SSE-NEXT: movdqa %xmm10, %xmm2
2277 ; SSE-NEXT: pandn %xmm0, %xmm2
2278 ; SSE-NEXT: movdqa 208(%rdi), %xmm5
2279 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2280 ; SSE-NEXT: movdqa 192(%rdi), %xmm0
2281 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2282 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
2283 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2284 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
2285 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
2286 ; SSE-NEXT: pand %xmm10, %xmm0
2287 ; SSE-NEXT: por %xmm2, %xmm0
2288 ; SSE-NEXT: movdqa %xmm0, %xmm2
2289 ; SSE-NEXT: movdqa 256(%rdi), %xmm5
2290 ; SSE-NEXT: movdqa 272(%rdi), %xmm7
2291 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm5[2,2,3,3]
2292 ; SSE-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm7[0],xmm12[1],xmm7[1],xmm12[2],xmm7[2],xmm12[3],xmm7[3]
2293 ; SSE-NEXT: movdqa %xmm7, %xmm0
2294 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm5[3,0]
2295 ; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
2296 ; SSE-NEXT: movdqa %xmm5, %xmm0
2297 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2298 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm7[0,0]
2299 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm7[2,3]
2300 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2301 ; SSE-NEXT: pslld $16, %xmm7
2302 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
2303 ; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
2304 ; SSE-NEXT: movdqa 240(%rdi), %xmm5
2305 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2306 ; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm5[0,3,2,3]
2307 ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm14[0,1,0,2,4,5,6,7]
2308 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm7[1,3]
2309 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0]
2310 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2311 ; SSE-NEXT: movdqa 128(%rdi), %xmm0
2312 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2313 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,2,4,5,6,7]
2314 ; SSE-NEXT: movdqa %xmm10, %xmm7
2315 ; SSE-NEXT: pandn %xmm0, %xmm7
2316 ; SSE-NEXT: movdqa 112(%rdi), %xmm11
2317 ; SSE-NEXT: movdqa 96(%rdi), %xmm0
2318 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2319 ; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm0[0,1,0,3]
2320 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm15[0,1,2,3,4,6,6,7]
2321 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm11[2],xmm0[3],xmm11[3]
2322 ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2323 ; SSE-NEXT: pand %xmm10, %xmm0
2324 ; SSE-NEXT: por %xmm7, %xmm0
2325 ; SSE-NEXT: movdqa 160(%rdi), %xmm5
2326 ; SSE-NEXT: movdqa 176(%rdi), %xmm9
2327 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[2,2,3,3]
2328 ; SSE-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3]
2329 ; SSE-NEXT: movdqa %xmm9, %xmm2
2330 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm5[3,0]
2331 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2332 ; SSE-NEXT: movdqa %xmm5, %xmm10
2333 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2334 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm9[0,0]
2335 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm9[2,3]
2336 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2337 ; SSE-NEXT: pslld $16, %xmm9
2338 ; SSE-NEXT: psrldq {{.*#+}} xmm10 = xmm10[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
2339 ; SSE-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
2340 ; SSE-NEXT: movdqa 144(%rdi), %xmm2
2341 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2342 ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm2[0,3,2,3]
2343 ; SSE-NEXT: pshuflw {{.*#+}} xmm13 = xmm9[0,1,0,2,4,5,6,7]
2344 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[3,1],xmm13[1,3]
2345 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm10[2,0]
2346 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2347 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
2348 ; SSE-NEXT: movdqa %xmm5, %xmm10
2349 ; SSE-NEXT: psrld $16, %xmm10
2350 ; SSE-NEXT: pshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2351 ; SSE-NEXT: # xmm0 = mem[0,1,2,3,5,7,6,7]
2352 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm10[2],xmm0[3],xmm10[3]
2353 ; SSE-NEXT: movdqa %xmm4, %xmm10
2354 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
2355 ; SSE-NEXT: pandn %xmm2, %xmm10
2356 ; SSE-NEXT: pand %xmm4, %xmm0
2357 ; SSE-NEXT: movdqa %xmm4, %xmm13
2358 ; SSE-NEXT: por %xmm10, %xmm0
2359 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[0,1,1,3,4,5,6,7]
2360 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
2361 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,1],xmm8[1,3]
2362 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,0]
2363 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2364 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
2365 ; SSE-NEXT: movdqa %xmm7, %xmm8
2366 ; SSE-NEXT: psrld $16, %xmm8
2367 ; SSE-NEXT: pshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2368 ; SSE-NEXT: # xmm0 = mem[0,1,2,3,5,7,6,7]
2369 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm8[2],xmm0[3],xmm8[3]
2370 ; SSE-NEXT: movdqa %xmm13, %xmm8
2371 ; SSE-NEXT: movdqa %xmm13, %xmm4
2372 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
2373 ; SSE-NEXT: pandn %xmm10, %xmm4
2374 ; SSE-NEXT: pand %xmm13, %xmm0
2375 ; SSE-NEXT: por %xmm4, %xmm0
2376 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
2377 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm1[1,3]
2378 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,0]
2379 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2380 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2381 ; SSE-NEXT: psrld $16, %xmm1
2382 ; SSE-NEXT: pshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2383 ; SSE-NEXT: # xmm0 = mem[0,1,2,3,5,7,6,7]
2384 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
2385 ; SSE-NEXT: movdqa %xmm13, %xmm1
2386 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
2387 ; SSE-NEXT: pandn %xmm13, %xmm1
2388 ; SSE-NEXT: pand %xmm8, %xmm0
2389 ; SSE-NEXT: por %xmm1, %xmm0
2390 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm14[0,1,1,3,4,5,6,7]
2391 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[3,1],xmm1[1,3]
2392 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm12[2,0]
2393 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2394 ; SSE-NEXT: psrld $16, %xmm11
2395 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm15[0,1,2,3,5,7,6,7]
2396 ; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm11[2],xmm3[3],xmm11[3]
2397 ; SSE-NEXT: pand %xmm8, %xmm3
2398 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
2399 ; SSE-NEXT: pandn %xmm11, %xmm8
2400 ; SSE-NEXT: por %xmm3, %xmm8
2401 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm9[0,1,1,3,4,5,6,7]
2402 ; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,1],xmm1[1,3]
2403 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,1],xmm6[2,0]
2404 ; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2405 ; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
2406 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
2407 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[1,1,1,1]
2408 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
2409 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,0,0,65535,65535,65535]
2410 ; SSE-NEXT: movdqa %xmm1, %xmm4
2411 ; SSE-NEXT: pandn %xmm2, %xmm4
2412 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
2413 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm5[0,0]
2414 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm5[2,3]
2415 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[0,2,2,3,4,5,6,7]
2416 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
2417 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm0[1,0,2,3,4,5,6,7]
2418 ; SSE-NEXT: pand %xmm1, %xmm6
2419 ; SSE-NEXT: por %xmm4, %xmm6
2420 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2421 ; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2422 ; SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,2]
2423 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2424 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
2425 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
2426 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm0[0,1,2,3,4,6,5,4]
2427 ; SSE-NEXT: movdqa {{.*#+}} xmm15 = [65535,65535,65535,65535,65535,0,0,0]
2428 ; SSE-NEXT: movdqa %xmm15, %xmm0
2429 ; SSE-NEXT: pandn %xmm4, %xmm0
2430 ; SSE-NEXT: pand %xmm15, %xmm6
2431 ; SSE-NEXT: por %xmm6, %xmm0
2432 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2433 ; SSE-NEXT: movdqa %xmm10, %xmm4
2434 ; SSE-NEXT: psrldq {{.*#+}} xmm4 = xmm4[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
2435 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
2436 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm12[1,1,1,1]
2437 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
2438 ; SSE-NEXT: movdqa %xmm1, %xmm6
2439 ; SSE-NEXT: pandn %xmm4, %xmm6
2440 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
2441 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,0],xmm7[0,0]
2442 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm7[2,3]
2443 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm4[0,2,2,3,4,5,6,7]
2444 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,3,2,3]
2445 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[1,0,2,3,4,5,6,7]
2446 ; SSE-NEXT: pand %xmm1, %xmm8
2447 ; SSE-NEXT: por %xmm6, %xmm8
2448 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2449 ; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2450 ; SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,2]
2451 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2452 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm0[0,1,2,3,4,6,6,7]
2453 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,0]
2454 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,5,4]
2455 ; SSE-NEXT: movdqa %xmm15, %xmm0
2456 ; SSE-NEXT: pandn %xmm5, %xmm0
2457 ; SSE-NEXT: pand %xmm15, %xmm8
2458 ; SSE-NEXT: por %xmm8, %xmm0
2459 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2460 ; SSE-NEXT: movdqa %xmm13, %xmm5
2461 ; SSE-NEXT: psrldq {{.*#+}} xmm5 = xmm5[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
2462 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
2463 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm10[1,1,1,1]
2464 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0]
2465 ; SSE-NEXT: movdqa %xmm1, %xmm6
2466 ; SSE-NEXT: pandn %xmm5, %xmm6
2467 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
2468 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2469 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm0[0,0]
2470 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm0[2,3]
2471 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm5[0,2,2,3,4,5,6,7]
2472 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,3,2,3]
2473 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[1,0,2,3,4,5,6,7]
2474 ; SSE-NEXT: pand %xmm1, %xmm8
2475 ; SSE-NEXT: por %xmm6, %xmm8
2476 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2477 ; SSE-NEXT: shufps $132, (%rsp), %xmm0 # 16-byte Folded Reload
2478 ; SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,2]
2479 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2480 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,6,6,7]
2481 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
2482 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,5,4]
2483 ; SSE-NEXT: movdqa %xmm15, %xmm0
2484 ; SSE-NEXT: pandn %xmm2, %xmm0
2485 ; SSE-NEXT: pand %xmm15, %xmm8
2486 ; SSE-NEXT: por %xmm8, %xmm0
2487 ; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
2488 ; SSE-NEXT: psrldq {{.*#+}} xmm11 = xmm11[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
2489 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
2490 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm9[1,1,1,1]
2491 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm11 = xmm11[0],xmm6[0]
2492 ; SSE-NEXT: movdqa %xmm1, %xmm6
2493 ; SSE-NEXT: pandn %xmm11, %xmm6
2494 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
2495 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2496 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm0[0,0]
2497 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[2,3]
2498 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm2[0,2,2,3,4,5,6,7]
2499 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,3,2,3]
2500 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[1,0,2,3,4,5,6,7]
2501 ; SSE-NEXT: pand %xmm1, %xmm8
2502 ; SSE-NEXT: por %xmm6, %xmm8
2503 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
2504 ; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
2505 ; SSE-NEXT: # xmm11 = xmm11[0,1],mem[0,2]
2506 ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm11[0,1,2,3,4,6,6,7]
2507 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,2,0]
2508 ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,5,4]
2509 ; SSE-NEXT: movdqa %xmm15, %xmm0
2510 ; SSE-NEXT: pandn %xmm6, %xmm0
2511 ; SSE-NEXT: pand %xmm15, %xmm8
2512 ; SSE-NEXT: por %xmm8, %xmm0
2513 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2514 ; SSE-NEXT: movdqa %xmm14, %xmm0
2515 ; SSE-NEXT: movdqa %xmm14, %xmm6
2516 ; SSE-NEXT: psrlq $48, %xmm6
2517 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
2518 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm8[2,2,3,3]
2519 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm6[0]
2520 ; SSE-NEXT: movdqa %xmm1, %xmm6
2521 ; SSE-NEXT: pandn %xmm7, %xmm6
2522 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
2523 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
2524 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,3,4,5,6,7]
2525 ; SSE-NEXT: pand %xmm1, %xmm3
2526 ; SSE-NEXT: por %xmm6, %xmm3
2527 ; SSE-NEXT: pshufhw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
2528 ; SSE-NEXT: # xmm6 = mem[0,1,2,3,7,5,6,7]
2529 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,2]
2530 ; SSE-NEXT: movdqa %xmm15, %xmm7
2531 ; SSE-NEXT: pandn %xmm6, %xmm7
2532 ; SSE-NEXT: pand %xmm15, %xmm3
2533 ; SSE-NEXT: por %xmm3, %xmm7
2534 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2535 ; SSE-NEXT: movdqa %xmm12, %xmm3
2536 ; SSE-NEXT: psrlq $48, %xmm3
2537 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
2538 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm12[2,2,3,3]
2539 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm3[0]
2540 ; SSE-NEXT: movdqa %xmm1, %xmm3
2541 ; SSE-NEXT: pandn %xmm6, %xmm3
2542 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,1,2,3,4,5,6,7]
2543 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,3]
2544 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,3,3,4,5,6,7]
2545 ; SSE-NEXT: pand %xmm1, %xmm4
2546 ; SSE-NEXT: por %xmm3, %xmm4
2547 ; SSE-NEXT: pshufhw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2548 ; SSE-NEXT: # xmm3 = mem[0,1,2,3,7,5,6,7]
2549 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
2550 ; SSE-NEXT: movdqa %xmm15, %xmm6
2551 ; SSE-NEXT: pandn %xmm3, %xmm6
2552 ; SSE-NEXT: pand %xmm15, %xmm4
2553 ; SSE-NEXT: por %xmm4, %xmm6
2554 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2555 ; SSE-NEXT: movdqa %xmm10, %xmm3
2556 ; SSE-NEXT: movdqa %xmm10, %xmm14
2557 ; SSE-NEXT: psrlq $48, %xmm3
2558 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm13[2,2,3,3]
2559 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
2560 ; SSE-NEXT: movdqa %xmm1, %xmm3
2561 ; SSE-NEXT: pandn %xmm4, %xmm3
2562 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm5[3,1,2,3,4,5,6,7]
2563 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,3]
2564 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,3,3,4,5,6,7]
2565 ; SSE-NEXT: pand %xmm1, %xmm4
2566 ; SSE-NEXT: por %xmm3, %xmm4
2567 ; SSE-NEXT: pshufhw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2568 ; SSE-NEXT: # xmm3 = mem[0,1,2,3,7,5,6,7]
2569 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
2570 ; SSE-NEXT: movdqa %xmm15, %xmm5
2571 ; SSE-NEXT: pandn %xmm3, %xmm5
2572 ; SSE-NEXT: pand %xmm15, %xmm4
2573 ; SSE-NEXT: por %xmm4, %xmm5
2574 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2575 ; SSE-NEXT: movdqa %xmm9, %xmm3
2576 ; SSE-NEXT: psrlq $48, %xmm3
2577 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
2578 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm7[2,2,3,3]
2579 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
2580 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
2581 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
2582 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
2583 ; SSE-NEXT: pand %xmm1, %xmm2
2584 ; SSE-NEXT: pandn %xmm4, %xmm1
2585 ; SSE-NEXT: por %xmm2, %xmm1
2586 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm11[0,1,2,3,7,5,6,7]
2587 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
2588 ; SSE-NEXT: movdqa %xmm15, %xmm3
2589 ; SSE-NEXT: pandn %xmm2, %xmm3
2590 ; SSE-NEXT: pand %xmm15, %xmm1
2591 ; SSE-NEXT: por %xmm1, %xmm3
2592 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2593 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
2594 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
2595 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
2596 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
2597 ; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
2598 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,1,0,3]
2599 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,4,5,4,6]
2600 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm0[1]
2601 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm3[0],xmm1[1,2,3]
2602 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
2603 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm10[0,2,2,3,4,5,6,7]
2604 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
2605 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,6]
2606 ; SSE-NEXT: movdqa %xmm15, %xmm11
2607 ; SSE-NEXT: pandn %xmm3, %xmm11
2608 ; SSE-NEXT: andps %xmm15, %xmm1
2609 ; SSE-NEXT: por %xmm1, %xmm11
2610 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
2611 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[1,1,1,1]
2612 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2613 ; SSE-NEXT: # xmm3 = mem[2,3,2,3]
2614 ; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
2615 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[0,1,0,3]
2616 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2617 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm0[0,1,2,3,4,5,4,6]
2618 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
2619 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm4 = xmm4[1],xmm12[1]
2620 ; SSE-NEXT: movss {{.*#+}} xmm4 = xmm3[0],xmm4[1,2,3]
2621 ; SSE-NEXT: pshuflw $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2622 ; SSE-NEXT: # xmm3 = mem[0,2,2,3,4,5,6,7]
2623 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
2624 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,6]
2625 ; SSE-NEXT: movdqa %xmm15, %xmm5
2626 ; SSE-NEXT: pandn %xmm3, %xmm5
2627 ; SSE-NEXT: andps %xmm15, %xmm4
2628 ; SSE-NEXT: por %xmm4, %xmm5
2629 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2630 ; SSE-NEXT: # xmm3 = mem[1,1,1,1]
2631 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
2632 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
2633 ; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
2634 ; SSE-NEXT: pshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2635 ; SSE-NEXT: # xmm0 = mem[0,1,0,3]
2636 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2637 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm0[0,1,2,3,4,5,4,6]
2638 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm14[1]
2639 ; SSE-NEXT: movss {{.*#+}} xmm3 = xmm4[0],xmm3[1,2,3]
2640 ; SSE-NEXT: pshuflw $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
2641 ; SSE-NEXT: # xmm4 = mem[0,2,2,3,4,5,6,7]
2642 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
2643 ; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm4[0,1,2,3,4,5,4,6]
2644 ; SSE-NEXT: movdqa %xmm15, %xmm4
2645 ; SSE-NEXT: pandn %xmm14, %xmm4
2646 ; SSE-NEXT: andps %xmm15, %xmm3
2647 ; SSE-NEXT: por %xmm3, %xmm4
2648 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
2649 ; SSE-NEXT: # xmm14 = mem[1,1,1,1]
2650 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2651 ; SSE-NEXT: # xmm3 = mem[2,3,2,3]
2652 ; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3]
2653 ; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm7[0,1,0,3]
2654 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm14[0,1,2,3,4,5,4,6]
2655 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm9[1]
2656 ; SSE-NEXT: movss {{.*#+}} xmm0 = xmm3[0],xmm0[1,2,3]
2657 ; SSE-NEXT: pshuflw $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
2658 ; SSE-NEXT: # xmm3 = mem[0,2,2,3,4,5,6,7]
2659 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
2660 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm3[0,1,2,3,4,5,4,6]
2661 ; SSE-NEXT: movdqa %xmm15, %xmm3
2662 ; SSE-NEXT: pandn %xmm1, %xmm3
2663 ; SSE-NEXT: andps %xmm15, %xmm0
2664 ; SSE-NEXT: por %xmm0, %xmm3
2665 ; SSE-NEXT: movdqa %xmm2, %xmm1
2666 ; SSE-NEXT: psrlq $48, %xmm1
2667 ; SSE-NEXT: psrldq {{.*#+}} xmm6 = xmm6[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
2668 ; SSE-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3]
2669 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2670 ; SSE-NEXT: psrld $16, %xmm1
2671 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,4,5,5,7]
2672 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
2673 ; SSE-NEXT: movss {{.*#+}} xmm0 = xmm6[0],xmm0[1,2,3]
2674 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm10[3,1,2,3,4,5,6,7]
2675 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
2676 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,7]
2677 ; SSE-NEXT: movdqa %xmm15, %xmm2
2678 ; SSE-NEXT: pandn %xmm1, %xmm2
2679 ; SSE-NEXT: andps %xmm15, %xmm0
2680 ; SSE-NEXT: por %xmm0, %xmm2
2681 ; SSE-NEXT: psrlq $48, %xmm13
2682 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2683 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
2684 ; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3]
2685 ; SSE-NEXT: movdqa %xmm0, %xmm1
2686 ; SSE-NEXT: psrld $16, %xmm12
2687 ; SSE-NEXT: pshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2688 ; SSE-NEXT: # xmm0 = mem[0,1,2,3,4,5,5,7]
2689 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm12[1]
2690 ; SSE-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
2691 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
2692 ; SSE-NEXT: # xmm1 = mem[3,1,2,3,4,5,6,7]
2693 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
2694 ; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm1[0,1,2,3,4,4,5,7]
2695 ; SSE-NEXT: movdqa %xmm15, %xmm1
2696 ; SSE-NEXT: pandn %xmm8, %xmm1
2697 ; SSE-NEXT: andps %xmm15, %xmm0
2698 ; SSE-NEXT: por %xmm0, %xmm1
2699 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
2700 ; SSE-NEXT: psrlq $48, %xmm6
2701 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2702 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
2703 ; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
2704 ; SSE-NEXT: movdqa %xmm0, %xmm6
2705 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
2706 ; SSE-NEXT: psrld $16, %xmm7
2707 ; SSE-NEXT: pshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
2708 ; SSE-NEXT: # xmm0 = mem[0,1,2,3,4,5,5,7]
2709 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm7[1]
2710 ; SSE-NEXT: movss {{.*#+}} xmm0 = xmm6[0],xmm0[1,2,3]
2711 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
2712 ; SSE-NEXT: # xmm8 = mem[3,1,2,3,4,5,6,7]
2713 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,1,0,3]
2714 ; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,5,7]
2715 ; SSE-NEXT: movdqa %xmm15, %xmm9
2716 ; SSE-NEXT: pandn %xmm8, %xmm9
2717 ; SSE-NEXT: andps %xmm15, %xmm0
2718 ; SSE-NEXT: por %xmm0, %xmm9
2719 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
2720 ; SSE-NEXT: psrlq $48, %xmm6
2721 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2722 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
2723 ; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
2724 ; SSE-NEXT: movdqa %xmm0, %xmm6
2725 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
2726 ; SSE-NEXT: psrld $16, %xmm7
2727 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm14[0,1,2,3,4,5,5,7]
2728 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm7[1]
2729 ; SSE-NEXT: movss {{.*#+}} xmm0 = xmm6[0],xmm0[1,2,3]
2730 ; SSE-NEXT: andps %xmm15, %xmm0
2731 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
2732 ; SSE-NEXT: # xmm8 = mem[3,1,2,3,4,5,6,7]
2733 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,1,0,3]
2734 ; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,5,7]
2735 ; SSE-NEXT: pandn %xmm8, %xmm15
2736 ; SSE-NEXT: por %xmm0, %xmm15
2737 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2738 ; SSE-NEXT: movaps %xmm0, 16(%rsi)
2739 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2740 ; SSE-NEXT: movaps %xmm0, 32(%rsi)
2741 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2742 ; SSE-NEXT: movaps %xmm0, 48(%rsi)
2743 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2744 ; SSE-NEXT: movaps %xmm0, (%rsi)
2745 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2746 ; SSE-NEXT: movaps %xmm0, 16(%rdx)
2747 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2748 ; SSE-NEXT: movaps %xmm0, 32(%rdx)
2749 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2750 ; SSE-NEXT: movaps %xmm0, 48(%rdx)
2751 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2752 ; SSE-NEXT: movaps %xmm0, (%rdx)
2753 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2754 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
2755 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
2756 ; SSE-NEXT: movaps %xmm0, 32(%rcx)
2757 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2758 ; SSE-NEXT: movaps %xmm0, 48(%rcx)
2759 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2760 ; SSE-NEXT: movaps %xmm0, (%rcx)
2761 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2762 ; SSE-NEXT: movaps %xmm0, 16(%r8)
2763 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2764 ; SSE-NEXT: movaps %xmm0, 32(%r8)
2765 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2766 ; SSE-NEXT: movaps %xmm0, 48(%r8)
2767 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
2768 ; SSE-NEXT: movaps %xmm0, (%r8)
2769 ; SSE-NEXT: movdqa %xmm3, 16(%r9)
2770 ; SSE-NEXT: movdqa %xmm4, 32(%r9)
2771 ; SSE-NEXT: movdqa %xmm5, 48(%r9)
2772 ; SSE-NEXT: movdqa %xmm11, (%r9)
2773 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
2774 ; SSE-NEXT: movdqa %xmm15, 16(%rax)
2775 ; SSE-NEXT: movdqa %xmm9, 32(%rax)
2776 ; SSE-NEXT: movdqa %xmm1, 48(%rax)
2777 ; SSE-NEXT: movdqa %xmm2, (%rax)
2778 ; SSE-NEXT: addq $456, %rsp # imm = 0x1C8
2779 ; SSE-NEXT: retq
2780 ;
2781 ; AVX1-ONLY-LABEL: load_i16_stride6_vf32:
2782 ; AVX1-ONLY: # %bb.0:
2783 ; AVX1-ONLY-NEXT: subq $552, %rsp # imm = 0x228
2784 ; AVX1-ONLY-NEXT: vmovdqa 96(%rdi), %xmm0
2785 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2786 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm11 = xmm0[0,1,0,3]
2787 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm0 = xmm11[0,1,2,3,4,6,6,7]
2788 ; AVX1-ONLY-NEXT: vmovdqa 112(%rdi), %xmm1
2789 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2790 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
2791 ; AVX1-ONLY-NEXT: vmovdqa 80(%rdi), %xmm1
2792 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2793 ; AVX1-ONLY-NEXT: vpslld $16, %xmm1, %xmm1
2794 ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm3
2795 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm2 = xmm3[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
2796 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, %xmm12
2797 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2798 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
2799 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
2800 ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm3
2801 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2802 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm4
2803 ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm1
2804 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2805 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm2
2806 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2807 ; AVX1-ONLY-NEXT: vpsrlq $16, %xmm1, %xmm1
2808 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm7 = xmm2[0,3,2,3]
2809 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm2 = xmm7[0,1,0,2,4,5,6,7]
2810 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
2811 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm8 = xmm3[0,1,0,3]
2812 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,4,6,6,7]
2813 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
2814 ; AVX1-ONLY-NEXT: vmovdqa %xmm4, %xmm10
2815 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3,4,5],xmm2[6,7]
2816 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
2817 ; AVX1-ONLY-NEXT: vmovdqa 176(%rdi), %xmm1
2818 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2819 ; AVX1-ONLY-NEXT: vpslld $16, %xmm1, %xmm1
2820 ; AVX1-ONLY-NEXT: vmovdqa 160(%rdi), %xmm2
2821 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2822 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm2 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
2823 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
2824 ; AVX1-ONLY-NEXT: vmovdqa 128(%rdi), %xmm3
2825 ; AVX1-ONLY-NEXT: vpsrlq $16, %xmm3, %xmm2
2826 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, %xmm14
2827 ; AVX1-ONLY-NEXT: vmovdqa 144(%rdi), %xmm3
2828 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2829 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[0,3,2,3]
2830 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm3 = xmm6[0,1,0,2,4,5,6,7]
2831 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
2832 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,5],xmm1[6,7]
2833 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0]
2834 ; AVX1-ONLY-NEXT: vandps %ymm2, %ymm0, %ymm0
2835 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2836 ; AVX1-ONLY-NEXT: vandnps %ymm1, %ymm2, %ymm1
2837 ; AVX1-ONLY-NEXT: vmovaps %ymm2, %ymm5
2838 ; AVX1-ONLY-NEXT: vorps %ymm1, %ymm0, %ymm0
2839 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2840 ; AVX1-ONLY-NEXT: vmovdqa 272(%rdi), %xmm0
2841 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
2842 ; AVX1-ONLY-NEXT: vpslld $16, %xmm0, %xmm0
2843 ; AVX1-ONLY-NEXT: vmovdqa 256(%rdi), %xmm1
2844 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2845 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
2846 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
2847 ; AVX1-ONLY-NEXT: vmovdqa 288(%rdi), %xmm1
2848 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2849 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm13 = xmm1[0,1,0,3]
2850 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm1 = xmm13[0,1,2,3,4,6,6,7]
2851 ; AVX1-ONLY-NEXT: vmovdqa 304(%rdi), %xmm2
2852 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2853 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
2854 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
2855 ; AVX1-ONLY-NEXT: vmovdqa 224(%rdi), %xmm1
2856 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2857 ; AVX1-ONLY-NEXT: vpsrlq $16, %xmm1, %xmm1
2858 ; AVX1-ONLY-NEXT: vmovdqa 240(%rdi), %xmm2
2859 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2860 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,3,2,3]
2861 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[0,1,0,2,4,5,6,7]
2862 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
2863 ; AVX1-ONLY-NEXT: vmovdqa 192(%rdi), %xmm2
2864 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2865 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[0,1,0,3]
2866 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm9 = xmm4[0,1,2,3,4,6,6,7]
2867 ; AVX1-ONLY-NEXT: vmovdqa 208(%rdi), %xmm2
2868 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2869 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm9 = xmm9[2],xmm2[2],xmm9[3],xmm2[3]
2870 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm9[0,1,2],xmm1[3,4,5],xmm9[6,7]
2871 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2],ymm0[3,4,5,6,7]
2872 ; AVX1-ONLY-NEXT: vmovdqa 368(%rdi), %xmm0
2873 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2874 ; AVX1-ONLY-NEXT: vpslld $16, %xmm0, %xmm1
2875 ; AVX1-ONLY-NEXT: vmovdqa 352(%rdi), %xmm0
2876 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2877 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm9 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
2878 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1],xmm9[2],xmm1[2],xmm9[3],xmm1[3]
2879 ; AVX1-ONLY-NEXT: vmovdqa 320(%rdi), %xmm0
2880 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2881 ; AVX1-ONLY-NEXT: vpsrlq $16, %xmm0, %xmm0
2882 ; AVX1-ONLY-NEXT: vmovdqa 336(%rdi), %xmm1
2883 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2884 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
2885 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm15 = xmm1[0,1,0,2,4,5,6,7]
2886 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
2887 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm9[6,7]
2888 ; AVX1-ONLY-NEXT: vandps %ymm5, %ymm2, %ymm2
2889 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
2890 ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm5, %ymm0
2891 ; AVX1-ONLY-NEXT: vmovaps %ymm5, %ymm9
2892 ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm2, %ymm0
2893 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2894 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm0 = xmm11[0,1,2,3,5,7,6,7]
2895 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
2896 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm11, %xmm2
2897 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
2898 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm2 = xmm12[2,2,3,3]
2899 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
2900 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3]
2901 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
2902 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm2 = xmm7[0,1,1,3,4,5,6,7]
2903 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
2904 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm7 = xmm5[1,1,1,1]
2905 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1]
2906 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm7 = xmm8[0,1,2,3,5,7,6,7]
2907 ; AVX1-ONLY-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2908 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm10, %xmm8
2909 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm7[2],xmm8[2],xmm7[3],xmm8[3]
2910 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm7[0,1,2],xmm2[3,4,5],xmm7[6,7]
2911 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
2912 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm2 = xmm6[0,1,1,3,4,5,6,7]
2913 ; AVX1-ONLY-NEXT: vmovdqa %xmm14, %xmm7
2914 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm6 = xmm14[1,1,1,1]
2915 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
2916 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
2917 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm6 = xmm14[2,2,3,3]
2918 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
2919 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3]
2920 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5],xmm6[6,7]
2921 ; AVX1-ONLY-NEXT: vandps %ymm0, %ymm9, %ymm0
2922 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
2923 ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm9, %ymm2
2924 ; AVX1-ONLY-NEXT: vmovaps %ymm9, %ymm6
2925 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm0, %ymm0
2926 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2927 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm0 = xmm13[0,1,2,3,5,7,6,7]
2928 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
2929 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm15, %xmm2
2930 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
2931 ; AVX1-ONLY-NEXT: vpshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
2932 ; AVX1-ONLY-NEXT: # xmm2 = mem[2,2,3,3]
2933 ; AVX1-ONLY-NEXT: vpunpcklwd (%rsp), %xmm2, %xmm2 # 16-byte Folded Reload
2934 ; AVX1-ONLY-NEXT: # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
2935 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
2936 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[0,1,1,3,4,5,6,7]
2937 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
2938 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm3 = xmm9[1,1,1,1]
2939 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
2940 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm3 = xmm4[0,1,2,3,5,7,6,7]
2941 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
2942 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm13, %xmm4
2943 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
2944 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3,4,5],xmm3[6,7]
2945 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
2946 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
2947 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
2948 ; AVX1-ONLY-NEXT: # xmm2 = mem[1,1,1,1]
2949 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
2950 ; AVX1-ONLY-NEXT: vpshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
2951 ; AVX1-ONLY-NEXT: # xmm2 = mem[2,2,3,3]
2952 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
2953 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
2954 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5],xmm2[6,7]
2955 ; AVX1-ONLY-NEXT: vandps %ymm6, %ymm0, %ymm0
2956 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2957 ; AVX1-ONLY-NEXT: vandnps %ymm1, %ymm6, %ymm1
2958 ; AVX1-ONLY-NEXT: vorps %ymm1, %ymm0, %ymm0
2959 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2960 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
2961 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm0 = xmm4[1,1,1,1]
2962 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm1 = xmm5[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
2963 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
2964 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm0 = [4,5,0,1,12,13,14,15,8,9,10,11,12,13,14,15]
2965 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm2 # 16-byte Folded Reload
2966 ; AVX1-ONLY-NEXT: # xmm2 = xmm10[0,1],mem[2,3],xmm10[4,5,6,7]
2967 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2968 ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm2, %xmm2
2969 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm5 = xmm2[0,1,2],xmm1[3,4],xmm2[5,6,7]
2970 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm1 # 16-byte Folded Reload
2971 ; AVX1-ONLY-NEXT: # xmm1 = xmm11[0,1],mem[2,3],xmm11[4,5,6,7]
2972 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2973 ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm1, %xmm1
2974 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,9,0,1,12,13,8,9]
2975 ; AVX1-ONLY-NEXT: vpblendw $207, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm10 # 16-byte Folded Reload
2976 ; AVX1-ONLY-NEXT: # xmm10 = mem[0,1,2,3],xmm12[4,5],mem[6,7]
2977 ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm10, %xmm6
2978 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm6, %ymm6
2979 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm1 = [0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
2980 ; AVX1-ONLY-NEXT: vandnps %ymm5, %ymm1, %ymm5
2981 ; AVX1-ONLY-NEXT: vandps %ymm1, %ymm6, %ymm6
2982 ; AVX1-ONLY-NEXT: vorps %ymm5, %ymm6, %ymm6
2983 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
2984 ; AVX1-ONLY-NEXT: # xmm5 = mem[1,1,1,1]
2985 ; AVX1-ONLY-NEXT: vmovdqa %xmm7, %xmm12
2986 ; AVX1-ONLY-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2987 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm7 = xmm7[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
2988 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm5[0]
2989 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm5 = xmm14[0,1,2,3],xmm8[4,5],xmm14[6,7]
2990 ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm5, %xmm8
2991 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm7 = xmm8[0,1,2],xmm7[3,4],xmm8[5,6,7]
2992 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm8 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0]
2993 ; AVX1-ONLY-NEXT: vandps %ymm6, %ymm8, %ymm6
2994 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
2995 ; AVX1-ONLY-NEXT: vandnps %ymm7, %ymm8, %ymm7
2996 ; AVX1-ONLY-NEXT: vorps %ymm7, %ymm6, %ymm6
2997 ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2998 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
2999 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm6 = xmm14[1,1,1,1]
3000 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm7 = xmm9[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
3001 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm6[0]
3002 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm6 # 16-byte Folded Reload
3003 ; AVX1-ONLY-NEXT: # xmm6 = xmm13[0,1],mem[2,3],xmm13[4,5,6,7]
3004 ; AVX1-ONLY-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3005 ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm6, %xmm8
3006 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm11 = xmm8[0,1,2],xmm7[3,4],xmm8[5,6,7]
3007 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm7 # 16-byte Folded Reload
3008 ; AVX1-ONLY-NEXT: # xmm7 = xmm15[0,1],mem[2,3],xmm15[4,5,6,7]
3009 ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm7, %xmm0
3010 ; AVX1-ONLY-NEXT: vmovdqa (%rsp), %xmm6 # 16-byte Reload
3011 ; AVX1-ONLY-NEXT: vpblendw $207, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm8 # 16-byte Folded Reload
3012 ; AVX1-ONLY-NEXT: # xmm8 = mem[0,1,2,3],xmm6[4,5],mem[6,7]
3013 ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm8, %xmm15
3014 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm15, %ymm0
3015 ; AVX1-ONLY-NEXT: vandnps %ymm11, %ymm1, %ymm11
3016 ; AVX1-ONLY-NEXT: vandps %ymm1, %ymm0, %ymm0
3017 ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm11, %ymm11
3018 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
3019 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm0 = xmm13[1,1,1,1]
3020 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
3021 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm15 = xmm6[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
3022 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm15 = xmm15[0],xmm0[0]
3023 ; AVX1-ONLY-NEXT: vpblendw $207, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm0 # 16-byte Folded Reload
3024 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1,2,3],xmm3[4,5],mem[6,7]
3025 ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm0, %xmm2
3026 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm15[3,4],xmm2[5,6,7]
3027 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm15 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0]
3028 ; AVX1-ONLY-NEXT: vandps %ymm15, %ymm11, %ymm11
3029 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
3030 ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm15, %ymm2
3031 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm11, %ymm2
3032 ; AVX1-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3033 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm4, %xmm2
3034 ; AVX1-ONLY-NEXT: vpshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
3035 ; AVX1-ONLY-NEXT: # xmm11 = mem[2,2,3,3]
3036 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm11[0],xmm2[0]
3037 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm11 = [6,7,2,3,14,15,14,15,8,9,10,11,12,13,14,15]
3038 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
3039 ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm3, %xmm9
3040 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2],xmm2[3,4],xmm9[5,6,7]
3041 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
3042 ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm2, %xmm3
3043 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,14,15,2,3,14,15,10,11]
3044 ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm10, %xmm4
3045 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
3046 ; AVX1-ONLY-NEXT: vandnps %ymm9, %ymm1, %ymm4
3047 ; AVX1-ONLY-NEXT: vandps %ymm1, %ymm3, %ymm3
3048 ; AVX1-ONLY-NEXT: vorps %ymm4, %ymm3, %ymm3
3049 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
3050 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm10, %xmm4
3051 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm9 = xmm12[2,2,3,3]
3052 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm9[0],xmm4[0]
3053 ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm5, %xmm5
3054 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[3,4],xmm5[5,6,7]
3055 ; AVX1-ONLY-NEXT: vandps %ymm3, %ymm15, %ymm3
3056 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
3057 ; AVX1-ONLY-NEXT: vandnps %ymm4, %ymm15, %ymm4
3058 ; AVX1-ONLY-NEXT: vorps %ymm4, %ymm3, %ymm3
3059 ; AVX1-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3060 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm14, %xmm3
3061 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
3062 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm4 = xmm12[2,2,3,3]
3063 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
3064 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3065 ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm4, %xmm4
3066 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3,4],xmm4[5,6,7]
3067 ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm7, %xmm4
3068 ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm8, %xmm5
3069 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
3070 ; AVX1-ONLY-NEXT: vandnps %ymm3, %ymm1, %ymm3
3071 ; AVX1-ONLY-NEXT: vandps %ymm1, %ymm4, %ymm1
3072 ; AVX1-ONLY-NEXT: vorps %ymm3, %ymm1, %ymm1
3073 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm13, %xmm3
3074 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm4 = xmm6[2,2,3,3]
3075 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
3076 ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm0, %xmm0
3077 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[3,4],xmm0[5,6,7]
3078 ; AVX1-ONLY-NEXT: vandps %ymm1, %ymm15, %ymm1
3079 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3080 ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm15, %ymm0
3081 ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm1, %ymm0
3082 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3083 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
3084 ; AVX1-ONLY-NEXT: # xmm1 = mem[1,1,1,1]
3085 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3086 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm2 = xmm4[2,3,2,3]
3087 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
3088 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3089 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
3090 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6,7]
3091 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3092 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,4,5,6,7,0,1,4,5,0,1,12,13]
3093 ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm0, %xmm2
3094 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
3095 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
3096 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm2 = xmm14[1,1,1,1]
3097 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
3098 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm5 = xmm6[2,3,2,3]
3099 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
3100 ; AVX1-ONLY-NEXT: vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
3101 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1,0,3]
3102 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3103 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm5 = xmm0[0,1,2,3,4,5,4,6]
3104 ; AVX1-ONLY-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
3105 ; AVX1-ONLY-NEXT: # xmm5 = xmm5[1],mem[1]
3106 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm5 = xmm2[0,1],xmm5[2,3,4,5,6,7]
3107 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
3108 ; AVX1-ONLY-NEXT: vandnps %ymm1, %ymm2, %ymm1
3109 ; AVX1-ONLY-NEXT: vandps %ymm2, %ymm5, %ymm5
3110 ; AVX1-ONLY-NEXT: vorps %ymm1, %ymm5, %ymm1
3111 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3112 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm5 # 16-byte Folded Reload
3113 ; AVX1-ONLY-NEXT: # xmm5 = xmm0[0,1],mem[2,3],xmm0[4,5,6,7]
3114 ; AVX1-ONLY-NEXT: vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
3115 ; AVX1-ONLY-NEXT: # xmm7 = mem[0,1,0,3]
3116 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm8 = xmm7[0,1,2,3,4,5,4,6]
3117 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm8 = xmm8[1],xmm10[1]
3118 ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm5, %xmm9
3119 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4],xmm9[5,6,7]
3120 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
3121 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm8[5,6,7]
3122 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3123 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
3124 ; AVX1-ONLY-NEXT: # xmm8 = mem[1,1,1,1]
3125 ; AVX1-ONLY-NEXT: vpshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
3126 ; AVX1-ONLY-NEXT: # xmm9 = mem[2,3,2,3]
3127 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
3128 ; AVX1-ONLY-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
3129 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
3130 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6,7]
3131 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3132 ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm0, %xmm10
3133 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm10, %ymm9
3134 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
3135 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm10 = xmm15[1,1,1,1]
3136 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
3137 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm11 = xmm8[2,3,2,3]
3138 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm11 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
3139 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm10 = xmm12[0,1,0,3]
3140 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm13 = xmm10[0,1,2,3,4,5,4,6]
3141 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
3142 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm13 = xmm13[1],xmm1[1]
3143 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0,1],xmm13[2,3,4,5,6,7]
3144 ; AVX1-ONLY-NEXT: vandnps %ymm9, %ymm2, %ymm9
3145 ; AVX1-ONLY-NEXT: vandps %ymm2, %ymm11, %ymm11
3146 ; AVX1-ONLY-NEXT: vorps %ymm9, %ymm11, %ymm13
3147 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3148 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm11 # 16-byte Folded Reload
3149 ; AVX1-ONLY-NEXT: # xmm11 = xmm0[0,1],mem[2,3],xmm0[4,5,6,7]
3150 ; AVX1-ONLY-NEXT: vpshufb %xmm3, %xmm11, %xmm3
3151 ; AVX1-ONLY-NEXT: vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
3152 ; AVX1-ONLY-NEXT: # xmm9 = mem[0,1,0,3]
3153 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm12 = xmm9[0,1,2,3,4,5,4,6]
3154 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3155 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm12 = xmm12[1],xmm0[1]
3156 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm12[0,1,2,3,4],xmm3[5,6,7]
3157 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
3158 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm13[0,1,2,3,4],ymm3[5,6,7]
3159 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
3160 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm12, %xmm12
3161 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm13 = xmm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3162 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
3163 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm12 = [6,7,2,3,4,5,6,7,6,7,6,7,2,3,14,15]
3164 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
3165 ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm4, %xmm4
3166 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm4, %ymm4
3167 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm14, %xmm13
3168 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm14 = xmm6[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3169 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3]
3170 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
3171 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm6, %xmm14
3172 ; AVX1-ONLY-NEXT: vpshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
3173 ; AVX1-ONLY-NEXT: # xmm6 = mem[0,1,2,3,4,5,5,7]
3174 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm14[1]
3175 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm6 = xmm13[0,1],xmm6[2,3,4,5,6,7]
3176 ; AVX1-ONLY-NEXT: vandnps %ymm4, %ymm2, %ymm4
3177 ; AVX1-ONLY-NEXT: vandps %ymm2, %ymm6, %ymm6
3178 ; AVX1-ONLY-NEXT: vorps %ymm4, %ymm6, %ymm4
3179 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
3180 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm6, %xmm6
3181 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,5,7]
3182 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm6 = xmm7[1],xmm6[1]
3183 ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm5, %xmm5
3184 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3,4],xmm5[5,6,7]
3185 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
3186 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm5[5,6,7]
3187 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
3188 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm5, %xmm5
3189 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
3190 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm6 = xmm6[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3191 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
3192 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
3193 ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm6, %xmm6
3194 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5
3195 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm15, %xmm6
3196 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm7 = xmm8[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3197 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
3198 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm1, %xmm7
3199 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm8 = xmm10[0,1,2,3,4,5,5,7]
3200 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm7 = xmm8[1],xmm7[1]
3201 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3,4,5,6,7]
3202 ; AVX1-ONLY-NEXT: vandnps %ymm5, %ymm2, %ymm5
3203 ; AVX1-ONLY-NEXT: vandps %ymm2, %ymm6, %ymm2
3204 ; AVX1-ONLY-NEXT: vorps %ymm5, %ymm2, %ymm2
3205 ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm11, %xmm5
3206 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm0, %xmm6
3207 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm7 = xmm9[0,1,2,3,4,5,5,7]
3208 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm6 = xmm7[1],xmm6[1]
3209 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3,4],xmm5[5,6,7]
3210 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
3211 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm5[5,6,7]
3212 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3213 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rsi)
3214 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
3215 ; AVX1-ONLY-NEXT: vmovaps %ymm5, (%rsi)
3216 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3217 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rdx)
3218 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3219 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rdx)
3220 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3221 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rcx)
3222 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3223 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rcx)
3224 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3225 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%r8)
3226 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3227 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%r8)
3228 ; AVX1-ONLY-NEXT: vmovaps %ymm3, 32(%r9)
3229 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3230 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%r9)
3231 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
3232 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 32(%rax)
3233 ; AVX1-ONLY-NEXT: vmovaps %ymm4, (%rax)
3234 ; AVX1-ONLY-NEXT: addq $552, %rsp # imm = 0x228
3235 ; AVX1-ONLY-NEXT: vzeroupper
3236 ; AVX1-ONLY-NEXT: retq
3237 ;
3238 ; AVX2-SLOW-LABEL: load_i16_stride6_vf32:
3239 ; AVX2-SLOW: # %bb.0:
3240 ; AVX2-SLOW-NEXT: subq $488, %rsp # imm = 0x1E8
3241 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm4
3242 ; AVX2-SLOW-NEXT: vmovdqu %ymm4, (%rsp) # 32-byte Spill
3243 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm5
3244 ; AVX2-SLOW-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3245 ; AVX2-SLOW-NEXT: vmovdqa 64(%rdi), %ymm0
3246 ; AVX2-SLOW-NEXT: vmovdqa 96(%rdi), %ymm1
3247 ; AVX2-SLOW-NEXT: vmovdqa 224(%rdi), %ymm10
3248 ; AVX2-SLOW-NEXT: vmovdqa 192(%rdi), %ymm11
3249 ; AVX2-SLOW-NEXT: vmovdqa 288(%rdi), %ymm2
3250 ; AVX2-SLOW-NEXT: vmovdqa 256(%rdi), %ymm3
3251 ; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm3[2,3],ymm2[2,3]
3252 ; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm14 = ymm3[0,1],ymm2[0,1]
3253 ; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
3254 ; AVX2-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3255 ; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
3256 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3257 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm8 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
3258 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm0[1],ymm2[2,3,4,5],ymm0[6],ymm2[7]
3259 ; AVX2-SLOW-NEXT: vpshufb %ymm8, %ymm1, %ymm6
3260 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
3261 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm7 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
3262 ; AVX2-SLOW-NEXT: vpshufb %xmm7, %xmm2, %xmm0
3263 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm2, %xmm3
3264 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm9 = xmm3[2,2,2,2,4,5,6,7]
3265 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm9 = xmm0[0],xmm9[1],xmm0[2,3],xmm9[4],xmm0[5,6,7]
3266 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
3267 ; AVX2-SLOW-NEXT: vpblendvb %ymm0, %ymm9, %ymm6, %ymm4
3268 ; AVX2-SLOW-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3269 ; AVX2-SLOW-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3270 ; AVX2-SLOW-NEXT: vmovdqa %ymm11, %ymm4
3271 ; AVX2-SLOW-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3272 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7]
3273 ; AVX2-SLOW-NEXT: vpshufb %xmm7, %xmm6, %xmm9
3274 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm6, %xmm7
3275 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm11 = xmm7[2,2,2,2,4,5,6,7]
3276 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0],xmm11[1],xmm9[2,3],xmm11[4],xmm9[5,6,7]
3277 ; AVX2-SLOW-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3278 ; AVX2-SLOW-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3279 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm13[0],ymm14[1],ymm13[2,3,4,5],ymm14[6],ymm13[7]
3280 ; AVX2-SLOW-NEXT: vpshufb %ymm8, %ymm11, %ymm8
3281 ; AVX2-SLOW-NEXT: vpblendvb %ymm0, %ymm9, %ymm8, %ymm5
3282 ; AVX2-SLOW-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3283 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm8 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
3284 ; AVX2-SLOW-NEXT: vpshufb %xmm8, %xmm2, %xmm2
3285 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
3286 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
3287 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
3288 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
3289 ; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm1, %ymm1
3290 ; AVX2-SLOW-NEXT: vpblendvb %ymm0, %ymm2, %ymm1, %ymm1
3291 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3292 ; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm11, %ymm1
3293 ; AVX2-SLOW-NEXT: vpshufb %xmm8, %xmm6, %xmm3
3294 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0],ymm10[1],ymm4[2,3],ymm10[4],ymm4[5,6],ymm10[7]
3295 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm7[1,1,2,3]
3296 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,5,5]
3297 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1],xmm3[2,3],xmm6[4],xmm3[5,6,7]
3298 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm6 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
3299 ; AVX2-SLOW-NEXT: vpblendvb %ymm0, %ymm3, %ymm1, %ymm0
3300 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3301 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm2, %xmm5
3302 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm5[0,2,0,3]
3303 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
3304 ; AVX2-SLOW-NEXT: vpshufb %xmm6, %xmm2, %xmm3
3305 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1],xmm0[2],xmm3[3],xmm0[4,5],xmm3[6,7]
3306 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21>
3307 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm7 = ymm14[0],ymm13[1],ymm14[2,3,4,5],ymm13[6],ymm14[7]
3308 ; AVX2-SLOW-NEXT: vpshufb %ymm12, %ymm7, %ymm3
3309 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm0[0,1,2],ymm3[3,4,5,6,7]
3310 ; AVX2-SLOW-NEXT: vmovdqa 352(%rdi), %ymm0
3311 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3312 ; AVX2-SLOW-NEXT: vmovdqa 320(%rdi), %ymm14
3313 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1],ymm0[2],ymm14[3,4],ymm0[5],ymm14[6,7]
3314 ; AVX2-SLOW-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3315 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm0[2,2,2,2,4,5,6,7]
3316 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm9 = xmm3[0,1,2,2]
3317 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm15 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
3318 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm4
3319 ; AVX2-SLOW-NEXT: vpshufb %xmm15, %xmm4, %xmm11
3320 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm9 = xmm11[0,1,2],xmm9[3],xmm11[4,5],xmm9[6],xmm11[7]
3321 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
3322 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm9 = ymm8[0,1,2],ymm9[3,4,5,6,7],ymm8[8,9,10],ymm9[11,12,13,14,15]
3323 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3],ymm9[4,5,6,7]
3324 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3325 ; AVX2-SLOW-NEXT: vmovdqa 160(%rdi), %ymm1
3326 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3327 ; AVX2-SLOW-NEXT: vmovdqa 128(%rdi), %ymm3
3328 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7]
3329 ; AVX2-SLOW-NEXT: vmovdqa %ymm3, %ymm9
3330 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm11, %xmm3
3331 ; AVX2-SLOW-NEXT: vpshufb %xmm15, %xmm3, %xmm15
3332 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm10 = xmm11[2,2,2,2,4,5,6,7]
3333 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[0,1,2,2]
3334 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm10 = xmm15[0,1,2],xmm10[3],xmm15[4,5],xmm10[6],xmm15[7]
3335 ; AVX2-SLOW-NEXT: vmovdqu (%rsp), %ymm1 # 32-byte Reload
3336 ; AVX2-SLOW-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
3337 ; AVX2-SLOW-NEXT: # ymm15 = ymm1[0],mem[1],ymm1[2,3],mem[4],ymm1[5,6],mem[7]
3338 ; AVX2-SLOW-NEXT: vpshufb %xmm6, %xmm15, %xmm6
3339 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm15, %xmm1
3340 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm13 = xmm1[0,2,0,3]
3341 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,6,6,7]
3342 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm13[2],xmm6[3],xmm13[4,5],xmm6[6,7]
3343 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
3344 ; AVX2-SLOW-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm13 # 32-byte Folded Reload
3345 ; AVX2-SLOW-NEXT: # ymm13 = mem[0],ymm8[1],mem[2,3,4,5],ymm8[6],mem[7]
3346 ; AVX2-SLOW-NEXT: vpshufb %ymm12, %ymm13, %ymm12
3347 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2],ymm12[3,4,5,6,7]
3348 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
3349 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm10 = ymm6[0,1,2],ymm10[3,4,5,6,7],ymm6[8,9,10],ymm10[11,12,13,14,15]
3350 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm10[4,5,6,7]
3351 ; AVX2-SLOW-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3352 ; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm6 = [2,3,14,15,10,11,0,0,2,3,14,15,10,11,0,0]
3353 ; AVX2-SLOW-NEXT: vpshufb %xmm6, %xmm5, %xmm5
3354 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm10 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
3355 ; AVX2-SLOW-NEXT: vpshufb %xmm10, %xmm2, %xmm2
3356 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2],xmm2[3],xmm5[4,5],xmm2[6,7]
3357 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23>
3358 ; AVX2-SLOW-NEXT: vpshufb %ymm5, %ymm7, %ymm7
3359 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm7[3,4,5,6,7]
3360 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm7 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
3361 ; AVX2-SLOW-NEXT: vpshufb %xmm7, %xmm4, %xmm4
3362 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
3363 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm4[0,1,2],xmm0[3],xmm4[4,5],xmm0[6],xmm4[7]
3364 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
3365 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
3366 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
3367 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3368 ; AVX2-SLOW-NEXT: vpshufb %xmm7, %xmm3, %xmm0
3369 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm11[0,1,2,3,5,5,5,5]
3370 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5],xmm2[6],xmm0[7]
3371 ; AVX2-SLOW-NEXT: vpshufb %xmm6, %xmm1, %xmm1
3372 ; AVX2-SLOW-NEXT: vpshufb %xmm10, %xmm15, %xmm2
3373 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3],xmm1[4,5],xmm2[6,7]
3374 ; AVX2-SLOW-NEXT: vpshufb %ymm5, %ymm13, %ymm2
3375 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
3376 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
3377 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
3378 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
3379 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3380 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3381 ; AVX2-SLOW-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
3382 ; AVX2-SLOW-NEXT: # ymm8 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
3383 ; AVX2-SLOW-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm0 # 32-byte Folded Reload
3384 ; AVX2-SLOW-NEXT: # ymm0 = mem[0,1],ymm14[2],mem[3,4],ymm14[5],mem[6,7]
3385 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1
3386 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[0,1,2,1]
3387 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[2,1,0,3]
3388 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm6[0,0,0,0,4,5,6,7]
3389 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,6,7]
3390 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm7[0,1,2,3,6,5,6,4]
3391 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
3392 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3393 ; AVX2-SLOW-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
3394 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
3395 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm10 = xmm1[2,1,2,3]
3396 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm1
3397 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[0,3,2,1]
3398 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm5[0,0,2,3,4,5,6,7]
3399 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,3]
3400 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm10[2,1,2,0,4,5,6,7]
3401 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
3402 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
3403 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm11 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
3404 ; AVX2-SLOW-NEXT: vpshufb %ymm11, %ymm8, %ymm2
3405 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
3406 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,5,4]
3407 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
3408 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
3409 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3410 ; AVX2-SLOW-NEXT: vmovdqa %ymm9, %ymm14
3411 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
3412 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm14[2],ymm9[3,4],ymm14[5],ymm9[6,7]
3413 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1
3414 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[0,1,2,1]
3415 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,1,0,3]
3416 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm3[0,0,0,0,4,5,6,7]
3417 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,6,7]
3418 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,6,5,6,4]
3419 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm13 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
3420 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3421 ; AVX2-SLOW-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
3422 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
3423 ; AVX2-SLOW-NEXT: vmovdqu (%rsp), %ymm0 # 32-byte Reload
3424 ; AVX2-SLOW-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
3425 ; AVX2-SLOW-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
3426 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,1,2,3]
3427 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm0
3428 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
3429 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm15 = xmm0[0,0,2,3,4,5,6,7]
3430 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm15 = xmm15[0,1,3,3]
3431 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm12 = xmm1[2,1,2,0,4,5,6,7]
3432 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0],xmm15[1,2],xmm12[3],xmm15[4,5,6,7]
3433 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
3434 ; AVX2-SLOW-NEXT: vpshufb %ymm11, %ymm2, %ymm11
3435 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm13 = ymm11[0,1,2],ymm13[3,4,5,6,7],ymm11[8,9,10],ymm13[11,12,13,14,15]
3436 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,6,5,4]
3437 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm11 = xmm12[0,1,2,3,4],xmm11[5,6,7]
3438 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm13[4,5,6,7]
3439 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,5,6,5]
3440 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[1,1,1,1,4,5,6,7]
3441 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,7,7]
3442 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm7[4],xmm6[5,6],xmm7[7]
3443 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm7 = xmm10[3,1,2,1,4,5,6,7]
3444 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[0,1,3,3,4,5,6,7]
3445 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,7,7,7]
3446 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0],xmm5[1,2],xmm7[3],xmm5[4,5,6,7]
3447 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm7 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
3448 ; AVX2-SLOW-NEXT: vpshufb %ymm7, %ymm8, %ymm8
3449 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
3450 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm6 = ymm8[0,1,2],ymm6[3,4,5,6,7],ymm8[8,9,10],ymm6[11,12,13,14,15]
3451 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm8 = xmm8[0,1,3,2]
3452 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm8[5,6,7]
3453 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
3454 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,6,5]
3455 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[1,1,1,1,4,5,6,7]
3456 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
3457 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
3458 ; AVX2-SLOW-NEXT: vpshufb %ymm7, %ymm2, %ymm2
3459 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,1,4,5,6,7]
3460 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
3461 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
3462 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
3463 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm1
3464 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
3465 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
3466 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm2[5,6,7]
3467 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
3468 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm14[0],ymm9[1],ymm14[2,3],ymm9[4],ymm14[5,6],ymm9[7]
3469 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm2
3470 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
3471 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm1[0,1,0,2,4,5,6,7]
3472 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,6,6,6]
3473 ; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm4 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
3474 ; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm2, %xmm6
3475 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm6[4],xmm3[5],xmm6[6,7]
3476 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
3477 ; AVX2-SLOW-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
3478 ; AVX2-SLOW-NEXT: # ymm3 = mem[0,1,2,3,4],ymm3[5,6,7]
3479 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
3480 ; AVX2-SLOW-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
3481 ; AVX2-SLOW-NEXT: # ymm6 = ymm6[0],mem[1],ymm6[2,3],mem[4],ymm6[5,6],mem[7]
3482 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm6, %xmm7
3483 ; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm7, %xmm4
3484 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,3,2,1]
3485 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm8 = xmm6[0,1,0,2,4,5,6,7]
3486 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,6,6,6,6]
3487 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm4 = xmm8[0,1,2,3],xmm4[4],xmm8[5],xmm4[6,7]
3488 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
3489 ; AVX2-SLOW-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
3490 ; AVX2-SLOW-NEXT: # ymm4 = mem[0,1,2,3,4],ymm4[5,6,7]
3491 ; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm8 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
3492 ; AVX2-SLOW-NEXT: vpshufb %xmm8, %xmm2, %xmm2
3493 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
3494 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,3]
3495 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5],xmm2[6,7]
3496 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
3497 ; AVX2-SLOW-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
3498 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,1,2,3,4],ymm1[5,6,7]
3499 ; AVX2-SLOW-NEXT: vpshufb %xmm8, %xmm7, %xmm2
3500 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[0,1,1,3,4,5,6,7]
3501 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,3,3]
3502 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm6[0,1,2,3],xmm2[4],xmm6[5],xmm2[6,7]
3503 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
3504 ; AVX2-SLOW-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
3505 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,1,2,3,4],ymm2[5,6,7]
3506 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
3507 ; AVX2-SLOW-NEXT: vmovaps %ymm6, 32(%rsi)
3508 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
3509 ; AVX2-SLOW-NEXT: vmovaps %ymm6, (%rsi)
3510 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
3511 ; AVX2-SLOW-NEXT: vmovaps %ymm6, 32(%rdx)
3512 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
3513 ; AVX2-SLOW-NEXT: vmovaps %ymm6, (%rdx)
3514 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
3515 ; AVX2-SLOW-NEXT: vmovaps %ymm6, 32(%rcx)
3516 ; AVX2-SLOW-NEXT: vmovdqa %ymm11, (%rcx)
3517 ; AVX2-SLOW-NEXT: vmovdqa %ymm5, 32(%r8)
3518 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%r8)
3519 ; AVX2-SLOW-NEXT: vmovdqa %ymm4, 32(%r9)
3520 ; AVX2-SLOW-NEXT: vmovdqa %ymm3, (%r9)
3521 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
3522 ; AVX2-SLOW-NEXT: vmovdqa %ymm2, 32(%rax)
3523 ; AVX2-SLOW-NEXT: vmovdqa %ymm1, (%rax)
3524 ; AVX2-SLOW-NEXT: addq $488, %rsp # imm = 0x1E8
3525 ; AVX2-SLOW-NEXT: vzeroupper
3526 ; AVX2-SLOW-NEXT: retq
3527 ;
3528 ; AVX2-FAST-LABEL: load_i16_stride6_vf32:
3529 ; AVX2-FAST: # %bb.0:
3530 ; AVX2-FAST-NEXT: subq $488, %rsp # imm = 0x1E8
3531 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm4
3532 ; AVX2-FAST-NEXT: vmovdqu %ymm4, (%rsp) # 32-byte Spill
3533 ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %ymm5
3534 ; AVX2-FAST-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3535 ; AVX2-FAST-NEXT: vmovdqa 64(%rdi), %ymm0
3536 ; AVX2-FAST-NEXT: vmovdqa 96(%rdi), %ymm1
3537 ; AVX2-FAST-NEXT: vmovdqa 224(%rdi), %ymm9
3538 ; AVX2-FAST-NEXT: vmovdqa 192(%rdi), %ymm11
3539 ; AVX2-FAST-NEXT: vmovdqa 288(%rdi), %ymm2
3540 ; AVX2-FAST-NEXT: vmovdqa 256(%rdi), %ymm3
3541 ; AVX2-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm3[2,3],ymm2[2,3]
3542 ; AVX2-FAST-NEXT: vperm2i128 {{.*#+}} ymm14 = ymm3[0,1],ymm2[0,1]
3543 ; AVX2-FAST-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
3544 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3545 ; AVX2-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
3546 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3547 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
3548 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm0[1],ymm2[2,3,4,5],ymm0[6],ymm2[7]
3549 ; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm1, %ymm6
3550 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
3551 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm8 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
3552 ; AVX2-FAST-NEXT: vpshufb %xmm8, %xmm2, %xmm0
3553 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm2, %xmm3
3554 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm10 = xmm3[2,2,2,2,4,5,6,7]
3555 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm10 = xmm0[0],xmm10[1],xmm0[2,3],xmm10[4],xmm0[5,6,7]
3556 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
3557 ; AVX2-FAST-NEXT: vpblendvb %ymm0, %ymm10, %ymm6, %ymm4
3558 ; AVX2-FAST-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3559 ; AVX2-FAST-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3560 ; AVX2-FAST-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3561 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0,1],ymm11[2],ymm9[3,4],ymm11[5],ymm9[6,7]
3562 ; AVX2-FAST-NEXT: vpshufb %xmm8, %xmm6, %xmm10
3563 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm6, %xmm8
3564 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm12 = xmm8[2,2,2,2,4,5,6,7]
3565 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0],xmm12[1],xmm10[2,3],xmm12[4],xmm10[5,6,7]
3566 ; AVX2-FAST-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3567 ; AVX2-FAST-NEXT: vmovdqa %ymm14, %ymm4
3568 ; AVX2-FAST-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3569 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0],ymm14[1],ymm13[2,3,4,5],ymm14[6],ymm13[7]
3570 ; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm12, %ymm7
3571 ; AVX2-FAST-NEXT: vpblendvb %ymm0, %ymm10, %ymm7, %ymm5
3572 ; AVX2-FAST-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3573 ; AVX2-FAST-NEXT: vpbroadcastd {{.*#+}} xmm7 = [10,11,6,7,10,11,6,7,10,11,6,7,10,11,6,7]
3574 ; AVX2-FAST-NEXT: vpshufb %xmm7, %xmm3, %xmm3
3575 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm10 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
3576 ; AVX2-FAST-NEXT: vpshufb %xmm10, %xmm2, %xmm2
3577 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
3578 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
3579 ; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm1, %ymm1
3580 ; AVX2-FAST-NEXT: vpblendvb %ymm0, %ymm2, %ymm1, %ymm1
3581 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3582 ; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm12, %ymm1
3583 ; AVX2-FAST-NEXT: vpshufb %xmm7, %xmm8, %xmm3
3584 ; AVX2-FAST-NEXT: vpshufb %xmm10, %xmm6, %xmm6
3585 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm11[0],ymm9[1],ymm11[2,3],ymm9[4],ymm11[5,6],ymm9[7]
3586 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0],xmm3[1],xmm6[2,3],xmm3[4],xmm6[5,6,7]
3587 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm15 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
3588 ; AVX2-FAST-NEXT: vpblendvb %ymm0, %ymm3, %ymm1, %ymm0
3589 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3590 ; AVX2-FAST-NEXT: vpshufb %xmm15, %xmm5, %xmm0
3591 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm5, %xmm1
3592 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm11 = xmm1[2,1,0,3]
3593 ; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} xmm12 = [8,9,12,13,0,1,0,0,8,9,12,13,0,1,0,0]
3594 ; AVX2-FAST-NEXT: vpshufb %xmm12, %xmm11, %xmm1
3595 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
3596 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm14 = <u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21>
3597 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm7 = ymm4[0],ymm13[1],ymm4[2,3,4,5],ymm13[6],ymm4[7]
3598 ; AVX2-FAST-NEXT: vpshufb %ymm14, %ymm7, %ymm1
3599 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
3600 ; AVX2-FAST-NEXT: vmovdqa 352(%rdi), %ymm2
3601 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3602 ; AVX2-FAST-NEXT: vmovdqa 320(%rdi), %ymm1
3603 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3604 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
3605 ; AVX2-FAST-NEXT: vpbroadcastd {{.*#+}} xmm4 = [8,9,4,5,8,9,4,5,8,9,4,5,8,9,4,5]
3606 ; AVX2-FAST-NEXT: vpshufb %xmm4, %xmm1, %xmm6
3607 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm9 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
3608 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm1, %xmm10
3609 ; AVX2-FAST-NEXT: vpshufb %xmm9, %xmm10, %xmm8
3610 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm8[0,1,2],xmm6[3],xmm8[4,5],xmm6[6],xmm8[7]
3611 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
3612 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm6 = ymm0[0,1,2],ymm6[3,4,5,6,7],ymm0[8,9,10],ymm6[11,12,13,14,15]
3613 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
3614 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3615 ; AVX2-FAST-NEXT: vmovdqa 160(%rdi), %ymm0
3616 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3617 ; AVX2-FAST-NEXT: vmovdqa 128(%rdi), %ymm8
3618 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1],ymm0[2],ymm8[3,4],ymm0[5],ymm8[6,7]
3619 ; AVX2-FAST-NEXT: vpshufb %xmm4, %xmm0, %xmm4
3620 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm0, %xmm3
3621 ; AVX2-FAST-NEXT: vpshufb %xmm9, %xmm3, %xmm9
3622 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm9[0,1,2],xmm4[3],xmm9[4,5],xmm4[6],xmm9[7]
3623 ; AVX2-FAST-NEXT: vmovdqu (%rsp), %ymm2 # 32-byte Reload
3624 ; AVX2-FAST-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm9 # 32-byte Folded Reload
3625 ; AVX2-FAST-NEXT: # ymm9 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
3626 ; AVX2-FAST-NEXT: vpshufb %xmm15, %xmm9, %xmm15
3627 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm9, %xmm13
3628 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[2,1,0,3]
3629 ; AVX2-FAST-NEXT: vpshufb %xmm12, %xmm13, %xmm12
3630 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm12 = xmm15[0,1],xmm12[2],xmm15[3],xmm12[4,5],xmm15[6,7]
3631 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3632 ; AVX2-FAST-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
3633 ; AVX2-FAST-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3,4,5],mem[6],ymm2[7]
3634 ; AVX2-FAST-NEXT: vpshufb %ymm14, %ymm2, %ymm14
3635 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2],ymm14[3,4,5,6,7]
3636 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
3637 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm4 = ymm12[0,1,2],ymm4[3,4,5,6,7],ymm12[8,9,10],ymm4[11,12,13,14,15]
3638 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm4[4,5,6,7]
3639 ; AVX2-FAST-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3640 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm4 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
3641 ; AVX2-FAST-NEXT: vpshufb %xmm4, %xmm5, %xmm5
3642 ; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} xmm14 = [10,11,14,15,2,3,0,0,10,11,14,15,2,3,0,0]
3643 ; AVX2-FAST-NEXT: vpshufb %xmm14, %xmm11, %xmm11
3644 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm11[2],xmm5[3],xmm11[4,5],xmm5[6,7]
3645 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm11 = <u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23>
3646 ; AVX2-FAST-NEXT: vpshufb %ymm11, %ymm7, %ymm7
3647 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2],ymm7[3,4,5,6,7]
3648 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm7 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
3649 ; AVX2-FAST-NEXT: vpshufb %xmm7, %xmm10, %xmm10
3650 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
3651 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm10[0,1,2],xmm1[3],xmm10[4,5],xmm1[6],xmm10[7]
3652 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
3653 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm1 = ymm5[0,1,2],ymm1[3,4,5,6,7],ymm5[8,9,10],ymm1[11,12,13,14,15]
3654 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7]
3655 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3656 ; AVX2-FAST-NEXT: vpshufb %xmm7, %xmm3, %xmm1
3657 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
3658 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
3659 ; AVX2-FAST-NEXT: vpshufb %xmm4, %xmm9, %xmm1
3660 ; AVX2-FAST-NEXT: vpshufb %xmm14, %xmm13, %xmm3
3661 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
3662 ; AVX2-FAST-NEXT: vpshufb %ymm11, %ymm2, %ymm2
3663 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
3664 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
3665 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
3666 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
3667 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3668 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3669 ; AVX2-FAST-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
3670 ; AVX2-FAST-NEXT: # ymm6 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
3671 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3672 ; AVX2-FAST-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
3673 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
3674 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[2,1,0,3]
3675 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm0, %xmm0
3676 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[0,1,2,1]
3677 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm0 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
3678 ; AVX2-FAST-NEXT: vpshufb %xmm0, %xmm5, %xmm1
3679 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm2 = xmm4[0,1,2,3,6,5,6,4]
3680 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
3681 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3682 ; AVX2-FAST-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
3683 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
3684 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm2, %xmm7
3685 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm11 = xmm7[0,3,2,1]
3686 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,1,2,3]
3687 ; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} xmm9 = [12,13,0,1,4,5,0,0,12,13,0,1,4,5,0,0]
3688 ; AVX2-FAST-NEXT: vpshufb %xmm9, %xmm11, %xmm7
3689 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm13 = xmm3[2,1,2,0,4,5,6,7]
3690 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm7 = xmm13[0],xmm7[1,2],xmm13[3],xmm7[4,5,6,7]
3691 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
3692 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm13 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
3693 ; AVX2-FAST-NEXT: vpshufb %ymm13, %ymm6, %ymm14
3694 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm1 = ymm14[0,1,2],ymm1[3,4,5,6,7],ymm14[8,9,10],ymm1[11,12,13,14,15]
3695 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,6,5,4]
3696 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3,4],xmm14[5,6,7]
3697 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm1[4,5,6,7]
3698 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3699 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
3700 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm14 = ymm7[0,1],ymm8[2],ymm7[3,4],ymm8[5],ymm7[6,7]
3701 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm14[2,1,0,3]
3702 ; AVX2-FAST-NEXT: vpshufb %xmm0, %xmm2, %xmm0
3703 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm14, %xmm14
3704 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm14 = xmm14[0,1,2,1]
3705 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm15 = xmm14[0,1,2,3,6,5,6,4]
3706 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm15[4],xmm0[5,6],xmm15[7]
3707 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3708 ; AVX2-FAST-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
3709 ; AVX2-FAST-NEXT: # ymm15 = ymm0[0,1],mem[2],ymm0[3],mem[4],ymm0[5,6],mem[7]
3710 ; AVX2-FAST-NEXT: vmovdqu (%rsp), %ymm0 # 32-byte Reload
3711 ; AVX2-FAST-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
3712 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
3713 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm0, %xmm12
3714 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[0,3,2,1]
3715 ; AVX2-FAST-NEXT: vpshufb %xmm9, %xmm12, %xmm9
3716 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
3717 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm10 = xmm0[2,1,2,0,4,5,6,7]
3718 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0],xmm9[1,2],xmm10[3],xmm9[4,5,6,7]
3719 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
3720 ; AVX2-FAST-NEXT: vpshufb %ymm13, %ymm15, %ymm10
3721 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm1 = ymm10[0,1,2],ymm1[3,4,5,6,7],ymm10[8,9,10],ymm1[11,12,13,14,15]
3722 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,6,5,4]
3723 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm10[5,6,7]
3724 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm1[4,5,6,7]
3725 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm1 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
3726 ; AVX2-FAST-NEXT: vpshufb %xmm1, %xmm5, %xmm5
3727 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,6,5]
3728 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4],xmm5[5,6],xmm4[7]
3729 ; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} xmm5 = [14,15,2,3,6,7,0,0,14,15,2,3,6,7,0,0]
3730 ; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm11, %xmm10
3731 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[3,1,2,1,4,5,6,7]
3732 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm10[1,2],xmm3[3],xmm10[4,5,6,7]
3733 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm10 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
3734 ; AVX2-FAST-NEXT: vpshufb %ymm10, %ymm6, %ymm6
3735 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
3736 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm4 = ymm6[0,1,2],ymm4[3,4,5,6,7],ymm6[8,9,10],ymm4[11,12,13,14,15]
3737 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,3,2]
3738 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm6[5,6,7]
3739 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm3[0,1,2,3],ymm4[4,5,6,7]
3740 ; AVX2-FAST-NEXT: vpshufb %xmm1, %xmm2, %xmm1
3741 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm2 = xmm14[0,1,2,3,7,5,6,5]
3742 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
3743 ; AVX2-FAST-NEXT: vpshufb %ymm10, %ymm15, %ymm2
3744 ; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm12, %xmm3
3745 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
3746 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2],xmm0[3],xmm3[4,5,6,7]
3747 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
3748 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
3749 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
3750 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm2[5,6,7]
3751 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
3752 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5,6],ymm7[7]
3753 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm1, %xmm2
3754 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
3755 ; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} xmm3 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
3756 ; AVX2-FAST-NEXT: vpshufb %xmm3, %xmm2, %xmm5
3757 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm6 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
3758 ; AVX2-FAST-NEXT: vpshufb %xmm6, %xmm1, %xmm8
3759 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm5 = xmm8[0,1,2,3],xmm5[4],xmm8[5],xmm5[6,7]
3760 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
3761 ; AVX2-FAST-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
3762 ; AVX2-FAST-NEXT: # ymm5 = mem[0,1,2,3,4],ymm5[5,6,7]
3763 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
3764 ; AVX2-FAST-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
3765 ; AVX2-FAST-NEXT: # ymm8 = mem[0],ymm8[1],mem[2,3],ymm8[4],mem[5,6],ymm8[7]
3766 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm8, %xmm10
3767 ; AVX2-FAST-NEXT: vpshufb %xmm3, %xmm10, %xmm3
3768 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm8 = xmm8[0,3,2,1]
3769 ; AVX2-FAST-NEXT: vpshufb %xmm6, %xmm8, %xmm6
3770 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0,1,2,3],xmm3[4],xmm6[5],xmm3[6,7]
3771 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
3772 ; AVX2-FAST-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
3773 ; AVX2-FAST-NEXT: # ymm3 = mem[0,1,2,3,4],ymm3[5,6,7]
3774 ; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} xmm6 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
3775 ; AVX2-FAST-NEXT: vpshufb %xmm6, %xmm2, %xmm2
3776 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm11 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
3777 ; AVX2-FAST-NEXT: vpshufb %xmm11, %xmm1, %xmm1
3778 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5],xmm2[6,7]
3779 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
3780 ; AVX2-FAST-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
3781 ; AVX2-FAST-NEXT: # ymm1 = mem[0,1,2,3,4],ymm1[5,6,7]
3782 ; AVX2-FAST-NEXT: vpshufb %xmm6, %xmm10, %xmm2
3783 ; AVX2-FAST-NEXT: vpshufb %xmm11, %xmm8, %xmm6
3784 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm6[0,1,2,3],xmm2[4],xmm6[5],xmm2[6,7]
3785 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
3786 ; AVX2-FAST-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
3787 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1,2,3,4],ymm2[5,6,7]
3788 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
3789 ; AVX2-FAST-NEXT: vmovaps %ymm6, 32(%rsi)
3790 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
3791 ; AVX2-FAST-NEXT: vmovaps %ymm6, (%rsi)
3792 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
3793 ; AVX2-FAST-NEXT: vmovaps %ymm6, 32(%rdx)
3794 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
3795 ; AVX2-FAST-NEXT: vmovaps %ymm6, (%rdx)
3796 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
3797 ; AVX2-FAST-NEXT: vmovaps %ymm6, 32(%rcx)
3798 ; AVX2-FAST-NEXT: vmovdqa %ymm9, (%rcx)
3799 ; AVX2-FAST-NEXT: vmovdqa %ymm4, 32(%r8)
3800 ; AVX2-FAST-NEXT: vmovdqa %ymm0, (%r8)
3801 ; AVX2-FAST-NEXT: vmovdqa %ymm3, 32(%r9)
3802 ; AVX2-FAST-NEXT: vmovdqa %ymm5, (%r9)
3803 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
3804 ; AVX2-FAST-NEXT: vmovdqa %ymm2, 32(%rax)
3805 ; AVX2-FAST-NEXT: vmovdqa %ymm1, (%rax)
3806 ; AVX2-FAST-NEXT: addq $488, %rsp # imm = 0x1E8
3807 ; AVX2-FAST-NEXT: vzeroupper
3808 ; AVX2-FAST-NEXT: retq
3809 ;
3810 ; AVX2-FAST-PERLANE-LABEL: load_i16_stride6_vf32:
3811 ; AVX2-FAST-PERLANE: # %bb.0:
3812 ; AVX2-FAST-PERLANE-NEXT: subq $488, %rsp # imm = 0x1E8
3813 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm4
3814 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm4, (%rsp) # 32-byte Spill
3815 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm5
3816 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3817 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rdi), %ymm0
3818 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rdi), %ymm1
3819 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 224(%rdi), %ymm9
3820 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 192(%rdi), %ymm11
3821 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 288(%rdi), %ymm2
3822 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 256(%rdi), %ymm3
3823 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm3[2,3],ymm2[2,3]
3824 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 {{.*#+}} ymm14 = ymm3[0,1],ymm2[0,1]
3825 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
3826 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3827 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
3828 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3829 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm7 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
3830 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm0[1],ymm2[2,3,4,5],ymm0[6],ymm2[7]
3831 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm7, %ymm1, %ymm6
3832 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
3833 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm8 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
3834 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm8, %xmm2, %xmm0
3835 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm2, %xmm3
3836 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm10 = xmm3[2,2,2,2,4,5,6,7]
3837 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm10 = xmm0[0],xmm10[1],xmm0[2,3],xmm10[4],xmm0[5,6,7]
3838 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
3839 ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm0, %ymm10, %ymm6, %ymm4
3840 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3841 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3842 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3843 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0,1],ymm11[2],ymm9[3,4],ymm11[5],ymm9[6,7]
3844 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm8, %xmm6, %xmm10
3845 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm6, %xmm8
3846 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm12 = xmm8[2,2,2,2,4,5,6,7]
3847 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0],xmm12[1],xmm10[2,3],xmm12[4],xmm10[5,6,7]
3848 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3849 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm14, %ymm4
3850 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3851 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0],ymm14[1],ymm13[2,3,4,5],ymm14[6],ymm13[7]
3852 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm7, %ymm12, %ymm7
3853 ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm0, %ymm10, %ymm7, %ymm5
3854 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3855 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd {{.*#+}} xmm7 = [10,11,6,7,10,11,6,7,10,11,6,7,10,11,6,7]
3856 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm7, %xmm3, %xmm3
3857 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm10 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
3858 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm10, %xmm2, %xmm2
3859 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
3860 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm3 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
3861 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm3, %ymm1, %ymm1
3862 ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm0, %ymm2, %ymm1, %ymm1
3863 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3864 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm3, %ymm12, %ymm1
3865 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm7, %xmm8, %xmm3
3866 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm10, %xmm6, %xmm6
3867 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm5 = ymm11[0],ymm9[1],ymm11[2,3],ymm9[4],ymm11[5,6],ymm9[7]
3868 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0],xmm3[1],xmm6[2,3],xmm3[4],xmm6[5,6,7]
3869 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm15 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
3870 ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm0, %ymm3, %ymm1, %ymm0
3871 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3872 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm15, %xmm5, %xmm0
3873 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm5, %xmm1
3874 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm11 = xmm1[2,1,0,3]
3875 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{.*#+}} xmm12 = [8,9,12,13,0,1,0,0,8,9,12,13,0,1,0,0]
3876 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm12, %xmm11, %xmm1
3877 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
3878 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm14 = <u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21>
3879 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm7 = ymm4[0],ymm13[1],ymm4[2,3,4,5],ymm13[6],ymm4[7]
3880 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm14, %ymm7, %ymm1
3881 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
3882 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 352(%rdi), %ymm2
3883 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3884 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 320(%rdi), %ymm1
3885 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3886 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
3887 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd {{.*#+}} xmm4 = [8,9,4,5,8,9,4,5,8,9,4,5,8,9,4,5]
3888 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm4, %xmm1, %xmm6
3889 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm9 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
3890 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm1, %xmm10
3891 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm9, %xmm10, %xmm8
3892 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm6 = xmm8[0,1,2],xmm6[3],xmm8[4,5],xmm6[6],xmm8[7]
3893 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
3894 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm6 = ymm0[0,1,2],ymm6[3,4,5,6,7],ymm0[8,9,10],ymm6[11,12,13,14,15]
3895 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
3896 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3897 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 160(%rdi), %ymm0
3898 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3899 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 128(%rdi), %ymm8
3900 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1],ymm0[2],ymm8[3,4],ymm0[5],ymm8[6,7]
3901 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm4, %xmm0, %xmm4
3902 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm0, %xmm3
3903 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm9, %xmm3, %xmm9
3904 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm4 = xmm9[0,1,2],xmm4[3],xmm9[4,5],xmm4[6],xmm9[7]
3905 ; AVX2-FAST-PERLANE-NEXT: vmovdqu (%rsp), %ymm2 # 32-byte Reload
3906 ; AVX2-FAST-PERLANE-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm9 # 32-byte Folded Reload
3907 ; AVX2-FAST-PERLANE-NEXT: # ymm9 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
3908 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm15, %xmm9, %xmm15
3909 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm9, %xmm13
3910 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[2,1,0,3]
3911 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm12, %xmm13, %xmm12
3912 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm12 = xmm15[0,1],xmm12[2],xmm15[3],xmm12[4,5],xmm15[6,7]
3913 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3914 ; AVX2-FAST-PERLANE-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
3915 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3,4,5],mem[6],ymm2[7]
3916 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm14, %ymm2, %ymm14
3917 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2],ymm14[3,4,5,6,7]
3918 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
3919 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm4 = ymm12[0,1,2],ymm4[3,4,5,6,7],ymm12[8,9,10],ymm4[11,12,13,14,15]
3920 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm4[4,5,6,7]
3921 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3922 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm4 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
3923 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm4, %xmm5, %xmm5
3924 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{.*#+}} xmm14 = [10,11,14,15,2,3,0,0,10,11,14,15,2,3,0,0]
3925 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm14, %xmm11, %xmm11
3926 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm11[2],xmm5[3],xmm11[4,5],xmm5[6,7]
3927 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm11 = <u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23>
3928 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm11, %ymm7, %ymm7
3929 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2],ymm7[3,4,5,6,7]
3930 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm7 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
3931 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm7, %xmm10, %xmm10
3932 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
3933 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm10[0,1,2],xmm1[3],xmm10[4,5],xmm1[6],xmm10[7]
3934 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
3935 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm1 = ymm5[0,1,2],ymm1[3,4,5,6,7],ymm5[8,9,10],ymm1[11,12,13,14,15]
3936 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7]
3937 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3938 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm7, %xmm3, %xmm1
3939 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
3940 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
3941 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm4, %xmm9, %xmm1
3942 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm14, %xmm13, %xmm3
3943 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
3944 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm11, %ymm2, %ymm2
3945 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
3946 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
3947 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
3948 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
3949 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3950 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3951 ; AVX2-FAST-PERLANE-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
3952 ; AVX2-FAST-PERLANE-NEXT: # ymm6 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
3953 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3954 ; AVX2-FAST-PERLANE-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
3955 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
3956 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[2,1,0,3]
3957 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm0, %xmm0
3958 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[0,1,2,1]
3959 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm0 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
3960 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm0, %xmm5, %xmm1
3961 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm2 = xmm4[0,1,2,3,6,5,6,4]
3962 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
3963 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
3964 ; AVX2-FAST-PERLANE-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
3965 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
3966 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm2, %xmm7
3967 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm11 = xmm7[0,3,2,1]
3968 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,1,2,3]
3969 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{.*#+}} xmm9 = [12,13,0,1,4,5,0,0,12,13,0,1,4,5,0,0]
3970 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm9, %xmm11, %xmm7
3971 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm13 = xmm3[2,1,2,0,4,5,6,7]
3972 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm7 = xmm13[0],xmm7[1,2],xmm13[3],xmm7[4,5,6,7]
3973 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
3974 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm13 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
3975 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm13, %ymm6, %ymm14
3976 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm1 = ymm14[0,1,2],ymm1[3,4,5,6,7],ymm14[8,9,10],ymm1[11,12,13,14,15]
3977 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,6,5,4]
3978 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3,4],xmm14[5,6,7]
3979 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm1[4,5,6,7]
3980 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3981 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
3982 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm7[0,1],ymm8[2],ymm7[3,4],ymm8[5],ymm7[6,7]
3983 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm2 = xmm14[2,1,0,3]
3984 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm0, %xmm2, %xmm0
3985 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm14, %xmm14
3986 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm14 = xmm14[0,1,2,1]
3987 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm15 = xmm14[0,1,2,3,6,5,6,4]
3988 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm15[4],xmm0[5,6],xmm15[7]
3989 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
3990 ; AVX2-FAST-PERLANE-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
3991 ; AVX2-FAST-PERLANE-NEXT: # ymm15 = ymm0[0,1],mem[2],ymm0[3],mem[4],ymm0[5,6],mem[7]
3992 ; AVX2-FAST-PERLANE-NEXT: vmovdqu (%rsp), %ymm0 # 32-byte Reload
3993 ; AVX2-FAST-PERLANE-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
3994 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
3995 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm0, %xmm12
3996 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[0,3,2,1]
3997 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm9, %xmm12, %xmm9
3998 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
3999 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm10 = xmm0[2,1,2,0,4,5,6,7]
4000 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0],xmm9[1,2],xmm10[3],xmm9[4,5,6,7]
4001 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
4002 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm13, %ymm15, %ymm10
4003 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm1 = ymm10[0,1,2],ymm1[3,4,5,6,7],ymm10[8,9,10],ymm1[11,12,13,14,15]
4004 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,6,5,4]
4005 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm10[5,6,7]
4006 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm1[4,5,6,7]
4007 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm1 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
4008 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm1, %xmm5, %xmm5
4009 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,6,5]
4010 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4],xmm5[5,6],xmm4[7]
4011 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{.*#+}} xmm5 = [14,15,2,3,6,7,0,0,14,15,2,3,6,7,0,0]
4012 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm5, %xmm11, %xmm10
4013 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[3,1,2,1,4,5,6,7]
4014 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm10[1,2],xmm3[3],xmm10[4,5,6,7]
4015 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm10 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
4016 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm10, %ymm6, %ymm6
4017 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
4018 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm4 = ymm6[0,1,2],ymm4[3,4,5,6,7],ymm6[8,9,10],ymm4[11,12,13,14,15]
4019 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,3,2]
4020 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm6[5,6,7]
4021 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm3[0,1,2,3],ymm4[4,5,6,7]
4022 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm1, %xmm2, %xmm1
4023 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm2 = xmm14[0,1,2,3,7,5,6,5]
4024 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
4025 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm10, %ymm15, %ymm2
4026 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm5, %xmm12, %xmm3
4027 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
4028 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2],xmm0[3],xmm3[4,5,6,7]
4029 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
4030 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
4031 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
4032 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm2[5,6,7]
4033 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4034 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5,6],ymm7[7]
4035 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm1, %xmm2
4036 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
4037 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{.*#+}} xmm3 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
4038 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm3, %xmm2, %xmm5
4039 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm6 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
4040 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm6, %xmm1, %xmm8
4041 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm5 = xmm8[0,1,2,3],xmm5[4],xmm8[5],xmm5[6,7]
4042 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
4043 ; AVX2-FAST-PERLANE-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
4044 ; AVX2-FAST-PERLANE-NEXT: # ymm5 = mem[0,1,2,3,4],ymm5[5,6,7]
4045 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
4046 ; AVX2-FAST-PERLANE-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
4047 ; AVX2-FAST-PERLANE-NEXT: # ymm8 = mem[0],ymm8[1],mem[2,3],ymm8[4],mem[5,6],ymm8[7]
4048 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm8, %xmm10
4049 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm3, %xmm10, %xmm3
4050 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm8 = xmm8[0,3,2,1]
4051 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm6, %xmm8, %xmm6
4052 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0,1,2,3],xmm3[4],xmm6[5],xmm3[6,7]
4053 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
4054 ; AVX2-FAST-PERLANE-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
4055 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = mem[0,1,2,3,4],ymm3[5,6,7]
4056 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{.*#+}} xmm6 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
4057 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm6, %xmm2, %xmm2
4058 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm11 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
4059 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm11, %xmm1, %xmm1
4060 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5],xmm2[6,7]
4061 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
4062 ; AVX2-FAST-PERLANE-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
4063 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,1,2,3,4],ymm1[5,6,7]
4064 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm6, %xmm10, %xmm2
4065 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm11, %xmm8, %xmm6
4066 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm2 = xmm6[0,1,2,3],xmm2[4],xmm6[5],xmm2[6,7]
4067 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
4068 ; AVX2-FAST-PERLANE-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
4069 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1,2,3,4],ymm2[5,6,7]
4070 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4071 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, 32(%rsi)
4072 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4073 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, (%rsi)
4074 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4075 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, 32(%rdx)
4076 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4077 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, (%rdx)
4078 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
4079 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm6, 32(%rcx)
4080 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm9, (%rcx)
4081 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm4, 32(%r8)
4082 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, (%r8)
4083 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm3, 32(%r9)
4084 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm5, (%r9)
4085 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
4086 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm2, 32(%rax)
4087 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, (%rax)
4088 ; AVX2-FAST-PERLANE-NEXT: addq $488, %rsp # imm = 0x1E8
4089 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
4090 ; AVX2-FAST-PERLANE-NEXT: retq
4091 ;
4092 ; AVX512F-ONLY-SLOW-LABEL: load_i16_stride6_vf32:
4093 ; AVX512F-ONLY-SLOW: # %bb.0:
4094 ; AVX512F-ONLY-SLOW-NEXT: subq $136, %rsp
4095 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
4096 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 224(%rdi), %ymm12
4097 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 192(%rdi), %ymm1
4098 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0],ymm12[1],ymm1[2,3],ymm12[4],ymm1[5,6],ymm12[7]
4099 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa %ymm1, %ymm14
4100 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm4, %xmm2, %xmm1
4101 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm2, %xmm0
4102 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[0,2,0,3]
4103 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
4104 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
4105 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 160(%rdi), %ymm5
4106 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa (%rdi), %ymm15
4107 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 32(%rdi), %ymm11
4108 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 64(%rdi), %ymm6
4109 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 128(%rdi), %ymm7
4110 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1],ymm5[2],ymm7[3,4],ymm5[5],ymm7[6,7]
4111 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm7, %ymm23
4112 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm5, %ymm25
4113 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm5
4114 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} xmm8 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
4115 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm8, %xmm5, %xmm7
4116 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm9 = xmm1[2,2,2,2,4,5,6,7]
4117 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,2]
4118 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm9[3],xmm7[4,5],xmm9[6],xmm7[7]
4119 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
4120 ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $2, %xmm3, %zmm7, %zmm3
4121 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
4122 ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm6[2,3],mem[2,3]
4123 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm15[0],ymm11[1],ymm15[2,3],ymm11[4],ymm15[5,6],ymm11[7]
4124 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm11, %ymm16
4125 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm4, %xmm3, %xmm7
4126 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm3, %xmm4
4127 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm9 = xmm4[0,2,0,3]
4128 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,6,6,7]
4129 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm9[2],xmm7[3],xmm9[4,5],xmm7[6,7]
4130 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, 96(%rdi), %ymm6, %ymm9
4131 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0],ymm13[1],ymm9[2,3,4,5],ymm13[6],ymm9[7]
4132 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm9, %ymm22
4133 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm13, %ymm28
4134 ; AVX512F-ONLY-SLOW-NEXT: vpshufb {{.*#+}} ymm9 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
4135 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2],ymm9[3,4,5,6,7]
4136 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
4137 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 352(%rdi), %ymm9
4138 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 320(%rdi), %ymm10
4139 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm7 = ymm10[0,1],ymm9[2],ymm10[3,4],ymm9[5],ymm10[6,7]
4140 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm10, %ymm19
4141 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm9, %ymm20
4142 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm7, %xmm10
4143 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm8, %xmm10, %xmm8
4144 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm9 = xmm7[2,2,2,2,4,5,6,7]
4145 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,2]
4146 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2],xmm9[3],xmm8[4,5],xmm9[6],xmm8[7]
4147 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm9
4148 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 256(%rdi), %ymm8
4149 ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm11 = ymm8[2,3],mem[2,3]
4150 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, 288(%rdi), %ymm8, %ymm13
4151 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm13[0],ymm11[1],ymm13[2,3,4,5],ymm11[6],ymm13[7]
4152 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm13, %ymm26
4153 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm11, %ymm27
4154 ; AVX512F-ONLY-SLOW-NEXT: vpshufb {{.*#+}} ymm11 = ymm8[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
4155 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} ymm9 = ymm11[0,1,2],ymm9[3,4,5,6,7],ymm11[8,9,10],ymm9[11,12,13,14,15]
4156 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,4,6]
4157 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3],ymm9[4,5,6,7]
4158 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm9, %ymm29
4159 ; AVX512F-ONLY-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm9 = [2,3,14,15,10,11,0,0,2,3,14,15,10,11,0,0]
4160 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm9, %xmm0, %xmm0
4161 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} xmm11 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
4162 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm11, %xmm2, %xmm2
4163 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6,7]
4164 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} xmm2 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
4165 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm2, %xmm5, %xmm5
4166 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
4167 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm5[0,1,2],xmm1[3],xmm5[4,5],xmm1[6],xmm5[7]
4168 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
4169 ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
4170 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
4171 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm9, %xmm4, %xmm0
4172 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm11, %xmm3, %xmm1
4173 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
4174 ; AVX512F-ONLY-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
4175 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
4176 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
4177 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm2, %xmm10, %xmm0
4178 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm7[0,1,2,3,5,5,5,5]
4179 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
4180 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
4181 ; AVX512F-ONLY-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm8[2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
4182 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
4183 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
4184 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
4185 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm0, %ymm18
4186 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1],ymm12[2],ymm14[3,4],ymm12[5],ymm14[6,7]
4187 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm12, %ymm30
4188 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm14, %ymm31
4189 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1
4190 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm10 = xmm0[2,1,2,3]
4191 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm11 = xmm1[0,3,2,1]
4192 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm11[0,0,2,3,4,5,6,7]
4193 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
4194 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm10[2,1,2,0,4,5,6,7]
4195 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
4196 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm23, %ymm1
4197 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm25, %ymm2
4198 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
4199 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm2
4200 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm9 = xmm1[2,1,0,3]
4201 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm9[0,0,0,0,4,5,6,7]
4202 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
4203 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm8 = xmm2[0,1,2,1]
4204 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,6,5,6,4]
4205 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
4206 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
4207 ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm17
4208 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm16, %ymm24
4209 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm16, %ymm0
4210 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1],ymm0[2],ymm15[3,4],ymm0[5],ymm15[6,7]
4211 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm15, %ymm21
4212 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1
4213 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[2,1,2,3]
4214 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[0,3,2,1]
4215 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm7[0,0,2,3,4,5,6,7]
4216 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
4217 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm6[2,1,2,0,4,5,6,7]
4218 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
4219 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm22, %ymm13
4220 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm28, %ymm12
4221 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm13[0,1],ymm12[2],ymm13[3],ymm12[4],ymm13[5,6],ymm12[7]
4222 ; AVX512F-ONLY-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm5[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
4223 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
4224 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4225 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm20, %ymm15
4226 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm19, %ymm0
4227 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm15[0,1],ymm0[2],ymm15[3,4],ymm0[5],ymm15[6,7]
4228 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm0
4229 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,1,0,3]
4230 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm4[0,0,0,0,4,5,6,7]
4231 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
4232 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[0,1,2,1]
4233 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm14 = xmm3[0,1,2,3,6,5,6,4]
4234 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm14[4],xmm1[5,6],xmm14[7]
4235 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm14
4236 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm26, %ymm0
4237 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm27, %ymm1
4238 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2],ymm0[3],ymm1[4],ymm0[5,6],ymm1[7]
4239 ; AVX512F-ONLY-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
4240 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} ymm14 = ymm0[0,1,2],ymm14[3,4,5,6,7],ymm0[8,9,10],ymm14[11,12,13,14,15]
4241 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
4242 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7]
4243 ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm16
4244 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
4245 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $226, %zmm17, %zmm0, %zmm2
4246 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
4247 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $184, %zmm2, %zmm17, %zmm16
4248 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm10[3,1,2,1,4,5,6,7]
4249 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm10 = xmm11[0,1,3,3,4,5,6,7]
4250 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,7,7,7,7]
4251 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm10[1,2],xmm2[3],xmm10[4,5,6,7]
4252 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,7,5,6,5]
4253 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm9 = xmm9[1,1,1,1,4,5,6,7]
4254 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,7,7]
4255 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0,1,2,3],xmm8[4],xmm9[5,6],xmm8[7]
4256 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
4257 ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $2, %xmm2, %zmm8, %zmm2
4258 ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $1, %ymm29, %zmm0, %zmm22
4259 ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $1, %ymm18, %zmm0, %zmm18
4260 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[3,1,2,1,4,5,6,7]
4261 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[0,1,3,3,4,5,6,7]
4262 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,7,7,7]
4263 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0],xmm7[1,2],xmm6[3],xmm7[4,5,6,7]
4264 ; AVX512F-ONLY-SLOW-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
4265 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm5[5,6,7]
4266 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
4267 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
4268 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[1,1,1,1,4,5,6,7]
4269 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,7]
4270 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
4271 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
4272 ; AVX512F-ONLY-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
4273 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
4274 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
4275 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
4276 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $226, %zmm2, %zmm0, %zmm5
4277 ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm20
4278 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $184, %zmm5, %zmm17, %zmm20
4279 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} xmm7 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
4280 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm30, %ymm0
4281 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm31, %ymm1
4282 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
4283 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm7, %xmm0, %xmm2
4284 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1
4285 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm1[2,2,2,2,4,5,6,7]
4286 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
4287 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm23, %ymm3
4288 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm25, %ymm4
4289 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
4290 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm3, %xmm4
4291 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[0,3,2,1]
4292 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm6[0,1,0,2,4,5,6,7]
4293 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,6,6,6]
4294 ; AVX512F-ONLY-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm9 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
4295 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm9, %xmm4, %xmm5
4296 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm5[4],xmm3[5],xmm5[6,7]
4297 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
4298 ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $2, %xmm2, %zmm3, %zmm2
4299 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm12[0],ymm13[1],ymm12[2,3,4,5],ymm13[6],ymm12[7]
4300 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm24, %ymm3
4301 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm21, %ymm8
4302 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm8[2],ymm3[3,4],ymm8[5],ymm3[6,7]
4303 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm7, %xmm3, %xmm8
4304 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm3, %xmm7
4305 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm10 = xmm7[2,2,2,2,4,5,6,7]
4306 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0],xmm10[1],xmm8[2,3],xmm10[4],xmm8[5,6,7]
4307 ; AVX512F-ONLY-SLOW-NEXT: vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4308 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} ymm11 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
4309 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $236, %ymm11, %ymm10, %ymm8
4310 ; AVX512F-ONLY-SLOW-NEXT: movw $31, %ax
4311 ; AVX512F-ONLY-SLOW-NEXT: kmovw %eax, %k1
4312 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa32 %zmm8, %zmm2 {%k1}
4313 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm19, %ymm8
4314 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm10 = ymm8[0],ymm15[1],ymm8[2,3],ymm15[4],ymm8[5,6],ymm15[7]
4315 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm10, %xmm8
4316 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm9, %xmm8, %xmm14
4317 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm9 = xmm10[0,3,2,1]
4318 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm10 = xmm9[0,1,0,2,4,5,6,7]
4319 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,6,6,6]
4320 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3],xmm14[4],xmm10[5],xmm14[6,7]
4321 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm14
4322 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm26, %ymm10
4323 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm27, %ymm12
4324 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm10 = ymm12[0],ymm10[1],ymm12[2,3,4,5],ymm10[6],ymm12[7]
4325 ; AVX512F-ONLY-SLOW-NEXT: vpshufb {{.*#+}} ymm15 = ymm10[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
4326 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0,1,2,3,4],ymm14[5,6,7]
4327 ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $1, %ymm14, %zmm0, %zmm14
4328 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} xmm15 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
4329 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm15, %xmm0, %xmm0
4330 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
4331 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
4332 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
4333 ; AVX512F-ONLY-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm1 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
4334 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm1, %xmm4, %xmm4
4335 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[0,1,1,3,4,5,6,7]
4336 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,3,3]
4337 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4],xmm6[5],xmm4[6,7]
4338 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
4339 ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm4, %zmm0
4340 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm15, %xmm3, %xmm3
4341 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm7[1,1,2,3]
4342 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
4343 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
4344 ; AVX512F-ONLY-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4345 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $236, %ymm11, %ymm4, %ymm3
4346 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa32 %zmm3, %zmm0 {%k1}
4347 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm1, %xmm8, %xmm1
4348 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm9[0,1,1,3,4,5,6,7]
4349 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,1,3,3]
4350 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4],xmm3[5],xmm1[6,7]
4351 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
4352 ; AVX512F-ONLY-SLOW-NEXT: vpshufb {{.*#+}} ymm3 = ymm10[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
4353 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6,7]
4354 ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm1
4355 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
4356 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
4357 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm4 # 64-byte Folded Reload
4358 ; AVX512F-ONLY-SLOW-NEXT: movw $-2048, %ax # imm = 0xF800
4359 ; AVX512F-ONLY-SLOW-NEXT: kmovw %eax, %k1
4360 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa32 %zmm22, %zmm4 {%k1}
4361 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm4, (%rsi)
4362 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
4363 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm4 # 64-byte Folded Reload
4364 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa32 %zmm18, %zmm4 {%k1}
4365 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm4, (%rdx)
4366 ; AVX512F-ONLY-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
4367 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $184, %zmm2, %zmm17, %zmm14
4368 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $184, %zmm0, %zmm17, %zmm1
4369 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm16, (%rcx)
4370 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm20, (%r8)
4371 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm14, (%r9)
4372 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm1, (%rax)
4373 ; AVX512F-ONLY-SLOW-NEXT: addq $136, %rsp
4374 ; AVX512F-ONLY-SLOW-NEXT: vzeroupper
4375 ; AVX512F-ONLY-SLOW-NEXT: retq
4377 ; AVX512F-ONLY-FAST-LABEL: load_i16_stride6_vf32:
4378 ; AVX512F-ONLY-FAST: # %bb.0:
4379 ; AVX512F-ONLY-FAST-NEXT: subq $136, %rsp
4380 ; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm5 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
4381 ; AVX512F-ONLY-FAST-NEXT: vmovdqa 224(%rdi), %ymm15
4382 ; AVX512F-ONLY-FAST-NEXT: vmovdqa 192(%rdi), %ymm1
4383 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm1[0],ymm15[1],ymm1[2,3],ymm15[4],ymm1[5,6],ymm15[7]
4384 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm1, %ymm17
4385 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm5, %xmm3, %xmm0
4386 ; AVX512F-ONLY-FAST-NEXT: vpbroadcastq {{.*#+}} xmm6 = [8,9,12,13,0,1,0,0,8,9,12,13,0,1,0,0]
4387 ; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm3, %xmm1
4388 ; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm11 = xmm1[2,1,0,3]
4389 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm6, %xmm11, %xmm1
4390 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
4391 ; AVX512F-ONLY-FAST-NEXT: vmovdqa 160(%rdi), %ymm4
4392 ; AVX512F-ONLY-FAST-NEXT: vmovdqa (%rdi), %ymm0
4393 ; AVX512F-ONLY-FAST-NEXT: vmovdqa 128(%rdi), %ymm13
4394 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm13[0,1],ymm4[2],ymm13[3,4],ymm4[5],ymm13[6,7]
4395 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm4, %ymm24
4396 ; AVX512F-ONLY-FAST-NEXT: vpbroadcastd {{.*#+}} xmm7 = [8,9,4,5,8,9,4,5,8,9,4,5,8,9,4,5]
4397 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm7, %xmm1, %xmm8
4398 ; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm1, %xmm4
4399 ; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm10 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
4400 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm10, %xmm4, %xmm9
4401 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0,1,2],xmm8[3],xmm9[4,5],xmm8[6],xmm9[7]
4402 ; AVX512F-ONLY-FAST-NEXT: vmovdqa 32(%rdi), %ymm12
4403 ; AVX512F-ONLY-FAST-NEXT: vmovdqa 64(%rdi), %ymm9
4404 ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
4405 ; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $2, %xmm2, %zmm8, %zmm2
4406 ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
4407 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0],ymm12[1],ymm0[2,3],ymm12[4],ymm0[5,6],ymm12[7]
4408 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm12, %ymm16
4409 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm0, %ymm22
4410 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm5, %xmm2, %xmm8
4411 ; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm2, %xmm5
4412 ; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,1,0,3]
4413 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm6, %xmm5, %xmm6
4414 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1],xmm6[2],xmm8[3],xmm6[4,5],xmm8[6,7]
4415 ; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm9[2,3],mem[2,3]
4416 ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, 96(%rdi), %ymm9, %ymm14
4417 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm14[0],ymm0[1],ymm14[2,3,4,5],ymm0[6],ymm14[7]
4418 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm0, %ymm28
4419 ; AVX512F-ONLY-FAST-NEXT: vpshufb {{.*#+}} ymm9 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
4420 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2],ymm9[3,4,5,6,7]
4421 ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
4422 ; AVX512F-ONLY-FAST-NEXT: vmovdqa 352(%rdi), %ymm0
4423 ; AVX512F-ONLY-FAST-NEXT: vmovdqa 320(%rdi), %ymm8
4424 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm9 = ymm8[0,1],ymm0[2],ymm8[3,4],ymm0[5],ymm8[6,7]
4425 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm8, %ymm19
4426 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm0, %ymm20
4427 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm7, %xmm9, %xmm8
4428 ; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm9, %xmm7
4429 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm10, %xmm7, %xmm10
4430 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm8 = xmm10[0,1,2],xmm8[3],xmm10[4,5],xmm8[6],xmm10[7]
4431 ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm10
4432 ; AVX512F-ONLY-FAST-NEXT: vmovdqa 256(%rdi), %ymm8
4433 ; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm8[2,3],mem[2,3]
4434 ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, 288(%rdi), %ymm8, %ymm12
4435 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm8 = ymm12[0],ymm0[1],ymm12[2,3,4,5],ymm0[6],ymm12[7]
4436 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm12, %ymm25
4437 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm0, %ymm26
4438 ; AVX512F-ONLY-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm8[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
4439 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} ymm10 = ymm0[0,1,2],ymm10[3,4,5,6,7],ymm0[8,9,10],ymm10[11,12,13,14,15]
4440 ; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
4441 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
4442 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm0, %ymm29
4443 ; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm0 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
4444 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm0, %xmm3, %xmm3
4445 ; AVX512F-ONLY-FAST-NEXT: vpbroadcastq {{.*#+}} xmm10 = [10,11,14,15,2,3,0,0,10,11,14,15,2,3,0,0]
4446 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm10, %xmm11, %xmm11
4447 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm11[2],xmm3[3],xmm11[4,5],xmm3[6,7]
4448 ; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm11 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
4449 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm11, %xmm4, %xmm4
4450 ; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
4451 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3],xmm4[4,5],xmm1[6],xmm4[7]
4452 ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
4453 ; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $2, %xmm3, %zmm1, %zmm1
4454 ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
4455 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm0, %xmm2, %xmm0
4456 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm10, %xmm5, %xmm1
4457 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
4458 ; AVX512F-ONLY-FAST-NEXT: vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
4459 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
4460 ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
4461 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm11, %xmm7, %xmm0
4462 ; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm1 = xmm9[0,1,2,3,5,5,5,5]
4463 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
4464 ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
4465 ; AVX512F-ONLY-FAST-NEXT: vpshufb {{.*#+}} ymm1 = ymm8[2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
4466 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
4467 ; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
4468 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
4469 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm0, %ymm18
4470 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm17, %ymm0
4471 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm15[2],ymm0[3,4],ymm15[5],ymm0[6,7]
4472 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm15, %ymm30
4473 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm17, %ymm31
4474 ; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm0, %xmm1
4475 ; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm10 = xmm0[2,1,2,3]
4476 ; AVX512F-ONLY-FAST-NEXT: vpshuflw {{.*#+}} xmm0 = xmm10[2,1,2,0,4,5,6,7]
4477 ; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm9 = xmm1[0,3,2,1]
4478 ; AVX512F-ONLY-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm9[u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13]
4479 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5,6,7]
4480 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm24, %ymm0
4481 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm13[2],ymm0[3,4],ymm13[5],ymm0[6,7]
4482 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm13, %ymm21
4483 ; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm0, %xmm2
4484 ; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[2,1,0,3]
4485 ; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm0 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
4486 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm0, %xmm8, %xmm3
4487 ; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm7 = xmm2[0,1,2,1]
4488 ; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm2 = xmm7[0,1,2,3,6,5,6,4]
4489 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5,6],xmm2[7]
4490 ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
4491 ; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm17
4492 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm16, %ymm23
4493 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm16, %ymm1
4494 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm22, %ymm2
4495 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
4496 ; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm1, %xmm2
4497 ; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,1,2,3]
4498 ; AVX512F-ONLY-FAST-NEXT: vpshuflw {{.*#+}} xmm1 = xmm5[2,1,2,0,4,5,6,7]
4499 ; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[0,3,2,1]
4500 ; AVX512F-ONLY-FAST-NEXT: vpshufb {{.*#+}} xmm2 = xmm4[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
4501 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2],xmm1[3],xmm2[4,5,6,7]
4502 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm28, %ymm12
4503 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm14[0,1],ymm12[2],ymm14[3],ymm12[4],ymm14[5,6],ymm12[7]
4504 ; AVX512F-ONLY-FAST-NEXT: vpshufb {{.*#+}} ymm2 = ymm6[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
4505 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
4506 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm11 = ymm1[0,1,2,3],ymm2[4,5,6,7]
4507 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm19, %ymm27
4508 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm20, %ymm15
4509 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm19, %ymm1
4510 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm15[0,1],ymm1[2],ymm15[3,4],ymm1[5],ymm15[6,7]
4511 ; AVX512F-ONLY-FAST-NEXT: vextracti32x4 $1, %ymm1, %xmm16
4512 ; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,1,0,3]
4513 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm0, %xmm3, %xmm0
4514 ; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm16[0,1,2,1]
4515 ; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm13 = xmm2[0,1,2,3,6,5,6,4]
4516 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm13[4],xmm0[5,6],xmm13[7]
4517 ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm13
4518 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm25, %ymm0
4519 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm26, %ymm1
4520 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2],ymm0[3],ymm1[4],ymm0[5,6],ymm1[7]
4521 ; AVX512F-ONLY-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
4522 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} ymm13 = ymm0[0,1,2],ymm13[3,4,5,6,7],ymm0[8,9,10],ymm13[11,12,13,14,15]
4523 ; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
4524 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
4525 ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm16
4526 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
4527 ; AVX512F-ONLY-FAST-NEXT: vpternlogq $226, %zmm17, %zmm0, %zmm11
4528 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
4529 ; AVX512F-ONLY-FAST-NEXT: vpternlogq $184, %zmm11, %zmm17, %zmm16
4530 ; AVX512F-ONLY-FAST-NEXT: vpshuflw {{.*#+}} xmm10 = xmm10[3,1,2,1,4,5,6,7]
4531 ; AVX512F-ONLY-FAST-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15]
4532 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm9 = xmm10[0],xmm9[1,2],xmm10[3],xmm9[4,5,6,7]
4533 ; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm10 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
4534 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm10, %xmm8, %xmm8
4535 ; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,5,6,5]
4536 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm7 = xmm8[0,1,2,3],xmm7[4],xmm8[5,6],xmm7[7]
4537 ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
4538 ; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $2, %xmm9, %zmm7, %zmm7
4539 ; AVX512F-ONLY-FAST-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[3,1,2,1,4,5,6,7]
4540 ; AVX512F-ONLY-FAST-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
4541 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0],xmm4[1,2],xmm5[3],xmm4[4,5,6,7]
4542 ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $1, %ymm29, %zmm0, %zmm19
4543 ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $1, %ymm18, %zmm0, %zmm28
4544 ; AVX512F-ONLY-FAST-NEXT: vpshufb {{.*#+}} ymm5 = ymm6[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
4545 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7]
4546 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
4547 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm10, %xmm3, %xmm3
4548 ; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
4549 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5,6],xmm2[7]
4550 ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
4551 ; AVX512F-ONLY-FAST-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
4552 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
4553 ; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
4554 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
4555 ; AVX512F-ONLY-FAST-NEXT: vpternlogq $226, %zmm7, %zmm0, %zmm4
4556 ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm20
4557 ; AVX512F-ONLY-FAST-NEXT: vpternlogq $184, %zmm4, %zmm17, %zmm20
4558 ; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm4 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
4559 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm30, %ymm0
4560 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm31, %ymm1
4561 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
4562 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm4, %xmm1, %xmm0
4563 ; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm1, %xmm5
4564 ; AVX512F-ONLY-FAST-NEXT: vpshuflw {{.*#+}} xmm2 = xmm5[2,2,2,2,4,5,6,7]
4565 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
4566 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm24, %ymm2
4567 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm21, %ymm3
4568 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
4569 ; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm2, %xmm6
4570 ; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
4571 ; AVX512F-ONLY-FAST-NEXT: vpbroadcastq {{.*#+}} xmm8 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
4572 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm8, %xmm6, %xmm3
4573 ; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm9 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
4574 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm9, %xmm2, %xmm7
4575 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm7[0,1,2,3],xmm3[4],xmm7[5],xmm3[6,7]
4576 ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
4577 ; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm3, %zmm3
4578 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0],ymm14[1],ymm12[2,3,4,5],ymm14[6],ymm12[7]
4579 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm23, %ymm7
4580 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm22, %ymm10
4581 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm10[2],ymm7[3,4],ymm10[5],ymm7[6,7]
4582 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm4, %xmm7, %xmm4
4583 ; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm7, %xmm10
4584 ; AVX512F-ONLY-FAST-NEXT: vpshuflw {{.*#+}} xmm11 = xmm10[2,2,2,2,4,5,6,7]
4585 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm11[1],xmm4[2,3],xmm11[4],xmm4[5,6,7]
4586 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 {{.*#+}} ymm23 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
4587 ; AVX512F-ONLY-FAST-NEXT: vpshufb {{.*#+}} ymm13 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4588 ; AVX512F-ONLY-FAST-NEXT: vpternlogq $236, %ymm23, %ymm13, %ymm4
4589 ; AVX512F-ONLY-FAST-NEXT: movw $31, %ax
4590 ; AVX512F-ONLY-FAST-NEXT: kmovw %eax, %k1
4591 ; AVX512F-ONLY-FAST-NEXT: vmovdqa32 %zmm4, %zmm3 {%k1}
4592 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm27, %ymm4
4593 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm13 = ymm4[0],ymm15[1],ymm4[2,3],ymm15[4],ymm4[5,6],ymm15[7]
4594 ; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm13, %xmm4
4595 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm8, %xmm4, %xmm15
4596 ; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm8 = xmm13[0,3,2,1]
4597 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm9, %xmm8, %xmm9
4598 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm15[4],xmm9[5],xmm15[6,7]
4599 ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm13
4600 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm25, %ymm9
4601 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm26, %ymm11
4602 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm9 = ymm11[0],ymm9[1],ymm11[2,3,4,5],ymm9[6],ymm11[7]
4603 ; AVX512F-ONLY-FAST-NEXT: vpshufb {{.*#+}} ymm15 = ymm9[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
4604 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3,4],ymm13[5,6,7]
4605 ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $1, %ymm13, %zmm0, %zmm13
4606 ; AVX512F-ONLY-FAST-NEXT: vpbroadcastd {{.*#+}} xmm15 = [10,11,6,7,10,11,6,7,10,11,6,7,10,11,6,7]
4607 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm15, %xmm10, %xmm10
4608 ; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm11 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
4609 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm11, %xmm7, %xmm7
4610 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm10[1],xmm7[2,3],xmm10[4],xmm7[5,6,7]
4611 ; AVX512F-ONLY-FAST-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4612 ; AVX512F-ONLY-FAST-NEXT: vpternlogq $248, %ymm23, %ymm7, %ymm0
4613 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm15, %xmm5, %xmm5
4614 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm11, %xmm1, %xmm1
4615 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3],xmm5[4],xmm1[5,6,7]
4616 ; AVX512F-ONLY-FAST-NEXT: vpbroadcastq {{.*#+}} xmm5 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
4617 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm5, %xmm6, %xmm6
4618 ; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm7 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
4619 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm7, %xmm2, %xmm2
4620 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4],xmm2[5],xmm6[6,7]
4621 ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
4622 ; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm1
4623 ; AVX512F-ONLY-FAST-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
4624 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm5, %xmm4, %xmm0
4625 ; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm7, %xmm8, %xmm2
4626 ; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4],xmm2[5],xmm0[6,7]
4627 ; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
4628 ; AVX512F-ONLY-FAST-NEXT: vpshufb {{.*#+}} ymm2 = ymm9[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
4629 ; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7]
4630 ; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
4631 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
4632 ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
4633 ; AVX512F-ONLY-FAST-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 64-byte Folded Reload
4634 ; AVX512F-ONLY-FAST-NEXT: movw $-2048, %ax # imm = 0xF800
4635 ; AVX512F-ONLY-FAST-NEXT: kmovw %eax, %k1
4636 ; AVX512F-ONLY-FAST-NEXT: vmovdqa32 %zmm19, %zmm4 {%k1}
4637 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm4, (%rsi)
4638 ; AVX512F-ONLY-FAST-NEXT: vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
4639 ; AVX512F-ONLY-FAST-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 64-byte Folded Reload
4640 ; AVX512F-ONLY-FAST-NEXT: vmovdqa32 %zmm28, %zmm4 {%k1}
4641 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm4, (%rdx)
4642 ; AVX512F-ONLY-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
4643 ; AVX512F-ONLY-FAST-NEXT: vpternlogq $184, %zmm3, %zmm17, %zmm13
4644 ; AVX512F-ONLY-FAST-NEXT: vpternlogq $184, %zmm1, %zmm17, %zmm0
4645 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm16, (%rcx)
4646 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm20, (%r8)
4647 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm13, (%r9)
4648 ; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm0, (%rax)
4649 ; AVX512F-ONLY-FAST-NEXT: addq $136, %rsp
4650 ; AVX512F-ONLY-FAST-NEXT: vzeroupper
4651 ; AVX512F-ONLY-FAST-NEXT: retq
4653 ; AVX512DQ-SLOW-LABEL: load_i16_stride6_vf32:
4654 ; AVX512DQ-SLOW: # %bb.0:
4655 ; AVX512DQ-SLOW-NEXT: pushq %rax
4656 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} xmm2 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
4657 ; AVX512DQ-SLOW-NEXT: vmovdqa 224(%rdi), %ymm0
4658 ; AVX512DQ-SLOW-NEXT: vmovdqa 192(%rdi), %ymm13
4659 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm13[0],ymm0[1],ymm13[2,3],ymm0[4],ymm13[5,6],ymm0[7]
4660 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm0, %ymm18
4661 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm2, %xmm8, %xmm0
4662 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm8, %xmm14
4663 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm14[0,2,0,3]
4664 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
4665 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
4666 ; AVX512DQ-SLOW-NEXT: vmovdqa 160(%rdi), %ymm4
4667 ; AVX512DQ-SLOW-NEXT: vmovdqa (%rdi), %ymm0
4668 ; AVX512DQ-SLOW-NEXT: vmovdqa 32(%rdi), %ymm9
4669 ; AVX512DQ-SLOW-NEXT: vmovdqa 64(%rdi), %ymm3
4670 ; AVX512DQ-SLOW-NEXT: vmovdqa 128(%rdi), %ymm6
4671 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1],ymm4[2],ymm6[3,4],ymm4[5],ymm6[6,7]
4672 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm6, %ymm22
4673 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm4, %ymm23
4674 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm5, %xmm15
4675 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} xmm7 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
4676 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm7, %xmm15, %xmm4
4677 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm6 = xmm5[2,2,2,2,4,5,6,7]
4678 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,2,2]
4679 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2],xmm6[3],xmm4[4,5],xmm6[6],xmm4[7]
4680 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
4681 ; AVX512DQ-SLOW-NEXT: vinserti32x4 $2, %xmm1, %zmm4, %zmm16
4682 ; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm10 = ymm3[2,3],mem[2,3]
4683 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm9[1],ymm0[2,3],ymm9[4],ymm0[5,6],ymm9[7]
4684 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm9, %ymm19
4685 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm0, %ymm21
4686 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm2, %xmm1, %xmm2
4687 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm6
4688 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm6[0,2,0,3]
4689 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
4690 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3],xmm4[4,5],xmm2[6,7]
4691 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, 96(%rdi), %ymm3, %ymm12
4692 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm12[0],ymm10[1],ymm12[2,3,4,5],ymm10[6],ymm12[7]
4693 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm10, %ymm29
4694 ; AVX512DQ-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
4695 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm10 = ymm2[0,1,2],ymm4[3,4,5,6,7]
4696 ; AVX512DQ-SLOW-NEXT: vmovdqa 352(%rdi), %ymm0
4697 ; AVX512DQ-SLOW-NEXT: vmovdqa 320(%rdi), %ymm2
4698 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
4699 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm2, %ymm24
4700 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm0, %ymm25
4701 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm4, %xmm2
4702 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm7, %xmm2, %xmm7
4703 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm11 = xmm4[2,2,2,2,4,5,6,7]
4704 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[0,1,2,2]
4705 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm11[3],xmm7[4,5],xmm11[6],xmm7[7]
4706 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm9
4707 ; AVX512DQ-SLOW-NEXT: vmovdqa 256(%rdi), %ymm7
4708 ; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm7[2,3],mem[2,3]
4709 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, 288(%rdi), %ymm7, %ymm11
4710 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm7 = ymm11[0],ymm0[1],ymm11[2,3,4,5],ymm0[6],ymm11[7]
4711 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm11, %ymm26
4712 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm0, %ymm27
4713 ; AVX512DQ-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm7[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
4714 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} ymm9 = ymm0[0,1,2],ymm9[3,4,5,6,7],ymm0[8,9,10],ymm9[11,12,13,14,15]
4715 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
4716 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
4717 ; AVX512DQ-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
4718 ; AVX512DQ-SLOW-NEXT: vpternlogq $226, %zmm16, %zmm17, %zmm10
4719 ; AVX512DQ-SLOW-NEXT: movw $-2048, %ax # imm = 0xF800
4720 ; AVX512DQ-SLOW-NEXT: kmovw %eax, %k1
4721 ; AVX512DQ-SLOW-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm10 {%k1}
4722 ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
4723 ; AVX512DQ-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm0 = [2,3,14,15,10,11,0,0,2,3,14,15,10,11,0,0]
4724 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm0, %xmm14, %xmm9
4725 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} xmm14 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
4726 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm14, %xmm8, %xmm8
4727 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1],xmm9[2],xmm8[3],xmm9[4,5],xmm8[6,7]
4728 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} xmm9 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
4729 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm9, %xmm15, %xmm10
4730 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
4731 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm5 = xmm10[0,1,2],xmm5[3],xmm10[4,5],xmm5[6],xmm10[7]
4732 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
4733 ; AVX512DQ-SLOW-NEXT: vinserti32x4 $2, %xmm8, %zmm5, %zmm5
4734 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm0, %xmm6, %xmm0
4735 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm14, %xmm1, %xmm1
4736 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
4737 ; AVX512DQ-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
4738 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm0[0,1,2],ymm1[3,4,5,6,7]
4739 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm9, %xmm2, %xmm0
4740 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,5,5,5,5]
4741 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
4742 ; AVX512DQ-SLOW-NEXT: vpternlogq $226, %zmm5, %zmm17, %zmm3
4743 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
4744 ; AVX512DQ-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm7[2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
4745 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
4746 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
4747 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
4748 ; AVX512DQ-SLOW-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm3 {%k1}
4749 ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
4750 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm18, %ymm20
4751 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm18, %ymm0
4752 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm0[2],ymm13[3,4],ymm0[5],ymm13[6,7]
4753 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm13, %ymm30
4754 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm15 = xmm0[2,1,2,3]
4755 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm0
4756 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm10 = xmm0[0,3,2,1]
4757 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm10[0,0,2,3,4,5,6,7]
4758 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
4759 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm15[2,1,2,0,4,5,6,7]
4760 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
4761 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm22, %ymm1
4762 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm23, %ymm2
4763 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
4764 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm2
4765 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm14 = xmm1[2,1,0,3]
4766 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm14[0,0,0,0,4,5,6,7]
4767 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
4768 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm2[0,1,2,1]
4769 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm7[0,1,2,3,6,5,6,4]
4770 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
4771 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
4772 ; AVX512DQ-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm16
4773 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm21, %ymm13
4774 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm19, %ymm0
4775 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm0[2],ymm13[3,4],ymm0[5],ymm13[6,7]
4776 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1
4777 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,1,2,3]
4778 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[0,3,2,1]
4779 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm5[0,0,2,3,4,5,6,7]
4780 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
4781 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm4[2,1,2,0,4,5,6,7]
4782 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
4783 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm29, %ymm11
4784 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm12[0,1],ymm11[2],ymm12[3],ymm11[4],ymm12[5,6],ymm11[7]
4785 ; AVX512DQ-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
4786 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
4787 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4788 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm24, %ymm0
4789 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm25, %ymm1
4790 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
4791 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1
4792 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[2,1,0,3]
4793 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm6[0,0,0,0,4,5,6,7]
4794 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm8 = xmm0[0,1,2,3,4,4,6,7]
4795 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,1,2,1]
4796 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,6,5,6,4]
4797 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm8[0,1,2,3],xmm1[4],xmm8[5,6],xmm1[7]
4798 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm8
4799 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm27, %ymm0
4800 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm26, %ymm1
4801 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6],ymm0[7]
4802 ; AVX512DQ-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
4803 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} ymm8 = ymm0[0,1,2],ymm8[3,4,5,6,7],ymm0[8,9,10],ymm8[11,12,13,14,15]
4804 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
4805 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6,7]
4806 ; AVX512DQ-SLOW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm18
4807 ; AVX512DQ-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
4808 ; AVX512DQ-SLOW-NEXT: vpternlogq $226, %zmm16, %zmm0, %zmm9
4809 ; AVX512DQ-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
4810 ; AVX512DQ-SLOW-NEXT: vpternlogq $184, %zmm9, %zmm17, %zmm18
4811 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm8 = xmm15[3,1,2,1,4,5,6,7]
4812 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm9 = xmm10[0,1,3,3,4,5,6,7]
4813 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,7,7,7]
4814 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1,2],xmm8[3],xmm9[4,5,6,7]
4815 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,5,6,5]
4816 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm9 = xmm14[1,1,1,1,4,5,6,7]
4817 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,7,7]
4818 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm7 = xmm9[0,1,2,3],xmm7[4],xmm9[5,6],xmm7[7]
4819 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
4820 ; AVX512DQ-SLOW-NEXT: vinserti32x4 $2, %xmm8, %zmm7, %zmm7
4821 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,1,4,5,6,7]
4822 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[0,1,3,3,4,5,6,7]
4823 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,7,7,7]
4824 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1,2],xmm4[3],xmm5[4,5,6,7]
4825 ; AVX512DQ-SLOW-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
4826 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
4827 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
4828 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
4829 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm6[1,1,1,1,4,5,6,7]
4830 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,7]
4831 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4],xmm4[5,6],xmm2[7]
4832 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
4833 ; AVX512DQ-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
4834 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
4835 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
4836 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
4837 ; AVX512DQ-SLOW-NEXT: vpternlogq $226, %zmm7, %zmm0, %zmm3
4838 ; AVX512DQ-SLOW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm16
4839 ; AVX512DQ-SLOW-NEXT: vpternlogq $184, %zmm3, %zmm17, %zmm16
4840 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} xmm5 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
4841 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm20, %ymm0
4842 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm30, %ymm1
4843 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
4844 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm5, %xmm0, %xmm2
4845 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1
4846 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm1[2,2,2,2,4,5,6,7]
4847 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
4848 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm22, %ymm2
4849 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm23, %ymm4
4850 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6],ymm4[7]
4851 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm4, %xmm2
4852 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm8 = xmm4[0,3,2,1]
4853 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm8[0,1,0,2,4,5,6,7]
4854 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,6,6,6]
4855 ; AVX512DQ-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm7 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
4856 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm7, %xmm2, %xmm6
4857 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm6[4],xmm4[5],xmm6[6,7]
4858 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
4859 ; AVX512DQ-SLOW-NEXT: vinserti32x4 $2, %xmm3, %zmm4, %zmm3
4860 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm11[0],ymm12[1],ymm11[2,3,4,5],ymm12[6],ymm11[7]
4861 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm19, %ymm6
4862 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm13[2],ymm6[3,4],ymm13[5],ymm6[6,7]
4863 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm5, %xmm6, %xmm5
4864 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm6, %xmm13
4865 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm9 = xmm13[2,2,2,2,4,5,6,7]
4866 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0],xmm9[1],xmm5[2,3],xmm9[4],xmm5[5,6,7]
4867 ; AVX512DQ-SLOW-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4868 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} ymm10 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
4869 ; AVX512DQ-SLOW-NEXT: vpternlogq $236, %ymm10, %ymm9, %ymm5
4870 ; AVX512DQ-SLOW-NEXT: movw $31, %ax
4871 ; AVX512DQ-SLOW-NEXT: kmovw %eax, %k1
4872 ; AVX512DQ-SLOW-NEXT: vinserti32x8 $0, %ymm5, %zmm0, %zmm3 {%k1}
4873 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm24, %ymm5
4874 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm25, %ymm9
4875 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm9 = ymm5[0],ymm9[1],ymm5[2,3],ymm9[4],ymm5[5,6],ymm9[7]
4876 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm9, %xmm5
4877 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm7, %xmm5, %xmm14
4878 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm9[0,3,2,1]
4879 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm9 = xmm7[0,1,0,2,4,5,6,7]
4880 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,6,6,6,6]
4881 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm14[4],xmm9[5],xmm14[6,7]
4882 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
4883 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm27, %ymm14
4884 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm26, %ymm11
4885 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm14[0],ymm11[1],ymm14[2,3,4,5],ymm11[6],ymm14[7]
4886 ; AVX512DQ-SLOW-NEXT: vpshufb {{.*#+}} ymm14 = ymm11[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
4887 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm9 = ymm14[0,1,2,3,4],ymm9[5,6,7]
4888 ; AVX512DQ-SLOW-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm9
4889 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} xmm14 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
4890 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm14, %xmm0, %xmm0
4891 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
4892 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
4893 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
4894 ; AVX512DQ-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm1 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
4895 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm1, %xmm2, %xmm2
4896 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm8 = xmm8[0,1,1,3,4,5,6,7]
4897 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm8 = xmm8[0,1,3,3]
4898 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm8[0,1,2,3],xmm2[4],xmm8[5],xmm2[6,7]
4899 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
4900 ; AVX512DQ-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm0
4901 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm14, %xmm6, %xmm2
4902 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm13[1,1,2,3]
4903 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,5,5]
4904 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1],xmm2[2,3],xmm6[4],xmm2[5,6,7]
4905 ; AVX512DQ-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4906 ; AVX512DQ-SLOW-NEXT: vpternlogq $236, %ymm10, %ymm4, %ymm2
4907 ; AVX512DQ-SLOW-NEXT: vinserti32x8 $0, %ymm2, %zmm0, %zmm0 {%k1}
4908 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm1, %xmm5, %xmm1
4909 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm7[0,1,1,3,4,5,6,7]
4910 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,3]
4911 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4],xmm2[5],xmm1[6,7]
4912 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
4913 ; AVX512DQ-SLOW-NEXT: vpshufb {{.*#+}} ymm2 = ymm11[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
4914 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
4915 ; AVX512DQ-SLOW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm1
4916 ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
4917 ; AVX512DQ-SLOW-NEXT: vmovaps %zmm2, (%rsi)
4918 ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
4919 ; AVX512DQ-SLOW-NEXT: vmovaps %zmm2, (%rdx)
4920 ; AVX512DQ-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
4921 ; AVX512DQ-SLOW-NEXT: vpternlogq $184, %zmm3, %zmm17, %zmm9
4922 ; AVX512DQ-SLOW-NEXT: vpternlogq $184, %zmm0, %zmm17, %zmm1
4923 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm18, (%rcx)
4924 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm16, (%r8)
4925 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm9, (%r9)
4926 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm1, (%rax)
4927 ; AVX512DQ-SLOW-NEXT: popq %rax
4928 ; AVX512DQ-SLOW-NEXT: vzeroupper
4929 ; AVX512DQ-SLOW-NEXT: retq
4931 ; AVX512DQ-FAST-LABEL: load_i16_stride6_vf32:
4932 ; AVX512DQ-FAST: # %bb.0:
4933 ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm0 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
4934 ; AVX512DQ-FAST-NEXT: vmovdqa 224(%rdi), %ymm12
4935 ; AVX512DQ-FAST-NEXT: vmovdqa 192(%rdi), %ymm2
4936 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm7 = ymm2[0],ymm12[1],ymm2[2,3],ymm12[4],ymm2[5,6],ymm12[7]
4937 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm2, %ymm25
4938 ; AVX512DQ-FAST-NEXT: vpshufb %xmm0, %xmm7, %xmm1
4939 ; AVX512DQ-FAST-NEXT: vpbroadcastq {{.*#+}} xmm3 = [8,9,12,13,0,1,0,0,8,9,12,13,0,1,0,0]
4940 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm7, %xmm2
4941 ; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm14 = xmm2[2,1,0,3]
4942 ; AVX512DQ-FAST-NEXT: vpshufb %xmm3, %xmm14, %xmm2
4943 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
4944 ; AVX512DQ-FAST-NEXT: vmovdqa 160(%rdi), %ymm4
4945 ; AVX512DQ-FAST-NEXT: vmovdqa (%rdi), %ymm2
4946 ; AVX512DQ-FAST-NEXT: vmovdqa 128(%rdi), %ymm5
4947 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
4948 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm5, %ymm20
4949 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm4, %ymm22
4950 ; AVX512DQ-FAST-NEXT: vpbroadcastd {{.*#+}} xmm8 = [8,9,4,5,8,9,4,5,8,9,4,5,8,9,4,5]
4951 ; AVX512DQ-FAST-NEXT: vpshufb %xmm8, %xmm6, %xmm4
4952 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm6, %xmm15
4953 ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm10 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
4954 ; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm15, %xmm5
4955 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[3],xmm5[4,5],xmm4[6],xmm5[7]
4956 ; AVX512DQ-FAST-NEXT: vmovdqa 32(%rdi), %ymm9
4957 ; AVX512DQ-FAST-NEXT: vmovdqa 64(%rdi), %ymm5
4958 ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
4959 ; AVX512DQ-FAST-NEXT: vinserti32x4 $2, %xmm1, %zmm4, %zmm16
4960 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm9[1],ymm2[2,3],ymm9[4],ymm2[5,6],ymm9[7]
4961 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm9, %ymm18
4962 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm2, %ymm19
4963 ; AVX512DQ-FAST-NEXT: vpshufb %xmm0, %xmm1, %xmm0
4964 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm1, %xmm4
4965 ; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm9 = xmm4[2,1,0,3]
4966 ; AVX512DQ-FAST-NEXT: vpshufb %xmm3, %xmm9, %xmm3
4967 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3],xmm3[4,5],xmm0[6,7]
4968 ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm5[2,3],mem[2,3]
4969 ; AVX512DQ-FAST-NEXT: vinserti128 $1, 96(%rdi), %ymm5, %ymm3
4970 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0],ymm2[1],ymm3[2,3,4,5],ymm2[6],ymm3[7]
4971 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm3, %ymm27
4972 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm2, %ymm28
4973 ; AVX512DQ-FAST-NEXT: vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
4974 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm11 = ymm0[0,1,2],ymm3[3,4,5,6,7]
4975 ; AVX512DQ-FAST-NEXT: vmovdqa 352(%rdi), %ymm0
4976 ; AVX512DQ-FAST-NEXT: vmovdqa 320(%rdi), %ymm2
4977 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
4978 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm2, %ymm21
4979 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm0, %ymm23
4980 ; AVX512DQ-FAST-NEXT: vpshufb %xmm8, %xmm4, %xmm0
4981 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm4, %xmm3
4982 ; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm3, %xmm8
4983 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm8[0,1,2],xmm0[3],xmm8[4,5],xmm0[6],xmm8[7]
4984 ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm8
4985 ; AVX512DQ-FAST-NEXT: vmovdqa 256(%rdi), %ymm0
4986 ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm10 = ymm0[2,3],mem[2,3]
4987 ; AVX512DQ-FAST-NEXT: vinserti128 $1, 288(%rdi), %ymm0, %ymm0
4988 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0],ymm10[1],ymm0[2,3,4,5],ymm10[6],ymm0[7]
4989 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm0, %ymm24
4990 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm10, %ymm26
4991 ; AVX512DQ-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
4992 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} ymm8 = ymm0[0,1,2],ymm8[3,4,5,6,7],ymm0[8,9,10],ymm8[11,12,13,14,15]
4993 ; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
4994 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6,7]
4995 ; AVX512DQ-FAST-NEXT: vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
4996 ; AVX512DQ-FAST-NEXT: vpternlogq $226, %zmm16, %zmm17, %zmm11
4997 ; AVX512DQ-FAST-NEXT: movw $-2048, %ax # imm = 0xF800
4998 ; AVX512DQ-FAST-NEXT: kmovw %eax, %k1
4999 ; AVX512DQ-FAST-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm11 {%k1}
5000 ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5001 ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm0 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
5002 ; AVX512DQ-FAST-NEXT: vpshufb %xmm0, %xmm7, %xmm7
5003 ; AVX512DQ-FAST-NEXT: vpbroadcastq {{.*#+}} xmm8 = [10,11,14,15,2,3,0,0,10,11,14,15,2,3,0,0]
5004 ; AVX512DQ-FAST-NEXT: vpshufb %xmm8, %xmm14, %xmm14
5005 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm14[2],xmm7[3],xmm14[4,5],xmm7[6,7]
5006 ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm14 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
5007 ; AVX512DQ-FAST-NEXT: vpshufb %xmm14, %xmm15, %xmm15
5008 ; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,5,5]
5009 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm15[0,1,2],xmm6[3],xmm15[4,5],xmm6[6],xmm15[7]
5010 ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
5011 ; AVX512DQ-FAST-NEXT: vinserti32x4 $2, %xmm7, %zmm6, %zmm6
5012 ; AVX512DQ-FAST-NEXT: vpshufb %xmm0, %xmm1, %xmm0
5013 ; AVX512DQ-FAST-NEXT: vpshufb %xmm8, %xmm9, %xmm1
5014 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
5015 ; AVX512DQ-FAST-NEXT: vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
5016 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm13 = ymm0[0,1,2],ymm1[3,4,5,6,7]
5017 ; AVX512DQ-FAST-NEXT: vpshufb %xmm14, %xmm3, %xmm0
5018 ; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,5,5,5,5]
5019 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
5020 ; AVX512DQ-FAST-NEXT: vpternlogq $226, %zmm6, %zmm17, %zmm13
5021 ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
5022 ; AVX512DQ-FAST-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
5023 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
5024 ; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
5025 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
5026 ; AVX512DQ-FAST-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm13 {%k1}
5027 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm25, %ymm0
5028 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm12[2],ymm0[3,4],ymm12[5],ymm0[6,7]
5029 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm12, %ymm29
5030 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm25, %ymm30
5031 ; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm15 = xmm0[2,1,2,3]
5032 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm0, %xmm0
5033 ; AVX512DQ-FAST-NEXT: vpshuflw {{.*#+}} xmm1 = xmm15[2,1,2,0,4,5,6,7]
5034 ; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[0,3,2,1]
5035 ; AVX512DQ-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm8[u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13]
5036 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
5037 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm20, %ymm0
5038 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm22, %ymm2
5039 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
5040 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm0, %xmm2
5041 ; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm14 = xmm0[2,1,0,3]
5042 ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm0 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
5043 ; AVX512DQ-FAST-NEXT: vpshufb %xmm0, %xmm14, %xmm3
5044 ; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm6 = xmm2[0,1,2,1]
5045 ; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm2 = xmm6[0,1,2,3,6,5,6,4]
5046 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5,6],xmm2[7]
5047 ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
5048 ; AVX512DQ-FAST-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm16
5049 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm18, %ymm25
5050 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm18, %ymm1
5051 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm19, %ymm2
5052 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
5053 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm1, %xmm3
5054 ; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,1,2,3]
5055 ; AVX512DQ-FAST-NEXT: vpshuflw {{.*#+}} xmm1 = xmm4[2,1,2,0,4,5,6,7]
5056 ; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,3,2,1]
5057 ; AVX512DQ-FAST-NEXT: vpshufb {{.*#+}} xmm5 = xmm3[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
5058 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm5 = xmm1[0],xmm5[1,2],xmm1[3],xmm5[4,5,6,7]
5059 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm27, %ymm12
5060 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm28, %ymm11
5061 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm12[0,1],ymm11[2],ymm12[3],ymm11[4],ymm12[5,6],ymm11[7]
5062 ; AVX512DQ-FAST-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
5063 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm7[5,6,7]
5064 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm9 = ymm5[0,1,2,3],ymm7[4,5,6,7]
5065 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm21, %ymm1
5066 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm23, %ymm5
5067 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1],ymm1[2],ymm5[3,4],ymm1[5],ymm5[6,7]
5068 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm5, %xmm7
5069 ; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,1,0,3]
5070 ; AVX512DQ-FAST-NEXT: vpshufb %xmm0, %xmm5, %xmm0
5071 ; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[0,1,2,1]
5072 ; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm10 = xmm7[0,1,2,3,6,5,6,4]
5073 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm10[4],xmm0[5,6],xmm10[7]
5074 ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm10
5075 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm26, %ymm0
5076 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm24, %ymm1
5077 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6],ymm0[7]
5078 ; AVX512DQ-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
5079 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} ymm10 = ymm0[0,1,2],ymm10[3,4,5,6,7],ymm0[8,9,10],ymm10[11,12,13,14,15]
5080 ; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
5081 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
5082 ; AVX512DQ-FAST-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm18
5083 ; AVX512DQ-FAST-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
5084 ; AVX512DQ-FAST-NEXT: vpternlogq $226, %zmm16, %zmm0, %zmm9
5085 ; AVX512DQ-FAST-NEXT: vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
5086 ; AVX512DQ-FAST-NEXT: vpternlogq $184, %zmm9, %zmm17, %zmm18
5087 ; AVX512DQ-FAST-NEXT: vpshuflw {{.*#+}} xmm9 = xmm15[3,1,2,1,4,5,6,7]
5088 ; AVX512DQ-FAST-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15]
5089 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1,2],xmm9[3],xmm8[4,5,6,7]
5090 ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm9 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
5091 ; AVX512DQ-FAST-NEXT: vpshufb %xmm9, %xmm14, %xmm10
5092 ; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,5,6,5]
5093 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm10[0,1,2,3],xmm6[4],xmm10[5,6],xmm6[7]
5094 ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
5095 ; AVX512DQ-FAST-NEXT: vinserti32x4 $2, %xmm8, %zmm6, %zmm6
5096 ; AVX512DQ-FAST-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,1,4,5,6,7]
5097 ; AVX512DQ-FAST-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
5098 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2],xmm4[3],xmm3[4,5,6,7]
5099 ; AVX512DQ-FAST-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
5100 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm2[5,6,7]
5101 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
5102 ; AVX512DQ-FAST-NEXT: vpshufb %xmm9, %xmm5, %xmm3
5103 ; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm4 = xmm7[0,1,2,3,7,5,6,5]
5104 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
5105 ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
5106 ; AVX512DQ-FAST-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
5107 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
5108 ; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
5109 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
5110 ; AVX512DQ-FAST-NEXT: vpternlogq $226, %zmm6, %zmm0, %zmm2
5111 ; AVX512DQ-FAST-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm16
5112 ; AVX512DQ-FAST-NEXT: vpternlogq $184, %zmm2, %zmm17, %zmm16
5113 ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm6 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
5114 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm29, %ymm0
5115 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm30, %ymm1
5116 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
5117 ; AVX512DQ-FAST-NEXT: vpshufb %xmm6, %xmm0, %xmm1
5118 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm0, %xmm3
5119 ; AVX512DQ-FAST-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[2,2,2,2,4,5,6,7]
5120 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
5121 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm20, %ymm1
5122 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm22, %ymm4
5123 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2,3],ymm4[4],ymm1[5,6],ymm4[7]
5124 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm1, %xmm5
5125 ; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
5126 ; AVX512DQ-FAST-NEXT: vpbroadcastq {{.*#+}} xmm8 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
5127 ; AVX512DQ-FAST-NEXT: vpshufb %xmm8, %xmm5, %xmm4
5128 ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm9 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
5129 ; AVX512DQ-FAST-NEXT: vpshufb %xmm9, %xmm1, %xmm7
5130 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0,1,2,3],xmm4[4],xmm7[5],xmm4[6,7]
5131 ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
5132 ; AVX512DQ-FAST-NEXT: vinserti32x4 $2, %xmm2, %zmm4, %zmm2
5133 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm11[0],ymm12[1],ymm11[2,3,4,5],ymm12[6],ymm11[7]
5134 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm25, %ymm7
5135 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm19, %ymm10
5136 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm10[2],ymm7[3,4],ymm10[5],ymm7[6,7]
5137 ; AVX512DQ-FAST-NEXT: vpshufb %xmm6, %xmm7, %xmm6
5138 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm7, %xmm10
5139 ; AVX512DQ-FAST-NEXT: vpshuflw {{.*#+}} xmm12 = xmm10[2,2,2,2,4,5,6,7]
5140 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0],xmm12[1],xmm6[2,3],xmm12[4],xmm6[5,6,7]
5141 ; AVX512DQ-FAST-NEXT: vmovdqa64 {{.*#+}} ymm19 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
5142 ; AVX512DQ-FAST-NEXT: vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
5143 ; AVX512DQ-FAST-NEXT: vpternlogq $236, %ymm19, %ymm12, %ymm6
5144 ; AVX512DQ-FAST-NEXT: movw $31, %ax
5145 ; AVX512DQ-FAST-NEXT: kmovw %eax, %k1
5146 ; AVX512DQ-FAST-NEXT: vinserti32x8 $0, %ymm6, %zmm0, %zmm2 {%k1}
5147 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm21, %ymm6
5148 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm23, %ymm12
5149 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm12 = ymm6[0],ymm12[1],ymm6[2,3],ymm12[4],ymm6[5,6],ymm12[7]
5150 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm12, %xmm6
5151 ; AVX512DQ-FAST-NEXT: vpshufb %xmm8, %xmm6, %xmm8
5152 ; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[0,3,2,1]
5153 ; AVX512DQ-FAST-NEXT: vpshufb %xmm9, %xmm12, %xmm9
5154 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0,1,2,3],xmm8[4],xmm9[5],xmm8[6,7]
5155 ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
5156 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm26, %ymm9
5157 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm24, %ymm11
5158 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0],ymm11[1],ymm9[2,3,4,5],ymm11[6],ymm9[7]
5159 ; AVX512DQ-FAST-NEXT: vpshufb {{.*#+}} ymm15 = ymm9[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
5160 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm8 = ymm15[0,1,2,3,4],ymm8[5,6,7]
5161 ; AVX512DQ-FAST-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm8
5162 ; AVX512DQ-FAST-NEXT: vpbroadcastd {{.*#+}} xmm15 = [10,11,6,7,10,11,6,7,10,11,6,7,10,11,6,7]
5163 ; AVX512DQ-FAST-NEXT: vpshufb %xmm15, %xmm10, %xmm10
5164 ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm14 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
5165 ; AVX512DQ-FAST-NEXT: vpshufb %xmm14, %xmm7, %xmm7
5166 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm10[1],xmm7[2,3],xmm10[4],xmm7[5,6,7]
5167 ; AVX512DQ-FAST-NEXT: vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
5168 ; AVX512DQ-FAST-NEXT: vpternlogq $248, %ymm19, %ymm7, %ymm4
5169 ; AVX512DQ-FAST-NEXT: vpshufb %xmm15, %xmm3, %xmm3
5170 ; AVX512DQ-FAST-NEXT: vpshufb %xmm14, %xmm0, %xmm0
5171 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
5172 ; AVX512DQ-FAST-NEXT: vpbroadcastq {{.*#+}} xmm3 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
5173 ; AVX512DQ-FAST-NEXT: vpshufb %xmm3, %xmm5, %xmm5
5174 ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm7 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
5175 ; AVX512DQ-FAST-NEXT: vpshufb %xmm7, %xmm1, %xmm1
5176 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm5[4],xmm1[5],xmm5[6,7]
5177 ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
5178 ; AVX512DQ-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
5179 ; AVX512DQ-FAST-NEXT: vinserti32x8 $0, %ymm4, %zmm0, %zmm0 {%k1}
5180 ; AVX512DQ-FAST-NEXT: vpshufb %xmm3, %xmm6, %xmm1
5181 ; AVX512DQ-FAST-NEXT: vpshufb %xmm7, %xmm12, %xmm3
5182 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4],xmm3[5],xmm1[6,7]
5183 ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
5184 ; AVX512DQ-FAST-NEXT: vpshufb {{.*#+}} ymm3 = ymm9[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
5185 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6,7]
5186 ; AVX512DQ-FAST-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm1
5187 ; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
5188 ; AVX512DQ-FAST-NEXT: vmovaps %zmm3, (%rsi)
5189 ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm13, (%rdx)
5190 ; AVX512DQ-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
5191 ; AVX512DQ-FAST-NEXT: vpternlogq $184, %zmm2, %zmm17, %zmm8
5192 ; AVX512DQ-FAST-NEXT: vpternlogq $184, %zmm0, %zmm17, %zmm1
5193 ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm18, (%rcx)
5194 ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm16, (%r8)
5195 ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm8, (%r9)
5196 ; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm1, (%rax)
5197 ; AVX512DQ-FAST-NEXT: vzeroupper
5198 ; AVX512DQ-FAST-NEXT: retq
5199 ;
5200 ; AVX512BW-LABEL: load_i16_stride6_vf32:
5201 ; AVX512BW: # %bb.0:
5202 ; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
5203 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm3
5204 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm5
5205 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm2
5206 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm4
5207 ; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm0
5208 ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm1
5209 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [0,0,0,0,0,0,4,10,16,22,28,34,40,46,52,58,0,0,0,0,0,0,4,10,16,22,28,34,40,46,52,58]
5210 ; AVX512BW-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
5211 ; AVX512BW-NEXT: vpermi2w %zmm0, %zmm1, %zmm7
5212 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,6,12,18,24,30,0,0,0,0,0,34,40,46,52,58,0,6,12,18,24,30,0,0,0,0,0,34,40,46,52,58]
5213 ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
5214 ; AVX512BW-NEXT: vpermi2w %zmm2, %zmm4, %zmm8
5215 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm6 = <0,6,12,18,24,30,36,42,48,54,60,u,u,u,u,u>
5216 ; AVX512BW-NEXT: vpermi2w %zmm5, %zmm3, %zmm6
5217 ; AVX512BW-NEXT: movl $4192256, %edi # imm = 0x3FF800
5218 ; AVX512BW-NEXT: kmovd %edi, %k1
5219 ; AVX512BW-NEXT: vmovdqu16 %zmm8, %zmm6 {%k1}
5220 ; AVX512BW-NEXT: movw $-2048, %di # imm = 0xF800
5221 ; AVX512BW-NEXT: kmovd %edi, %k2
5222 ; AVX512BW-NEXT: vmovdqa32 %zmm7, %zmm6 {%k2}
5223 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,0,0,0,0,5,11,17,23,29,35,41,47,53,59,0,0,0,0,0,0,5,11,17,23,29,35,41,47,53,59]
5224 ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
5225 ; AVX512BW-NEXT: vpermi2w %zmm0, %zmm1, %zmm8
5226 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [1,7,13,19,25,31,0,0,0,0,0,35,41,47,53,59,1,7,13,19,25,31,0,0,0,0,0,35,41,47,53,59]
5227 ; AVX512BW-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
5228 ; AVX512BW-NEXT: vpermi2w %zmm2, %zmm4, %zmm9
5229 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm7 = <1,7,13,19,25,31,37,43,49,55,61,u,u,u,u,u>
5230 ; AVX512BW-NEXT: vpermi2w %zmm5, %zmm3, %zmm7
5231 ; AVX512BW-NEXT: vmovdqu16 %zmm9, %zmm7 {%k1}
5232 ; AVX512BW-NEXT: vmovdqa32 %zmm8, %zmm7 {%k2}
5233 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,0,0,0,0,6,12,18,24,30,36,42,48,54,60,0,0,0,0,0,0,6,12,18,24,30,36,42,48,54,60]
5234 ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
5235 ; AVX512BW-NEXT: vpermi2w %zmm0, %zmm1, %zmm8
5236 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [34,40,46,52,58,0,0,0,0,0,0,4,10,16,22,28,34,40,46,52,58,0,0,0,0,0,0,4,10,16,22,28]
5237 ; AVX512BW-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
5238 ; AVX512BW-NEXT: vpermi2w %zmm4, %zmm2, %zmm9
5239 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm10 = <34,40,46,52,58,0,6,12,18,24,30,u,u,u,u,u>
5240 ; AVX512BW-NEXT: vpermi2w %zmm3, %zmm5, %zmm10
5241 ; AVX512BW-NEXT: movl $2095104, %edi # imm = 0x1FF800
5242 ; AVX512BW-NEXT: kmovd %edi, %k2
5243 ; AVX512BW-NEXT: vmovdqu16 %zmm9, %zmm10 {%k2}
5244 ; AVX512BW-NEXT: movl $-2097152, %edi # imm = 0xFFE00000
5245 ; AVX512BW-NEXT: kmovd %edi, %k1
5246 ; AVX512BW-NEXT: vmovdqu16 %zmm8, %zmm10 {%k1}
5247 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,0,0,0,1,7,13,19,25,31,37,43,49,55,61,0,0,0,0,0,1,7,13,19,25,31,37,43,49,55,61]
5248 ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
5249 ; AVX512BW-NEXT: vpermi2w %zmm0, %zmm1, %zmm8
5250 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [35,41,47,53,59,0,0,0,0,0,0,5,11,17,23,29,35,41,47,53,59,0,0,0,0,0,0,5,11,17,23,29]
5251 ; AVX512BW-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
5252 ; AVX512BW-NEXT: vpermi2w %zmm4, %zmm2, %zmm9
5253 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm11 = <35,41,47,53,59,1,7,13,19,25,31,u,u,u,u,u>
5254 ; AVX512BW-NEXT: vpermi2w %zmm3, %zmm5, %zmm11
5255 ; AVX512BW-NEXT: vmovdqu16 %zmm9, %zmm11 {%k2}
5256 ; AVX512BW-NEXT: vmovdqu16 %zmm8, %zmm11 {%k1}
5257 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm8 = <4,10,16,22,28,34,40,46,52,58,u,u,u,u,u,u>
5258 ; AVX512BW-NEXT: vpermi2w %zmm5, %zmm3, %zmm8
5259 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [36,42,48,54,60,0,0,0,0,0,0,6,12,18,24,30,36,42,48,54,60,0,0,0,0,0,0,6,12,18,24,30]
5260 ; AVX512BW-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
5261 ; AVX512BW-NEXT: vpermi2w %zmm4, %zmm2, %zmm9
5262 ; AVX512BW-NEXT: movw $31, %di
5263 ; AVX512BW-NEXT: kmovd %edi, %k2
5264 ; AVX512BW-NEXT: vmovdqa32 %zmm8, %zmm9 {%k2}
5265 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,0,0,0,34,40,46,52,58,0,6,12,18,24,30,0,0,0,0,0,34,40,46,52,58,0,6,12,18,24,30]
5266 ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
5267 ; AVX512BW-NEXT: vpermi2w %zmm1, %zmm0, %zmm8
5268 ; AVX512BW-NEXT: vmovdqu16 %zmm8, %zmm9 {%k1}
5269 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm8 = <5,11,17,23,29,35,41,47,53,59,u,u,u,u,u,u>
5270 ; AVX512BW-NEXT: vpermi2w %zmm5, %zmm3, %zmm8
5271 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm3 = [37,43,49,55,61,0,0,0,0,0,1,7,13,19,25,31,37,43,49,55,61,0,0,0,0,0,1,7,13,19,25,31]
5272 ; AVX512BW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3]
5273 ; AVX512BW-NEXT: vpermi2w %zmm4, %zmm2, %zmm3
5274 ; AVX512BW-NEXT: vmovdqa32 %zmm8, %zmm3 {%k2}
5275 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm2 = [0,0,0,0,0,35,41,47,53,59,1,7,13,19,25,31,0,0,0,0,0,35,41,47,53,59,1,7,13,19,25,31]
5276 ; AVX512BW-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3]
5277 ; AVX512BW-NEXT: vpermi2w %zmm1, %zmm0, %zmm2
5278 ; AVX512BW-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
5279 ; AVX512BW-NEXT: vmovdqa64 %zmm6, (%rsi)
5280 ; AVX512BW-NEXT: vmovdqa64 %zmm7, (%rdx)
5281 ; AVX512BW-NEXT: vmovdqa64 %zmm10, (%rcx)
5282 ; AVX512BW-NEXT: vmovdqa64 %zmm11, (%r8)
5283 ; AVX512BW-NEXT: vmovdqa64 %zmm9, (%r9)
5284 ; AVX512BW-NEXT: vmovdqa64 %zmm3, (%rax)
5285 ; AVX512BW-NEXT: vzeroupper
5286 ; AVX512BW-NEXT: retq
5287 %wide.vec = load <192 x i16>, ptr %in.vec, align 64
5288 %strided.vec0 = shufflevector <192 x i16> %wide.vec, <192 x i16> poison, <32 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42, i32 48, i32 54, i32 60, i32 66, i32 72, i32 78, i32 84, i32 90, i32 96, i32 102, i32 108, i32 114, i32 120, i32 126, i32 132, i32 138, i32 144, i32 150, i32 156, i32 162, i32 168, i32 174, i32 180, i32 186>
5289 %strided.vec1 = shufflevector <192 x i16> %wide.vec, <192 x i16> poison, <32 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43, i32 49, i32 55, i32 61, i32 67, i32 73, i32 79, i32 85, i32 91, i32 97, i32 103, i32 109, i32 115, i32 121, i32 127, i32 133, i32 139, i32 145, i32 151, i32 157, i32 163, i32 169, i32 175, i32 181, i32 187>
5290 %strided.vec2 = shufflevector <192 x i16> %wide.vec, <192 x i16> poison, <32 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44, i32 50, i32 56, i32 62, i32 68, i32 74, i32 80, i32 86, i32 92, i32 98, i32 104, i32 110, i32 116, i32 122, i32 128, i32 134, i32 140, i32 146, i32 152, i32 158, i32 164, i32 170, i32 176, i32 182, i32 188>
5291 %strided.vec3 = shufflevector <192 x i16> %wide.vec, <192 x i16> poison, <32 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45, i32 51, i32 57, i32 63, i32 69, i32 75, i32 81, i32 87, i32 93, i32 99, i32 105, i32 111, i32 117, i32 123, i32 129, i32 135, i32 141, i32 147, i32 153, i32 159, i32 165, i32 171, i32 177, i32 183, i32 189>
5292 %strided.vec4 = shufflevector <192 x i16> %wide.vec, <192 x i16> poison, <32 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46, i32 52, i32 58, i32 64, i32 70, i32 76, i32 82, i32 88, i32 94, i32 100, i32 106, i32 112, i32 118, i32 124, i32 130, i32 136, i32 142, i32 148, i32 154, i32 160, i32 166, i32 172, i32 178, i32 184, i32 190>
5293 %strided.vec5 = shufflevector <192 x i16> %wide.vec, <192 x i16> poison, <32 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47, i32 53, i32 59, i32 65, i32 71, i32 77, i32 83, i32 89, i32 95, i32 101, i32 107, i32 113, i32 119, i32 125, i32 131, i32 137, i32 143, i32 149, i32 155, i32 161, i32 167, i32 173, i32 179, i32 185, i32 191>
5294 store <32 x i16> %strided.vec0, ptr %out.vec0, align 64
5295 store <32 x i16> %strided.vec1, ptr %out.vec1, align 64
5296 store <32 x i16> %strided.vec2, ptr %out.vec2, align 64
5297 store <32 x i16> %strided.vec3, ptr %out.vec3, align 64
5298 store <32 x i16> %strided.vec4, ptr %out.vec4, align 64
5299 store <32 x i16> %strided.vec5, ptr %out.vec5, align 64
5300 ret void
5301 }
5303 define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4, ptr %out.vec5) nounwind {
5304 ; SSE-LABEL: load_i16_stride6_vf64:
5305 ; SSE: # %bb.0:
5306 ; SSE-NEXT: subq $1176, %rsp # imm = 0x498
5307 ; SSE-NEXT: movdqa 496(%rdi), %xmm5
5308 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5309 ; SSE-NEXT: movdqa 512(%rdi), %xmm8
5310 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5311 ; SSE-NEXT: movdqa 144(%rdi), %xmm7
5312 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5313 ; SSE-NEXT: movdqa 160(%rdi), %xmm3
5314 ; SSE-NEXT: movdqa 176(%rdi), %xmm0
5315 ; SSE-NEXT: movdqa 112(%rdi), %xmm6
5316 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5317 ; SSE-NEXT: movdqa 96(%rdi), %xmm4
5318 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5319 ; SSE-NEXT: movdqa 128(%rdi), %xmm1
5320 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5321 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,2,4,5,6,7]
5322 ; SSE-NEXT: movdqa {{.*#+}} xmm9 = [65535,65535,65535,0,0,0,65535,65535]
5323 ; SSE-NEXT: movdqa %xmm9, %xmm2
5324 ; SSE-NEXT: pandn %xmm1, %xmm2
5325 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,1,0,3]
5326 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5327 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
5328 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
5329 ; SSE-NEXT: pand %xmm9, %xmm1
5330 ; SSE-NEXT: por %xmm2, %xmm1
5331 ; SSE-NEXT: movdqa %xmm1, %xmm2
5332 ; SSE-NEXT: movdqa %xmm3, %xmm1
5333 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,2,3,3]
5334 ; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
5335 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5336 ; SSE-NEXT: movdqa %xmm0, %xmm4
5337 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm3[3,0]
5338 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5339 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5340 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm0[0,0]
5341 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm0[2,3]
5342 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5343 ; SSE-NEXT: pslld $16, %xmm0
5344 ; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
5345 ; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
5346 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,3,2,3]
5347 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5348 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
5349 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[1,3]
5350 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,0]
5351 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5352 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[0,1,1,2,4,5,6,7]
5353 ; SSE-NEXT: movdqa %xmm9, %xmm1
5354 ; SSE-NEXT: pandn %xmm0, %xmm1
5355 ; SSE-NEXT: movdqa 480(%rdi), %xmm0
5356 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5357 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
5358 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5359 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
5360 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
5361 ; SSE-NEXT: pand %xmm9, %xmm0
5362 ; SSE-NEXT: por %xmm1, %xmm0
5363 ; SSE-NEXT: movdqa %xmm0, %xmm2
5364 ; SSE-NEXT: movdqa 544(%rdi), %xmm3
5365 ; SSE-NEXT: movdqa 560(%rdi), %xmm1
5366 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,3,3]
5367 ; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
5368 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5369 ; SSE-NEXT: movdqa %xmm1, %xmm0
5370 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm3[3,0]
5371 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5372 ; SSE-NEXT: movdqa %xmm3, %xmm0
5373 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5374 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm1[0,0]
5375 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[2,3]
5376 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5377 ; SSE-NEXT: pslld $16, %xmm1
5378 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
5379 ; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
5380 ; SSE-NEXT: movdqa 528(%rdi), %xmm1
5381 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5382 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
5383 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5384 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,2,4,5,6,7]
5385 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[1,3]
5386 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0]
5387 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5388 ; SSE-NEXT: movdqa 32(%rdi), %xmm0
5389 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5390 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,2,4,5,6,7]
5391 ; SSE-NEXT: movdqa %xmm9, %xmm1
5392 ; SSE-NEXT: pandn %xmm0, %xmm1
5393 ; SSE-NEXT: movdqa (%rdi), %xmm0
5394 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5395 ; SSE-NEXT: movdqa 16(%rdi), %xmm2
5396 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
5397 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5398 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
5399 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
5400 ; SSE-NEXT: movdqa %xmm2, %xmm6
5401 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5402 ; SSE-NEXT: pand %xmm9, %xmm0
5403 ; SSE-NEXT: por %xmm1, %xmm0
5404 ; SSE-NEXT: movdqa %xmm0, %xmm2
5405 ; SSE-NEXT: movdqa 64(%rdi), %xmm3
5406 ; SSE-NEXT: movdqa 80(%rdi), %xmm0
5407 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,2,3,3]
5408 ; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
5409 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5410 ; SSE-NEXT: movdqa %xmm0, %xmm1
5411 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[3,0]
5412 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5413 ; SSE-NEXT: movdqa %xmm3, %xmm1
5414 ; SSE-NEXT: movaps %xmm3, (%rsp) # 16-byte Spill
5415 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm0[0,0]
5416 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm0[2,3]
5417 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5418 ; SSE-NEXT: pslld $16, %xmm0
5419 ; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
5420 ; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
5421 ; SSE-NEXT: movdqa 48(%rdi), %xmm0
5422 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5423 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
5424 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5425 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
5426 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[1,3]
5427 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,0]
5428 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5429 ; SSE-NEXT: movdqa 416(%rdi), %xmm0
5430 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5431 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,2,4,5,6,7]
5432 ; SSE-NEXT: movdqa %xmm9, %xmm1
5433 ; SSE-NEXT: pandn %xmm0, %xmm1
5434 ; SSE-NEXT: movdqa 400(%rdi), %xmm2
5435 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5436 ; SSE-NEXT: movdqa 384(%rdi), %xmm0
5437 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5438 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
5439 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5440 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
5441 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
5442 ; SSE-NEXT: pand %xmm9, %xmm0
5443 ; SSE-NEXT: por %xmm1, %xmm0
5444 ; SSE-NEXT: movdqa %xmm0, %xmm2
5445 ; SSE-NEXT: movdqa 448(%rdi), %xmm3
5446 ; SSE-NEXT: movdqa 464(%rdi), %xmm0
5447 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm3[2,2,3,3]
5448 ; SSE-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
5449 ; SSE-NEXT: movdqa %xmm0, %xmm1
5450 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[3,0]
5451 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5452 ; SSE-NEXT: movdqa %xmm3, %xmm1
5453 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5454 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm0[0,0]
5455 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm0[2,3]
5456 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5457 ; SSE-NEXT: pslld $16, %xmm0
5458 ; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
5459 ; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
5460 ; SSE-NEXT: movdqa 432(%rdi), %xmm0
5461 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5462 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
5463 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5464 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
5465 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[1,3]
5466 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,0]
5467 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5468 ; SSE-NEXT: movdqa 320(%rdi), %xmm0
5469 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5470 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,2,4,5,6,7]
5471 ; SSE-NEXT: movdqa %xmm9, %xmm1
5472 ; SSE-NEXT: pandn %xmm0, %xmm1
5473 ; SSE-NEXT: movdqa 304(%rdi), %xmm2
5474 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5475 ; SSE-NEXT: movdqa 288(%rdi), %xmm0
5476 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5477 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
5478 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5479 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
5480 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
5481 ; SSE-NEXT: pand %xmm9, %xmm0
5482 ; SSE-NEXT: por %xmm1, %xmm0
5483 ; SSE-NEXT: movdqa %xmm0, %xmm2
5484 ; SSE-NEXT: movdqa 352(%rdi), %xmm3
5485 ; SSE-NEXT: movdqa 368(%rdi), %xmm0
5486 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,2,3,3]
5487 ; SSE-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
5488 ; SSE-NEXT: movdqa %xmm0, %xmm1
5489 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[3,0]
5490 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5491 ; SSE-NEXT: movdqa %xmm3, %xmm1
5492 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5493 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm0[0,0]
5494 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm0[2,3]
5495 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5496 ; SSE-NEXT: pslld $16, %xmm0
5497 ; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
5498 ; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
5499 ; SSE-NEXT: movdqa 336(%rdi), %xmm0
5500 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5501 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
5502 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5503 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
5504 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[1,3]
5505 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,0]
5506 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5507 ; SSE-NEXT: movdqa 704(%rdi), %xmm0
5508 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5509 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,2,4,5,6,7]
5510 ; SSE-NEXT: movdqa %xmm9, %xmm1
5511 ; SSE-NEXT: pandn %xmm0, %xmm1
5512 ; SSE-NEXT: movdqa 688(%rdi), %xmm2
5513 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5514 ; SSE-NEXT: movdqa 672(%rdi), %xmm0
5515 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5516 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
5517 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5518 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
5519 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
5520 ; SSE-NEXT: pand %xmm9, %xmm0
5521 ; SSE-NEXT: por %xmm1, %xmm0
5522 ; SSE-NEXT: movdqa %xmm0, %xmm2
5523 ; SSE-NEXT: movdqa 736(%rdi), %xmm3
5524 ; SSE-NEXT: movdqa 752(%rdi), %xmm1
5525 ; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm3[2,2,3,3]
5526 ; SSE-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm1[0],xmm15[1],xmm1[1],xmm15[2],xmm1[2],xmm15[3],xmm1[3]
5527 ; SSE-NEXT: movdqa %xmm1, %xmm0
5528 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm3[3,0]
5529 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5530 ; SSE-NEXT: movdqa %xmm3, %xmm0
5531 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5532 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm1[0,0]
5533 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[2,3]
5534 ; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5535 ; SSE-NEXT: pslld $16, %xmm1
5536 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
5537 ; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
5538 ; SSE-NEXT: movdqa 720(%rdi), %xmm1
5539 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5540 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
5541 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5542 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,2,4,5,6,7]
5543 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[1,3]
5544 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0]
5545 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5546 ; SSE-NEXT: movdqa 224(%rdi), %xmm0
5547 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5548 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,2,4,5,6,7]
5549 ; SSE-NEXT: movdqa %xmm9, %xmm1
5550 ; SSE-NEXT: pandn %xmm0, %xmm1
5551 ; SSE-NEXT: movdqa 208(%rdi), %xmm2
5552 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5553 ; SSE-NEXT: movdqa 192(%rdi), %xmm0
5554 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5555 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
5556 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5557 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
5558 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
5559 ; SSE-NEXT: pand %xmm9, %xmm0
5560 ; SSE-NEXT: por %xmm1, %xmm0
5561 ; SSE-NEXT: movdqa %xmm0, %xmm1
5562 ; SSE-NEXT: movdqa 256(%rdi), %xmm4
5563 ; SSE-NEXT: movdqa 272(%rdi), %xmm0
5564 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[2,2,3,3]
5565 ; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
5566 ; SSE-NEXT: movdqa %xmm0, %xmm2
5567 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm4[3,0]
5568 ; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5569 ; SSE-NEXT: movdqa %xmm4, %xmm2
5570 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5571 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,0],xmm0[0,0]
5572 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm0[2,3]
5573 ; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5574 ; SSE-NEXT: pslld $16, %xmm0
5575 ; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
5576 ; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
5577 ; SSE-NEXT: movdqa 240(%rdi), %xmm0
5578 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5579 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
5580 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5581 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
5582 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm0[1,3]
5583 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,0]
5584 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5585 ; SSE-NEXT: movdqa 608(%rdi), %xmm0
5586 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5587 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,2,4,5,6,7]
5588 ; SSE-NEXT: movdqa %xmm9, %xmm2
5589 ; SSE-NEXT: pandn %xmm0, %xmm2
5590 ; SSE-NEXT: movdqa 592(%rdi), %xmm13
5591 ; SSE-NEXT: movdqa 576(%rdi), %xmm0
5592 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5593 ; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm0[0,1,0,3]
5594 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm14[0,1,2,3,4,6,6,7]
5595 ; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm13[2],xmm0[3],xmm13[3]
5596 ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5597 ; SSE-NEXT: pand %xmm9, %xmm0
5598 ; SSE-NEXT: por %xmm2, %xmm0
5599 ; SSE-NEXT: movdqa %xmm0, %xmm1
5600 ; SSE-NEXT: movdqa 640(%rdi), %xmm5
5601 ; SSE-NEXT: movdqa 656(%rdi), %xmm2
5602 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,2,3,3]
5603 ; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
5604 ; SSE-NEXT: movdqa %xmm2, %xmm0
5605 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm5[3,0]
5606 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5607 ; SSE-NEXT: movdqa %xmm5, %xmm0
5608 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5609 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm2[0,0]
5610 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm2[2,3]
5611 ; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5612 ; SSE-NEXT: pslld $16, %xmm2
5613 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
5614 ; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
5615 ; SSE-NEXT: movdqa 624(%rdi), %xmm2
5616 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5617 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[0,3,2,3]
5618 ; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm7[0,1,0,2,4,5,6,7]
5619 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm12[1,3]
5620 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
5621 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5622 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
5623 ; SSE-NEXT: movdqa %xmm11, %xmm0
5624 ; SSE-NEXT: psrld $16, %xmm0
5625 ; SSE-NEXT: pshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5626 ; SSE-NEXT: # xmm1 = mem[0,1,2,3,5,7,6,7]
5627 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
5628 ; SSE-NEXT: movdqa %xmm9, %xmm0
5629 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
5630 ; SSE-NEXT: pandn %xmm2, %xmm0
5631 ; SSE-NEXT: pand %xmm9, %xmm1
5632 ; SSE-NEXT: por %xmm0, %xmm1
5633 ; SSE-NEXT: pshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5634 ; SSE-NEXT: # xmm0 = mem[0,1,1,3,4,5,6,7]
5635 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
5636 ; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[3,1],xmm0[1,3]
5637 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm12[2,0]
5638 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5639 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5640 ; SSE-NEXT: psrld $16, %xmm0
5641 ; SSE-NEXT: pshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5642 ; SSE-NEXT: # xmm1 = mem[0,1,2,3,5,7,6,7]
5643 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
5644 ; SSE-NEXT: movdqa %xmm9, %xmm0
5645 ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5646 ; SSE-NEXT: pand %xmm9, %xmm1
5647 ; SSE-NEXT: por %xmm0, %xmm1
5648 ; SSE-NEXT: pshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5649 ; SSE-NEXT: # xmm0 = mem[0,1,1,3,4,5,6,7]
5650 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
5651 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,1],xmm0[1,3]
5652 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm5[2,0]
5653 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5654 ; SSE-NEXT: movdqa %xmm6, %xmm0
5655 ; SSE-NEXT: psrld $16, %xmm0
5656 ; SSE-NEXT: pshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5657 ; SSE-NEXT: # xmm1 = mem[0,1,2,3,5,7,6,7]
5658 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
5659 ; SSE-NEXT: movdqa %xmm9, %xmm0
5660 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
5661 ; SSE-NEXT: pandn %xmm6, %xmm0
5662 ; SSE-NEXT: pand %xmm9, %xmm1
5663 ; SSE-NEXT: por %xmm0, %xmm1
5664 ; SSE-NEXT: pshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5665 ; SSE-NEXT: # xmm0 = mem[0,1,1,3,4,5,6,7]
5666 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
5667 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,1],xmm0[1,3]
5668 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm5[2,0]
5669 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5670 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5671 ; SSE-NEXT: psrld $16, %xmm0
5672 ; SSE-NEXT: pshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5673 ; SSE-NEXT: # xmm1 = mem[0,1,2,3,5,7,6,7]
5674 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
5675 ; SSE-NEXT: movdqa %xmm9, %xmm0
5676 ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5677 ; SSE-NEXT: pand %xmm9, %xmm1
5678 ; SSE-NEXT: por %xmm0, %xmm1
5679 ; SSE-NEXT: pshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5680 ; SSE-NEXT: # xmm0 = mem[0,1,1,3,4,5,6,7]
5681 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[3,1],xmm0[1,3]
5682 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm10[2,0]
5683 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5684 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
5685 ; SSE-NEXT: movdqa %xmm12, %xmm0
5686 ; SSE-NEXT: psrld $16, %xmm0
5687 ; SSE-NEXT: pshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5688 ; SSE-NEXT: # xmm1 = mem[0,1,2,3,5,7,6,7]
5689 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
5690 ; SSE-NEXT: movdqa %xmm9, %xmm0
5691 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
5692 ; SSE-NEXT: pandn %xmm5, %xmm0
5693 ; SSE-NEXT: pand %xmm9, %xmm1
5694 ; SSE-NEXT: por %xmm0, %xmm1
5695 ; SSE-NEXT: pshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5696 ; SSE-NEXT: # xmm0 = mem[0,1,1,3,4,5,6,7]
5697 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[3,1],xmm0[1,3]
5698 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm8[2,0]
5699 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5700 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5701 ; SSE-NEXT: psrld $16, %xmm0
5702 ; SSE-NEXT: pshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5703 ; SSE-NEXT: # xmm1 = mem[0,1,2,3,5,7,6,7]
5704 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
5705 ; SSE-NEXT: movdqa %xmm9, %xmm0
5706 ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5707 ; SSE-NEXT: pand %xmm9, %xmm1
5708 ; SSE-NEXT: por %xmm0, %xmm1
5709 ; SSE-NEXT: pshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5710 ; SSE-NEXT: # xmm0 = mem[0,1,1,3,4,5,6,7]
5711 ; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[3,1],xmm0[1,3]
5712 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm15[2,0]
5713 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5714 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
5715 ; SSE-NEXT: movdqa %xmm15, %xmm0
5716 ; SSE-NEXT: psrld $16, %xmm0
5717 ; SSE-NEXT: pshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5718 ; SSE-NEXT: # xmm1 = mem[0,1,2,3,5,7,6,7]
5719 ; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
5720 ; SSE-NEXT: movdqa %xmm9, %xmm0
5721 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
5722 ; SSE-NEXT: pandn %xmm10, %xmm0
5723 ; SSE-NEXT: pand %xmm9, %xmm1
5724 ; SSE-NEXT: por %xmm0, %xmm1
5725 ; SSE-NEXT: pshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5726 ; SSE-NEXT: # xmm0 = mem[0,1,1,3,4,5,6,7]
5727 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[1,3]
5728 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,0]
5729 ; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5730 ; SSE-NEXT: psrld $16, %xmm13
5731 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm14[0,1,2,3,5,7,6,7]
5732 ; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm13[2],xmm3[3],xmm13[3]
5733 ; SSE-NEXT: pand %xmm9, %xmm3
5734 ; SSE-NEXT: pandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
5735 ; SSE-NEXT: por %xmm3, %xmm9
5736 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm7[0,1,1,3,4,5,6,7]
5737 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,1],xmm0[1,3]
5738 ; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,1],xmm4[2,0]
5739 ; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5740 ; SSE-NEXT: movdqa %xmm2, %xmm0
5741 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
5742 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
5743 ; SSE-NEXT: # xmm1 = mem[1,1,1,1]
5744 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
5745 ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,0,0,65535,65535,65535]
5746 ; SSE-NEXT: movdqa %xmm4, %xmm2
5747 ; SSE-NEXT: pandn %xmm0, %xmm2
5748 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
5749 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm11[0,0]
5750 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm11[2,3]
5751 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
5752 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
5753 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm0[1,0,2,3,4,5,6,7]
5754 ; SSE-NEXT: pand %xmm4, %xmm3
5755 ; SSE-NEXT: por %xmm2, %xmm3
5756 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5757 ; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5758 ; SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,2]
5759 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5760 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
5761 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
5762 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,6,5,4]
5763 ; SSE-NEXT: movdqa {{.*#+}} xmm14 = [65535,65535,65535,65535,65535,0,0,0]
5764 ; SSE-NEXT: movdqa %xmm14, %xmm0
5765 ; SSE-NEXT: pandn %xmm2, %xmm0
5766 ; SSE-NEXT: pand %xmm14, %xmm3
5767 ; SSE-NEXT: por %xmm3, %xmm0
5768 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5769 ; SSE-NEXT: psrldq {{.*#+}} xmm6 = xmm6[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
5770 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
5771 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm9[1,1,1,1]
5772 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm3[0]
5773 ; SSE-NEXT: movdqa %xmm4, %xmm3
5774 ; SSE-NEXT: pandn %xmm6, %xmm3
5775 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
5776 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5777 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,0],xmm0[0,0]
5778 ; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm0[2,3]
5779 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm8[0,2,2,3,4,5,6,7]
5780 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
5781 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,2,3,4,5,6,7]
5782 ; SSE-NEXT: pand %xmm4, %xmm2
5783 ; SSE-NEXT: por %xmm3, %xmm2
5784 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
5785 ; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5786 ; SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,2]
5787 ; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
5788 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm0[0,1,2,3,4,6,6,7]
5789 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
5790 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,5,4]
5791 ; SSE-NEXT: movdqa %xmm14, %xmm0
5792 ; SSE-NEXT: pandn %xmm3, %xmm0
5793 ; SSE-NEXT: pand %xmm14, %xmm2
5794 ; SSE-NEXT: por %xmm2, %xmm0
5795 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5796 ; SSE-NEXT: movdqa %xmm5, %xmm2
5797 ; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
5798 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
5799 ; SSE-NEXT: # xmm3 = mem[1,1,1,1]
5800 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
5801 ; SSE-NEXT: movdqa %xmm4, %xmm5
5802 ; SSE-NEXT: pandn %xmm2, %xmm5
5803 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
5804 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm12[0,0]
5805 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm12[2,3]
5806 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[0,2,2,3,4,5,6,7]
5807 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
5808 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,2,3,4,5,6,7]
5809 ; SSE-NEXT: pand %xmm4, %xmm2
5810 ; SSE-NEXT: por %xmm5, %xmm2
5811 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5812 ; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5813 ; SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,2]
5814 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5815 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm0[0,1,2,3,4,6,6,7]
5816 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,0]
5817 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,5,4]
5818 ; SSE-NEXT: movdqa %xmm14, %xmm0
5819 ; SSE-NEXT: pandn %xmm5, %xmm0
5820 ; SSE-NEXT: pand %xmm14, %xmm2
5821 ; SSE-NEXT: por %xmm2, %xmm0
5822 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5823 ; SSE-NEXT: movdqa %xmm10, %xmm2
5824 ; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
5825 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
5826 ; SSE-NEXT: # xmm5 = mem[1,1,1,1]
5827 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
5828 ; SSE-NEXT: movdqa %xmm4, %xmm5
5829 ; SSE-NEXT: pandn %xmm2, %xmm5
5830 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
5831 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[1,0],xmm15[0,0]
5832 ; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[2,0],xmm15[2,3]
5833 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm10[0,2,2,3,4,5,6,7]
5834 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
5835 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,2,3,4,5,6,7]
5836 ; SSE-NEXT: pand %xmm4, %xmm2
5837 ; SSE-NEXT: por %xmm5, %xmm2
5838 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5839 ; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5840 ; SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,2]
5841 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5842 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm0[0,1,2,3,4,6,6,7]
5843 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,0]
5844 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,5,4]
5845 ; SSE-NEXT: movdqa %xmm14, %xmm0
5846 ; SSE-NEXT: pandn %xmm5, %xmm0
5847 ; SSE-NEXT: pand %xmm14, %xmm2
5848 ; SSE-NEXT: por %xmm2, %xmm0
5849 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5850 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
5851 ; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
5852 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
5853 ; SSE-NEXT: # xmm5 = mem[1,1,1,1]
5854 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
5855 ; SSE-NEXT: movdqa %xmm4, %xmm5
5856 ; SSE-NEXT: pandn %xmm2, %xmm5
5857 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
5858 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5859 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm0[0,0]
5860 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[2,3]
5861 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm2[0,2,2,3,4,5,6,7]
5862 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,3,2,3]
5863 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,0,2,3,4,5,6,7]
5864 ; SSE-NEXT: pand %xmm4, %xmm6
5865 ; SSE-NEXT: por %xmm5, %xmm6
5866 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5867 ; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5868 ; SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,2]
5869 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5870 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm0[0,1,2,3,4,6,6,7]
5871 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,0]
5872 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,5,4]
5873 ; SSE-NEXT: movdqa %xmm14, %xmm0
5874 ; SSE-NEXT: pandn %xmm5, %xmm0
5875 ; SSE-NEXT: pand %xmm14, %xmm6
5876 ; SSE-NEXT: por %xmm6, %xmm0
5877 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5878 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
5879 ; SSE-NEXT: psrldq {{.*#+}} xmm5 = xmm5[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
5880 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
5881 ; SSE-NEXT: # xmm6 = mem[1,1,1,1]
5882 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0]
5883 ; SSE-NEXT: movdqa %xmm4, %xmm6
5884 ; SSE-NEXT: pandn %xmm5, %xmm6
5885 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
5886 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5887 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,0],xmm0[0,0]
5888 ; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,0],xmm0[2,3]
5889 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm11[0,2,2,3,4,5,6,7]
5890 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,3]
5891 ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,0,2,3,4,5,6,7]
5892 ; SSE-NEXT: pand %xmm4, %xmm5
5893 ; SSE-NEXT: por %xmm6, %xmm5
5894 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5895 ; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5896 ; SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,2]
5897 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5898 ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm0[0,1,2,3,4,6,6,7]
5899 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,2,0]
5900 ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,5,4]
5901 ; SSE-NEXT: movdqa %xmm14, %xmm0
5902 ; SSE-NEXT: pandn %xmm6, %xmm0
5903 ; SSE-NEXT: pand %xmm14, %xmm5
5904 ; SSE-NEXT: por %xmm5, %xmm0
5905 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5906 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
5907 ; SSE-NEXT: psrldq {{.*#+}} xmm5 = xmm5[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
5908 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
5909 ; SSE-NEXT: # xmm6 = mem[1,1,1,1]
5910 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0]
5911 ; SSE-NEXT: movdqa %xmm4, %xmm6
5912 ; SSE-NEXT: pandn %xmm5, %xmm6
5913 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
5914 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5915 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm0[0,0]
5916 ; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm0[2,3]
5917 ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm5[0,2,2,3,4,5,6,7]
5918 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,3,2,3]
5919 ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,0,2,3,4,5,6,7]
5920 ; SSE-NEXT: pand %xmm4, %xmm7
5921 ; SSE-NEXT: por %xmm6, %xmm7
5922 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5923 ; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
5924 ; SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,2]
5925 ; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5926 ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm0[0,1,2,3,4,6,6,7]
5927 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,2,0]
5928 ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,5,4]
5929 ; SSE-NEXT: movdqa %xmm14, %xmm0
5930 ; SSE-NEXT: pandn %xmm6, %xmm0
5931 ; SSE-NEXT: pand %xmm14, %xmm7
5932 ; SSE-NEXT: por %xmm7, %xmm0
5933 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5934 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
5935 ; SSE-NEXT: movdqa %xmm13, %xmm6
5936 ; SSE-NEXT: psrldq {{.*#+}} xmm6 = xmm6[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
5937 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
5938 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1]
5939 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm7[0]
5940 ; SSE-NEXT: movdqa %xmm4, %xmm12
5941 ; SSE-NEXT: pandn %xmm6, %xmm12
5942 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
5943 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
5944 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm6[0,0]
5945 ; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm6[2,3]
5946 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm7[0,2,2,3,4,5,6,7]
5947 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,3,2,3]
5948 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,0,2,3,4,5,6,7]
5949 ; SSE-NEXT: pand %xmm4, %xmm6
5950 ; SSE-NEXT: por %xmm12, %xmm6
5951 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
5952 ; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
5953 ; SSE-NEXT: # xmm12 = xmm12[0,1],mem[0,2]
5954 ; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5955 ; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,6,6,7]
5956 ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm12[0,1,2,0]
5957 ; SSE-NEXT: pshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,6,5,4]
5958 ; SSE-NEXT: movdqa %xmm14, %xmm15
5959 ; SSE-NEXT: pandn %xmm12, %xmm15
5960 ; SSE-NEXT: pand %xmm14, %xmm6
5961 ; SSE-NEXT: por %xmm6, %xmm15
5962 ; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
5963 ; SSE-NEXT: psrlq $48, %xmm9
5964 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
5965 ; SSE-NEXT: # xmm12 = mem[2,2,3,3]
5966 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm12 = xmm12[0],xmm9[0]
5967 ; SSE-NEXT: movdqa %xmm4, %xmm6
5968 ; SSE-NEXT: pandn %xmm12, %xmm6
5969 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[3,1,2,3,4,5,6,7]
5970 ; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,3,2,3]
5971 ; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm8[0,1,3,3,4,5,6,7]
5972 ; SSE-NEXT: pand %xmm4, %xmm8
5973 ; SSE-NEXT: por %xmm6, %xmm8
5974 ; SSE-NEXT: pshufhw $231, (%rsp), %xmm6 # 16-byte Folded Reload
5975 ; SSE-NEXT: # xmm6 = mem[0,1,2,3,7,5,6,7]
5976 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,2]
5977 ; SSE-NEXT: movdqa %xmm14, %xmm12
5978 ; SSE-NEXT: pandn %xmm6, %xmm12
5979 ; SSE-NEXT: pand %xmm14, %xmm8
5980 ; SSE-NEXT: por %xmm8, %xmm12
5981 ; SSE-NEXT: movdqa %xmm12, (%rsp) # 16-byte Spill
5982 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
5983 ; SSE-NEXT: movdqa %xmm9, %xmm6
5984 ; SSE-NEXT: psrlq $48, %xmm6
5985 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
5986 ; SSE-NEXT: # xmm8 = mem[2,2,3,3]
5987 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm6[0]
5988 ; SSE-NEXT: movdqa %xmm4, %xmm6
5989 ; SSE-NEXT: pandn %xmm8, %xmm6
5990 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
5991 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
5992 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,3,4,5,6,7]
5993 ; SSE-NEXT: pand %xmm4, %xmm1
5994 ; SSE-NEXT: por %xmm6, %xmm1
5995 ; SSE-NEXT: pshufhw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
5996 ; SSE-NEXT: # xmm6 = mem[0,1,2,3,7,5,6,7]
5997 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,2]
5998 ; SSE-NEXT: movdqa %xmm14, %xmm8
5999 ; SSE-NEXT: pandn %xmm6, %xmm8
6000 ; SSE-NEXT: pand %xmm14, %xmm1
6001 ; SSE-NEXT: por %xmm1, %xmm8
6002 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6003 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
6004 ; SSE-NEXT: movdqa %xmm15, %xmm1
6005 ; SSE-NEXT: psrlq $48, %xmm1
6006 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
6007 ; SSE-NEXT: # xmm6 = mem[2,2,3,3]
6008 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm1[0]
6009 ; SSE-NEXT: movdqa %xmm4, %xmm1
6010 ; SSE-NEXT: pandn %xmm6, %xmm1
6011 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm10[3,1,2,3,4,5,6,7]
6012 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,3,2,3]
6013 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,3,3,4,5,6,7]
6014 ; SSE-NEXT: pand %xmm4, %xmm6
6015 ; SSE-NEXT: por %xmm1, %xmm6
6016 ; SSE-NEXT: pshufhw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6017 ; SSE-NEXT: # xmm1 = mem[0,1,2,3,7,5,6,7]
6018 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
6019 ; SSE-NEXT: movdqa %xmm14, %xmm8
6020 ; SSE-NEXT: pandn %xmm1, %xmm8
6021 ; SSE-NEXT: pand %xmm14, %xmm6
6022 ; SSE-NEXT: por %xmm6, %xmm8
6023 ; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6024 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
6025 ; SSE-NEXT: movdqa %xmm12, %xmm1
6026 ; SSE-NEXT: psrlq $48, %xmm1
6027 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
6028 ; SSE-NEXT: # xmm6 = mem[2,2,3,3]
6029 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm1[0]
6030 ; SSE-NEXT: movdqa %xmm4, %xmm1
6031 ; SSE-NEXT: pandn %xmm6, %xmm1
6032 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
6033 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
6034 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,3,4,5,6,7]
6035 ; SSE-NEXT: pand %xmm4, %xmm3
6036 ; SSE-NEXT: por %xmm1, %xmm3
6037 ; SSE-NEXT: pshufhw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6038 ; SSE-NEXT: # xmm1 = mem[0,1,2,3,7,5,6,7]
6039 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
6040 ; SSE-NEXT: movdqa %xmm14, %xmm6
6041 ; SSE-NEXT: pandn %xmm1, %xmm6
6042 ; SSE-NEXT: pand %xmm14, %xmm3
6043 ; SSE-NEXT: por %xmm3, %xmm6
6044 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6045 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
6046 ; SSE-NEXT: movdqa %xmm8, %xmm1
6047 ; SSE-NEXT: psrlq $48, %xmm1
6048 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6049 ; SSE-NEXT: # xmm3 = mem[2,2,3,3]
6050 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
6051 ; SSE-NEXT: movdqa %xmm4, %xmm1
6052 ; SSE-NEXT: pandn %xmm3, %xmm1
6053 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm11[3,1,2,3,4,5,6,7]
6054 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
6055 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,3,4,5,6,7]
6056 ; SSE-NEXT: pand %xmm4, %xmm3
6057 ; SSE-NEXT: por %xmm1, %xmm3
6058 ; SSE-NEXT: pshufhw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6059 ; SSE-NEXT: # xmm1 = mem[0,1,2,3,7,5,6,7]
6060 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
6061 ; SSE-NEXT: movdqa %xmm14, %xmm6
6062 ; SSE-NEXT: pandn %xmm1, %xmm6
6063 ; SSE-NEXT: pand %xmm14, %xmm3
6064 ; SSE-NEXT: por %xmm3, %xmm6
6065 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6066 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
6067 ; SSE-NEXT: movdqa %xmm6, %xmm1
6068 ; SSE-NEXT: psrlq $48, %xmm1
6069 ; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6070 ; SSE-NEXT: # xmm3 = mem[2,2,3,3]
6071 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
6072 ; SSE-NEXT: movdqa %xmm4, %xmm1
6073 ; SSE-NEXT: pandn %xmm3, %xmm1
6074 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
6075 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
6076 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
6077 ; SSE-NEXT: pand %xmm4, %xmm2
6078 ; SSE-NEXT: por %xmm1, %xmm2
6079 ; SSE-NEXT: pshufhw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6080 ; SSE-NEXT: # xmm1 = mem[0,1,2,3,7,5,6,7]
6081 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
6082 ; SSE-NEXT: movdqa %xmm14, %xmm3
6083 ; SSE-NEXT: pandn %xmm1, %xmm3
6084 ; SSE-NEXT: pand %xmm14, %xmm2
6085 ; SSE-NEXT: por %xmm2, %xmm3
6086 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6087 ; SSE-NEXT: psrlq $48, %xmm0
6088 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm13[2,2,3,3]
6089 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
6090 ; SSE-NEXT: movdqa %xmm4, %xmm1
6091 ; SSE-NEXT: pandn %xmm2, %xmm1
6092 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm7[3,1,2,3,4,5,6,7]
6093 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
6094 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
6095 ; SSE-NEXT: pand %xmm4, %xmm2
6096 ; SSE-NEXT: por %xmm1, %xmm2
6097 ; SSE-NEXT: pshufhw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6098 ; SSE-NEXT: # xmm1 = mem[0,1,2,3,7,5,6,7]
6099 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
6100 ; SSE-NEXT: movdqa %xmm14, %xmm0
6101 ; SSE-NEXT: pandn %xmm1, %xmm0
6102 ; SSE-NEXT: pand %xmm14, %xmm2
6103 ; SSE-NEXT: por %xmm2, %xmm0
6104 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6105 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
6106 ; SSE-NEXT: movdqa %xmm7, %xmm1
6107 ; SSE-NEXT: psrlq $48, %xmm1
6108 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
6109 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm10[2,2,3,3]
6110 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
6111 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm5[3,1,2,3,4,5,6,7]
6112 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
6113 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,3,4,5,6,7]
6114 ; SSE-NEXT: pand %xmm4, %xmm1
6115 ; SSE-NEXT: pandn %xmm2, %xmm4
6116 ; SSE-NEXT: por %xmm1, %xmm4
6117 ; SSE-NEXT: pshufhw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6118 ; SSE-NEXT: # xmm1 = mem[0,1,2,3,7,5,6,7]
6119 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
6120 ; SSE-NEXT: movdqa %xmm14, %xmm0
6121 ; SSE-NEXT: pandn %xmm1, %xmm0
6122 ; SSE-NEXT: pand %xmm14, %xmm4
6123 ; SSE-NEXT: por %xmm4, %xmm0
6124 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6125 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6126 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
6127 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6128 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
6129 ; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
6130 ; SSE-NEXT: pshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
6131 ; SSE-NEXT: # xmm11 = mem[0,1,0,3]
6132 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm11[0,1,2,3,4,5,4,6]
6133 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
6134 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm5[1]
6135 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm3[0],xmm1[1,2,3]
6136 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
6137 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm13[0,2,2,3,4,5,6,7]
6138 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
6139 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,6]
6140 ; SSE-NEXT: movdqa %xmm14, %xmm4
6141 ; SSE-NEXT: pandn %xmm3, %xmm4
6142 ; SSE-NEXT: andps %xmm14, %xmm1
6143 ; SSE-NEXT: por %xmm1, %xmm4
6144 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6145 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6146 ; SSE-NEXT: # xmm1 = mem[1,1,1,1]
6147 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6148 ; SSE-NEXT: # xmm3 = mem[2,3,2,3]
6149 ; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
6150 ; SSE-NEXT: pshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6151 ; SSE-NEXT: # xmm1 = mem[0,1,0,3]
6152 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm1[0,1,2,3,4,5,4,6]
6153 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm4 = xmm4[1],xmm9[1]
6154 ; SSE-NEXT: movss {{.*#+}} xmm4 = xmm3[0],xmm4[1,2,3]
6155 ; SSE-NEXT: pshuflw $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6156 ; SSE-NEXT: # xmm3 = mem[0,2,2,3,4,5,6,7]
6157 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
6158 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,6]
6159 ; SSE-NEXT: movdqa %xmm14, %xmm9
6160 ; SSE-NEXT: pandn %xmm3, %xmm9
6161 ; SSE-NEXT: andps %xmm14, %xmm4
6162 ; SSE-NEXT: por %xmm4, %xmm9
6163 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6164 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6165 ; SSE-NEXT: # xmm3 = mem[1,1,1,1]
6166 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6167 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
6168 ; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
6169 ; SSE-NEXT: pshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6170 ; SSE-NEXT: # xmm3 = mem[0,1,0,3]
6171 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6172 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,6]
6173 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm15[1]
6174 ; SSE-NEXT: movss {{.*#+}} xmm3 = xmm4[0],xmm3[1,2,3]
6175 ; SSE-NEXT: pshuflw $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6176 ; SSE-NEXT: # xmm4 = mem[0,2,2,3,4,5,6,7]
6177 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
6178 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,4,6]
6179 ; SSE-NEXT: movdqa %xmm14, %xmm9
6180 ; SSE-NEXT: pandn %xmm4, %xmm9
6181 ; SSE-NEXT: andps %xmm14, %xmm3
6182 ; SSE-NEXT: por %xmm3, %xmm9
6183 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6184 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6185 ; SSE-NEXT: # xmm3 = mem[1,1,1,1]
6186 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6187 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
6188 ; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
6189 ; SSE-NEXT: pshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6190 ; SSE-NEXT: # xmm3 = mem[0,1,0,3]
6191 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6192 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,6]
6193 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm12[1]
6194 ; SSE-NEXT: movss {{.*#+}} xmm3 = xmm4[0],xmm3[1,2,3]
6195 ; SSE-NEXT: pshuflw $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6196 ; SSE-NEXT: # xmm4 = mem[0,2,2,3,4,5,6,7]
6197 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
6198 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,4,6]
6199 ; SSE-NEXT: movdqa %xmm14, %xmm9
6200 ; SSE-NEXT: pandn %xmm4, %xmm9
6201 ; SSE-NEXT: andps %xmm14, %xmm3
6202 ; SSE-NEXT: por %xmm3, %xmm9
6203 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6204 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6205 ; SSE-NEXT: # xmm3 = mem[1,1,1,1]
6206 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6207 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
6208 ; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
6209 ; SSE-NEXT: pshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6210 ; SSE-NEXT: # xmm3 = mem[0,1,0,3]
6211 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6212 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,6]
6213 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm8[1]
6214 ; SSE-NEXT: movss {{.*#+}} xmm3 = xmm4[0],xmm3[1,2,3]
6215 ; SSE-NEXT: pshuflw $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6216 ; SSE-NEXT: # xmm4 = mem[0,2,2,3,4,5,6,7]
6217 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
6218 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,4,6]
6219 ; SSE-NEXT: movdqa %xmm14, %xmm12
6220 ; SSE-NEXT: pandn %xmm4, %xmm12
6221 ; SSE-NEXT: andps %xmm14, %xmm3
6222 ; SSE-NEXT: por %xmm3, %xmm12
6223 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6224 ; SSE-NEXT: # xmm3 = mem[1,1,1,1]
6225 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6226 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
6227 ; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
6228 ; SSE-NEXT: pshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6229 ; SSE-NEXT: # xmm3 = mem[0,1,0,3]
6230 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6231 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,6]
6232 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm6[1]
6233 ; SSE-NEXT: movss {{.*#+}} xmm3 = xmm4[0],xmm3[1,2,3]
6234 ; SSE-NEXT: pshuflw $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6235 ; SSE-NEXT: # xmm4 = mem[0,2,2,3,4,5,6,7]
6236 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
6237 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,4,6]
6238 ; SSE-NEXT: movdqa %xmm14, %xmm15
6239 ; SSE-NEXT: pandn %xmm4, %xmm15
6240 ; SSE-NEXT: andps %xmm14, %xmm3
6241 ; SSE-NEXT: por %xmm3, %xmm15
6242 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6243 ; SSE-NEXT: # xmm3 = mem[1,1,1,1]
6244 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6245 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
6246 ; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
6247 ; SSE-NEXT: pshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6248 ; SSE-NEXT: # xmm3 = mem[0,1,0,3]
6249 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6250 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,6]
6251 ; SSE-NEXT: punpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6252 ; SSE-NEXT: # xmm3 = xmm3[1],mem[1]
6253 ; SSE-NEXT: movss {{.*#+}} xmm3 = xmm4[0],xmm3[1,2,3]
6254 ; SSE-NEXT: pshuflw $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6255 ; SSE-NEXT: # xmm4 = mem[0,2,2,3,4,5,6,7]
6256 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
6257 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,4,6]
6258 ; SSE-NEXT: movdqa %xmm14, %xmm8
6259 ; SSE-NEXT: pandn %xmm4, %xmm8
6260 ; SSE-NEXT: andps %xmm14, %xmm3
6261 ; SSE-NEXT: por %xmm3, %xmm8
6262 ; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
6263 ; SSE-NEXT: # xmm3 = mem[1,1,1,1]
6264 ; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6265 ; SSE-NEXT: # xmm4 = mem[2,3,2,3]
6266 ; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
6267 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm10[0,1,0,3]
6268 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6269 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,6]
6270 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm7[1]
6271 ; SSE-NEXT: movss {{.*#+}} xmm3 = xmm4[0],xmm3[1,2,3]
6272 ; SSE-NEXT: pshuflw $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
6273 ; SSE-NEXT: # xmm4 = mem[0,2,2,3,4,5,6,7]
6274 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
6275 ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,4,6]
6276 ; SSE-NEXT: movdqa %xmm14, %xmm7
6277 ; SSE-NEXT: pandn %xmm4, %xmm7
6278 ; SSE-NEXT: andps %xmm14, %xmm3
6279 ; SSE-NEXT: por %xmm3, %xmm7
6280 ; SSE-NEXT: psrlq $48, %xmm0
6281 ; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6282 ; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
6283 ; SSE-NEXT: movdqa %xmm2, %xmm3
6284 ; SSE-NEXT: psrld $16, %xmm5
6285 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm11[0,1,2,3,4,5,5,7]
6286 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm5[1]
6287 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm3[0],xmm2[1,2,3]
6288 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm13[3,1,2,3,4,5,6,7]
6289 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
6290 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,5,7]
6291 ; SSE-NEXT: movdqa %xmm14, %xmm6
6292 ; SSE-NEXT: pandn %xmm3, %xmm6
6293 ; SSE-NEXT: andps %xmm14, %xmm2
6294 ; SSE-NEXT: por %xmm2, %xmm6
6295 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6296 ; SSE-NEXT: psrlq $48, %xmm0
6297 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6298 ; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6299 ; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
6300 ; SSE-NEXT: movdqa %xmm2, %xmm3
6301 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6302 ; SSE-NEXT: psrld $16, %xmm2
6303 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
6304 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm2[1]
6305 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm3[0],xmm1[1,2,3]
6306 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
6307 ; SSE-NEXT: # xmm2 = mem[3,1,2,3,4,5,6,7]
6308 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
6309 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,5,7]
6310 ; SSE-NEXT: movdqa %xmm14, %xmm5
6311 ; SSE-NEXT: pandn %xmm2, %xmm5
6312 ; SSE-NEXT: andps %xmm14, %xmm1
6313 ; SSE-NEXT: por %xmm1, %xmm5
6314 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6315 ; SSE-NEXT: psrlq $48, %xmm1
6316 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6317 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6318 ; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
6319 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6320 ; SSE-NEXT: psrld $16, %xmm2
6321 ; SSE-NEXT: pshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6322 ; SSE-NEXT: # xmm1 = mem[0,1,2,3,4,5,5,7]
6323 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm2[1]
6324 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
6325 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
6326 ; SSE-NEXT: # xmm2 = mem[3,1,2,3,4,5,6,7]
6327 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
6328 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,5,7]
6329 ; SSE-NEXT: movdqa %xmm14, %xmm9
6330 ; SSE-NEXT: pandn %xmm2, %xmm9
6331 ; SSE-NEXT: andps %xmm14, %xmm1
6332 ; SSE-NEXT: por %xmm1, %xmm9
6333 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6334 ; SSE-NEXT: psrlq $48, %xmm0
6335 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6336 ; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6337 ; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
6338 ; SSE-NEXT: movdqa %xmm1, %xmm2
6339 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6340 ; SSE-NEXT: psrld $16, %xmm0
6341 ; SSE-NEXT: pshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6342 ; SSE-NEXT: # xmm1 = mem[0,1,2,3,4,5,5,7]
6343 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm0[1]
6344 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3]
6345 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
6346 ; SSE-NEXT: # xmm2 = mem[3,1,2,3,4,5,6,7]
6347 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
6348 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,5,7]
6349 ; SSE-NEXT: movdqa %xmm14, %xmm11
6350 ; SSE-NEXT: pandn %xmm2, %xmm11
6351 ; SSE-NEXT: andps %xmm14, %xmm1
6352 ; SSE-NEXT: por %xmm1, %xmm11
6353 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6354 ; SSE-NEXT: psrlq $48, %xmm0
6355 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6356 ; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6357 ; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
6358 ; SSE-NEXT: movdqa %xmm1, %xmm2
6359 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6360 ; SSE-NEXT: psrld $16, %xmm3
6361 ; SSE-NEXT: pshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6362 ; SSE-NEXT: # xmm1 = mem[0,1,2,3,4,5,5,7]
6363 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm3[1]
6364 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3]
6365 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
6366 ; SSE-NEXT: # xmm2 = mem[3,1,2,3,4,5,6,7]
6367 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
6368 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,5,7]
6369 ; SSE-NEXT: movdqa %xmm14, %xmm10
6370 ; SSE-NEXT: pandn %xmm2, %xmm10
6371 ; SSE-NEXT: andps %xmm14, %xmm1
6372 ; SSE-NEXT: por %xmm1, %xmm10
6373 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6374 ; SSE-NEXT: psrlq $48, %xmm0
6375 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6376 ; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6377 ; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
6378 ; SSE-NEXT: movdqa %xmm1, %xmm2
6379 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6380 ; SSE-NEXT: psrld $16, %xmm3
6381 ; SSE-NEXT: pshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6382 ; SSE-NEXT: # xmm1 = mem[0,1,2,3,4,5,5,7]
6383 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm3[1]
6384 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3]
6385 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
6386 ; SSE-NEXT: # xmm2 = mem[3,1,2,3,4,5,6,7]
6387 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
6388 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,5,7]
6389 ; SSE-NEXT: movdqa %xmm14, %xmm4
6390 ; SSE-NEXT: pandn %xmm2, %xmm4
6391 ; SSE-NEXT: andps %xmm14, %xmm1
6392 ; SSE-NEXT: por %xmm1, %xmm4
6393 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6394 ; SSE-NEXT: psrlq $48, %xmm1
6395 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6396 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6397 ; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
6398 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6399 ; SSE-NEXT: psrld $16, %xmm1
6400 ; SSE-NEXT: pshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
6401 ; SSE-NEXT: # xmm2 = mem[0,1,2,3,4,5,5,7]
6402 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
6403 ; SSE-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
6404 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6405 ; SSE-NEXT: # xmm1 = mem[3,1,2,3,4,5,6,7]
6406 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
6407 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,7]
6408 ; SSE-NEXT: movdqa %xmm14, %xmm3
6409 ; SSE-NEXT: pandn %xmm1, %xmm3
6410 ; SSE-NEXT: andps %xmm14, %xmm2
6411 ; SSE-NEXT: por %xmm2, %xmm3
6412 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6413 ; SSE-NEXT: psrlq $48, %xmm0
6414 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6415 ; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6416 ; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
6417 ; SSE-NEXT: movdqa %xmm1, %xmm2
6418 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6419 ; SSE-NEXT: psrld $16, %xmm0
6420 ; SSE-NEXT: pshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
6421 ; SSE-NEXT: # xmm1 = mem[0,1,2,3,4,5,5,7]
6422 ; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm0[1]
6423 ; SSE-NEXT: movss {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3]
6424 ; SSE-NEXT: andps %xmm14, %xmm1
6425 ; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
6426 ; SSE-NEXT: # xmm2 = mem[3,1,2,3,4,5,6,7]
6427 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
6428 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,5,7]
6429 ; SSE-NEXT: pandn %xmm2, %xmm14
6430 ; SSE-NEXT: por %xmm1, %xmm14
6431 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6432 ; SSE-NEXT: movaps %xmm0, 96(%rsi)
6433 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6434 ; SSE-NEXT: movaps %xmm0, 32(%rsi)
6435 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6436 ; SSE-NEXT: movaps %xmm1, 112(%rsi)
6437 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6438 ; SSE-NEXT: movaps %xmm1, 48(%rsi)
6439 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6440 ; SSE-NEXT: movaps %xmm1, 64(%rsi)
6441 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6442 ; SSE-NEXT: movaps %xmm1, (%rsi)
6443 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6444 ; SSE-NEXT: movaps %xmm1, 80(%rsi)
6445 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6446 ; SSE-NEXT: movaps %xmm1, 16(%rsi)
6447 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6448 ; SSE-NEXT: movaps %xmm0, 96(%rdx)
6449 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6450 ; SSE-NEXT: movaps %xmm0, 32(%rdx)
6451 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6452 ; SSE-NEXT: movaps %xmm0, 112(%rdx)
6453 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6454 ; SSE-NEXT: movaps %xmm0, 48(%rdx)
6455 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6456 ; SSE-NEXT: movaps %xmm0, 64(%rdx)
6457 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6458 ; SSE-NEXT: movaps %xmm0, (%rdx)
6459 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6460 ; SSE-NEXT: movaps %xmm0, 80(%rdx)
6461 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6462 ; SSE-NEXT: movaps %xmm0, 16(%rdx)
6463 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6464 ; SSE-NEXT: movaps %xmm0, 96(%rcx)
6465 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6466 ; SSE-NEXT: movaps %xmm0, 112(%rcx)
6467 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6468 ; SSE-NEXT: movaps %xmm0, 64(%rcx)
6469 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6470 ; SSE-NEXT: movaps %xmm0, 80(%rcx)
6471 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6472 ; SSE-NEXT: movaps %xmm0, 32(%rcx)
6473 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6474 ; SSE-NEXT: movaps %xmm0, 48(%rcx)
6475 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6476 ; SSE-NEXT: movaps %xmm0, (%rcx)
6477 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6478 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
6479 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6480 ; SSE-NEXT: movaps %xmm0, 112(%r8)
6481 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6482 ; SSE-NEXT: movaps %xmm0, 96(%r8)
6483 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6484 ; SSE-NEXT: movaps %xmm0, 80(%r8)
6485 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6486 ; SSE-NEXT: movaps %xmm0, 64(%r8)
6487 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6488 ; SSE-NEXT: movaps %xmm0, 48(%r8)
6489 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6490 ; SSE-NEXT: movaps %xmm0, 32(%r8)
6491 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6492 ; SSE-NEXT: movaps %xmm0, 16(%r8)
6493 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
6494 ; SSE-NEXT: movaps %xmm0, (%r8)
6495 ; SSE-NEXT: movdqa %xmm7, 112(%r9)
6496 ; SSE-NEXT: movdqa %xmm8, 96(%r9)
6497 ; SSE-NEXT: movdqa %xmm15, 80(%r9)
6498 ; SSE-NEXT: movdqa %xmm12, 64(%r9)
6499 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6500 ; SSE-NEXT: movaps %xmm0, 48(%r9)
6501 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6502 ; SSE-NEXT: movaps %xmm0, 32(%r9)
6503 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6504 ; SSE-NEXT: movaps %xmm0, 16(%r9)
6505 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
6506 ; SSE-NEXT: movaps %xmm0, (%r9)
6507 ; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
6508 ; SSE-NEXT: movdqa %xmm14, 112(%rax)
6509 ; SSE-NEXT: movdqa %xmm3, 96(%rax)
6510 ; SSE-NEXT: movdqa %xmm4, 80(%rax)
6511 ; SSE-NEXT: movdqa %xmm10, 64(%rax)
6512 ; SSE-NEXT: movdqa %xmm11, 48(%rax)
6513 ; SSE-NEXT: movdqa %xmm9, 32(%rax)
6514 ; SSE-NEXT: movdqa %xmm5, 16(%rax)
6515 ; SSE-NEXT: movdqa %xmm6, (%rax)
6516 ; SSE-NEXT: addq $1176, %rsp # imm = 0x498
6517 ; SSE-NEXT: retq
6518 ;
6519 ; AVX1-ONLY-LABEL: load_i16_stride6_vf64:
6520 ; AVX1-ONLY: # %bb.0:
6521 ; AVX1-ONLY-NEXT: subq $1368, %rsp # imm = 0x558
6522 ; AVX1-ONLY-NEXT: vmovdqa 96(%rdi), %xmm0
6523 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6524 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
6525 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6526 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
6527 ; AVX1-ONLY-NEXT: vmovdqa 112(%rdi), %xmm1
6528 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6529 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
6530 ; AVX1-ONLY-NEXT: vmovdqa 80(%rdi), %xmm1
6531 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6532 ; AVX1-ONLY-NEXT: vpslld $16, %xmm1, %xmm1
6533 ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm2
6534 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6535 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm2 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
6536 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
6537 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
6538 ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm3
6539 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6540 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm4
6541 ; AVX1-ONLY-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6542 ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm5
6543 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm2
6544 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6545 ; AVX1-ONLY-NEXT: vpsrlq $16, %xmm5, %xmm1
6546 ; AVX1-ONLY-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6547 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
6548 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6549 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
6550 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
6551 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[0,1,0,3]
6552 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6553 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
6554 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
6555 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3,4,5],xmm2[6,7]
6556 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6,7]
6557 ; AVX1-ONLY-NEXT: vmovdqa 176(%rdi), %xmm0
6558 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6559 ; AVX1-ONLY-NEXT: vpslld $16, %xmm0, %xmm0
6560 ; AVX1-ONLY-NEXT: vmovdqa 160(%rdi), %xmm2
6561 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6562 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm2 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
6563 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
6564 ; AVX1-ONLY-NEXT: vmovdqa 128(%rdi), %xmm2
6565 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6566 ; AVX1-ONLY-NEXT: vpsrlq $16, %xmm2, %xmm2
6567 ; AVX1-ONLY-NEXT: vmovdqa 144(%rdi), %xmm3
6568 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6569 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
6570 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6571 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,1,0,2,4,5,6,7]
6572 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
6573 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5],xmm0[6,7]
6574 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm6 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0]
6575 ; AVX1-ONLY-NEXT: vandps %ymm6, %ymm1, %ymm1
6576 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
6577 ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm6, %ymm2
6578 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm1, %ymm0
6579 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6580 ; AVX1-ONLY-NEXT: vmovdqa 464(%rdi), %xmm0
6581 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
6582 ; AVX1-ONLY-NEXT: vpslld $16, %xmm0, %xmm1
6583 ; AVX1-ONLY-NEXT: vmovdqa 448(%rdi), %xmm0
6584 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6585 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm2 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
6586 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
6587 ; AVX1-ONLY-NEXT: vmovdqa 480(%rdi), %xmm0
6588 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6589 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
6590 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6591 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,6,6,7]
6592 ; AVX1-ONLY-NEXT: vmovdqa 496(%rdi), %xmm0
6593 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6594 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
6595 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
6596 ; AVX1-ONLY-NEXT: vmovdqa 416(%rdi), %xmm0
6597 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6598 ; AVX1-ONLY-NEXT: vpsrlq $16, %xmm0, %xmm2
6599 ; AVX1-ONLY-NEXT: vmovdqa 432(%rdi), %xmm0
6600 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6601 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm13 = xmm0[0,3,2,3]
6602 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm3 = xmm13[0,1,0,2,4,5,6,7]
6603 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
6604 ; AVX1-ONLY-NEXT: vmovdqa 384(%rdi), %xmm0
6605 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6606 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm12 = xmm0[0,1,0,3]
6607 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm3 = xmm12[0,1,2,3,4,6,6,7]
6608 ; AVX1-ONLY-NEXT: vmovdqa 400(%rdi), %xmm0
6609 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6610 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
6611 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3,4,5],xmm3[6,7]
6612 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7]
6613 ; AVX1-ONLY-NEXT: vmovdqa 560(%rdi), %xmm0
6614 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6615 ; AVX1-ONLY-NEXT: vpslld $16, %xmm0, %xmm2
6616 ; AVX1-ONLY-NEXT: vmovdqa 544(%rdi), %xmm0
6617 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6618 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
6619 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
6620 ; AVX1-ONLY-NEXT: vmovdqa 512(%rdi), %xmm0
6621 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6622 ; AVX1-ONLY-NEXT: vpsrlq $16, %xmm0, %xmm3
6623 ; AVX1-ONLY-NEXT: vmovdqa 528(%rdi), %xmm0
6624 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6625 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm11 = xmm0[0,3,2,3]
6626 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm4 = xmm11[0,1,0,2,4,5,6,7]
6627 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
6628 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4,5],xmm2[6,7]
6629 ; AVX1-ONLY-NEXT: vandps %ymm6, %ymm1, %ymm1
6630 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
6631 ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm6, %ymm2
6632 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm1, %ymm0
6633 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6634 ; AVX1-ONLY-NEXT: vmovdqa 272(%rdi), %xmm0
6635 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6636 ; AVX1-ONLY-NEXT: vpslld $16, %xmm0, %xmm1
6637 ; AVX1-ONLY-NEXT: vmovdqa 256(%rdi), %xmm0
6638 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6639 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm2 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
6640 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
6641 ; AVX1-ONLY-NEXT: vmovdqa 288(%rdi), %xmm0
6642 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6643 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
6644 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6645 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,6,6,7]
6646 ; AVX1-ONLY-NEXT: vmovdqa 304(%rdi), %xmm0
6647 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6648 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
6649 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
6650 ; AVX1-ONLY-NEXT: vmovdqa 224(%rdi), %xmm0
6651 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6652 ; AVX1-ONLY-NEXT: vpsrlq $16, %xmm0, %xmm2
6653 ; AVX1-ONLY-NEXT: vmovdqa 240(%rdi), %xmm0
6654 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6655 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm9 = xmm0[0,3,2,3]
6656 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm3 = xmm9[0,1,0,2,4,5,6,7]
6657 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
6658 ; AVX1-ONLY-NEXT: vmovdqa 192(%rdi), %xmm0
6659 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6660 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[0,1,0,3]
6661 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,4,6,6,7]
6662 ; AVX1-ONLY-NEXT: vmovdqa 208(%rdi), %xmm0
6663 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6664 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
6665 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3,4,5],xmm3[6,7]
6666 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7]
6667 ; AVX1-ONLY-NEXT: vmovdqa 368(%rdi), %xmm0
6668 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6669 ; AVX1-ONLY-NEXT: vpslld $16, %xmm0, %xmm2
6670 ; AVX1-ONLY-NEXT: vmovdqa 352(%rdi), %xmm0
6671 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6672 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
6673 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
6674 ; AVX1-ONLY-NEXT: vmovdqa 320(%rdi), %xmm10
6675 ; AVX1-ONLY-NEXT: vpsrlq $16, %xmm10, %xmm3
6676 ; AVX1-ONLY-NEXT: vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6677 ; AVX1-ONLY-NEXT: vmovdqa 336(%rdi), %xmm0
6678 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6679 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[0,3,2,3]
6680 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm4 = xmm7[0,1,0,2,4,5,6,7]
6681 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
6682 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4,5],xmm2[6,7]
6683 ; AVX1-ONLY-NEXT: vandps %ymm6, %ymm1, %ymm1
6684 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
6685 ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm6, %ymm2
6686 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm1, %ymm0
6687 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6688 ; AVX1-ONLY-NEXT: vmovdqa 656(%rdi), %xmm0
6689 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6690 ; AVX1-ONLY-NEXT: vpslld $16, %xmm0, %xmm1
6691 ; AVX1-ONLY-NEXT: vmovdqa 640(%rdi), %xmm0
6692 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6693 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm2 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
6694 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
6695 ; AVX1-ONLY-NEXT: vmovdqa 672(%rdi), %xmm0
6696 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6697 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
6698 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6699 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,6,6,7]
6700 ; AVX1-ONLY-NEXT: vmovdqa 688(%rdi), %xmm0
6701 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6702 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
6703 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
6704 ; AVX1-ONLY-NEXT: vmovdqa 608(%rdi), %xmm0
6705 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6706 ; AVX1-ONLY-NEXT: vpsrlq $16, %xmm0, %xmm2
6707 ; AVX1-ONLY-NEXT: vmovdqa 624(%rdi), %xmm0
6708 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6709 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[0,3,2,3]
6710 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm15 = xmm3[0,1,0,2,4,5,6,7]
6711 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm15 = xmm15[0],xmm2[0],xmm15[1],xmm2[1]
6712 ; AVX1-ONLY-NEXT: vmovdqa 576(%rdi), %xmm0
6713 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6714 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[0,1,0,3]
6715 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm0 = xmm4[0,1,2,3,4,6,6,7]
6716 ; AVX1-ONLY-NEXT: vmovdqa 592(%rdi), %xmm2
6717 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6718 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6719 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm15[3,4,5],xmm0[6,7]
6720 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2],ymm1[3,4,5,6,7]
6721 ; AVX1-ONLY-NEXT: vmovdqa 752(%rdi), %xmm0
6722 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6723 ; AVX1-ONLY-NEXT: vpslld $16, %xmm0, %xmm1
6724 ; AVX1-ONLY-NEXT: vmovdqa 736(%rdi), %xmm0
6725 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6726 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm15 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
6727 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm15 = xmm15[0],xmm1[0],xmm15[1],xmm1[1],xmm15[2],xmm1[2],xmm15[3],xmm1[3]
6728 ; AVX1-ONLY-NEXT: vmovdqa 704(%rdi), %xmm0
6729 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6730 ; AVX1-ONLY-NEXT: vpsrlq $16, %xmm0, %xmm0
6731 ; AVX1-ONLY-NEXT: vmovdqa 720(%rdi), %xmm1
6732 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6733 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
6734 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm14 = xmm1[0,1,0,2,4,5,6,7]
6735 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm14[0],xmm0[0],xmm14[1],xmm0[1]
6736 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm15[6,7]
6737 ; AVX1-ONLY-NEXT: vandps %ymm6, %ymm2, %ymm2
6738 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
6739 ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm6, %ymm0
6740 ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm2, %ymm0
6741 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6742 ; AVX1-ONLY-NEXT: vpshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6743 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1,2,3,5,7,6,7]
6744 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6745 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm2, %xmm2
6746 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6747 ; AVX1-ONLY-NEXT: vpshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
6748 ; AVX1-ONLY-NEXT: # xmm2 = mem[2,2,3,3]
6749 ; AVX1-ONLY-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
6750 ; AVX1-ONLY-NEXT: # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
6751 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
6752 ; AVX1-ONLY-NEXT: vpshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
6753 ; AVX1-ONLY-NEXT: # xmm2 = mem[0,1,1,3,4,5,6,7]
6754 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm14 = xmm5[1,1,1,1]
6755 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1]
6756 ; AVX1-ONLY-NEXT: vpshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
6757 ; AVX1-ONLY-NEXT: # xmm14 = mem[0,1,2,3,5,7,6,7]
6758 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
6759 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm5, %xmm15
6760 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm14 = xmm14[2],xmm15[2],xmm14[3],xmm15[3]
6761 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm14[0,1,2],xmm2[3,4,5],xmm14[6,7]
6762 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
6763 ; AVX1-ONLY-NEXT: vpshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
6764 ; AVX1-ONLY-NEXT: # xmm2 = mem[0,1,1,3,4,5,6,7]
6765 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
6766 ; AVX1-ONLY-NEXT: # xmm14 = mem[1,1,1,1]
6767 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1]
6768 ; AVX1-ONLY-NEXT: vpshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
6769 ; AVX1-ONLY-NEXT: # xmm14 = mem[2,2,3,3]
6770 ; AVX1-ONLY-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm14 # 16-byte Folded Reload
6771 ; AVX1-ONLY-NEXT: # xmm14 = xmm14[0],mem[0],xmm14[1],mem[1],xmm14[2],mem[2],xmm14[3],mem[3]
6772 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5],xmm14[6,7]
6773 ; AVX1-ONLY-NEXT: vandps %ymm6, %ymm0, %ymm0
6774 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
6775 ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm6, %ymm2
6776 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm0, %ymm0
6777 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6778 ; AVX1-ONLY-NEXT: vpshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6779 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1,2,3,5,7,6,7]
6780 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6781 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm2, %xmm2
6782 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6783 ; AVX1-ONLY-NEXT: vpshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
6784 ; AVX1-ONLY-NEXT: # xmm2 = mem[2,2,3,3]
6785 ; AVX1-ONLY-NEXT: vpunpcklwd (%rsp), %xmm2, %xmm2 # 16-byte Folded Reload
6786 ; AVX1-ONLY-NEXT: # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
6787 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
6788 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm2 = xmm13[0,1,1,3,4,5,6,7]
6789 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
6790 ; AVX1-ONLY-NEXT: # xmm13 = mem[1,1,1,1]
6791 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm13[0],xmm2[1],xmm13[1]
6792 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,5,7,6,7]
6793 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
6794 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm5, %xmm13
6795 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm12 = xmm12[2],xmm13[2],xmm12[3],xmm13[3]
6796 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm12[0,1,2],xmm2[3,4,5],xmm12[6,7]
6797 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
6798 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm2 = xmm11[0,1,1,3,4,5,6,7]
6799 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
6800 ; AVX1-ONLY-NEXT: # xmm11 = mem[1,1,1,1]
6801 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1]
6802 ; AVX1-ONLY-NEXT: vpshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
6803 ; AVX1-ONLY-NEXT: # xmm11 = mem[2,2,3,3]
6804 ; AVX1-ONLY-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload
6805 ; AVX1-ONLY-NEXT: # xmm11 = xmm11[0],mem[0],xmm11[1],mem[1],xmm11[2],mem[2],xmm11[3],mem[3]
6806 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5],xmm11[6,7]
6807 ; AVX1-ONLY-NEXT: vmovaps %ymm6, %ymm13
6808 ; AVX1-ONLY-NEXT: vandps %ymm6, %ymm0, %ymm0
6809 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
6810 ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm6, %ymm2
6811 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm0, %ymm0
6812 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6813 ; AVX1-ONLY-NEXT: vpshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6814 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1,2,3,5,7,6,7]
6815 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6816 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm2, %xmm2
6817 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6818 ; AVX1-ONLY-NEXT: vpshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
6819 ; AVX1-ONLY-NEXT: # xmm2 = mem[2,2,3,3]
6820 ; AVX1-ONLY-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
6821 ; AVX1-ONLY-NEXT: # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
6822 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
6823 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm2 = xmm9[0,1,1,3,4,5,6,7]
6824 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
6825 ; AVX1-ONLY-NEXT: # xmm9 = mem[1,1,1,1]
6826 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1]
6827 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,7,6,7]
6828 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
6829 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm5, %xmm9
6830 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm8 = xmm8[2],xmm9[2],xmm8[3],xmm9[3]
6831 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm8[0,1,2],xmm2[3,4,5],xmm8[6,7]
6832 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
6833 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm2 = xmm7[0,1,1,3,4,5,6,7]
6834 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm7 = xmm10[1,1,1,1]
6835 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1]
6836 ; AVX1-ONLY-NEXT: vpshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
6837 ; AVX1-ONLY-NEXT: # xmm7 = mem[2,2,3,3]
6838 ; AVX1-ONLY-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
6839 ; AVX1-ONLY-NEXT: # xmm7 = xmm7[0],mem[0],xmm7[1],mem[1],xmm7[2],mem[2],xmm7[3],mem[3]
6840 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5],xmm7[6,7]
6841 ; AVX1-ONLY-NEXT: vandps %ymm6, %ymm0, %ymm0
6842 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
6843 ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm6, %ymm2
6844 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm0, %ymm0
6845 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6846 ; AVX1-ONLY-NEXT: vpshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6847 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1,2,3,5,7,6,7]
6848 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
6849 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm12, %xmm2
6850 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6851 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
6852 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm2 = xmm11[2,2,3,3]
6853 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
6854 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1],xmm2[2],xmm10[2],xmm2[3],xmm10[3]
6855 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
6856 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[0,1,1,3,4,5,6,7]
6857 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
6858 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm3 = xmm6[1,1,1,1]
6859 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
6860 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm3 = xmm4[0,1,2,3,5,7,6,7]
6861 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
6862 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm5, %xmm4
6863 ; AVX1-ONLY-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
6864 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3,4,5],xmm3[6,7]
6865 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
6866 ; AVX1-ONLY-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
6867 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
6868 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm2 = xmm8[1,1,1,1]
6869 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
6870 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
6871 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm2 = xmm7[2,2,3,3]
6872 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
6873 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
6874 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5],xmm2[6,7]
6875 ; AVX1-ONLY-NEXT: vandps %ymm0, %ymm13, %ymm0
6876 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
6877 ; AVX1-ONLY-NEXT: vandnps %ymm1, %ymm13, %ymm1
6878 ; AVX1-ONLY-NEXT: vorps %ymm1, %ymm0, %ymm0
6879 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6880 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6881 ; AVX1-ONLY-NEXT: # xmm0 = mem[1,1,1,1]
6882 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6883 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
6884 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
6885 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm15 = [4,5,0,1,12,13,14,15,8,9,10,11,12,13,14,15]
6886 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6887 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
6888 ; AVX1-ONLY-NEXT: # xmm1 = xmm1[0,1],mem[2,3],xmm1[4,5,6,7]
6889 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6890 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm1, %xmm1
6891 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4],xmm1[5,6,7]
6892 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6893 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
6894 ; AVX1-ONLY-NEXT: # xmm1 = xmm1[0,1],mem[2,3],xmm1[4,5,6,7]
6895 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6896 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm1, %xmm1
6897 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,2,3,4,5,6,7,8,9,0,1,12,13,8,9]
6898 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6899 ; AVX1-ONLY-NEXT: vpblendw $207, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
6900 ; AVX1-ONLY-NEXT: # xmm2 = mem[0,1,2,3],xmm2[4,5],mem[6,7]
6901 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6902 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm2, %xmm2
6903 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
6904 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm9 = [0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
6905 ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm9, %ymm0
6906 ; AVX1-ONLY-NEXT: vandps %ymm2, %ymm9, %ymm2
6907 ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm2, %ymm0
6908 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
6909 ; AVX1-ONLY-NEXT: # xmm2 = mem[1,1,1,1]
6910 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6911 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm3 = xmm3[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
6912 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
6913 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6914 ; AVX1-ONLY-NEXT: vpblendw $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
6915 ; AVX1-ONLY-NEXT: # xmm3 = xmm3[0,1,2,3],mem[4,5],xmm3[6,7]
6916 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6917 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm3, %xmm3
6918 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3,4],xmm3[5,6,7]
6919 ; AVX1-ONLY-NEXT: vandps %ymm0, %ymm13, %ymm0
6920 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
6921 ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm13, %ymm2
6922 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm0, %ymm0
6923 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6924 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6925 ; AVX1-ONLY-NEXT: # xmm0 = mem[1,1,1,1]
6926 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
6927 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
6928 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
6929 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6930 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
6931 ; AVX1-ONLY-NEXT: # xmm2 = xmm2[0,1],mem[2,3],xmm2[4,5,6,7]
6932 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6933 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm2, %xmm2
6934 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm0[3,4],xmm2[5,6,7]
6935 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
6936 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm0 # 16-byte Folded Reload
6937 ; AVX1-ONLY-NEXT: # xmm0 = xmm2[0,1],mem[2,3],xmm2[4,5,6,7]
6938 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6939 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm0, %xmm2
6940 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6941 ; AVX1-ONLY-NEXT: vpblendw $48, (%rsp), %xmm3, %xmm0 # 16-byte Folded Reload
6942 ; AVX1-ONLY-NEXT: # xmm0 = xmm3[0,1,2,3],mem[4,5],xmm3[6,7]
6943 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6944 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm0, %xmm3
6945 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
6946 ; AVX1-ONLY-NEXT: vandnps %ymm1, %ymm9, %ymm0
6947 ; AVX1-ONLY-NEXT: vandps %ymm2, %ymm9, %ymm2
6948 ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm2, %ymm0
6949 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
6950 ; AVX1-ONLY-NEXT: # xmm2 = mem[1,1,1,1]
6951 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6952 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm3 = xmm3[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
6953 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
6954 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
6955 ; AVX1-ONLY-NEXT: vpblendw $207, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm1 # 16-byte Folded Reload
6956 ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2,3],xmm3[4,5],mem[6,7]
6957 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6958 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm1, %xmm3
6959 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3,4],xmm3[5,6,7]
6960 ; AVX1-ONLY-NEXT: vandps %ymm0, %ymm13, %ymm0
6961 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
6962 ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm13, %ymm2
6963 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm0, %ymm0
6964 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6965 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6966 ; AVX1-ONLY-NEXT: # xmm0 = mem[1,1,1,1]
6967 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm2 = xmm6[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
6968 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
6969 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm2 # 16-byte Folded Reload
6970 ; AVX1-ONLY-NEXT: # xmm2 = xmm5[0,1],mem[2,3],xmm5[4,5,6,7]
6971 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6972 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm2, %xmm2
6973 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3,4],xmm2[5,6,7]
6974 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm2 # 16-byte Folded Reload
6975 ; AVX1-ONLY-NEXT: # xmm2 = xmm12[0,1],mem[2,3],xmm12[4,5,6,7]
6976 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6977 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm2, %xmm2
6978 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm11[0,1,2,3],xmm10[4,5],xmm11[6,7]
6979 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6980 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm3, %xmm3
6981 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
6982 ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm9, %ymm0
6983 ; AVX1-ONLY-NEXT: vandps %ymm2, %ymm9, %ymm2
6984 ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm2, %ymm2
6985 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
6986 ; AVX1-ONLY-NEXT: # xmm0 = mem[1,1,1,1]
6987 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm3 = xmm8[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
6988 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
6989 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm7[0,1,2,3],xmm4[4,5],xmm7[6,7]
6990 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
6991 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm0, %xmm4
6992 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3,4],xmm4[5,6,7]
6993 ; AVX1-ONLY-NEXT: vandps %ymm2, %ymm13, %ymm2
6994 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
6995 ; AVX1-ONLY-NEXT: vandnps %ymm3, %ymm13, %ymm3
6996 ; AVX1-ONLY-NEXT: vorps %ymm3, %ymm2, %ymm0
6997 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
6998 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
6999 ; AVX1-ONLY-NEXT: # xmm2 = mem[1,1,1,1]
7000 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
7001 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm3 = xmm4[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
7002 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
7003 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7004 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
7005 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6,7]
7006 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7007 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm0, %xmm3
7008 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3,4],xmm3[5,6,7]
7009 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7010 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm3 # 16-byte Folded Reload
7011 ; AVX1-ONLY-NEXT: # xmm3 = xmm0[0,1],mem[2,3],xmm0[4,5,6,7]
7012 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm3, %xmm0
7013 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
7014 ; AVX1-ONLY-NEXT: vpblendw $207, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm15 # 16-byte Folded Reload
7015 ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1,2,3],xmm6[4,5],mem[6,7]
7016 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm15, %xmm6
7017 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm6, %ymm0
7018 ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm9, %ymm2
7019 ; AVX1-ONLY-NEXT: vandps %ymm0, %ymm9, %ymm0
7020 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm0, %ymm0
7021 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
7022 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm2 = xmm11[1,1,1,1]
7023 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
7024 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm6 = xmm5[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
7025 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm2[0]
7026 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
7027 ; AVX1-ONLY-NEXT: vpblendw $207, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
7028 ; AVX1-ONLY-NEXT: # xmm2 = mem[0,1,2,3],xmm2[4,5],mem[6,7]
7029 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm2, %xmm14
7030 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm6 = xmm14[0,1,2],xmm6[3,4],xmm14[5,6,7]
7031 ; AVX1-ONLY-NEXT: vandps %ymm0, %ymm13, %ymm0
7032 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
7033 ; AVX1-ONLY-NEXT: vandnps %ymm6, %ymm13, %ymm6
7034 ; AVX1-ONLY-NEXT: vmovaps %ymm13, %ymm5
7035 ; AVX1-ONLY-NEXT: vorps %ymm6, %ymm0, %ymm0
7036 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7037 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7038 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm0, %xmm0
7039 ; AVX1-ONLY-NEXT: vpshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
7040 ; AVX1-ONLY-NEXT: # xmm6 = mem[2,2,3,3]
7041 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm6[0],xmm0[0]
7042 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm14 = [6,7,2,3,14,15,14,15,8,9,10,11,12,13,14,15]
7043 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
7044 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm6, %xmm6
7045 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm6[0,1,2],xmm0[3,4],xmm6[5,6,7]
7046 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
7047 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm6, %xmm7
7048 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,2,3,4,5,6,7,14,15,2,3,14,15,10,11]
7049 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
7050 ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm8, %xmm8
7051 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm8, %ymm7
7052 ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm9, %ymm0
7053 ; AVX1-ONLY-NEXT: vandps %ymm7, %ymm9, %ymm7
7054 ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm7, %ymm0
7055 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
7056 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm7, %xmm7
7057 ; AVX1-ONLY-NEXT: vpshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
7058 ; AVX1-ONLY-NEXT: # xmm8 = mem[2,2,3,3]
7059 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm7 = xmm8[0],xmm7[0]
7060 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
7061 ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm8, %xmm8
7062 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm7 = xmm8[0,1,2],xmm7[3,4],xmm8[5,6,7]
7063 ; AVX1-ONLY-NEXT: vandps %ymm0, %ymm13, %ymm0
7064 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
7065 ; AVX1-ONLY-NEXT: vandnps %ymm7, %ymm13, %ymm7
7066 ; AVX1-ONLY-NEXT: vorps %ymm7, %ymm0, %ymm0
7067 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7068 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
7069 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm13, %xmm0
7070 ; AVX1-ONLY-NEXT: vpshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
7071 ; AVX1-ONLY-NEXT: # xmm7 = mem[2,2,3,3]
7072 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm7[0],xmm0[0]
7073 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
7074 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm7, %xmm7
7075 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm7[0,1,2],xmm0[3,4],xmm7[5,6,7]
7076 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
7077 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm1, %xmm7
7078 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
7079 ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm1, %xmm8
7080 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm8, %ymm7
7081 ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm9, %ymm0
7082 ; AVX1-ONLY-NEXT: vandps %ymm7, %ymm9, %ymm7
7083 ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm7, %ymm0
7084 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
7085 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm10, %xmm7
7086 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
7087 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm8 = xmm12[2,2,3,3]
7088 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm7 = xmm8[0],xmm7[0]
7089 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
7090 ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm1, %xmm8
7091 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm7 = xmm8[0,1,2],xmm7[3,4],xmm8[5,6,7]
7092 ; AVX1-ONLY-NEXT: vandps %ymm5, %ymm0, %ymm0
7093 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
7094 ; AVX1-ONLY-NEXT: vandnps %ymm7, %ymm5, %ymm7
7095 ; AVX1-ONLY-NEXT: vorps %ymm7, %ymm0, %ymm0
7096 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7097 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
7098 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm8, %xmm0
7099 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm7 = xmm4[2,2,3,3]
7100 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm7[0],xmm0[0]
7101 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
7102 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm1, %xmm4
7103 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm4[0,1,2],xmm0[3,4],xmm4[5,6,7]
7104 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm3, %xmm3
7105 ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm15, %xmm4
7106 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
7107 ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm9, %ymm0
7108 ; AVX1-ONLY-NEXT: vandps %ymm3, %ymm9, %ymm3
7109 ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm3, %ymm0
7110 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm11, %xmm3
7111 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
7112 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm4 = xmm15[2,2,3,3]
7113 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
7114 ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm2, %xmm2
7115 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3,4],xmm2[5,6,7]
7116 ; AVX1-ONLY-NEXT: vandps %ymm5, %ymm0, %ymm0
7117 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
7118 ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm5, %ymm2
7119 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm0, %ymm0
7120 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7121 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7122 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm0, %xmm0
7123 ; AVX1-ONLY-NEXT: vpshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
7124 ; AVX1-ONLY-NEXT: # xmm2 = mem[2,2,3,3]
7125 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
7126 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
7127 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm2, %xmm2
7128 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3,4],xmm2[5,6,7]
7129 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
7130 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm2, %xmm2
7131 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
7132 ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm3, %xmm3
7133 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
7134 ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm9, %ymm0
7135 ; AVX1-ONLY-NEXT: vandps %ymm2, %ymm9, %ymm1
7136 ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm1, %ymm0
7137 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
7138 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm1, %xmm1
7139 ; AVX1-ONLY-NEXT: vpshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
7140 ; AVX1-ONLY-NEXT: # xmm2 = mem[2,2,3,3]
7141 ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
7142 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
7143 ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm2, %xmm2
7144 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3,4],xmm2[5,6,7]
7145 ; AVX1-ONLY-NEXT: vandps %ymm5, %ymm0, %ymm0
7146 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
7147 ; AVX1-ONLY-NEXT: vandnps %ymm1, %ymm5, %ymm1
7148 ; AVX1-ONLY-NEXT: vorps %ymm1, %ymm0, %ymm0
7149 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7150 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
7151 ; AVX1-ONLY-NEXT: # xmm0 = mem[1,1,1,1]
7152 ; AVX1-ONLY-NEXT: vpshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
7153 ; AVX1-ONLY-NEXT: # xmm1 = mem[2,3,2,3]
7154 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
7155 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
7156 ; AVX1-ONLY-NEXT: vpblendw $243, (%rsp), %xmm1, %xmm2 # 16-byte Folded Reload
7157 ; AVX1-ONLY-NEXT: # xmm2 = mem[0,1],xmm1[2,3],mem[4,5,6,7]
7158 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, (%rsp) # 16-byte Spill
7159 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,4,5,4,5,6,7,0,1,4,5,0,1,12,13]
7160 ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm2, %xmm2
7161 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
7162 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
7163 ; AVX1-ONLY-NEXT: # xmm2 = mem[1,1,1,1]
7164 ; AVX1-ONLY-NEXT: vpshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
7165 ; AVX1-ONLY-NEXT: # xmm3 = mem[2,3,2,3]
7166 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
7167 ; AVX1-ONLY-NEXT: vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
7168 ; AVX1-ONLY-NEXT: # xmm3 = mem[0,1,0,3]
7169 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7170 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,6]
7171 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm13[1]
7172 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3,4,5,6,7]
7173 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm5 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
7174 ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm5, %ymm0
7175 ; AVX1-ONLY-NEXT: vandps %ymm5, %ymm2, %ymm2
7176 ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm2, %ymm0
7177 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
7178 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm4 # 16-byte Folded Reload
7179 ; AVX1-ONLY-NEXT: # xmm4 = xmm2[0,1],mem[2,3],xmm2[4,5,6,7]
7180 ; AVX1-ONLY-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7181 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm2 = xmm12[0,1,0,3]
7182 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7183 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,6]
7184 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm10[1]
7185 ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm4, %xmm3
7186 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5,6,7]
7187 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
7188 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
7189 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7190 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
7191 ; AVX1-ONLY-NEXT: # xmm0 = mem[1,1,1,1]
7192 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
7193 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm2 = xmm4[2,3,2,3]
7194 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
7195 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
7196 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm13 # 16-byte Folded Reload
7197 ; AVX1-ONLY-NEXT: # xmm13 = xmm2[0,1],mem[2,3],xmm2[4,5,6,7]
7198 ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm13, %xmm2
7199 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
7200 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
7201 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm2 = xmm12[1,1,1,1]
7202 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
7203 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm3 = xmm11[2,3,2,3]
7204 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
7205 ; AVX1-ONLY-NEXT: vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
7206 ; AVX1-ONLY-NEXT: # xmm3 = mem[0,1,0,3]
7207 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm6 = xmm3[0,1,2,3,4,5,4,6]
7208 ; AVX1-ONLY-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
7209 ; AVX1-ONLY-NEXT: # xmm6 = xmm6[1],mem[1]
7210 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm6[2,3,4,5,6,7]
7211 ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm5, %ymm0
7212 ; AVX1-ONLY-NEXT: vandps %ymm5, %ymm2, %ymm2
7213 ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm2, %ymm0
7214 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
7215 ; AVX1-ONLY-NEXT: vpblendw $243, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm9 # 16-byte Folded Reload
7216 ; AVX1-ONLY-NEXT: # xmm9 = mem[0,1],xmm2[2,3],mem[4,5,6,7]
7217 ; AVX1-ONLY-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7218 ; AVX1-ONLY-NEXT: vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
7219 ; AVX1-ONLY-NEXT: # xmm6 = mem[0,1,0,3]
7220 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm7 = xmm6[0,1,2,3,4,5,4,6]
7221 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
7222 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm7 = xmm7[1],xmm2[1]
7223 ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm9, %xmm10
7224 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3,4],xmm10[5,6,7]
7225 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
7226 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm7[5,6,7]
7227 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7228 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
7229 ; AVX1-ONLY-NEXT: # xmm0 = mem[1,1,1,1]
7230 ; AVX1-ONLY-NEXT: vpshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
7231 ; AVX1-ONLY-NEXT: # xmm7 = mem[2,3,2,3]
7232 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
7233 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
7234 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
7235 ; AVX1-ONLY-NEXT: # xmm7 = xmm7[0,1],mem[2,3],xmm7[4,5,6,7]
7236 ; AVX1-ONLY-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7237 ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm7, %xmm7
7238 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm7, %ymm0
7239 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
7240 ; AVX1-ONLY-NEXT: # xmm7 = mem[1,1,1,1]
7241 ; AVX1-ONLY-NEXT: vpshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
7242 ; AVX1-ONLY-NEXT: # xmm10 = mem[2,3,2,3]
7243 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm10[0],xmm7[0],xmm10[1],xmm7[1],xmm10[2],xmm7[2],xmm10[3],xmm7[3]
7244 ; AVX1-ONLY-NEXT: vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
7245 ; AVX1-ONLY-NEXT: # xmm9 = mem[0,1,0,3]
7246 ; AVX1-ONLY-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7247 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm10 = xmm9[0,1,2,3,4,5,4,6]
7248 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm10 = xmm10[1],xmm8[1]
7249 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm10[2,3,4,5,6,7]
7250 ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm5, %ymm0
7251 ; AVX1-ONLY-NEXT: vandps %ymm5, %ymm7, %ymm7
7252 ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm7, %ymm0
7253 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
7254 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm8 # 16-byte Folded Reload
7255 ; AVX1-ONLY-NEXT: # xmm8 = xmm7[0,1],mem[2,3],xmm7[4,5,6,7]
7256 ; AVX1-ONLY-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7257 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm7 = xmm15[0,1,0,3]
7258 ; AVX1-ONLY-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7259 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,4,6]
7260 ; AVX1-ONLY-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
7261 ; AVX1-ONLY-NEXT: # xmm7 = xmm7[1],mem[1]
7262 ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm8, %xmm14
7263 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3,4],xmm14[5,6,7]
7264 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
7265 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm7[5,6,7]
7266 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7267 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
7268 ; AVX1-ONLY-NEXT: # xmm0 = mem[1,1,1,1]
7269 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
7270 ; AVX1-ONLY-NEXT: vpshufd {{.*#+}} xmm7 = xmm10[2,3,2,3]
7271 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
7272 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
7273 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
7274 ; AVX1-ONLY-NEXT: # xmm7 = xmm7[0,1],mem[2,3],xmm7[4,5,6,7]
7275 ; AVX1-ONLY-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7276 ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm7, %xmm7
7277 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm7, %ymm0
7278 ; AVX1-ONLY-NEXT: vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
7279 ; AVX1-ONLY-NEXT: # xmm7 = mem[1,1,1,1]
7280 ; AVX1-ONLY-NEXT: vpshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
7281 ; AVX1-ONLY-NEXT: # xmm15 = mem[2,3,2,3]
7282 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm15[0],xmm7[0],xmm15[1],xmm7[1],xmm15[2],xmm7[2],xmm15[3],xmm7[3]
7283 ; AVX1-ONLY-NEXT: vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
7284 ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1,0,3]
7285 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm9 = xmm15[0,1,2,3,4,5,4,6]
7286 ; AVX1-ONLY-NEXT: vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
7287 ; AVX1-ONLY-NEXT: # xmm9 = xmm9[1],mem[1]
7288 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm9[2,3,4,5,6,7]
7289 ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm5, %ymm0
7290 ; AVX1-ONLY-NEXT: vandps %ymm5, %ymm7, %ymm7
7291 ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm7, %ymm0
7292 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
7293 ; AVX1-ONLY-NEXT: vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
7294 ; AVX1-ONLY-NEXT: # xmm7 = xmm7[0,1],mem[2,3],xmm7[4,5,6,7]
7295 ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm7, %xmm9
7296 ; AVX1-ONLY-NEXT: vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
7297 ; AVX1-ONLY-NEXT: # xmm14 = mem[0,1,0,3]
7298 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm8 = xmm14[0,1,2,3,4,5,4,6]
7299 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
7300 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm8 = xmm8[1],xmm1[1]
7301 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4],xmm9[5,6,7]
7302 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
7303 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm8[5,6,7]
7304 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7305 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7306 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm0, %xmm0
7307 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm8 = xmm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7308 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
7309 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm0 = [6,7,2,3,4,5,6,7,6,7,6,7,2,3,14,15]
7310 ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm13, %xmm4
7311 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm8, %ymm4, %ymm4
7312 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm12, %xmm8
7313 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm9 = xmm11[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7314 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
7315 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
7316 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm9, %xmm9
7317 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,5,7]
7318 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm9[1]
7319 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm8[0,1],xmm3[2,3,4,5,6,7]
7320 ; AVX1-ONLY-NEXT: vandnps %ymm4, %ymm5, %ymm4
7321 ; AVX1-ONLY-NEXT: vandps %ymm5, %ymm3, %ymm3
7322 ; AVX1-ONLY-NEXT: vorps %ymm4, %ymm3, %ymm3
7323 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm2, %xmm4
7324 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm2 = xmm6[0,1,2,3,4,5,5,7]
7325 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm4[1]
7326 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
7327 ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm4, %xmm4
7328 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm4[5,6,7]
7329 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
7330 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm2[5,6,7]
7331 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
7332 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm2, %xmm2
7333 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
7334 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm4 = xmm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7335 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
7336 ; AVX1-ONLY-NEXT: vmovdqa (%rsp), %xmm4 # 16-byte Reload
7337 ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm4, %xmm4
7338 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2
7339 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
7340 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm4, %xmm4
7341 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
7342 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm6 = xmm6[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7343 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
7344 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
7345 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm6, %xmm6
7346 ; AVX1-ONLY-NEXT: vpshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
7347 ; AVX1-ONLY-NEXT: # xmm8 = mem[0,1,2,3,4,5,5,7]
7348 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm6 = xmm8[1],xmm6[1]
7349 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm6[2,3,4,5,6,7]
7350 ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm5, %ymm2
7351 ; AVX1-ONLY-NEXT: vandps %ymm5, %ymm4, %ymm4
7352 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm4, %ymm2
7353 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
7354 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm4, %xmm4
7355 ; AVX1-ONLY-NEXT: vpshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
7356 ; AVX1-ONLY-NEXT: # xmm6 = mem[0,1,2,3,4,5,5,7]
7357 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm4 = xmm6[1],xmm4[1]
7358 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
7359 ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm6, %xmm6
7360 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm6[5,6,7]
7361 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
7362 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm4[5,6,7]
7363 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
7364 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm4, %xmm4
7365 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
7366 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm6 = xmm6[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7367 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
7368 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
7369 ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm6, %xmm6
7370 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm6, %ymm4
7371 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
7372 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm6, %xmm6
7373 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
7374 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm8 = xmm8[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7375 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3]
7376 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
7377 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm8, %xmm8
7378 ; AVX1-ONLY-NEXT: vpshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
7379 ; AVX1-ONLY-NEXT: # xmm9 = mem[0,1,2,3,4,5,5,7]
7380 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm8 = xmm9[1],xmm8[1]
7381 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm8[2,3,4,5,6,7]
7382 ; AVX1-ONLY-NEXT: vandnps %ymm4, %ymm5, %ymm4
7383 ; AVX1-ONLY-NEXT: vandps %ymm5, %ymm6, %ymm6
7384 ; AVX1-ONLY-NEXT: vorps %ymm4, %ymm6, %ymm4
7385 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
7386 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm6, %xmm6
7387 ; AVX1-ONLY-NEXT: vpshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
7388 ; AVX1-ONLY-NEXT: # xmm8 = mem[0,1,2,3,4,5,5,7]
7389 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm6 = xmm8[1],xmm6[1]
7390 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
7391 ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm8, %xmm8
7392 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm8[5,6,7]
7393 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
7394 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm6[5,6,7]
7395 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
7396 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm6, %xmm6
7397 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm8 = xmm10[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7398 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3]
7399 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
7400 ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm8, %xmm8
7401 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm8, %ymm6
7402 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
7403 ; AVX1-ONLY-NEXT: vpsrlq $48, %xmm8, %xmm8
7404 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
7405 ; AVX1-ONLY-NEXT: vpsrldq {{.*#+}} xmm9 = xmm9[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7406 ; AVX1-ONLY-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
7407 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
7408 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm9, %xmm9
7409 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm10 = xmm15[0,1,2,3,4,5,5,7]
7410 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm9 = xmm10[1],xmm9[1]
7411 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1],xmm9[2,3,4,5,6,7]
7412 ; AVX1-ONLY-NEXT: vandnps %ymm6, %ymm5, %ymm6
7413 ; AVX1-ONLY-NEXT: vandps %ymm5, %ymm8, %ymm5
7414 ; AVX1-ONLY-NEXT: vorps %ymm6, %ymm5, %ymm5
7415 ; AVX1-ONLY-NEXT: vpshufb %xmm0, %xmm7, %xmm0
7416 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm1, %xmm6
7417 ; AVX1-ONLY-NEXT: vpshufhw {{.*#+}} xmm1 = xmm14[0,1,2,3,4,5,5,7]
7418 ; AVX1-ONLY-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm6[1]
7419 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5,6,7]
7420 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7421 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3,4],ymm0[5,6,7]
7422 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7423 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 96(%rsi)
7424 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7425 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%rsi)
7426 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7427 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%rsi)
7428 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7429 ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%rsi)
7430 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7431 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 96(%rdx)
7432 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7433 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%rdx)
7434 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7435 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%rdx)
7436 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7437 ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%rdx)
7438 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7439 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%rcx)
7440 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7441 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 96(%rcx)
7442 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7443 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%rcx)
7444 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7445 ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%rcx)
7446 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7447 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 96(%r8)
7448 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7449 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%r8)
7450 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7451 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%r8)
7452 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7453 ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%r8)
7454 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7455 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 96(%r9)
7456 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7457 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%r9)
7458 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7459 ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%r9)
7460 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7461 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%r9)
7462 ; AVX1-ONLY-NEXT: movq {{[0-9]+}}(%rsp), %rax
7463 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%rax)
7464 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%rax)
7465 ; AVX1-ONLY-NEXT: vmovaps %ymm2, 64(%rax)
7466 ; AVX1-ONLY-NEXT: vmovaps %ymm3, (%rax)
7467 ; AVX1-ONLY-NEXT: addq $1368, %rsp # imm = 0x558
7468 ; AVX1-ONLY-NEXT: vzeroupper
7469 ; AVX1-ONLY-NEXT: retq
7471 ; AVX2-SLOW-LABEL: load_i16_stride6_vf64:
7472 ; AVX2-SLOW: # %bb.0:
7473 ; AVX2-SLOW-NEXT: subq $1272, %rsp # imm = 0x4F8
7474 ; AVX2-SLOW-NEXT: vmovdqa 64(%rdi), %ymm0
7475 ; AVX2-SLOW-NEXT: vmovdqa 96(%rdi), %ymm1
7476 ; AVX2-SLOW-NEXT: vmovaps 672(%rdi), %ymm2
7477 ; AVX2-SLOW-NEXT: vmovaps 640(%rdi), %ymm3
7478 ; AVX2-SLOW-NEXT: vmovdqa 288(%rdi), %ymm4
7479 ; AVX2-SLOW-NEXT: vmovdqa 256(%rdi), %ymm5
7480 ; AVX2-SLOW-NEXT: vmovdqa 416(%rdi), %ymm8
7481 ; AVX2-SLOW-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7482 ; AVX2-SLOW-NEXT: vmovdqa 384(%rdi), %ymm9
7483 ; AVX2-SLOW-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7484 ; AVX2-SLOW-NEXT: vmovdqa 480(%rdi), %ymm6
7485 ; AVX2-SLOW-NEXT: vmovdqa 448(%rdi), %ymm7
7486 ; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm10 = ymm7[2,3],ymm6[2,3]
7487 ; AVX2-SLOW-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7488 ; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm7[0,1],ymm6[0,1]
7489 ; AVX2-SLOW-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7490 ; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm11 = ymm5[2,3],ymm4[2,3]
7491 ; AVX2-SLOW-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7492 ; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm5[0,1],ymm4[0,1]
7493 ; AVX2-SLOW-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7494 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm3[2,3],ymm2[2,3]
7495 ; AVX2-SLOW-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7496 ; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm3[0,1],ymm2[0,1]
7497 ; AVX2-SLOW-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7498 ; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm14 = ymm0[2,3],ymm1[2,3]
7499 ; AVX2-SLOW-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7500 ; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm12 = ymm0[0,1],ymm1[0,1]
7501 ; AVX2-SLOW-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7502 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm2 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
7503 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7]
7504 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm6 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
7505 ; AVX2-SLOW-NEXT: vpshufb %xmm6, %xmm1, %xmm0
7506 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm5
7507 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm5[2,2,2,2,4,5,6,7]
7508 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
7509 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm10[0],ymm7[1],ymm10[2,3,4,5],ymm7[6],ymm10[7]
7510 ; AVX2-SLOW-NEXT: vpshufb %ymm2, %ymm4, %ymm7
7511 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
7512 ; AVX2-SLOW-NEXT: vpblendvb %ymm0, %ymm3, %ymm7, %ymm3
7513 ; AVX2-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7514 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm3
7515 ; AVX2-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7516 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm7
7517 ; AVX2-SLOW-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7518 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm9 = ymm7[0,1],ymm3[2],ymm7[3,4],ymm3[5],ymm7[6,7]
7519 ; AVX2-SLOW-NEXT: vpshufb %xmm6, %xmm9, %xmm3
7520 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm9, %xmm13
7521 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm7 = xmm13[2,2,2,2,4,5,6,7]
7522 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm7[1],xmm3[2,3],xmm7[4],xmm3[5,6,7]
7523 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm12 = ymm14[0],ymm12[1],ymm14[2,3,4,5],ymm12[6],ymm14[7]
7524 ; AVX2-SLOW-NEXT: vpshufb %ymm2, %ymm12, %ymm7
7525 ; AVX2-SLOW-NEXT: vpblendvb %ymm0, %ymm3, %ymm7, %ymm3
7526 ; AVX2-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7527 ; AVX2-SLOW-NEXT: vmovdqa 224(%rdi), %ymm7
7528 ; AVX2-SLOW-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7529 ; AVX2-SLOW-NEXT: vmovdqa 192(%rdi), %ymm3
7530 ; AVX2-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7531 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm7[0,1],ymm3[2],ymm7[3,4],ymm3[5],ymm7[6,7]
7532 ; AVX2-SLOW-NEXT: vpshufb %xmm6, %xmm3, %xmm7
7533 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm3, %xmm8
7534 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm10 = xmm8[2,2,2,2,4,5,6,7]
7535 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm10 = xmm7[0],xmm10[1],xmm7[2,3],xmm10[4],xmm7[5,6,7]
7536 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
7537 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm7 = ymm11[0],ymm15[1],ymm11[2,3,4,5],ymm15[6],ymm11[7]
7538 ; AVX2-SLOW-NEXT: vpshufb %ymm2, %ymm7, %ymm11
7539 ; AVX2-SLOW-NEXT: vpblendvb %ymm0, %ymm10, %ymm11, %ymm10
7540 ; AVX2-SLOW-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7541 ; AVX2-SLOW-NEXT: vmovdqa 608(%rdi), %ymm11
7542 ; AVX2-SLOW-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7543 ; AVX2-SLOW-NEXT: vmovdqa 576(%rdi), %ymm10
7544 ; AVX2-SLOW-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7545 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm10 = ymm11[0,1],ymm10[2],ymm11[3,4],ymm10[5],ymm11[6,7]
7546 ; AVX2-SLOW-NEXT: vpshufb %xmm6, %xmm10, %xmm11
7547 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm10, %xmm6
7548 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm14 = xmm6[2,2,2,2,4,5,6,7]
7549 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm14 = xmm11[0],xmm14[1],xmm11[2,3],xmm14[4],xmm11[5,6,7]
7550 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
7551 ; AVX2-SLOW-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
7552 ; AVX2-SLOW-NEXT: # ymm11 = mem[0],ymm11[1],mem[2,3,4,5],ymm11[6],mem[7]
7553 ; AVX2-SLOW-NEXT: vpshufb %ymm2, %ymm11, %ymm2
7554 ; AVX2-SLOW-NEXT: vpblendvb %ymm0, %ymm14, %ymm2, %ymm2
7555 ; AVX2-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7556 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm2 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
7557 ; AVX2-SLOW-NEXT: vpshufb %xmm2, %xmm9, %xmm9
7558 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[1,1,2,3]
7559 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,5,5,5,5]
7560 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm13 = xmm9[0],xmm13[1],xmm9[2,3],xmm13[4],xmm9[5,6,7]
7561 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm9 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
7562 ; AVX2-SLOW-NEXT: vpshufb %ymm9, %ymm12, %ymm12
7563 ; AVX2-SLOW-NEXT: vpblendvb %ymm0, %ymm13, %ymm12, %ymm12
7564 ; AVX2-SLOW-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7565 ; AVX2-SLOW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
7566 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
7567 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
7568 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3],xmm5[4],xmm1[5,6,7]
7569 ; AVX2-SLOW-NEXT: vpshufb %ymm9, %ymm4, %ymm4
7570 ; AVX2-SLOW-NEXT: vpblendvb %ymm0, %ymm1, %ymm4, %ymm1
7571 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7572 ; AVX2-SLOW-NEXT: vpshufb %xmm2, %xmm3, %xmm1
7573 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm8[1,1,2,3]
7574 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
7575 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5,6,7]
7576 ; AVX2-SLOW-NEXT: vpshufb %ymm9, %ymm7, %ymm3
7577 ; AVX2-SLOW-NEXT: vpblendvb %ymm0, %ymm1, %ymm3, %ymm1
7578 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7579 ; AVX2-SLOW-NEXT: vpshufb %ymm9, %ymm11, %ymm1
7580 ; AVX2-SLOW-NEXT: vpshufb %xmm2, %xmm10, %xmm2
7581 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm6[1,1,2,3]
7582 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
7583 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
7584 ; AVX2-SLOW-NEXT: vpblendvb %ymm0, %ymm2, %ymm1, %ymm0
7585 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7586 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7587 ; AVX2-SLOW-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
7588 ; AVX2-SLOW-NEXT: # ymm13 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
7589 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm13, %xmm14
7590 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm14[0,2,0,3]
7591 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
7592 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm9 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
7593 ; AVX2-SLOW-NEXT: vpshufb %xmm9, %xmm13, %xmm1
7594 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
7595 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21>
7596 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7597 ; AVX2-SLOW-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
7598 ; AVX2-SLOW-NEXT: # ymm1 = mem[0],ymm1[1],mem[2,3,4,5],ymm1[6],mem[7]
7599 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7600 ; AVX2-SLOW-NEXT: vpshufb %ymm2, %ymm1, %ymm1
7601 ; AVX2-SLOW-NEXT: vmovdqa %ymm2, %ymm4
7602 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7]
7603 ; AVX2-SLOW-NEXT: vmovdqa 544(%rdi), %ymm0
7604 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7605 ; AVX2-SLOW-NEXT: vmovdqa 512(%rdi), %ymm2
7606 ; AVX2-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7607 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
7608 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm11[2,2,2,2,4,5,6,7]
7609 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,1,2,2]
7610 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm0 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
7611 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm11, %xmm3
7612 ; AVX2-SLOW-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7613 ; AVX2-SLOW-NEXT: vpshufb %xmm0, %xmm3, %xmm3
7614 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
7615 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
7616 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
7617 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
7618 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7619 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7620 ; AVX2-SLOW-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload
7621 ; AVX2-SLOW-NEXT: # ymm10 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
7622 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm10, %xmm1
7623 ; AVX2-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7624 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,0,3]
7625 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
7626 ; AVX2-SLOW-NEXT: vpshufb %xmm9, %xmm10, %xmm2
7627 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3],xmm1[4,5],xmm2[6,7]
7628 ; AVX2-SLOW-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm2 # 32-byte Folded Reload
7629 ; AVX2-SLOW-NEXT: # ymm2 = ymm15[0],mem[1],ymm15[2,3,4,5],mem[6],ymm15[7]
7630 ; AVX2-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7631 ; AVX2-SLOW-NEXT: vpshufb %ymm4, %ymm2, %ymm2
7632 ; AVX2-SLOW-NEXT: vmovdqa %ymm4, %ymm15
7633 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
7634 ; AVX2-SLOW-NEXT: vmovdqa 352(%rdi), %ymm2
7635 ; AVX2-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7636 ; AVX2-SLOW-NEXT: vmovdqa 320(%rdi), %ymm3
7637 ; AVX2-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7638 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm7 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
7639 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm7[2,2,2,2,4,5,6,7]
7640 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,2]
7641 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm7, %xmm3
7642 ; AVX2-SLOW-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7643 ; AVX2-SLOW-NEXT: vpshufb %xmm0, %xmm3, %xmm3
7644 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
7645 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
7646 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
7647 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
7648 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7649 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7650 ; AVX2-SLOW-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
7651 ; AVX2-SLOW-NEXT: # ymm4 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
7652 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm4, %xmm1
7653 ; AVX2-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7654 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,0,3]
7655 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
7656 ; AVX2-SLOW-NEXT: vpshufb %xmm9, %xmm4, %xmm2
7657 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3],xmm1[4,5],xmm2[6,7]
7658 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7659 ; AVX2-SLOW-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
7660 ; AVX2-SLOW-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3,4,5],mem[6],ymm2[7]
7661 ; AVX2-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7662 ; AVX2-SLOW-NEXT: vpshufb %ymm15, %ymm2, %ymm2
7663 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
7664 ; AVX2-SLOW-NEXT: vmovdqa 736(%rdi), %ymm2
7665 ; AVX2-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7666 ; AVX2-SLOW-NEXT: vmovdqa 704(%rdi), %ymm3
7667 ; AVX2-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7668 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
7669 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm5 = xmm3[2,2,2,2,4,5,6,7]
7670 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,1,2,2]
7671 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm3, %xmm2
7672 ; AVX2-SLOW-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7673 ; AVX2-SLOW-NEXT: vpshufb %xmm0, %xmm2, %xmm6
7674 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2],xmm5[3],xmm6[4,5],xmm5[6],xmm6[7]
7675 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
7676 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm5 = ymm1[0,1,2],ymm5[3,4,5,6,7],ymm1[8,9,10],ymm5[11,12,13,14,15]
7677 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
7678 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7679 ; AVX2-SLOW-NEXT: vmovdqa 160(%rdi), %ymm1
7680 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7681 ; AVX2-SLOW-NEXT: vmovdqa 128(%rdi), %ymm2
7682 ; AVX2-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7683 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
7684 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm2, %xmm12
7685 ; AVX2-SLOW-NEXT: vpshufb %xmm0, %xmm12, %xmm0
7686 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm5 = xmm2[2,2,2,2,4,5,6,7]
7687 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,1,2,2]
7688 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm8 = xmm0[0,1,2],xmm5[3],xmm0[4,5],xmm5[6],xmm0[7]
7689 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7690 ; AVX2-SLOW-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
7691 ; AVX2-SLOW-NEXT: # ymm1 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
7692 ; AVX2-SLOW-NEXT: vpshufb %xmm9, %xmm1, %xmm5
7693 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm6
7694 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm9 = xmm6[0,2,0,3]
7695 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,6,6,7]
7696 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm9 = xmm5[0,1],xmm9[2],xmm5[3],xmm9[4,5],xmm5[6,7]
7697 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7698 ; AVX2-SLOW-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
7699 ; AVX2-SLOW-NEXT: # ymm5 = mem[0],ymm0[1],mem[2,3,4,5],ymm0[6],mem[7]
7700 ; AVX2-SLOW-NEXT: vpshufb %ymm15, %ymm5, %ymm15
7701 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2],ymm15[3,4,5,6,7]
7702 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
7703 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm8 = ymm9[0,1,2],ymm8[3,4,5,6,7],ymm9[8,9,10],ymm8[11,12,13,14,15]
7704 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm8[4,5,6,7]
7705 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7706 ; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm9 = [2,3,14,15,10,11,0,0,2,3,14,15,10,11,0,0]
7707 ; AVX2-SLOW-NEXT: vpshufb %xmm9, %xmm14, %xmm15
7708 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm8 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
7709 ; AVX2-SLOW-NEXT: vpshufb %xmm8, %xmm13, %xmm13
7710 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm15 = xmm13[0,1],xmm15[2],xmm13[3],xmm15[4,5],xmm13[6,7]
7711 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm13 = <u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23>
7712 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7713 ; AVX2-SLOW-NEXT: vpshufb %ymm13, %ymm0, %ymm14
7714 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0,1,2],ymm14[3,4,5,6,7]
7715 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm15 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
7716 ; AVX2-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7717 ; AVX2-SLOW-NEXT: vpshufb %xmm15, %xmm0, %xmm0
7718 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,5,5,5,5]
7719 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm11[3],xmm0[4,5],xmm11[6],xmm0[7]
7720 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
7721 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm14[0,1,2],ymm0[3,4,5,6,7],ymm14[8,9,10],ymm0[11,12,13,14,15]
7722 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1,2,3],ymm0[4,5,6,7]
7723 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7724 ; AVX2-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7725 ; AVX2-SLOW-NEXT: vpshufb %xmm9, %xmm0, %xmm0
7726 ; AVX2-SLOW-NEXT: vpshufb %xmm8, %xmm10, %xmm10
7727 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm10[0,1],xmm0[2],xmm10[3],xmm0[4,5],xmm10[6,7]
7728 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
7729 ; AVX2-SLOW-NEXT: vpshufb %ymm13, %ymm10, %ymm10
7730 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm10[3,4,5,6,7]
7731 ; AVX2-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
7732 ; AVX2-SLOW-NEXT: vpshufb %xmm15, %xmm10, %xmm10
7733 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,5,5]
7734 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm7 = xmm10[0,1,2],xmm7[3],xmm10[4,5],xmm7[6],xmm10[7]
7735 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
7736 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm7 = ymm0[0,1,2],ymm7[3,4,5,6,7],ymm0[8,9,10],ymm7[11,12,13,14,15]
7737 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
7738 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7739 ; AVX2-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
7740 ; AVX2-SLOW-NEXT: vpshufb %xmm9, %xmm0, %xmm0
7741 ; AVX2-SLOW-NEXT: vpshufb %xmm8, %xmm4, %xmm4
7742 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm4[0,1],xmm0[2],xmm4[3],xmm0[4,5],xmm4[6,7]
7743 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
7744 ; AVX2-SLOW-NEXT: vpshufb %ymm13, %ymm4, %ymm4
7745 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3,4,5,6,7]
7746 ; AVX2-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
7747 ; AVX2-SLOW-NEXT: vpshufb %xmm15, %xmm4, %xmm4
7748 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
7749 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
7750 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
7751 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm3 = ymm0[0,1,2],ymm3[3,4,5,6,7],ymm0[8,9,10],ymm3[11,12,13,14,15]
7752 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
7753 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7754 ; AVX2-SLOW-NEXT: vpshufb %xmm15, %xmm12, %xmm0
7755 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
7756 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5],xmm2[6],xmm0[7]
7757 ; AVX2-SLOW-NEXT: vpshufb %xmm9, %xmm6, %xmm2
7758 ; AVX2-SLOW-NEXT: vpshufb %xmm8, %xmm1, %xmm1
7759 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
7760 ; AVX2-SLOW-NEXT: vpshufb %ymm13, %ymm5, %ymm2
7761 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
7762 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
7763 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
7764 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
7765 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7766 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7767 ; AVX2-SLOW-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
7768 ; AVX2-SLOW-NEXT: # ymm3 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
7769 ; AVX2-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7770 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7771 ; AVX2-SLOW-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7772 ; AVX2-SLOW-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
7773 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1
7774 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
7775 ; AVX2-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7776 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
7777 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7778 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
7779 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,6,7]
7780 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,4]
7781 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
7782 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7783 ; AVX2-SLOW-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
7784 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
7785 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[2,1,2,3]
7786 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm1
7787 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[0,3,2,1]
7788 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm5[0,0,2,3,4,5,6,7]
7789 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,3]
7790 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm7[2,1,2,0,4,5,6,7]
7791 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
7792 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
7793 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm8 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
7794 ; AVX2-SLOW-NEXT: vpshufb %ymm8, %ymm3, %ymm2
7795 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
7796 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,5,4]
7797 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
7798 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
7799 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7800 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7801 ; AVX2-SLOW-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
7802 ; AVX2-SLOW-NEXT: # ymm3 = ymm0[0,1],mem[2],ymm0[3],mem[4],ymm0[5,6],mem[7]
7803 ; AVX2-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7804 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7805 ; AVX2-SLOW-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7806 ; AVX2-SLOW-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
7807 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1
7808 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
7809 ; AVX2-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7810 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
7811 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7812 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
7813 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,6,7]
7814 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,4]
7815 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
7816 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
7817 ; AVX2-SLOW-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
7818 ; AVX2-SLOW-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
7819 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,1,2,3]
7820 ; AVX2-SLOW-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7821 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm1
7822 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
7823 ; AVX2-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
7824 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
7825 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,3]
7826 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[2,1,2,0,4,5,6,7]
7827 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
7828 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
7829 ; AVX2-SLOW-NEXT: vpshufb %ymm8, %ymm3, %ymm2
7830 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
7831 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,5,4]
7832 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
7833 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
7834 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7835 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7836 ; AVX2-SLOW-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
7837 ; AVX2-SLOW-NEXT: # ymm9 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
7838 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7839 ; AVX2-SLOW-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7840 ; AVX2-SLOW-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
7841 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1
7842 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[0,1,2,1]
7843 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,1,0,3]
7844 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm3[0,0,0,0,4,5,6,7]
7845 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,6,7]
7846 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,6,5,6,4]
7847 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm4 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
7848 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7849 ; AVX2-SLOW-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
7850 ; AVX2-SLOW-NEXT: # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
7851 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,1,2,3]
7852 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm0
7853 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,3,2,1]
7854 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm12 = xmm1[0,0,2,3,4,5,6,7]
7855 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[0,1,3,3]
7856 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm13 = xmm2[2,1,2,0,4,5,6,7]
7857 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm12 = xmm13[0],xmm12[1,2],xmm13[3],xmm12[4,5,6,7]
7858 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
7859 ; AVX2-SLOW-NEXT: vpshufb %ymm8, %ymm9, %ymm13
7860 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm4 = ymm13[0,1,2],ymm4[3,4,5,6,7],ymm13[8,9,10],ymm4[11,12,13,14,15]
7861 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,6,5,4]
7862 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3,4],xmm13[5,6,7]
7863 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm4[4,5,6,7]
7864 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7865 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7866 ; AVX2-SLOW-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
7867 ; AVX2-SLOW-NEXT: # ymm4 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
7868 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm4, %xmm12
7869 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[0,1,2,1]
7870 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm13 = xmm4[2,1,0,3]
7871 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm13[0,0,0,0,4,5,6,7]
7872 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,6,7]
7873 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm14 = xmm12[0,1,2,3,6,5,6,4]
7874 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm4[0,1,2,3],xmm14[4],xmm4[5,6],xmm14[7]
7875 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
7876 ; AVX2-SLOW-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm14 # 32-byte Folded Reload
7877 ; AVX2-SLOW-NEXT: # ymm14 = mem[0,1],ymm4[2],mem[3],ymm4[4],mem[5,6],ymm4[7]
7878 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
7879 ; AVX2-SLOW-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
7880 ; AVX2-SLOW-NEXT: # ymm4 = ymm4[0,1],mem[2],ymm4[3,4],mem[5],ymm4[6,7]
7881 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm15 = xmm4[2,1,2,3]
7882 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm4, %xmm4
7883 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
7884 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm10 = xmm4[0,0,2,3,4,5,6,7]
7885 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[0,1,3,3]
7886 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm11 = xmm15[2,1,2,0,4,5,6,7]
7887 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1,2],xmm11[3],xmm10[4,5,6,7]
7888 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
7889 ; AVX2-SLOW-NEXT: vpshufb %ymm8, %ymm14, %ymm8
7890 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm8[0,1,2],ymm0[3,4,5,6,7],ymm8[8,9,10],ymm0[11,12,13,14,15]
7891 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,6,5,4]
7892 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm8 = xmm10[0,1,2,3,4],xmm8[5,6,7]
7893 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
7894 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7895 ; AVX2-SLOW-NEXT: vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
7896 ; AVX2-SLOW-NEXT: # xmm0 = mem[0,1,2,3,7,5,6,5]
7897 ; AVX2-SLOW-NEXT: vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
7898 ; AVX2-SLOW-NEXT: # xmm8 = mem[1,1,1,1,4,5,6,7]
7899 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,7,7]
7900 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm8[0,1,2,3],xmm0[4],xmm8[5,6],xmm0[7]
7901 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
7902 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm5 = xmm5[0,1,3,3,4,5,6,7]
7903 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,7,7,7]
7904 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm5[1,2],xmm7[3],xmm5[4,5,6,7]
7905 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
7906 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
7907 ; AVX2-SLOW-NEXT: vpshufb %ymm5, %ymm8, %ymm8
7908 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
7909 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm8[0,1,2],ymm0[3,4,5,6,7],ymm8[8,9,10],ymm0[11,12,13,14,15]
7910 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm8 = xmm8[0,1,3,2]
7911 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3,4],xmm8[5,6,7]
7912 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm0[4,5,6,7]
7913 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7914 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,7,5,6,5]
7915 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[1,1,1,1,4,5,6,7]
7916 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
7917 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4],xmm3[5,6],xmm0[7]
7918 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,1,4,5,6,7]
7919 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,3,4,5,6,7]
7920 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
7921 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
7922 ; AVX2-SLOW-NEXT: vpshufb %ymm5, %ymm9, %ymm2
7923 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
7924 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
7925 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
7926 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
7927 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
7928 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
7929 ; AVX2-SLOW-NEXT: vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
7930 ; AVX2-SLOW-NEXT: # xmm0 = mem[0,1,2,3,7,5,6,5]
7931 ; AVX2-SLOW-NEXT: vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
7932 ; AVX2-SLOW-NEXT: # xmm1 = mem[1,1,1,1,4,5,6,7]
7933 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,7,7]
7934 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6],xmm0[7]
7935 ; AVX2-SLOW-NEXT: vpshuflw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
7936 ; AVX2-SLOW-NEXT: # xmm1 = mem[3,1,2,1,4,5,6,7]
7937 ; AVX2-SLOW-NEXT: vpshuflw $244, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
7938 ; AVX2-SLOW-NEXT: # xmm2 = mem[0,1,3,3,4,5,6,7]
7939 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
7940 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2],xmm1[3],xmm2[4,5,6,7]
7941 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
7942 ; AVX2-SLOW-NEXT: vpshufb %ymm5, %ymm2, %ymm2
7943 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
7944 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
7945 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
7946 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
7947 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm0[4,5,6,7]
7948 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm12[0,1,2,3,7,5,6,5]
7949 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm13[1,1,1,1,4,5,6,7]
7950 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,7]
7951 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4],xmm2[5,6],xmm1[7]
7952 ; AVX2-SLOW-NEXT: vpshufb %ymm5, %ymm14, %ymm2
7953 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm15[3,1,2,1,4,5,6,7]
7954 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[0,1,3,3,4,5,6,7]
7955 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,7,7,7]
7956 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2],xmm3[3],xmm4[4,5,6,7]
7957 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
7958 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
7959 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
7960 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5,6,7]
7961 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm2[0,1,2,3],ymm1[4,5,6,7]
7962 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7963 ; AVX2-SLOW-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
7964 ; AVX2-SLOW-NEXT: # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
7965 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm5
7966 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[0,3,2,1]
7967 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm6[0,1,0,2,4,5,6,7]
7968 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,6,6]
7969 ; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm7 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
7970 ; AVX2-SLOW-NEXT: vpshufb %xmm7, %xmm5, %xmm3
7971 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4],xmm1[5],xmm3[6,7]
7972 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
7973 ; AVX2-SLOW-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 32-byte Folded Reload
7974 ; AVX2-SLOW-NEXT: # ymm3 = mem[0,1,2,3,4],ymm1[5,6,7]
7975 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7976 ; AVX2-SLOW-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
7977 ; AVX2-SLOW-NEXT: # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
7978 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm11
7979 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm12 = xmm1[0,3,2,1]
7980 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm12[0,1,0,2,4,5,6,7]
7981 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,6,6]
7982 ; AVX2-SLOW-NEXT: vpshufb %xmm7, %xmm11, %xmm4
7983 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4],xmm1[5],xmm4[6,7]
7984 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
7985 ; AVX2-SLOW-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
7986 ; AVX2-SLOW-NEXT: # ymm4 = mem[0,1,2,3,4],ymm1[5,6,7]
7987 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
7988 ; AVX2-SLOW-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
7989 ; AVX2-SLOW-NEXT: # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
7990 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm13
7991 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm14 = xmm1[0,3,2,1]
7992 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm14[0,1,0,2,4,5,6,7]
7993 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,6,6]
7994 ; AVX2-SLOW-NEXT: vpshufb %xmm7, %xmm13, %xmm10
7995 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm10[4],xmm1[5],xmm10[6,7]
7996 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
7997 ; AVX2-SLOW-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
7998 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,1,2,3,4],ymm1[5,6,7]
7999 ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8000 ; AVX2-SLOW-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
8001 ; AVX2-SLOW-NEXT: # ymm10 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
8002 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm10, %xmm15
8003 ; AVX2-SLOW-NEXT: vpshufb %xmm7, %xmm15, %xmm7
8004 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm10[0,3,2,1]
8005 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm10 = xmm0[0,1,0,2,4,5,6,7]
8006 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,6,6,6]
8007 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm7 = xmm10[0,1,2,3],xmm7[4],xmm10[5],xmm7[6,7]
8008 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
8009 ; AVX2-SLOW-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm10 # 32-byte Folded Reload
8010 ; AVX2-SLOW-NEXT: # ymm10 = mem[0,1,2,3,4],ymm7[5,6,7]
8011 ; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm1 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
8012 ; AVX2-SLOW-NEXT: vpshufb %xmm1, %xmm11, %xmm7
8013 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm11 = xmm12[0,1,1,3,4,5,6,7]
8014 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[0,1,3,3]
8015 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm7 = xmm11[0,1,2,3],xmm7[4],xmm11[5],xmm7[6,7]
8016 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
8017 ; AVX2-SLOW-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
8018 ; AVX2-SLOW-NEXT: # ymm7 = mem[0,1,2,3,4],ymm7[5,6,7]
8019 ; AVX2-SLOW-NEXT: vpshufb %xmm1, %xmm5, %xmm5
8020 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[0,1,1,3,4,5,6,7]
8021 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,3,3]
8022 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4],xmm6[5],xmm5[6,7]
8023 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
8024 ; AVX2-SLOW-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
8025 ; AVX2-SLOW-NEXT: # ymm5 = mem[0,1,2,3,4],ymm5[5,6,7]
8026 ; AVX2-SLOW-NEXT: vpshufb %xmm1, %xmm13, %xmm6
8027 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm11 = xmm14[0,1,1,3,4,5,6,7]
8028 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[0,1,3,3]
8029 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm6 = xmm11[0,1,2,3],xmm6[4],xmm11[5],xmm6[6,7]
8030 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
8031 ; AVX2-SLOW-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
8032 ; AVX2-SLOW-NEXT: # ymm6 = mem[0,1,2,3,4],ymm6[5,6,7]
8033 ; AVX2-SLOW-NEXT: vpshufb %xmm1, %xmm15, %xmm1
8034 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
8035 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
8036 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6,7]
8037 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
8038 ; AVX2-SLOW-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8039 ; AVX2-SLOW-NEXT: # ymm0 = mem[0,1,2,3,4],ymm0[5,6,7]
8040 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8041 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 96(%rsi)
8042 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8043 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 32(%rsi)
8044 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8045 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 64(%rsi)
8046 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8047 ; AVX2-SLOW-NEXT: vmovaps %ymm1, (%rsi)
8048 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8049 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 96(%rdx)
8050 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8051 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 32(%rdx)
8052 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8053 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 64(%rdx)
8054 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8055 ; AVX2-SLOW-NEXT: vmovaps %ymm1, (%rdx)
8056 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8057 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 32(%rcx)
8058 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8059 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 96(%rcx)
8060 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8061 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 64(%rcx)
8062 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8063 ; AVX2-SLOW-NEXT: vmovaps %ymm1, (%rcx)
8064 ; AVX2-SLOW-NEXT: vmovdqa %ymm9, 96(%r8)
8065 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8066 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 32(%r8)
8067 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8068 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 64(%r8)
8069 ; AVX2-SLOW-NEXT: vmovdqa %ymm8, (%r8)
8070 ; AVX2-SLOW-NEXT: vmovdqa %ymm10, 96(%r9)
8071 ; AVX2-SLOW-NEXT: vmovdqa %ymm2, 32(%r9)
8072 ; AVX2-SLOW-NEXT: vmovdqa %ymm4, (%r9)
8073 ; AVX2-SLOW-NEXT: vmovdqa %ymm3, 64(%r9)
8074 ; AVX2-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
8075 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 96(%rax)
8076 ; AVX2-SLOW-NEXT: vmovdqa %ymm6, 32(%rax)
8077 ; AVX2-SLOW-NEXT: vmovdqa %ymm5, 64(%rax)
8078 ; AVX2-SLOW-NEXT: vmovdqa %ymm7, (%rax)
8079 ; AVX2-SLOW-NEXT: addq $1272, %rsp # imm = 0x4F8
8080 ; AVX2-SLOW-NEXT: vzeroupper
8081 ; AVX2-SLOW-NEXT: retq
;
8083 ; AVX2-FAST-LABEL: load_i16_stride6_vf64:
8084 ; AVX2-FAST: # %bb.0:
8085 ; AVX2-FAST-NEXT: subq $1256, %rsp # imm = 0x4E8
8086 ; AVX2-FAST-NEXT: vmovdqa 64(%rdi), %ymm0
8087 ; AVX2-FAST-NEXT: vmovdqa 96(%rdi), %ymm1
8088 ; AVX2-FAST-NEXT: vmovaps 672(%rdi), %ymm2
8089 ; AVX2-FAST-NEXT: vmovaps 640(%rdi), %ymm3
8090 ; AVX2-FAST-NEXT: vmovdqa 288(%rdi), %ymm4
8091 ; AVX2-FAST-NEXT: vmovdqa 256(%rdi), %ymm5
8092 ; AVX2-FAST-NEXT: vmovdqa 416(%rdi), %ymm8
8093 ; AVX2-FAST-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8094 ; AVX2-FAST-NEXT: vmovdqa 384(%rdi), %ymm9
8095 ; AVX2-FAST-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8096 ; AVX2-FAST-NEXT: vmovdqa 480(%rdi), %ymm6
8097 ; AVX2-FAST-NEXT: vmovdqa 448(%rdi), %ymm7
8098 ; AVX2-FAST-NEXT: vperm2i128 {{.*#+}} ymm10 = ymm7[2,3],ymm6[2,3]
8099 ; AVX2-FAST-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8100 ; AVX2-FAST-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm7[0,1],ymm6[0,1]
8101 ; AVX2-FAST-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8102 ; AVX2-FAST-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm5[2,3],ymm4[2,3]
8103 ; AVX2-FAST-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8104 ; AVX2-FAST-NEXT: vperm2i128 {{.*#+}} ymm15 = ymm5[0,1],ymm4[0,1]
8105 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm3[2,3],ymm2[2,3]
8106 ; AVX2-FAST-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8107 ; AVX2-FAST-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm3[0,1],ymm2[0,1]
8108 ; AVX2-FAST-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8109 ; AVX2-FAST-NEXT: vperm2i128 {{.*#+}} ymm12 = ymm0[2,3],ymm1[2,3]
8110 ; AVX2-FAST-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8111 ; AVX2-FAST-NEXT: vperm2i128 {{.*#+}} ymm14 = ymm0[0,1],ymm1[0,1]
8112 ; AVX2-FAST-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8113 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
8114 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7]
8115 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm3 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
8116 ; AVX2-FAST-NEXT: vpshufb %xmm3, %xmm1, %xmm0
8117 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm1, %xmm5
8118 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm4 = xmm5[2,2,2,2,4,5,6,7]
8119 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm0[0],xmm4[1],xmm0[2,3],xmm4[4],xmm0[5,6,7]
8120 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm10[0],ymm7[1],ymm10[2,3,4,5],ymm7[6],ymm10[7]
8121 ; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm4, %ymm7
8122 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
8123 ; AVX2-FAST-NEXT: vpblendvb %ymm0, %ymm6, %ymm7, %ymm6
8124 ; AVX2-FAST-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8125 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm6
8126 ; AVX2-FAST-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8127 ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %ymm7
8128 ; AVX2-FAST-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8129 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7]
8130 ; AVX2-FAST-NEXT: vpshufb %xmm3, %xmm7, %xmm6
8131 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm7, %xmm11
8132 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm8 = xmm11[2,2,2,2,4,5,6,7]
8133 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2,3],xmm8[4],xmm6[5,6,7]
8134 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm10 = ymm12[0],ymm14[1],ymm12[2,3,4,5],ymm14[6],ymm12[7]
8135 ; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm10, %ymm8
8136 ; AVX2-FAST-NEXT: vpblendvb %ymm0, %ymm6, %ymm8, %ymm6
8137 ; AVX2-FAST-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8138 ; AVX2-FAST-NEXT: vmovdqa 224(%rdi), %ymm8
8139 ; AVX2-FAST-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8140 ; AVX2-FAST-NEXT: vmovdqa 192(%rdi), %ymm6
8141 ; AVX2-FAST-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8142 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm8[0,1],ymm6[2],ymm8[3,4],ymm6[5],ymm8[6,7]
8143 ; AVX2-FAST-NEXT: vpshufb %xmm3, %xmm6, %xmm8
8144 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm6, %xmm9
8145 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm12 = xmm9[2,2,2,2,4,5,6,7]
8146 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm12 = xmm8[0],xmm12[1],xmm8[2,3],xmm12[4],xmm8[5,6,7]
8147 ; AVX2-FAST-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8148 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm8 = ymm13[0],ymm15[1],ymm13[2,3,4,5],ymm15[6],ymm13[7]
8149 ; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm8, %ymm13
8150 ; AVX2-FAST-NEXT: vpblendvb %ymm0, %ymm12, %ymm13, %ymm12
8151 ; AVX2-FAST-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8152 ; AVX2-FAST-NEXT: vmovdqa 608(%rdi), %ymm13
8153 ; AVX2-FAST-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8154 ; AVX2-FAST-NEXT: vmovdqa 576(%rdi), %ymm12
8155 ; AVX2-FAST-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8156 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
8157 ; AVX2-FAST-NEXT: vpshufb %xmm3, %xmm12, %xmm13
8158 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm12, %xmm3
8159 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm14 = xmm3[2,2,2,2,4,5,6,7]
8160 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm14 = xmm13[0],xmm14[1],xmm13[2,3],xmm14[4],xmm13[5,6,7]
8161 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
8162 ; AVX2-FAST-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
8163 ; AVX2-FAST-NEXT: # ymm13 = mem[0],ymm13[1],mem[2,3,4,5],ymm13[6],mem[7]
8164 ; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm13, %ymm2
8165 ; AVX2-FAST-NEXT: vpblendvb %ymm0, %ymm14, %ymm2, %ymm2
8166 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8167 ; AVX2-FAST-NEXT: vpbroadcastd {{.*#+}} xmm2 = [10,11,6,7,10,11,6,7,10,11,6,7,10,11,6,7]
8168 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm11, %xmm14
8169 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm11 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
8170 ; AVX2-FAST-NEXT: vpshufb %xmm11, %xmm7, %xmm7
8171 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm14 = xmm7[0],xmm14[1],xmm7[2,3],xmm14[4],xmm7[5,6,7]
8172 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
8173 ; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm10, %ymm10
8174 ; AVX2-FAST-NEXT: vpblendvb %ymm0, %ymm14, %ymm10, %ymm10
8175 ; AVX2-FAST-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8176 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm5, %xmm5
8177 ; AVX2-FAST-NEXT: vpshufb %xmm11, %xmm1, %xmm1
8178 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3],xmm5[4],xmm1[5,6,7]
8179 ; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm4, %ymm4
8180 ; AVX2-FAST-NEXT: vpblendvb %ymm0, %ymm1, %ymm4, %ymm1
8181 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8182 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm9, %xmm1
8183 ; AVX2-FAST-NEXT: vpshufb %xmm11, %xmm6, %xmm4
8184 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm4[0],xmm1[1],xmm4[2,3],xmm1[4],xmm4[5,6,7]
8185 ; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm8, %ymm4
8186 ; AVX2-FAST-NEXT: vpblendvb %ymm0, %ymm1, %ymm4, %ymm1
8187 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8188 ; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm13, %ymm1
8189 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm3, %xmm2
8190 ; AVX2-FAST-NEXT: vpshufb %xmm11, %xmm12, %xmm3
8191 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6,7]
8192 ; AVX2-FAST-NEXT: vpblendvb %ymm0, %ymm2, %ymm1, %ymm0
8193 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8194 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8195 ; AVX2-FAST-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
8196 ; AVX2-FAST-NEXT: # ymm10 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
8197 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm10, %xmm0
8198 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm14 = xmm0[2,1,0,3]
8199 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm11 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
8200 ; AVX2-FAST-NEXT: vpshufb %xmm11, %xmm10, %xmm0
8201 ; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} xmm2 = [8,9,12,13,0,1,0,0,8,9,12,13,0,1,0,0]
8202 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm14, %xmm1
8203 ; AVX2-FAST-NEXT: vmovdqa %xmm2, %xmm4
8204 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
8205 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21>
8206 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8207 ; AVX2-FAST-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
8208 ; AVX2-FAST-NEXT: # ymm1 = mem[0],ymm1[1],mem[2,3,4,5],ymm1[6],mem[7]
8209 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8210 ; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm1, %ymm1
8211 ; AVX2-FAST-NEXT: vmovdqa %ymm2, %ymm5
8212 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7]
8213 ; AVX2-FAST-NEXT: vmovdqa 544(%rdi), %ymm0
8214 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8215 ; AVX2-FAST-NEXT: vmovdqa 512(%rdi), %ymm2
8216 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8217 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm9 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
8218 ; AVX2-FAST-NEXT: vpbroadcastd {{.*#+}} xmm0 = [8,9,4,5,8,9,4,5,8,9,4,5,8,9,4,5]
8219 ; AVX2-FAST-NEXT: vpshufb %xmm0, %xmm9, %xmm2
8220 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm13 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
8221 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm9, %xmm3
8222 ; AVX2-FAST-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8223 ; AVX2-FAST-NEXT: vpshufb %xmm13, %xmm3, %xmm3
8224 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
8225 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
8226 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
8227 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
8228 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8229 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8230 ; AVX2-FAST-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm8 # 32-byte Folded Reload
8231 ; AVX2-FAST-NEXT: # ymm8 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
8232 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm8, %xmm1
8233 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
8234 ; AVX2-FAST-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8235 ; AVX2-FAST-NEXT: vpshufb %xmm11, %xmm8, %xmm1
8236 ; AVX2-FAST-NEXT: vpshufb %xmm4, %xmm2, %xmm2
8237 ; AVX2-FAST-NEXT: vmovdqa %xmm4, %xmm6
8238 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
8239 ; AVX2-FAST-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm2 # 32-byte Folded Reload
8240 ; AVX2-FAST-NEXT: # ymm2 = ymm15[0],mem[1],ymm15[2,3,4,5],mem[6],ymm15[7]
8241 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8242 ; AVX2-FAST-NEXT: vpshufb %ymm5, %ymm2, %ymm2
8243 ; AVX2-FAST-NEXT: vmovdqa %ymm5, %ymm15
8244 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
8245 ; AVX2-FAST-NEXT: vmovdqa 352(%rdi), %ymm2
8246 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8247 ; AVX2-FAST-NEXT: vmovdqa 320(%rdi), %ymm3
8248 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8249 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
8250 ; AVX2-FAST-NEXT: vpshufb %xmm0, %xmm5, %xmm2
8251 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm5, %xmm3
8252 ; AVX2-FAST-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8253 ; AVX2-FAST-NEXT: vpshufb %xmm13, %xmm3, %xmm3
8254 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
8255 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
8256 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
8257 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
8258 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8259 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8260 ; AVX2-FAST-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
8261 ; AVX2-FAST-NEXT: # ymm4 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
8262 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm4, %xmm1
8263 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
8264 ; AVX2-FAST-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8265 ; AVX2-FAST-NEXT: vpshufb %xmm11, %xmm4, %xmm1
8266 ; AVX2-FAST-NEXT: vpshufb %xmm6, %xmm2, %xmm2
8267 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
8268 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
8269 ; AVX2-FAST-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
8270 ; AVX2-FAST-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3,4,5],mem[6],ymm2[7]
8271 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8272 ; AVX2-FAST-NEXT: vpshufb %ymm15, %ymm2, %ymm2
8273 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
8274 ; AVX2-FAST-NEXT: vmovdqa 736(%rdi), %ymm2
8275 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8276 ; AVX2-FAST-NEXT: vmovdqa 704(%rdi), %ymm3
8277 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8278 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
8279 ; AVX2-FAST-NEXT: vpshufb %xmm0, %xmm3, %xmm6
8280 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm3, %xmm2
8281 ; AVX2-FAST-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8282 ; AVX2-FAST-NEXT: vpshufb %xmm13, %xmm2, %xmm7
8283 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
8284 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
8285 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm6 = ymm1[0,1,2],ymm6[3,4,5,6,7],ymm1[8,9,10],ymm6[11,12,13,14,15]
8286 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
8287 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8288 ; AVX2-FAST-NEXT: vmovdqa 160(%rdi), %ymm1
8289 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8290 ; AVX2-FAST-NEXT: vmovdqa 128(%rdi), %ymm2
8291 ; AVX2-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8292 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
8293 ; AVX2-FAST-NEXT: vpshufb %xmm0, %xmm2, %xmm0
8294 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm2, %xmm12
8295 ; AVX2-FAST-NEXT: vpshufb %xmm13, %xmm12, %xmm6
8296 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm7 = xmm6[0,1,2],xmm0[3],xmm6[4,5],xmm0[6],xmm6[7]
8297 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8298 ; AVX2-FAST-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
8299 ; AVX2-FAST-NEXT: # ymm1 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
8300 ; AVX2-FAST-NEXT: vpshufb %xmm11, %xmm1, %xmm6
8301 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm1, %xmm11
8302 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm13 = xmm11[2,1,0,3]
8303 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm11 = xmm13[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
8304 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm11 = xmm6[0,1],xmm11[2],xmm6[3],xmm11[4,5],xmm6[6,7]
8305 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8306 ; AVX2-FAST-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
8307 ; AVX2-FAST-NEXT: # ymm6 = mem[0],ymm0[1],mem[2,3,4,5],ymm0[6],mem[7]
8308 ; AVX2-FAST-NEXT: vpshufb %ymm15, %ymm6, %ymm15
8309 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2],ymm15[3,4,5,6,7]
8310 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
8311 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm7 = ymm11[0,1,2],ymm7[3,4,5,6,7],ymm11[8,9,10],ymm7[11,12,13,14,15]
8312 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm7[4,5,6,7]
8313 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8314 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm11 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
8315 ; AVX2-FAST-NEXT: vpshufb %xmm11, %xmm10, %xmm7
8316 ; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} xmm10 = [10,11,14,15,2,3,0,0,10,11,14,15,2,3,0,0]
8317 ; AVX2-FAST-NEXT: vpshufb %xmm10, %xmm14, %xmm15
8318 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm15 = xmm7[0,1],xmm15[2],xmm7[3],xmm15[4,5],xmm7[6,7]
8319 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23>
8320 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8321 ; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm0, %ymm14
8322 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0,1,2],ymm14[3,4,5,6,7]
8323 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm15 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
8324 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8325 ; AVX2-FAST-NEXT: vpshufb %xmm15, %xmm0, %xmm0
8326 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,5,5,5,5]
8327 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm9[3],xmm0[4,5],xmm9[6],xmm0[7]
8328 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
8329 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm0 = ymm14[0,1,2],ymm0[3,4,5,6,7],ymm14[8,9,10],ymm0[11,12,13,14,15]
8330 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1,2,3],ymm0[4,5,6,7]
8331 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8332 ; AVX2-FAST-NEXT: vpshufb %xmm11, %xmm8, %xmm0
8333 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
8334 ; AVX2-FAST-NEXT: vpshufb %xmm10, %xmm8, %xmm8
8335 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm8[2],xmm0[3],xmm8[4,5],xmm0[6,7]
8336 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
8337 ; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm8, %ymm8
8338 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm8[3,4,5,6,7]
8339 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
8340 ; AVX2-FAST-NEXT: vpshufb %xmm15, %xmm8, %xmm8
8341 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
8342 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3],xmm8[4,5],xmm5[6],xmm8[7]
8343 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
8344 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm5 = ymm0[0,1,2],ymm5[3,4,5,6,7],ymm0[8,9,10],ymm5[11,12,13,14,15]
8345 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
8346 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8347 ; AVX2-FAST-NEXT: vpshufb %xmm11, %xmm4, %xmm0
8348 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
8349 ; AVX2-FAST-NEXT: vpshufb %xmm10, %xmm4, %xmm4
8350 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2],xmm0[3],xmm4[4,5],xmm0[6,7]
8351 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
8352 ; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm4, %ymm4
8353 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3,4,5,6,7]
8354 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
8355 ; AVX2-FAST-NEXT: vpshufb %xmm15, %xmm4, %xmm4
8356 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
8357 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
8358 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
8359 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm3 = ymm0[0,1,2],ymm3[3,4,5,6,7],ymm0[8,9,10],ymm3[11,12,13,14,15]
8360 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
8361 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8362 ; AVX2-FAST-NEXT: vpshufb %xmm15, %xmm12, %xmm0
8363 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
8364 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5],xmm2[6],xmm0[7]
8365 ; AVX2-FAST-NEXT: vpshufb %xmm11, %xmm1, %xmm1
8366 ; AVX2-FAST-NEXT: vpshufb %xmm10, %xmm13, %xmm2
8367 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
8368 ; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm6, %ymm2
8369 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
8370 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
8371 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
8372 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
8373 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8374 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8375 ; AVX2-FAST-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
8376 ; AVX2-FAST-NEXT: # ymm3 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
8377 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8378 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8379 ; AVX2-FAST-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8380 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
8381 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,1,0,3]
8382 ; AVX2-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8383 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm0, %xmm0
8384 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,1,2,1]
8385 ; AVX2-FAST-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8386 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm12 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
8387 ; AVX2-FAST-NEXT: vpshufb %xmm12, %xmm1, %xmm0
8388 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,6,5,6,4]
8389 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
8390 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8391 ; AVX2-FAST-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
8392 ; AVX2-FAST-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
8393 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm1, %xmm2
8394 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm9 = xmm2[0,3,2,1]
8395 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[2,1,2,3]
8396 ; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} xmm10 = [12,13,0,1,4,5,0,0,12,13,0,1,4,5,0,0]
8397 ; AVX2-FAST-NEXT: vpshufb %xmm10, %xmm9, %xmm1
8398 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm2 = xmm7[2,1,2,0,4,5,6,7]
8399 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
8400 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
8401 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm11 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
8402 ; AVX2-FAST-NEXT: vpshufb %ymm11, %ymm3, %ymm2
8403 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
8404 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,5,4]
8405 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
8406 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
8407 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8408 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8409 ; AVX2-FAST-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
8410 ; AVX2-FAST-NEXT: # ymm3 = ymm0[0,1],mem[2],ymm0[3],mem[4],ymm0[5,6],mem[7]
8411 ; AVX2-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8412 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8413 ; AVX2-FAST-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8414 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
8415 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,1,0,3]
8416 ; AVX2-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8417 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm0, %xmm0
8418 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,1,2,1]
8419 ; AVX2-FAST-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8420 ; AVX2-FAST-NEXT: vpshufb %xmm12, %xmm1, %xmm0
8421 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,6,5,6,4]
8422 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
8423 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8424 ; AVX2-FAST-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
8425 ; AVX2-FAST-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
8426 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm1, %xmm2
8427 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
8428 ; AVX2-FAST-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8429 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,1,2,3]
8430 ; AVX2-FAST-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8431 ; AVX2-FAST-NEXT: vpshufb %xmm10, %xmm2, %xmm1
8432 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm2 = xmm4[2,1,2,0,4,5,6,7]
8433 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
8434 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
8435 ; AVX2-FAST-NEXT: vpshufb %ymm11, %ymm3, %ymm2
8436 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
8437 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,5,4]
8438 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
8439 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
8440 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8441 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8442 ; AVX2-FAST-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
8443 ; AVX2-FAST-NEXT: # ymm13 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
8444 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8445 ; AVX2-FAST-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8446 ; AVX2-FAST-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
8447 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[2,1,0,3]
8448 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm0, %xmm0
8449 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[0,1,2,1]
8450 ; AVX2-FAST-NEXT: vpshufb %xmm12, %xmm8, %xmm0
8451 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,6,5,6,4]
8452 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
8453 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8454 ; AVX2-FAST-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
8455 ; AVX2-FAST-NEXT: # ymm1 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
8456 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm1, %xmm0
8457 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[0,3,2,1]
8458 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
8459 ; AVX2-FAST-NEXT: vpshufb %xmm10, %xmm4, %xmm3
8460 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm5 = xmm1[2,1,2,0,4,5,6,7]
8461 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0],xmm3[1,2],xmm5[3],xmm3[4,5,6,7]
8462 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
8463 ; AVX2-FAST-NEXT: vpshufb %ymm11, %ymm13, %ymm5
8464 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3,4,5,6,7],ymm5[8,9,10],ymm2[11,12,13,14,15]
8465 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,5,4]
8466 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm5[5,6,7]
8467 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm2[4,5,6,7]
8468 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8469 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8470 ; AVX2-FAST-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
8471 ; AVX2-FAST-NEXT: # ymm2 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
8472 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm14 = xmm2[2,1,0,3]
8473 ; AVX2-FAST-NEXT: vpshufb %xmm12, %xmm14, %xmm3
8474 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm2, %xmm2
8475 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm12 = xmm2[0,1,2,1]
8476 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm2 = xmm12[0,1,2,3,6,5,6,4]
8477 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5,6],xmm2[7]
8478 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8479 ; AVX2-FAST-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
8480 ; AVX2-FAST-NEXT: # ymm15 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
8481 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8482 ; AVX2-FAST-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
8483 ; AVX2-FAST-NEXT: # ymm3 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
8484 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm3, %xmm5
8485 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,3,2,1]
8486 ; AVX2-FAST-NEXT: vpshufb %xmm10, %xmm5, %xmm0
8487 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm10 = xmm3[2,1,2,3]
8488 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm3 = xmm10[2,1,2,0,4,5,6,7]
8489 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1,2],xmm3[3],xmm0[4,5,6,7]
8490 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
8491 ; AVX2-FAST-NEXT: vpshufb %ymm11, %ymm15, %ymm3
8492 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7],ymm3[8,9,10],ymm2[11,12,13,14,15]
8493 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,5,4]
8494 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm3[5,6,7]
8495 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
8496 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8497 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm2 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
8498 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8499 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm0, %xmm0
8500 ; AVX2-FAST-NEXT: vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
8501 ; AVX2-FAST-NEXT: # xmm3 = mem[0,1,2,3,7,5,6,5]
8502 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4],xmm0[5,6],xmm3[7]
8503 ; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} xmm3 = [14,15,2,3,6,7,0,0,14,15,2,3,6,7,0,0]
8504 ; AVX2-FAST-NEXT: vpshufb %xmm3, %xmm9, %xmm9
8505 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
8506 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm9 = xmm7[0],xmm9[1,2],xmm7[3],xmm9[4,5,6,7]
8507 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
8508 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
8509 ; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm11, %ymm11
8510 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
8511 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm0 = ymm11[0,1,2],ymm0[3,4,5,6,7],ymm11[8,9,10],ymm0[11,12,13,14,15]
8512 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[0,1,3,2]
8513 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm11[5,6,7]
8514 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
8515 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8516 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm8, %xmm0
8517 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,5,6,5]
8518 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4],xmm0[5,6],xmm6[7]
8519 ; AVX2-FAST-NEXT: vpshufb %xmm3, %xmm4, %xmm4
8520 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,1,4,5,6,7]
8521 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1,2],xmm1[3],xmm4[4,5,6,7]
8522 ; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm13, %ymm4
8523 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
8524 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3,4,5,6,7],ymm4[8,9,10],ymm0[11,12,13,14,15]
8525 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,1,3,2]
8526 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm4[5,6,7]
8527 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
8528 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8529 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8530 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm0, %xmm1
8531 ; AVX2-FAST-NEXT: vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
8532 ; AVX2-FAST-NEXT: # xmm4 = mem[0,1,2,3,7,5,6,5]
8533 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4],xmm1[5,6],xmm4[7]
8534 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8535 ; AVX2-FAST-NEXT: vpshufb %xmm3, %xmm0, %xmm4
8536 ; AVX2-FAST-NEXT: vpshuflw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
8537 ; AVX2-FAST-NEXT: # xmm6 = mem[3,1,2,1,4,5,6,7]
8538 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0],xmm4[1,2],xmm6[3],xmm4[4,5,6,7]
8539 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8540 ; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm0, %ymm6
8541 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
8542 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm1 = ymm6[0,1,2],ymm1[3,4,5,6,7],ymm6[8,9,10],ymm1[11,12,13,14,15]
8543 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,3,2]
8544 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm6[5,6,7]
8545 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm11 = ymm4[0,1,2,3],ymm1[4,5,6,7]
8546 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm14, %xmm1
8547 ; AVX2-FAST-NEXT: vpshufhw {{.*#+}} xmm2 = xmm12[0,1,2,3,7,5,6,5]
8548 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
8549 ; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm15, %ymm2
8550 ; AVX2-FAST-NEXT: vpshufb %xmm3, %xmm5, %xmm3
8551 ; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm4 = xmm10[3,1,2,1,4,5,6,7]
8552 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2],xmm4[3],xmm3[4,5,6,7]
8553 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
8554 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
8555 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
8556 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5,6,7]
8557 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm9 = ymm2[0,1,2,3],ymm1[4,5,6,7]
8558 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8559 ; AVX2-FAST-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
8560 ; AVX2-FAST-NEXT: # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
8561 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm1, %xmm7
8562 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm8 = xmm1[0,3,2,1]
8563 ; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} xmm2 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
8564 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm7, %xmm1
8565 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm6 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
8566 ; AVX2-FAST-NEXT: vpshufb %xmm6, %xmm8, %xmm4
8567 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm4[0,1,2,3],xmm1[4],xmm4[5],xmm1[6,7]
8568 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
8569 ; AVX2-FAST-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
8570 ; AVX2-FAST-NEXT: # ymm4 = mem[0,1,2,3,4],ymm1[5,6,7]
8571 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8572 ; AVX2-FAST-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
8573 ; AVX2-FAST-NEXT: # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
8574 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm1, %xmm10
8575 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm12 = xmm1[0,3,2,1]
8576 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm10, %xmm1
8577 ; AVX2-FAST-NEXT: vpshufb %xmm6, %xmm12, %xmm5
8578 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm5[0,1,2,3],xmm1[4],xmm5[5],xmm1[6,7]
8579 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
8580 ; AVX2-FAST-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
8581 ; AVX2-FAST-NEXT: # ymm5 = mem[0,1,2,3,4],ymm1[5,6,7]
8582 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8583 ; AVX2-FAST-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
8584 ; AVX2-FAST-NEXT: # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
8585 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm1, %xmm13
8586 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm14 = xmm1[0,3,2,1]
8587 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm13, %xmm1
8588 ; AVX2-FAST-NEXT: vpshufb %xmm6, %xmm14, %xmm15
8589 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm15[0,1,2,3],xmm1[4],xmm15[5],xmm1[6,7]
8590 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
8591 ; AVX2-FAST-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 32-byte Folded Reload
8592 ; AVX2-FAST-NEXT: # ymm3 = mem[0,1,2,3,4],ymm1[5,6,7]
8593 ; AVX2-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8594 ; AVX2-FAST-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
8595 ; AVX2-FAST-NEXT: # ymm15 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
8596 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm15, %xmm0
8597 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm0, %xmm2
8598 ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm15 = xmm15[0,3,2,1]
8599 ; AVX2-FAST-NEXT: vpshufb %xmm6, %xmm15, %xmm6
8600 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm6[0,1,2,3],xmm2[4],xmm6[5],xmm2[6,7]
8601 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
8602 ; AVX2-FAST-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
8603 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1,2,3,4],ymm2[5,6,7]
8604 ; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} xmm1 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
8605 ; AVX2-FAST-NEXT: vpshufb %xmm1, %xmm10, %xmm6
8606 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm10 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
8607 ; AVX2-FAST-NEXT: vpshufb %xmm10, %xmm12, %xmm12
8608 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm12[0,1,2,3],xmm6[4],xmm12[5],xmm6[6,7]
8609 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
8610 ; AVX2-FAST-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm12 # 32-byte Folded Reload
8611 ; AVX2-FAST-NEXT: # ymm12 = mem[0,1,2,3,4],ymm6[5,6,7]
8612 ; AVX2-FAST-NEXT: vpshufb %xmm1, %xmm7, %xmm6
8613 ; AVX2-FAST-NEXT: vpshufb %xmm10, %xmm8, %xmm7
8614 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4],xmm7[5],xmm6[6,7]
8615 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
8616 ; AVX2-FAST-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
8617 ; AVX2-FAST-NEXT: # ymm6 = mem[0,1,2,3,4],ymm6[5,6,7]
8618 ; AVX2-FAST-NEXT: vpshufb %xmm1, %xmm13, %xmm7
8619 ; AVX2-FAST-NEXT: vpshufb %xmm10, %xmm14, %xmm8
8620 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm7 = xmm8[0,1,2,3],xmm7[4],xmm8[5],xmm7[6,7]
8621 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
8622 ; AVX2-FAST-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
8623 ; AVX2-FAST-NEXT: # ymm7 = mem[0,1,2,3,4],ymm7[5,6,7]
8624 ; AVX2-FAST-NEXT: vpshufb %xmm1, %xmm0, %xmm0
8625 ; AVX2-FAST-NEXT: vpshufb %xmm10, %xmm15, %xmm1
8626 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5],xmm0[6,7]
8627 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
8628 ; AVX2-FAST-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8629 ; AVX2-FAST-NEXT: # ymm0 = mem[0,1,2,3,4],ymm0[5,6,7]
8630 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8631 ; AVX2-FAST-NEXT: vmovaps %ymm1, 96(%rsi)
8632 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8633 ; AVX2-FAST-NEXT: vmovaps %ymm1, 32(%rsi)
8634 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8635 ; AVX2-FAST-NEXT: vmovaps %ymm1, 64(%rsi)
8636 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8637 ; AVX2-FAST-NEXT: vmovaps %ymm1, (%rsi)
8638 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8639 ; AVX2-FAST-NEXT: vmovaps %ymm1, 96(%rdx)
8640 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8641 ; AVX2-FAST-NEXT: vmovaps %ymm1, 32(%rdx)
8642 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8643 ; AVX2-FAST-NEXT: vmovaps %ymm1, 64(%rdx)
8644 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8645 ; AVX2-FAST-NEXT: vmovaps %ymm1, (%rdx)
8646 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8647 ; AVX2-FAST-NEXT: vmovaps %ymm1, 32(%rcx)
8648 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8649 ; AVX2-FAST-NEXT: vmovaps %ymm1, 96(%rcx)
8650 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8651 ; AVX2-FAST-NEXT: vmovaps %ymm1, 64(%rcx)
8652 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8653 ; AVX2-FAST-NEXT: vmovaps %ymm1, (%rcx)
8654 ; AVX2-FAST-NEXT: vmovdqa %ymm11, 96(%r8)
8655 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8656 ; AVX2-FAST-NEXT: vmovaps %ymm1, 32(%r8)
8657 ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8658 ; AVX2-FAST-NEXT: vmovaps %ymm1, 64(%r8)
8659 ; AVX2-FAST-NEXT: vmovdqa %ymm9, (%r8)
8660 ; AVX2-FAST-NEXT: vmovdqa %ymm2, 96(%r9)
8661 ; AVX2-FAST-NEXT: vmovdqa %ymm3, 32(%r9)
8662 ; AVX2-FAST-NEXT: vmovdqa %ymm5, (%r9)
8663 ; AVX2-FAST-NEXT: vmovdqa %ymm4, 64(%r9)
8664 ; AVX2-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
8665 ; AVX2-FAST-NEXT: vmovdqa %ymm0, 96(%rax)
8666 ; AVX2-FAST-NEXT: vmovdqa %ymm7, 32(%rax)
8667 ; AVX2-FAST-NEXT: vmovdqa %ymm6, 64(%rax)
8668 ; AVX2-FAST-NEXT: vmovdqa %ymm12, (%rax)
8669 ; AVX2-FAST-NEXT: addq $1256, %rsp # imm = 0x4E8
8670 ; AVX2-FAST-NEXT: vzeroupper
8671 ; AVX2-FAST-NEXT: retq
8673 ; AVX2-FAST-PERLANE-LABEL: load_i16_stride6_vf64:
8674 ; AVX2-FAST-PERLANE: # %bb.0:
8675 ; AVX2-FAST-PERLANE-NEXT: subq $1256, %rsp # imm = 0x4E8
8676 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rdi), %ymm0
8677 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rdi), %ymm1
8678 ; AVX2-FAST-PERLANE-NEXT: vmovaps 672(%rdi), %ymm2
8679 ; AVX2-FAST-PERLANE-NEXT: vmovaps 640(%rdi), %ymm3
8680 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 288(%rdi), %ymm4
8681 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 256(%rdi), %ymm5
8682 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 416(%rdi), %ymm8
8683 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8684 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 384(%rdi), %ymm9
8685 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8686 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 480(%rdi), %ymm6
8687 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 448(%rdi), %ymm7
8688 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 {{.*#+}} ymm10 = ymm7[2,3],ymm6[2,3]
8689 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8690 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm7[0,1],ymm6[0,1]
8691 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8692 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 {{.*#+}} ymm13 = ymm5[2,3],ymm4[2,3]
8693 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8694 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 {{.*#+}} ymm15 = ymm5[0,1],ymm4[0,1]
8695 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm3[2,3],ymm2[2,3]
8696 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8697 ; AVX2-FAST-PERLANE-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm3[0,1],ymm2[0,1]
8698 ; AVX2-FAST-PERLANE-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8699 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 {{.*#+}} ymm12 = ymm0[2,3],ymm1[2,3]
8700 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8701 ; AVX2-FAST-PERLANE-NEXT: vperm2i128 {{.*#+}} ymm14 = ymm0[0,1],ymm1[0,1]
8702 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8703 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm2 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
8704 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7]
8705 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm3 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
8706 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm3, %xmm1, %xmm0
8707 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm1, %xmm5
8708 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm4 = xmm5[2,2,2,2,4,5,6,7]
8709 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm6 = xmm0[0],xmm4[1],xmm0[2,3],xmm4[4],xmm0[5,6,7]
8710 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm10[0],ymm7[1],ymm10[2,3,4,5],ymm7[6],ymm10[7]
8711 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm4, %ymm7
8712 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
8713 ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm0, %ymm6, %ymm7, %ymm6
8714 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8715 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm6
8716 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8717 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm7
8718 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8719 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7]
8720 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm3, %xmm7, %xmm6
8721 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm7, %xmm11
8722 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm8 = xmm11[2,2,2,2,4,5,6,7]
8723 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2,3],xmm8[4],xmm6[5,6,7]
8724 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm10 = ymm12[0],ymm14[1],ymm12[2,3,4,5],ymm14[6],ymm12[7]
8725 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm10, %ymm8
8726 ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm0, %ymm6, %ymm8, %ymm6
8727 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8728 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 224(%rdi), %ymm8
8729 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8730 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 192(%rdi), %ymm6
8731 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8732 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm6 = ymm8[0,1],ymm6[2],ymm8[3,4],ymm6[5],ymm8[6,7]
8733 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm3, %xmm6, %xmm8
8734 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm6, %xmm9
8735 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm12 = xmm9[2,2,2,2,4,5,6,7]
8736 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm12 = xmm8[0],xmm12[1],xmm8[2,3],xmm12[4],xmm8[5,6,7]
8737 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8738 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm8 = ymm13[0],ymm15[1],ymm13[2,3,4,5],ymm15[6],ymm13[7]
8739 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm8, %ymm13
8740 ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm0, %ymm12, %ymm13, %ymm12
8741 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8742 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 608(%rdi), %ymm13
8743 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8744 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 576(%rdi), %ymm12
8745 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8746 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
8747 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm3, %xmm12, %xmm13
8748 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm12, %xmm3
8749 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm14 = xmm3[2,2,2,2,4,5,6,7]
8750 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm14 = xmm13[0],xmm14[1],xmm13[2,3],xmm14[4],xmm13[5,6,7]
8751 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
8752 ; AVX2-FAST-PERLANE-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
8753 ; AVX2-FAST-PERLANE-NEXT: # ymm13 = mem[0],ymm13[1],mem[2,3,4,5],ymm13[6],mem[7]
8754 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm13, %ymm2
8755 ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm0, %ymm14, %ymm2, %ymm2
8756 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8757 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd {{.*#+}} xmm2 = [10,11,6,7,10,11,6,7,10,11,6,7,10,11,6,7]
8758 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm2, %xmm11, %xmm14
8759 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm11 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
8760 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm11, %xmm7, %xmm7
8761 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm14 = xmm7[0],xmm14[1],xmm7[2,3],xmm14[4],xmm7[5,6,7]
8762 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm7 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
8763 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm7, %ymm10, %ymm10
8764 ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm0, %ymm14, %ymm10, %ymm10
8765 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8766 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm2, %xmm5, %xmm5
8767 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm11, %xmm1, %xmm1
8768 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3],xmm5[4],xmm1[5,6,7]
8769 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm7, %ymm4, %ymm4
8770 ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm0, %ymm1, %ymm4, %ymm1
8771 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8772 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm2, %xmm9, %xmm1
8773 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm11, %xmm6, %xmm4
8774 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm4[0],xmm1[1],xmm4[2,3],xmm1[4],xmm4[5,6,7]
8775 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm7, %ymm8, %ymm4
8776 ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm0, %ymm1, %ymm4, %ymm1
8777 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8778 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm7, %ymm13, %ymm1
8779 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm2, %xmm3, %xmm2
8780 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm11, %xmm12, %xmm3
8781 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6,7]
8782 ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm0, %ymm2, %ymm1, %ymm0
8783 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8784 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8785 ; AVX2-FAST-PERLANE-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
8786 ; AVX2-FAST-PERLANE-NEXT: # ymm10 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
8787 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm10, %xmm0
8788 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm14 = xmm0[2,1,0,3]
8789 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm11 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
8790 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm11, %xmm10, %xmm0
8791 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{.*#+}} xmm2 = [8,9,12,13,0,1,0,0,8,9,12,13,0,1,0,0]
8792 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm2, %xmm14, %xmm1
8793 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm2, %xmm4
8794 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
8795 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21>
8796 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8797 ; AVX2-FAST-PERLANE-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
8798 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0],ymm1[1],mem[2,3,4,5],ymm1[6],mem[7]
8799 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8800 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm1, %ymm1
8801 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm2, %ymm5
8802 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7]
8803 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 544(%rdi), %ymm0
8804 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8805 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 512(%rdi), %ymm2
8806 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8807 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm9 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
8808 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastd {{.*#+}} xmm0 = [8,9,4,5,8,9,4,5,8,9,4,5,8,9,4,5]
8809 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm0, %xmm9, %xmm2
8810 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm13 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
8811 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm9, %xmm3
8812 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8813 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm13, %xmm3, %xmm3
8814 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
8815 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
8816 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
8817 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
8818 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8819 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8820 ; AVX2-FAST-PERLANE-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm8 # 32-byte Folded Reload
8821 ; AVX2-FAST-PERLANE-NEXT: # ymm8 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
8822 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm8, %xmm1
8823 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
8824 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8825 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm11, %xmm8, %xmm1
8826 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm4, %xmm2, %xmm2
8827 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm4, %xmm6
8828 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
8829 ; AVX2-FAST-PERLANE-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm2 # 32-byte Folded Reload
8830 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = ymm15[0],mem[1],ymm15[2,3,4,5],mem[6],ymm15[7]
8831 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8832 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm5, %ymm2, %ymm2
8833 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm5, %ymm15
8834 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
8835 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 352(%rdi), %ymm2
8836 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8837 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 320(%rdi), %ymm3
8838 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8839 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
8840 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm0, %xmm5, %xmm2
8841 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm5, %xmm3
8842 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8843 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm13, %xmm3, %xmm3
8844 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
8845 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
8846 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
8847 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
8848 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8849 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8850 ; AVX2-FAST-PERLANE-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
8851 ; AVX2-FAST-PERLANE-NEXT: # ymm4 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
8852 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm4, %xmm1
8853 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
8854 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8855 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm11, %xmm4, %xmm1
8856 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm6, %xmm2, %xmm2
8857 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
8858 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
8859 ; AVX2-FAST-PERLANE-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
8860 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3,4,5],mem[6],ymm2[7]
8861 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8862 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm15, %ymm2, %ymm2
8863 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
8864 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 736(%rdi), %ymm2
8865 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8866 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 704(%rdi), %ymm3
8867 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8868 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
8869 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm0, %xmm3, %xmm6
8870 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm3, %xmm2
8871 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8872 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm13, %xmm2, %xmm7
8873 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
8874 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
8875 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm6 = ymm1[0,1,2],ymm6[3,4,5,6,7],ymm1[8,9,10],ymm6[11,12,13,14,15]
8876 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
8877 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8878 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 160(%rdi), %ymm1
8879 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8880 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 128(%rdi), %ymm2
8881 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8882 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
8883 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm0, %xmm2, %xmm0
8884 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm2, %xmm12
8885 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm13, %xmm12, %xmm6
8886 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm7 = xmm6[0,1,2],xmm0[3],xmm6[4,5],xmm0[6],xmm6[7]
8887 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8888 ; AVX2-FAST-PERLANE-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
8889 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
8890 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm11, %xmm1, %xmm6
8891 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm1, %xmm11
8892 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm13 = xmm11[2,1,0,3]
8893 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm11 = xmm13[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
8894 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm11 = xmm6[0,1],xmm11[2],xmm6[3],xmm11[4,5],xmm6[6,7]
8895 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8896 ; AVX2-FAST-PERLANE-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
8897 ; AVX2-FAST-PERLANE-NEXT: # ymm6 = mem[0],ymm0[1],mem[2,3,4,5],ymm0[6],mem[7]
8898 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm15, %ymm6, %ymm15
8899 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2],ymm15[3,4,5,6,7]
8900 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
8901 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm7 = ymm11[0,1,2],ymm7[3,4,5,6,7],ymm11[8,9,10],ymm7[11,12,13,14,15]
8902 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm7[4,5,6,7]
8903 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8904 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm11 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
8905 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm11, %xmm10, %xmm7
8906 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{.*#+}} xmm10 = [10,11,14,15,2,3,0,0,10,11,14,15,2,3,0,0]
8907 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm10, %xmm14, %xmm15
8908 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm15 = xmm7[0,1],xmm15[2],xmm7[3],xmm15[4,5],xmm7[6,7]
8909 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23>
8910 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8911 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm7, %ymm0, %ymm14
8912 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm14 = ymm15[0,1,2],ymm14[3,4,5,6,7]
8913 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm15 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
8914 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
8915 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm15, %xmm0, %xmm0
8916 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,5,5,5,5]
8917 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm9[3],xmm0[4,5],xmm9[6],xmm0[7]
8918 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
8919 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm0 = ymm14[0,1,2],ymm0[3,4,5,6,7],ymm14[8,9,10],ymm0[11,12,13,14,15]
8920 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm14[0,1,2,3],ymm0[4,5,6,7]
8921 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8922 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm11, %xmm8, %xmm0
8923 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
8924 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm10, %xmm8, %xmm8
8925 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm8[2],xmm0[3],xmm8[4,5],xmm0[6,7]
8926 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
8927 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm7, %ymm8, %ymm8
8928 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm8[3,4,5,6,7]
8929 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
8930 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm15, %xmm8, %xmm8
8931 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
8932 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3],xmm8[4,5],xmm5[6],xmm8[7]
8933 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
8934 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm5 = ymm0[0,1,2],ymm5[3,4,5,6,7],ymm0[8,9,10],ymm5[11,12,13,14,15]
8935 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
8936 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8937 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm11, %xmm4, %xmm0
8938 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
8939 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm10, %xmm4, %xmm4
8940 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2],xmm0[3],xmm4[4,5],xmm0[6,7]
8941 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
8942 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm7, %ymm4, %ymm4
8943 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3,4,5,6,7]
8944 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
8945 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm15, %xmm4, %xmm4
8946 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
8947 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
8948 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
8949 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm3 = ymm0[0,1,2],ymm3[3,4,5,6,7],ymm0[8,9,10],ymm3[11,12,13,14,15]
8950 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
8951 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8952 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm15, %xmm12, %xmm0
8953 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
8954 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5],xmm2[6],xmm0[7]
8955 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm11, %xmm1, %xmm1
8956 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm10, %xmm13, %xmm2
8957 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
8958 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm7, %ymm6, %ymm2
8959 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
8960 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
8961 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
8962 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
8963 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8964 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8965 ; AVX2-FAST-PERLANE-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
8966 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
8967 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8968 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8969 ; AVX2-FAST-PERLANE-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
8970 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
8971 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,1,0,3]
8972 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8973 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm0, %xmm0
8974 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,1,2,1]
8975 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
8976 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm12 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
8977 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm12, %xmm1, %xmm0
8978 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,6,5,6,4]
8979 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
8980 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
8981 ; AVX2-FAST-PERLANE-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
8982 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
8983 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm1, %xmm2
8984 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm9 = xmm2[0,3,2,1]
8985 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[2,1,2,3]
8986 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{.*#+}} xmm10 = [12,13,0,1,4,5,0,0,12,13,0,1,4,5,0,0]
8987 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm10, %xmm9, %xmm1
8988 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm2 = xmm7[2,1,2,0,4,5,6,7]
8989 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
8990 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
8991 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm11 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
8992 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm11, %ymm3, %ymm2
8993 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
8994 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,5,4]
8995 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
8996 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
8997 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
8998 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
8999 ; AVX2-FAST-PERLANE-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
9000 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = ymm0[0,1],mem[2],ymm0[3],mem[4],ymm0[5,6],mem[7]
9001 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9002 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9003 ; AVX2-FAST-PERLANE-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
9004 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
9005 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,1,0,3]
9006 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9007 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm0, %xmm0
9008 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,1,2,1]
9009 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9010 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm12, %xmm1, %xmm0
9011 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,6,5,6,4]
9012 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
9013 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9014 ; AVX2-FAST-PERLANE-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
9015 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
9016 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm1, %xmm2
9017 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
9018 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9019 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,1,2,3]
9020 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
9021 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm10, %xmm2, %xmm1
9022 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm2 = xmm4[2,1,2,0,4,5,6,7]
9023 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
9024 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
9025 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm11, %ymm3, %ymm2
9026 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
9027 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,5,4]
9028 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
9029 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
9030 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9031 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9032 ; AVX2-FAST-PERLANE-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
9033 ; AVX2-FAST-PERLANE-NEXT: # ymm13 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
9034 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9035 ; AVX2-FAST-PERLANE-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
9036 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
9037 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[2,1,0,3]
9038 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm0, %xmm0
9039 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[0,1,2,1]
9040 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm12, %xmm8, %xmm0
9041 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,6,5,6,4]
9042 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
9043 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9044 ; AVX2-FAST-PERLANE-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
9045 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
9046 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm1, %xmm0
9047 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[0,3,2,1]
9048 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
9049 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm10, %xmm4, %xmm3
9050 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm5 = xmm1[2,1,2,0,4,5,6,7]
9051 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0],xmm3[1,2],xmm5[3],xmm3[4,5,6,7]
9052 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
9053 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm11, %ymm13, %ymm5
9054 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3,4,5,6,7],ymm5[8,9,10],ymm2[11,12,13,14,15]
9055 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,5,4]
9056 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm5[5,6,7]
9057 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm2[4,5,6,7]
9058 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9059 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9060 ; AVX2-FAST-PERLANE-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
9061 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
9062 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm14 = xmm2[2,1,0,3]
9063 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm12, %xmm14, %xmm3
9064 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm2, %xmm2
9065 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm12 = xmm2[0,1,2,1]
9066 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm2 = xmm12[0,1,2,3,6,5,6,4]
9067 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5,6],xmm2[7]
9068 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9069 ; AVX2-FAST-PERLANE-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
9070 ; AVX2-FAST-PERLANE-NEXT: # ymm15 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
9071 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9072 ; AVX2-FAST-PERLANE-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
9073 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
9074 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm3, %xmm5
9075 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,3,2,1]
9076 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm10, %xmm5, %xmm0
9077 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm10 = xmm3[2,1,2,3]
9078 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm3 = xmm10[2,1,2,0,4,5,6,7]
9079 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1,2],xmm3[3],xmm0[4,5,6,7]
9080 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
9081 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm11, %ymm15, %ymm3
9082 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7],ymm3[8,9,10],ymm2[11,12,13,14,15]
9083 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,5,4]
9084 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm3[5,6,7]
9085 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
9086 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9087 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm2 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
9088 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9089 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm2, %xmm0, %xmm0
9090 ; AVX2-FAST-PERLANE-NEXT: vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
9091 ; AVX2-FAST-PERLANE-NEXT: # xmm3 = mem[0,1,2,3,7,5,6,5]
9092 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4],xmm0[5,6],xmm3[7]
9093 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{.*#+}} xmm3 = [14,15,2,3,6,7,0,0,14,15,2,3,6,7,0,0]
9094 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm3, %xmm9, %xmm9
9095 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
9096 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm9 = xmm7[0],xmm9[1,2],xmm7[3],xmm9[4,5,6,7]
9097 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm7 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
9098 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
9099 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm7, %ymm11, %ymm11
9100 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
9101 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm0 = ymm11[0,1,2],ymm0[3,4,5,6,7],ymm11[8,9,10],ymm0[11,12,13,14,15]
9102 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[0,1,3,2]
9103 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm11[5,6,7]
9104 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
9105 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9106 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm2, %xmm8, %xmm0
9107 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,5,6,5]
9108 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4],xmm0[5,6],xmm6[7]
9109 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm3, %xmm4, %xmm4
9110 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,1,4,5,6,7]
9111 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1,2],xmm1[3],xmm4[4,5,6,7]
9112 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm7, %ymm13, %ymm4
9113 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
9114 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3,4,5,6,7],ymm4[8,9,10],ymm0[11,12,13,14,15]
9115 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,1,3,2]
9116 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm4[5,6,7]
9117 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
9118 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9119 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9120 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm2, %xmm0, %xmm1
9121 ; AVX2-FAST-PERLANE-NEXT: vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
9122 ; AVX2-FAST-PERLANE-NEXT: # xmm4 = mem[0,1,2,3,7,5,6,5]
9123 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4],xmm1[5,6],xmm4[7]
9124 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
9125 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm3, %xmm0, %xmm4
9126 ; AVX2-FAST-PERLANE-NEXT: vpshuflw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
9127 ; AVX2-FAST-PERLANE-NEXT: # xmm6 = mem[3,1,2,1,4,5,6,7]
9128 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0],xmm4[1,2],xmm6[3],xmm4[4,5,6,7]
9129 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9130 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm7, %ymm0, %ymm6
9131 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
9132 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm1 = ymm6[0,1,2],ymm1[3,4,5,6,7],ymm6[8,9,10],ymm1[11,12,13,14,15]
9133 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,3,2]
9134 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm6[5,6,7]
9135 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm11 = ymm4[0,1,2,3],ymm1[4,5,6,7]
9136 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm2, %xmm14, %xmm1
9137 ; AVX2-FAST-PERLANE-NEXT: vpshufhw {{.*#+}} xmm2 = xmm12[0,1,2,3,7,5,6,5]
9138 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
9139 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm7, %ymm15, %ymm2
9140 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm3, %xmm5, %xmm3
9141 ; AVX2-FAST-PERLANE-NEXT: vpshuflw {{.*#+}} xmm4 = xmm10[3,1,2,1,4,5,6,7]
9142 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2],xmm4[3],xmm3[4,5,6,7]
9143 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
9144 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
9145 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
9146 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5,6,7]
9147 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm9 = ymm2[0,1,2,3],ymm1[4,5,6,7]
9148 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9149 ; AVX2-FAST-PERLANE-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
9150 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
9151 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm1, %xmm7
9152 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm8 = xmm1[0,3,2,1]
9153 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{.*#+}} xmm2 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
9154 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm2, %xmm7, %xmm1
9155 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm6 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
9156 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm6, %xmm8, %xmm4
9157 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm4[0,1,2,3],xmm1[4],xmm4[5],xmm1[6,7]
9158 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
9159 ; AVX2-FAST-PERLANE-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
9160 ; AVX2-FAST-PERLANE-NEXT: # ymm4 = mem[0,1,2,3,4],ymm1[5,6,7]
9161 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9162 ; AVX2-FAST-PERLANE-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
9163 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
9164 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm1, %xmm10
9165 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm12 = xmm1[0,3,2,1]
9166 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm2, %xmm10, %xmm1
9167 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm6, %xmm12, %xmm5
9168 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm5[0,1,2,3],xmm1[4],xmm5[5],xmm1[6,7]
9169 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
9170 ; AVX2-FAST-PERLANE-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
9171 ; AVX2-FAST-PERLANE-NEXT: # ymm5 = mem[0,1,2,3,4],ymm1[5,6,7]
9172 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9173 ; AVX2-FAST-PERLANE-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
9174 ; AVX2-FAST-PERLANE-NEXT: # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
9175 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm1, %xmm13
9176 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm14 = xmm1[0,3,2,1]
9177 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm2, %xmm13, %xmm1
9178 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm6, %xmm14, %xmm15
9179 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm15[0,1,2,3],xmm1[4],xmm15[5],xmm1[6,7]
9180 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
9181 ; AVX2-FAST-PERLANE-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 32-byte Folded Reload
9182 ; AVX2-FAST-PERLANE-NEXT: # ymm3 = mem[0,1,2,3,4],ymm1[5,6,7]
9183 ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9184 ; AVX2-FAST-PERLANE-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
9185 ; AVX2-FAST-PERLANE-NEXT: # ymm15 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
9186 ; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm15, %xmm0
9187 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm2, %xmm0, %xmm2
9188 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm15 = xmm15[0,3,2,1]
9189 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm6, %xmm15, %xmm6
9190 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm2 = xmm6[0,1,2,3],xmm2[4],xmm6[5],xmm2[6,7]
9191 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
9192 ; AVX2-FAST-PERLANE-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
9193 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1,2,3,4],ymm2[5,6,7]
9194 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{.*#+}} xmm1 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
9195 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm1, %xmm10, %xmm6
9196 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm10 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
9197 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm10, %xmm12, %xmm12
9198 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm6 = xmm12[0,1,2,3],xmm6[4],xmm12[5],xmm6[6,7]
9199 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
9200 ; AVX2-FAST-PERLANE-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm12 # 32-byte Folded Reload
9201 ; AVX2-FAST-PERLANE-NEXT: # ymm12 = mem[0,1,2,3,4],ymm6[5,6,7]
9202 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm1, %xmm7, %xmm6
9203 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm10, %xmm8, %xmm7
9204 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4],xmm7[5],xmm6[6,7]
9205 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
9206 ; AVX2-FAST-PERLANE-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
9207 ; AVX2-FAST-PERLANE-NEXT: # ymm6 = mem[0,1,2,3,4],ymm6[5,6,7]
9208 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm1, %xmm13, %xmm7
9209 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm10, %xmm14, %xmm8
9210 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm7 = xmm8[0,1,2,3],xmm7[4],xmm8[5],xmm7[6,7]
9211 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
9212 ; AVX2-FAST-PERLANE-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
9213 ; AVX2-FAST-PERLANE-NEXT: # ymm7 = mem[0,1,2,3,4],ymm7[5,6,7]
9214 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm1, %xmm0, %xmm0
9215 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm10, %xmm15, %xmm1
9216 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5],xmm0[6,7]
9217 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
9218 ; AVX2-FAST-PERLANE-NEXT: vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
9219 ; AVX2-FAST-PERLANE-NEXT: # ymm0 = mem[0,1,2,3,4],ymm0[5,6,7]
9220 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9221 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 96(%rsi)
9222 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9223 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 32(%rsi)
9224 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9225 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 64(%rsi)
9226 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9227 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, (%rsi)
9228 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9229 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 96(%rdx)
9230 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9231 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 32(%rdx)
9232 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9233 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 64(%rdx)
9234 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9235 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, (%rdx)
9236 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9237 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 32(%rcx)
9238 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9239 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 96(%rcx)
9240 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9241 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 64(%rcx)
9242 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9243 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, (%rcx)
9244 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm11, 96(%r8)
9245 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9246 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 32(%r8)
9247 ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9248 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 64(%r8)
9249 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm9, (%r8)
9250 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm2, 96(%r9)
9251 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm3, 32(%r9)
9252 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm5, (%r9)
9253 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm4, 64(%r9)
9254 ; AVX2-FAST-PERLANE-NEXT: movq {{[0-9]+}}(%rsp), %rax
9255 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 96(%rax)
9256 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm7, 32(%rax)
9257 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm6, 64(%rax)
9258 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm12, (%rax)
9259 ; AVX2-FAST-PERLANE-NEXT: addq $1256, %rsp # imm = 0x4E8
9260 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
9261 ; AVX2-FAST-PERLANE-NEXT: retq
9263 ; AVX512F-ONLY-SLOW-LABEL: load_i16_stride6_vf64:
9264 ; AVX512F-ONLY-SLOW: # %bb.0:
9265 ; AVX512F-ONLY-SLOW-NEXT: subq $1480, %rsp # imm = 0x5C8
9266 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} xmm10 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
9267 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 608(%rdi), %ymm0
9268 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9269 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 576(%rdi), %ymm1
9270 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9271 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
9272 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm10, %xmm1, %xmm0
9273 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm2
9274 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm1, %ymm20
9275 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[0,2,0,3]
9276 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm2, %xmm16
9277 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
9278 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
9279 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 544(%rdi), %ymm1
9280 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9281 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 512(%rdi), %ymm2
9282 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9283 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm14 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
9284 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm14, %xmm2
9285 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} xmm12 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
9286 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm12, %xmm2, %xmm1
9287 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm2, %xmm21
9288 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm14[2,2,2,2,4,5,6,7]
9289 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,2]
9290 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
9291 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
9292 ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
9293 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9294 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 448(%rdi), %ymm0
9295 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 416(%rdi), %ymm1
9296 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9297 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 384(%rdi), %ymm2
9298 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9299 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
9300 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm10, %xmm11, %xmm1
9301 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm11, %xmm3
9302 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[0,2,0,3]
9303 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm3, %xmm22
9304 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
9305 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
9306 ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],mem[2,3]
9307 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9308 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, 480(%rdi), %ymm0, %ymm0
9309 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9310 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3,4,5],ymm2[6],ymm0[7]
9311 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = [0,1,12,13,8,9,12,13,8,9,12,13,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
9312 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm5, %ymm2, %ymm0
9313 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm2, %ymm23
9314 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
9315 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9316 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 736(%rdi), %ymm0
9317 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9318 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 704(%rdi), %ymm1
9319 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9320 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
9321 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm8[2,2,2,2,4,5,6,7]
9322 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
9323 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm8, %xmm2
9324 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm12, %xmm2, %xmm1
9325 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm2, %xmm28
9326 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
9327 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 640(%rdi), %ymm1
9328 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
9329 ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm1[2,3],mem[2,3]
9330 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9331 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, 672(%rdi), %ymm1, %ymm1
9332 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9333 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3,4,5],ymm2[6],ymm1[7]
9334 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = <0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u>
9335 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm1
9336 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm3, %ymm17
9337 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm2, %ymm29
9338 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
9339 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,6]
9340 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
9341 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9342 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 224(%rdi), %ymm0
9343 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9344 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 192(%rdi), %ymm1
9345 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9346 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm13 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
9347 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm13, %xmm15
9348 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm15[0,2,0,3]
9349 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
9350 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm10, %xmm13, %xmm1
9351 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
9352 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 160(%rdi), %ymm1
9353 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 128(%rdi), %ymm2
9354 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9355 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
9356 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm1, %ymm30
9357 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm4[2,2,2,2,4,5,6,7]
9358 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
9359 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm4, %xmm6
9360 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm12, %xmm6, %xmm2
9361 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
9362 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
9363 ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
9364 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9365 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa (%rdi), %ymm0
9366 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9367 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 32(%rdi), %ymm1
9368 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9369 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
9370 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm10, %xmm3, %xmm1
9371 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm3, %xmm10
9372 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm10[0,2,0,3]
9373 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
9374 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
9375 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 64(%rdi), %ymm2
9376 ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],mem[2,3]
9377 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9378 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, 96(%rdi), %ymm2, %ymm2
9379 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9380 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm7 = ymm2[0],ymm0[1],ymm2[2,3,4,5],ymm0[6],ymm2[7]
9381 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm5, %ymm7, %ymm2
9382 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm2[3,4,5,6,7]
9383 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9384 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 352(%rdi), %ymm0
9385 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9386 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 320(%rdi), %ymm1
9387 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm1, (%rsp) # 32-byte Spill
9388 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm9 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
9389 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm9, %xmm5
9390 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm12, %xmm5, %xmm1
9391 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm12 = xmm9[2,2,2,2,4,5,6,7]
9392 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[0,1,2,2]
9393 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm12 = xmm1[0,1,2],xmm12[3],xmm1[4,5],xmm12[6],xmm1[7]
9394 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa 256(%rdi), %ymm1
9395 ; AVX512F-ONLY-SLOW-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm1[2,3],mem[2,3]
9396 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9397 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, 288(%rdi), %ymm1, %ymm0
9398 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm2[1],ymm0[2,3,4,5],ymm2[6],ymm0[7]
9399 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm0, %ymm31
9400 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm17, %ymm0
9401 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm0, %ymm1, %ymm0
9402 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
9403 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} ymm12 = ymm0[0,1,2],ymm12[3,4,5,6,7],ymm0[8,9,10],ymm12[11,12,13,14,15]
9404 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
9405 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm12[4,5,6,7]
9406 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
9407 ; AVX512F-ONLY-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm12 = [2,3,14,15,10,11,0,0,2,3,14,15,10,11,0,0]
9408 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm12, %xmm15, %xmm0
9409 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} xmm15 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
9410 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm15, %xmm13, %xmm13
9411 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm13[0,1],xmm0[2],xmm13[3],xmm0[4,5],xmm13[6,7]
9412 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} xmm13 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
9413 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm13, %xmm6, %xmm6
9414 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
9415 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2],xmm4[3],xmm6[4,5],xmm4[6],xmm6[7]
9416 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
9417 ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm4, %zmm0
9418 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9419 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm12, %xmm10, %xmm0
9420 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm15, %xmm3, %xmm3
9421 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1],xmm0[2],xmm3[3],xmm0[4,5],xmm3[6,7]
9422 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
9423 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm3, %ymm7, %ymm4
9424 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3,4,5,6,7]
9425 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9426 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm13, %xmm5, %xmm0
9427 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm9[0,1,2,3,5,5,5,5]
9428 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5],xmm2[6],xmm0[7]
9429 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} ymm2 = <2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23>
9430 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm2, %ymm1, %ymm1
9431 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
9432 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
9433 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
9434 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
9435 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm0, %ymm27
9436 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm16, %xmm0
9437 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm12, %xmm0, %xmm0
9438 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm20, %ymm1
9439 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm15, %xmm1, %xmm1
9440 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
9441 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm21, %xmm1
9442 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm13, %xmm1, %xmm1
9443 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm14[0,1,2,3,5,5,5,5]
9444 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3],xmm1[4,5],xmm4[6],xmm1[7]
9445 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
9446 ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
9447 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9448 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm23, %ymm0
9449 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
9450 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm22, %xmm1
9451 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm12, %xmm1, %xmm1
9452 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm15, %xmm11, %xmm3
9453 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2],xmm3[3],xmm1[4,5],xmm3[6,7]
9454 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
9455 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9456 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm29, %ymm0
9457 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm2, %ymm0, %ymm0
9458 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm28, %xmm1
9459 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm13, %xmm1, %xmm1
9460 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,5,5,5,5]
9461 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
9462 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
9463 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
9464 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,7]
9465 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
9466 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm0, %ymm26
9467 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9468 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
9469 ; AVX512F-ONLY-SLOW-NEXT: # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
9470 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1
9471 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,1,2,3]
9472 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
9473 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7]
9474 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm1, %xmm28
9475 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
9476 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm2[2,1,2,0,4,5,6,7]
9477 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm2, %xmm21
9478 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
9479 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm30, %ymm2
9480 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm1 # 32-byte Folded Reload
9481 ; AVX512F-ONLY-SLOW-NEXT: # ymm1 = ymm2[0,1],mem[2],ymm2[3,4],mem[5],ymm2[6,7]
9482 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm2
9483 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,1,0,3]
9484 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm3[0,0,0,0,4,5,6,7]
9485 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm3, %xmm20
9486 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
9487 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,1,2,1]
9488 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,6,5,6,4]
9489 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm3, %xmm19
9490 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
9491 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
9492 ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm2
9493 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9494 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
9495 ; AVX512F-ONLY-SLOW-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
9496 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1
9497 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,1,2,3]
9498 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
9499 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7]
9500 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm1, %xmm18
9501 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
9502 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm3[2,1,2,0,4,5,6,7]
9503 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm3, %xmm16
9504 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
9505 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9506 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
9507 ; AVX512F-ONLY-SLOW-NEXT: # ymm4 = ymm0[0,1],mem[2],ymm0[3],mem[4],ymm0[5,6],mem[7]
9508 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} ymm0 = [4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
9509 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm0, %ymm4, %ymm3
9510 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm4, %ymm17
9511 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm3[5,6,7]
9512 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
9513 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
9514 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $36, (%rsp), %ymm3, %ymm3 # 32-byte Folded Reload
9515 ; AVX512F-ONLY-SLOW-NEXT: # ymm3 = ymm3[0,1],mem[2],ymm3[3,4],mem[5],ymm3[6,7]
9516 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm3, %xmm4
9517 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm15 = xmm3[2,1,0,3]
9518 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm15[0,0,0,0,4,5,6,7]
9519 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,6,7]
9520 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm10 = xmm4[0,1,2,1]
9521 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm10[0,1,2,3,6,5,6,4]
9522 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
9523 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
9524 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm31, %ymm4
9525 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm13 # 32-byte Folded Reload
9526 ; AVX512F-ONLY-SLOW-NEXT: # ymm13 = ymm4[0,1],mem[2],ymm4[3],mem[4],ymm4[5,6],mem[7]
9527 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
9528 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm5, %ymm13, %ymm4
9529 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm5, %ymm24
9530 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
9531 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,5,4]
9532 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
9533 ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm3
9534 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm29 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
9535 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $226, %zmm2, %zmm29, %zmm1
9536 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
9537 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $184, %zmm1, %zmm2, %zmm3
9538 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm2, %zmm22
9539 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9540 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9541 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
9542 ; AVX512F-ONLY-SLOW-NEXT: # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
9543 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm2
9544 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm11 = xmm1[2,1,2,3]
9545 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm12 = xmm2[0,3,2,1]
9546 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm12[0,0,2,3,4,5,6,7]
9547 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,6,6]
9548 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm11[2,1,2,0,4,5,6,7]
9549 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
9550 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
9551 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
9552 ; AVX512F-ONLY-SLOW-NEXT: # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
9553 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm2, %xmm3
9554 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm9 = xmm2[2,1,0,3]
9555 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm9[0,0,0,0,4,5,6,7]
9556 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,6,7]
9557 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm8 = xmm3[0,1,2,1]
9558 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,6,5,6,4]
9559 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
9560 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
9561 ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm23
9562 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
9563 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
9564 ; AVX512F-ONLY-SLOW-NEXT: # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
9565 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm2
9566 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[2,1,2,3]
9567 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm2[0,3,2,1]
9568 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm7[0,0,2,3,4,5,6,7]
9569 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,3]
9570 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm6[2,1,2,0,4,5,6,7]
9571 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
9572 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
9573 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
9574 ; AVX512F-ONLY-SLOW-NEXT: # ymm5 = ymm2[0,1],mem[2],ymm2[3],mem[4],ymm2[5,6],mem[7]
9575 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm0, %ymm5, %ymm0
9576 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm0[5,6,7]
9577 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
9578 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9579 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
9580 ; AVX512F-ONLY-SLOW-NEXT: # ymm1 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
9581 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm0
9582 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,1,0,3]
9583 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm4[0,0,0,0,4,5,6,7]
9584 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
9585 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[0,1,2,1]
9586 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm14 = xmm3[0,1,2,3,6,5,6,4]
9587 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm14 = xmm1[0,1,2,3],xmm14[4],xmm1[5,6],xmm14[7]
9588 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9589 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
9590 ; AVX512F-ONLY-SLOW-NEXT: # ymm1 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
9591 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm24, %ymm0
9592 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm0, %ymm1, %ymm0
9593 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14
9594 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} ymm14 = ymm0[0,1,2],ymm14[3,4,5,6,7],ymm0[8,9,10],ymm14[11,12,13,14,15]
9595 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
9596 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7]
9597 ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
9598 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $226, %zmm23, %zmm29, %zmm2
9599 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $184, %zmm2, %zmm22, %zmm0
9600 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9601 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm21, %xmm0
9602 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
9603 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm28, %xmm2
9604 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
9605 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
9606 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2],xmm0[3],xmm2[4,5,6,7]
9607 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm19, %xmm2
9608 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
9609 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm20, %xmm14
9610 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm14 = xmm14[1,1,1,1,4,5,6,7]
9611 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,5,7,7]
9612 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm14[0,1,2,3],xmm2[4],xmm14[5,6],xmm2[7]
9613 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
9614 ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm28
9615 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm16, %xmm0
9616 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm0[3,1,2,1,4,5,6,7]
9617 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm18, %xmm0
9618 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm14 = xmm0[0,1,3,3,4,5,6,7]
9619 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,7,7,7,7]
9620 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm14[1,2],xmm2[3],xmm14[4,5,6,7]
9621 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} ymm14 = [6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
9622 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm17, %ymm0
9623 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm14, %ymm0, %ymm0
9624 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm0[5,6,7]
9625 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
9626 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm10[0,1,2,3,7,5,6,5]
9627 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm10 = xmm15[1,1,1,1,4,5,6,7]
9628 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,5,7,7]
9629 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm10[0,1,2,3],xmm2[4],xmm10[5,6],xmm2[7]
9630 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} ymm10 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
9631 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm10, %ymm13, %ymm13
9632 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
9633 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} ymm2 = ymm13[0,1,2],ymm2[3,4,5,6,7],ymm13[8,9,10],ymm2[11,12,13,14,15]
9634 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,7,4,5]
9635 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm2[4,5,6,7]
9636 ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm13 # 32-byte Folded Reload
9637 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9638 ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm13 # 32-byte Folded Reload
9639 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9640 ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $1, %ymm27, %zmm0, %zmm13
9641 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9642 ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $1, %ymm26, %zmm0, %zmm13
9643 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
9644 ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm25
9645 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $226, %zmm28, %zmm29, %zmm0
9646 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $184, %zmm0, %zmm22, %zmm25
9647 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm11[3,1,2,1,4,5,6,7]
9648 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm12[0,1,3,3,4,5,6,7]
9649 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
9650 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2],xmm0[3],xmm2[4,5,6,7]
9651 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,7,5,6,5]
9652 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm8 = xmm9[1,1,1,1,4,5,6,7]
9653 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,7,7]
9654 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm8[0,1,2,3],xmm2[4],xmm8[5,6],xmm2[7]
9655 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
9656 ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm0
9657 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm6[3,1,2,1,4,5,6,7]
9658 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm6 = xmm7[0,1,3,3,4,5,6,7]
9659 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
9660 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1,2],xmm2[3],xmm6[4,5,6,7]
9661 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm14, %ymm5, %ymm5
9662 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm5[5,6,7]
9663 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
9664 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm10, %ymm1, %ymm1
9665 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
9666 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[1,1,1,1,4,5,6,7]
9667 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,7]
9668 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
9669 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
9670 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
9671 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
9672 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
9673 ; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm28
9674 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $226, %zmm0, %zmm29, %zmm2
9675 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $184, %zmm2, %zmm22, %zmm28
9676 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9677 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
9678 ; AVX512F-ONLY-SLOW-NEXT: # ymm2 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
9679 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} xmm1 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
9680 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm1, %xmm2, %xmm0
9681 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm2, %xmm4
9682 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm4[2,2,2,2,4,5,6,7]
9683 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm4, %xmm24
9684 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
9685 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm30, %ymm4
9686 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm3 # 32-byte Folded Reload
9687 ; AVX512F-ONLY-SLOW-NEXT: # ymm3 = mem[0],ymm4[1],mem[2,3],ymm4[4],mem[5,6],ymm4[7]
9688 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm3, %xmm5
9689 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[0,3,2,1]
9690 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm4[0,1,0,2,4,5,6,7]
9691 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm4, %xmm22
9692 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,6,6,6]
9693 ; AVX512F-ONLY-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm13 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
9694 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm13, %xmm5, %xmm4
9695 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm5, %xmm21
9696 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5],xmm4[6,7]
9697 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
9698 ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm3, %zmm30
9699 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9700 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
9701 ; AVX512F-ONLY-SLOW-NEXT: # ymm5 = mem[0],ymm0[1],mem[2,3,4,5],ymm0[6],mem[7]
9702 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9703 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
9704 ; AVX512F-ONLY-SLOW-NEXT: # ymm10 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
9705 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm1, %xmm10, %xmm0
9706 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm10, %xmm4
9707 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm4[2,2,2,2,4,5,6,7]
9708 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm4, %xmm26
9709 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
9710 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 {{.*#+}} ymm29 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
9711 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,4,5,0,1,12,13,24,25,20,21,128,128,128,128,128,128,128,128,128,128,128,128]
9712 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm0, %ymm5, %ymm4
9713 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm5, %ymm27
9714 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $236, %ymm29, %ymm4, %ymm3
9715 ; AVX512F-ONLY-SLOW-NEXT: movw $31, %ax
9716 ; AVX512F-ONLY-SLOW-NEXT: kmovw %eax, %k1
9717 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa32 %zmm3, %zmm30 {%k1}
9718 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm31, %ymm3
9719 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm5 # 32-byte Folded Reload
9720 ; AVX512F-ONLY-SLOW-NEXT: # ymm5 = mem[0],ymm3[1],mem[2,3,4,5],ymm3[6],mem[7]
9721 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
9722 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $109, (%rsp), %ymm3, %ymm3 # 32-byte Folded Reload
9723 ; AVX512F-ONLY-SLOW-NEXT: # ymm3 = mem[0],ymm3[1],mem[2,3],ymm3[4],mem[5,6],ymm3[7]
9724 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm3, %xmm6
9725 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[0,3,2,1]
9726 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm4[0,1,0,2,4,5,6,7]
9727 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm4, %xmm20
9728 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,6,6,6]
9729 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm13, %xmm6, %xmm4
9730 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm6, %xmm18
9731 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5],xmm4[6,7]
9732 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
9733 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} ymm6 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
9734 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm6, %ymm5, %ymm4
9735 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm6, %ymm16
9736 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm5, %ymm19
9737 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4],ymm3[5,6,7]
9738 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm3, %ymm23
9739 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
9740 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm5 # 32-byte Folded Reload
9741 ; AVX512F-ONLY-SLOW-NEXT: # ymm5 = mem[0],ymm3[1],mem[2,3,4,5],ymm3[6],mem[7]
9742 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
9743 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm15 # 32-byte Folded Reload
9744 ; AVX512F-ONLY-SLOW-NEXT: # ymm15 = mem[0,1],ymm3[2],mem[3,4],ymm3[5],mem[6,7]
9745 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm1, %xmm15, %xmm3
9746 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm15, %xmm14
9747 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm14[2,2,2,2,4,5,6,7]
9748 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm4 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
9749 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm0, %ymm5, %ymm0
9750 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm5, %ymm17
9751 ; AVX512F-ONLY-SLOW-NEXT: vpternlogq $236, %ymm29, %ymm0, %ymm4
9752 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9753 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
9754 ; AVX512F-ONLY-SLOW-NEXT: # ymm5 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
9755 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm1, %xmm5, %xmm1
9756 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm5, %xmm9
9757 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm9[2,2,2,2,4,5,6,7]
9758 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5,6,7]
9759 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9760 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
9761 ; AVX512F-ONLY-SLOW-NEXT: # ymm3 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
9762 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm3, %xmm12
9763 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm11 = xmm3[0,3,2,1]
9764 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm11[0,1,0,2,4,5,6,7]
9765 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,6,6,6]
9766 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm13, %xmm12, %xmm6
9767 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm6[4],xmm3[5],xmm6[6,7]
9768 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
9769 ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $2, %xmm1, %zmm3, %zmm31
9770 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa32 %zmm4, %zmm31 {%k1}
9771 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9772 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
9773 ; AVX512F-ONLY-SLOW-NEXT: # ymm8 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
9774 ; AVX512F-ONLY-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
9775 ; AVX512F-ONLY-SLOW-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
9776 ; AVX512F-ONLY-SLOW-NEXT: # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
9777 ; AVX512F-ONLY-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm7
9778 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm13, %xmm7, %xmm3
9779 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[0,3,2,1]
9780 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm4[0,1,0,2,4,5,6,7]
9781 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,6,6]
9782 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4],xmm1[5],xmm3[6,7]
9783 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm16, %ymm0
9784 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm0, %ymm8, %ymm3
9785 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
9786 ; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm1[5,6,7]
9787 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm0, %ymm16
9788 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} xmm1 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
9789 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm1, %xmm2, %xmm2
9790 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm24[1,1,2,3]
9791 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
9792 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
9793 ; AVX512F-ONLY-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm6 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
9794 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm21, %xmm0
9795 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm6, %xmm0, %xmm3
9796 ; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm22, %xmm0
9797 ; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm13 = xmm0[0,1,1,3,4,5,6,7]
9798 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[0,1,3,3]
9799 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm13[0,1,2,3],xmm3[4],xmm13[5],xmm3[6,7]
9800 ; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
9801 ; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $2, %xmm2, %zmm3, %zmm2
9802 ; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm1, %xmm10, %xmm3
9803 ; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm10 = xmm26[1,1,2,3]
9804 ; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,5,5]
9805 ; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm10[1],xmm3[2,3],xmm10[4],xmm3[5,6,7]
; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm27, %ymm0
; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm10, %ymm0, %ymm13
; AVX512F-ONLY-SLOW-NEXT: vpternlogq $236, %ymm29, %ymm13, %ymm3
; AVX512F-ONLY-SLOW-NEXT: vmovdqa32 %zmm3, %zmm2 {%k1}
; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm18, %xmm0
; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm6, %xmm0, %xmm3
; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %xmm20, %xmm0
; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm13 = xmm0[0,1,1,3,4,5,6,7]
; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[0,1,3,3]
; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm13[0,1,2,3],xmm3[4],xmm13[5],xmm3[6,7]
; AVX512F-ONLY-SLOW-NEXT: vmovdqa {{.*#+}} ymm13 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm19, %ymm0
; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm13, %ymm0, %ymm0
; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5,6,7]
; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %ymm17, %ymm3
; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm10, %ymm3, %ymm3
; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm1, %xmm15, %xmm10
; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm14 = xmm14[1,1,2,3]
; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,5,5,5,5]
; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0],xmm14[1],xmm10[2,3],xmm14[4],xmm10[5,6,7]
; AVX512F-ONLY-SLOW-NEXT: vpternlogq $236, %ymm29, %ymm3, %ymm10
; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm1, %xmm5, %xmm1
; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm9[1,1,2,3]
; AVX512F-ONLY-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5,6,7]
; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $1, %ymm23, %zmm0, %zmm3
; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $1, %ymm16, %zmm0, %zmm5
; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm6, %xmm12, %xmm9
; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm11 = xmm11[0,1,1,3,4,5,6,7]
; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm11 = xmm11[0,1,3,3]
; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4],xmm11[5],xmm9[6,7]
; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
; AVX512F-ONLY-SLOW-NEXT: vinserti32x4 $2, %xmm1, %zmm9, %zmm1
; AVX512F-ONLY-SLOW-NEXT: vmovdqa32 %zmm10, %zmm1 {%k1}
; AVX512F-ONLY-SLOW-NEXT: vpshufb %ymm13, %ymm8, %ymm8
; AVX512F-ONLY-SLOW-NEXT: vpshufb %xmm6, %xmm7, %xmm6
; AVX512F-ONLY-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
; AVX512F-ONLY-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,1,3,3]
; AVX512F-ONLY-SLOW-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm6[4],xmm4[5],xmm6[6,7]
; AVX512F-ONLY-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
; AVX512F-ONLY-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm8[0,1,2,3,4],ymm4[5,6,7]
; AVX512F-ONLY-SLOW-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm4
; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm6 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
; AVX512F-ONLY-SLOW-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm7 # 64-byte Folded Reload
; AVX512F-ONLY-SLOW-NEXT: movw $-2048, %ax # imm = 0xF800
; AVX512F-ONLY-SLOW-NEXT: kmovw %eax, %k1
; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
; AVX512F-ONLY-SLOW-NEXT: vmovdqa32 %zmm8, %zmm7 {%k1}
; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm7, (%rsi)
; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
; AVX512F-ONLY-SLOW-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm7 # 64-byte Folded Reload
; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
; AVX512F-ONLY-SLOW-NEXT: vmovdqa32 %zmm8, %zmm7 {%k1}
; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm7, 64(%rsi)
; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
; AVX512F-ONLY-SLOW-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm7 # 64-byte Folded Reload
; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
; AVX512F-ONLY-SLOW-NEXT: vmovdqa32 %zmm8, %zmm7 {%k1}
; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm7, 64(%rdx)
; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
; AVX512F-ONLY-SLOW-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm7 # 64-byte Folded Reload
; AVX512F-ONLY-SLOW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
; AVX512F-ONLY-SLOW-NEXT: vmovdqa32 %zmm6, %zmm7 {%k1}
; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm7, (%rdx)
; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm6 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
; AVX512F-ONLY-SLOW-NEXT: vpternlogq $184, %zmm30, %zmm6, %zmm3
; AVX512F-ONLY-SLOW-NEXT: vpternlogq $184, %zmm31, %zmm6, %zmm5
; AVX512F-ONLY-SLOW-NEXT: vpternlogq $184, %zmm2, %zmm6, %zmm0
; AVX512F-ONLY-SLOW-NEXT: vpternlogq $184, %zmm1, %zmm6, %zmm4
; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm1, 64(%rcx)
; AVX512F-ONLY-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512F-ONLY-SLOW-NEXT: vmovaps %zmm1, (%rcx)
; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm28, 64(%r8)
; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm25, (%r8)
; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm5, 64(%r9)
; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm3, (%r9)
; AVX512F-ONLY-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm4, 64(%rax)
; AVX512F-ONLY-SLOW-NEXT: vmovdqa64 %zmm0, (%rax)
; AVX512F-ONLY-SLOW-NEXT: addq $1480, %rsp # imm = 0x5C8
; AVX512F-ONLY-SLOW-NEXT: vzeroupper
; AVX512F-ONLY-SLOW-NEXT: retq
;
; AVX512F-ONLY-FAST-LABEL: load_i16_stride6_vf64:
; AVX512F-ONLY-FAST: # %bb.0:
; AVX512F-ONLY-FAST-NEXT: subq $1480, %rsp # imm = 0x5C8
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm10 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
; AVX512F-ONLY-FAST-NEXT: vmovdqa 608(%rdi), %ymm0
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vmovdqa 576(%rdi), %ymm1
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm10, %xmm2, %xmm0
; AVX512F-ONLY-FAST-NEXT: vpbroadcastq {{.*#+}} xmm12 = [8,9,12,13,0,1,0,0,8,9,12,13,0,1,0,0]
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm2, %xmm1
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm2, %ymm20
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm12, %xmm2, %xmm1
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm2, %xmm21
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa 544(%rdi), %ymm1
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vmovdqa 512(%rdi), %ymm2
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
; AVX512F-ONLY-FAST-NEXT: vpbroadcastd {{.*#+}} xmm9 = [8,9,4,5,8,9,4,5,8,9,4,5,8,9,4,5]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm9, %xmm15, %xmm1
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm15, %xmm4
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm2 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm2, %xmm4, %xmm3
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm4, %xmm22
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512F-ONLY-FAST-NEXT: vmovdqa 416(%rdi), %ymm0
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vmovdqa 384(%rdi), %ymm1
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm14 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm10, %xmm14, %xmm0
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm14, %xmm1
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,1,0,3]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm12, %xmm3, %xmm1
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm3, %xmm23
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa 448(%rdi), %ymm1
; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],mem[2,3]
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, 480(%rdi), %ymm1, %ymm1
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm1[0],ymm3[1],ymm1[2,3,4,5],ymm3[6],ymm1[7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = [0,1,12,13,8,9,12,13,8,9,12,13,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm8, %ymm3, %ymm1
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm3, %ymm24
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512F-ONLY-FAST-NEXT: vmovdqa 736(%rdi), %ymm0
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vmovdqa 704(%rdi), %ymm1
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm13 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm9, %xmm13, %xmm0
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm13, %xmm3
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm2, %xmm3, %xmm1
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm3, %xmm26
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa 640(%rdi), %ymm1
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],mem[2,3]
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, 672(%rdi), %ymm1, %ymm1
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm1[0],ymm3[1],ymm1[2,3,4,5],ymm3[6],ymm1[7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = <0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u>
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm4, %ymm3, %ymm1
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm4, %ymm16
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm3, %ymm29
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,6]
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vmovdqa 224(%rdi), %ymm0
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vmovdqa 192(%rdi), %ymm1
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm11 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm11, %xmm0
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm10, %xmm11, %xmm1
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[2,1,0,3]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm12, %xmm7, %xmm0
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa 160(%rdi), %ymm1
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vmovdqa 128(%rdi), %ymm3
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm3, %ymm30
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm9, %xmm5, %xmm1
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm5, %xmm6
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm2, %xmm6, %xmm3
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512F-ONLY-FAST-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm10, %xmm4, %xmm0
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm4, %xmm3
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm10 = xmm3[2,1,0,3]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm12, %xmm10, %xmm3
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3],xmm3[4,5],xmm0[6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa 64(%rdi), %ymm3
; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, 96(%rdi), %ymm3, %ymm3
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm12 = ymm3[0],ymm1[1],ymm3[2,3,4,5],ymm1[6],ymm3[7]
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm8, %ymm12, %ymm3
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3,4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512F-ONLY-FAST-NEXT: vmovdqa 352(%rdi), %ymm0
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vmovdqa 320(%rdi), %ymm1
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm1, (%rsp) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm9, %xmm8, %xmm9
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm8, %xmm3
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm2, %xmm3, %xmm2
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm9 = xmm2[0,1,2],xmm9[3],xmm2[4,5],xmm9[6],xmm2[7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa 256(%rdi), %ymm2
; AVX512F-ONLY-FAST-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],mem[2,3]
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, 288(%rdi), %ymm2, %ymm0
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm0, %ymm31
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm16, %ymm0
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm0, %ymm2, %ymm0
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} ymm9 = ymm0[0,1,2],ymm9[3,4,5,6,7],ymm0[8,9,10],ymm9[11,12,13,14,15]
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm9 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm9, %xmm11, %xmm0
; AVX512F-ONLY-FAST-NEXT: vpbroadcastq {{.*#+}} xmm11 = [10,11,14,15,2,3,0,0,10,11,14,15,2,3,0,0]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm11, %xmm7, %xmm7
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm7 = xmm0[0,1],xmm7[2],xmm0[3],xmm7[4,5],xmm0[6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm0 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm0, %xmm6, %xmm6
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2],xmm5[3],xmm6[4,5],xmm5[6],xmm6[7]
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $2, %xmm7, %zmm5, %zmm5
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm9, %xmm4, %xmm4
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm11, %xmm10, %xmm5
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3],xmm5[4,5],xmm4[6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} ymm5 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm5, %ymm12, %ymm6
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2],ymm6[3,4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm0, %xmm3, %xmm3
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,5,5,5,5]
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = <2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23>
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,7]
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm1, %ymm28
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm20, %ymm1
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm9, %xmm1, %xmm1
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm21, %xmm2
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm11, %xmm2, %xmm2
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm22, %xmm2
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm0, %xmm2, %xmm2
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm4 = xmm15[0,1,2,3,5,5,5,5]
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[3],xmm2[4,5],xmm4[6],xmm2[7]
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm1
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm24, %ymm1
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm5, %ymm1, %ymm1
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm9, %xmm14, %xmm2
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm23, %xmm4
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm11, %xmm4, %xmm4
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3],xmm4[4,5],xmm2[6,7]
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm29, %ymm1
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm3, %ymm1, %ymm1
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm26, %xmm2
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm0, %xmm2, %xmm0
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm2 = xmm13[0,1,2,3,5,5,5,5]
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5],xmm2[6],xmm0[7]
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm0, %ymm27
; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,1,2,3]
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm0 = <u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13>
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,3,2,1]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm0, %xmm2, %xmm1
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm2, %xmm24
; AVX512F-ONLY-FAST-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[2,1,2,0,4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm3, %xmm23
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm30, %ymm2
; AVX512F-ONLY-FAST-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,1,0,3]
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm13 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm13, %xmm4, %xmm2
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm4, %xmm21
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[0,1,2,1]
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm3 = xmm4[0,1,2,3,6,5,6,4]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm4, %xmm20
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm3
; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm1, %xmm4
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,1,2,3]
; AVX512F-ONLY-FAST-NEXT: vpbroadcastq {{.*#+}} xmm2 = [12,13,0,1,4,5,0,0,12,13,0,1,4,5,0,0]
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm2, %xmm4, %xmm1
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm4, %xmm19
; AVX512F-ONLY-FAST-NEXT: vpshuflw {{.*#+}} xmm4 = xmm5[2,1,2,0,4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm5, %xmm18
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm1[1,2],xmm4[3],xmm1[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm6 = ymm1[0,1],mem[2],ymm1[3],mem[4],ymm1[5,6],mem[7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm1, %ymm6, %ymm5
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm6, %ymm17
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7]
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqu (%rsp), %ymm5 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm5 = mem[0,1],ymm5[2],mem[3,4],ymm5[5],mem[6,7]
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm5, %xmm6
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm7 = xmm5[2,1,0,3]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm13, %xmm7, %xmm5
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm7, %xmm16
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm14 = xmm6[0,1,2,1]
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm6 = xmm14[0,1,2,3,6,5,6,4]
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4],xmm5[5,6],xmm6[7]
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm31, %ymm6
; AVX512F-ONLY-FAST-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm15 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm15 = ymm6[0,1],mem[2],ymm6[3],mem[4],ymm6[5,6],mem[7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm7, %ymm15, %ymm6
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm7, %ymm25
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3,4,5,6,7],ymm6[8,9,10],ymm5[11,12,13,14,15]
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,5,4]
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm5
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 {{.*#+}} zmm29 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
; AVX512F-ONLY-FAST-NEXT: vpternlogq $226, %zmm3, %zmm29, %zmm4
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
; AVX512F-ONLY-FAST-NEXT: vpternlogq $184, %zmm4, %zmm3, %zmm5
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm3, %zmm22
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm3 = ymm3[0,1],mem[2],ymm3[3,4],mem[5],ymm3[6,7]
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm3, %xmm4
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm10 = xmm4[0,3,2,1]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm0, %xmm10, %xmm0
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm12 = xmm3[2,1,2,3]
; AVX512F-ONLY-FAST-NEXT: vpshuflw {{.*#+}} xmm3 = xmm12[2,1,2,0,4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1,2],xmm3[3],xmm0[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm3 = mem[0,1],ymm3[2],mem[3,4],ymm3[5],mem[6,7]
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm3, %xmm4
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm9 = xmm3[2,1,0,3]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm13, %xmm9, %xmm3
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm8 = xmm4[0,1,2,1]
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm4 = xmm8[0,1,2,3,6,5,6,4]
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm3, %zmm11
; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm0, %xmm3
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm7 = xmm3[0,3,2,1]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm2, %xmm7, %xmm2
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[2,1,2,3]
; AVX512F-ONLY-FAST-NEXT: vpshuflw {{.*#+}} xmm0 = xmm6[2,1,2,0,4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2],xmm0[3],xmm2[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm5 = ymm2[0,1],mem[2],ymm2[3],mem[4],ymm2[5,6],mem[7]
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm1, %ymm5, %ymm1
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,1,0,3]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm13, %xmm4, %xmm0
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,1,2,1]
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm13 = xmm3[0,1,2,3,6,5,6,4]
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm13 = xmm0[0,1,2,3],xmm13[4],xmm0[5,6],xmm13[7]
; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm2 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm25, %ymm0
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm0, %ymm2, %ymm0
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} ymm13 = ymm0[0,1,2],ymm13[3,4,5,6,7],ymm0[8,9,10],ymm13[11,12,13,14,15]
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-ONLY-FAST-NEXT: vpternlogq $226, %zmm11, %zmm29, %zmm1
; AVX512F-ONLY-FAST-NEXT: vpternlogq $184, %zmm1, %zmm22, %zmm0
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm1 = <u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15>
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm24, %xmm0
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm1, %xmm25
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm23, %xmm1
; AVX512F-ONLY-FAST-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,1,4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm1 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm21, %xmm11
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm1, %xmm11, %xmm11
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm20, %xmm13
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,7,5,6,5]
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm11 = xmm11[0,1,2,3],xmm13[4],xmm11[5,6],xmm13[7]
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11
; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm11, %zmm24
; AVX512F-ONLY-FAST-NEXT: vpbroadcastq {{.*#+}} xmm13 = [14,15,2,3,6,7,0,0,14,15,2,3,6,7,0,0]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm19, %xmm0
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm13, %xmm0, %xmm11
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm13, %xmm19
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm18, %xmm0
; AVX512F-ONLY-FAST-NEXT: vpshuflw {{.*#+}} xmm13 = xmm0[3,1,2,1,4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm13 = xmm13[0],xmm11[1,2],xmm13[3],xmm11[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} ymm11 = [6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm17, %ymm0
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm11, %ymm0, %ymm0
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm13 = xmm13[0,1,2,3,4],xmm0[5,6,7]
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm0[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm16, %xmm13
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm1, %xmm13, %xmm13
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,7,5,6,5]
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm13 = xmm13[0,1,2,3],xmm14[4],xmm13[5,6],xmm14[7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} ymm14 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm14, %ymm15, %ymm15
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} ymm13 = ymm15[0,1,2],ymm13[3,4,5,6,7],ymm15[8,9,10],ymm13[11,12,13,14,15]
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,7,4,5]
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3],ymm13[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $1, %ymm13, %zmm0, %zmm26
; AVX512F-ONLY-FAST-NEXT: vpternlogq $226, %zmm24, %zmm29, %zmm0
; AVX512F-ONLY-FAST-NEXT: vpternlogq $184, %zmm0, %zmm22, %zmm26
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm25, %xmm0
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm0, %xmm10, %xmm0
; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm10 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm10 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $1, %ymm28, %zmm0, %zmm10
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $1, %ymm27, %zmm0, %zmm10
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512F-ONLY-FAST-NEXT: vpshuflw {{.*#+}} xmm10 = xmm12[3,1,2,1,4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm10[0],xmm0[1,2],xmm10[3],xmm0[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm1, %xmm9, %xmm9
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,7,5,6,5]
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0,1,2,3],xmm8[4],xmm9[5,6],xmm8[7]
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm8, %zmm0
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm19, %xmm8
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm8, %xmm7, %xmm7
; AVX512F-ONLY-FAST-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[3,1,2,1,4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0],xmm7[1,2],xmm6[3],xmm7[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm11, %ymm5, %ymm5
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm5[5,6,7]
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm14, %ymm2, %ymm2
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm1, %xmm4, %xmm1
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4],xmm1[5,6],xmm3[7]
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
; AVX512F-ONLY-FAST-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,4,5]
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm28
; AVX512F-ONLY-FAST-NEXT: vpternlogq $226, %zmm0, %zmm29, %zmm5
; AVX512F-ONLY-FAST-NEXT: vpternlogq $184, %zmm5, %zmm22, %zmm28
; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm12 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm1 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm1, %xmm12, %xmm0
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm12, %xmm2
; AVX512F-ONLY-FAST-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512F-ONLY-FAST-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[2,2,2,2,4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm30, %ymm2
; AVX512F-ONLY-FAST-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[0,3,2,1]
; AVX512F-ONLY-FAST-NEXT: vpbroadcastq {{.*#+}} xmm7 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm7, %xmm3, %xmm2
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm3, %xmm24
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm15 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm15, %xmm4, %xmm3
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm4, %xmm25
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm30
; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm4 = mem[0],ymm0[1],mem[2,3,4,5],ymm0[6],mem[7]
; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm11 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm1, %xmm11, %xmm0
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm11, %xmm9
; AVX512F-ONLY-FAST-NEXT: vpshuflw {{.*#+}} xmm2 = xmm9[2,2,2,2,4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 {{.*#+}} ymm29 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,4,5,0,1,12,13,24,25,20,21,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm0, %ymm4, %ymm3
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm4, %ymm20
; AVX512F-ONLY-FAST-NEXT: vpternlogq $236, %ymm29, %ymm3, %ymm2
; AVX512F-ONLY-FAST-NEXT: movw $31, %ax
; AVX512F-ONLY-FAST-NEXT: kmovw %eax, %k1
; AVX512F-ONLY-FAST-NEXT: vmovdqa32 %zmm2, %zmm30 {%k1}
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm31, %ymm2
; AVX512F-ONLY-FAST-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm4 = mem[0],ymm2[1],mem[2,3,4,5],ymm2[6],mem[7]
; AVX512F-ONLY-FAST-NEXT: vmovdqu (%rsp), %ymm2 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[0,3,2,1]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm7, %xmm3, %xmm2
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm3, %xmm27
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm15, %xmm5, %xmm3
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm5, %xmm22
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} ymm5 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm5, %ymm4, %ymm3
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm5, %ymm16
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm4, %ymm21
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm2, %ymm23
; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm4 = mem[0],ymm2[1],mem[2,3,4,5],ymm2[6],mem[7]
; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm14 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm14 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm1, %xmm14, %xmm2
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm14, %xmm5
; AVX512F-ONLY-FAST-NEXT: vpshuflw {{.*#+}} xmm3 = xmm5[2,2,2,2,4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm5, %xmm19
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm0, %ymm4, %ymm0
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm4, %ymm18
; AVX512F-ONLY-FAST-NEXT: vpternlogq $236, %ymm29, %ymm0, %ymm2
; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm4 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm1, %xmm4, %xmm1
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm4, %xmm0
; AVX512F-ONLY-FAST-NEXT: vpshuflw {{.*#+}} xmm3 = xmm0[2,2,2,2,4,5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm0, %xmm17
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm3 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm3, %xmm13
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm8 = xmm3[0,3,2,1]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm7, %xmm13, %xmm3
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm15, %xmm8, %xmm5
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4],xmm5[5],xmm3[6,7]
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $2, %xmm1, %zmm3, %zmm31
; AVX512F-ONLY-FAST-NEXT: vmovdqa32 %zmm2, %zmm31 {%k1}
; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm10 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
; AVX512F-ONLY-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
; AVX512F-ONLY-FAST-NEXT: vextracti128 $1, %ymm1, %xmm6
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm7, %xmm6, %xmm7
; AVX512F-ONLY-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[0,3,2,1]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm15, %xmm5, %xmm1
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm7[4],xmm1[5],xmm7[6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm16, %ymm0
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm0, %ymm10, %ymm7
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3,4],ymm1[5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm0, %ymm16
; AVX512F-ONLY-FAST-NEXT: vpbroadcastd {{.*#+}} xmm3 = [10,11,6,7,10,11,6,7,10,11,6,7,10,11,6,7]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm3, %xmm9, %xmm9
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm7 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm7, %xmm11, %xmm11
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm9 = xmm11[0],xmm9[1],xmm11[2,3],xmm9[4],xmm11[5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm20, %ymm0
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm1, %ymm0, %ymm11
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm1, %ymm20
; AVX512F-ONLY-FAST-NEXT: vpternlogq $236, %ymm29, %ymm11, %ymm9
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm3, %xmm0, %xmm11
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm7, %xmm12, %xmm12
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm12[0],xmm11[1],xmm12[2,3],xmm11[4],xmm12[5,6,7]
; AVX512F-ONLY-FAST-NEXT: vpbroadcastq {{.*#+}} xmm12 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm24, %xmm0
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm12, %xmm0, %xmm0
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} xmm11 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm25, %xmm2
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm11, %xmm2, %xmm15
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm15[0,1,2,3],xmm0[4],xmm15[5],xmm0[6,7]
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $2, %xmm1, %zmm0, %zmm2
; AVX512F-ONLY-FAST-NEXT: vmovdqa32 %zmm9, %zmm2 {%k1}
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm27, %xmm0
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm12, %xmm0, %xmm1
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm22, %xmm0
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm11, %xmm0, %xmm9
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm9[0,1,2,3],xmm1[4],xmm9[5],xmm1[6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa {{.*#+}} ymm9 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm21, %ymm0
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm9, %ymm0, %ymm15
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm15[0,1,2,3,4],ymm1[5,6,7]
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm18, %ymm0
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %ymm20, %ymm15
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm15, %ymm0, %ymm15
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm19, %xmm0
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm7, %xmm14, %xmm14
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm14[0],xmm0[1],xmm14[2,3],xmm0[4],xmm14[5,6,7]
; AVX512F-ONLY-FAST-NEXT: vpternlogq $236, %ymm29, %ymm15, %ymm0
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %xmm17, %xmm14
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm3, %xmm14, %xmm3
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm7, %xmm4, %xmm4
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2,3],xmm3[4],xmm4[5,6,7]
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm12, %xmm13, %xmm4
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm11, %xmm8, %xmm7
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0,1,2,3],xmm4[4],xmm7[5],xmm4[6,7]
; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $1, %ymm23, %zmm0, %zmm7
; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $1, %ymm16, %zmm0, %zmm8
; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm1
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
; AVX512F-ONLY-FAST-NEXT: vinserti32x4 $2, %xmm3, %zmm4, %zmm3
; AVX512F-ONLY-FAST-NEXT: vmovdqa32 %zmm0, %zmm3 {%k1}
; AVX512F-ONLY-FAST-NEXT: vpshufb %ymm9, %ymm10, %ymm0
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm12, %xmm6, %xmm4
; AVX512F-ONLY-FAST-NEXT: vpshufb %xmm11, %xmm5, %xmm5
; AVX512F-ONLY-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4],xmm5[5],xmm4[6,7]
; AVX512F-ONLY-FAST-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
; AVX512F-ONLY-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5,6,7]
; AVX512F-ONLY-FAST-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: movw $-2048, %ax # imm = 0xF800
; AVX512F-ONLY-FAST-NEXT: kmovw %eax, %k1
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
; AVX512F-ONLY-FAST-NEXT: vmovdqa32 %zmm6, %zmm5 {%k1}
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm5, (%rsi)
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
; AVX512F-ONLY-FAST-NEXT: vmovdqa32 %zmm6, %zmm5 {%k1}
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm5, 64(%rsi)
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
; AVX512F-ONLY-FAST-NEXT: vmovdqa32 %zmm6, %zmm5 {%k1}
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm5, 64(%rdx)
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
; AVX512F-ONLY-FAST-NEXT: vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
; AVX512F-ONLY-FAST-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
; AVX512F-ONLY-FAST-NEXT: vmovdqa32 %zmm4, %zmm5 {%k1}
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm5, (%rdx)
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
; AVX512F-ONLY-FAST-NEXT: vpternlogq $184, %zmm30, %zmm4, %zmm7
; AVX512F-ONLY-FAST-NEXT: vpternlogq $184, %zmm31, %zmm4, %zmm8
; AVX512F-ONLY-FAST-NEXT: vpternlogq $184, %zmm2, %zmm4, %zmm1
; AVX512F-ONLY-FAST-NEXT: vpternlogq $184, %zmm3, %zmm4, %zmm0
; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm2, 64(%rcx)
; AVX512F-ONLY-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512F-ONLY-FAST-NEXT: vmovaps %zmm2, (%rcx)
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm28, 64(%r8)
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm26, (%r8)
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm8, 64(%r9)
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm7, (%r9)
; AVX512F-ONLY-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm0, 64(%rax)
; AVX512F-ONLY-FAST-NEXT: vmovdqa64 %zmm1, (%rax)
; AVX512F-ONLY-FAST-NEXT: addq $1480, %rsp # imm = 0x5C8
; AVX512F-ONLY-FAST-NEXT: vzeroupper
; AVX512F-ONLY-FAST-NEXT: retq
;
; AVX512DQ-SLOW-LABEL: load_i16_stride6_vf64:
; AVX512DQ-SLOW: # %bb.0:
; AVX512DQ-SLOW-NEXT: subq $840, %rsp # imm = 0x348
; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} xmm9 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
; AVX512DQ-SLOW-NEXT: vmovdqa 608(%rdi), %ymm0
; AVX512DQ-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-SLOW-NEXT: vmovdqa 576(%rdi), %ymm1
; AVX512DQ-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
; AVX512DQ-SLOW-NEXT: vpshufb %xmm9, %xmm1, %xmm0
; AVX512DQ-SLOW-NEXT: vextracti32x4 $1, %ymm1, %xmm24
; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm1, %ymm25
; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm24[0,2,0,3]
; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
; AVX512DQ-SLOW-NEXT: vmovdqa 544(%rdi), %ymm1
; AVX512DQ-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-SLOW-NEXT: vmovdqa 512(%rdi), %ymm2
; AVX512DQ-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm3, %xmm2
; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} xmm12 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
; AVX512DQ-SLOW-NEXT: vpshufb %xmm12, %xmm2, %xmm1
; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm2, %xmm23
; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[2,2,2,2,4,5,6,7]
; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm3, %ymm21
; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,2]
; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512DQ-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
; AVX512DQ-SLOW-NEXT: vmovdqa 448(%rdi), %ymm1
; AVX512DQ-SLOW-NEXT: vmovdqa 416(%rdi), %ymm2
; AVX512DQ-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-SLOW-NEXT: vmovdqa 384(%rdi), %ymm3
; AVX512DQ-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm15 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
; AVX512DQ-SLOW-NEXT: vpshufb %xmm9, %xmm15, %xmm2
; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm15, %xmm4
; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm4[0,2,0,3]
; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm4, %xmm22
; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6,7]
; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],mem[2,3]
; AVX512DQ-SLOW-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-SLOW-NEXT: vinserti128 $1, 480(%rdi), %ymm1, %ymm1
; AVX512DQ-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm1[0],ymm3[1],ymm1[2,3,4,5],ymm3[6],ymm1[7]
; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,12,13,8,9,12,13,8,9,12,13,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
; AVX512DQ-SLOW-NEXT: vpshufb %ymm6, %ymm3, %ymm1
; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm3, %ymm20
; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm2[0,1,2],ymm1[3,4,5,6,7]
; AVX512DQ-SLOW-NEXT: vmovdqa 736(%rdi), %ymm1
; AVX512DQ-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-SLOW-NEXT: vmovdqa 704(%rdi), %ymm2
; AVX512DQ-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm11[2,2,2,2,4,5,6,7]
; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm11, %xmm4
; AVX512DQ-SLOW-NEXT: vpshufb %xmm12, %xmm4, %xmm2
; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm4, %xmm19
; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
; AVX512DQ-SLOW-NEXT: vmovdqa 640(%rdi), %ymm2
; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm2[2,3],mem[2,3]
; AVX512DQ-SLOW-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-SLOW-NEXT: vinserti128 $1, 672(%rdi), %ymm2, %ymm2
; AVX512DQ-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0],ymm4[1],ymm2[2,3,4,5],ymm4[6],ymm2[7]
; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = <0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u>
; AVX512DQ-SLOW-NEXT: vpshufb %ymm5, %ymm4, %ymm2
; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm5, %ymm27
; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm4, %ymm18
; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,6]
; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX512DQ-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
; AVX512DQ-SLOW-NEXT: vpternlogq $226, %zmm0, %zmm16, %zmm3
; AVX512DQ-SLOW-NEXT: movw $-2048, %ax # imm = 0xF800
; AVX512DQ-SLOW-NEXT: kmovw %eax, %k1
; AVX512DQ-SLOW-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm3 {%k1}
; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-SLOW-NEXT: vmovdqa 224(%rdi), %ymm0
; AVX512DQ-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-SLOW-NEXT: vmovdqa 192(%rdi), %ymm1
; AVX512DQ-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm13 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm13, %xmm14
; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm14[0,2,0,3]
; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; AVX512DQ-SLOW-NEXT: vpshufb %xmm9, %xmm13, %xmm1
; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
; AVX512DQ-SLOW-NEXT: vmovdqa 160(%rdi), %ymm1
; AVX512DQ-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-SLOW-NEXT: vmovdqa 128(%rdi), %ymm2
; AVX512DQ-SLOW-NEXT: vmovdqu %ymm2, (%rsp) # 32-byte Spill
; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm4[2,2,2,2,4,5,6,7]
; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm4, %xmm8
; AVX512DQ-SLOW-NEXT: vpshufb %xmm12, %xmm8, %xmm2
; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512DQ-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm17
; AVX512DQ-SLOW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512DQ-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-SLOW-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512DQ-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
; AVX512DQ-SLOW-NEXT: vpshufb %xmm9, %xmm3, %xmm1
; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm3, %xmm9
; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm9[0,2,0,3]
; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
; AVX512DQ-SLOW-NEXT: vmovdqa 64(%rdi), %ymm2
; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],mem[2,3]
; AVX512DQ-SLOW-NEXT: vinserti128 $1, 96(%rdi), %ymm2, %ymm2
; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm2[0],ymm0[1],ymm2[2,3,4,5],ymm0[6],ymm2[7]
; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm2, %ymm30
; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm0, %ymm31
; AVX512DQ-SLOW-NEXT: vpshufb %ymm6, %ymm5, %ymm2
; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm6 = ymm1[0,1,2],ymm2[3,4,5,6,7]
; AVX512DQ-SLOW-NEXT: vmovdqa 352(%rdi), %ymm0
10639 ; AVX512DQ-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10640 ; AVX512DQ-SLOW-NEXT: vmovdqa 320(%rdi), %ymm1
10641 ; AVX512DQ-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
10642 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm10 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
10643 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm10, %xmm7
10644 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm12, %xmm7, %xmm1
10645 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm12 = xmm10[2,2,2,2,4,5,6,7]
10646 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm12 = xmm12[0,1,2,2]
10647 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm12 = xmm1[0,1,2],xmm12[3],xmm1[4,5],xmm12[6],xmm1[7]
10648 ; AVX512DQ-SLOW-NEXT: vmovdqa 256(%rdi), %ymm1
10649 ; AVX512DQ-SLOW-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm1[2,3],mem[2,3]
10650 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, 288(%rdi), %ymm1, %ymm0
10651 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm2[1],ymm0[2,3,4,5],ymm2[6],ymm0[7]
10652 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm0, %ymm26
10653 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm2, %ymm29
10654 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm27, %ymm0
10655 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm0, %ymm1, %ymm0
10656 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
10657 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} ymm12 = ymm0[0,1,2],ymm12[3,4,5,6,7],ymm0[8,9,10],ymm12[11,12,13,14,15]
10658 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
10659 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm12[4,5,6,7]
10660 ; AVX512DQ-SLOW-NEXT: vpternlogq $226, %zmm17, %zmm16, %zmm6
10661 ; AVX512DQ-SLOW-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm6 {%k1}
10662 ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10663 ; AVX512DQ-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm12 = [2,3,14,15,10,11,0,0,2,3,14,15,10,11,0,0]
10664 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm12, %xmm14, %xmm0
10665 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} xmm14 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
10666 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm14, %xmm13, %xmm13
10667 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm13[0,1],xmm0[2],xmm13[3],xmm0[4,5],xmm13[6,7]
10668 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} xmm13 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
10669 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm13, %xmm8, %xmm6
10670 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
10671 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2],xmm4[3],xmm6[4,5],xmm4[6],xmm6[7]
10672 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
10673 ; AVX512DQ-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm4, %zmm0
10674 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm12, %xmm9, %xmm4
10675 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm14, %xmm3, %xmm3
10676 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3],xmm4[4,5],xmm3[6,7]
10677 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} ymm4 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
10678 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm4, %ymm5, %ymm5
10679 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0,1,2],ymm5[3,4,5,6,7]
10680 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm13, %xmm7, %xmm2
10681 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm10[0,1,2,3,5,5,5,5]
10682 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3],xmm2[4,5],xmm3[6],xmm2[7]
10683 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = <2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23>
10684 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm3, %ymm1, %ymm1
10685 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
10686 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
10687 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
10688 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
10689 ; AVX512DQ-SLOW-NEXT: vpternlogq $226, %zmm0, %zmm16, %zmm5
10690 ; AVX512DQ-SLOW-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm5 {%k1}
10691 ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10692 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm24, %xmm0
10693 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm12, %xmm0, %xmm0
10694 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm25, %ymm1
10695 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm14, %xmm1, %xmm1
10696 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
10697 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm23, %xmm1
10698 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm13, %xmm1, %xmm1
10699 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm21, %ymm2
10700 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
10701 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
10702 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
10703 ; AVX512DQ-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
10704 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm20, %ymm1
10705 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm4, %ymm1, %ymm1
10706 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm22, %xmm2
10707 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm12, %xmm2, %xmm2
10708 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm14, %xmm15, %xmm4
10709 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1],xmm2[2],xmm4[3],xmm2[4,5],xmm4[6,7]
10710 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm2[0,1,2],ymm1[3,4,5,6,7]
10711 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm18, %ymm1
10712 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm3, %ymm1, %ymm1
10713 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm19, %xmm2
10714 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm13, %xmm2, %xmm2
10715 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm11[0,1,2,3,5,5,5,5]
10716 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3],xmm2[4,5],xmm3[6],xmm2[7]
10717 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
10718 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
10719 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
10720 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
10721 ; AVX512DQ-SLOW-NEXT: vpternlogq $226, %zmm0, %zmm16, %zmm4
10722 ; AVX512DQ-SLOW-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm4 {%k1}
10723 ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10724 ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10725 ; AVX512DQ-SLOW-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
10726 ; AVX512DQ-SLOW-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
10727 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1
10728 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,1,2,3]
10729 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
10730 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7]
10731 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm1, %xmm21
10732 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
10733 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm2[2,1,2,0,4,5,6,7]
10734 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm2, %xmm22
10735 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
10736 ; AVX512DQ-SLOW-NEXT: vmovdqu (%rsp), %ymm1 # 32-byte Reload
10737 ; AVX512DQ-SLOW-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
10738 ; AVX512DQ-SLOW-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
10739 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm2
10740 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,1,0,3]
10741 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm3[0,0,0,0,4,5,6,7]
10742 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm3, %xmm23
10743 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
10744 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,1,2,1]
10745 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,6,5,6,4]
10746 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm3, %xmm24
10747 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
10748 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
10749 ; AVX512DQ-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm2
10750 ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10751 ; AVX512DQ-SLOW-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
10752 ; AVX512DQ-SLOW-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
10753 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1
10754 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,1,2,3]
10755 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
10756 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7]
10757 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm1, %xmm25
10758 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
10759 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm3[2,1,2,0,4,5,6,7]
10760 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm3, %xmm16
10761 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
10762 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm30, %ymm0
10763 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm31, %ymm3
10764 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm0[0,1],ymm3[2],ymm0[3],ymm3[4],ymm0[5,6],ymm3[7]
10765 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} ymm0 = [4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
10766 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm0, %ymm4, %ymm3
10767 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm4, %ymm17
10768 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm3[5,6,7]
10769 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
10770 ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
10771 ; AVX512DQ-SLOW-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
10772 ; AVX512DQ-SLOW-NEXT: # ymm3 = ymm3[0,1],mem[2],ymm3[3,4],mem[5],ymm3[6,7]
10773 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm3, %xmm4
10774 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm15 = xmm3[2,1,0,3]
10775 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm15[0,0,0,0,4,5,6,7]
10776 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,6,7]
10777 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm10 = xmm4[0,1,2,1]
10778 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm10[0,1,2,3,6,5,6,4]
10779 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
10780 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
10781 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm26, %ymm4
10782 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm29, %ymm5
10783 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm13 = ymm4[0,1],ymm5[2],ymm4[3],ymm5[4],ymm4[5,6],ymm5[7]
10784 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
10785 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm5, %ymm13, %ymm4
10786 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm5, %ymm27
10787 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
10788 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,5,4]
10789 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
10790 ; AVX512DQ-SLOW-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm3
10791 ; AVX512DQ-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm20 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
10792 ; AVX512DQ-SLOW-NEXT: vpternlogq $226, %zmm2, %zmm20, %zmm1
10793 ; AVX512DQ-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
10794 ; AVX512DQ-SLOW-NEXT: vpternlogq $184, %zmm1, %zmm2, %zmm3
10795 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm2, %zmm18
10796 ; AVX512DQ-SLOW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
10797 ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10798 ; AVX512DQ-SLOW-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
10799 ; AVX512DQ-SLOW-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
10800 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm2
10801 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm11 = xmm1[2,1,2,3]
10802 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm12 = xmm2[0,3,2,1]
10803 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm12[0,0,2,3,4,5,6,7]
10804 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,6,6]
10805 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm11[2,1,2,0,4,5,6,7]
10806 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
10807 ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
10808 ; AVX512DQ-SLOW-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
10809 ; AVX512DQ-SLOW-NEXT: # ymm2 = ymm2[0,1],mem[2],ymm2[3,4],mem[5],ymm2[6,7]
10810 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm2, %xmm3
10811 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm9 = xmm2[2,1,0,3]
10812 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm9[0,0,0,0,4,5,6,7]
10813 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,6,7]
10814 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm8 = xmm3[0,1,2,1]
10815 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,6,5,6,4]
10816 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
10817 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
10818 ; AVX512DQ-SLOW-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm19
10819 ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10820 ; AVX512DQ-SLOW-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
10821 ; AVX512DQ-SLOW-NEXT: # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
10822 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm2
10823 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[2,1,2,3]
10824 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm7 = xmm2[0,3,2,1]
10825 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm7[0,0,2,3,4,5,6,7]
10826 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,3]
10827 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm6[2,1,2,0,4,5,6,7]
10828 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
10829 ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
10830 ; AVX512DQ-SLOW-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
10831 ; AVX512DQ-SLOW-NEXT: # ymm5 = ymm2[0,1],mem[2],ymm2[3],mem[4],ymm2[5,6],mem[7]
10832 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm0, %ymm5, %ymm0
10833 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm0[5,6,7]
10834 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
10835 ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10836 ; AVX512DQ-SLOW-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
10837 ; AVX512DQ-SLOW-NEXT: # ymm1 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
10838 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm0
10839 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,1,0,3]
10840 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm4[0,0,0,0,4,5,6,7]
10841 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
10842 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[0,1,2,1]
10843 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm14 = xmm3[0,1,2,3,6,5,6,4]
10844 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm14 = xmm1[0,1,2,3],xmm14[4],xmm1[5,6],xmm14[7]
10845 ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10846 ; AVX512DQ-SLOW-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
10847 ; AVX512DQ-SLOW-NEXT: # ymm1 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
10848 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm27, %ymm0
10849 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm0, %ymm1, %ymm0
10850 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14
10851 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} ymm14 = ymm0[0,1,2],ymm14[3,4,5,6,7],ymm0[8,9,10],ymm14[11,12,13,14,15]
10852 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
10853 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7]
10854 ; AVX512DQ-SLOW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm28
10855 ; AVX512DQ-SLOW-NEXT: vpternlogq $226, %zmm19, %zmm20, %zmm2
10856 ; AVX512DQ-SLOW-NEXT: vpternlogq $184, %zmm2, %zmm18, %zmm28
10857 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm22, %xmm0
10858 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
10859 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm21, %xmm2
10860 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
10861 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
10862 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2],xmm0[3],xmm2[4,5,6,7]
10863 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm24, %xmm2
10864 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
10865 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm23, %xmm14
10866 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm14 = xmm14[1,1,1,1,4,5,6,7]
10867 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,5,7,7]
10868 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm14[0,1,2,3],xmm2[4],xmm14[5,6],xmm2[7]
10869 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
10870 ; AVX512DQ-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm21
10871 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm16, %xmm0
10872 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm0[3,1,2,1,4,5,6,7]
10873 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm25, %xmm0
10874 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm14 = xmm0[0,1,3,3,4,5,6,7]
10875 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,7,7,7,7]
10876 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm14[1,2],xmm2[3],xmm14[4,5,6,7]
10877 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} ymm14 = [6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
10878 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm17, %ymm0
10879 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm14, %ymm0, %ymm0
10880 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm0[5,6,7]
10881 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
10882 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm10[0,1,2,3,7,5,6,5]
10883 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm10 = xmm15[1,1,1,1,4,5,6,7]
10884 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,5,7,7]
10885 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm10[0,1,2,3],xmm2[4],xmm10[5,6],xmm2[7]
10886 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} ymm10 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
10887 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm10, %ymm13, %ymm13
10888 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
10889 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} ymm2 = ymm13[0,1,2],ymm2[3,4,5,6,7],ymm13[8,9,10],ymm2[11,12,13,14,15]
10890 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,7,4,5]
10891 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm2[4,5,6,7]
10892 ; AVX512DQ-SLOW-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm27
10893 ; AVX512DQ-SLOW-NEXT: vpternlogq $226, %zmm21, %zmm20, %zmm0
10894 ; AVX512DQ-SLOW-NEXT: vpternlogq $184, %zmm0, %zmm18, %zmm27
10895 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm11[3,1,2,1,4,5,6,7]
10896 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm12[0,1,3,3,4,5,6,7]
10897 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
10898 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2],xmm0[3],xmm2[4,5,6,7]
10899 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,7,5,6,5]
10900 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm8 = xmm9[1,1,1,1,4,5,6,7]
10901 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,7,7]
10902 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm8[0,1,2,3],xmm2[4],xmm8[5,6],xmm2[7]
10903 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
10904 ; AVX512DQ-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm0
10905 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm6[3,1,2,1,4,5,6,7]
10906 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm6 = xmm7[0,1,3,3,4,5,6,7]
10907 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
10908 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1,2],xmm2[3],xmm6[4,5,6,7]
10909 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm14, %ymm5, %ymm5
10910 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm5[5,6,7]
10911 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
10912 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm10, %ymm1, %ymm1
10913 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
10914 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm4[1,1,1,1,4,5,6,7]
10915 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,7]
10916 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
10917 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
10918 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
10919 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
10920 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
10921 ; AVX512DQ-SLOW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm22
10922 ; AVX512DQ-SLOW-NEXT: vpternlogq $226, %zmm0, %zmm20, %zmm2
10923 ; AVX512DQ-SLOW-NEXT: vpternlogq $184, %zmm2, %zmm18, %zmm22
10924 ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
10925 ; AVX512DQ-SLOW-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
10926 ; AVX512DQ-SLOW-NEXT: # ymm4 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
10927 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} xmm0 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
10928 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm0, %xmm4, %xmm1
10929 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm4, %xmm14
10930 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm14[2,2,2,2,4,5,6,7]
10931 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
10932 ; AVX512DQ-SLOW-NEXT: vmovdqu (%rsp), %ymm2 # 32-byte Reload
10933 ; AVX512DQ-SLOW-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
10934 ; AVX512DQ-SLOW-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
10935 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm2, %xmm5
10936 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,3,2,1]
10937 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[0,1,0,2,4,5,6,7]
10938 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm3, %xmm19
10939 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,6,6,6]
10940 ; AVX512DQ-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm12 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
10941 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm12, %xmm5, %xmm3
10942 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm5, %xmm18
10943 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5],xmm3[6,7]
10944 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
10945 ; AVX512DQ-SLOW-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm23
10946 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm30, %ymm1
10947 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm31, %ymm2
10948 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm2[0],ymm1[1],ymm2[2,3,4,5],ymm1[6],ymm2[7]
10949 ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10950 ; AVX512DQ-SLOW-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload
10951 ; AVX512DQ-SLOW-NEXT: # ymm10 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
10952 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm0, %xmm10, %xmm1
10953 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm10, %xmm3
10954 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[2,2,2,2,4,5,6,7]
10955 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm3, %xmm30
10956 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
10957 ; AVX512DQ-SLOW-NEXT: vmovdqa64 {{.*#+}} ymm20 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
10958 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,4,5,0,1,12,13,24,25,20,21,128,128,128,128,128,128,128,128,128,128,128,128]
10959 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm1, %ymm5, %ymm3
10960 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm5, %ymm31
10961 ; AVX512DQ-SLOW-NEXT: vpternlogq $236, %ymm20, %ymm3, %ymm2
10962 ; AVX512DQ-SLOW-NEXT: movw $31, %ax
10963 ; AVX512DQ-SLOW-NEXT: kmovw %eax, %k1
10964 ; AVX512DQ-SLOW-NEXT: vinserti32x8 $0, %ymm2, %zmm0, %zmm23 {%k1}
10965 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm26, %ymm2
10966 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm29, %ymm3
10967 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0],ymm2[1],ymm3[2,3,4,5],ymm2[6],ymm3[7]
10968 ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
10969 ; AVX512DQ-SLOW-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
10970 ; AVX512DQ-SLOW-NEXT: # ymm2 = mem[0],ymm2[1],mem[2,3],ymm2[4],mem[5,6],ymm2[7]
10971 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm2, %xmm6
10972 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,3,2,1]
10973 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm3[0,1,0,2,4,5,6,7]
10974 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm3, %xmm29
10975 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,6,6,6]
10976 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm12, %xmm6, %xmm3
10977 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm6, %xmm16
10978 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5],xmm3[6,7]
10979 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
10980 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} ymm6 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
10981 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm6, %ymm5, %ymm3
10982 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm6, %ymm21
10983 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm5, %ymm17
10984 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7]
10985 ; AVX512DQ-SLOW-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm24
10986 ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
10987 ; AVX512DQ-SLOW-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm15 # 32-byte Folded Reload
10988 ; AVX512DQ-SLOW-NEXT: # ymm15 = mem[0],ymm2[1],mem[2,3,4,5],ymm2[6],mem[7]
10989 ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
10990 ; AVX512DQ-SLOW-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
10991 ; AVX512DQ-SLOW-NEXT: # ymm3 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
10992 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm0, %xmm3, %xmm2
10993 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm3, %xmm13
10994 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm5 = xmm13[2,2,2,2,4,5,6,7]
10995 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm5[1],xmm2[2,3],xmm5[4],xmm2[5,6,7]
10996 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm1, %ymm15, %ymm1
10997 ; AVX512DQ-SLOW-NEXT: vpternlogq $236, %ymm20, %ymm1, %ymm2
10998 ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
10999 ; AVX512DQ-SLOW-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
11000 ; AVX512DQ-SLOW-NEXT: # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
11001 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm0, %xmm1, %xmm0
11002 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm11
11003 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm5 = xmm11[2,2,2,2,4,5,6,7]
11004 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm5[1],xmm0[2,3],xmm5[4],xmm0[5,6,7]
11005 ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
11006 ; AVX512DQ-SLOW-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
11007 ; AVX512DQ-SLOW-NEXT: # ymm5 = mem[0],ymm5[1],mem[2,3],ymm5[4],mem[5,6],ymm5[7]
11008 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm5, %xmm9
11009 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm8 = xmm5[0,3,2,1]
11010 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm5 = xmm8[0,1,0,2,4,5,6,7]
11011 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,6,6,6]
11012 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm12, %xmm9, %xmm6
11013 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4],xmm5[5],xmm6[6,7]
11014 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
11015 ; AVX512DQ-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm5, %zmm25
11016 ; AVX512DQ-SLOW-NEXT: vinserti32x8 $0, %ymm2, %zmm0, %zmm25 {%k1}
11017 ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11018 ; AVX512DQ-SLOW-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
11019 ; AVX512DQ-SLOW-NEXT: # ymm7 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
11020 ; AVX512DQ-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
11021 ; AVX512DQ-SLOW-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
11022 ; AVX512DQ-SLOW-NEXT: # ymm0 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
11023 ; AVX512DQ-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm6
11024 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm12, %xmm6, %xmm12
11025 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[0,3,2,1]
11026 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm5[0,1,0,2,4,5,6,7]
11027 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
11028 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm12[4],xmm0[5],xmm12[6,7]
11029 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm21, %ymm2
11030 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm2, %ymm7, %ymm12
11031 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
11032 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3,4],ymm0[5,6,7]
11033 ; AVX512DQ-SLOW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm21
11034 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} xmm2 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
11035 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm2, %xmm4, %xmm4
11036 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm12 = xmm14[1,1,2,3]
11037 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,5,5,5,5]
11038 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm12 = xmm4[0],xmm12[1],xmm4[2,3],xmm12[4],xmm4[5,6,7]
11039 ; AVX512DQ-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm4 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
11040 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm18, %xmm0
11041 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm4, %xmm0, %xmm0
11042 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm19, %xmm14
11043 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm14 = xmm14[0,1,1,3,4,5,6,7]
11044 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm14 = xmm14[0,1,3,3]
11045 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm14[0,1,2,3],xmm0[4],xmm14[5],xmm0[6,7]
11046 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
11047 ; AVX512DQ-SLOW-NEXT: vinserti32x4 $2, %xmm12, %zmm0, %zmm26
11048 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm2, %xmm10, %xmm10
11049 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm12 = xmm30[1,1,2,3]
11050 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,5,5,5,5]
11051 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0],xmm12[1],xmm10[2,3],xmm12[4],xmm10[5,6,7]
11052 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
11053 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm31, %ymm0
11054 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm12, %ymm0, %ymm14
11055 ; AVX512DQ-SLOW-NEXT: vpternlogq $236, %ymm20, %ymm14, %ymm10
11056 ; AVX512DQ-SLOW-NEXT: vinserti32x8 $0, %ymm10, %zmm0, %zmm26 {%k1}
11057 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm16, %xmm0
11058 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm4, %xmm0, %xmm10
11059 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %xmm29, %xmm0
11060 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm14 = xmm0[0,1,1,3,4,5,6,7]
11061 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm14 = xmm14[0,1,3,3]
11062 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm10 = xmm14[0,1,2,3],xmm10[4],xmm14[5],xmm10[6,7]
11063 ; AVX512DQ-SLOW-NEXT: vmovdqa {{.*#+}} ymm14 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
11064 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %ymm17, %ymm0
11065 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm14, %ymm0, %ymm0
11066 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
11067 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm10[5,6,7]
11068 ; AVX512DQ-SLOW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
11069 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm12, %ymm15, %ymm10
11070 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm2, %xmm3, %xmm3
11071 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm12 = xmm13[1,1,2,3]
11072 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,5,5,5,5]
11073 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm12[1],xmm3[2,3],xmm12[4],xmm3[5,6,7]
11074 ; AVX512DQ-SLOW-NEXT: vpternlogq $236, %ymm20, %ymm10, %ymm3
11075 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
11076 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm11[1,1,2,3]
11077 ; AVX512DQ-SLOW-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
11078 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
11079 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm4, %xmm9, %xmm2
11080 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm8 = xmm8[0,1,1,3,4,5,6,7]
11081 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm8 = xmm8[0,1,3,3]
11082 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm8[0,1,2,3],xmm2[4],xmm8[5],xmm2[6,7]
11083 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
11084 ; AVX512DQ-SLOW-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm1
11085 ; AVX512DQ-SLOW-NEXT: vinserti32x8 $0, %ymm3, %zmm0, %zmm1 {%k1}
11086 ; AVX512DQ-SLOW-NEXT: vpshufb %ymm14, %ymm7, %ymm2
11087 ; AVX512DQ-SLOW-NEXT: vpshufb %xmm4, %xmm6, %xmm3
11088 ; AVX512DQ-SLOW-NEXT: vpshuflw {{.*#+}} xmm4 = xmm5[0,1,1,3,4,5,6,7]
11089 ; AVX512DQ-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,1,3,3]
11090 ; AVX512DQ-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5],xmm3[6,7]
11091 ; AVX512DQ-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
11092 ; AVX512DQ-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
11093 ; AVX512DQ-SLOW-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm2
11094 ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
11095 ; AVX512DQ-SLOW-NEXT: vmovaps %zmm3, (%rsi)
11096 ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
11097 ; AVX512DQ-SLOW-NEXT: vmovaps %zmm3, 64(%rsi)
11098 ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
11099 ; AVX512DQ-SLOW-NEXT: vmovaps %zmm3, 64(%rdx)
11100 ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
11101 ; AVX512DQ-SLOW-NEXT: vmovaps %zmm3, (%rdx)
11102 ; AVX512DQ-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
11103 ; AVX512DQ-SLOW-NEXT: vpternlogq $184, %zmm23, %zmm3, %zmm24
11104 ; AVX512DQ-SLOW-NEXT: vpternlogq $184, %zmm25, %zmm3, %zmm21
11105 ; AVX512DQ-SLOW-NEXT: vpternlogq $184, %zmm26, %zmm3, %zmm0
11106 ; AVX512DQ-SLOW-NEXT: vpternlogq $184, %zmm1, %zmm3, %zmm2
11107 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm28, 64(%rcx)
11108 ; AVX512DQ-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
11109 ; AVX512DQ-SLOW-NEXT: vmovaps %zmm1, (%rcx)
11110 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm22, 64(%r8)
11111 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm27, (%r8)
11112 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm21, 64(%r9)
11113 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm24, (%r9)
11114 ; AVX512DQ-SLOW-NEXT: movq {{[0-9]+}}(%rsp), %rax
11115 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm2, 64(%rax)
11116 ; AVX512DQ-SLOW-NEXT: vmovdqa64 %zmm0, (%rax)
11117 ; AVX512DQ-SLOW-NEXT: addq $840, %rsp # imm = 0x348
11118 ; AVX512DQ-SLOW-NEXT: vzeroupper
11119 ; AVX512DQ-SLOW-NEXT: retq
11120 ;
11121 ; AVX512DQ-FAST-LABEL: load_i16_stride6_vf64:
11122 ; AVX512DQ-FAST: # %bb.0:
11123 ; AVX512DQ-FAST-NEXT: subq $904, %rsp # imm = 0x388
11124 ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm9 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
11125 ; AVX512DQ-FAST-NEXT: vmovdqa 608(%rdi), %ymm0
11126 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11127 ; AVX512DQ-FAST-NEXT: vmovdqa 576(%rdi), %ymm1
11128 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11129 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
11130 ; AVX512DQ-FAST-NEXT: vpshufb %xmm9, %xmm2, %xmm0
11131 ; AVX512DQ-FAST-NEXT: vpbroadcastq {{.*#+}} xmm12 = [8,9,12,13,0,1,0,0,8,9,12,13,0,1,0,0]
11132 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm2, %xmm1
11133 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm2, %ymm25
11134 ; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
11135 ; AVX512DQ-FAST-NEXT: vpshufb %xmm12, %xmm2, %xmm1
11136 ; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm2, %xmm24
11137 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
11138 ; AVX512DQ-FAST-NEXT: vmovdqa 544(%rdi), %ymm1
11139 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11140 ; AVX512DQ-FAST-NEXT: vmovdqa 512(%rdi), %ymm2
11141 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11142 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
11143 ; AVX512DQ-FAST-NEXT: vpbroadcastd {{.*#+}} xmm10 = [8,9,4,5,8,9,4,5,8,9,4,5,8,9,4,5]
11144 ; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm2, %xmm1
11145 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm2, %xmm4
11146 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm2, %ymm23
11147 ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm2 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
11148 ; AVX512DQ-FAST-NEXT: vpshufb %xmm2, %xmm4, %xmm3
11149 ; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm4, %xmm22
11150 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
11151 ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
11152 ; AVX512DQ-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
11153 ; AVX512DQ-FAST-NEXT: vmovdqa 416(%rdi), %ymm1
11154 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11155 ; AVX512DQ-FAST-NEXT: vmovdqa 384(%rdi), %ymm3
11156 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11157 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm15 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7]
11158 ; AVX512DQ-FAST-NEXT: vpshufb %xmm9, %xmm15, %xmm1
11159 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm15, %xmm3
11160 ; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[2,1,0,3]
11161 ; AVX512DQ-FAST-NEXT: vpshufb %xmm12, %xmm4, %xmm3
11162 ; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm4, %xmm21
11163 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
11164 ; AVX512DQ-FAST-NEXT: vmovdqa 448(%rdi), %ymm3
11165 ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm3[2,3],mem[2,3]
11166 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11167 ; AVX512DQ-FAST-NEXT: vinserti128 $1, 480(%rdi), %ymm3, %ymm3
11168 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11169 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm3[0],ymm4[1],ymm3[2,3,4,5],ymm4[6],ymm3[7]
11170 ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,12,13,8,9,12,13,8,9,12,13,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
11171 ; AVX512DQ-FAST-NEXT: vpshufb %ymm6, %ymm4, %ymm3
11172 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm4, %ymm20
11173 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0,1,2],ymm3[3,4,5,6,7]
11174 ; AVX512DQ-FAST-NEXT: vmovdqa 736(%rdi), %ymm1
11175 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11176 ; AVX512DQ-FAST-NEXT: vmovdqa 704(%rdi), %ymm3
11177 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11178 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm14 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7]
11179 ; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm14, %xmm1
11180 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm14, %xmm5
11181 ; AVX512DQ-FAST-NEXT: vpshufb %xmm2, %xmm5, %xmm3
11182 ; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm5, %xmm19
11183 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
11184 ; AVX512DQ-FAST-NEXT: vmovdqa 640(%rdi), %ymm3
11185 ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
11186 ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm3[2,3],mem[2,3]
11187 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11188 ; AVX512DQ-FAST-NEXT: vinserti128 $1, 672(%rdi), %ymm3, %ymm3
11189 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11190 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0],ymm5[1],ymm3[2,3,4,5],ymm5[6],ymm3[7]
11191 ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = <0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u>
11192 ; AVX512DQ-FAST-NEXT: vpshufb %ymm7, %ymm5, %ymm3
11193 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm7, %ymm26
11194 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm5, %ymm18
11195 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5,6,7],ymm3[8,9,10],ymm1[11,12,13,14,15]
11196 ; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,6]
11197 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
11198 ; AVX512DQ-FAST-NEXT: vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
11199 ; AVX512DQ-FAST-NEXT: vpternlogq $226, %zmm0, %zmm16, %zmm4
11200 ; AVX512DQ-FAST-NEXT: movw $-2048, %ax # imm = 0xF800
11201 ; AVX512DQ-FAST-NEXT: kmovw %eax, %k1
11202 ; AVX512DQ-FAST-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm4 {%k1}
11203 ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11204 ; AVX512DQ-FAST-NEXT: vmovdqa 224(%rdi), %ymm0
11205 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11206 ; AVX512DQ-FAST-NEXT: vmovdqa 192(%rdi), %ymm1
11207 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11208 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm11 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
11209 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm11, %xmm0
11210 ; AVX512DQ-FAST-NEXT: vpshufb %xmm9, %xmm11, %xmm1
11211 ; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[2,1,0,3]
11212 ; AVX512DQ-FAST-NEXT: vpshufb %xmm12, %xmm7, %xmm0
11213 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
11214 ; AVX512DQ-FAST-NEXT: vmovdqa 160(%rdi), %ymm1
11215 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm1, (%rsp) # 32-byte Spill
11216 ; AVX512DQ-FAST-NEXT: vmovdqa 128(%rdi), %ymm3
11217 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11218 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7]
11219 ; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm5, %xmm1
11220 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm5, %xmm13
11221 ; AVX512DQ-FAST-NEXT: vpshufb %xmm2, %xmm13, %xmm3
11222 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
11223 ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
11224 ; AVX512DQ-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm17
11225 ; AVX512DQ-FAST-NEXT: vmovdqa (%rdi), %ymm0
11226 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11227 ; AVX512DQ-FAST-NEXT: vmovdqa 32(%rdi), %ymm1
11228 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11229 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
11230 ; AVX512DQ-FAST-NEXT: vpshufb %xmm9, %xmm4, %xmm0
11231 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm4, %xmm3
11232 ; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm9 = xmm3[2,1,0,3]
11233 ; AVX512DQ-FAST-NEXT: vpshufb %xmm12, %xmm9, %xmm3
11234 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3],xmm3[4,5],xmm0[6,7]
11235 ; AVX512DQ-FAST-NEXT: vmovdqa 64(%rdi), %ymm3
11236 ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
11237 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11238 ; AVX512DQ-FAST-NEXT: vinserti128 $1, 96(%rdi), %ymm3, %ymm3
11239 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11240 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm12 = ymm3[0],ymm1[1],ymm3[2,3,4,5],ymm1[6],ymm3[7]
11241 ; AVX512DQ-FAST-NEXT: vpshufb %ymm6, %ymm12, %ymm3
11242 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm0[0,1,2],ymm3[3,4,5,6,7]
11243 ; AVX512DQ-FAST-NEXT: vmovdqa 352(%rdi), %ymm0
11244 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11245 ; AVX512DQ-FAST-NEXT: vmovdqa 320(%rdi), %ymm1
11246 ; AVX512DQ-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
11247 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
11248 ; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm8, %xmm10
11249 ; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm8, %xmm3
11250 ; AVX512DQ-FAST-NEXT: vpshufb %xmm2, %xmm3, %xmm2
11251 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm10 = xmm2[0,1,2],xmm10[3],xmm2[4,5],xmm10[6],xmm2[7]
11252 ; AVX512DQ-FAST-NEXT: vmovdqa 256(%rdi), %ymm2
11253 ; AVX512DQ-FAST-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],mem[2,3]
11254 ; AVX512DQ-FAST-NEXT: vinserti128 $1, 288(%rdi), %ymm2, %ymm0
11255 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
11256 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm0, %ymm30
11257 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm1, %ymm31
11258 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm26, %ymm0
11259 ; AVX512DQ-FAST-NEXT: vpshufb %ymm0, %ymm2, %ymm0
11260 ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
11261 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} ymm10 = ymm0[0,1,2],ymm10[3,4,5,6,7],ymm0[8,9,10],ymm10[11,12,13,14,15]
11262 ; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
11263 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
11264 ; AVX512DQ-FAST-NEXT: vpternlogq $226, %zmm17, %zmm16, %zmm6
11265 ; AVX512DQ-FAST-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm6 {%k1}
11266 ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11267 ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm10 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
11268 ; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm11, %xmm0
11269 ; AVX512DQ-FAST-NEXT: vpbroadcastq {{.*#+}} xmm11 = [10,11,14,15,2,3,0,0,10,11,14,15,2,3,0,0]
11270 ; AVX512DQ-FAST-NEXT: vpshufb %xmm11, %xmm7, %xmm7
11271 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm7 = xmm0[0,1],xmm7[2],xmm0[3],xmm7[4,5],xmm0[6,7]
11272 ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm0 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
11273 ; AVX512DQ-FAST-NEXT: vpshufb %xmm0, %xmm13, %xmm6
11274 ; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
11275 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2],xmm5[3],xmm6[4,5],xmm5[6],xmm6[7]
11276 ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
11277 ; AVX512DQ-FAST-NEXT: vinserti32x4 $2, %xmm7, %zmm5, %zmm5
11278 ; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm4, %xmm4
11279 ; AVX512DQ-FAST-NEXT: vpshufb %xmm11, %xmm9, %xmm6
11280 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm6[2],xmm4[3],xmm6[4,5],xmm4[6,7]
11281 ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
11282 ; AVX512DQ-FAST-NEXT: vpshufb %ymm6, %ymm12, %ymm7
11283 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2],ymm7[3,4,5,6,7]
11284 ; AVX512DQ-FAST-NEXT: vpshufb %xmm0, %xmm3, %xmm3
11285 ; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,5,5,5,5]
11286 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
11287 ; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = <2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23>
11288 ; AVX512DQ-FAST-NEXT: vpshufb %ymm3, %ymm2, %ymm2
11289 ; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
11290 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
11291 ; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,7]
11292 ; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
11293 ; AVX512DQ-FAST-NEXT: vpternlogq $226, %zmm5, %zmm16, %zmm4
11294 ; AVX512DQ-FAST-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm4 {%k1}
11295 ; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
11296 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm25, %ymm1
11297 ; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm1, %xmm1
11298 ; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm24, %xmm2
11299 ; AVX512DQ-FAST-NEXT: vpshufb %xmm11, %xmm2, %xmm2
11300 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
11301 ; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm22, %xmm2
11302 ; AVX512DQ-FAST-NEXT: vpshufb %xmm0, %xmm2, %xmm2
11303 ; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm23, %ymm4
11304 ; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
11305 ; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[3],xmm2[4,5],xmm4[6],xmm2[7]
; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512DQ-FAST-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm1
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm20, %ymm2
; AVX512DQ-FAST-NEXT: vpshufb %ymm6, %ymm2, %ymm2
; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm15, %xmm4
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm21, %xmm5
; AVX512DQ-FAST-NEXT: vpshufb %xmm11, %xmm5, %xmm5
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3],xmm5[4,5],xmm4[6,7]
; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2],ymm2[3,4,5,6,7]
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm18, %ymm2
; AVX512DQ-FAST-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm19, %xmm3
; AVX512DQ-FAST-NEXT: vpshufb %xmm0, %xmm3, %xmm0
; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm3 = xmm14[0,1,2,3,5,5,5,5]
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[3],xmm0[4,5],xmm3[6],xmm0[7]
; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,7]
; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX512DQ-FAST-NEXT: vpternlogq $226, %zmm1, %zmm16, %zmm4
; AVX512DQ-FAST-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm4 {%k1}
; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm14 = xmm0[2,1,2,3]
; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm0 = <u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13>
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm15 = xmm1[0,3,2,1]
; AVX512DQ-FAST-NEXT: vpshufb %xmm0, %xmm15, %xmm1
; AVX512DQ-FAST-NEXT: vpshuflw {{.*#+}} xmm2 = xmm14[2,1,2,0,4,5,6,7]
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
; AVX512DQ-FAST-NEXT: vmovdqu (%rsp), %ymm2 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm2 = ymm2[0,1],mem[2],ymm2[3,4],mem[5],ymm2[6,7]
; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,1,0,3]
; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm10 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm4, %xmm2
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm4, %xmm19
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[0,1,2,1]
; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm3 = xmm4[0,1,2,3,6,5,6,4]
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm4, %xmm21
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512DQ-FAST-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm3
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm1, %xmm4
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,1,2,3]
; AVX512DQ-FAST-NEXT: vpbroadcastq {{.*#+}} xmm2 = [12,13,0,1,4,5,0,0,12,13,0,1,4,5,0,0]
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
; AVX512DQ-FAST-NEXT: vpshufb %xmm2, %xmm4, %xmm1
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm4, %xmm23
; AVX512DQ-FAST-NEXT: vpshuflw {{.*#+}} xmm4 = xmm5[2,1,2,0,4,5,6,7]
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm5, %xmm22
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm1[1,2],xmm4[3],xmm1[4,5,6,7]
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm6 = ymm1[0,1],mem[2],ymm1[3],mem[4],ymm1[5,6],mem[7]
; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
; AVX512DQ-FAST-NEXT: vpshufb %ymm1, %ymm6, %ymm5
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm6, %ymm24
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7]
; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm5 = mem[0,1],ymm5[2],mem[3,4],ymm5[5],mem[6,7]
; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm5, %xmm6
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm7 = xmm5[2,1,0,3]
; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm7, %xmm5
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm7, %xmm25
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[0,1,2,1]
; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm6 = xmm7[0,1,2,3,6,5,6,4]
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm7, %xmm18
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4],xmm5[5,6],xmm6[7]
; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm30, %ymm6
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm31, %ymm7
; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm7 = ymm6[0,1],ymm7[2],ymm6[3],ymm7[4],ymm6[5,6],ymm7[7]
; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
; AVX512DQ-FAST-NEXT: vpshufb %ymm8, %ymm7, %ymm6
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm8, %ymm27
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm7, %ymm17
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3,4,5,6,7],ymm6[8,9,10],ymm5[11,12,13,14,15]
; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,5,4]
; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
; AVX512DQ-FAST-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm5
; AVX512DQ-FAST-NEXT: vmovdqa64 {{.*#+}} zmm20 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
; AVX512DQ-FAST-NEXT: vpternlogq $226, %zmm3, %zmm20, %zmm4
; AVX512DQ-FAST-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
; AVX512DQ-FAST-NEXT: vpternlogq $184, %zmm4, %zmm3, %zmm5
; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm3, %zmm26
; AVX512DQ-FAST-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm3 = ymm3[0,1],mem[2],ymm3[3,4],mem[5],ymm3[6,7]
; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm3, %xmm4
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
; AVX512DQ-FAST-NEXT: vpshufb %xmm0, %xmm4, %xmm0
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm4, %xmm16
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm13 = xmm3[2,1,2,3]
; AVX512DQ-FAST-NEXT: vpshuflw {{.*#+}} xmm3 = xmm13[2,1,2,0,4,5,6,7]
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1,2],xmm3[3],xmm0[4,5,6,7]
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm3 = ymm3[0,1],mem[2],ymm3[3,4],mem[5],ymm3[6,7]
; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm3, %xmm4
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm12 = xmm3[2,1,0,3]
; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm12, %xmm3
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm11 = xmm4[0,1,2,1]
; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm4 = xmm11[0,1,2,3,6,5,6,4]
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX512DQ-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm3, %zmm9
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm0, %xmm3
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm8 = xmm3[0,3,2,1]
; AVX512DQ-FAST-NEXT: vpshufb %xmm2, %xmm8, %xmm2
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[2,1,2,3]
; AVX512DQ-FAST-NEXT: vpshuflw {{.*#+}} xmm0 = xmm7[2,1,2,0,4,5,6,7]
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2],xmm0[3],xmm2[4,5,6,7]
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm6 = mem[0,1],ymm2[2],mem[3],ymm2[4],mem[5,6],ymm2[7]
; AVX512DQ-FAST-NEXT: vpshufb %ymm1, %ymm6, %ymm1
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[2,1,0,3]
; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm5, %xmm0
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[0,1,2,1]
; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm10 = xmm4[0,1,2,3,6,5,6,4]
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm10 = xmm0[0,1,2,3],xmm10[4],xmm0[5,6],xmm10[7]
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm2 = ymm0[0,1],mem[2],ymm0[3],mem[4],ymm0[5,6],mem[7]
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm27, %ymm0
; AVX512DQ-FAST-NEXT: vpshufb %ymm0, %ymm2, %ymm0
; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} ymm10 = ymm0[0,1,2],ymm10[3,4,5,6,7],ymm0[8,9,10],ymm10[11,12,13,14,15]
; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
; AVX512DQ-FAST-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm29
; AVX512DQ-FAST-NEXT: vpternlogq $226, %zmm9, %zmm20, %zmm1
; AVX512DQ-FAST-NEXT: vpternlogq $184, %zmm1, %zmm26, %zmm29
; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm1 = <u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15>
; AVX512DQ-FAST-NEXT: vpshufb %xmm1, %xmm15, %xmm0
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm1, %xmm28
; AVX512DQ-FAST-NEXT: vpshuflw {{.*#+}} xmm1 = xmm14[3,1,2,1,4,5,6,7]
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm3 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm19, %xmm1
; AVX512DQ-FAST-NEXT: vpshufb %xmm3, %xmm1, %xmm9
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm21, %xmm1
; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm10 = xmm1[0,1,2,3,7,5,6,5]
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm10[4],xmm9[5,6],xmm10[7]
; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
; AVX512DQ-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm9, %zmm21
; AVX512DQ-FAST-NEXT: vpbroadcastq {{.*#+}} xmm10 = [14,15,2,3,6,7,0,0,14,15,2,3,6,7,0,0]
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm23, %xmm0
; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm0, %xmm9
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm22, %xmm0
; AVX512DQ-FAST-NEXT: vpshuflw {{.*#+}} xmm15 = xmm0[3,1,2,1,4,5,6,7]
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm15 = xmm15[0],xmm9[1,2],xmm15[3],xmm9[4,5,6,7]
; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} ymm9 = [6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm24, %ymm0
; AVX512DQ-FAST-NEXT: vpshufb %ymm9, %ymm0, %ymm0
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm15 = xmm15[0,1,2,3,4],xmm0[5,6,7]
; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm15[0,1,2,3],ymm0[4,5,6,7]
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm25, %xmm0
; AVX512DQ-FAST-NEXT: vpshufb %xmm3, %xmm0, %xmm15
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm18, %xmm0
; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm14 = xmm0[0,1,2,3,7,5,6,5]
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm14 = xmm15[0,1,2,3],xmm14[4],xmm15[5,6],xmm14[7]
; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} ymm15 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm17, %ymm0
; AVX512DQ-FAST-NEXT: vpshufb %ymm15, %ymm0, %ymm0
; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} ymm14 = ymm0[0,1,2],ymm14[3,4,5,6,7],ymm0[8,9,10],ymm14[11,12,13,14,15]
; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,4,5]
; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7]
; AVX512DQ-FAST-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm27
; AVX512DQ-FAST-NEXT: vpternlogq $226, %zmm21, %zmm20, %zmm1
; AVX512DQ-FAST-NEXT: vpternlogq $184, %zmm1, %zmm26, %zmm27
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm16, %xmm0
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm28, %xmm1
; AVX512DQ-FAST-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX512DQ-FAST-NEXT: vpshuflw {{.*#+}} xmm1 = xmm13[3,1,2,1,4,5,6,7]
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
; AVX512DQ-FAST-NEXT: vpshufb %xmm3, %xmm12, %xmm1
; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,7,5,6,5]
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm11[4],xmm1[5,6],xmm11[7]
; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512DQ-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm8, %xmm1
; AVX512DQ-FAST-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm7[0],xmm1[1,2],xmm7[3],xmm1[4,5,6,7]
; AVX512DQ-FAST-NEXT: vpshufb %ymm9, %ymm6, %ymm6
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm6[5,6,7]
; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
; AVX512DQ-FAST-NEXT: vpshufb %ymm15, %ymm2, %ymm2
; AVX512DQ-FAST-NEXT: vpshufb %xmm3, %xmm5, %xmm3
; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,6,5]
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
; AVX512DQ-FAST-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,4,5]
; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
; AVX512DQ-FAST-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm21
; AVX512DQ-FAST-NEXT: vpternlogq $226, %zmm0, %zmm20, %zmm1
; AVX512DQ-FAST-NEXT: vpternlogq $184, %zmm1, %zmm26, %zmm21
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm13 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm0 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
; AVX512DQ-FAST-NEXT: vpshufb %xmm0, %xmm13, %xmm1
; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm13, %xmm14
; AVX512DQ-FAST-NEXT: vpshuflw {{.*#+}} xmm2 = xmm14[2,2,2,2,4,5,6,7]
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
; AVX512DQ-FAST-NEXT: vmovdqu (%rsp), %ymm2 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm2 = mem[0],ymm2[1],mem[2,3],ymm2[4],mem[5,6],ymm2[7]
; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[0,3,2,1]
; AVX512DQ-FAST-NEXT: vpbroadcastq {{.*#+}} xmm4 = [8,9,0,0,0,1,12,13,8,9,0,0,0,1,12,13]
; AVX512DQ-FAST-NEXT: vpshufb %xmm4, %xmm3, %xmm2
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm3, %xmm26
; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm11 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
; AVX512DQ-FAST-NEXT: vpshufb %xmm11, %xmm5, %xmm3
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm5, %xmm28
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512DQ-FAST-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm22
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm5 = mem[0],ymm1[1],mem[2,3,4,5],ymm1[6],mem[7]
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm12 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
; AVX512DQ-FAST-NEXT: vpshufb %xmm0, %xmm12, %xmm1
; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm12, %xmm10
; AVX512DQ-FAST-NEXT: vpshuflw {{.*#+}} xmm2 = xmm10[2,2,2,2,4,5,6,7]
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
; AVX512DQ-FAST-NEXT: vmovdqa64 {{.*#+}} ymm20 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,4,5,0,1,12,13,24,25,20,21,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX512DQ-FAST-NEXT: vpshufb %ymm1, %ymm5, %ymm3
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm5, %ymm18
; AVX512DQ-FAST-NEXT: vpternlogq $236, %ymm20, %ymm3, %ymm2
; AVX512DQ-FAST-NEXT: movw $31, %ax
; AVX512DQ-FAST-NEXT: kmovw %eax, %k1
; AVX512DQ-FAST-NEXT: vinserti32x8 $0, %ymm2, %zmm0, %zmm22 {%k1}
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm30, %ymm2
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm31, %ymm3
; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0],ymm2[1],ymm3[2,3,4,5],ymm2[6],ymm3[7]
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm2 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm6 = xmm2[0,3,2,1]
; AVX512DQ-FAST-NEXT: vpshufb %xmm4, %xmm3, %xmm2
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm3, %xmm19
; AVX512DQ-FAST-NEXT: vpshufb %xmm11, %xmm6, %xmm3
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm6, %xmm30
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
; AVX512DQ-FAST-NEXT: vpshufb %ymm6, %ymm5, %ymm3
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm6, %ymm25
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm5, %ymm31
; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7]
; AVX512DQ-FAST-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm23
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm5 = ymm2[0],mem[1],ymm2[2,3,4,5],mem[6],ymm2[7]
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm15 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm15 = ymm2[0,1],mem[2],ymm2[3,4],mem[5],ymm2[6,7]
; AVX512DQ-FAST-NEXT: vpshufb %xmm0, %xmm15, %xmm2
; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm15, %xmm6
; AVX512DQ-FAST-NEXT: vpshuflw {{.*#+}} xmm3 = xmm6[2,2,2,2,4,5,6,7]
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm6, %xmm17
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
; AVX512DQ-FAST-NEXT: vpshufb %ymm1, %ymm5, %ymm1
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm5, %ymm16
; AVX512DQ-FAST-NEXT: vpternlogq $236, %ymm20, %ymm1, %ymm3
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm2 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
; AVX512DQ-FAST-NEXT: vpshufb %xmm0, %xmm2, %xmm0
; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm2, %xmm9
; AVX512DQ-FAST-NEXT: vpshuflw {{.*#+}} xmm1 = xmm9[2,2,2,2,4,5,6,7]
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm1 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm1, %xmm8
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[0,3,2,1]
; AVX512DQ-FAST-NEXT: vpshufb %xmm4, %xmm8, %xmm1
; AVX512DQ-FAST-NEXT: vpshufb %xmm11, %xmm7, %xmm5
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm5[0,1,2,3],xmm1[4],xmm5[5],xmm1[6,7]
; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512DQ-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm24
; AVX512DQ-FAST-NEXT: vinserti32x8 $0, %ymm3, %zmm0, %zmm24 {%k1}
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm6 = mem[0],ymm0[1],mem[2,3,4,5],ymm0[6],mem[7]
; AVX512DQ-FAST-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512DQ-FAST-NEXT: vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX512DQ-FAST-NEXT: # ymm0 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
; AVX512DQ-FAST-NEXT: vextracti128 $1, %ymm0, %xmm5
; AVX512DQ-FAST-NEXT: vpshufb %xmm4, %xmm5, %xmm4
; AVX512DQ-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[0,3,2,1]
; AVX512DQ-FAST-NEXT: vpshufb %xmm11, %xmm3, %xmm0
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4],xmm0[5],xmm4[6,7]
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm25, %ymm1
; AVX512DQ-FAST-NEXT: vpshufb %ymm1, %ymm6, %ymm4
; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6,7]
; AVX512DQ-FAST-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm25
; AVX512DQ-FAST-NEXT: vpbroadcastd {{.*#+}} xmm1 = [10,11,6,7,10,11,6,7,10,11,6,7,10,11,6,7]
; AVX512DQ-FAST-NEXT: vpshufb %xmm1, %xmm10, %xmm11
; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm10 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm12, %xmm12
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm11 = xmm12[0],xmm11[1],xmm12[2,3],xmm11[4],xmm12[5,6,7]
; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm18, %ymm0
; AVX512DQ-FAST-NEXT: vpshufb %ymm4, %ymm0, %ymm12
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm4, %ymm18
; AVX512DQ-FAST-NEXT: vpternlogq $236, %ymm20, %ymm12, %ymm11
; AVX512DQ-FAST-NEXT: vpshufb %xmm1, %xmm14, %xmm12
; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm13, %xmm13
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm13[0],xmm12[1],xmm13[2,3],xmm12[4],xmm13[5,6,7]
; AVX512DQ-FAST-NEXT: vpbroadcastq {{.*#+}} xmm12 = [10,11,0,0,2,3,14,15,10,11,0,0,2,3,14,15]
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm26, %xmm4
; AVX512DQ-FAST-NEXT: vpshufb %xmm12, %xmm4, %xmm4
; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} xmm13 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm28, %xmm14
; AVX512DQ-FAST-NEXT: vpshufb %xmm13, %xmm14, %xmm14
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm14[0,1,2,3],xmm4[4],xmm14[5],xmm4[6,7]
; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
; AVX512DQ-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm4, %zmm26
; AVX512DQ-FAST-NEXT: vinserti32x8 $0, %ymm11, %zmm0, %zmm26 {%k1}
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm19, %xmm0
; AVX512DQ-FAST-NEXT: vpshufb %xmm12, %xmm0, %xmm4
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm30, %xmm0
; AVX512DQ-FAST-NEXT: vpshufb %xmm13, %xmm0, %xmm11
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm4 = xmm11[0,1,2,3],xmm4[4],xmm11[5],xmm4[6,7]
; AVX512DQ-FAST-NEXT: vmovdqa {{.*#+}} ymm11 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm31, %ymm0
; AVX512DQ-FAST-NEXT: vpshufb %ymm11, %ymm0, %ymm14
; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm14[0,1,2,3,4],ymm4[5,6,7]
; AVX512DQ-FAST-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm4
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm16, %ymm0
; AVX512DQ-FAST-NEXT: vmovdqa64 %ymm18, %ymm14
; AVX512DQ-FAST-NEXT: vpshufb %ymm14, %ymm0, %ymm14
; AVX512DQ-FAST-NEXT: vmovdqa64 %xmm17, %xmm0
; AVX512DQ-FAST-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm15, %xmm15
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm15[0],xmm0[1],xmm15[2,3],xmm0[4],xmm15[5,6,7]
; AVX512DQ-FAST-NEXT: vpternlogq $236, %ymm20, %ymm14, %ymm0
; AVX512DQ-FAST-NEXT: vpshufb %xmm1, %xmm9, %xmm1
; AVX512DQ-FAST-NEXT: vpshufb %xmm10, %xmm2, %xmm2
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6,7]
; AVX512DQ-FAST-NEXT: vpshufb %xmm12, %xmm8, %xmm2
; AVX512DQ-FAST-NEXT: vpshufb %xmm13, %xmm7, %xmm7
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm7[0,1,2,3],xmm2[4],xmm7[5],xmm2[6,7]
; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512DQ-FAST-NEXT: vinserti32x4 $2, %xmm1, %zmm2, %zmm1
; AVX512DQ-FAST-NEXT: vinserti32x8 $0, %ymm0, %zmm0, %zmm1 {%k1}
; AVX512DQ-FAST-NEXT: vpshufb %ymm11, %ymm6, %ymm0
; AVX512DQ-FAST-NEXT: vpshufb %xmm12, %xmm5, %xmm2
; AVX512DQ-FAST-NEXT: vpshufb %xmm13, %xmm3, %xmm3
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
; AVX512DQ-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512DQ-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
; AVX512DQ-FAST-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512DQ-FAST-NEXT: vmovaps %zmm2, (%rsi)
; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512DQ-FAST-NEXT: vmovaps %zmm2, 64(%rsi)
; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512DQ-FAST-NEXT: vmovaps %zmm2, 64(%rdx)
; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512DQ-FAST-NEXT: vmovaps %zmm2, (%rdx)
; AVX512DQ-FAST-NEXT: vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
; AVX512DQ-FAST-NEXT: vpternlogq $184, %zmm22, %zmm2, %zmm23
; AVX512DQ-FAST-NEXT: vpternlogq $184, %zmm24, %zmm2, %zmm25
; AVX512DQ-FAST-NEXT: vpternlogq $184, %zmm26, %zmm2, %zmm4
; AVX512DQ-FAST-NEXT: vpternlogq $184, %zmm1, %zmm2, %zmm0
; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm29, 64(%rcx)
; AVX512DQ-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-FAST-NEXT: vmovaps %zmm1, (%rcx)
; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm21, 64(%r8)
; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm27, (%r8)
; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm25, 64(%r9)
; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm23, (%r9)
; AVX512DQ-FAST-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm0, 64(%rax)
; AVX512DQ-FAST-NEXT: vmovdqa64 %zmm4, (%rax)
; AVX512DQ-FAST-NEXT: addq $904, %rsp # imm = 0x388
; AVX512DQ-FAST-NEXT: vzeroupper
; AVX512DQ-FAST-NEXT: retq
;
; AVX512BW-LABEL: load_i16_stride6_vf64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512BW-NEXT: vmovdqa64 448(%rdi), %zmm3
; AVX512BW-NEXT: vmovdqa64 384(%rdi), %zmm2
; AVX512BW-NEXT: vmovdqa64 512(%rdi), %zmm0
; AVX512BW-NEXT: vmovdqa64 576(%rdi), %zmm5
; AVX512BW-NEXT: vmovdqa64 704(%rdi), %zmm4
; AVX512BW-NEXT: vmovdqa64 640(%rdi), %zmm6
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm10
; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm13
; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm1
; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm12
; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm9
; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm11
; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [0,0,0,0,0,0,4,10,16,22,28,34,40,46,52,58,0,0,0,0,0,0,4,10,16,22,28,34,40,46,52,58]
; AVX512BW-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm15
; AVX512BW-NEXT: vpermt2w %zmm9, %zmm14, %zmm15
; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [0,6,12,18,24,30,0,0,0,0,0,34,40,46,52,58,0,6,12,18,24,30,0,0,0,0,0,34,40,46,52,58]
; AVX512BW-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm17
; AVX512BW-NEXT: vpermt2w %zmm1, %zmm16, %zmm17
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm7 = <0,6,12,18,24,30,36,42,48,54,60,u,u,u,u,u>
; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm8
; AVX512BW-NEXT: vpermt2w %zmm13, %zmm7, %zmm8
; AVX512BW-NEXT: movl $4192256, %edi # imm = 0x3FF800
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vmovdqu16 %zmm17, %zmm8 {%k1}
; AVX512BW-NEXT: movw $-2048, %di # imm = 0xF800
; AVX512BW-NEXT: kmovd %edi, %k2
; AVX512BW-NEXT: vmovdqa32 %zmm15, %zmm8 {%k2}
; AVX512BW-NEXT: vpermi2w %zmm4, %zmm6, %zmm14
; AVX512BW-NEXT: vpermi2w %zmm0, %zmm5, %zmm16
; AVX512BW-NEXT: vpermi2w %zmm3, %zmm2, %zmm7
; AVX512BW-NEXT: vmovdqu16 %zmm16, %zmm7 {%k1}
; AVX512BW-NEXT: vmovdqa32 %zmm14, %zmm7 {%k2}
; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm16 = [0,0,0,0,0,0,5,11,17,23,29,35,41,47,53,59,0,0,0,0,0,0,5,11,17,23,29,35,41,47,53,59]
; AVX512BW-NEXT: # zmm16 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm17
; AVX512BW-NEXT: vpermt2w %zmm9, %zmm16, %zmm17
; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm18 = [1,7,13,19,25,31,0,0,0,0,0,35,41,47,53,59,1,7,13,19,25,31,0,0,0,0,0,35,41,47,53,59]
; AVX512BW-NEXT: # zmm18 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm19
; AVX512BW-NEXT: vpermt2w %zmm1, %zmm18, %zmm19
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm14 = <1,7,13,19,25,31,37,43,49,55,61,u,u,u,u,u>
; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm15
; AVX512BW-NEXT: vpermt2w %zmm13, %zmm14, %zmm15
; AVX512BW-NEXT: vmovdqu16 %zmm19, %zmm15 {%k1}
; AVX512BW-NEXT: vmovdqa32 %zmm17, %zmm15 {%k2}
; AVX512BW-NEXT: vpermi2w %zmm4, %zmm6, %zmm16
; AVX512BW-NEXT: vpermi2w %zmm0, %zmm5, %zmm18
; AVX512BW-NEXT: vpermi2w %zmm3, %zmm2, %zmm14
; AVX512BW-NEXT: vmovdqu16 %zmm18, %zmm14 {%k1}
; AVX512BW-NEXT: vmovdqa32 %zmm16, %zmm14 {%k2}
; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm18 = [0,0,0,0,0,0,6,12,18,24,30,36,42,48,54,60,0,0,0,0,0,0,6,12,18,24,30,36,42,48,54,60]
; AVX512BW-NEXT: # zmm18 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm19
; AVX512BW-NEXT: vpermt2w %zmm9, %zmm18, %zmm19
; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm20 = [34,40,46,52,58,0,0,0,0,0,0,4,10,16,22,28,34,40,46,52,58,0,0,0,0,0,0,4,10,16,22,28]
; AVX512BW-NEXT: # zmm20 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm21
; AVX512BW-NEXT: vpermt2w %zmm12, %zmm20, %zmm21
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm16 = <34,40,46,52,58,0,6,12,18,24,30,u,u,u,u,u>
; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm17
; AVX512BW-NEXT: vpermt2w %zmm10, %zmm16, %zmm17
; AVX512BW-NEXT: movl $2095104, %edi # imm = 0x1FF800
; AVX512BW-NEXT: kmovd %edi, %k2
; AVX512BW-NEXT: vmovdqu16 %zmm21, %zmm17 {%k2}
; AVX512BW-NEXT: movl $-2097152, %edi # imm = 0xFFE00000
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vmovdqu16 %zmm19, %zmm17 {%k1}
; AVX512BW-NEXT: vpermi2w %zmm4, %zmm6, %zmm18
; AVX512BW-NEXT: vpermi2w %zmm5, %zmm0, %zmm20
; AVX512BW-NEXT: vpermi2w %zmm2, %zmm3, %zmm16
; AVX512BW-NEXT: vmovdqu16 %zmm20, %zmm16 {%k2}
; AVX512BW-NEXT: vmovdqu16 %zmm18, %zmm16 {%k1}
; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm20 = [0,0,0,0,0,1,7,13,19,25,31,37,43,49,55,61,0,0,0,0,0,1,7,13,19,25,31,37,43,49,55,61]
; AVX512BW-NEXT: # zmm20 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm21
; AVX512BW-NEXT: vpermt2w %zmm9, %zmm20, %zmm21
; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm22 = [35,41,47,53,59,0,0,0,0,0,0,5,11,17,23,29,35,41,47,53,59,0,0,0,0,0,0,5,11,17,23,29]
; AVX512BW-NEXT: # zmm22 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm23
; AVX512BW-NEXT: vpermt2w %zmm12, %zmm22, %zmm23
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm18 = <35,41,47,53,59,1,7,13,19,25,31,u,u,u,u,u>
; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm19
; AVX512BW-NEXT: vpermt2w %zmm10, %zmm18, %zmm19
; AVX512BW-NEXT: vmovdqu16 %zmm23, %zmm19 {%k2}
; AVX512BW-NEXT: vmovdqu16 %zmm21, %zmm19 {%k1}
; AVX512BW-NEXT: vpermi2w %zmm4, %zmm6, %zmm20
; AVX512BW-NEXT: vpermi2w %zmm5, %zmm0, %zmm22
; AVX512BW-NEXT: vpermi2w %zmm2, %zmm3, %zmm18
; AVX512BW-NEXT: vmovdqu16 %zmm22, %zmm18 {%k2}
; AVX512BW-NEXT: vmovdqu16 %zmm20, %zmm18 {%k1}
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm20 = <4,10,16,22,28,34,40,46,52,58,u,u,u,u,u,u>
; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm21
; AVX512BW-NEXT: vpermt2w %zmm13, %zmm20, %zmm21
; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm22 = [36,42,48,54,60,0,0,0,0,0,0,6,12,18,24,30,36,42,48,54,60,0,0,0,0,0,0,6,12,18,24,30]
; AVX512BW-NEXT: # zmm22 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm23
; AVX512BW-NEXT: vpermt2w %zmm12, %zmm22, %zmm23
; AVX512BW-NEXT: movw $31, %di
; AVX512BW-NEXT: kmovd %edi, %k2
; AVX512BW-NEXT: vmovdqa32 %zmm21, %zmm23 {%k2}
; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm21 = [0,0,0,0,0,34,40,46,52,58,0,6,12,18,24,30,0,0,0,0,0,34,40,46,52,58,0,6,12,18,24,30]
; AVX512BW-NEXT: # zmm21 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm24
; AVX512BW-NEXT: vpermt2w %zmm11, %zmm21, %zmm24
; AVX512BW-NEXT: vmovdqu16 %zmm24, %zmm23 {%k1}
; AVX512BW-NEXT: vpermi2w %zmm6, %zmm4, %zmm21
; AVX512BW-NEXT: vpermi2w %zmm5, %zmm0, %zmm22
; AVX512BW-NEXT: vpermi2w %zmm3, %zmm2, %zmm20
; AVX512BW-NEXT: vmovdqa32 %zmm20, %zmm22 {%k2}
; AVX512BW-NEXT: vmovdqu16 %zmm21, %zmm22 {%k1}
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm20 = <5,11,17,23,29,35,41,47,53,59,u,u,u,u,u,u>
; AVX512BW-NEXT: vpermt2w %zmm13, %zmm20, %zmm10
; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [37,43,49,55,61,0,0,0,0,0,1,7,13,19,25,31,37,43,49,55,61,0,0,0,0,0,1,7,13,19,25,31]
; AVX512BW-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpermt2w %zmm12, %zmm13, %zmm1
; AVX512BW-NEXT: vmovdqa32 %zmm10, %zmm1 {%k2}
; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [0,0,0,0,0,35,41,47,53,59,1,7,13,19,25,31,0,0,0,0,0,35,41,47,53,59,1,7,13,19,25,31]
; AVX512BW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpermt2w %zmm11, %zmm10, %zmm9
; AVX512BW-NEXT: vmovdqu16 %zmm9, %zmm1 {%k1}
; AVX512BW-NEXT: vpermt2w %zmm6, %zmm10, %zmm4
; AVX512BW-NEXT: vpermt2w %zmm5, %zmm13, %zmm0
; AVX512BW-NEXT: vpermt2w %zmm3, %zmm20, %zmm2
; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm0 {%k2}
; AVX512BW-NEXT: vmovdqu16 %zmm4, %zmm0 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm7, 64(%rsi)
; AVX512BW-NEXT: vmovdqa64 %zmm8, (%rsi)
; AVX512BW-NEXT: vmovdqa64 %zmm14, 64(%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm15, (%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm16, 64(%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm17, (%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm18, 64(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm19, (%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm22, 64(%r9)
; AVX512BW-NEXT: vmovdqa64 %zmm23, (%r9)
; AVX512BW-NEXT: vmovdqa64 %zmm0, 64(%rax)
; AVX512BW-NEXT: vmovdqa64 %zmm1, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%wide.vec = load <384 x i16>, ptr %in.vec, align 64
%strided.vec0 = shufflevector <384 x i16> %wide.vec, <384 x i16> poison, <64 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42, i32 48, i32 54, i32 60, i32 66, i32 72, i32 78, i32 84, i32 90, i32 96, i32 102, i32 108, i32 114, i32 120, i32 126, i32 132, i32 138, i32 144, i32 150, i32 156, i32 162, i32 168, i32 174, i32 180, i32 186, i32 192, i32 198, i32 204, i32 210, i32 216, i32 222, i32 228, i32 234, i32 240, i32 246, i32 252, i32 258, i32 264, i32 270, i32 276, i32 282, i32 288, i32 294, i32 300, i32 306, i32 312, i32 318, i32 324, i32 330, i32 336, i32 342, i32 348, i32 354, i32 360, i32 366, i32 372, i32 378>
%strided.vec1 = shufflevector <384 x i16> %wide.vec, <384 x i16> poison, <64 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43, i32 49, i32 55, i32 61, i32 67, i32 73, i32 79, i32 85, i32 91, i32 97, i32 103, i32 109, i32 115, i32 121, i32 127, i32 133, i32 139, i32 145, i32 151, i32 157, i32 163, i32 169, i32 175, i32 181, i32 187, i32 193, i32 199, i32 205, i32 211, i32 217, i32 223, i32 229, i32 235, i32 241, i32 247, i32 253, i32 259, i32 265, i32 271, i32 277, i32 283, i32 289, i32 295, i32 301, i32 307, i32 313, i32 319, i32 325, i32 331, i32 337, i32 343, i32 349, i32 355, i32 361, i32 367, i32 373, i32 379>
%strided.vec2 = shufflevector <384 x i16> %wide.vec, <384 x i16> poison, <64 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44, i32 50, i32 56, i32 62, i32 68, i32 74, i32 80, i32 86, i32 92, i32 98, i32 104, i32 110, i32 116, i32 122, i32 128, i32 134, i32 140, i32 146, i32 152, i32 158, i32 164, i32 170, i32 176, i32 182, i32 188, i32 194, i32 200, i32 206, i32 212, i32 218, i32 224, i32 230, i32 236, i32 242, i32 248, i32 254, i32 260, i32 266, i32 272, i32 278, i32 284, i32 290, i32 296, i32 302, i32 308, i32 314, i32 320, i32 326, i32 332, i32 338, i32 344, i32 350, i32 356, i32 362, i32 368, i32 374, i32 380>
%strided.vec3 = shufflevector <384 x i16> %wide.vec, <384 x i16> poison, <64 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45, i32 51, i32 57, i32 63, i32 69, i32 75, i32 81, i32 87, i32 93, i32 99, i32 105, i32 111, i32 117, i32 123, i32 129, i32 135, i32 141, i32 147, i32 153, i32 159, i32 165, i32 171, i32 177, i32 183, i32 189, i32 195, i32 201, i32 207, i32 213, i32 219, i32 225, i32 231, i32 237, i32 243, i32 249, i32 255, i32 261, i32 267, i32 273, i32 279, i32 285, i32 291, i32 297, i32 303, i32 309, i32 315, i32 321, i32 327, i32 333, i32 339, i32 345, i32 351, i32 357, i32 363, i32 369, i32 375, i32 381>
%strided.vec4 = shufflevector <384 x i16> %wide.vec, <384 x i16> poison, <64 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46, i32 52, i32 58, i32 64, i32 70, i32 76, i32 82, i32 88, i32 94, i32 100, i32 106, i32 112, i32 118, i32 124, i32 130, i32 136, i32 142, i32 148, i32 154, i32 160, i32 166, i32 172, i32 178, i32 184, i32 190, i32 196, i32 202, i32 208, i32 214, i32 220, i32 226, i32 232, i32 238, i32 244, i32 250, i32 256, i32 262, i32 268, i32 274, i32 280, i32 286, i32 292, i32 298, i32 304, i32 310, i32 316, i32 322, i32 328, i32 334, i32 340, i32 346, i32 352, i32 358, i32 364, i32 370, i32 376, i32 382>
%strided.vec5 = shufflevector <384 x i16> %wide.vec, <384 x i16> poison, <64 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47, i32 53, i32 59, i32 65, i32 71, i32 77, i32 83, i32 89, i32 95, i32 101, i32 107, i32 113, i32 119, i32 125, i32 131, i32 137, i32 143, i32 149, i32 155, i32 161, i32 167, i32 173, i32 179, i32 185, i32 191, i32 197, i32 203, i32 209, i32 215, i32 221, i32 227, i32 233, i32 239, i32 245, i32 251, i32 257, i32 263, i32 269, i32 275, i32 281, i32 287, i32 293, i32 299, i32 305, i32 311, i32 317, i32 323, i32 329, i32 335, i32 341, i32 347, i32 353, i32 359, i32 365, i32 371, i32 377, i32 383>
store <64 x i16> %strided.vec0, ptr %out.vec0, align 64
store <64 x i16> %strided.vec1, ptr %out.vec1, align 64
store <64 x i16> %strided.vec2, ptr %out.vec2, align 64
store <64 x i16> %strided.vec3, ptr %out.vec3, align 64
store <64 x i16> %strided.vec4, ptr %out.vec4, align 64
store <64 x i16> %strided.vec5, ptr %out.vec5, align 64
ret void
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; AVX2-ONLY: {{.*}}
; AVX512-FAST: {{.*}}
; AVX512BW-ONLY-FAST: {{.*}}
; AVX512BW-ONLY-SLOW: {{.*}}
; AVX512BW-SLOW: {{.*}}
; AVX512DQBW-FAST: {{.*}}
; AVX512DQBW-SLOW: {{.*}}
; FALLBACK0: {{.*}}
; FALLBACK1: {{.*}}
; FALLBACK10: {{.*}}
; FALLBACK11: {{.*}}
; FALLBACK12: {{.*}}
; FALLBACK2: {{.*}}
; FALLBACK3: {{.*}}
; FALLBACK4: {{.*}}
; FALLBACK5: {{.*}}
; FALLBACK6: {{.*}}
; FALLBACK7: {{.*}}
; FALLBACK8: {{.*}}
; FALLBACK9: {{.*}}