1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
3 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
5 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
6 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512BW
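; These tests check that the "(zext(a) + zext(b) + 1) >> 1" idiom over i8/i16
; vectors is recognized and lowered to the averaging instructions
; (pavgb/pavgw on SSE2, vpavgb/vpavgw on the AVX/AVX-512 targets) at every
; vector width exercised below.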
8 define void @avg_v4i8(ptr %a, ptr %b) nounwind {
9 ; SSE2-LABEL: avg_v4i8:
11 ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
12 ; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
13 ; SSE2-NEXT: pavgb %xmm0, %xmm1
14 ; SSE2-NEXT: movd %xmm1, (%rax)
17 ; AVX-LABEL: avg_v4i8:
19 ; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
20 ; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
21 ; AVX-NEXT: vpavgb %xmm0, %xmm1, %xmm0
22 ; AVX-NEXT: vmovd %xmm0, (%rax)
24 %1 = load <4 x i8>, ptr %a
25 %2 = load <4 x i8>, ptr %b
26 %3 = zext <4 x i8> %1 to <4 x i32>
27 %4 = zext <4 x i8> %2 to <4 x i32>
28 %5 = add nuw nsw <4 x i32> %3, <i32 1, i32 1, i32 1, i32 1>
29 %6 = add nuw nsw <4 x i32> %5, %4
30 %7 = lshr <4 x i32> %6, <i32 1, i32 1, i32 1, i32 1>
31 %8 = trunc <4 x i32> %7 to <4 x i8>
32 store <4 x i8> %8, ptr undef, align 4
36 define void @avg_v8i8(ptr %a, ptr %b) nounwind {
37 ; SSE2-LABEL: avg_v8i8:
39 ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
40 ; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
41 ; SSE2-NEXT: pavgb %xmm0, %xmm1
42 ; SSE2-NEXT: movq %xmm1, (%rax)
45 ; AVX-LABEL: avg_v8i8:
47 ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
48 ; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
49 ; AVX-NEXT: vpavgb %xmm0, %xmm1, %xmm0
50 ; AVX-NEXT: vmovq %xmm0, (%rax)
52 %1 = load <8 x i8>, ptr %a
53 %2 = load <8 x i8>, ptr %b
54 %3 = zext <8 x i8> %1 to <8 x i32>
55 %4 = zext <8 x i8> %2 to <8 x i32>
56 %5 = add nuw nsw <8 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
57 %6 = add nuw nsw <8 x i32> %5, %4
58 %7 = lshr <8 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
59 %8 = trunc <8 x i32> %7 to <8 x i8>
60 store <8 x i8> %8, ptr undef, align 4
64 define void @avg_v16i8(ptr %a, ptr %b) nounwind {
65 ; SSE2-LABEL: avg_v16i8:
67 ; SSE2-NEXT: movdqa (%rdi), %xmm0
68 ; SSE2-NEXT: pavgb (%rsi), %xmm0
69 ; SSE2-NEXT: movdqu %xmm0, (%rax)
72 ; AVX-LABEL: avg_v16i8:
74 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
75 ; AVX-NEXT: vpavgb (%rsi), %xmm0, %xmm0
76 ; AVX-NEXT: vmovdqu %xmm0, (%rax)
78 %1 = load <16 x i8>, ptr %a
79 %2 = load <16 x i8>, ptr %b
80 %3 = zext <16 x i8> %1 to <16 x i32>
81 %4 = zext <16 x i8> %2 to <16 x i32>
82 %5 = add nuw nsw <16 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
83 %6 = add nuw nsw <16 x i32> %5, %4
84 %7 = lshr <16 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
85 %8 = trunc <16 x i32> %7 to <16 x i8>
86 store <16 x i8> %8, ptr undef, align 4
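; avg_v24i8 and avg_v48i8 use non-power-of-two element counts, so the average
; is split across register-sized chunks, with a narrower store for the tail.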
90 define void @avg_v24i8(ptr %a, ptr %b) nounwind {
91 ; SSE2-LABEL: avg_v24i8:
93 ; SSE2-NEXT: movdqa (%rsi), %xmm0
94 ; SSE2-NEXT: movdqa 16(%rsi), %xmm1
95 ; SSE2-NEXT: pavgb (%rdi), %xmm0
96 ; SSE2-NEXT: pavgb 16(%rdi), %xmm1
97 ; SSE2-NEXT: movq %xmm1, (%rax)
98 ; SSE2-NEXT: movdqu %xmm0, (%rax)
101 ; AVX1-LABEL: avg_v24i8:
103 ; AVX1-NEXT: vmovdqa (%rsi), %xmm0
104 ; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1
105 ; AVX1-NEXT: vpavgb (%rdi), %xmm0, %xmm0
106 ; AVX1-NEXT: vpavgb 16(%rdi), %xmm1, %xmm1
107 ; AVX1-NEXT: vmovq %xmm1, (%rax)
108 ; AVX1-NEXT: vmovdqu %xmm0, (%rax)
111 ; AVX2-LABEL: avg_v24i8:
113 ; AVX2-NEXT: vmovdqa (%rsi), %ymm0
114 ; AVX2-NEXT: vpavgb (%rdi), %ymm0, %ymm0
115 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
116 ; AVX2-NEXT: vmovq %xmm1, (%rax)
117 ; AVX2-NEXT: vmovdqu %xmm0, (%rax)
118 ; AVX2-NEXT: vzeroupper
121 ; AVX512-LABEL: avg_v24i8:
123 ; AVX512-NEXT: vmovdqa (%rsi), %ymm0
124 ; AVX512-NEXT: vpavgb (%rdi), %ymm0, %ymm0
125 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
126 ; AVX512-NEXT: vmovq %xmm1, (%rax)
127 ; AVX512-NEXT: vmovdqu %xmm0, (%rax)
128 ; AVX512-NEXT: vzeroupper
130 %1 = load <24 x i8>, ptr %a
131 %2 = load <24 x i8>, ptr %b
132 %3 = zext <24 x i8> %1 to <24 x i32>
133 %4 = zext <24 x i8> %2 to <24 x i32>
134 %5 = add nuw nsw <24 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
135 %6 = add nuw nsw <24 x i32> %5, %4
136 %7 = lshr <24 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
137 %8 = trunc <24 x i32> %7 to <24 x i8>
138 store <24 x i8> %8, ptr undef, align 4
142 define void @avg_v32i8(ptr %a, ptr %b) nounwind {
143 ; SSE2-LABEL: avg_v32i8:
145 ; SSE2-NEXT: movdqa (%rsi), %xmm0
146 ; SSE2-NEXT: movdqa 16(%rsi), %xmm1
147 ; SSE2-NEXT: pavgb (%rdi), %xmm0
148 ; SSE2-NEXT: pavgb 16(%rdi), %xmm1
149 ; SSE2-NEXT: movdqu %xmm1, (%rax)
150 ; SSE2-NEXT: movdqu %xmm0, (%rax)
153 ; AVX1-LABEL: avg_v32i8:
155 ; AVX1-NEXT: vmovdqa (%rdi), %xmm0
156 ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1
157 ; AVX1-NEXT: vpavgb (%rsi), %xmm0, %xmm0
158 ; AVX1-NEXT: vpavgb 16(%rsi), %xmm1, %xmm1
159 ; AVX1-NEXT: vmovdqu %xmm1, (%rax)
160 ; AVX1-NEXT: vmovdqu %xmm0, (%rax)
163 ; AVX2-LABEL: avg_v32i8:
165 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
166 ; AVX2-NEXT: vpavgb (%rsi), %ymm0, %ymm0
167 ; AVX2-NEXT: vmovdqu %ymm0, (%rax)
168 ; AVX2-NEXT: vzeroupper
171 ; AVX512-LABEL: avg_v32i8:
173 ; AVX512-NEXT: vmovdqa (%rdi), %ymm0
174 ; AVX512-NEXT: vpavgb (%rsi), %ymm0, %ymm0
175 ; AVX512-NEXT: vmovdqu %ymm0, (%rax)
176 ; AVX512-NEXT: vzeroupper
178 %1 = load <32 x i8>, ptr %a
179 %2 = load <32 x i8>, ptr %b
180 %3 = zext <32 x i8> %1 to <32 x i32>
181 %4 = zext <32 x i8> %2 to <32 x i32>
182 %5 = add nuw nsw <32 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
183 %6 = add nuw nsw <32 x i32> %5, %4
184 %7 = lshr <32 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
185 %8 = trunc <32 x i32> %7 to <32 x i8>
186 store <32 x i8> %8, ptr undef, align 4
190 define void @avg_v48i8(ptr %a, ptr %b) nounwind {
191 ; SSE2-LABEL: avg_v48i8:
193 ; SSE2-NEXT: movdqa (%rsi), %xmm0
194 ; SSE2-NEXT: movdqa 16(%rsi), %xmm1
195 ; SSE2-NEXT: movdqa 32(%rsi), %xmm2
196 ; SSE2-NEXT: pavgb (%rdi), %xmm0
197 ; SSE2-NEXT: pavgb 16(%rdi), %xmm1
198 ; SSE2-NEXT: pavgb 32(%rdi), %xmm2
199 ; SSE2-NEXT: movdqu %xmm2, (%rax)
200 ; SSE2-NEXT: movdqu %xmm1, (%rax)
201 ; SSE2-NEXT: movdqu %xmm0, (%rax)
204 ; AVX1-LABEL: avg_v48i8:
206 ; AVX1-NEXT: vmovdqa (%rsi), %xmm0
207 ; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1
208 ; AVX1-NEXT: vmovdqa 32(%rsi), %xmm2
209 ; AVX1-NEXT: vpavgb 32(%rdi), %xmm2, %xmm2
210 ; AVX1-NEXT: vpavgb (%rdi), %xmm0, %xmm0
211 ; AVX1-NEXT: vpavgb 16(%rdi), %xmm1, %xmm1
212 ; AVX1-NEXT: vmovdqu %xmm1, (%rax)
213 ; AVX1-NEXT: vmovdqu %xmm0, (%rax)
214 ; AVX1-NEXT: vmovdqu %xmm2, (%rax)
217 ; AVX2-LABEL: avg_v48i8:
219 ; AVX2-NEXT: vmovdqa (%rsi), %ymm0
220 ; AVX2-NEXT: vpavgb (%rdi), %ymm0, %ymm0
221 ; AVX2-NEXT: vmovdqa 32(%rsi), %xmm1
222 ; AVX2-NEXT: vpavgb 32(%rdi), %xmm1, %xmm1
223 ; AVX2-NEXT: vmovdqu %xmm1, (%rax)
224 ; AVX2-NEXT: vmovdqu %ymm0, (%rax)
225 ; AVX2-NEXT: vzeroupper
228 ; AVX512F-LABEL: avg_v48i8:
230 ; AVX512F-NEXT: vmovdqa (%rsi), %ymm0
231 ; AVX512F-NEXT: vpavgb (%rdi), %ymm0, %ymm0
232 ; AVX512F-NEXT: vmovdqa 32(%rsi), %xmm1
233 ; AVX512F-NEXT: vpavgb 32(%rdi), %xmm1, %xmm1
234 ; AVX512F-NEXT: vmovdqu %xmm1, (%rax)
235 ; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
236 ; AVX512F-NEXT: vzeroupper
239 ; AVX512BW-LABEL: avg_v48i8:
241 ; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0
242 ; AVX512BW-NEXT: vpavgb (%rdi), %zmm0, %zmm0
243 ; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, (%rax)
244 ; AVX512BW-NEXT: vmovdqu %ymm0, (%rax)
245 ; AVX512BW-NEXT: vzeroupper
246 ; AVX512BW-NEXT: retq
247 %1 = load <48 x i8>, ptr %a
248 %2 = load <48 x i8>, ptr %b
249 %3 = zext <48 x i8> %1 to <48 x i32>
250 %4 = zext <48 x i8> %2 to <48 x i32>
251 %5 = add nuw nsw <48 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
252 %6 = add nuw nsw <48 x i32> %5, %4
253 %7 = lshr <48 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
254 %8 = trunc <48 x i32> %7 to <48 x i8>
255 store <48 x i8> %8, ptr undef, align 4
259 define void @avg_v64i8(ptr %a, ptr %b) nounwind {
260 ; SSE2-LABEL: avg_v64i8:
262 ; SSE2-NEXT: movdqa (%rsi), %xmm0
263 ; SSE2-NEXT: movdqa 16(%rsi), %xmm1
264 ; SSE2-NEXT: movdqa 32(%rsi), %xmm2
265 ; SSE2-NEXT: movdqa 48(%rsi), %xmm3
266 ; SSE2-NEXT: pavgb (%rdi), %xmm0
267 ; SSE2-NEXT: pavgb 16(%rdi), %xmm1
268 ; SSE2-NEXT: pavgb 32(%rdi), %xmm2
269 ; SSE2-NEXT: pavgb 48(%rdi), %xmm3
270 ; SSE2-NEXT: movdqu %xmm3, (%rax)
271 ; SSE2-NEXT: movdqu %xmm2, (%rax)
272 ; SSE2-NEXT: movdqu %xmm1, (%rax)
273 ; SSE2-NEXT: movdqu %xmm0, (%rax)
276 ; AVX1-LABEL: avg_v64i8:
278 ; AVX1-NEXT: vmovdqa (%rsi), %xmm0
279 ; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1
280 ; AVX1-NEXT: vmovdqa 32(%rsi), %xmm2
281 ; AVX1-NEXT: vmovdqa 48(%rsi), %xmm3
282 ; AVX1-NEXT: vpavgb (%rdi), %xmm0, %xmm0
283 ; AVX1-NEXT: vpavgb 16(%rdi), %xmm1, %xmm1
284 ; AVX1-NEXT: vpavgb 32(%rdi), %xmm2, %xmm2
285 ; AVX1-NEXT: vpavgb 48(%rdi), %xmm3, %xmm3
286 ; AVX1-NEXT: vmovdqu %xmm3, (%rax)
287 ; AVX1-NEXT: vmovdqu %xmm2, (%rax)
288 ; AVX1-NEXT: vmovdqu %xmm1, (%rax)
289 ; AVX1-NEXT: vmovdqu %xmm0, (%rax)
292 ; AVX2-LABEL: avg_v64i8:
294 ; AVX2-NEXT: vmovdqa (%rsi), %ymm0
295 ; AVX2-NEXT: vmovdqa 32(%rsi), %ymm1
296 ; AVX2-NEXT: vpavgb (%rdi), %ymm0, %ymm0
297 ; AVX2-NEXT: vpavgb 32(%rdi), %ymm1, %ymm1
298 ; AVX2-NEXT: vmovdqu %ymm1, (%rax)
299 ; AVX2-NEXT: vmovdqu %ymm0, (%rax)
300 ; AVX2-NEXT: vzeroupper
303 ; AVX512F-LABEL: avg_v64i8:
305 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
306 ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
307 ; AVX512F-NEXT: vpavgb (%rsi), %ymm0, %ymm0
308 ; AVX512F-NEXT: vpavgb 32(%rsi), %ymm1, %ymm1
309 ; AVX512F-NEXT: vmovdqu %ymm1, (%rax)
310 ; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
311 ; AVX512F-NEXT: vzeroupper
314 ; AVX512BW-LABEL: avg_v64i8:
316 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
317 ; AVX512BW-NEXT: vpavgb (%rsi), %zmm0, %zmm0
318 ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax)
319 ; AVX512BW-NEXT: vzeroupper
320 ; AVX512BW-NEXT: retq
321 %1 = load <64 x i8>, ptr %a
322 %2 = load <64 x i8>, ptr %b
323 %3 = zext <64 x i8> %1 to <64 x i32>
324 %4 = zext <64 x i8> %2 to <64 x i32>
325 %5 = add nuw nsw <64 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
326 %6 = add nuw nsw <64 x i32> %5, %4
327 %7 = lshr <64 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
328 %8 = trunc <64 x i32> %7 to <64 x i8>
329 store <64 x i8> %8, ptr undef, align 4
333 define void @avg_v4i16(ptr %a, ptr %b) nounwind {
334 ; SSE2-LABEL: avg_v4i16:
336 ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
337 ; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
338 ; SSE2-NEXT: pavgw %xmm0, %xmm1
339 ; SSE2-NEXT: movq %xmm1, (%rax)
342 ; AVX-LABEL: avg_v4i16:
344 ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
345 ; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
346 ; AVX-NEXT: vpavgw %xmm0, %xmm1, %xmm0
347 ; AVX-NEXT: vmovq %xmm0, (%rax)
349 %1 = load <4 x i16>, ptr %a
350 %2 = load <4 x i16>, ptr %b
351 %3 = zext <4 x i16> %1 to <4 x i32>
352 %4 = zext <4 x i16> %2 to <4 x i32>
353 %5 = add nuw nsw <4 x i32> %3, <i32 1, i32 1, i32 1, i32 1>
354 %6 = add nuw nsw <4 x i32> %5, %4
355 %7 = lshr <4 x i32> %6, <i32 1, i32 1, i32 1, i32 1>
356 %8 = trunc <4 x i32> %7 to <4 x i16>
357 store <4 x i16> %8, ptr undef, align 4
361 define void @avg_v8i16(ptr %a, ptr %b) nounwind {
362 ; SSE2-LABEL: avg_v8i16:
364 ; SSE2-NEXT: movdqa (%rdi), %xmm0
365 ; SSE2-NEXT: pavgw (%rsi), %xmm0
366 ; SSE2-NEXT: movdqu %xmm0, (%rax)
369 ; AVX-LABEL: avg_v8i16:
371 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
372 ; AVX-NEXT: vpavgw (%rsi), %xmm0, %xmm0
373 ; AVX-NEXT: vmovdqu %xmm0, (%rax)
375 %1 = load <8 x i16>, ptr %a
376 %2 = load <8 x i16>, ptr %b
377 %3 = zext <8 x i16> %1 to <8 x i32>
378 %4 = zext <8 x i16> %2 to <8 x i32>
379 %5 = add nuw nsw <8 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
380 %6 = add nuw nsw <8 x i32> %5, %4
381 %7 = lshr <8 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
382 %8 = trunc <8 x i32> %7 to <8 x i16>
383 store <8 x i16> %8, ptr undef, align 4
387 define void @avg_v16i16(ptr %a, ptr %b) nounwind {
388 ; SSE2-LABEL: avg_v16i16:
390 ; SSE2-NEXT: movdqa (%rsi), %xmm0
391 ; SSE2-NEXT: movdqa 16(%rsi), %xmm1
392 ; SSE2-NEXT: pavgw (%rdi), %xmm0
393 ; SSE2-NEXT: pavgw 16(%rdi), %xmm1
394 ; SSE2-NEXT: movdqu %xmm1, (%rax)
395 ; SSE2-NEXT: movdqu %xmm0, (%rax)
398 ; AVX1-LABEL: avg_v16i16:
400 ; AVX1-NEXT: vmovdqa (%rdi), %xmm0
401 ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1
402 ; AVX1-NEXT: vpavgw (%rsi), %xmm0, %xmm0
403 ; AVX1-NEXT: vpavgw 16(%rsi), %xmm1, %xmm1
404 ; AVX1-NEXT: vmovdqu %xmm1, (%rax)
405 ; AVX1-NEXT: vmovdqu %xmm0, (%rax)
408 ; AVX2-LABEL: avg_v16i16:
410 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
411 ; AVX2-NEXT: vpavgw (%rsi), %ymm0, %ymm0
412 ; AVX2-NEXT: vmovdqu %ymm0, (%rax)
413 ; AVX2-NEXT: vzeroupper
416 ; AVX512-LABEL: avg_v16i16:
418 ; AVX512-NEXT: vmovdqa (%rdi), %ymm0
419 ; AVX512-NEXT: vpavgw (%rsi), %ymm0, %ymm0
420 ; AVX512-NEXT: vmovdqu %ymm0, (%rax)
421 ; AVX512-NEXT: vzeroupper
423 %1 = load <16 x i16>, ptr %a
424 %2 = load <16 x i16>, ptr %b
425 %3 = zext <16 x i16> %1 to <16 x i32>
426 %4 = zext <16 x i16> %2 to <16 x i32>
427 %5 = add nuw nsw <16 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
428 %6 = add nuw nsw <16 x i32> %5, %4
429 %7 = lshr <16 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
430 %8 = trunc <16 x i32> %7 to <16 x i16>
431 store <16 x i16> %8, ptr undef, align 4
435 define void @avg_v32i16(ptr %a, ptr %b) nounwind {
436 ; SSE2-LABEL: avg_v32i16:
438 ; SSE2-NEXT: movdqa (%rsi), %xmm0
439 ; SSE2-NEXT: movdqa 16(%rsi), %xmm1
440 ; SSE2-NEXT: movdqa 32(%rsi), %xmm2
441 ; SSE2-NEXT: movdqa 48(%rsi), %xmm3
442 ; SSE2-NEXT: pavgw (%rdi), %xmm0
443 ; SSE2-NEXT: pavgw 16(%rdi), %xmm1
444 ; SSE2-NEXT: pavgw 32(%rdi), %xmm2
445 ; SSE2-NEXT: pavgw 48(%rdi), %xmm3
446 ; SSE2-NEXT: movdqu %xmm3, (%rax)
447 ; SSE2-NEXT: movdqu %xmm2, (%rax)
448 ; SSE2-NEXT: movdqu %xmm1, (%rax)
449 ; SSE2-NEXT: movdqu %xmm0, (%rax)
452 ; AVX1-LABEL: avg_v32i16:
454 ; AVX1-NEXT: vmovdqa (%rsi), %xmm0
455 ; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1
456 ; AVX1-NEXT: vmovdqa 32(%rsi), %xmm2
457 ; AVX1-NEXT: vmovdqa 48(%rsi), %xmm3
458 ; AVX1-NEXT: vpavgw (%rdi), %xmm0, %xmm0
459 ; AVX1-NEXT: vpavgw 16(%rdi), %xmm1, %xmm1
460 ; AVX1-NEXT: vpavgw 32(%rdi), %xmm2, %xmm2
461 ; AVX1-NEXT: vpavgw 48(%rdi), %xmm3, %xmm3
462 ; AVX1-NEXT: vmovdqu %xmm3, (%rax)
463 ; AVX1-NEXT: vmovdqu %xmm2, (%rax)
464 ; AVX1-NEXT: vmovdqu %xmm1, (%rax)
465 ; AVX1-NEXT: vmovdqu %xmm0, (%rax)
468 ; AVX2-LABEL: avg_v32i16:
470 ; AVX2-NEXT: vmovdqa (%rsi), %ymm0
471 ; AVX2-NEXT: vmovdqa 32(%rsi), %ymm1
472 ; AVX2-NEXT: vpavgw (%rdi), %ymm0, %ymm0
473 ; AVX2-NEXT: vpavgw 32(%rdi), %ymm1, %ymm1
474 ; AVX2-NEXT: vmovdqu %ymm1, (%rax)
475 ; AVX2-NEXT: vmovdqu %ymm0, (%rax)
476 ; AVX2-NEXT: vzeroupper
479 ; AVX512F-LABEL: avg_v32i16:
481 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
482 ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
483 ; AVX512F-NEXT: vpavgw (%rsi), %ymm0, %ymm0
484 ; AVX512F-NEXT: vpavgw 32(%rsi), %ymm1, %ymm1
485 ; AVX512F-NEXT: vmovdqu %ymm1, (%rax)
486 ; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
487 ; AVX512F-NEXT: vzeroupper
490 ; AVX512BW-LABEL: avg_v32i16:
492 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
493 ; AVX512BW-NEXT: vpavgw (%rsi), %zmm0, %zmm0
494 ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax)
495 ; AVX512BW-NEXT: vzeroupper
496 ; AVX512BW-NEXT: retq
497 %1 = load <32 x i16>, ptr %a
498 %2 = load <32 x i16>, ptr %b
499 %3 = zext <32 x i16> %1 to <32 x i32>
500 %4 = zext <32 x i16> %2 to <32 x i32>
501 %5 = add nuw nsw <32 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
502 %6 = add nuw nsw <32 x i32> %5, %4
503 %7 = lshr <32 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
504 %8 = trunc <32 x i32> %7 to <32 x i16>
505 store <32 x i16> %8, ptr undef, align 4
509 define void @avg_v40i16(ptr %a, ptr %b) nounwind {
510 ; SSE2-LABEL: avg_v40i16:
512 ; SSE2-NEXT: movdqa (%rsi), %xmm0
513 ; SSE2-NEXT: movdqa 16(%rsi), %xmm1
514 ; SSE2-NEXT: movdqa 32(%rsi), %xmm2
515 ; SSE2-NEXT: movdqa 48(%rsi), %xmm3
516 ; SSE2-NEXT: pavgw (%rdi), %xmm0
517 ; SSE2-NEXT: pavgw 16(%rdi), %xmm1
518 ; SSE2-NEXT: pavgw 32(%rdi), %xmm2
519 ; SSE2-NEXT: pavgw 48(%rdi), %xmm3
520 ; SSE2-NEXT: movdqa 64(%rsi), %xmm4
521 ; SSE2-NEXT: pavgw 64(%rdi), %xmm4
522 ; SSE2-NEXT: movdqu %xmm4, (%rax)
523 ; SSE2-NEXT: movdqu %xmm3, (%rax)
524 ; SSE2-NEXT: movdqu %xmm2, (%rax)
525 ; SSE2-NEXT: movdqu %xmm1, (%rax)
526 ; SSE2-NEXT: movdqu %xmm0, (%rax)
529 ; AVX1-LABEL: avg_v40i16:
531 ; AVX1-NEXT: vmovdqa 64(%rsi), %xmm0
532 ; AVX1-NEXT: vpavgw 64(%rdi), %xmm0, %xmm0
533 ; AVX1-NEXT: vmovdqa (%rsi), %xmm1
534 ; AVX1-NEXT: vmovdqa 16(%rsi), %xmm2
535 ; AVX1-NEXT: vmovdqa 32(%rsi), %xmm3
536 ; AVX1-NEXT: vmovdqa 48(%rsi), %xmm4
537 ; AVX1-NEXT: vpavgw (%rdi), %xmm1, %xmm1
538 ; AVX1-NEXT: vpavgw 16(%rdi), %xmm2, %xmm2
539 ; AVX1-NEXT: vpavgw 32(%rdi), %xmm3, %xmm3
540 ; AVX1-NEXT: vpavgw 48(%rdi), %xmm4, %xmm4
541 ; AVX1-NEXT: vmovdqu %xmm4, (%rax)
542 ; AVX1-NEXT: vmovdqu %xmm3, (%rax)
543 ; AVX1-NEXT: vmovdqu %xmm2, (%rax)
544 ; AVX1-NEXT: vmovdqu %xmm1, (%rax)
545 ; AVX1-NEXT: vmovdqu %xmm0, (%rax)
548 ; AVX2-LABEL: avg_v40i16:
550 ; AVX2-NEXT: vmovdqa (%rsi), %ymm0
551 ; AVX2-NEXT: vmovdqa 32(%rsi), %ymm1
552 ; AVX2-NEXT: vpavgw (%rdi), %ymm0, %ymm0
553 ; AVX2-NEXT: vpavgw 32(%rdi), %ymm1, %ymm1
554 ; AVX2-NEXT: vmovdqa 64(%rsi), %xmm2
555 ; AVX2-NEXT: vpavgw 64(%rdi), %xmm2, %xmm2
556 ; AVX2-NEXT: vmovdqu %xmm2, (%rax)
557 ; AVX2-NEXT: vmovdqu %ymm1, (%rax)
558 ; AVX2-NEXT: vmovdqu %ymm0, (%rax)
559 ; AVX2-NEXT: vzeroupper
562 ; AVX512F-LABEL: avg_v40i16:
564 ; AVX512F-NEXT: vmovdqa (%rsi), %ymm0
565 ; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm1
566 ; AVX512F-NEXT: vpavgw (%rdi), %ymm0, %ymm0
567 ; AVX512F-NEXT: vpavgw 32(%rdi), %ymm1, %ymm1
568 ; AVX512F-NEXT: vmovdqa 64(%rsi), %xmm2
569 ; AVX512F-NEXT: vpavgw 64(%rdi), %xmm2, %xmm2
570 ; AVX512F-NEXT: vmovdqu %ymm1, (%rax)
571 ; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
572 ; AVX512F-NEXT: vmovdqu %xmm2, (%rax)
573 ; AVX512F-NEXT: vzeroupper
576 ; AVX512BW-LABEL: avg_v40i16:
578 ; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0
579 ; AVX512BW-NEXT: vpavgw (%rdi), %zmm0, %zmm0
580 ; AVX512BW-NEXT: vmovdqa 64(%rsi), %xmm1
581 ; AVX512BW-NEXT: vpavgw 64(%rdi), %xmm1, %xmm1
582 ; AVX512BW-NEXT: vmovdqu %xmm1, (%rax)
583 ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax)
584 ; AVX512BW-NEXT: vzeroupper
585 ; AVX512BW-NEXT: retq
586 %1 = load <40 x i16>, ptr %a
587 %2 = load <40 x i16>, ptr %b
588 %3 = zext <40 x i16> %1 to <40 x i32>
589 %4 = zext <40 x i16> %2 to <40 x i32>
590 %5 = add nuw nsw <40 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
591 %6 = add nuw nsw <40 x i32> %5, %4
592 %7 = lshr <40 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
593 %8 = trunc <40 x i32> %7 to <40 x i16>
594 store <40 x i16> %8, ptr undef, align 4
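; The *_2 variants below commute the rounding add: they compute
; ((a + b) + 1) >> 1 instead of ((a + 1) + b) >> 1 and should select the same
; pavg instructions. Note that avg_v64i8_2 adds %4 to itself, so its lowering
; degenerates to a plain copy of %b and no pavg is emitted.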
598 define void @avg_v4i8_2(ptr %a, ptr %b) nounwind {
599 ; SSE2-LABEL: avg_v4i8_2:
601 ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
602 ; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
603 ; SSE2-NEXT: pavgb %xmm0, %xmm1
604 ; SSE2-NEXT: movd %xmm1, (%rax)
607 ; AVX-LABEL: avg_v4i8_2:
609 ; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
610 ; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
611 ; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0
612 ; AVX-NEXT: vmovd %xmm0, (%rax)
614 %1 = load <4 x i8>, ptr %a
615 %2 = load <4 x i8>, ptr %b
616 %3 = zext <4 x i8> %1 to <4 x i32>
617 %4 = zext <4 x i8> %2 to <4 x i32>
618 %5 = add nuw nsw <4 x i32> %3, %4
619 %6 = add nuw nsw <4 x i32> %5, <i32 1, i32 1, i32 1, i32 1>
620 %7 = lshr <4 x i32> %6, <i32 1, i32 1, i32 1, i32 1>
621 %8 = trunc <4 x i32> %7 to <4 x i8>
622 store <4 x i8> %8, ptr undef, align 4
626 define void @avg_v8i8_2(ptr %a, ptr %b) nounwind {
627 ; SSE2-LABEL: avg_v8i8_2:
629 ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
630 ; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
631 ; SSE2-NEXT: pavgb %xmm0, %xmm1
632 ; SSE2-NEXT: movq %xmm1, (%rax)
635 ; AVX-LABEL: avg_v8i8_2:
637 ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
638 ; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
639 ; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0
640 ; AVX-NEXT: vmovq %xmm0, (%rax)
642 %1 = load <8 x i8>, ptr %a
643 %2 = load <8 x i8>, ptr %b
644 %3 = zext <8 x i8> %1 to <8 x i32>
645 %4 = zext <8 x i8> %2 to <8 x i32>
646 %5 = add nuw nsw <8 x i32> %3, %4
647 %6 = add nuw nsw <8 x i32> %5, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
648 %7 = lshr <8 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
649 %8 = trunc <8 x i32> %7 to <8 x i8>
650 store <8 x i8> %8, ptr undef, align 4
654 define void @avg_v16i8_2(ptr %a, ptr %b) nounwind {
655 ; SSE2-LABEL: avg_v16i8_2:
657 ; SSE2-NEXT: movdqa (%rdi), %xmm0
658 ; SSE2-NEXT: pavgb (%rsi), %xmm0
659 ; SSE2-NEXT: movdqu %xmm0, (%rax)
662 ; AVX-LABEL: avg_v16i8_2:
664 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
665 ; AVX-NEXT: vpavgb (%rsi), %xmm0, %xmm0
666 ; AVX-NEXT: vmovdqu %xmm0, (%rax)
668 %1 = load <16 x i8>, ptr %a
669 %2 = load <16 x i8>, ptr %b
670 %3 = zext <16 x i8> %1 to <16 x i32>
671 %4 = zext <16 x i8> %2 to <16 x i32>
672 %5 = add nuw nsw <16 x i32> %3, %4
673 %6 = add nuw nsw <16 x i32> %5, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
674 %7 = lshr <16 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
675 %8 = trunc <16 x i32> %7 to <16 x i8>
676 store <16 x i8> %8, ptr undef, align 4
680 define void @avg_v32i8_2(ptr %a, ptr %b) nounwind {
681 ; SSE2-LABEL: avg_v32i8_2:
683 ; SSE2-NEXT: movdqa (%rdi), %xmm0
684 ; SSE2-NEXT: movdqa 16(%rdi), %xmm1
685 ; SSE2-NEXT: pavgb (%rsi), %xmm0
686 ; SSE2-NEXT: pavgb 16(%rsi), %xmm1
687 ; SSE2-NEXT: movdqu %xmm1, (%rax)
688 ; SSE2-NEXT: movdqu %xmm0, (%rax)
691 ; AVX1-LABEL: avg_v32i8_2:
693 ; AVX1-NEXT: vmovdqa (%rdi), %xmm0
694 ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1
695 ; AVX1-NEXT: vpavgb (%rsi), %xmm0, %xmm0
696 ; AVX1-NEXT: vpavgb 16(%rsi), %xmm1, %xmm1
697 ; AVX1-NEXT: vmovdqu %xmm1, (%rax)
698 ; AVX1-NEXT: vmovdqu %xmm0, (%rax)
701 ; AVX2-LABEL: avg_v32i8_2:
703 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
704 ; AVX2-NEXT: vpavgb (%rsi), %ymm0, %ymm0
705 ; AVX2-NEXT: vmovdqu %ymm0, (%rax)
706 ; AVX2-NEXT: vzeroupper
709 ; AVX512-LABEL: avg_v32i8_2:
711 ; AVX512-NEXT: vmovdqa (%rdi), %ymm0
712 ; AVX512-NEXT: vpavgb (%rsi), %ymm0, %ymm0
713 ; AVX512-NEXT: vmovdqu %ymm0, (%rax)
714 ; AVX512-NEXT: vzeroupper
716 %1 = load <32 x i8>, ptr %a
717 %2 = load <32 x i8>, ptr %b
718 %3 = zext <32 x i8> %1 to <32 x i32>
719 %4 = zext <32 x i8> %2 to <32 x i32>
720 %5 = add nuw nsw <32 x i32> %3, %4
721 %6 = add nuw nsw <32 x i32> %5, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
722 %7 = lshr <32 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
723 %8 = trunc <32 x i32> %7 to <32 x i8>
724 store <32 x i8> %8, ptr undef, align 4
728 define void @avg_v64i8_2(ptr %a, ptr %b) nounwind {
729 ; SSE2-LABEL: avg_v64i8_2:
731 ; SSE2-NEXT: movaps (%rsi), %xmm0
732 ; SSE2-NEXT: movaps 16(%rsi), %xmm1
733 ; SSE2-NEXT: movaps 32(%rsi), %xmm2
734 ; SSE2-NEXT: movaps 48(%rsi), %xmm3
735 ; SSE2-NEXT: movups %xmm3, (%rax)
736 ; SSE2-NEXT: movups %xmm2, (%rax)
737 ; SSE2-NEXT: movups %xmm1, (%rax)
738 ; SSE2-NEXT: movups %xmm0, (%rax)
741 ; AVX1-LABEL: avg_v64i8_2:
743 ; AVX1-NEXT: vmovaps (%rsi), %ymm0
744 ; AVX1-NEXT: vmovaps 32(%rsi), %ymm1
745 ; AVX1-NEXT: vmovups %ymm1, (%rax)
746 ; AVX1-NEXT: vmovups %ymm0, (%rax)
747 ; AVX1-NEXT: vzeroupper
750 ; AVX2-LABEL: avg_v64i8_2:
752 ; AVX2-NEXT: vmovaps (%rsi), %ymm0
753 ; AVX2-NEXT: vmovaps 32(%rsi), %ymm1
754 ; AVX2-NEXT: vmovups %ymm1, (%rax)
755 ; AVX2-NEXT: vmovups %ymm0, (%rax)
756 ; AVX2-NEXT: vzeroupper
759 ; AVX512-LABEL: avg_v64i8_2:
761 ; AVX512-NEXT: vmovaps (%rsi), %zmm0
762 ; AVX512-NEXT: vmovups %zmm0, (%rax)
763 ; AVX512-NEXT: vzeroupper
765 %1 = load <64 x i8>, ptr %a
766 %2 = load <64 x i8>, ptr %b
767 %3 = zext <64 x i8> %1 to <64 x i32>
768 %4 = zext <64 x i8> %2 to <64 x i32>
769 %5 = add nuw nsw <64 x i32> %4, %4
770 %6 = add nuw nsw <64 x i32> %5, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
771 %7 = lshr <64 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
772 %8 = trunc <64 x i32> %7 to <64 x i8>
773 store <64 x i8> %8, ptr undef, align 4
778 define void @avg_v4i16_2(ptr %a, ptr %b) nounwind {
779 ; SSE2-LABEL: avg_v4i16_2:
781 ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
782 ; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
783 ; SSE2-NEXT: pavgw %xmm0, %xmm1
784 ; SSE2-NEXT: movq %xmm1, (%rax)
787 ; AVX-LABEL: avg_v4i16_2:
789 ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
790 ; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
791 ; AVX-NEXT: vpavgw %xmm1, %xmm0, %xmm0
792 ; AVX-NEXT: vmovq %xmm0, (%rax)
794 %1 = load <4 x i16>, ptr %a
795 %2 = load <4 x i16>, ptr %b
796 %3 = zext <4 x i16> %1 to <4 x i32>
797 %4 = zext <4 x i16> %2 to <4 x i32>
798 %5 = add nuw nsw <4 x i32> %3, %4
799 %6 = add nuw nsw <4 x i32> %5, <i32 1, i32 1, i32 1, i32 1>
800 %7 = lshr <4 x i32> %6, <i32 1, i32 1, i32 1, i32 1>
801 %8 = trunc <4 x i32> %7 to <4 x i16>
802 store <4 x i16> %8, ptr undef, align 4
806 define void @avg_v8i16_2(ptr %a, ptr %b) nounwind {
807 ; SSE2-LABEL: avg_v8i16_2:
809 ; SSE2-NEXT: movdqa (%rdi), %xmm0
810 ; SSE2-NEXT: pavgw (%rsi), %xmm0
811 ; SSE2-NEXT: movdqu %xmm0, (%rax)
814 ; AVX-LABEL: avg_v8i16_2:
816 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
817 ; AVX-NEXT: vpavgw (%rsi), %xmm0, %xmm0
818 ; AVX-NEXT: vmovdqu %xmm0, (%rax)
820 %1 = load <8 x i16>, ptr %a
821 %2 = load <8 x i16>, ptr %b
822 %3 = zext <8 x i16> %1 to <8 x i32>
823 %4 = zext <8 x i16> %2 to <8 x i32>
824 %5 = add nuw nsw <8 x i32> %3, %4
825 %6 = add nuw nsw <8 x i32> %5, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
826 %7 = lshr <8 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
827 %8 = trunc <8 x i32> %7 to <8 x i16>
828 store <8 x i16> %8, ptr undef, align 4
832 define void @avg_v16i16_2(ptr %a, ptr %b) nounwind {
833 ; SSE2-LABEL: avg_v16i16_2:
835 ; SSE2-NEXT: movdqa (%rdi), %xmm0
836 ; SSE2-NEXT: movdqa 16(%rdi), %xmm1
837 ; SSE2-NEXT: pavgw (%rsi), %xmm0
838 ; SSE2-NEXT: pavgw 16(%rsi), %xmm1
839 ; SSE2-NEXT: movdqu %xmm1, (%rax)
840 ; SSE2-NEXT: movdqu %xmm0, (%rax)
843 ; AVX1-LABEL: avg_v16i16_2:
845 ; AVX1-NEXT: vmovdqa (%rdi), %xmm0
846 ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1
847 ; AVX1-NEXT: vpavgw (%rsi), %xmm0, %xmm0
848 ; AVX1-NEXT: vpavgw 16(%rsi), %xmm1, %xmm1
849 ; AVX1-NEXT: vmovdqu %xmm1, (%rax)
850 ; AVX1-NEXT: vmovdqu %xmm0, (%rax)
853 ; AVX2-LABEL: avg_v16i16_2:
855 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
856 ; AVX2-NEXT: vpavgw (%rsi), %ymm0, %ymm0
857 ; AVX2-NEXT: vmovdqu %ymm0, (%rax)
858 ; AVX2-NEXT: vzeroupper
861 ; AVX512-LABEL: avg_v16i16_2:
863 ; AVX512-NEXT: vmovdqa (%rdi), %ymm0
864 ; AVX512-NEXT: vpavgw (%rsi), %ymm0, %ymm0
865 ; AVX512-NEXT: vmovdqu %ymm0, (%rax)
866 ; AVX512-NEXT: vzeroupper
868 %1 = load <16 x i16>, ptr %a
869 %2 = load <16 x i16>, ptr %b
870 %3 = zext <16 x i16> %1 to <16 x i32>
871 %4 = zext <16 x i16> %2 to <16 x i32>
872 %5 = add nuw nsw <16 x i32> %3, %4
873 %6 = add nuw nsw <16 x i32> %5, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
874 %7 = lshr <16 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
875 %8 = trunc <16 x i32> %7 to <16 x i16>
876 store <16 x i16> %8, ptr undef, align 4
880 define void @avg_v32i16_2(ptr %a, ptr %b) nounwind {
881 ; SSE2-LABEL: avg_v32i16_2:
883 ; SSE2-NEXT: movdqa (%rdi), %xmm0
884 ; SSE2-NEXT: movdqa 16(%rdi), %xmm1
885 ; SSE2-NEXT: movdqa 32(%rdi), %xmm2
886 ; SSE2-NEXT: movdqa 48(%rdi), %xmm3
887 ; SSE2-NEXT: pavgw (%rsi), %xmm0
888 ; SSE2-NEXT: pavgw 16(%rsi), %xmm1
889 ; SSE2-NEXT: pavgw 32(%rsi), %xmm2
890 ; SSE2-NEXT: pavgw 48(%rsi), %xmm3
891 ; SSE2-NEXT: movdqu %xmm3, (%rax)
892 ; SSE2-NEXT: movdqu %xmm2, (%rax)
893 ; SSE2-NEXT: movdqu %xmm1, (%rax)
894 ; SSE2-NEXT: movdqu %xmm0, (%rax)
897 ; AVX1-LABEL: avg_v32i16_2:
899 ; AVX1-NEXT: vmovdqa (%rdi), %xmm0
900 ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1
901 ; AVX1-NEXT: vmovdqa 32(%rdi), %xmm2
902 ; AVX1-NEXT: vmovdqa 48(%rdi), %xmm3
903 ; AVX1-NEXT: vpavgw (%rsi), %xmm0, %xmm0
904 ; AVX1-NEXT: vpavgw 16(%rsi), %xmm1, %xmm1
905 ; AVX1-NEXT: vpavgw 32(%rsi), %xmm2, %xmm2
906 ; AVX1-NEXT: vpavgw 48(%rsi), %xmm3, %xmm3
907 ; AVX1-NEXT: vmovdqu %xmm3, (%rax)
908 ; AVX1-NEXT: vmovdqu %xmm2, (%rax)
909 ; AVX1-NEXT: vmovdqu %xmm1, (%rax)
910 ; AVX1-NEXT: vmovdqu %xmm0, (%rax)
913 ; AVX2-LABEL: avg_v32i16_2:
915 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
916 ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1
917 ; AVX2-NEXT: vpavgw (%rsi), %ymm0, %ymm0
918 ; AVX2-NEXT: vpavgw 32(%rsi), %ymm1, %ymm1
919 ; AVX2-NEXT: vmovdqu %ymm1, (%rax)
920 ; AVX2-NEXT: vmovdqu %ymm0, (%rax)
921 ; AVX2-NEXT: vzeroupper
924 ; AVX512F-LABEL: avg_v32i16_2:
926 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
927 ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
928 ; AVX512F-NEXT: vpavgw (%rsi), %ymm0, %ymm0
929 ; AVX512F-NEXT: vpavgw 32(%rsi), %ymm1, %ymm1
930 ; AVX512F-NEXT: vmovdqu %ymm1, (%rax)
931 ; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
932 ; AVX512F-NEXT: vzeroupper
935 ; AVX512BW-LABEL: avg_v32i16_2:
937 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
938 ; AVX512BW-NEXT: vpavgw (%rsi), %zmm0, %zmm0
939 ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax)
940 ; AVX512BW-NEXT: vzeroupper
941 ; AVX512BW-NEXT: retq
942 %1 = load <32 x i16>, ptr %a
943 %2 = load <32 x i16>, ptr %b
944 %3 = zext <32 x i16> %1 to <32 x i32>
945 %4 = zext <32 x i16> %2 to <32 x i32>
946 %5 = add nuw nsw <32 x i32> %3, %4
947 %6 = add nuw nsw <32 x i32> %5, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
948 %7 = lshr <32 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
949 %8 = trunc <32 x i32> %7 to <32 x i16>
950 store <32 x i16> %8, ptr undef, align 4
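; The *_const variants average a loaded vector against a constant: the
; "(x + C) >> 1" pattern folds to a pavg with a constant-pool operand, or a
; broadcast of it when the splat constant is reused across wider registers.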
954 define void @avg_v4i8_const(ptr %a) nounwind {
955 ; SSE2-LABEL: avg_v4i8_const:
957 ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
958 ; SSE2-NEXT: pavgb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
959 ; SSE2-NEXT: movd %xmm0, (%rax)
962 ; AVX-LABEL: avg_v4i8_const:
964 ; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
965 ; AVX-NEXT: vpavgb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
966 ; AVX-NEXT: vmovd %xmm0, (%rax)
968 %1 = load <4 x i8>, ptr %a
969 %2 = zext <4 x i8> %1 to <4 x i32>
970 %3 = add nuw nsw <4 x i32> %2, <i32 1, i32 2, i32 3, i32 4>
971 %4 = lshr <4 x i32> %3, <i32 1, i32 1, i32 1, i32 1>
972 %5 = trunc <4 x i32> %4 to <4 x i8>
973 store <4 x i8> %5, ptr undef, align 4
977 define void @avg_v8i8_const(ptr %a) nounwind {
978 ; SSE2-LABEL: avg_v8i8_const:
980 ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
981 ; SSE2-NEXT: pavgb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
982 ; SSE2-NEXT: movq %xmm0, (%rax)
985 ; AVX-LABEL: avg_v8i8_const:
987 ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
988 ; AVX-NEXT: vpavgb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
989 ; AVX-NEXT: vmovq %xmm0, (%rax)
991 %1 = load <8 x i8>, ptr %a
992 %2 = zext <8 x i8> %1 to <8 x i32>
993 %3 = add nuw nsw <8 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
994 %4 = lshr <8 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
995 %5 = trunc <8 x i32> %4 to <8 x i8>
996 store <8 x i8> %5, ptr undef, align 4
1000 define void @avg_v16i8_const(ptr %a) nounwind {
1001 ; SSE2-LABEL: avg_v16i8_const:
1003 ; SSE2-NEXT: movdqa (%rdi), %xmm0
1004 ; SSE2-NEXT: pavgb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
1005 ; SSE2-NEXT: movdqu %xmm0, (%rax)
1008 ; AVX-LABEL: avg_v16i8_const:
1010 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
1011 ; AVX-NEXT: vpavgb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
1012 ; AVX-NEXT: vmovdqu %xmm0, (%rax)
1014 %1 = load <16 x i8>, ptr %a
1015 %2 = zext <16 x i8> %1 to <16 x i32>
1016 %3 = add nuw nsw <16 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
1017 %4 = lshr <16 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
1018 %5 = trunc <16 x i32> %4 to <16 x i8>
1019 store <16 x i8> %5, ptr undef, align 4
1023 define void @avg_v32i8_const(ptr %a) nounwind {
1024 ; SSE2-LABEL: avg_v32i8_const:
1026 ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
1027 ; SSE2-NEXT: movdqa (%rdi), %xmm1
1028 ; SSE2-NEXT: pavgb %xmm0, %xmm1
1029 ; SSE2-NEXT: pavgb 16(%rdi), %xmm0
1030 ; SSE2-NEXT: movdqu %xmm0, (%rax)
1031 ; SSE2-NEXT: movdqu %xmm1, (%rax)
1034 ; AVX1-LABEL: avg_v32i8_const:
1036 ; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
1037 ; AVX1-NEXT: # xmm0 = mem[0,0]
1038 ; AVX1-NEXT: vpavgb (%rdi), %xmm0, %xmm1
1039 ; AVX1-NEXT: vpavgb 16(%rdi), %xmm0, %xmm0
1040 ; AVX1-NEXT: vmovdqu %xmm0, (%rax)
1041 ; AVX1-NEXT: vmovdqu %xmm1, (%rax)
1044 ; AVX2-LABEL: avg_v32i8_const:
1046 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
1047 ; AVX2-NEXT: vpavgb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
1048 ; AVX2-NEXT: vmovdqu %ymm0, (%rax)
1049 ; AVX2-NEXT: vzeroupper
1052 ; AVX512-LABEL: avg_v32i8_const:
1054 ; AVX512-NEXT: vmovdqa (%rdi), %ymm0
1055 ; AVX512-NEXT: vpavgb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
1056 ; AVX512-NEXT: vmovdqu %ymm0, (%rax)
1057 ; AVX512-NEXT: vzeroupper
1059 %1 = load <32 x i8>, ptr %a
1060 %2 = zext <32 x i8> %1 to <32 x i32>
1061 %3 = add nuw nsw <32 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
1062 %4 = lshr <32 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
1063 %5 = trunc <32 x i32> %4 to <32 x i8>
1064 store <32 x i8> %5, ptr undef, align 4
1068 define void @avg_v64i8_const(ptr %a) nounwind {
1069 ; SSE2-LABEL: avg_v64i8_const:
1071 ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
1072 ; SSE2-NEXT: movdqa (%rdi), %xmm1
1073 ; SSE2-NEXT: pavgb %xmm0, %xmm1
1074 ; SSE2-NEXT: movdqa 16(%rdi), %xmm2
1075 ; SSE2-NEXT: pavgb %xmm0, %xmm2
1076 ; SSE2-NEXT: movdqa 32(%rdi), %xmm3
1077 ; SSE2-NEXT: pavgb %xmm0, %xmm3
1078 ; SSE2-NEXT: pavgb 48(%rdi), %xmm0
1079 ; SSE2-NEXT: movdqu %xmm0, (%rax)
1080 ; SSE2-NEXT: movdqu %xmm3, (%rax)
1081 ; SSE2-NEXT: movdqu %xmm2, (%rax)
1082 ; SSE2-NEXT: movdqu %xmm1, (%rax)
1085 ; AVX1-LABEL: avg_v64i8_const:
1087 ; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
1088 ; AVX1-NEXT: # xmm0 = mem[0,0]
1089 ; AVX1-NEXT: vpavgb (%rdi), %xmm0, %xmm1
1090 ; AVX1-NEXT: vpavgb 16(%rdi), %xmm0, %xmm2
1091 ; AVX1-NEXT: vpavgb 32(%rdi), %xmm0, %xmm3
1092 ; AVX1-NEXT: vpavgb 48(%rdi), %xmm0, %xmm0
1093 ; AVX1-NEXT: vmovdqu %xmm0, (%rax)
1094 ; AVX1-NEXT: vmovdqu %xmm3, (%rax)
1095 ; AVX1-NEXT: vmovdqu %xmm2, (%rax)
1096 ; AVX1-NEXT: vmovdqu %xmm1, (%rax)
1099 ; AVX2-LABEL: avg_v64i8_const:
1101 ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
1102 ; AVX2-NEXT: vpavgb (%rdi), %ymm0, %ymm1
1103 ; AVX2-NEXT: vpavgb 32(%rdi), %ymm0, %ymm0
1104 ; AVX2-NEXT: vmovdqu %ymm0, (%rax)
1105 ; AVX2-NEXT: vmovdqu %ymm1, (%rax)
1106 ; AVX2-NEXT: vzeroupper
1109 ; AVX512F-LABEL: avg_v64i8_const:
1111 ; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
1112 ; AVX512F-NEXT: vpavgb (%rdi), %ymm0, %ymm1
1113 ; AVX512F-NEXT: vpavgb 32(%rdi), %ymm0, %ymm0
1114 ; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
1115 ; AVX512F-NEXT: vmovdqu %ymm1, (%rax)
1116 ; AVX512F-NEXT: vzeroupper
1117 ; AVX512F-NEXT: retq
1119 ; AVX512BW-LABEL: avg_v64i8_const:
1120 ; AVX512BW: # %bb.0:
1121 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
1122 ; AVX512BW-NEXT: vpavgb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
1123 ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax)
1124 ; AVX512BW-NEXT: vzeroupper
1125 ; AVX512BW-NEXT: retq
1126 %1 = load <64 x i8>, ptr %a
1127 %2 = zext <64 x i8> %1 to <64 x i32>
1128 %3 = add nuw nsw <64 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
1129 %4 = lshr <64 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
1130 %5 = trunc <64 x i32> %4 to <64 x i8>
1131 store <64 x i8> %5, ptr undef, align 4
1135 define void @avg_v4i16_const(ptr %a) nounwind {
1136 ; SSE2-LABEL: avg_v4i16_const:
1138 ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
1139 ; SSE2-NEXT: pavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
1140 ; SSE2-NEXT: movq %xmm0, (%rax)
1143 ; AVX-LABEL: avg_v4i16_const:
1145 ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
1146 ; AVX-NEXT: vpavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
1147 ; AVX-NEXT: vmovq %xmm0, (%rax)
1149 %1 = load <4 x i16>, ptr %a
1150 %2 = zext <4 x i16> %1 to <4 x i32>
1151 %3 = add nuw nsw <4 x i32> %2, <i32 1, i32 2, i32 3, i32 4>
1152 %4 = lshr <4 x i32> %3, <i32 1, i32 1, i32 1, i32 1>
1153 %5 = trunc <4 x i32> %4 to <4 x i16>
1154 store <4 x i16> %5, ptr undef, align 4
1158 define void @avg_v8i16_const(ptr %a) nounwind {
1159 ; SSE2-LABEL: avg_v8i16_const:
1161 ; SSE2-NEXT: movdqa (%rdi), %xmm0
1162 ; SSE2-NEXT: pavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
1163 ; SSE2-NEXT: movdqu %xmm0, (%rax)
1166 ; AVX-LABEL: avg_v8i16_const:
1168 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
1169 ; AVX-NEXT: vpavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
1170 ; AVX-NEXT: vmovdqu %xmm0, (%rax)
1172 %1 = load <8 x i16>, ptr %a
1173 %2 = zext <8 x i16> %1 to <8 x i32>
1174 %3 = add nuw nsw <8 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
1175 %4 = lshr <8 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
1176 %5 = trunc <8 x i32> %4 to <8 x i16>
1177 store <8 x i16> %5, ptr undef, align 4
1181 define void @avg_v16i16_const(ptr %a) nounwind {
1182 ; SSE2-LABEL: avg_v16i16_const:
1184 ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7]
1185 ; SSE2-NEXT: movdqa (%rdi), %xmm1
1186 ; SSE2-NEXT: pavgw %xmm0, %xmm1
1187 ; SSE2-NEXT: pavgw 16(%rdi), %xmm0
1188 ; SSE2-NEXT: movdqu %xmm0, (%rax)
1189 ; SSE2-NEXT: movdqu %xmm1, (%rax)
1192 ; AVX1-LABEL: avg_v16i16_const:
1194 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7]
1195 ; AVX1-NEXT: vpavgw (%rdi), %xmm0, %xmm1
1196 ; AVX1-NEXT: vpavgw 16(%rdi), %xmm0, %xmm0
1197 ; AVX1-NEXT: vmovdqu %xmm0, (%rax)
1198 ; AVX1-NEXT: vmovdqu %xmm1, (%rax)
1201 ; AVX2-LABEL: avg_v16i16_const:
1203 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
1204 ; AVX2-NEXT: vpavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
1205 ; AVX2-NEXT: vmovdqu %ymm0, (%rax)
1206 ; AVX2-NEXT: vzeroupper
1209 ; AVX512-LABEL: avg_v16i16_const:
1211 ; AVX512-NEXT: vmovdqa (%rdi), %ymm0
1212 ; AVX512-NEXT: vpavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
1213 ; AVX512-NEXT: vmovdqu %ymm0, (%rax)
1214 ; AVX512-NEXT: vzeroupper
1216 %1 = load <16 x i16>, ptr %a
1217 %2 = zext <16 x i16> %1 to <16 x i32>
1218 %3 = add nuw nsw <16 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
1219 %4 = lshr <16 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
1220 %5 = trunc <16 x i32> %4 to <16 x i16>
1221 store <16 x i16> %5, ptr undef, align 4
1225 define void @avg_v32i16_const(ptr %a) nounwind {
1226 ; SSE2-LABEL: avg_v32i16_const:
1228 ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7]
1229 ; SSE2-NEXT: movdqa (%rdi), %xmm1
1230 ; SSE2-NEXT: pavgw %xmm0, %xmm1
1231 ; SSE2-NEXT: movdqa 16(%rdi), %xmm2
1232 ; SSE2-NEXT: pavgw %xmm0, %xmm2
1233 ; SSE2-NEXT: movdqa 32(%rdi), %xmm3
1234 ; SSE2-NEXT: pavgw %xmm0, %xmm3
1235 ; SSE2-NEXT: pavgw 48(%rdi), %xmm0
1236 ; SSE2-NEXT: movdqu %xmm0, (%rax)
1237 ; SSE2-NEXT: movdqu %xmm3, (%rax)
1238 ; SSE2-NEXT: movdqu %xmm2, (%rax)
1239 ; SSE2-NEXT: movdqu %xmm1, (%rax)
1242 ; AVX1-LABEL: avg_v32i16_const:
1244 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7]
1245 ; AVX1-NEXT: vpavgw (%rdi), %xmm0, %xmm1
1246 ; AVX1-NEXT: vpavgw 16(%rdi), %xmm0, %xmm2
1247 ; AVX1-NEXT: vpavgw 32(%rdi), %xmm0, %xmm3
1248 ; AVX1-NEXT: vpavgw 48(%rdi), %xmm0, %xmm0
1249 ; AVX1-NEXT: vmovdqu %xmm0, (%rax)
1250 ; AVX1-NEXT: vmovdqu %xmm3, (%rax)
1251 ; AVX1-NEXT: vmovdqu %xmm2, (%rax)
1252 ; AVX1-NEXT: vmovdqu %xmm1, (%rax)
1255 ; AVX2-LABEL: avg_v32i16_const:
1257 ; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
1258 ; AVX2-NEXT: # ymm0 = mem[0,1,0,1]
1259 ; AVX2-NEXT: vpavgw (%rdi), %ymm0, %ymm1
1260 ; AVX2-NEXT: vpavgw 32(%rdi), %ymm0, %ymm0
1261 ; AVX2-NEXT: vmovdqu %ymm0, (%rax)
1262 ; AVX2-NEXT: vmovdqu %ymm1, (%rax)
1263 ; AVX2-NEXT: vzeroupper
1266 ; AVX512F-LABEL: avg_v32i16_const:
1268 ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
1269 ; AVX512F-NEXT: # ymm0 = mem[0,1,0,1]
1270 ; AVX512F-NEXT: vpavgw (%rdi), %ymm0, %ymm1
1271 ; AVX512F-NEXT: vpavgw 32(%rdi), %ymm0, %ymm0
1272 ; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
1273 ; AVX512F-NEXT: vmovdqu %ymm1, (%rax)
1274 ; AVX512F-NEXT: vzeroupper
1275 ; AVX512F-NEXT: retq
1277 ; AVX512BW-LABEL: avg_v32i16_const:
1278 ; AVX512BW: # %bb.0:
1279 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
1280 ; AVX512BW-NEXT: vpavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
1281 ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax)
1282 ; AVX512BW-NEXT: vzeroupper
1283 ; AVX512BW-NEXT: retq
1284 %1 = load <32 x i16>, ptr %a
1285 %2 = zext <32 x i16> %1 to <32 x i32>
1286 %3 = add nuw nsw <32 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
1287 %4 = lshr <32 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
1288 %5 = trunc <32 x i32> %4 to <32 x i16>
1289 store <32 x i16> %5, ptr undef, align 4
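; The *_3 variants take and return vectors directly instead of going through
; memory, and widen to i16 rather than i32 before the add/shift; the pattern
; should still be matched to pavgb on each register-sized piece.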
1293 define <16 x i8> @avg_v16i8_3(<16 x i8> %a, <16 x i8> %b) nounwind {
1294 ; SSE2-LABEL: avg_v16i8_3:
1296 ; SSE2-NEXT: pavgb %xmm1, %xmm0
1299 ; AVX-LABEL: avg_v16i8_3:
1301 ; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0
1303 %za = zext <16 x i8> %a to <16 x i16>
1304 %zb = zext <16 x i8> %b to <16 x i16>
1305 %add = add nuw nsw <16 x i16> %za, %zb
1306 %add1 = add nuw nsw <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
1307 %lshr = lshr <16 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
1308 %res = trunc <16 x i16> %lshr to <16 x i8>
1312 define <32 x i8> @avg_v32i8_3(<32 x i8> %a, <32 x i8> %b) nounwind {
1313 ; SSE2-LABEL: avg_v32i8_3:
1315 ; SSE2-NEXT: pavgb %xmm2, %xmm0
1316 ; SSE2-NEXT: pavgb %xmm3, %xmm1
1319 ; AVX1-LABEL: avg_v32i8_3:
1321 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
1322 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
1323 ; AVX1-NEXT: vpavgb %xmm2, %xmm3, %xmm2
1324 ; AVX1-NEXT: vpavgb %xmm1, %xmm0, %xmm0
1325 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
1328 ; AVX2-LABEL: avg_v32i8_3:
1330 ; AVX2-NEXT: vpavgb %ymm1, %ymm0, %ymm0
1333 ; AVX512-LABEL: avg_v32i8_3:
1335 ; AVX512-NEXT: vpavgb %ymm1, %ymm0, %ymm0
1337 %za = zext <32 x i8> %a to <32 x i16>
1338 %zb = zext <32 x i8> %b to <32 x i16>
1339 %add = add nuw nsw <32 x i16> %za, %zb
1340 %add1 = add nuw nsw <32 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
1341 %lshr = lshr <32 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
1342 %res = trunc <32 x i16> %lshr to <32 x i8>
1346 define <64 x i8> @avg_v64i8_3(<64 x i8> %a, <64 x i8> %b) nounwind {
1347 ; SSE2-LABEL: avg_v64i8_3:
1349 ; SSE2-NEXT: pavgb %xmm4, %xmm0
1350 ; SSE2-NEXT: pavgb %xmm5, %xmm1
1351 ; SSE2-NEXT: pavgb %xmm6, %xmm2
1352 ; SSE2-NEXT: pavgb %xmm7, %xmm3
1355 ; AVX1-LABEL: avg_v64i8_3:
1357 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
1358 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
1359 ; AVX1-NEXT: vpavgb %xmm4, %xmm5, %xmm4
1360 ; AVX1-NEXT: vpavgb %xmm2, %xmm0, %xmm0
1361 ; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
1362 ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
1363 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
1364 ; AVX1-NEXT: vpavgb %xmm2, %xmm4, %xmm2
1365 ; AVX1-NEXT: vpavgb %xmm3, %xmm1, %xmm1
1366 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
1369 ; AVX2-LABEL: avg_v64i8_3:
1371 ; AVX2-NEXT: vpavgb %ymm2, %ymm0, %ymm0
1372 ; AVX2-NEXT: vpavgb %ymm3, %ymm1, %ymm1
1375 ; AVX512F-LABEL: avg_v64i8_3:
1377 ; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
1378 ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
1379 ; AVX512F-NEXT: vpavgb %ymm2, %ymm3, %ymm2
1380 ; AVX512F-NEXT: vpavgb %ymm1, %ymm0, %ymm0
1381 ; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
1382 ; AVX512F-NEXT: retq
1384 ; AVX512BW-LABEL: avg_v64i8_3:
1385 ; AVX512BW: # %bb.0:
1386 ; AVX512BW-NEXT: vpavgb %zmm1, %zmm0, %zmm0
1387 ; AVX512BW-NEXT: retq
1388 %za = zext <64 x i8> %a to <64 x i16>
1389 %zb = zext <64 x i8> %b to <64 x i16>
1390 %add = add nuw nsw <64 x i16> %za, %zb
1391 %add1 = add nuw nsw <64 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
1392 %lshr = lshr <64 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
1393 %res = trunc <64 x i16> %lshr to <64 x i8>
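; avg_v512i8_3 uses a type far wider than any legal vector, so most argument
; chunks are passed on the stack and the result is written through the hidden
; return pointer in %rdi (copied to %rax); each chunk is still averaged with a
; single pavgb.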
1397 define <512 x i8> @avg_v512i8_3(<512 x i8> %a, <512 x i8> %b) nounwind {
1398 ; SSE2-LABEL: avg_v512i8_3:
1400 ; SSE2-NEXT: movq %rdi, %rax
1401 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1402 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1403 ; SSE2-NEXT: movdqa %xmm8, 496(%rdi)
1404 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1405 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1406 ; SSE2-NEXT: movdqa %xmm8, 480(%rdi)
1407 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1408 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1409 ; SSE2-NEXT: movdqa %xmm8, 464(%rdi)
1410 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1411 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1412 ; SSE2-NEXT: movdqa %xmm8, 448(%rdi)
1413 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1414 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1415 ; SSE2-NEXT: movdqa %xmm8, 432(%rdi)
1416 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1417 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1418 ; SSE2-NEXT: movdqa %xmm8, 416(%rdi)
1419 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1420 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1421 ; SSE2-NEXT: movdqa %xmm8, 400(%rdi)
1422 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1423 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1424 ; SSE2-NEXT: movdqa %xmm8, 384(%rdi)
1425 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1426 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1427 ; SSE2-NEXT: movdqa %xmm8, 368(%rdi)
1428 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1429 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1430 ; SSE2-NEXT: movdqa %xmm8, 352(%rdi)
1431 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1432 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1433 ; SSE2-NEXT: movdqa %xmm8, 336(%rdi)
1434 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1435 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1436 ; SSE2-NEXT: movdqa %xmm8, 320(%rdi)
1437 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1438 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1439 ; SSE2-NEXT: movdqa %xmm8, 304(%rdi)
1440 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1441 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1442 ; SSE2-NEXT: movdqa %xmm8, 288(%rdi)
1443 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1444 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1445 ; SSE2-NEXT: movdqa %xmm8, 272(%rdi)
1446 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1447 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1448 ; SSE2-NEXT: movdqa %xmm8, 256(%rdi)
1449 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1450 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1451 ; SSE2-NEXT: movdqa %xmm8, 240(%rdi)
1452 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1453 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1454 ; SSE2-NEXT: movdqa %xmm8, 224(%rdi)
1455 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1456 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1457 ; SSE2-NEXT: movdqa %xmm8, 208(%rdi)
1458 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1459 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1460 ; SSE2-NEXT: movdqa %xmm8, 192(%rdi)
1461 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1462 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1463 ; SSE2-NEXT: movdqa %xmm8, 176(%rdi)
1464 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1465 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1466 ; SSE2-NEXT: movdqa %xmm8, 160(%rdi)
1467 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1468 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1469 ; SSE2-NEXT: movdqa %xmm8, 144(%rdi)
1470 ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
1471 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8
1472 ; SSE2-NEXT: movdqa %xmm8, 128(%rdi)
1473 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm7
1474 ; SSE2-NEXT: movdqa %xmm7, 112(%rdi)
1475 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm6
1476 ; SSE2-NEXT: movdqa %xmm6, 96(%rdi)
1477 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm5
1478 ; SSE2-NEXT: movdqa %xmm5, 80(%rdi)
1479 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm4
1480 ; SSE2-NEXT: movdqa %xmm4, 64(%rdi)
1481 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm3
1482 ; SSE2-NEXT: movdqa %xmm3, 48(%rdi)
1483 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm2
1484 ; SSE2-NEXT: movdqa %xmm2, 32(%rdi)
1485 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm1
1486 ; SSE2-NEXT: movdqa %xmm1, 16(%rdi)
1487 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm0
1488 ; SSE2-NEXT: movdqa %xmm0, (%rdi)
1491 ; AVX1-LABEL: avg_v512i8_3:
1493 ; AVX1-NEXT: pushq %rbp
1494 ; AVX1-NEXT: movq %rsp, %rbp
1495 ; AVX1-NEXT: andq $-32, %rsp
1496 ; AVX1-NEXT: subq $32, %rsp
1497 ; AVX1-NEXT: movq %rdi, %rax
1498 ; AVX1-NEXT: vmovdqa 256(%rbp), %xmm8
1499 ; AVX1-NEXT: vpavgb 768(%rbp), %xmm8, %xmm8
1500 ; AVX1-NEXT: vmovdqa %xmm8, 496(%rdi)
1501 ; AVX1-NEXT: vmovdqa 240(%rbp), %xmm8
1502 ; AVX1-NEXT: vpavgb 752(%rbp), %xmm8, %xmm8
1503 ; AVX1-NEXT: vmovdqa %xmm8, 480(%rdi)
1504 ; AVX1-NEXT: vmovdqa 224(%rbp), %xmm8
1505 ; AVX1-NEXT: vpavgb 736(%rbp), %xmm8, %xmm8
1506 ; AVX1-NEXT: vmovdqa %xmm8, 464(%rdi)
1507 ; AVX1-NEXT: vmovdqa 208(%rbp), %xmm8
1508 ; AVX1-NEXT: vpavgb 720(%rbp), %xmm8, %xmm8
1509 ; AVX1-NEXT: vmovdqa %xmm8, 448(%rdi)
1510 ; AVX1-NEXT: vmovdqa 192(%rbp), %xmm8
1511 ; AVX1-NEXT: vpavgb 704(%rbp), %xmm8, %xmm8
1512 ; AVX1-NEXT: vmovdqa %xmm8, 432(%rdi)
1513 ; AVX1-NEXT: vmovdqa 176(%rbp), %xmm8
1514 ; AVX1-NEXT: vpavgb 688(%rbp), %xmm8, %xmm8
1515 ; AVX1-NEXT: vmovdqa %xmm8, 416(%rdi)
1516 ; AVX1-NEXT: vmovdqa 160(%rbp), %xmm8
1517 ; AVX1-NEXT: vpavgb 672(%rbp), %xmm8, %xmm8
1518 ; AVX1-NEXT: vmovdqa %xmm8, 400(%rdi)
1519 ; AVX1-NEXT: vmovdqa 144(%rbp), %xmm8
1520 ; AVX1-NEXT: vpavgb 656(%rbp), %xmm8, %xmm8
1521 ; AVX1-NEXT: vmovdqa %xmm8, 384(%rdi)
1522 ; AVX1-NEXT: vmovdqa 128(%rbp), %xmm8
1523 ; AVX1-NEXT: vpavgb 640(%rbp), %xmm8, %xmm8
1524 ; AVX1-NEXT: vmovdqa %xmm8, 368(%rdi)
1525 ; AVX1-NEXT: vmovdqa 112(%rbp), %xmm8
1526 ; AVX1-NEXT: vpavgb 624(%rbp), %xmm8, %xmm8
1527 ; AVX1-NEXT: vmovdqa %xmm8, 352(%rdi)
1528 ; AVX1-NEXT: vmovdqa 96(%rbp), %xmm8
1529 ; AVX1-NEXT: vpavgb 608(%rbp), %xmm8, %xmm8
1530 ; AVX1-NEXT: vmovdqa %xmm8, 336(%rdi)
1531 ; AVX1-NEXT: vmovdqa 80(%rbp), %xmm8
1532 ; AVX1-NEXT: vpavgb 592(%rbp), %xmm8, %xmm8
1533 ; AVX1-NEXT: vmovdqa %xmm8, 320(%rdi)
1534 ; AVX1-NEXT: vmovdqa 64(%rbp), %xmm8
1535 ; AVX1-NEXT: vpavgb 576(%rbp), %xmm8, %xmm8
1536 ; AVX1-NEXT: vmovdqa %xmm8, 304(%rdi)
1537 ; AVX1-NEXT: vmovdqa 48(%rbp), %xmm8
1538 ; AVX1-NEXT: vpavgb 560(%rbp), %xmm8, %xmm8
1539 ; AVX1-NEXT: vmovdqa %xmm8, 288(%rdi)
1540 ; AVX1-NEXT: vmovdqa 32(%rbp), %xmm8
1541 ; AVX1-NEXT: vpavgb 544(%rbp), %xmm8, %xmm8
1542 ; AVX1-NEXT: vmovdqa %xmm8, 272(%rdi)
1543 ; AVX1-NEXT: vmovdqa 16(%rbp), %xmm8
1544 ; AVX1-NEXT: vpavgb 528(%rbp), %xmm8, %xmm8
1545 ; AVX1-NEXT: vmovdqa %xmm8, 256(%rdi)
1546 ; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm8
1547 ; AVX1-NEXT: vpavgb 512(%rbp), %xmm8, %xmm8
1548 ; AVX1-NEXT: vmovdqa %xmm8, 240(%rdi)
1549 ; AVX1-NEXT: vpavgb 496(%rbp), %xmm7, %xmm7
1550 ; AVX1-NEXT: vmovdqa %xmm7, 224(%rdi)
1551 ; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm7
1552 ; AVX1-NEXT: vpavgb 480(%rbp), %xmm7, %xmm7
1553 ; AVX1-NEXT: vmovdqa %xmm7, 208(%rdi)
1554 ; AVX1-NEXT: vpavgb 464(%rbp), %xmm6, %xmm6
1555 ; AVX1-NEXT: vmovdqa %xmm6, 192(%rdi)
1556 ; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
1557 ; AVX1-NEXT: vpavgb 448(%rbp), %xmm6, %xmm6
1558 ; AVX1-NEXT: vmovdqa %xmm6, 176(%rdi)
1559 ; AVX1-NEXT: vpavgb 432(%rbp), %xmm5, %xmm5
1560 ; AVX1-NEXT: vmovdqa %xmm5, 160(%rdi)
1561 ; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
1562 ; AVX1-NEXT: vpavgb 416(%rbp), %xmm5, %xmm5
1563 ; AVX1-NEXT: vmovdqa %xmm5, 144(%rdi)
1564 ; AVX1-NEXT: vpavgb 400(%rbp), %xmm4, %xmm4
1565 ; AVX1-NEXT: vmovdqa %xmm4, 128(%rdi)
1566 ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
1567 ; AVX1-NEXT: vpavgb 384(%rbp), %xmm4, %xmm4
1568 ; AVX1-NEXT: vmovdqa %xmm4, 112(%rdi)
1569 ; AVX1-NEXT: vpavgb 368(%rbp), %xmm3, %xmm3
1570 ; AVX1-NEXT: vmovdqa %xmm3, 96(%rdi)
1571 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
1572 ; AVX1-NEXT: vpavgb 352(%rbp), %xmm3, %xmm3
1573 ; AVX1-NEXT: vmovdqa %xmm3, 80(%rdi)
1574 ; AVX1-NEXT: vpavgb 336(%rbp), %xmm2, %xmm2
1575 ; AVX1-NEXT: vmovdqa %xmm2, 64(%rdi)
1576 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
1577 ; AVX1-NEXT: vpavgb 320(%rbp), %xmm2, %xmm2
1578 ; AVX1-NEXT: vmovdqa %xmm2, 48(%rdi)
1579 ; AVX1-NEXT: vpavgb 304(%rbp), %xmm1, %xmm1
1580 ; AVX1-NEXT: vmovdqa %xmm1, 32(%rdi)
1581 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
1582 ; AVX1-NEXT: vpavgb 288(%rbp), %xmm1, %xmm1
1583 ; AVX1-NEXT: vmovdqa %xmm1, 16(%rdi)
1584 ; AVX1-NEXT: vpavgb 272(%rbp), %xmm0, %xmm0
1585 ; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
1586 ; AVX1-NEXT: movq %rbp, %rsp
1587 ; AVX1-NEXT: popq %rbp
1588 ; AVX1-NEXT: vzeroupper
1591 ; AVX2-LABEL: avg_v512i8_3:
1593 ; AVX2-NEXT: pushq %rbp
1594 ; AVX2-NEXT: movq %rsp, %rbp
1595 ; AVX2-NEXT: andq $-32, %rsp
1596 ; AVX2-NEXT: subq $32, %rsp
1597 ; AVX2-NEXT: movq %rdi, %rax
1598 ; AVX2-NEXT: vmovdqa 240(%rbp), %ymm8
1599 ; AVX2-NEXT: vmovdqa 208(%rbp), %ymm9
1600 ; AVX2-NEXT: vmovdqa 176(%rbp), %ymm10
1601 ; AVX2-NEXT: vmovdqa 144(%rbp), %ymm11
1602 ; AVX2-NEXT: vmovdqa 112(%rbp), %ymm12
1603 ; AVX2-NEXT: vmovdqa 80(%rbp), %ymm13
1604 ; AVX2-NEXT: vmovdqa 48(%rbp), %ymm14
1605 ; AVX2-NEXT: vmovdqa 16(%rbp), %ymm15
1606 ; AVX2-NEXT: vpavgb 272(%rbp), %ymm0, %ymm0
1607 ; AVX2-NEXT: vpavgb 304(%rbp), %ymm1, %ymm1
1608 ; AVX2-NEXT: vpavgb 336(%rbp), %ymm2, %ymm2
1609 ; AVX2-NEXT: vpavgb 368(%rbp), %ymm3, %ymm3
1610 ; AVX2-NEXT: vpavgb 400(%rbp), %ymm4, %ymm4
1611 ; AVX2-NEXT: vpavgb 432(%rbp), %ymm5, %ymm5
1612 ; AVX2-NEXT: vpavgb 464(%rbp), %ymm6, %ymm6
1613 ; AVX2-NEXT: vpavgb 496(%rbp), %ymm7, %ymm7
1614 ; AVX2-NEXT: vpavgb 528(%rbp), %ymm15, %ymm15
1615 ; AVX2-NEXT: vpavgb 560(%rbp), %ymm14, %ymm14
1616 ; AVX2-NEXT: vpavgb 592(%rbp), %ymm13, %ymm13
1617 ; AVX2-NEXT: vpavgb 624(%rbp), %ymm12, %ymm12
1618 ; AVX2-NEXT: vpavgb 656(%rbp), %ymm11, %ymm11
1619 ; AVX2-NEXT: vpavgb 688(%rbp), %ymm10, %ymm10
1620 ; AVX2-NEXT: vpavgb 720(%rbp), %ymm9, %ymm9
1621 ; AVX2-NEXT: vpavgb 752(%rbp), %ymm8, %ymm8
1622 ; AVX2-NEXT: vmovdqa %ymm8, 480(%rdi)
1623 ; AVX2-NEXT: vmovdqa %ymm9, 448(%rdi)
1624 ; AVX2-NEXT: vmovdqa %ymm10, 416(%rdi)
1625 ; AVX2-NEXT: vmovdqa %ymm11, 384(%rdi)
1626 ; AVX2-NEXT: vmovdqa %ymm12, 352(%rdi)
1627 ; AVX2-NEXT: vmovdqa %ymm13, 320(%rdi)
1628 ; AVX2-NEXT: vmovdqa %ymm14, 288(%rdi)
1629 ; AVX2-NEXT: vmovdqa %ymm15, 256(%rdi)
1630 ; AVX2-NEXT: vmovdqa %ymm7, 224(%rdi)
1631 ; AVX2-NEXT: vmovdqa %ymm6, 192(%rdi)
1632 ; AVX2-NEXT: vmovdqa %ymm5, 160(%rdi)
1633 ; AVX2-NEXT: vmovdqa %ymm4, 128(%rdi)
1634 ; AVX2-NEXT: vmovdqa %ymm3, 96(%rdi)
1635 ; AVX2-NEXT: vmovdqa %ymm2, 64(%rdi)
1636 ; AVX2-NEXT: vmovdqa %ymm1, 32(%rdi)
1637 ; AVX2-NEXT: vmovdqa %ymm0, (%rdi)
1638 ; AVX2-NEXT: movq %rbp, %rsp
1639 ; AVX2-NEXT: popq %rbp
1640 ; AVX2-NEXT: vzeroupper
1643 ; AVX512F-LABEL: avg_v512i8_3:
1645 ; AVX512F-NEXT: pushq %rbp
1646 ; AVX512F-NEXT: movq %rsp, %rbp
1647 ; AVX512F-NEXT: andq $-64, %rsp
1648 ; AVX512F-NEXT: subq $64, %rsp
1649 ; AVX512F-NEXT: movq %rdi, %rax
1650 ; AVX512F-NEXT: vpavgb 16(%rbp), %ymm0, %ymm8
1651 ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm0
1652 ; AVX512F-NEXT: vpavgb 48(%rbp), %ymm0, %ymm0
1653 ; AVX512F-NEXT: vpavgb 80(%rbp), %ymm1, %ymm9
1654 ; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm1
1655 ; AVX512F-NEXT: vpavgb 112(%rbp), %ymm1, %ymm1
1656 ; AVX512F-NEXT: vpavgb 144(%rbp), %ymm2, %ymm10
1657 ; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm2
1658 ; AVX512F-NEXT: vpavgb 176(%rbp), %ymm2, %ymm2
1659 ; AVX512F-NEXT: vpavgb 208(%rbp), %ymm3, %ymm11
1660 ; AVX512F-NEXT: vextracti64x4 $1, %zmm3, %ymm3
1661 ; AVX512F-NEXT: vpavgb 240(%rbp), %ymm3, %ymm3
1662 ; AVX512F-NEXT: vpavgb 272(%rbp), %ymm4, %ymm12
1663 ; AVX512F-NEXT: vextracti64x4 $1, %zmm4, %ymm4
1664 ; AVX512F-NEXT: vpavgb 304(%rbp), %ymm4, %ymm4
1665 ; AVX512F-NEXT: vpavgb 336(%rbp), %ymm5, %ymm13
1666 ; AVX512F-NEXT: vextracti64x4 $1, %zmm5, %ymm5
1667 ; AVX512F-NEXT: vpavgb 368(%rbp), %ymm5, %ymm5
1668 ; AVX512F-NEXT: vpavgb 400(%rbp), %ymm6, %ymm14
1669 ; AVX512F-NEXT: vextracti64x4 $1, %zmm6, %ymm6
1670 ; AVX512F-NEXT: vpavgb 432(%rbp), %ymm6, %ymm6
1671 ; AVX512F-NEXT: vpavgb 464(%rbp), %ymm7, %ymm15
1672 ; AVX512F-NEXT: vextracti64x4 $1, %zmm7, %ymm7
1673 ; AVX512F-NEXT: vpavgb 496(%rbp), %ymm7, %ymm7
1674 ; AVX512F-NEXT: vmovdqa %ymm7, 480(%rdi)
1675 ; AVX512F-NEXT: vmovdqa %ymm15, 448(%rdi)
1676 ; AVX512F-NEXT: vmovdqa %ymm6, 416(%rdi)
1677 ; AVX512F-NEXT: vmovdqa %ymm14, 384(%rdi)
1678 ; AVX512F-NEXT: vmovdqa %ymm5, 352(%rdi)
1679 ; AVX512F-NEXT: vmovdqa %ymm13, 320(%rdi)
1680 ; AVX512F-NEXT: vmovdqa %ymm4, 288(%rdi)
1681 ; AVX512F-NEXT: vmovdqa %ymm12, 256(%rdi)
1682 ; AVX512F-NEXT: vmovdqa %ymm3, 224(%rdi)
1683 ; AVX512F-NEXT: vmovdqa %ymm11, 192(%rdi)
1684 ; AVX512F-NEXT: vmovdqa %ymm2, 160(%rdi)
1685 ; AVX512F-NEXT: vmovdqa %ymm10, 128(%rdi)
1686 ; AVX512F-NEXT: vmovdqa %ymm1, 96(%rdi)
1687 ; AVX512F-NEXT: vmovdqa %ymm9, 64(%rdi)
1688 ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rdi)
1689 ; AVX512F-NEXT: vmovdqa %ymm8, (%rdi)
1690 ; AVX512F-NEXT: movq %rbp, %rsp
1691 ; AVX512F-NEXT: popq %rbp
1692 ; AVX512F-NEXT: vzeroupper
1693 ; AVX512F-NEXT: retq
1695 ; AVX512BW-LABEL: avg_v512i8_3:
1696 ; AVX512BW: # %bb.0:
1697 ; AVX512BW-NEXT: pushq %rbp
1698 ; AVX512BW-NEXT: movq %rsp, %rbp
1699 ; AVX512BW-NEXT: andq $-64, %rsp
1700 ; AVX512BW-NEXT: subq $64, %rsp
1701 ; AVX512BW-NEXT: movq %rdi, %rax
1702 ; AVX512BW-NEXT: vpavgb 16(%rbp), %zmm0, %zmm0
1703 ; AVX512BW-NEXT: vpavgb 80(%rbp), %zmm1, %zmm1
1704 ; AVX512BW-NEXT: vpavgb 144(%rbp), %zmm2, %zmm2
1705 ; AVX512BW-NEXT: vpavgb 208(%rbp), %zmm3, %zmm3
1706 ; AVX512BW-NEXT: vpavgb 272(%rbp), %zmm4, %zmm4
1707 ; AVX512BW-NEXT: vpavgb 336(%rbp), %zmm5, %zmm5
1708 ; AVX512BW-NEXT: vpavgb 400(%rbp), %zmm6, %zmm6
1709 ; AVX512BW-NEXT: vpavgb 464(%rbp), %zmm7, %zmm7
1710 ; AVX512BW-NEXT: vmovdqa64 %zmm7, 448(%rdi)
1711 ; AVX512BW-NEXT: vmovdqa64 %zmm6, 384(%rdi)
1712 ; AVX512BW-NEXT: vmovdqa64 %zmm5, 320(%rdi)
1713 ; AVX512BW-NEXT: vmovdqa64 %zmm4, 256(%rdi)
1714 ; AVX512BW-NEXT: vmovdqa64 %zmm3, 192(%rdi)
1715 ; AVX512BW-NEXT: vmovdqa64 %zmm2, 128(%rdi)
1716 ; AVX512BW-NEXT: vmovdqa64 %zmm1, 64(%rdi)
1717 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rdi)
1718 ; AVX512BW-NEXT: movq %rbp, %rsp
1719 ; AVX512BW-NEXT: popq %rbp
1720 ; AVX512BW-NEXT: vzeroupper
1721 ; AVX512BW-NEXT: retq
1722 %za = zext <512 x i8> %a to <512 x i16>
1723 %zb = zext <512 x i8> %b to <512 x i16>
1724 %add = add nuw nsw <512 x i16> %za, %zb
1725 %add1 = add nuw nsw <512 x i16> %add, splat (i16 1)
1726 %lshr = lshr <512 x i16> %add1, splat (i16 1)
1727 %res = trunc <512 x i16> %lshr to <512 x i8>
1731 ; This is not an avg, but it's structurally similar and previously caused a crash
1732 ; because the constants can't be read with APInt::getZExtValue.
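; Illustrative note, not part of the autogenerated checks: the all-ones
; <16 x i128> constant below has 128 active bits, so APInt::getZExtValue(),
; which asserts that the value fits in a uint64_t, cannot read it. A minimal
; hedged C++ sketch of the guard a combine needs (the helper name is
; hypothetical; getActiveBits/getZExtValue are the real llvm::APInt API):
;
;   #include "llvm/ADT/APInt.h"
;   // True only when the constant is narrow enough to read as a uint64_t.
;   static bool fitsInU64(const llvm::APInt &C) {
;     return C.getActiveBits() <= 64; // otherwise getZExtValue() asserts
;   }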
1733 define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind {
1734 ; SSE2-LABEL: not_avg_v16i8_wide_constants:
1736 ; SSE2-NEXT: pushq %rbp
1737 ; SSE2-NEXT: pushq %r15
1738 ; SSE2-NEXT: pushq %r14
1739 ; SSE2-NEXT: pushq %r13
1740 ; SSE2-NEXT: pushq %r12
1741 ; SSE2-NEXT: pushq %rbx
1742 ; SSE2-NEXT: movaps (%rdi), %xmm1
1743 ; SSE2-NEXT: movaps (%rsi), %xmm0
1744 ; SSE2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
1745 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
1746 ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
1747 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
1748 ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
1749 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
1750 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
1751 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
1752 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
1753 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
1754 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d
1755 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d
1756 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx
1757 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r14d
1758 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r15d
1759 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r12d
1760 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r13d
1761 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
1762 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
1763 ; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
1764 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp
1765 ; SSE2-NEXT: addq %rdx, %rbp
1766 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
1767 ; SSE2-NEXT: addq %rcx, %rdx
1768 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
1769 ; SSE2-NEXT: leaq -1(%r13,%rcx), %r13
1770 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
1771 ; SSE2-NEXT: leaq -1(%r12,%rcx), %r12
1772 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
1773 ; SSE2-NEXT: leaq -1(%r15,%rcx), %r15
1774 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
1775 ; SSE2-NEXT: leaq -1(%r14,%rcx), %r14
1776 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
1777 ; SSE2-NEXT: leaq -1(%rbx,%rcx), %rbx
1778 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
1779 ; SSE2-NEXT: leaq -1(%r11,%rcx), %r11
1780 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
1781 ; SSE2-NEXT: leaq -1(%r10,%rcx), %r10
1782 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
1783 ; SSE2-NEXT: leaq -1(%r9,%rcx), %r9
1784 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
1785 ; SSE2-NEXT: leaq -1(%r8,%rcx), %r8
1786 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
1787 ; SSE2-NEXT: leaq -1(%rdi,%rcx), %rdi
1788 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
1789 ; SSE2-NEXT: leaq -1(%rsi,%rcx), %rsi
1790 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
1791 ; SSE2-NEXT: leaq -1(%rax,%rcx), %rax
1792 ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
1793 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
1794 ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
1795 ; SSE2-NEXT: leaq -1(%rcx,%rax), %rax
1796 ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
1797 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
1798 ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
1799 ; SSE2-NEXT: leaq -1(%rcx,%rax), %rax
1800 ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
1801 ; SSE2-NEXT: xorl %ecx, %ecx
1802 ; SSE2-NEXT: addq $-1, %rbp
1803 ; SSE2-NEXT: movl $0, %eax
1804 ; SSE2-NEXT: adcq $-1, %rax
1805 ; SSE2-NEXT: addq $-1, %rdx
1806 ; SSE2-NEXT: adcq $-1, %rcx
1807 ; SSE2-NEXT: shldq $63, %rdx, %rcx
1808 ; SSE2-NEXT: shldq $63, %rbp, %rax
1809 ; SSE2-NEXT: movq %rax, %xmm1
1810 ; SSE2-NEXT: movq %rcx, %xmm0
1811 ; SSE2-NEXT: shrq %r13
1812 ; SSE2-NEXT: movq %r13, %xmm3
1813 ; SSE2-NEXT: shrq %r12
1814 ; SSE2-NEXT: movq %r12, %xmm2
1815 ; SSE2-NEXT: shrq %r15
1816 ; SSE2-NEXT: movq %r15, %xmm5
1817 ; SSE2-NEXT: shrq %r14
1818 ; SSE2-NEXT: movq %r14, %xmm4
1819 ; SSE2-NEXT: shrq %rbx
1820 ; SSE2-NEXT: movq %rbx, %xmm6
1821 ; SSE2-NEXT: shrq %r11
1822 ; SSE2-NEXT: movq %r11, %xmm7
1823 ; SSE2-NEXT: shrq %r10
1824 ; SSE2-NEXT: movq %r10, %xmm9
1825 ; SSE2-NEXT: shrq %r9
1826 ; SSE2-NEXT: movq %r9, %xmm8
1827 ; SSE2-NEXT: shrq %r8
1828 ; SSE2-NEXT: movq %r8, %xmm11
1829 ; SSE2-NEXT: shrq %rdi
1830 ; SSE2-NEXT: movq %rdi, %xmm12
1831 ; SSE2-NEXT: shrq %rsi
1832 ; SSE2-NEXT: movq %rsi, %xmm13
1833 ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
1834 ; SSE2-NEXT: shrq %rax
1835 ; SSE2-NEXT: movq %rax, %xmm10
1836 ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
1837 ; SSE2-NEXT: shrq %rax
1838 ; SSE2-NEXT: movq %rax, %xmm14
1839 ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
1840 ; SSE2-NEXT: shrq %rax
1841 ; SSE2-NEXT: movq %rax, %xmm15
1842 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
1843 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
1844 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,65535,65535,65535,65535]
1845 ; SSE2-NEXT: pand %xmm1, %xmm0
1846 ; SSE2-NEXT: pslld $16, %xmm2
1847 ; SSE2-NEXT: pandn %xmm2, %xmm1
1848 ; SSE2-NEXT: por %xmm0, %xmm1
1849 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
1850 ; SSE2-NEXT: psllq $48, %xmm4
1851 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
1852 ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,65535,65535,65535,65535]
1853 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm7[0,0,1,1]
1854 ; SSE2-NEXT: pand %xmm0, %xmm2
1855 ; SSE2-NEXT: pandn %xmm4, %xmm0
1856 ; SSE2-NEXT: por %xmm2, %xmm0
1857 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
1858 ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1859 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3],xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7]
1860 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
1861 ; SSE2-NEXT: pslldq {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm8[0,1]
1862 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm12[0,0,0,0]
1863 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
1864 ; SSE2-NEXT: por %xmm8, %xmm0
1865 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm13[0],xmm10[1],xmm13[1],xmm10[2],xmm13[2],xmm10[3],xmm13[3],xmm10[4],xmm13[4],xmm10[5],xmm13[5],xmm10[6],xmm13[6],xmm10[7],xmm13[7]
1866 ; SSE2-NEXT: pslldq {{.*#+}} xmm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm10[0,1,2,3,4,5]
1867 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,65535,0,65535,65535]
1868 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
1869 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm15[0,1,0,1]
1870 ; SSE2-NEXT: pand %xmm2, %xmm3
1871 ; SSE2-NEXT: pandn %xmm10, %xmm2
1872 ; SSE2-NEXT: por %xmm3, %xmm2
1873 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,2,2,2]
1874 ; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
1875 ; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
1876 ; SSE2-NEXT: movupd %xmm2, (%rax)
1877 ; SSE2-NEXT: popq %rbx
1878 ; SSE2-NEXT: popq %r12
1879 ; SSE2-NEXT: popq %r13
1880 ; SSE2-NEXT: popq %r14
1881 ; SSE2-NEXT: popq %r15
1882 ; SSE2-NEXT: popq %rbp
1885 ; AVX1-LABEL: not_avg_v16i8_wide_constants:
1887 ; AVX1-NEXT: pushq %rbp
1888 ; AVX1-NEXT: pushq %r15
1889 ; AVX1-NEXT: pushq %r14
1890 ; AVX1-NEXT: pushq %r13
1891 ; AVX1-NEXT: pushq %r12
1892 ; AVX1-NEXT: pushq %rbx
1893 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
1894 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
1895 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
1896 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
1897 ; AVX1-NEXT: vpextrw $4, %xmm0, %eax
1898 ; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
1899 ; AVX1-NEXT: vpextrw $5, %xmm0, %eax
1900 ; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
1901 ; AVX1-NEXT: vpextrw $6, %xmm0, %ebx
1902 ; AVX1-NEXT: vpextrw $7, %xmm0, %esi
1903 ; AVX1-NEXT: vpextrw $0, %xmm3, %edi
1904 ; AVX1-NEXT: vpextrw $1, %xmm3, %r8d
1905 ; AVX1-NEXT: vpextrw $2, %xmm3, %r9d
1906 ; AVX1-NEXT: vpextrw $3, %xmm3, %r10d
1907 ; AVX1-NEXT: vpextrw $4, %xmm3, %r11d
1908 ; AVX1-NEXT: vpextrw $5, %xmm3, %r14d
1909 ; AVX1-NEXT: vpextrw $6, %xmm3, %r15d
1910 ; AVX1-NEXT: vpextrw $7, %xmm3, %edx
1911 ; AVX1-NEXT: vpextrw $1, %xmm0, %eax
1912 ; AVX1-NEXT: vpextrw $0, %xmm0, %r12d
1913 ; AVX1-NEXT: vpextrw $1, %xmm1, %ecx
1914 ; AVX1-NEXT: addq %rax, %rcx
1915 ; AVX1-NEXT: vpextrw $0, %xmm1, %eax
1916 ; AVX1-NEXT: addq %r12, %rax
1917 ; AVX1-NEXT: vpextrw $7, %xmm2, %r12d
1918 ; AVX1-NEXT: leaq -1(%rdx,%r12), %rdx
1919 ; AVX1-NEXT: vpextrw $6, %xmm2, %r12d
1920 ; AVX1-NEXT: leaq -1(%r15,%r12), %rbp
1921 ; AVX1-NEXT: vpextrw $5, %xmm2, %r15d
1922 ; AVX1-NEXT: leaq -1(%r14,%r15), %r13
1923 ; AVX1-NEXT: vpextrw $4, %xmm2, %r14d
1924 ; AVX1-NEXT: leaq -1(%r11,%r14), %r12
1925 ; AVX1-NEXT: vpextrw $3, %xmm2, %r11d
1926 ; AVX1-NEXT: leaq -1(%r10,%r11), %r15
1927 ; AVX1-NEXT: vpextrw $2, %xmm2, %r10d
1928 ; AVX1-NEXT: leaq -1(%r9,%r10), %r14
1929 ; AVX1-NEXT: vpextrw $1, %xmm2, %r9d
1930 ; AVX1-NEXT: leaq -1(%r8,%r9), %r11
1931 ; AVX1-NEXT: vpextrw $0, %xmm2, %r8d
1932 ; AVX1-NEXT: leaq -1(%rdi,%r8), %r10
1933 ; AVX1-NEXT: vpextrw $7, %xmm1, %edi
1934 ; AVX1-NEXT: leaq -1(%rsi,%rdi), %r9
1935 ; AVX1-NEXT: vpextrw $6, %xmm1, %esi
1936 ; AVX1-NEXT: leaq -1(%rbx,%rsi), %r8
1937 ; AVX1-NEXT: vpextrw $5, %xmm1, %esi
1938 ; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
1939 ; AVX1-NEXT: leaq -1(%rdi,%rsi), %rsi
1940 ; AVX1-NEXT: vpextrw $4, %xmm1, %edi
1941 ; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
1942 ; AVX1-NEXT: leaq -1(%rbx,%rdi), %rdi
1943 ; AVX1-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
1944 ; AVX1-NEXT: vpextrw $3, %xmm0, %edi
1945 ; AVX1-NEXT: vpextrw $3, %xmm1, %ebx
1946 ; AVX1-NEXT: leaq -1(%rdi,%rbx), %rdi
1947 ; AVX1-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
1948 ; AVX1-NEXT: vpextrw $2, %xmm0, %edi
1949 ; AVX1-NEXT: vpextrw $2, %xmm1, %ebx
1950 ; AVX1-NEXT: leaq -1(%rdi,%rbx), %rdi
1951 ; AVX1-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
1952 ; AVX1-NEXT: xorl %edi, %edi
1953 ; AVX1-NEXT: addq $-1, %rcx
1954 ; AVX1-NEXT: movl $0, %ebx
1955 ; AVX1-NEXT: adcq $-1, %rbx
1956 ; AVX1-NEXT: addq $-1, %rax
1957 ; AVX1-NEXT: adcq $-1, %rdi
1958 ; AVX1-NEXT: shldq $63, %rax, %rdi
1959 ; AVX1-NEXT: shldq $63, %rcx, %rbx
1960 ; AVX1-NEXT: shrq %rdx
1961 ; AVX1-NEXT: vmovq %rdx, %xmm0
1962 ; AVX1-NEXT: shrq %rbp
1963 ; AVX1-NEXT: vmovq %rbp, %xmm1
1964 ; AVX1-NEXT: shrq %r13
1965 ; AVX1-NEXT: vmovq %r13, %xmm2
1966 ; AVX1-NEXT: shrq %r12
1967 ; AVX1-NEXT: vmovq %r12, %xmm3
1968 ; AVX1-NEXT: shrq %r15
1969 ; AVX1-NEXT: vmovq %r15, %xmm4
1970 ; AVX1-NEXT: shrq %r14
1971 ; AVX1-NEXT: vmovq %r14, %xmm5
1972 ; AVX1-NEXT: shrq %r11
1973 ; AVX1-NEXT: vmovq %r11, %xmm6
1974 ; AVX1-NEXT: shrq %r10
1975 ; AVX1-NEXT: vmovq %r10, %xmm7
1976 ; AVX1-NEXT: shrq %r9
1977 ; AVX1-NEXT: vmovq %r9, %xmm8
1978 ; AVX1-NEXT: shrq %r8
1979 ; AVX1-NEXT: vmovq %r8, %xmm9
1980 ; AVX1-NEXT: shrq %rsi
1981 ; AVX1-NEXT: vmovq %rsi, %xmm10
1982 ; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
1983 ; AVX1-NEXT: shrq %rax
1984 ; AVX1-NEXT: vmovq %rax, %xmm11
1985 ; AVX1-NEXT: vmovq %rbx, %xmm12
1986 ; AVX1-NEXT: vmovq %rdi, %xmm13
1987 ; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
1988 ; AVX1-NEXT: shrq %rax
1989 ; AVX1-NEXT: vmovq %rax, %xmm14
1990 ; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
1991 ; AVX1-NEXT: shrq %rax
1992 ; AVX1-NEXT: vmovq %rax, %xmm15
1993 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
1994 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
1995 ; AVX1-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
1996 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
1997 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6],xmm0[7]
1998 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
1999 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
2000 ; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5]
2001 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
2002 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4],xmm1[5],xmm2[6,7]
2003 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
2004 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
2005 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
2006 ; AVX1-NEXT: vpsllq $48, %xmm1, %xmm1
2007 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
2008 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5,6,7]
2009 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
2010 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
2011 ; AVX1-NEXT: vpslld $16, %xmm3, %xmm3
2012 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3,4,5,6,7]
2013 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5,6,7]
2014 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
2015 ; AVX1-NEXT: vmovdqu %xmm0, (%rax)
2016 ; AVX1-NEXT: popq %rbx
2017 ; AVX1-NEXT: popq %r12
2018 ; AVX1-NEXT: popq %r13
2019 ; AVX1-NEXT: popq %r14
2020 ; AVX1-NEXT: popq %r15
2021 ; AVX1-NEXT: popq %rbp
2024 ; AVX2-LABEL: not_avg_v16i8_wide_constants:
2026 ; AVX2-NEXT: pushq %rbp
2027 ; AVX2-NEXT: pushq %r15
2028 ; AVX2-NEXT: pushq %r14
2029 ; AVX2-NEXT: pushq %r13
2030 ; AVX2-NEXT: pushq %r12
2031 ; AVX2-NEXT: pushq %rbx
2032 ; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
2033 ; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
2034 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
2035 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
2036 ; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm0
2037 ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
2038 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
2039 ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
2040 ; AVX2-NEXT: vmovq %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
2041 ; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
2042 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
2043 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
2044 ; AVX2-NEXT: vmovq %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
2045 ; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm7
2046 ; AVX2-NEXT: vmovq %xmm7, %rsi
2047 ; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
2048 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
2049 ; AVX2-NEXT: vmovq %xmm2, %rdx
2050 ; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm8
2051 ; AVX2-NEXT: vmovq %xmm8, %r8
2052 ; AVX2-NEXT: vpextrq $1, %xmm8, %r13
2053 ; AVX2-NEXT: vpextrq $1, %xmm2, %r14
2054 ; AVX2-NEXT: vpextrq $1, %xmm7, %r15
2055 ; AVX2-NEXT: vpextrq $1, %xmm6, %r12
2056 ; AVX2-NEXT: vpextrq $1, %xmm4, %rbx
2057 ; AVX2-NEXT: vpextrq $1, %xmm1, %rdi
2058 ; AVX2-NEXT: vpextrq $1, %xmm3, %rcx
2059 ; AVX2-NEXT: vmovq %xmm3, %rax
2060 ; AVX2-NEXT: vpextrq $1, %xmm0, %r11
2061 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
2062 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
2063 ; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm2
2064 ; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3
2065 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
2066 ; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4
2067 ; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm5
2068 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm8 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
2069 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm5 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero
2070 ; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm7
2071 ; AVX2-NEXT: vextracti128 $1, %ymm8, %xmm8
2072 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm8 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero
2073 ; AVX2-NEXT: vextracti128 $1, %ymm8, %xmm9
2074 ; AVX2-NEXT: vpextrq $1, %xmm9, %r9
2075 ; AVX2-NEXT: addq %r13, %r9
2076 ; AVX2-NEXT: movq %r9, %r13
2077 ; AVX2-NEXT: vpextrq $1, %xmm8, %r9
2078 ; AVX2-NEXT: addq %r14, %r9
2079 ; AVX2-NEXT: movq %r9, %r14
2080 ; AVX2-NEXT: vpextrq $1, %xmm7, %r10
2081 ; AVX2-NEXT: addq %r15, %r10
2082 ; AVX2-NEXT: vpextrq $1, %xmm5, %r15
2083 ; AVX2-NEXT: addq %r12, %r15
2084 ; AVX2-NEXT: vpextrq $1, %xmm4, %r12
2085 ; AVX2-NEXT: addq %rbx, %r12
2086 ; AVX2-NEXT: vpextrq $1, %xmm3, %rbp
2087 ; AVX2-NEXT: addq %rdi, %rbp
2088 ; AVX2-NEXT: vpextrq $1, %xmm6, %r9
2089 ; AVX2-NEXT: addq %rcx, %r9
2090 ; AVX2-NEXT: vmovq %xmm6, %rdi
2091 ; AVX2-NEXT: addq %rax, %rdi
2092 ; AVX2-NEXT: vpextrq $1, %xmm2, %rcx
2093 ; AVX2-NEXT: addq %r11, %rcx
2094 ; AVX2-NEXT: vmovq %xmm9, %r11
2095 ; AVX2-NEXT: leaq -1(%r8,%r11), %rax
2096 ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
2097 ; AVX2-NEXT: vmovq %xmm8, %r8
2098 ; AVX2-NEXT: leaq -1(%rdx,%r8), %rax
2099 ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
2100 ; AVX2-NEXT: vmovq %xmm7, %rdx
2101 ; AVX2-NEXT: leaq -1(%rsi,%rdx), %rax
2102 ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
2103 ; AVX2-NEXT: vmovq %xmm5, %rdx
2104 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
2105 ; AVX2-NEXT: leaq -1(%rax,%rdx), %rax
2106 ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
2107 ; AVX2-NEXT: vmovq %xmm4, %rdx
2108 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
2109 ; AVX2-NEXT: leaq -1(%rax,%rdx), %rax
2110 ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
2111 ; AVX2-NEXT: vmovq %xmm1, %rdx
2112 ; AVX2-NEXT: vmovq %xmm3, %rsi
2113 ; AVX2-NEXT: leaq -1(%rdx,%rsi), %rax
2114 ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
2115 ; AVX2-NEXT: vmovq %xmm0, %rdx
2116 ; AVX2-NEXT: vmovq %xmm2, %rsi
2117 ; AVX2-NEXT: leaq -1(%rdx,%rsi), %rdx
2118 ; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
2119 ; AVX2-NEXT: addq $-1, %r13
2120 ; AVX2-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
2121 ; AVX2-NEXT: movl $0, %edx
2122 ; AVX2-NEXT: adcq $-1, %rdx
2123 ; AVX2-NEXT: addq $-1, %r14
2124 ; AVX2-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
2125 ; AVX2-NEXT: movl $0, %esi
2126 ; AVX2-NEXT: adcq $-1, %rsi
2127 ; AVX2-NEXT: addq $-1, %r10
2128 ; AVX2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
2129 ; AVX2-NEXT: movl $0, %r8d
2130 ; AVX2-NEXT: adcq $-1, %r8
2131 ; AVX2-NEXT: addq $-1, %r15
2132 ; AVX2-NEXT: movl $0, %r10d
2133 ; AVX2-NEXT: adcq $-1, %r10
2134 ; AVX2-NEXT: addq $-1, %r12
2135 ; AVX2-NEXT: movl $0, %ebx
2136 ; AVX2-NEXT: adcq $-1, %rbx
2137 ; AVX2-NEXT: addq $-1, %rbp
2138 ; AVX2-NEXT: movl $0, %r14d
2139 ; AVX2-NEXT: adcq $-1, %r14
2140 ; AVX2-NEXT: addq $-1, %r9
2141 ; AVX2-NEXT: movl $0, %r13d
2142 ; AVX2-NEXT: adcq $-1, %r13
2143 ; AVX2-NEXT: addq $-1, %rdi
2144 ; AVX2-NEXT: movl $0, %r11d
2145 ; AVX2-NEXT: adcq $-1, %r11
2146 ; AVX2-NEXT: addq $-1, %rcx
2147 ; AVX2-NEXT: movl $0, %eax
2148 ; AVX2-NEXT: adcq $-1, %rax
2149 ; AVX2-NEXT: shldq $63, %rcx, %rax
2150 ; AVX2-NEXT: shldq $63, %rdi, %r11
2151 ; AVX2-NEXT: shldq $63, %r9, %r13
2152 ; AVX2-NEXT: shldq $63, %rbp, %r14
2153 ; AVX2-NEXT: shldq $63, %r12, %rbx
2154 ; AVX2-NEXT: shldq $63, %r15, %r10
2155 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
2156 ; AVX2-NEXT: shldq $63, %rcx, %r8
2157 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
2158 ; AVX2-NEXT: shldq $63, %rcx, %rsi
2159 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
2160 ; AVX2-NEXT: shldq $63, %rcx, %rdx
2161 ; AVX2-NEXT: vmovq %rdx, %xmm0
2162 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
2163 ; AVX2-NEXT: shrq %rcx
2164 ; AVX2-NEXT: vmovq %rcx, %xmm1
2165 ; AVX2-NEXT: vmovq %rsi, %xmm2
2166 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
2167 ; AVX2-NEXT: shrq %rcx
2168 ; AVX2-NEXT: vmovq %rcx, %xmm3
2169 ; AVX2-NEXT: vmovq %r8, %xmm4
2170 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
2171 ; AVX2-NEXT: shrq %rcx
2172 ; AVX2-NEXT: vmovq %rcx, %xmm5
2173 ; AVX2-NEXT: vmovq %r10, %xmm6
2174 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
2175 ; AVX2-NEXT: shrq %rcx
2176 ; AVX2-NEXT: vmovq %rcx, %xmm7
2177 ; AVX2-NEXT: vmovq %rbx, %xmm8
2178 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
2179 ; AVX2-NEXT: shrq %rcx
2180 ; AVX2-NEXT: vmovq %rcx, %xmm9
2181 ; AVX2-NEXT: vmovq %r14, %xmm10
2182 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
2183 ; AVX2-NEXT: shrq %rcx
2184 ; AVX2-NEXT: vmovq %rcx, %xmm11
2185 ; AVX2-NEXT: vmovq %r13, %xmm12
2186 ; AVX2-NEXT: vmovq %r11, %xmm13
2187 ; AVX2-NEXT: vmovq %rax, %xmm14
2188 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
2189 ; AVX2-NEXT: shrq %rax
2190 ; AVX2-NEXT: vmovq %rax, %xmm15
2191 ; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
2192 ; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
2193 ; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
2194 ; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
2195 ; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
2196 ; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
2197 ; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
2198 ; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
2199 ; AVX2-NEXT: vpbroadcastw %xmm2, %xmm2
2200 ; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4],xmm1[5],xmm2[6,7]
2201 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
2202 ; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
2203 ; AVX2-NEXT: vpsllq $48, %xmm1, %xmm1
2204 ; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
2205 ; AVX2-NEXT: vpbroadcastw %xmm2, %xmm2
2206 ; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5,6,7]
2207 ; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
2208 ; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
2209 ; AVX2-NEXT: vpslld $16, %xmm3, %xmm3
2210 ; AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3,4,5,6,7]
2211 ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3]
2212 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
2213 ; AVX2-NEXT: vmovdqu %xmm0, (%rax)
2214 ; AVX2-NEXT: popq %rbx
2215 ; AVX2-NEXT: popq %r12
2216 ; AVX2-NEXT: popq %r13
2217 ; AVX2-NEXT: popq %r14
2218 ; AVX2-NEXT: popq %r15
2219 ; AVX2-NEXT: popq %rbp
2220 ; AVX2-NEXT: vzeroupper
2223 ; AVX512-LABEL: not_avg_v16i8_wide_constants:
2225 ; AVX512-NEXT: pushq %rbp
2226 ; AVX512-NEXT: pushq %r15
2227 ; AVX512-NEXT: pushq %r14
2228 ; AVX512-NEXT: pushq %r13
2229 ; AVX512-NEXT: pushq %r12
2230 ; AVX512-NEXT: pushq %rbx
2231 ; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
2232 ; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
2233 ; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
2234 ; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm0
2235 ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
2236 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
2237 ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
2238 ; AVX512-NEXT: vmovq %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
2239 ; AVX512-NEXT: vpextrq $1, %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
2240 ; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm3
2241 ; AVX512-NEXT: vmovq %xmm3, %r13
2242 ; AVX512-NEXT: vpextrq $1, %xmm3, %rsi
2243 ; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2
2244 ; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
2245 ; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3
2246 ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
2247 ; AVX512-NEXT: vmovq %xmm3, %rdi
2248 ; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm5
2249 ; AVX512-NEXT: vmovq %xmm5, %r8
2250 ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
2251 ; AVX512-NEXT: vmovq %xmm2, %r9
2252 ; AVX512-NEXT: vpextrq $1, %xmm2, %r10
2253 ; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2
2254 ; AVX512-NEXT: vmovq %xmm2, %r11
2255 ; AVX512-NEXT: vpextrq $1, %xmm2, %rbx
2256 ; AVX512-NEXT: vpextrq $1, %xmm5, %rdx
2257 ; AVX512-NEXT: vpextrq $1, %xmm3, %rcx
2258 ; AVX512-NEXT: vpextrq $1, %xmm1, %rax
2259 ; AVX512-NEXT: vpextrq $1, %xmm0, %r14
2260 ; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
2261 ; AVX512-NEXT: vextracti128 $1, %ymm5, %xmm2
2262 ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
2263 ; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3
2264 ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
2265 ; AVX512-NEXT: vextracti128 $1, %ymm5, %xmm6
2266 ; AVX512-NEXT: vextracti128 $1, %ymm4, %xmm4
2267 ; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm7 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
2268 ; AVX512-NEXT: vextracti128 $1, %ymm7, %xmm4
2269 ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
2270 ; AVX512-NEXT: vextracti128 $1, %ymm4, %xmm8
2271 ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
2272 ; AVX512-NEXT: vextracti128 $1, %ymm7, %xmm9
2273 ; AVX512-NEXT: vpextrq $1, %xmm8, %rbp
2274 ; AVX512-NEXT: addq %rdx, %rbp
2275 ; AVX512-NEXT: vpextrq $1, %xmm4, %rdx
2276 ; AVX512-NEXT: addq %rcx, %rdx
2277 ; AVX512-NEXT: vpextrq $1, %xmm3, %rcx
2278 ; AVX512-NEXT: addq %rax, %rcx
2279 ; AVX512-NEXT: vpextrq $1, %xmm2, %rax
2280 ; AVX512-NEXT: addq %r14, %rax
2281 ; AVX512-NEXT: vpextrq $1, %xmm9, %r14
2282 ; AVX512-NEXT: leaq -1(%rbx,%r14), %r12
2283 ; AVX512-NEXT: vmovq %xmm9, %rbx
2284 ; AVX512-NEXT: leaq -1(%r11,%rbx), %r15
2285 ; AVX512-NEXT: vpextrq $1, %xmm7, %r11
2286 ; AVX512-NEXT: leaq -1(%r10,%r11), %r14
2287 ; AVX512-NEXT: vmovq %xmm7, %r10
2288 ; AVX512-NEXT: leaq -1(%r9,%r10), %rbx
2289 ; AVX512-NEXT: vmovq %xmm8, %r9
2290 ; AVX512-NEXT: leaq -1(%r8,%r9), %r11
2291 ; AVX512-NEXT: vmovq %xmm4, %r8
2292 ; AVX512-NEXT: leaq -1(%rdi,%r8), %r10
2293 ; AVX512-NEXT: vpextrq $1, %xmm6, %rdi
2294 ; AVX512-NEXT: leaq -1(%rsi,%rdi), %r9
2295 ; AVX512-NEXT: vmovq %xmm6, %rsi
2296 ; AVX512-NEXT: leaq -1(%r13,%rsi), %rsi
2297 ; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
2298 ; AVX512-NEXT: vpextrq $1, %xmm5, %rsi
2299 ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
2300 ; AVX512-NEXT: leaq -1(%rdi,%rsi), %rsi
2301 ; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
2302 ; AVX512-NEXT: vmovq %xmm5, %rsi
2303 ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
2304 ; AVX512-NEXT: leaq -1(%rdi,%rsi), %rsi
2305 ; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
2306 ; AVX512-NEXT: vmovq %xmm1, %rsi
2307 ; AVX512-NEXT: vmovq %xmm3, %rdi
2308 ; AVX512-NEXT: leaq -1(%rsi,%rdi), %rsi
2309 ; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
2310 ; AVX512-NEXT: vmovq %xmm0, %rsi
2311 ; AVX512-NEXT: vmovq %xmm2, %rdi
2312 ; AVX512-NEXT: leaq -1(%rsi,%rdi), %rsi
2313 ; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
2314 ; AVX512-NEXT: xorl %r8d, %r8d
2315 ; AVX512-NEXT: addq $-1, %rbp
2316 ; AVX512-NEXT: movl $0, %esi
2317 ; AVX512-NEXT: adcq $-1, %rsi
2318 ; AVX512-NEXT: addq $-1, %rdx
2319 ; AVX512-NEXT: movl $0, %edi
2320 ; AVX512-NEXT: adcq $-1, %rdi
2321 ; AVX512-NEXT: addq $-1, %rcx
2322 ; AVX512-NEXT: movl $0, %r13d
2323 ; AVX512-NEXT: adcq $-1, %r13
2324 ; AVX512-NEXT: addq $-1, %rax
2325 ; AVX512-NEXT: adcq $-1, %r8
2326 ; AVX512-NEXT: shldq $63, %rax, %r8
2327 ; AVX512-NEXT: shldq $63, %rcx, %r13
2328 ; AVX512-NEXT: shldq $63, %rdx, %rdi
2329 ; AVX512-NEXT: shldq $63, %rbp, %rsi
2330 ; AVX512-NEXT: shrq %r12
2331 ; AVX512-NEXT: vmovq %r12, %xmm0
2332 ; AVX512-NEXT: shrq %r15
2333 ; AVX512-NEXT: vmovq %r15, %xmm1
2334 ; AVX512-NEXT: shrq %r14
2335 ; AVX512-NEXT: vmovq %r14, %xmm2
2336 ; AVX512-NEXT: shrq %rbx
2337 ; AVX512-NEXT: vmovq %rbx, %xmm3
2338 ; AVX512-NEXT: vmovq %rsi, %xmm4
2339 ; AVX512-NEXT: shrq %r11
2340 ; AVX512-NEXT: vmovq %r11, %xmm5
2341 ; AVX512-NEXT: vmovq %rdi, %xmm6
2342 ; AVX512-NEXT: shrq %r10
2343 ; AVX512-NEXT: vmovq %r10, %xmm7
2344 ; AVX512-NEXT: shrq %r9
2345 ; AVX512-NEXT: vmovq %r9, %xmm8
2346 ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
2347 ; AVX512-NEXT: shrq %rax
2348 ; AVX512-NEXT: vmovq %rax, %xmm9
2349 ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
2350 ; AVX512-NEXT: shrq %rax
2351 ; AVX512-NEXT: vmovq %rax, %xmm10
2352 ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
2353 ; AVX512-NEXT: shrq %rax
2354 ; AVX512-NEXT: vmovq %rax, %xmm11
2355 ; AVX512-NEXT: vmovq %r13, %xmm12
2356 ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
2357 ; AVX512-NEXT: shrq %rax
2358 ; AVX512-NEXT: vmovq %rax, %xmm13
2359 ; AVX512-NEXT: vmovq %r8, %xmm14
2360 ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
2361 ; AVX512-NEXT: shrq %rax
2362 ; AVX512-NEXT: vmovq %rax, %xmm15
2363 ; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
2364 ; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
2365 ; AVX512-NEXT: vpbroadcastw %xmm0, %xmm0
2366 ; AVX512-NEXT: vpbroadcastw %xmm1, %xmm1
2367 ; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
2368 ; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
2369 ; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
2370 ; AVX512-NEXT: vpbroadcastw %xmm1, %xmm1
2371 ; AVX512-NEXT: vpbroadcastw %xmm2, %xmm2
2372 ; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
2373 ; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
2374 ; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
2375 ; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
2376 ; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
2377 ; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
2378 ; AVX512-NEXT: vpsllq $48, %xmm2, %xmm2
2379 ; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
2380 ; AVX512-NEXT: vpbroadcastw %xmm3, %xmm3
2381 ; AVX512-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3],xmm3[4,5,6,7]
2382 ; AVX512-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
2383 ; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
2384 ; AVX512-NEXT: vmovdqu %xmm0, (%rax)
2385 ; AVX512-NEXT: popq %rbx
2386 ; AVX512-NEXT: popq %r12
2387 ; AVX512-NEXT: popq %r13
2388 ; AVX512-NEXT: popq %r14
2389 ; AVX512-NEXT: popq %r15
2390 ; AVX512-NEXT: popq %rbp
2391 ; AVX512-NEXT: vzeroupper
2393 %1 = load <16 x i8>, ptr %a
2394 %2 = load <16 x i8>, ptr %b
2395 %3 = zext <16 x i8> %1 to <16 x i128>
2396 %4 = zext <16 x i8> %2 to <16 x i128>
2397 %5 = add <16 x i128> %3, <i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1>
2398 %6 = add <16 x i128> %5, %4
2399 %7 = lshr <16 x i128> %6, <i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1>
2400 %8 = trunc <16 x i128> %7 to <16 x i8>
2401 store <16 x i8> %8, ptr undef, align 4
2405 ; Make sure we don't fail on single element vectors.
2406 define <1 x i8> @avg_v1i8(<1 x i8> %x, <1 x i8> %y) {
2407 ; SSE2-LABEL: avg_v1i8:
2409 ; SSE2-NEXT: movzbl %dil, %eax
2410 ; SSE2-NEXT: movzbl %sil, %ecx
2411 ; SSE2-NEXT: leal 1(%rax,%rcx), %eax
2412 ; SSE2-NEXT: shrl %eax
2413 ; SSE2-NEXT: # kill: def $al killed $al killed $eax
2416 ; AVX-LABEL: avg_v1i8:
2418 ; AVX-NEXT: movzbl %dil, %eax
2419 ; AVX-NEXT: movzbl %sil, %ecx
2420 ; AVX-NEXT: leal 1(%rax,%rcx), %eax
2421 ; AVX-NEXT: shrl %eax
2422 ; AVX-NEXT: # kill: def $al killed $al killed $eax
2424 %a = zext <1 x i8> %x to <1 x i16>
2425 %b = zext <1 x i8> %y to <1 x i16>
2426 %c = add <1 x i16> %a, %b
2427 %d = add <1 x i16> %c, <i16 1>
2428 %e = lshr <1 x i16> %d, <i16 1>
2429 %f = trunc <1 x i16> %e to <1 x i8>
2433 ; _mm_avg_epu16( _mm_slli_epi16(a, 2), _mm_slli_epi16(b, 2))
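; A hedged C++ reproducer sketch for the expression above (assumes SSE2 and
; the <emmintrin.h> intrinsics; the function name is ours, not from the PR).
; The IR below is essentially what this expands to once the average is
; widened to zext/add/lshr and the +1 is folded into an 'or':
;
;   #include <emmintrin.h>
;   __m128i pr41316(__m128i a, __m128i b) {
;     // pavgw of the two inputs after each is shifted left by 2
;     return _mm_avg_epu16(_mm_slli_epi16(a, 2), _mm_slli_epi16(b, 2));
;   }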
2434 define <2 x i64> @PR41316(<2 x i64>, <2 x i64>) {
2435 ; SSE2-LABEL: PR41316:
2437 ; SSE2-NEXT: psllw $2, %xmm0
2438 ; SSE2-NEXT: psllw $2, %xmm1
2439 ; SSE2-NEXT: pavgw %xmm1, %xmm0
2442 ; AVX-LABEL: PR41316:
2444 ; AVX-NEXT: vpsllw $2, %xmm0, %xmm0
2445 ; AVX-NEXT: vpsllw $2, %xmm1, %xmm1
2446 ; AVX-NEXT: vpavgw %xmm0, %xmm1, %xmm0
2448 %3 = bitcast <2 x i64> %0 to <8 x i16>
2449 %4 = shl <8 x i16> %3, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
2450 %5 = bitcast <2 x i64> %1 to <8 x i16>
2451 %6 = shl <8 x i16> %5, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
2452 %7 = zext <8 x i16> %6 to <8 x i32>
2453 %8 = or <8 x i16> %4, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
2454 %9 = zext <8 x i16> %8 to <8 x i32>
2455 %10 = add nuw nsw <8 x i32> %9, %7
2456 %11 = lshr <8 x i32> %10, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
2457 %12 = trunc <8 x i32> %11 to <8 x i16>
2458 %13 = bitcast <8 x i16> %12 to <2 x i64>
2462 ; shuffle(avg(shuffle(),shuffle())) -> avg(shuffle(),shuffle())
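; Hand-derived note (not autogenerated): pavgb is lane-wise, so the trailing
; result shuffle can be composed into both input shuffles. Composing the mask
; <12..15, 8..11, 4..7, 0..3> with the operand masks gives byte masks
; <4..7, 4..7, 12..15, 12..15> for %x and <0..3, 0..3, 8..11, 8..11> for %y,
; i.e. the dword shuffles [1,1,3,3] and [0,0,2,2] in the checks below.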
2463 define <16 x i8> @fold_avgb_shuffles(<16 x i8> %x, <16 x i8> %y) {
2464 ; SSE2-LABEL: fold_avgb_shuffles:
2465 ; SSE2: # %bb.0: # %entry
2466 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
2467 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
2468 ; SSE2-NEXT: pavgb %xmm1, %xmm0
2471 ; AVX-LABEL: fold_avgb_shuffles:
2472 ; AVX: # %bb.0: # %entry
2473 ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
2474 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
2475 ; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0
2478 %0 = shufflevector <16 x i8> %x, <16 x i8> poison, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 12, i32 13, i32 14, i32 15, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
2479 %1 = shufflevector <16 x i8> %y, <16 x i8> poison, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 8, i32 9, i32 10, i32 11, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
2480 %2 = tail call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %0, <16 x i8> %1)
2481 %3 = shufflevector <16 x i8> %2, <16 x i8> poison, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
2484 declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>)
define <8 x i16> @fold_avgw_shuffles(<8 x i16> %x, <8 x i16> %y) {
; SSE2-LABEL: fold_avgw_shuffles:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pavgw %xmm1, %xmm0
; AVX-LABEL: fold_avgw_shuffles:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpavgw %xmm1, %xmm0, %xmm0
entry:
%0 = shufflevector <8 x i16> %x, <8 x i16> poison, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 2, i32 3, i32 0, i32 1>
%1 = shufflevector <8 x i16> %y, <8 x i16> poison, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 2, i32 3, i32 0, i32 1>
%2 = tail call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %0, <8 x i16> %1)
%3 = shufflevector <8 x i16> %2, <8 x i16> poison, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 2, i32 3, i32 0, i32 1>
ret <8 x i16> %3
}
declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>)
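; PR52131: two chained rounding averages. Masking the first result with
; 0xffff keeps it equivalent to a zero-extended i16, so the checks expect
; both rounds to select pavgw.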
define <8 x i16> @PR52131_pavg_chain(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) {
; SSE2-LABEL: PR52131_pavg_chain:
; SSE2-NEXT: pavgw %xmm1, %xmm0
; SSE2-NEXT: pavgw %xmm2, %xmm0
; AVX-LABEL: PR52131_pavg_chain:
; AVX-NEXT: vpavgw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpavgw %xmm0, %xmm2, %xmm0
%i = zext <8 x i16> %a to <8 x i32>
%i1 = zext <8 x i16> %b to <8 x i32>
%i2 = add nuw nsw <8 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%i3 = add nuw nsw <8 x i32> %i2, %i1
%i4 = lshr <8 x i32> %i3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%i5 = and <8 x i32> %i4, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
%i6 = zext <8 x i16> %c to <8 x i32>
%i7 = add nuw nsw <8 x i32> %i6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%i8 = add nuw nsw <8 x i32> %i7, %i5
%i9 = lshr <8 x i32> %i8, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%i10 = trunc <8 x i32> %i9 to <8 x i16>
ret <8 x i16> %i10
}
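; As above, but the intermediate mask is 0x1ffff rather than 0xffff, so the
; masked value is not a plain zero-extended i16.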
define <8 x i16> @PR52131_pavg_chainlike_but_not_zext(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) {
; SSE2-LABEL: PR52131_pavg_chainlike_but_not_zext:
; SSE2-NEXT: pavgw %xmm1, %xmm0
; SSE2-NEXT: pavgw %xmm2, %xmm0
; AVX-LABEL: PR52131_pavg_chainlike_but_not_zext:
; AVX-NEXT: vpavgw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpavgw %xmm0, %xmm2, %xmm0
%i = zext <8 x i16> %a to <8 x i32>
%i1 = zext <8 x i16> %b to <8 x i32>
%i2 = add nuw nsw <8 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%i3 = add nuw nsw <8 x i32> %i2, %i1
%i4 = lshr <8 x i32> %i3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%i5 = and <8 x i32> %i4, <i32 131071, i32 131071, i32 131071, i32 131071, i32 131071, i32 131071, i32 131071, i32 131071>
%i6 = zext <8 x i16> %c to <8 x i32>
%i7 = add nuw nsw <8 x i32> %i6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%i8 = add nuw nsw <8 x i32> %i7, %i5
%i9 = lshr <8 x i32> %i8, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%i10 = trunc <8 x i32> %i9 to <8 x i16>
ret <8 x i16> %i10
}
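; Here the first operand arrives as an <8 x i32> masked with 0xffff instead
; of a zero-extended <8 x i16>, so the checks expect it to be narrowed to
; 16 bits first (the narrowing sequence varies by subtarget) before a single
; pavgw.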
define <8 x i16> @PR52131_pavg_with_mask(<8 x i32> %a, <8 x i16> %b) {
; SSE2-LABEL: PR52131_pavg_with_mask:
; SSE2-NEXT: pslld $16, %xmm1
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: pslld $16, %xmm0
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: packssdw %xmm1, %xmm0
; SSE2-NEXT: pavgw %xmm2, %xmm0
; AVX1-LABEL: PR52131_pavg_with_mask:
; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpavgw %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vzeroupper
; AVX2-LABEL: PR52131_pavg_with_mask:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpavgw %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vzeroupper
; AVX512-LABEL: PR52131_pavg_with_mask:
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpavgw %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
%i = and <8 x i32> %a, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
%i3 = zext <8 x i16> %b to <8 x i32>
%i4 = add nuw nsw <8 x i32> %i3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%i5 = add nuw nsw <8 x i32> %i4, %i
%i6 = lshr <8 x i32> %i5, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%i7 = trunc <8 x i32> %i6 to <8 x i16>
ret <8 x i16> %i7
}
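; Similar, but the average is taken against a constant: the add of 43 and the
; shift right by one match the rounding-average pattern, so the checks expect
; pavgw to read its second operand from a constant pool.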
define <8 x i16> @PR52131_not_zext_with_constant(<8 x i32> %a) {
; SSE2-LABEL: PR52131_not_zext_with_constant:
; SSE2-NEXT: pslld $16, %xmm1
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: pslld $16, %xmm0
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: packssdw %xmm1, %xmm0
; SSE2-NEXT: pavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; AVX1-LABEL: PR52131_not_zext_with_constant:
; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX2-LABEL: PR52131_not_zext_with_constant:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX512-LABEL: PR52131_not_zext_with_constant:
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpavgw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
%i = and <8 x i32> %a, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
%i1 = add nuw nsw <8 x i32> %i, <i32 43, i32 43, i32 43, i32 43, i32 43, i32 43, i32 43, i32 43>
%i2 = lshr <8 x i32> %i1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%i3 = trunc <8 x i32> %i2 to <8 x i16>
ret <8 x i16> %i3
}