; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw -mattr=+avx512vl | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512BWVL

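; Masked and zero-masked lowering of the vector-average idiom. Each test
; computes avg(a, b) = trunc((zext(a) + zext(b) + 1) >> 1) in a widened
; element type, which llc recognizes and lowers to vpavgb/vpavgw; the final
; select applies the lane mask.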
define <16 x i8> @avg_v16i8_mask(<16 x i8> %a, <16 x i8> %b, <16 x i8> %src, i16 %mask) nounwind {
; AVX512F-LABEL: avg_v16i8_mask:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpavgb %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vpblendvb %xmm1, %xmm0, %xmm2, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v16i8_mask:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgb %xmm1, %xmm0, %xmm2 {%k1}
; AVX512BWVL-NEXT: vmovdqa %xmm2, %xmm0
; AVX512BWVL-NEXT: retq
  %za = zext <16 x i8> %a to <16 x i16>
  %zb = zext <16 x i8> %b to <16 x i16>
  %add = add nuw nsw <16 x i16> %za, %zb
  %add1 = add nuw nsw <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %lshr = lshr <16 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %trunc = trunc <16 x i16> %lshr to <16 x i8>
  %mask1 = bitcast i16 %mask to <16 x i1>
  %res = select <16 x i1> %mask1, <16 x i8> %trunc, <16 x i8> %src
  ret <16 x i8> %res
}

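; Zero masking: masked-off lanes become zero rather than merging with a
; passthru operand, so the AVX512F lowering can finish with vpand instead
; of vpblendvb.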
define <16 x i8> @avg_v16i8_maskz(<16 x i8> %a, <16 x i8> %b, i16 %mask) nounwind {
; AVX512F-LABEL: avg_v16i8_maskz:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpavgb %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vpand %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v16i8_maskz:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgb %xmm1, %xmm0, %xmm0 {%k1} {z}
; AVX512BWVL-NEXT: retq
  %za = zext <16 x i8> %a to <16 x i16>
  %zb = zext <16 x i8> %b to <16 x i16>
  %add = add nuw nsw <16 x i16> %za, %zb
  %add1 = add nuw nsw <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %lshr = lshr <16 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %trunc = trunc <16 x i16> %lshr to <16 x i8>
  %mask1 = bitcast i16 %mask to <16 x i1>
  %res = select <16 x i1> %mask1, <16 x i8> %trunc, <16 x i8> zeroinitializer
  ret <16 x i8> %res
}

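; 256-bit byte vectors: AVX512F alone has no byte-granularity masking (that
; requires AVX512BW), so the i32 mask is split with shrl $16 and each 16-bit
; half is materialized as a byte-vector select mask via vpternlogd/vpmovdb.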
define <32 x i8> @avg_v32i8_mask(<32 x i8> %a, <32 x i8> %b, <32 x i8> %src, i32 %mask) nounwind {
; AVX512F-LABEL: avg_v32i8_mask:
; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: shrl $16, %edi
; AVX512F-NEXT: vpavgb %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: kmovw %edi, %k2
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k2} {z}
; AVX512F-NEXT: vpmovdb %zmm3, %xmm3
; AVX512F-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
; AVX512F-NEXT: vpblendvb %ymm1, %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v32i8_mask:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgb %ymm1, %ymm0, %ymm2 {%k1}
; AVX512BWVL-NEXT: vmovdqa %ymm2, %ymm0
; AVX512BWVL-NEXT: retq
  %za = zext <32 x i8> %a to <32 x i16>
  %zb = zext <32 x i8> %b to <32 x i16>
  %add = add nuw nsw <32 x i16> %za, %zb
  %add1 = add nuw nsw <32 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %lshr = lshr <32 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %trunc = trunc <32 x i16> %lshr to <32 x i8>
  %mask1 = bitcast i32 %mask to <32 x i1>
  %res = select <32 x i1> %mask1, <32 x i8> %trunc, <32 x i8> %src
  ret <32 x i8> %res
}

define <32 x i8> @avg_v32i8_maskz(<32 x i8> %a, <32 x i8> %b, i32 %mask) nounwind {
; AVX512F-LABEL: avg_v32i8_maskz:
; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: shrl $16, %edi
; AVX512F-NEXT: vpavgb %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: kmovw %edi, %k2
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
; AVX512F-NEXT: vpmovdb %zmm2, %xmm2
; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm0, %ymm1, %ymm0
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v32i8_maskz:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgb %ymm1, %ymm0, %ymm0 {%k1} {z}
; AVX512BWVL-NEXT: retq
  %za = zext <32 x i8> %a to <32 x i16>
  %zb = zext <32 x i8> %b to <32 x i16>
  %add = add nuw nsw <32 x i16> %za, %zb
  %add1 = add nuw nsw <32 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %lshr = lshr <32 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %trunc = trunc <32 x i16> %lshr to <32 x i8>
  %mask1 = bitcast i32 %mask to <32 x i1>
  %res = select <32 x i1> %mask1, <32 x i8> %trunc, <32 x i8> zeroinitializer
  ret <32 x i8> %res
}

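; 512-bit byte vectors on plain AVX512F: the i64 mask is split into four
; 16-bit pieces, each 256-bit half is averaged and blended in ymm registers,
; and the halves are rejoined with vinserti64x4.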
define <64 x i8> @avg_v64i8_mask(<64 x i8> %a, <64 x i8> %b, <64 x i8> %src, i64 %mask) nounwind {
; AVX512F-LABEL: avg_v64i8_mask:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; AVX512F-NEXT: movq %rdi, %rax
; AVX512F-NEXT: movl %edi, %ecx
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: shrq $32, %rdi
; AVX512F-NEXT: shrq $48, %rax
; AVX512F-NEXT: shrl $16, %ecx
; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm4
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm5
; AVX512F-NEXT: vpavgb %ymm4, %ymm5, %ymm4
; AVX512F-NEXT: vpavgb %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: kmovw %ecx, %k2
; AVX512F-NEXT: kmovw %eax, %k3
; AVX512F-NEXT: kmovw %edi, %k4
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k4} {z}
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vpternlogd $255, %zmm5, %zmm5, %zmm5 {%k3} {z}
; AVX512F-NEXT: vpmovdb %zmm5, %xmm5
; AVX512F-NEXT: vinserti128 $1, %xmm5, %ymm1, %ymm1
; AVX512F-NEXT: vpblendvb %ymm1, %ymm4, %ymm3, %ymm1
; AVX512F-NEXT: vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm3, %xmm3
; AVX512F-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4 {%k2} {z}
; AVX512F-NEXT: vpmovdb %zmm4, %xmm4
; AVX512F-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v64i8_mask:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovq %rdi, %k1
; AVX512BWVL-NEXT: vpavgb %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BWVL-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512BWVL-NEXT: retq
  %za = zext <64 x i8> %a to <64 x i16>
  %zb = zext <64 x i8> %b to <64 x i16>
  %add = add nuw nsw <64 x i16> %za, %zb
  %add1 = add nuw nsw <64 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %lshr = lshr <64 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %trunc = trunc <64 x i16> %lshr to <64 x i8>
  %mask1 = bitcast i64 %mask to <64 x i1>
  %res = select <64 x i1> %mask1, <64 x i8> %trunc, <64 x i8> %src
  ret <64 x i8> %res
}

define <64 x i8> @avg_v64i8_maskz(<64 x i8> %a, <64 x i8> %b, i64 %mask) nounwind {
; AVX512F-LABEL: avg_v64i8_maskz:
; AVX512F: # %bb.0:
; AVX512F-NEXT: movq %rdi, %rax
; AVX512F-NEXT: movl %edi, %ecx
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: shrq $32, %rdi
; AVX512F-NEXT: shrq $48, %rax
; AVX512F-NEXT: shrl $16, %ecx
; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT: vpavgb %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpavgb %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: kmovw %ecx, %k2
; AVX512F-NEXT: kmovw %eax, %k3
; AVX512F-NEXT: kmovw %edi, %k4
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k4} {z}
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k3} {z}
; AVX512F-NEXT: vpmovdb %zmm3, %xmm3
; AVX512F-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm2, %xmm2
; AVX512F-NEXT: vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k2} {z}
; AVX512F-NEXT: vpmovdb %zmm3, %xmm3
; AVX512F-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
; AVX512F-NEXT: vpand %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v64i8_maskz:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovq %rdi, %k1
; AVX512BWVL-NEXT: vpavgb %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BWVL-NEXT: retq
  %za = zext <64 x i8> %a to <64 x i16>
  %zb = zext <64 x i8> %b to <64 x i16>
  %add = add nuw nsw <64 x i16> %za, %zb
  %add1 = add nuw nsw <64 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %lshr = lshr <64 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %trunc = trunc <64 x i16> %lshr to <64 x i8>
  %mask1 = bitcast i64 %mask to <64 x i1>
  %res = select <64 x i1> %mask1, <64 x i8> %trunc, <64 x i8> zeroinitializer
  ret <64 x i8> %res
}

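; The word-element (i16) tests mirror the byte tests: vpavgw instead of
; vpavgb, with the AVX512F mask expansion going through vpmovdw rather than
; vpmovdb.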
define <8 x i16> @avg_v8i16_mask(<8 x i16> %a, <8 x i16> %b, <8 x i16> %src, i8 %mask) nounwind {
; AVX512F-LABEL: avg_v8i16_mask:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpavgw %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
; AVX512F-NEXT: vpblendvb %xmm1, %xmm0, %xmm2, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v8i16_mask:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgw %xmm1, %xmm0, %xmm2 {%k1}
; AVX512BWVL-NEXT: vmovdqa %xmm2, %xmm0
; AVX512BWVL-NEXT: retq
  %za = zext <8 x i16> %a to <8 x i32>
  %zb = zext <8 x i16> %b to <8 x i32>
  %add = add nuw nsw <8 x i32> %za, %zb
  %add1 = add nuw nsw <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %lshr = lshr <8 x i32> %add1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %trunc = trunc <8 x i32> %lshr to <8 x i16>
  %mask1 = bitcast i8 %mask to <8 x i1>
  %res = select <8 x i1> %mask1, <8 x i16> %trunc, <8 x i16> %src
  ret <8 x i16> %res
}

define <8 x i16> @avg_v8i16_maskz(<8 x i16> %a, <8 x i16> %b, i8 %mask) nounwind {
; AVX512F-LABEL: avg_v8i16_maskz:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpavgw %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
; AVX512F-NEXT: vpand %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v8i16_maskz:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgw %xmm1, %xmm0, %xmm0 {%k1} {z}
; AVX512BWVL-NEXT: retq
  %za = zext <8 x i16> %a to <8 x i32>
  %zb = zext <8 x i16> %b to <8 x i32>
  %add = add nuw nsw <8 x i32> %za, %zb
  %add1 = add nuw nsw <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %lshr = lshr <8 x i32> %add1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %trunc = trunc <8 x i32> %lshr to <8 x i16>
  %mask1 = bitcast i8 %mask to <8 x i1>
  %res = select <8 x i1> %mask1, <8 x i16> %trunc, <8 x i16> zeroinitializer
  ret <8 x i16> %res
}

define <16 x i16> @avg_v16i16_mask(<16 x i16> %a, <16 x i16> %b, <16 x i16> %src, i16 %mask) nounwind {
; AVX512F-LABEL: avg_v16i16_mask:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpavgw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
; AVX512F-NEXT: vpblendvb %ymm1, %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v16i16_mask:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgw %ymm1, %ymm0, %ymm2 {%k1}
; AVX512BWVL-NEXT: vmovdqa %ymm2, %ymm0
; AVX512BWVL-NEXT: retq
  %za = zext <16 x i16> %a to <16 x i32>
  %zb = zext <16 x i16> %b to <16 x i32>
  %add = add nuw nsw <16 x i32> %za, %zb
  %add1 = add nuw nsw <16 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %lshr = lshr <16 x i32> %add1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %trunc = trunc <16 x i32> %lshr to <16 x i16>
  %mask1 = bitcast i16 %mask to <16 x i1>
  %res = select <16 x i1> %mask1, <16 x i16> %trunc, <16 x i16> %src
  ret <16 x i16> %res
}

define <16 x i16> @avg_v16i16_maskz(<16 x i16> %a, <16 x i16> %b, i16 %mask) nounwind {
; AVX512F-LABEL: avg_v16i16_maskz:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpavgw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
; AVX512F-NEXT: vpand %ymm0, %ymm1, %ymm0
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v16i16_maskz:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgw %ymm1, %ymm0, %ymm0 {%k1} {z}
; AVX512BWVL-NEXT: retq
  %za = zext <16 x i16> %a to <16 x i32>
  %zb = zext <16 x i16> %b to <16 x i32>
  %add = add nuw nsw <16 x i32> %za, %zb
  %add1 = add nuw nsw <16 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %lshr = lshr <16 x i32> %add1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %trunc = trunc <16 x i32> %lshr to <16 x i16>
  %mask1 = bitcast i16 %mask to <16 x i1>
  %res = select <16 x i1> %mask1, <16 x i16> %trunc, <16 x i16> zeroinitializer
  ret <16 x i16> %res
}

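; 512-bit word vectors: the i32 mask is split into two 16-bit halves, each
; expanded through vpternlogd/vpmovdw to a ymm select mask before the halves
; are recombined with vinserti64x4.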
define <32 x i16> @avg_v32i16_mask(<32 x i16> %a, <32 x i16> %b, <32 x i16> %src, i32 %mask) nounwind {
; AVX512F-LABEL: avg_v32i16_mask:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: shrl $16, %edi
; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm4
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm5
; AVX512F-NEXT: vpavgw %ymm4, %ymm5, %ymm4
; AVX512F-NEXT: vpavgw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: kmovw %edi, %k2
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
; AVX512F-NEXT: vpblendvb %ymm1, %ymm4, %ymm3, %ymm1
; AVX512F-NEXT: vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v32i16_mask:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgw %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BWVL-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512BWVL-NEXT: retq
  %za = zext <32 x i16> %a to <32 x i32>
  %zb = zext <32 x i16> %b to <32 x i32>
  %add = add nuw nsw <32 x i32> %za, %zb
  %add1 = add nuw nsw <32 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %lshr = lshr <32 x i32> %add1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %trunc = trunc <32 x i32> %lshr to <32 x i16>
  %mask1 = bitcast i32 %mask to <32 x i1>
  %res = select <32 x i1> %mask1, <32 x i16> %trunc, <32 x i16> %src
  ret <32 x i16> %res
}

define <32 x i16> @avg_v32i16_maskz(<32 x i16> %a, <32 x i16> %b, i32 %mask) nounwind {
; AVX512F-LABEL: avg_v32i16_maskz:
; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: shrl $16, %edi
; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT: vpavgw %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpavgw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: kmovw %edi, %k2
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm2, %ymm2
; AVX512F-NEXT: vpand %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BWVL-LABEL: avg_v32i16_maskz:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: kmovd %edi, %k1
; AVX512BWVL-NEXT: vpavgw %zmm1, %zmm0, %zmm0 {%k1} {z}
; AVX512BWVL-NEXT: retq
  %za = zext <32 x i16> %a to <32 x i32>
  %zb = zext <32 x i16> %b to <32 x i32>
  %add = add nuw nsw <32 x i32> %za, %zb
  %add1 = add nuw nsw <32 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %lshr = lshr <32 x i32> %add1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %trunc = trunc <32 x i32> %lshr to <32 x i16>
  %mask1 = bitcast i32 %mask to <32 x i1>
  %res = select <32 x i1> %mask1, <32 x i16> %trunc, <32 x i16> zeroinitializer
  ret <32 x i16> %res
}