1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq | FileCheck %s --check-prefixes=ALL,AVX512DQ
3 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=ALL,AVX512BW
; AVX512 natively supports variable 64-bit arithmetic shifts via VPSRAVQ.
define <8 x i64> @var_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
; ALL-LABEL: var_shift_v8i64:
; ALL:       # %bb.0:
; ALL-NEXT:    vpsravq %zmm1, %zmm0, %zmm0
; ALL-NEXT:    retq
  %shift = ashr <8 x i64> %a, %b
  ret <8 x i64> %shift
}
; Variable 32-bit arithmetic shifts lower to a single VPSRAVD.
define <16 x i32> @var_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
; ALL-LABEL: var_shift_v16i32:
; ALL:       # %bb.0:
; ALL-NEXT:    vpsravd %zmm1, %zmm0, %zmm0
; ALL-NEXT:    retq
  %shift = ashr <16 x i32> %a, %b
  ret <16 x i32> %shift
}
; Without BWI (AVX512DQ), v32i16 is widened to v16i32 halves and shifted with
; VPSRAVD; with AVX512BW a single VPSRAVW suffices.
define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512DQ-LABEL: var_shift_v32i16:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm3
; AVX512DQ-NEXT:    vpsravd %zmm2, %zmm3, %zmm2
; AVX512DQ-NEXT:    vpmovdw %zmm2, %ymm2
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm1, %ymm1
; AVX512DQ-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT:    vpsravd %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT:    vpmovdw %zmm0, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: var_shift_v32i16:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpsravw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %shift = ashr <32 x i16> %a, %b
  ret <32 x i16> %shift
}
; No byte-granular shift exists, so v64i8 is emulated with word shifts blended
; per-bit of the (pre-scaled) shift amount: AVX512DQ works on ymm halves with
; VPBLENDVB, AVX512BW uses mask registers (VPMOVB2M/VMOVDQU8).
define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-LABEL: var_shift_v64i8:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512DQ-NEXT:    vpsllw $5, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm4
; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512DQ-NEXT:    vpsraw $4, %ymm5, %ymm6
; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm6, %ymm5, %ymm5
; AVX512DQ-NEXT:    vpsraw $2, %ymm5, %ymm6
; AVX512DQ-NEXT:    vpaddw %ymm3, %ymm3, %ymm3
; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm6, %ymm5, %ymm5
; AVX512DQ-NEXT:    vpsraw $1, %ymm5, %ymm6
; AVX512DQ-NEXT:    vpaddw %ymm3, %ymm3, %ymm3
; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm6, %ymm5, %ymm3
; AVX512DQ-NEXT:    vpsrlw $8, %ymm3, %ymm3
; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512DQ-NEXT:    vpsraw $4, %ymm4, %ymm5
; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm5, %ymm4, %ymm4
; AVX512DQ-NEXT:    vpsraw $2, %ymm4, %ymm5
; AVX512DQ-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm5, %ymm4, %ymm4
; AVX512DQ-NEXT:    vpsraw $1, %ymm4, %ymm5
; AVX512DQ-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm5, %ymm4, %ymm2
; AVX512DQ-NEXT:    vpsrlw $8, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpackuswb %ymm3, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpsllw $5, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512DQ-NEXT:    vpsraw $4, %ymm4, %ymm5
; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
; AVX512DQ-NEXT:    vpsraw $2, %ymm4, %ymm5
; AVX512DQ-NEXT:    vpaddw %ymm3, %ymm3, %ymm3
; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
; AVX512DQ-NEXT:    vpsraw $1, %ymm4, %ymm5
; AVX512DQ-NEXT:    vpaddw %ymm3, %ymm3, %ymm3
; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
; AVX512DQ-NEXT:    vpsrlw $8, %ymm3, %ymm3
; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512DQ-NEXT:    vpsraw $4, %ymm0, %ymm4
; AVX512DQ-NEXT:    vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsraw $2, %ymm0, %ymm4
; AVX512DQ-NEXT:    vpaddw %ymm1, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsraw $1, %ymm0, %ymm4
; AVX512DQ-NEXT:    vpaddw %ymm1, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsrlw $8, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpackuswb %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: var_shift_v64i8:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT:    vpsraw $4, %zmm2, %zmm3
; AVX512BW-NEXT:    vpsllw $5, %zmm1, %zmm1
; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm4 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT:    vpmovb2m %zmm4, %k1
; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm2 {%k1}
; AVX512BW-NEXT:    vpsraw $2, %zmm2, %zmm3
; AVX512BW-NEXT:    vpaddw %zmm4, %zmm4, %zmm4
; AVX512BW-NEXT:    vpmovb2m %zmm4, %k1
; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm2 {%k1}
; AVX512BW-NEXT:    vpsraw $1, %zmm2, %zmm3
; AVX512BW-NEXT:    vpaddw %zmm4, %zmm4, %zmm4
; AVX512BW-NEXT:    vpmovb2m %zmm4, %k1
; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm2 {%k1}
; AVX512BW-NEXT:    vpsrlw $8, %zmm2, %zmm2
; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512BW-NEXT:    vpsraw $4, %zmm0, %zmm3
; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512BW-NEXT:    vpmovb2m %zmm1, %k1
; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k1}
; AVX512BW-NEXT:    vpsraw $2, %zmm0, %zmm3
; AVX512BW-NEXT:    vpaddw %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT:    vpmovb2m %zmm1, %k1
; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k1}
; AVX512BW-NEXT:    vpsraw $1, %zmm0, %zmm3
; AVX512BW-NEXT:    vpaddw %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT:    vpmovb2m %zmm1, %k1
; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k1}
; AVX512BW-NEXT:    vpsrlw $8, %zmm0, %zmm0
; AVX512BW-NEXT:    vpackuswb %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %shift = ashr <64 x i8> %a, %b
  ret <64 x i8> %shift
}
145 ; Uniform Variable Shifts
; Splatted shift amount folds to VPSRAQ taking the count from element 0 of xmm1.
define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
; ALL-LABEL: splatvar_shift_v8i64:
; ALL:       # %bb.0:
; ALL-NEXT:    vpsraq %xmm1, %zmm0, %zmm0
; ALL-NEXT:    retq
  %splat = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
  %shift = ashr <8 x i64> %a, %splat
  ret <8 x i64> %shift
}
; The 32-bit count is zero-extended to 64 bits before feeding VPSRAD.
define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
; ALL-LABEL: splatvar_shift_v16i32:
; ALL:       # %bb.0:
; ALL-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; ALL-NEXT:    vpsrad %xmm1, %zmm0, %zmm0
; ALL-NEXT:    retq
  %splat = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
  %shift = ashr <16 x i32> %a, %splat
  ret <16 x i32> %shift
}
; Splatted v32i16 shift: AVX512DQ shifts the two ymm halves with VPSRAW,
; AVX512BW shifts the whole zmm at once.
define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512DQ-LABEL: splatvar_shift_v32i16:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512DQ-NEXT:    vpsraw %xmm1, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpsraw %xmm1, %ymm0, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: splatvar_shift_v32i16:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512BW-NEXT:    vpsraw %xmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
  %shift = ashr <32 x i16> %a, %splat
  ret <32 x i16> %shift
}
; Splatted v64i8 ashr: logical-shift words, mask off cross-byte bits, then
; restore the sign via the xor/sub trick with a shifted 0x80 splat.
define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-LABEL: splatvar_shift_v64i8:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
; AVX512DQ-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512DQ-NEXT:    vpsrlw %xmm1, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX512DQ-NEXT:    vpsrlw %xmm1, %xmm3, %xmm3
; AVX512DQ-NEXT:    vpsrlw $8, %xmm3, %xmm3
; AVX512DQ-NEXT:    vpbroadcastb %xmm3, %ymm3
; AVX512DQ-NEXT:    vpand %ymm3, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpbroadcastb {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX512DQ-NEXT:    vpsrlw %xmm1, %ymm4, %ymm4
; AVX512DQ-NEXT:    vpxor %ymm4, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpsubb %ymm4, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpand %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpxor %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsubb %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: splatvar_shift_v64i8:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT:    vpsrlw %xmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    vpbroadcastb {{.*#+}} zmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX512BW-NEXT:    vpsrlw %xmm1, %zmm2, %zmm2
; AVX512BW-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX512BW-NEXT:    vpsrlw %xmm1, %xmm3, %xmm1
; AVX512BW-NEXT:    vpsrlw $8, %xmm1, %xmm1
; AVX512BW-NEXT:    vpbroadcastb %xmm1, %zmm1
; AVX512BW-NEXT:    vpternlogq $108, %zmm0, %zmm2, %zmm1
; AVX512BW-NEXT:    vpsubb %zmm2, %zmm1, %zmm0
; AVX512BW-NEXT:    retq
  %splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
  %shift = ashr <64 x i8> %a, %splat
  ret <64 x i8> %shift
}
230 ; Uniform Variable Modulo Shifts
; The modulo mask (and 63) is folded into a single VPAND on the count register.
define <8 x i64> @splatvar_modulo_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
; ALL-LABEL: splatvar_modulo_shift_v8i64:
; ALL:       # %bb.0:
; ALL-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; ALL-NEXT:    vpsraq %xmm1, %zmm0, %zmm0
; ALL-NEXT:    retq
  %mod = and <8 x i64> %b, <i64 63, i64 63, i64 63, i64 63, i64 63, i64 63, i64 63, i64 63>
  %splat = shufflevector <8 x i64> %mod, <8 x i64> undef, <8 x i32> zeroinitializer
  %shift = ashr <8 x i64> %a, %splat
  ret <8 x i64> %shift
}
; Masked (modulo-31) splat count lowers to VPAND + VPSRAD.
define <16 x i32> @splatvar_modulo_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
; ALL-LABEL: splatvar_modulo_shift_v16i32:
; ALL:       # %bb.0:
; ALL-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; ALL-NEXT:    vpsrad %xmm1, %zmm0, %zmm0
; ALL-NEXT:    retq
  %mod = and <16 x i32> %b, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
  %splat = shufflevector <16 x i32> %mod, <16 x i32> undef, <16 x i32> zeroinitializer
  %shift = ashr <16 x i32> %a, %splat
  ret <16 x i32> %shift
}
; Modulo-15 masked splat count for v32i16: VPAND then VPSRAW (per-half on DQ).
define <32 x i16> @splatvar_modulo_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512DQ-LABEL: splatvar_modulo_shift_v32i16:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
; AVX512DQ-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512DQ-NEXT:    vpsraw %xmm1, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpsraw %xmm1, %ymm0, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: splatvar_modulo_shift_v32i16:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512BW-NEXT:    vpsraw %xmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %mod = and <32 x i16> %b, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %splat = shufflevector <32 x i16> %mod, <32 x i16> undef, <32 x i32> zeroinitializer
  %shift = ashr <32 x i16> %a, %splat
  ret <32 x i16> %shift
}
; Modulo-7 masked splat count for v64i8; same word-shift + sign-fixup pattern
; as splatvar_shift_v64i8 with an extra VPAND on the count.
define <64 x i8> @splatvar_modulo_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-LABEL: splatvar_modulo_shift_v64i8:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
; AVX512DQ-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512DQ-NEXT:    vpsrlw %xmm1, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX512DQ-NEXT:    vpsrlw %xmm1, %xmm3, %xmm3
; AVX512DQ-NEXT:    vpsrlw $8, %xmm3, %xmm3
; AVX512DQ-NEXT:    vpbroadcastb %xmm3, %ymm3
; AVX512DQ-NEXT:    vpand %ymm3, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpbroadcastb {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX512DQ-NEXT:    vpsrlw %xmm1, %ymm4, %ymm4
; AVX512DQ-NEXT:    vpxor %ymm4, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpsubb %ymm4, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpand %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpxor %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsubb %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: splatvar_modulo_shift_v64i8:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512BW-NEXT:    vpsrlw %xmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    vpbroadcastb {{.*#+}} zmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX512BW-NEXT:    vpsrlw %xmm1, %zmm2, %zmm2
; AVX512BW-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX512BW-NEXT:    vpsrlw %xmm1, %xmm3, %xmm1
; AVX512BW-NEXT:    vpsrlw $8, %xmm1, %xmm1
; AVX512BW-NEXT:    vpbroadcastb %xmm1, %zmm1
; AVX512BW-NEXT:    vpternlogq $108, %zmm0, %zmm2, %zmm1
; AVX512BW-NEXT:    vpsubb %zmm2, %zmm1, %zmm0
; AVX512BW-NEXT:    retq
  %mod = and <64 x i8> %b, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  %splat = shufflevector <64 x i8> %mod, <64 x i8> undef, <64 x i32> zeroinitializer
  %shift = ashr <64 x i8> %a, %splat
  ret <64 x i8> %shift
}
; Non-uniform constant shift amounts load straight from a constant pool.
define <8 x i64> @constant_shift_v8i64(<8 x i64> %a) nounwind {
; ALL-LABEL: constant_shift_v8i64:
; ALL:       # %bb.0:
; ALL-NEXT:    vpsravq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; ALL-NEXT:    retq
  %shift = ashr <8 x i64> %a, <i64 1, i64 7, i64 31, i64 62, i64 1, i64 7, i64 31, i64 62>
  ret <8 x i64> %shift
}
; Constant per-lane 32-bit shifts fold into a memory-operand VPSRAVD.
define <16 x i32> @constant_shift_v16i32(<16 x i32> %a) nounwind {
; ALL-LABEL: constant_shift_v16i32:
; ALL:       # %bb.0:
; ALL-NEXT:    vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; ALL-NEXT:    retq
  %shift = ashr <16 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
  ret <16 x i32> %shift
}
; Constant v32i16 shifts: widened VPSRAVD per half on DQ, one VPSRAVW on BW.
define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
; AVX512DQ-LABEL: constant_shift_v32i16:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm1
; AVX512DQ-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512DQ-NEXT:    vpsravd %zmm2, %zmm1, %zmm1
; AVX512DQ-NEXT:    vpmovdw %zmm1, %ymm1
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT:    vpsravd %zmm2, %zmm0, %zmm0
; AVX512DQ-NEXT:    vpmovdw %zmm0, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: constant_shift_v32i16:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpsravw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %shift = ashr <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
  ret <32 x i16> %shift
}
; Constant v64i8 ashr: sign-extend bytes to words (vpsraw $8 after unpack) and
; emulate the shift with a multiply by a power-of-two constant, then repack.
define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-LABEL: constant_shift_v64i8:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512DQ-NEXT:    vpsraw $8, %ymm2, %ymm2
; AVX512DQ-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [2,4,8,16,32,64,128,256,2,4,8,16,32,64,128,256]
; AVX512DQ-NEXT:    # ymm3 = mem[0,1,0,1]
; AVX512DQ-NEXT:    vpmullw %ymm3, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpsrlw $8, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512DQ-NEXT:    vpsraw $8, %ymm1, %ymm1
; AVX512DQ-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
; AVX512DQ-NEXT:    # ymm4 = mem[0,1,0,1]
; AVX512DQ-NEXT:    vpmullw %ymm4, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpsrlw $8, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpackuswb %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512DQ-NEXT:    vpsraw $8, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpmullw %ymm3, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpsrlw $8, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512DQ-NEXT:    vpsraw $8, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpmullw %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsrlw $8, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: constant_shift_v64i8:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT:    vpsraw $8, %zmm1, %zmm1
; AVX512BW-NEXT:    vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT:    vpsrlw $8, %zmm1, %zmm1
; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512BW-NEXT:    vpsraw $8, %zmm0, %zmm0
; AVX512BW-NEXT:    vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT:    vpsrlw $8, %zmm0, %zmm0
; AVX512BW-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %shift = ashr <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
  ret <64 x i8> %shift
}
409 ; Uniform Constant Shifts
; A uniform constant amount becomes an immediate-form VPSRAQ.
define <8 x i64> @splatconstant_shift_v8i64(<8 x i64> %a) nounwind {
; ALL-LABEL: splatconstant_shift_v8i64:
; ALL:       # %bb.0:
; ALL-NEXT:    vpsraq $7, %zmm0, %zmm0
; ALL-NEXT:    retq
  %shift = ashr <8 x i64> %a, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7>
  ret <8 x i64> %shift
}
; Uniform constant 32-bit shift uses the immediate VPSRAD form.
define <16 x i32> @splatconstant_shift_v16i32(<16 x i32> %a) nounwind {
; ALL-LABEL: splatconstant_shift_v16i32:
; ALL:       # %bb.0:
; ALL-NEXT:    vpsrad $5, %zmm0, %zmm0
; ALL-NEXT:    retq
  %shift = ashr <16 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
  ret <16 x i32> %shift
}
; Uniform v32i16 shift: immediate VPSRAW (split into ymm halves without BWI).
define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) nounwind {
; AVX512DQ-LABEL: splatconstant_shift_v32i16:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vpsraw $3, %ymm0, %ymm1
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
; AVX512DQ-NEXT:    vpsraw $3, %ymm0, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: splatconstant_shift_v32i16:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpsraw $3, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %shift = ashr <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
  ret <32 x i16> %shift
}
; Uniform v64i8 ashr by 3: logical shift + mask + xor/sub sign fixup; AVX512BW
; folds the mask and xor into one VPTERNLOGD.
define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-LABEL: splatconstant_shift_v64i8:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512DQ-NEXT:    vpsrlw $3, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpbroadcastb {{.*#+}} ymm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
; AVX512DQ-NEXT:    vpand %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpbroadcastb {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512DQ-NEXT:    vpxor %ymm3, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpsubb %ymm3, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpsrlw $3, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpand %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpxor %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsubb %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: splatconstant_shift_v64i8:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpsrlw $3, %zmm0, %zmm0
; AVX512BW-NEXT:    vpbroadcastb {{.*#+}} zmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512BW-NEXT:    vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm0
; AVX512BW-NEXT:    vpsubb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %shift = ashr <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  ret <64 x i8> %shift
}
; ashr by 7 broadcasts the sign bit, so it lowers to a signed compare against
; zero (DQ) or a VPMOVB2M/VPMOVM2B round-trip (BW).
define <64 x i8> @ashr_const7_v64i8(<64 x i8> %a) {
; AVX512DQ-LABEL: ashr_const7_v64i8:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512DQ-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512DQ-NEXT:    vpcmpgtb %ymm1, %ymm2, %ymm1
; AVX512DQ-NEXT:    vpcmpgtb %ymm0, %ymm2, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: ashr_const7_v64i8:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpmovb2m %zmm0, %k0
; AVX512BW-NEXT:    vpmovm2b %k0, %zmm0
; AVX512BW-NEXT:    retq
  %res = ashr <64 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  ret <64 x i8> %res
}
494 define <8 x i64> @PR52719(<8 x i64> %a0, i32 %a1) {
495 ; ALL-LABEL: PR52719:
497 ; ALL-NEXT: vmovd %edi, %xmm1
498 ; ALL-NEXT: vpsraq %xmm1, %zmm0, %zmm0
500 %vec = insertelement <8 x i32> poison, i32 %a1, i64 0
501 %splat = shufflevector <8 x i32> %vec, <8 x i32> poison, <8 x i32> zeroinitializer
502 %zext = zext <8 x i32> %splat to <8 x i64>
503 %ashr = ashr <8 x i64> %a0, %zext