; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW

;
; Variable Shifts
;

define <8 x i64> @var_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
; ALL-LABEL: var_shift_v8i64:
; ALL:       # %bb.0:
; ALL-NEXT:    vpsravq %zmm1, %zmm0, %zmm0
; ALL-NEXT:    retq
  %shift = ashr <8 x i64> %a, %b
  ret <8 x i64> %shift
}

define <16 x i32> @var_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
; ALL-LABEL: var_shift_v16i32:
; ALL:       # %bb.0:
; ALL-NEXT:    vpsravd %zmm1, %zmm0, %zmm0
; ALL-NEXT:    retq
  %shift = ashr <16 x i32> %a, %b
  ret <16 x i32> %shift
}

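; AVX512DQ has no 512-bit vpsravw, so the v32i16 shift is split into ymm
; halves, sign-extended to v16i32, shifted with vpsravd, and truncated back
; with vpmovdw. AVX512BW shifts the whole vector with a single vpsravw.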
define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512DQ-LABEL: var_shift_v32i16:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm1, %ymm3
; AVX512DQ-NEXT:    vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
; AVX512DQ-NEXT:    vpmovsxwd %ymm2, %zmm2
; AVX512DQ-NEXT:    vpsravd %zmm3, %zmm2, %zmm2
; AVX512DQ-NEXT:    vpmovdw %zmm2, %ymm2
; AVX512DQ-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT:    vpsravd %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT:    vpmovdw %zmm0, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: var_shift_v32i16:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpsravw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %shift = ashr <32 x i16> %a, %b
  ret <32 x i16> %shift
}

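; Neither target has a variable per-byte shift, so v64i8 is emulated: the
; shift amounts are moved into the byte sign bits with vpsllw $5, and the
; elements are conditionally shifted right by 4, 2 and 1 via sign-bit selects
; (vpblendvb on AVX512DQ, vpmovb2m + masked vmovdqu8 on AVX512BW), processing
; the odd and even bytes in separate word lanes.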
define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-LABEL: var_shift_v64i8:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm1, %ymm3
; AVX512DQ-NEXT:    vpsllw $5, %ymm3, %ymm3
; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm3[8],ymm0[9],ymm3[9],ymm0[10],ymm3[10],ymm0[11],ymm3[11],ymm0[12],ymm3[12],ymm0[13],ymm3[13],ymm0[14],ymm3[14],ymm0[15],ymm3[15],ymm0[24],ymm3[24],ymm0[25],ymm3[25],ymm0[26],ymm3[26],ymm0[27],ymm3[27],ymm0[28],ymm3[28],ymm0[29],ymm3[29],ymm0[30],ymm3[30],ymm0[31],ymm3[31]
; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
; AVX512DQ-NEXT:    vpsraw $4, %ymm5, %ymm6
; AVX512DQ-NEXT:    vpblendvb %ymm4, %ymm6, %ymm5, %ymm5
; AVX512DQ-NEXT:    vpsraw $2, %ymm5, %ymm6
; AVX512DQ-NEXT:    vpaddw %ymm4, %ymm4, %ymm4
; AVX512DQ-NEXT:    vpblendvb %ymm4, %ymm6, %ymm5, %ymm5
; AVX512DQ-NEXT:    vpsraw $1, %ymm5, %ymm6
; AVX512DQ-NEXT:    vpaddw %ymm4, %ymm4, %ymm4
; AVX512DQ-NEXT:    vpblendvb %ymm4, %ymm6, %ymm5, %ymm4
; AVX512DQ-NEXT:    vpsrlw $8, %ymm4, %ymm4
; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm0[0],ymm3[0],ymm0[1],ymm3[1],ymm0[2],ymm3[2],ymm0[3],ymm3[3],ymm0[4],ymm3[4],ymm0[5],ymm3[5],ymm0[6],ymm3[6],ymm0[7],ymm3[7],ymm0[16],ymm3[16],ymm0[17],ymm3[17],ymm0[18],ymm3[18],ymm0[19],ymm3[19],ymm0[20],ymm3[20],ymm0[21],ymm3[21],ymm0[22],ymm3[22],ymm0[23],ymm3[23]
; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
; AVX512DQ-NEXT:    vpsraw $4, %ymm2, %ymm5
; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm5, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpsraw $2, %ymm2, %ymm5
; AVX512DQ-NEXT:    vpaddw %ymm3, %ymm3, %ymm3
; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm5, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpsraw $1, %ymm2, %ymm5
; AVX512DQ-NEXT:    vpaddw %ymm3, %ymm3, %ymm3
; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm5, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpsrlw $8, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpackuswb %ymm4, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpsllw $5, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512DQ-NEXT:    vpsraw $4, %ymm4, %ymm5
; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
; AVX512DQ-NEXT:    vpsraw $2, %ymm4, %ymm5
; AVX512DQ-NEXT:    vpaddw %ymm3, %ymm3, %ymm3
; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
; AVX512DQ-NEXT:    vpsraw $1, %ymm4, %ymm5
; AVX512DQ-NEXT:    vpaddw %ymm3, %ymm3, %ymm3
; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
; AVX512DQ-NEXT:    vpsrlw $8, %ymm3, %ymm3
; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512DQ-NEXT:    vpsraw $4, %ymm0, %ymm4
; AVX512DQ-NEXT:    vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsraw $2, %ymm0, %ymm4
; AVX512DQ-NEXT:    vpaddw %ymm1, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsraw $1, %ymm0, %ymm4
; AVX512DQ-NEXT:    vpaddw %ymm1, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsrlw $8, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpackuswb %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: var_shift_v64i8:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT:    vpsraw $4, %zmm2, %zmm3
; AVX512BW-NEXT:    vpsllw $5, %zmm1, %zmm1
; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm4 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; AVX512BW-NEXT:    vpmovb2m %zmm4, %k1
; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm2 {%k1}
; AVX512BW-NEXT:    vpsraw $2, %zmm2, %zmm3
; AVX512BW-NEXT:    vpaddw %zmm4, %zmm4, %zmm4
; AVX512BW-NEXT:    vpmovb2m %zmm4, %k1
; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm2 {%k1}
; AVX512BW-NEXT:    vpsraw $1, %zmm2, %zmm3
; AVX512BW-NEXT:    vpaddw %zmm4, %zmm4, %zmm4
; AVX512BW-NEXT:    vpmovb2m %zmm4, %k1
; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm2 {%k1}
; AVX512BW-NEXT:    vpsrlw $8, %zmm2, %zmm2
; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512BW-NEXT:    vpsraw $4, %zmm0, %zmm3
; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; AVX512BW-NEXT:    vpmovb2m %zmm1, %k1
; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k1}
; AVX512BW-NEXT:    vpsraw $2, %zmm0, %zmm3
; AVX512BW-NEXT:    vpaddw %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT:    vpmovb2m %zmm1, %k1
; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k1}
; AVX512BW-NEXT:    vpsraw $1, %zmm0, %zmm3
; AVX512BW-NEXT:    vpaddw %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT:    vpmovb2m %zmm1, %k1
; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k1}
; AVX512BW-NEXT:    vpsrlw $8, %zmm0, %zmm0
; AVX512BW-NEXT:    vpackuswb %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %shift = ashr <64 x i8> %a, %b
  ret <64 x i8> %shift
}

;
; Uniform Variable Shifts
;

define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
; ALL-LABEL: splatvar_shift_v8i64:
; ALL:       # %bb.0:
; ALL-NEXT:    vpsraq %xmm1, %zmm0, %zmm0
; ALL-NEXT:    retq
  %splat = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
  %shift = ashr <8 x i64> %a, %splat
  ret <8 x i64> %shift
}

define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
; ALL-LABEL: splatvar_shift_v16i32:
; ALL:       # %bb.0:
; ALL-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; ALL-NEXT:    vpsrad %xmm1, %zmm0, %zmm0
; ALL-NEXT:    retq
  %splat = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
  %shift = ashr <16 x i32> %a, %splat
  ret <16 x i32> %shift
}

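; Splatted shift amounts use the count-in-xmm form of the shift instructions,
; so only the bottom element of %b needs to be zero-extended.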
define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512DQ-LABEL: splatvar_shift_v32i16:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512DQ-NEXT:    vpsraw %xmm1, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpsraw %xmm1, %ymm0, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: splatvar_shift_v32i16:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512BW-NEXT:    vpsraw %xmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
  %shift = ashr <32 x i16> %a, %splat
  ret <32 x i16> %shift
}

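; v64i8 splat shifts are done as a logical word shift (vpsrlw) followed by a
; mask that clears the bits pulled in from the neighbouring byte, and an
; xor/sub against a shifted 0x80 pattern (32896 == 0x8080) to restore the
; sign bits.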
define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-LABEL: splatvar_shift_v64i8:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
; AVX512DQ-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512DQ-NEXT:    vpsrlw %xmm1, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX512DQ-NEXT:    vpsrlw %xmm1, %xmm3, %xmm3
; AVX512DQ-NEXT:    vpsrlw $8, %xmm3, %xmm3
; AVX512DQ-NEXT:    vpbroadcastb %xmm3, %ymm3
; AVX512DQ-NEXT:    vpand %ymm3, %ymm2, %ymm2
; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm4 = [32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896]
; AVX512DQ-NEXT:    vpsrlw %xmm1, %ymm4, %ymm4
; AVX512DQ-NEXT:    vpxor %ymm4, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpsubb %ymm4, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpand %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpxor %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsubb %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: splatvar_shift_v64i8:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT:    vpsrlw %xmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-NEXT:    vpsrlw %xmm1, %xmm2, %xmm2
; AVX512BW-NEXT:    vpsrlw $8, %xmm2, %xmm2
; AVX512BW-NEXT:    vpbroadcastb %xmm2, %zmm2
; AVX512BW-NEXT:    vpandq %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896]
; AVX512BW-NEXT:    vpsrlw %xmm1, %zmm2, %zmm1
; AVX512BW-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    vpsubb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
  %shift = ashr <64 x i8> %a, %splat
  ret <64 x i8> %shift
}

;
; Constant Shifts
;

define <8 x i64> @constant_shift_v8i64(<8 x i64> %a) nounwind {
; ALL-LABEL: constant_shift_v8i64:
; ALL:       # %bb.0:
; ALL-NEXT:    vpsravq {{.*}}(%rip), %zmm0, %zmm0
; ALL-NEXT:    retq
  %shift = ashr <8 x i64> %a, <i64 1, i64 7, i64 31, i64 62, i64 1, i64 7, i64 31, i64 62>
  ret <8 x i64> %shift
}

define <16 x i32> @constant_shift_v16i32(<16 x i32> %a) nounwind {
; ALL-LABEL: constant_shift_v16i32:
; ALL:       # %bb.0:
; ALL-NEXT:    vpsravd {{.*}}(%rip), %zmm0, %zmm0
; ALL-NEXT:    retq
  %shift = ashr <16 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
  ret <16 x i32> %shift
}

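; AVX512BW folds the constant shift amounts straight into vpsravw's memory
; operand; AVX512DQ again has to widen each half to v16i32.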
define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
; AVX512DQ-LABEL: constant_shift_v32i16:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512DQ-NEXT:    vpmovsxwd %ymm1, %zmm1
; AVX512DQ-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512DQ-NEXT:    vpsravd %zmm2, %zmm1, %zmm1
; AVX512DQ-NEXT:    vpmovdw %zmm1, %ymm1
; AVX512DQ-NEXT:    vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT:    vpsravd %zmm2, %zmm0, %zmm0
; AVX512DQ-NEXT:    vpmovdw %zmm0, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: constant_shift_v32i16:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpsravw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %shift = ashr <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
  ret <32 x i16> %shift
}

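; Constant byte shifts become a widening multiply: each byte is sign-extended
; into a word lane (unpack + vpsraw $8), multiplied by 2^(8-amount), and the
; shifted result is taken from the high byte of the product.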
define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-LABEL: constant_shift_v64i8:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512DQ-NEXT:    vpsraw $8, %ymm2, %ymm2
; AVX512DQ-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [2,4,8,16,32,64,128,256,2,4,8,16,32,64,128,256]
; AVX512DQ-NEXT:    # ymm3 = mem[0,1,0,1]
; AVX512DQ-NEXT:    vpmullw %ymm3, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpsrlw $8, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512DQ-NEXT:    vpsraw $8, %ymm1, %ymm1
; AVX512DQ-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
; AVX512DQ-NEXT:    # ymm4 = mem[0,1,0,1]
; AVX512DQ-NEXT:    vpmullw %ymm4, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpsrlw $8, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpackuswb %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512DQ-NEXT:    vpsraw $8, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpmullw %ymm3, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpsrlw $8, %ymm2, %ymm2
; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512DQ-NEXT:    vpsraw $8, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpmullw %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsrlw $8, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: constant_shift_v64i8:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT:    vpsraw $8, %zmm1, %zmm1
; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT:    vpsrlw $8, %zmm1, %zmm1
; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512BW-NEXT:    vpsraw $8, %zmm0, %zmm0
; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT:    vpsrlw $8, %zmm0, %zmm0
; AVX512BW-NEXT:    vpackuswb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %shift = ashr <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
  ret <64 x i8> %shift
}

;
; Uniform Constant Shifts
;

define <8 x i64> @splatconstant_shift_v8i64(<8 x i64> %a) nounwind {
; ALL-LABEL: splatconstant_shift_v8i64:
; ALL:       # %bb.0:
; ALL-NEXT:    vpsraq $7, %zmm0, %zmm0
; ALL-NEXT:    retq
  %shift = ashr <8 x i64> %a, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7>
  ret <8 x i64> %shift
}

define <16 x i32> @splatconstant_shift_v16i32(<16 x i32> %a) nounwind {
; ALL-LABEL: splatconstant_shift_v16i32:
; ALL:       # %bb.0:
; ALL-NEXT:    vpsrad $5, %zmm0, %zmm0
; ALL-NEXT:    retq
  %shift = ashr <16 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
  ret <16 x i32> %shift
}

define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) nounwind {
; AVX512DQ-LABEL: splatconstant_shift_v32i16:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512DQ-NEXT:    vpsraw $3, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpsraw $3, %ymm0, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: splatconstant_shift_v32i16:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpsraw $3, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %shift = ashr <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
  ret <32 x i16> %shift
}

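; A uniform immediate amount lets v64i8 use an immediate word shift: vpsrlw
; $3 plus a mask of 31 (0xff >> 3) clears the bits shifted in from the
; neighbouring byte, and the xor/sub against 16 (0x80 >> 3) re-extends the
; sign.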
define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-LABEL: splatconstant_shift_v64i8:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512DQ-NEXT:    vpsrlw $3, %ymm1, %ymm1
; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
; AVX512DQ-NEXT:    vpand %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512DQ-NEXT:    vpxor %ymm3, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpsubb %ymm3, %ymm1, %ymm1
; AVX512DQ-NEXT:    vpsrlw $3, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpand %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpxor %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT:    vpsubb %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: splatconstant_shift_v64i8:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpsrlw $3, %zmm0, %zmm0
; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512BW-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    vpsubb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %shift = ashr <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  ret <64 x i8> %shift
}

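; ashr by 7 just broadcasts the byte sign bit, so it folds to a compare
; against zero: vpcmpgtb on AVX512DQ, vpmovb2m/vpmovm2b on AVX512BW.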
define <64 x i8> @ashr_const7_v64i8(<64 x i8> %a) {
; AVX512DQ-LABEL: ashr_const7_v64i8:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512DQ-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512DQ-NEXT:    vpcmpgtb %ymm1, %ymm2, %ymm1
; AVX512DQ-NEXT:    vpcmpgtb %ymm0, %ymm2, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT:    retq
;
; AVX512BW-LABEL: ashr_const7_v64i8:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpmovb2m %zmm0, %k0
; AVX512BW-NEXT:    vpmovm2b %k0, %zmm0
; AVX512BW-NEXT:    retq
  %res = ashr <64 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  ret <64 x i8> %res
}