; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2,+gfni | FileCheck %s --check-prefixes=GFNISSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+gfni | FileCheck %s --check-prefixes=GFNIAVX,GFNIAVX1OR2,GFNIAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+gfni | FileCheck %s --check-prefixes=GFNIAVX,GFNIAVX1OR2,GFNIAVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+gfni | FileCheck %s --check-prefixes=GFNIAVX,GFNIAVX512,GFNIAVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+gfni | FileCheck %s --check-prefixes=GFNIAVX,GFNIAVX512,GFNIAVX512BW
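; The check prefixes layer from shared to specific: GFNIAVX matches every AVX
; RUN line, GFNIAVX1OR2 the two pre-AVX512 runs, and GFNIAVX512 the two AVX512
; runs, so codegen common to several configurations is asserted only once.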

;
; 128 Bit Vector Shifts
;
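; These tests cover fully variable, splatted-variable, per-element-constant
; and splat-constant i8 shift amounts. As the assertions below show, a
; splat-constant shift folds to a single gf2p8affineqb against a fixed
; bit-matrix, while variable amounts need shift-and-blend sequences.
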
define <16 x i8> @var_shl_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; GFNISSE-LABEL: var_shl_v16i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: movdqa %xmm0, %xmm2
; GFNISSE-NEXT: psllw $5, %xmm1
; GFNISSE-NEXT: movdqa %xmm0, %xmm3
; GFNISSE-NEXT: gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; GFNISSE-NEXT: movdqa %xmm1, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; GFNISSE-NEXT: movdqa %xmm2, %xmm3
; GFNISSE-NEXT: gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; GFNISSE-NEXT: paddb %xmm1, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; GFNISSE-NEXT: movdqa %xmm2, %xmm3
; GFNISSE-NEXT: paddb %xmm2, %xmm3
; GFNISSE-NEXT: paddb %xmm1, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; GFNISSE-NEXT: movdqa %xmm2, %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX1OR2-LABEL: var_shl_v16i8:
; GFNIAVX1OR2: # %bb.0:
; GFNIAVX1OR2-NEXT: vpsllw $5, %xmm1, %xmm1
; GFNIAVX1OR2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
; GFNIAVX1OR2-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
; GFNIAVX1OR2-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; GFNIAVX1OR2-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: vpaddb %xmm0, %xmm0, %xmm2
; GFNIAVX1OR2-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; GFNIAVX1OR2-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: retq
;
; GFNIAVX512VL-LABEL: var_shl_v16i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; GFNIAVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; GFNIAVX512VL-NEXT: vpsllvd %zmm1, %zmm0, %zmm0
; GFNIAVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; GFNIAVX512VL-NEXT: vzeroupper
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: var_shl_v16i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; GFNIAVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; GFNIAVX512BW-NEXT: vpsllvw %ymm1, %ymm0, %ymm0
; GFNIAVX512BW-NEXT: vpmovwb %ymm0, %xmm0
; GFNIAVX512BW-NEXT: vzeroupper
; GFNIAVX512BW-NEXT: retq
  %shift = shl <16 x i8> %a, %b
  ret <16 x i8> %shift
}

define <16 x i8> @var_lshr_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; GFNISSE-LABEL: var_lshr_v16i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: movdqa %xmm0, %xmm2
; GFNISSE-NEXT: psllw $5, %xmm1
; GFNISSE-NEXT: movdqa %xmm0, %xmm3
; GFNISSE-NEXT: gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; GFNISSE-NEXT: movdqa %xmm1, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; GFNISSE-NEXT: movdqa %xmm2, %xmm3
; GFNISSE-NEXT: gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; GFNISSE-NEXT: paddb %xmm1, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; GFNISSE-NEXT: movdqa %xmm2, %xmm3
; GFNISSE-NEXT: gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; GFNISSE-NEXT: paddb %xmm1, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; GFNISSE-NEXT: movdqa %xmm2, %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX1OR2-LABEL: var_lshr_v16i8:
; GFNIAVX1OR2: # %bb.0:
; GFNIAVX1OR2-NEXT: vpsllw $5, %xmm1, %xmm1
; GFNIAVX1OR2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
; GFNIAVX1OR2-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
; GFNIAVX1OR2-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; GFNIAVX1OR2-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
; GFNIAVX1OR2-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; GFNIAVX1OR2-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: retq
;
; GFNIAVX512VL-LABEL: var_lshr_v16i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; GFNIAVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; GFNIAVX512VL-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
; GFNIAVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; GFNIAVX512VL-NEXT: vzeroupper
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: var_lshr_v16i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; GFNIAVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; GFNIAVX512BW-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0
; GFNIAVX512BW-NEXT: vpmovwb %ymm0, %xmm0
; GFNIAVX512BW-NEXT: vzeroupper
; GFNIAVX512BW-NEXT: retq
  %shift = lshr <16 x i8> %a, %b
  ret <16 x i8> %shift
}

define <16 x i8> @var_ashr_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; GFNISSE-LABEL: var_ashr_v16i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: movdqa %xmm0, %xmm2
; GFNISSE-NEXT: psllw $5, %xmm1
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
; GFNISSE-NEXT: movdqa %xmm3, %xmm4
; GFNISSE-NEXT: psraw $4, %xmm4
; GFNISSE-NEXT: pblendvb %xmm0, %xmm4, %xmm3
; GFNISSE-NEXT: movdqa %xmm3, %xmm4
; GFNISSE-NEXT: psraw $2, %xmm4
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm4, %xmm3
; GFNISSE-NEXT: movdqa %xmm3, %xmm4
; GFNISSE-NEXT: psraw $1, %xmm4
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm4, %xmm3
; GFNISSE-NEXT: psrlw $8, %xmm3
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; GFNISSE-NEXT: movdqa %xmm1, %xmm2
; GFNISSE-NEXT: psraw $4, %xmm2
; GFNISSE-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm2
; GFNISSE-NEXT: psraw $2, %xmm2
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm2
; GFNISSE-NEXT: psraw $1, %xmm2
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; GFNISSE-NEXT: psrlw $8, %xmm1
; GFNISSE-NEXT: packuswb %xmm3, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX1OR2-LABEL: var_ashr_v16i8:
; GFNIAVX1OR2: # %bb.0:
; GFNIAVX1OR2-NEXT: vpsllw $5, %xmm1, %xmm1
; GFNIAVX1OR2-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1OR2-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1OR2-NEXT: vpsraw $4, %xmm3, %xmm4
; GFNIAVX1OR2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; GFNIAVX1OR2-NEXT: vpsraw $2, %xmm3, %xmm4
; GFNIAVX1OR2-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; GFNIAVX1OR2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; GFNIAVX1OR2-NEXT: vpsraw $1, %xmm3, %xmm4
; GFNIAVX1OR2-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; GFNIAVX1OR2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
; GFNIAVX1OR2-NEXT: vpsrlw $8, %xmm2, %xmm2
; GFNIAVX1OR2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1OR2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1OR2-NEXT: vpsraw $4, %xmm0, %xmm3
; GFNIAVX1OR2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: vpsraw $2, %xmm0, %xmm3
; GFNIAVX1OR2-NEXT: vpaddw %xmm1, %xmm1, %xmm1
; GFNIAVX1OR2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: vpsraw $1, %xmm0, %xmm3
; GFNIAVX1OR2-NEXT: vpaddw %xmm1, %xmm1, %xmm1
; GFNIAVX1OR2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: vpsrlw $8, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: retq
;
; GFNIAVX512VL-LABEL: var_ashr_v16i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; GFNIAVX512VL-NEXT: vpmovsxbd %xmm0, %zmm0
; GFNIAVX512VL-NEXT: vpsravd %zmm1, %zmm0, %zmm0
; GFNIAVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; GFNIAVX512VL-NEXT: vzeroupper
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: var_ashr_v16i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; GFNIAVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; GFNIAVX512BW-NEXT: vpsravw %ymm1, %ymm0, %ymm0
; GFNIAVX512BW-NEXT: vpmovwb %ymm0, %xmm0
; GFNIAVX512BW-NEXT: vzeroupper
; GFNIAVX512BW-NEXT: retq
  %shift = ashr <16 x i8> %a, %b
  ret <16 x i8> %shift
}

define <16 x i8> @splatvar_shl_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; GFNISSE-LABEL: splatvar_shl_v16i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNISSE-NEXT: psllw %xmm1, %xmm0
; GFNISSE-NEXT: pcmpeqd %xmm2, %xmm2
; GFNISSE-NEXT: psllw %xmm1, %xmm2
; GFNISSE-NEXT: pxor %xmm1, %xmm1
; GFNISSE-NEXT: pshufb %xmm1, %xmm2
; GFNISSE-NEXT: pand %xmm2, %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: splatvar_shl_v16i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX1-NEXT: vpsllw %xmm1, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpsllw %xmm1, %xmm2, %xmm1
; GFNIAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: splatvar_shl_v16i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX2-NEXT: vpsllw %xmm1, %xmm0, %xmm0
; GFNIAVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; GFNIAVX2-NEXT: vpsllw %xmm1, %xmm2, %xmm1
; GFNIAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; GFNIAVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: splatvar_shl_v16i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; GFNIAVX512VL-NEXT: vpslld %xmm1, %zmm0, %zmm0
; GFNIAVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; GFNIAVX512VL-NEXT: vzeroupper
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: splatvar_shl_v16i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; GFNIAVX512BW-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; GFNIAVX512BW-NEXT: vpmovwb %ymm0, %xmm0
; GFNIAVX512BW-NEXT: vzeroupper
; GFNIAVX512BW-NEXT: retq
  %splat = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
  %shift = shl <16 x i8> %a, %splat
  ret <16 x i8> %shift
}

define <16 x i8> @splatvar_lshr_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; GFNISSE-LABEL: splatvar_lshr_v16i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNISSE-NEXT: psrlw %xmm1, %xmm0
; GFNISSE-NEXT: pcmpeqd %xmm2, %xmm2
; GFNISSE-NEXT: psrlw %xmm1, %xmm2
; GFNISSE-NEXT: pshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; GFNISSE-NEXT: pand %xmm2, %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: splatvar_lshr_v16i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
; GFNIAVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; GFNIAVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: splatvar_lshr_v16i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; GFNIAVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; GFNIAVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
; GFNIAVX2-NEXT: vpsrlw $8, %xmm1, %xmm1
; GFNIAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; GFNIAVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: splatvar_lshr_v16i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; GFNIAVX512VL-NEXT: vpsrld %xmm1, %zmm0, %zmm0
; GFNIAVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; GFNIAVX512VL-NEXT: vzeroupper
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: splatvar_lshr_v16i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; GFNIAVX512BW-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; GFNIAVX512BW-NEXT: vpmovwb %ymm0, %xmm0
; GFNIAVX512BW-NEXT: vzeroupper
; GFNIAVX512BW-NEXT: retq
  %splat = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
  %shift = lshr <16 x i8> %a, %splat
  ret <16 x i8> %shift
}

define <16 x i8> @splatvar_ashr_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; GFNISSE-LABEL: splatvar_ashr_v16i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNISSE-NEXT: psrlw %xmm1, %xmm0
; GFNISSE-NEXT: pcmpeqd %xmm2, %xmm2
; GFNISSE-NEXT: psrlw %xmm1, %xmm2
; GFNISSE-NEXT: pshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; GFNISSE-NEXT: pand %xmm2, %xmm0
; GFNISSE-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
; GFNISSE-NEXT: psrlw %xmm1, %xmm2
; GFNISSE-NEXT: pxor %xmm2, %xmm0
; GFNISSE-NEXT: psubb %xmm2, %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: splatvar_ashr_v16i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; GFNIAVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; GFNIAVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
; GFNIAVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
; GFNIAVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: splatvar_ashr_v16i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; GFNIAVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; GFNIAVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
; GFNIAVX2-NEXT: vpsrlw $8, %xmm2, %xmm2
; GFNIAVX2-NEXT: vpbroadcastb %xmm2, %xmm2
; GFNIAVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; GFNIAVX2-NEXT: vpbroadcastb {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; GFNIAVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
; GFNIAVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; GFNIAVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: splatvar_ashr_v16i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX512VL-NEXT: vpmovsxbd %xmm0, %zmm0
; GFNIAVX512VL-NEXT: vpsrad %xmm1, %zmm0, %zmm0
; GFNIAVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; GFNIAVX512VL-NEXT: vzeroupper
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: splatvar_ashr_v16i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; GFNIAVX512BW-NEXT: vpsraw %xmm1, %ymm0, %ymm0
; GFNIAVX512BW-NEXT: vpmovwb %ymm0, %xmm0
; GFNIAVX512BW-NEXT: vzeroupper
; GFNIAVX512BW-NEXT: retq
  %splat = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
  %shift = ashr <16 x i8> %a, %splat
  ret <16 x i8> %shift
}

define <16 x i8> @constant_shl_v16i8(<16 x i8> %a) nounwind {
; GFNISSE-LABEL: constant_shl_v16i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: movdqa %xmm0, %xmm1
; GFNISSE-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1]
; GFNISSE-NEXT: psllw $8, %xmm1
; GFNISSE-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0]
; GFNISSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; GFNISSE-NEXT: por %xmm1, %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: constant_shl_v16i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1]
; GFNIAVX1-NEXT: vpsllw $8, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0]
; GFNIAVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; GFNIAVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: constant_shl_v16i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; GFNIAVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1]
; GFNIAVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; GFNIAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; GFNIAVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; GFNIAVX2-NEXT: vzeroupper
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: constant_shl_v16i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; GFNIAVX512VL-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; GFNIAVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; GFNIAVX512VL-NEXT: vzeroupper
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: constant_shl_v16i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; GFNIAVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; GFNIAVX512BW-NEXT: vpmovwb %ymm0, %xmm0
; GFNIAVX512BW-NEXT: vzeroupper
; GFNIAVX512BW-NEXT: retq
  %shift = shl <16 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
  ret <16 x i8> %shift
}

define <16 x i8> @constant_lshr_v16i8(<16 x i8> %a) nounwind {
; GFNISSE-LABEL: constant_lshr_v16i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: pxor %xmm2, %xmm2
; GFNISSE-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
; GFNISSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,4,8,16,32,64,128,256]
; GFNISSE-NEXT: psrlw $8, %xmm0
; GFNISSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [256,128,64,32,16,8,4,2]
; GFNISSE-NEXT: psrlw $8, %xmm1
; GFNISSE-NEXT: packuswb %xmm0, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: constant_lshr_v16i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; GFNIAVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2,4,8,16,32,64,128,256]
; GFNIAVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; GFNIAVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [256,128,64,32,16,8,4,2]
; GFNIAVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: constant_lshr_v16i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; GFNIAVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [256,128,64,32,16,8,4,2,2,4,8,16,32,64,128,256]
; GFNIAVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; GFNIAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; GFNIAVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; GFNIAVX2-NEXT: vzeroupper
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: constant_lshr_v16i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; GFNIAVX512VL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; GFNIAVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; GFNIAVX512VL-NEXT: vzeroupper
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: constant_lshr_v16i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; GFNIAVX512BW-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; GFNIAVX512BW-NEXT: vpmovwb %ymm0, %xmm0
; GFNIAVX512BW-NEXT: vzeroupper
; GFNIAVX512BW-NEXT: retq
  %shift = lshr <16 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
  ret <16 x i8> %shift
}

define <16 x i8> @constant_ashr_v16i8(<16 x i8> %a) nounwind {
; GFNISSE-LABEL: constant_ashr_v16i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: movdqa %xmm0, %xmm1
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; GFNISSE-NEXT: psraw $8, %xmm1
; GFNISSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [2,4,8,16,32,64,128,256]
; GFNISSE-NEXT: psrlw $8, %xmm1
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNISSE-NEXT: psraw $8, %xmm0
; GFNISSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [256,128,64,32,16,8,4,2]
; GFNISSE-NEXT: psrlw $8, %xmm0
; GFNISSE-NEXT: packuswb %xmm1, %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: constant_ashr_v16i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1-NEXT: vpsraw $8, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [2,4,8,16,32,64,128,256]
; GFNIAVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1-NEXT: vpsraw $8, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [256,128,64,32,16,8,4,2]
; GFNIAVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: constant_ashr_v16i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; GFNIAVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [256,128,64,32,16,8,4,2,2,4,8,16,32,64,128,256]
; GFNIAVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; GFNIAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; GFNIAVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; GFNIAVX2-NEXT: vzeroupper
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: constant_ashr_v16i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vpmovsxbd %xmm0, %zmm0
; GFNIAVX512VL-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; GFNIAVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; GFNIAVX512VL-NEXT: vzeroupper
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: constant_ashr_v16i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; GFNIAVX512BW-NEXT: vpsravw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; GFNIAVX512BW-NEXT: vpmovwb %ymm0, %xmm0
; GFNIAVX512BW-NEXT: vzeroupper
; GFNIAVX512BW-NEXT: retq
  %shift = ashr <16 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
  ret <16 x i8> %shift
}

define <16 x i8> @splatconstant_shl_v16i8(<16 x i8> %a) nounwind {
; GFNISSE-LABEL: splatconstant_shl_v16i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX1OR2-LABEL: splatconstant_shl_v16i8:
; GFNIAVX1OR2: # %bb.0:
; GFNIAVX1OR2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: retq
;
; GFNIAVX512-LABEL: splatconstant_shl_v16i8:
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
; GFNIAVX512-NEXT: retq
  %shift = shl <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  ret <16 x i8> %shift
}

define <16 x i8> @splatconstant_lshr_v16i8(<16 x i8> %a) nounwind {
; GFNISSE-LABEL: splatconstant_lshr_v16i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX1OR2-LABEL: splatconstant_lshr_v16i8:
; GFNIAVX1OR2: # %bb.0:
; GFNIAVX1OR2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: retq
;
; GFNIAVX512-LABEL: splatconstant_lshr_v16i8:
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
; GFNIAVX512-NEXT: retq
  %shift = lshr <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  ret <16 x i8> %shift
}

define <16 x i8> @splatconstant_ashr_v16i8(<16 x i8> %a) nounwind {
; GFNISSE-LABEL: splatconstant_ashr_v16i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX1OR2-LABEL: splatconstant_ashr_v16i8:
; GFNIAVX1OR2: # %bb.0:
; GFNIAVX1OR2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: retq
;
; GFNIAVX512-LABEL: splatconstant_ashr_v16i8:
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
; GFNIAVX512-NEXT: retq
  %shift = ashr <16 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  ret <16 x i8> %shift
}

;
; 256 Bit Vector Shifts
;
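; As the assertions below show, SSE and AVX1 targets split these 256-bit
; vectors and repeat the 128-bit sequences per half, while AVX2/AVX512
; targets operate on ymm (or widened zmm) registers directly.
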
define <32 x i8> @var_shl_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; GFNISSE-LABEL: var_shl_v32i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: movdqa %xmm2, %xmm4
; GFNISSE-NEXT: movdqa %xmm0, %xmm2
; GFNISSE-NEXT: pmovsxdq {{.*#+}} xmm5 = [16909320,16909320]
; GFNISSE-NEXT: movdqa %xmm0, %xmm6
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm5, %xmm6
; GFNISSE-NEXT: psllw $5, %xmm4
; GFNISSE-NEXT: movdqa %xmm4, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm6, %xmm2
; GFNISSE-NEXT: movdqa {{.*#+}} xmm6 = [32,16,8,4,2,1,0,0,32,16,8,4,2,1,0,0]
; GFNISSE-NEXT: movdqa %xmm2, %xmm7
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm6, %xmm7
; GFNISSE-NEXT: paddb %xmm4, %xmm4
; GFNISSE-NEXT: movdqa %xmm4, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm7, %xmm2
; GFNISSE-NEXT: movdqa %xmm2, %xmm7
; GFNISSE-NEXT: paddb %xmm2, %xmm7
; GFNISSE-NEXT: paddb %xmm4, %xmm4
; GFNISSE-NEXT: movdqa %xmm4, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm7, %xmm2
; GFNISSE-NEXT: movdqa %xmm1, %xmm4
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm5, %xmm4
; GFNISSE-NEXT: psllw $5, %xmm3
; GFNISSE-NEXT: movdqa %xmm3, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm4, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm4
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm6, %xmm4
; GFNISSE-NEXT: paddb %xmm3, %xmm3
; GFNISSE-NEXT: movdqa %xmm3, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm4, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm4
; GFNISSE-NEXT: paddb %xmm1, %xmm4
; GFNISSE-NEXT: paddb %xmm3, %xmm3
; GFNISSE-NEXT: movdqa %xmm3, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm4, %xmm1
; GFNISSE-NEXT: movdqa %xmm2, %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: var_shl_v32i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; GFNIAVX1-NEXT: vmovddup {{.*#+}} xmm3 = [8,4,2,1,0,0,0,0,8,4,2,1,0,0,0,0]
; GFNIAVX1-NEXT: # xmm3 = mem[0,0]
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm3, %xmm2, %xmm4
; GFNIAVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; GFNIAVX1-NEXT: vpsllw $5, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm2, %xmm2
; GFNIAVX1-NEXT: vmovddup {{.*#+}} xmm4 = [32,16,8,4,2,1,0,0,32,16,8,4,2,1,0,0]
; GFNIAVX1-NEXT: # xmm4 = mem[0,0]
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm4, %xmm2, %xmm6
; GFNIAVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm6, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm6
; GFNIAVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm6, %xmm2, %xmm2
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm3, %xmm0, %xmm3
; GFNIAVX1-NEXT: vpsllw $5, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm4, %xmm0, %xmm3
; GFNIAVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm3
; GFNIAVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: var_shl_v32i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpsllw $5, %ymm1, %ymm1
; GFNIAVX2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
; GFNIAVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; GFNIAVX2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
; GFNIAVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2
; GFNIAVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: var_shl_v32i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vpsllw $5, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm2
; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm2
; GFNIAVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm2
; GFNIAVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: var_shl_v32i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
; GFNIAVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; GFNIAVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; GFNIAVX512BW-NEXT: retq
  %shift = shl <32 x i8> %a, %b
  ret <32 x i8> %shift
}

define <32 x i8> @var_lshr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; GFNISSE-LABEL: var_lshr_v32i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: movdqa %xmm2, %xmm4
; GFNISSE-NEXT: movdqa %xmm0, %xmm2
; GFNISSE-NEXT: movdqa {{.*#+}} xmm5 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
; GFNISSE-NEXT: movdqa %xmm0, %xmm6
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm5, %xmm6
; GFNISSE-NEXT: psllw $5, %xmm4
; GFNISSE-NEXT: movdqa %xmm4, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm6, %xmm2
; GFNISSE-NEXT: movdqa {{.*#+}} xmm6 = [0,0,128,64,32,16,8,4,0,0,128,64,32,16,8,4]
; GFNISSE-NEXT: movdqa %xmm2, %xmm7
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm6, %xmm7
; GFNISSE-NEXT: paddb %xmm4, %xmm4
; GFNISSE-NEXT: movdqa %xmm4, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm7, %xmm2
; GFNISSE-NEXT: movdqa {{.*#+}} xmm7 = [0,128,64,32,16,8,4,2,0,128,64,32,16,8,4,2]
; GFNISSE-NEXT: movdqa %xmm2, %xmm8
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm7, %xmm8
; GFNISSE-NEXT: paddb %xmm4, %xmm4
; GFNISSE-NEXT: movdqa %xmm4, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm8, %xmm2
; GFNISSE-NEXT: movdqa %xmm1, %xmm4
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm5, %xmm4
; GFNISSE-NEXT: psllw $5, %xmm3
; GFNISSE-NEXT: movdqa %xmm3, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm4, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm4
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm6, %xmm4
; GFNISSE-NEXT: paddb %xmm3, %xmm3
; GFNISSE-NEXT: movdqa %xmm3, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm4, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm4
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm7, %xmm4
; GFNISSE-NEXT: paddb %xmm3, %xmm3
; GFNISSE-NEXT: movdqa %xmm3, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm4, %xmm1
; GFNISSE-NEXT: movdqa %xmm2, %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: var_lshr_v32i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; GFNIAVX1-NEXT: vmovddup {{.*#+}} xmm3 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
; GFNIAVX1-NEXT: # xmm3 = mem[0,0]
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm3, %xmm2, %xmm4
; GFNIAVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; GFNIAVX1-NEXT: vpsllw $5, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm2, %xmm2
; GFNIAVX1-NEXT: vmovddup {{.*#+}} xmm4 = [0,0,128,64,32,16,8,4,0,0,128,64,32,16,8,4]
; GFNIAVX1-NEXT: # xmm4 = mem[0,0]
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm4, %xmm2, %xmm6
; GFNIAVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm6, %xmm2, %xmm2
; GFNIAVX1-NEXT: vmovddup {{.*#+}} xmm6 = [0,128,64,32,16,8,4,2,0,128,64,32,16,8,4,2]
; GFNIAVX1-NEXT: # xmm6 = mem[0,0]
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm6, %xmm2, %xmm7
; GFNIAVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm7, %xmm2, %xmm2
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm3, %xmm0, %xmm3
; GFNIAVX1-NEXT: vpsllw $5, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm4, %xmm0, %xmm3
; GFNIAVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm6, %xmm0, %xmm3
; GFNIAVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: var_lshr_v32i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpsllw $5, %ymm1, %ymm1
; GFNIAVX2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
; GFNIAVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; GFNIAVX2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
; GFNIAVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; GFNIAVX2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
; GFNIAVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: var_lshr_v32i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vpsllw $5, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm2
; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm2
; GFNIAVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm2
; GFNIAVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: var_lshr_v32i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
; GFNIAVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; GFNIAVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; GFNIAVX512BW-NEXT: retq
  %shift = lshr <32 x i8> %a, %b
  ret <32 x i8> %shift
}

define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; GFNISSE-LABEL: var_ashr_v32i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: movdqa %xmm0, %xmm4
; GFNISSE-NEXT: psllw $5, %xmm2
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
; GFNISSE-NEXT: movdqa %xmm5, %xmm6
; GFNISSE-NEXT: psraw $4, %xmm6
; GFNISSE-NEXT: pblendvb %xmm0, %xmm6, %xmm5
; GFNISSE-NEXT: movdqa %xmm5, %xmm6
; GFNISSE-NEXT: psraw $2, %xmm6
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm6, %xmm5
; GFNISSE-NEXT: movdqa %xmm5, %xmm6
; GFNISSE-NEXT: psraw $1, %xmm6
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm6, %xmm5
; GFNISSE-NEXT: psrlw $8, %xmm5
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
; GFNISSE-NEXT: movdqa %xmm2, %xmm4
; GFNISSE-NEXT: psraw $4, %xmm4
; GFNISSE-NEXT: pblendvb %xmm0, %xmm4, %xmm2
; GFNISSE-NEXT: movdqa %xmm2, %xmm4
; GFNISSE-NEXT: psraw $2, %xmm4
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm4, %xmm2
; GFNISSE-NEXT: movdqa %xmm2, %xmm4
; GFNISSE-NEXT: psraw $1, %xmm4
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm4, %xmm2
; GFNISSE-NEXT: psrlw $8, %xmm2
; GFNISSE-NEXT: packuswb %xmm5, %xmm2
; GFNISSE-NEXT: psllw $5, %xmm3
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
; GFNISSE-NEXT: movdqa %xmm4, %xmm5
; GFNISSE-NEXT: psraw $4, %xmm5
; GFNISSE-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; GFNISSE-NEXT: movdqa %xmm4, %xmm5
; GFNISSE-NEXT: psraw $2, %xmm5
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; GFNISSE-NEXT: movdqa %xmm4, %xmm5
; GFNISSE-NEXT: psraw $1, %xmm5
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; GFNISSE-NEXT: psrlw $8, %xmm4
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNISSE-NEXT: movdqa %xmm1, %xmm3
; GFNISSE-NEXT: psraw $4, %xmm3
; GFNISSE-NEXT: pblendvb %xmm0, %xmm3, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm3
; GFNISSE-NEXT: psraw $2, %xmm3
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm3, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm3
; GFNISSE-NEXT: psraw $1, %xmm3
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm3, %xmm1
; GFNISSE-NEXT: psrlw $8, %xmm1
; GFNISSE-NEXT: packuswb %xmm4, %xmm1
; GFNISSE-NEXT: movdqa %xmm2, %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: var_ashr_v32i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; GFNIAVX1-NEXT: vpsllw $5, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1-NEXT: vpsraw $4, %xmm5, %xmm6
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpsraw $2, %xmm5, %xmm6
; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpsraw $1, %xmm5, %xmm6
; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm3
; GFNIAVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1-NEXT: vpsraw $4, %xmm4, %xmm5
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpsraw $2, %xmm4, %xmm5
; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpsraw $1, %xmm4, %xmm5
; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2
; GFNIAVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpsllw $5, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1-NEXT: vpsraw $4, %xmm4, %xmm5
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpsraw $2, %xmm4, %xmm5
; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpsraw $1, %xmm4, %xmm5
; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3
; GFNIAVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1-NEXT: vpsraw $4, %xmm0, %xmm4
; GFNIAVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsraw $2, %xmm0, %xmm4
; GFNIAVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsraw $1, %xmm0, %xmm4
; GFNIAVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: var_ashr_v32i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpsllw $5, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; GFNIAVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; GFNIAVX2-NEXT: vpsraw $4, %ymm3, %ymm4
; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; GFNIAVX2-NEXT: vpsraw $2, %ymm3, %ymm4
; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; GFNIAVX2-NEXT: vpsraw $1, %ymm3, %ymm4
; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
; GFNIAVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; GFNIAVX2-NEXT: vpsraw $4, %ymm0, %ymm3
; GFNIAVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpsraw $2, %ymm0, %ymm3
; GFNIAVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpsraw $1, %ymm0, %ymm3
; GFNIAVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: var_ashr_v32i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vpsllw $5, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; GFNIAVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; GFNIAVX512VL-NEXT: vpsraw $4, %ymm3, %ymm4
; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; GFNIAVX512VL-NEXT: vpsraw $2, %ymm3, %ymm4
; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; GFNIAVX512VL-NEXT: vpsraw $1, %ymm3, %ymm4
; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; GFNIAVX512VL-NEXT: vpsraw $4, %ymm0, %ymm3
; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpsraw $2, %ymm0, %ymm3
; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpsraw $1, %ymm0, %ymm3
; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: var_ashr_v32i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
; GFNIAVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; GFNIAVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; GFNIAVX512BW-NEXT: retq
  %shift = ashr <32 x i8> %a, %b
  ret <32 x i8> %shift
}

define <32 x i8> @splatvar_shl_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; GFNISSE-LABEL: splatvar_shl_v32i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: pmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; GFNISSE-NEXT: psllw %xmm2, %xmm0
; GFNISSE-NEXT: pcmpeqd %xmm3, %xmm3
; GFNISSE-NEXT: psllw %xmm2, %xmm3
; GFNISSE-NEXT: pxor %xmm4, %xmm4
; GFNISSE-NEXT: pshufb %xmm4, %xmm3
; GFNISSE-NEXT: pand %xmm3, %xmm0
; GFNISSE-NEXT: psllw %xmm2, %xmm1
; GFNISSE-NEXT: pand %xmm3, %xmm1
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: splatvar_shl_v32i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; GFNIAVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX1-NEXT: vpsllw %xmm1, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpsllw %xmm1, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpshufb %xmm4, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpsllw %xmm1, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: splatvar_shl_v32i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; GFNIAVX2-NEXT: vpsllw %xmm1, %xmm2, %xmm1
; GFNIAVX2-NEXT: vpbroadcastb %xmm1, %ymm1
; GFNIAVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: splatvar_shl_v32i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX512VL-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; GFNIAVX512VL-NEXT: vpsllw %xmm1, %xmm2, %xmm1
; GFNIAVX512VL-NEXT: vpbroadcastb %xmm1, %ymm1
; GFNIAVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: splatvar_shl_v32i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; GFNIAVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; GFNIAVX512BW-NEXT: retq
  %splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
  %shift = shl <32 x i8> %a, %splat
  ret <32 x i8> %shift
}

define <32 x i8> @splatvar_lshr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; GFNISSE-LABEL: splatvar_lshr_v32i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: pmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; GFNISSE-NEXT: psrlw %xmm2, %xmm0
; GFNISSE-NEXT: pcmpeqd %xmm3, %xmm3
; GFNISSE-NEXT: psrlw %xmm2, %xmm3
; GFNISSE-NEXT: pshufb {{.*#+}} xmm3 = xmm3[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; GFNISSE-NEXT: pand %xmm3, %xmm0
; GFNISSE-NEXT: psrlw %xmm2, %xmm1
; GFNISSE-NEXT: pand %xmm3, %xmm1
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: splatvar_lshr_v32i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; GFNIAVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpsrlw %xmm1, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; GFNIAVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: splatvar_lshr_v32i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; GFNIAVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
; GFNIAVX2-NEXT: vpsrlw $8, %xmm1, %xmm1
; GFNIAVX2-NEXT: vpbroadcastb %xmm1, %ymm1
; GFNIAVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: splatvar_lshr_v32i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX512VL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; GFNIAVX512VL-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
; GFNIAVX512VL-NEXT: vpsrlw $8, %xmm1, %xmm1
; GFNIAVX512VL-NEXT: vpbroadcastb %xmm1, %ymm1
; GFNIAVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: splatvar_lshr_v32i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; GFNIAVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; GFNIAVX512BW-NEXT: retq
  %splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
  %shift = lshr <32 x i8> %a, %splat
  ret <32 x i8> %shift
}

define <32 x i8> @splatvar_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; GFNISSE-LABEL: splatvar_ashr_v32i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: pmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; GFNISSE-NEXT: psrlw %xmm2, %xmm0
; GFNISSE-NEXT: pcmpeqd %xmm3, %xmm3
; GFNISSE-NEXT: psrlw %xmm2, %xmm3
; GFNISSE-NEXT: pshufb {{.*#+}} xmm3 = xmm3[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; GFNISSE-NEXT: pand %xmm3, %xmm0
; GFNISSE-NEXT: movdqa {{.*#+}} xmm4 = [32896,32896,32896,32896,32896,32896,32896,32896]
; GFNISSE-NEXT: psrlw %xmm2, %xmm4
; GFNISSE-NEXT: pxor %xmm4, %xmm0
; GFNISSE-NEXT: psubb %xmm4, %xmm0
; GFNISSE-NEXT: psrlw %xmm2, %xmm1
; GFNISSE-NEXT: pand %xmm3, %xmm1
; GFNISSE-NEXT: pxor %xmm4, %xmm1
; GFNISSE-NEXT: psubb %xmm4, %xmm1
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: splatvar_ashr_v32i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; GFNIAVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpsrlw %xmm1, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; GFNIAVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; GFNIAVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [32896,32896,32896,32896,32896,32896,32896,32896]
; GFNIAVX1-NEXT: vpsrlw %xmm1, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpsubb %xmm4, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsubb %xmm4, %xmm0, %xmm0
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: splatvar_ashr_v32i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; GFNIAVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
; GFNIAVX2-NEXT: vpsrlw $8, %xmm2, %xmm2
; GFNIAVX2-NEXT: vpbroadcastb %xmm2, %ymm2
; GFNIAVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpbroadcastb {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; GFNIAVX2-NEXT: vpsrlw %xmm1, %ymm2, %ymm1
; GFNIAVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: splatvar_ashr_v32i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX512VL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpbroadcastd {{.*#+}} ymm2 = [32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896]
; GFNIAVX512VL-NEXT: vpsrlw %xmm1, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; GFNIAVX512VL-NEXT: vpsrlw %xmm1, %xmm3, %xmm1
; GFNIAVX512VL-NEXT: vpsrlw $8, %xmm1, %xmm1
; GFNIAVX512VL-NEXT: vpbroadcastb %xmm1, %ymm1
; GFNIAVX512VL-NEXT: vpternlogq $108, %ymm0, %ymm2, %ymm1
; GFNIAVX512VL-NEXT: vpsubb %ymm2, %ymm1, %ymm0
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: splatvar_ashr_v32i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; GFNIAVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX512BW-NEXT: vpsraw %xmm1, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; GFNIAVX512BW-NEXT: retq
  %splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
  %shift = ashr <32 x i8> %a, %splat
  ret <32 x i8> %shift
}

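; Note: with no byte arithmetic-shift instruction available, the splatvar
; ashr lowering above is a logical shift plus a sign fixup: with
; m = 0x80 >> s broadcast to every byte (32896 = 0x8080 per word),
; ((x >>u s) ^ m) - m re-extends the sign bits.
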
define <32 x i8> @constant_shl_v32i8(<32 x i8> %a) nounwind {
; GFNISSE-LABEL: constant_shl_v32i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: pmovzxbw {{.*#+}} xmm2 = [1,4,16,64,128,32,8,2]
; GFNISSE-NEXT: movdqa %xmm0, %xmm3
; GFNISSE-NEXT: pmaddubsw %xmm2, %xmm3
; GFNISSE-NEXT: pmovzxbw {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; GFNISSE-NEXT: pand %xmm4, %xmm3
; GFNISSE-NEXT: movdqa {{.*#+}} xmm5 = [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1]
; GFNISSE-NEXT: pmaddubsw %xmm5, %xmm0
; GFNISSE-NEXT: psllw $8, %xmm0
; GFNISSE-NEXT: por %xmm3, %xmm0
; GFNISSE-NEXT: movdqa %xmm1, %xmm3
; GFNISSE-NEXT: pmaddubsw %xmm2, %xmm3
; GFNISSE-NEXT: pand %xmm4, %xmm3
; GFNISSE-NEXT: pmaddubsw %xmm5, %xmm1
; GFNISSE-NEXT: psllw $8, %xmm1
; GFNISSE-NEXT: por %xmm3, %xmm1
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: constant_shl_v32i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; GFNIAVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = [1,4,16,64,128,32,8,2]
; GFNIAVX1-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm3
; GFNIAVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; GFNIAVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; GFNIAVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1]
; GFNIAVX1-NEXT: vpmaddubsw %xmm5, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpsllw $8, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpor %xmm1, %xmm3, %xmm1
; GFNIAVX1-NEXT: vpmaddubsw %xmm2, %xmm0, %xmm2
; GFNIAVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpmaddubsw %xmm5, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsllw $8, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpor %xmm0, %xmm2, %xmm0
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: constant_shl_v32i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 # [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1]
; GFNIAVX2-NEXT: vpsllw $8, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0]
; GFNIAVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; GFNIAVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: constant_shl_v32i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 # [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0]
; GFNIAVX512VL-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1]
; GFNIAVX512VL-NEXT: vpsllw $8, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm0
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: constant_shl_v32i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; GFNIAVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; GFNIAVX512BW-NEXT: retq
  %shift = shl <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
  ret <32 x i8> %shift
}

define <32 x i8> @constant_lshr_v32i8(<32 x i8> %a) nounwind {
; GFNISSE-LABEL: constant_lshr_v32i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: pxor %xmm4, %xmm4
; GFNISSE-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
; GFNISSE-NEXT: movdqa {{.*#+}} xmm5 = [2,4,8,16,32,64,128,256]
; GFNISSE-NEXT: pmullw %xmm5, %xmm0
; GFNISSE-NEXT: psrlw $8, %xmm0
; GFNISSE-NEXT: movdqa {{.*#+}} xmm6 = [256,128,64,32,16,8,4,2]
; GFNISSE-NEXT: pmullw %xmm6, %xmm2
; GFNISSE-NEXT: psrlw $8, %xmm2
; GFNISSE-NEXT: packuswb %xmm0, %xmm2
; GFNISSE-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
; GFNISSE-NEXT: pmullw %xmm5, %xmm1
; GFNISSE-NEXT: psrlw $8, %xmm1
; GFNISSE-NEXT: pmullw %xmm6, %xmm3
; GFNISSE-NEXT: psrlw $8, %xmm3
; GFNISSE-NEXT: packuswb %xmm1, %xmm3
; GFNISSE-NEXT: movdqa %xmm2, %xmm0
; GFNISSE-NEXT: movdqa %xmm3, %xmm1
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: constant_lshr_v32i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; GFNIAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
; GFNIAVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [2,4,8,16,32,64,128,256]
; GFNIAVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; GFNIAVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [256,128,64,32,16,8,4,2]
; GFNIAVX1-NEXT: vpmullw %xmm5, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
; GFNIAVX1-NEXT: vpmullw %xmm4, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; GFNIAVX1-NEXT: vpmullw %xmm5, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: constant_lshr_v32i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; GFNIAVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; GFNIAVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [2,4,8,16,32,64,128,256,2,4,8,16,32,64,128,256]
; GFNIAVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; GFNIAVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
; GFNIAVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: constant_lshr_v32i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; GFNIAVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; GFNIAVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [2,4,8,16,32,64,128,256,2,4,8,16,32,64,128,256]
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; GFNIAVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: constant_lshr_v32i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; GFNIAVX512BW-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; GFNIAVX512BW-NEXT: retq
  %shift = lshr <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
  ret <32 x i8> %shift
}

define <32 x i8> @constant_ashr_v32i8(<32 x i8> %a) nounwind {
; GFNISSE-LABEL: constant_ashr_v32i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: movdqa %xmm0, %xmm2
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; GFNISSE-NEXT: psraw $8, %xmm2
; GFNISSE-NEXT: movdqa {{.*#+}} xmm3 = [2,4,8,16,32,64,128,256]
; GFNISSE-NEXT: pmullw %xmm3, %xmm2
; GFNISSE-NEXT: psrlw $8, %xmm2
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNISSE-NEXT: psraw $8, %xmm0
; GFNISSE-NEXT: movdqa {{.*#+}} xmm4 = [256,128,64,32,16,8,4,2]
; GFNISSE-NEXT: pmullw %xmm4, %xmm0
; GFNISSE-NEXT: psrlw $8, %xmm0
; GFNISSE-NEXT: packuswb %xmm2, %xmm0
; GFNISSE-NEXT: movdqa %xmm1, %xmm2
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; GFNISSE-NEXT: psraw $8, %xmm2
; GFNISSE-NEXT: pmullw %xmm3, %xmm2
; GFNISSE-NEXT: psrlw $8, %xmm2
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNISSE-NEXT: psraw $8, %xmm1
; GFNISSE-NEXT: pmullw %xmm4, %xmm1
; GFNISSE-NEXT: psrlw $8, %xmm1
; GFNISSE-NEXT: packuswb %xmm2, %xmm1
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: constant_ashr_v32i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1-NEXT: vpsraw $8, %xmm2, %xmm2
; GFNIAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [2,4,8,16,32,64,128,256]
; GFNIAVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1-NEXT: vpsraw $8, %xmm1, %xmm1
; GFNIAVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [256,128,64,32,16,8,4,2]
; GFNIAVX1-NEXT: vpmullw %xmm4, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1-NEXT: vpsraw $8, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1-NEXT: vpsraw $8, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpmullw %xmm4, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: constant_ashr_v32i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; GFNIAVX2-NEXT: vpsraw $8, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 # [2,4,8,16,32,64,128,256,2,4,8,16,32,64,128,256]
; GFNIAVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; GFNIAVX2-NEXT: vpsraw $8, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
; GFNIAVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: constant_ashr_v32i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; GFNIAVX512VL-NEXT: vpsraw $8, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 # [2,4,8,16,32,64,128,256,2,4,8,16,32,64,128,256]
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; GFNIAVX512VL-NEXT: vpsraw $8, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: constant_ashr_v32i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; GFNIAVX512BW-NEXT: vpsravw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; GFNIAVX512BW-NEXT: retq
  %shift = ashr <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
  ret <32 x i8> %shift
}

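; The splatconstant cases below need no blend loop: a uniform constant byte
; shift is a single affine transform, so each shift amount becomes one fixed
; 8x8 GF(2) bit matrix fed to (v)gf2p8affineqb (e.g. [0,128,64,32,16,8,4,2]
; encodes a logical right shift by 1).
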
define <32 x i8> @splatconstant_shl_v32i8(<32 x i8> %a) nounwind {
; GFNISSE-LABEL: splatconstant_shl_v32i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: pmovsxwq {{.*#+}} xmm2 = [258,258]
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm2, %xmm0
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm2, %xmm1
; GFNISSE-NEXT: retq
;
; GFNIAVX1OR2-LABEL: splatconstant_shl_v32i8:
; GFNIAVX1OR2: # %bb.0:
; GFNIAVX1OR2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; GFNIAVX1OR2-NEXT: retq
;
; GFNIAVX512-LABEL: splatconstant_shl_v32i8:
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
; GFNIAVX512-NEXT: retq
  %shift = shl <32 x i8> %a, <i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6>
  ret <32 x i8> %shift
}

define <32 x i8> @splatconstant_lshr_v32i8(<32 x i8> %a) nounwind {
; GFNISSE-LABEL: splatconstant_lshr_v32i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: movdqa {{.*#+}} xmm2 = [0,128,64,32,16,8,4,2,0,128,64,32,16,8,4,2]
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm2, %xmm0
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm2, %xmm1
; GFNISSE-NEXT: retq
;
; GFNIAVX1OR2-LABEL: splatconstant_lshr_v32i8:
; GFNIAVX1OR2: # %bb.0:
; GFNIAVX1OR2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; GFNIAVX1OR2-NEXT: retq
;
; GFNIAVX512-LABEL: splatconstant_lshr_v32i8:
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
; GFNIAVX512-NEXT: retq
  %shift = lshr <32 x i8> %a, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  ret <32 x i8> %shift
}

define <32 x i8> @splatconstant_ashr_v32i8(<32 x i8> %a) nounwind {
; GFNISSE-LABEL: splatconstant_ashr_v32i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: movdqa {{.*#+}} xmm2 = [128,128,128,64,32,16,8,4,128,128,128,64,32,16,8,4]
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm2, %xmm0
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm2, %xmm1
; GFNISSE-NEXT: retq
;
; GFNIAVX1OR2-LABEL: splatconstant_ashr_v32i8:
; GFNIAVX1OR2: # %bb.0:
; GFNIAVX1OR2-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; GFNIAVX1OR2-NEXT: retq
;
; GFNIAVX512-LABEL: splatconstant_ashr_v32i8:
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
; GFNIAVX512-NEXT: retq
  %shift = ashr <32 x i8> %a, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
  ret <32 x i8> %shift
}

;
; 512 Bit Vector Shifts
;
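; Without AVX512BW, the v64i8 shifts below are split into 128-bit (GFNISSE)
; or 256-bit (GFNIAVX1/2, GFNIAVX512VL) pieces, stepping through the shift
; amount bits with gf2p8affineqb/pblendvb; GFNIAVX512BW instead drives masked
; gf2p8affineqb updates from vpmovb2m mask bits.
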
define <64 x i8> @var_shl_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNISSE-LABEL: var_shl_v64i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: movdqa %xmm4, %xmm8
; GFNISSE-NEXT: movdqa %xmm0, %xmm4
; GFNISSE-NEXT: pmovsxdq {{.*#+}} xmm9 = [16909320,16909320]
; GFNISSE-NEXT: movdqa %xmm0, %xmm10
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm9, %xmm10
; GFNISSE-NEXT: psllw $5, %xmm8
; GFNISSE-NEXT: movdqa %xmm8, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm10, %xmm4
; GFNISSE-NEXT: movdqa {{.*#+}} xmm10 = [32,16,8,4,2,1,0,0,32,16,8,4,2,1,0,0]
; GFNISSE-NEXT: movdqa %xmm4, %xmm11
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm10, %xmm11
; GFNISSE-NEXT: paddb %xmm8, %xmm8
; GFNISSE-NEXT: movdqa %xmm8, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm11, %xmm4
; GFNISSE-NEXT: movdqa %xmm4, %xmm11
; GFNISSE-NEXT: paddb %xmm4, %xmm11
; GFNISSE-NEXT: paddb %xmm8, %xmm8
; GFNISSE-NEXT: movdqa %xmm8, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm11, %xmm4
; GFNISSE-NEXT: movdqa %xmm1, %xmm8
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm9, %xmm8
; GFNISSE-NEXT: psllw $5, %xmm5
; GFNISSE-NEXT: movdqa %xmm5, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm8, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm8
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm10, %xmm8
; GFNISSE-NEXT: paddb %xmm5, %xmm5
; GFNISSE-NEXT: movdqa %xmm5, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm8, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm8
; GFNISSE-NEXT: paddb %xmm1, %xmm8
; GFNISSE-NEXT: paddb %xmm5, %xmm5
; GFNISSE-NEXT: movdqa %xmm5, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm8, %xmm1
; GFNISSE-NEXT: movdqa %xmm2, %xmm5
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm9, %xmm5
; GFNISSE-NEXT: psllw $5, %xmm6
; GFNISSE-NEXT: movdqa %xmm6, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm5, %xmm2
; GFNISSE-NEXT: movdqa %xmm2, %xmm5
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm10, %xmm5
; GFNISSE-NEXT: paddb %xmm6, %xmm6
; GFNISSE-NEXT: movdqa %xmm6, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm5, %xmm2
; GFNISSE-NEXT: movdqa %xmm2, %xmm5
; GFNISSE-NEXT: paddb %xmm2, %xmm5
; GFNISSE-NEXT: paddb %xmm6, %xmm6
; GFNISSE-NEXT: movdqa %xmm6, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm5, %xmm2
; GFNISSE-NEXT: movdqa %xmm3, %xmm5
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm9, %xmm5
; GFNISSE-NEXT: psllw $5, %xmm7
; GFNISSE-NEXT: movdqa %xmm7, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm5, %xmm3
; GFNISSE-NEXT: movdqa %xmm3, %xmm5
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm10, %xmm5
; GFNISSE-NEXT: paddb %xmm7, %xmm7
; GFNISSE-NEXT: movdqa %xmm7, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm5, %xmm3
; GFNISSE-NEXT: movdqa %xmm3, %xmm5
; GFNISSE-NEXT: paddb %xmm3, %xmm5
; GFNISSE-NEXT: paddb %xmm7, %xmm7
; GFNISSE-NEXT: movdqa %xmm7, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm5, %xmm3
; GFNISSE-NEXT: movdqa %xmm4, %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: var_shl_v64i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; GFNIAVX1-NEXT: vmovddup {{.*#+}} xmm4 = [8,4,2,1,0,0,0,0,8,4,2,1,0,0,0,0]
; GFNIAVX1-NEXT: # xmm4 = mem[0,0]
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm4, %xmm5, %xmm6
; GFNIAVX1-NEXT: vextractf128 $1, %ymm2, %xmm7
; GFNIAVX1-NEXT: vpsllw $5, %xmm7, %xmm7
; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm5, %xmm6
; GFNIAVX1-NEXT: vmovddup {{.*#+}} xmm5 = [32,16,8,4,2,1,0,0,32,16,8,4,2,1,0,0]
; GFNIAVX1-NEXT: # xmm5 = mem[0,0]
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm5, %xmm6, %xmm8
; GFNIAVX1-NEXT: vpaddb %xmm7, %xmm7, %xmm7
; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm8, %xmm6, %xmm6
; GFNIAVX1-NEXT: vpaddb %xmm6, %xmm6, %xmm8
; GFNIAVX1-NEXT: vpaddb %xmm7, %xmm7, %xmm7
; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm8, %xmm6, %xmm6
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm4, %xmm0, %xmm7
; GFNIAVX1-NEXT: vpsllw $5, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm7, %xmm0, %xmm0
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm5, %xmm0, %xmm7
; GFNIAVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm7, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm7
; GFNIAVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm7, %xmm0, %xmm0
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm0
; GFNIAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm4, %xmm2, %xmm6
; GFNIAVX1-NEXT: vextractf128 $1, %ymm3, %xmm7
; GFNIAVX1-NEXT: vpsllw $5, %xmm7, %xmm7
; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm2, %xmm2
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm5, %xmm2, %xmm6
; GFNIAVX1-NEXT: vpaddb %xmm7, %xmm7, %xmm7
; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm6
; GFNIAVX1-NEXT: vpaddb %xmm7, %xmm7, %xmm7
; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm2, %xmm2
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm4, %xmm1, %xmm4
; GFNIAVX1-NEXT: vpsllw $5, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm1, %xmm1
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm5, %xmm1, %xmm4
; GFNIAVX1-NEXT: vpaddb %xmm3, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm4
; GFNIAVX1-NEXT: vpaddb %xmm3, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm1, %xmm1
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: var_shl_v64i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [8,4,2,1,0,0,0,0,8,4,2,1,0,0,0,0,8,4,2,1,0,0,0,0,8,4,2,1,0,0,0,0]
; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm4, %ymm0, %ymm5
; GFNIAVX2-NEXT: vpsllw $5, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpbroadcastq {{.*#+}} ymm5 = [32,16,8,4,2,1,0,0,32,16,8,4,2,1,0,0,32,16,8,4,2,1,0,0,32,16,8,4,2,1,0,0]
; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm5, %ymm0, %ymm6
; GFNIAVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm6, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm6
; GFNIAVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm6, %ymm0, %ymm0
; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm4, %ymm1, %ymm2
; GFNIAVX2-NEXT: vpsllw $5, %ymm3, %ymm3
; GFNIAVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm5, %ymm1, %ymm2
; GFNIAVX2-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; GFNIAVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm2
; GFNIAVX2-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; GFNIAVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: var_shl_v64i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; GFNIAVX512VL-NEXT: vpbroadcastq {{.*#+}} ymm3 = [8,4,2,1,0,0,0,0,8,4,2,1,0,0,0,0,8,4,2,1,0,0,0,0,8,4,2,1,0,0,0,0]
; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, %ymm3, %ymm2, %ymm4
; GFNIAVX512VL-NEXT: vextracti64x4 $1, %zmm1, %ymm5
; GFNIAVX512VL-NEXT: vpsllw $5, %ymm5, %ymm5
; GFNIAVX512VL-NEXT: vpblendvb %ymm5, %ymm4, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpbroadcastq {{.*#+}} ymm4 = [32,16,8,4,2,1,0,0,32,16,8,4,2,1,0,0,32,16,8,4,2,1,0,0,32,16,8,4,2,1,0,0]
; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, %ymm4, %ymm2, %ymm6
; GFNIAVX512VL-NEXT: vpaddb %ymm5, %ymm5, %ymm5
; GFNIAVX512VL-NEXT: vpblendvb %ymm5, %ymm6, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm6
; GFNIAVX512VL-NEXT: vpaddb %ymm5, %ymm5, %ymm5
; GFNIAVX512VL-NEXT: vpblendvb %ymm5, %ymm6, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, %ymm3, %ymm0, %ymm3
; GFNIAVX512VL-NEXT: vpsllw $5, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, %ymm4, %ymm0, %ymm3
; GFNIAVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm3
; GFNIAVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: var_shl_v64i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
; GFNIAVX512BW-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 {%k1}
; GFNIAVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
; GFNIAVX512BW-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 {%k1}
; GFNIAVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
; GFNIAVX512BW-NEXT: vpaddb %zmm0, %zmm0, %zmm0 {%k1}
; GFNIAVX512BW-NEXT: retq
  %shift = shl <64 x i8> %a, %b
  ret <64 x i8> %shift
}

define <64 x i8> @var_lshr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNISSE-LABEL: var_lshr_v64i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: movdqa %xmm4, %xmm8
; GFNISSE-NEXT: movdqa %xmm0, %xmm4
; GFNISSE-NEXT: movdqa {{.*#+}} xmm9 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
; GFNISSE-NEXT: movdqa %xmm0, %xmm10
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm9, %xmm10
; GFNISSE-NEXT: psllw $5, %xmm8
; GFNISSE-NEXT: movdqa %xmm8, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm10, %xmm4
; GFNISSE-NEXT: movdqa {{.*#+}} xmm10 = [0,0,128,64,32,16,8,4,0,0,128,64,32,16,8,4]
; GFNISSE-NEXT: movdqa %xmm4, %xmm11
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm10, %xmm11
; GFNISSE-NEXT: paddb %xmm8, %xmm8
; GFNISSE-NEXT: movdqa %xmm8, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm11, %xmm4
; GFNISSE-NEXT: movdqa {{.*#+}} xmm11 = [0,128,64,32,16,8,4,2,0,128,64,32,16,8,4,2]
; GFNISSE-NEXT: movdqa %xmm4, %xmm12
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm11, %xmm12
; GFNISSE-NEXT: paddb %xmm8, %xmm8
; GFNISSE-NEXT: movdqa %xmm8, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm12, %xmm4
; GFNISSE-NEXT: movdqa %xmm1, %xmm8
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm9, %xmm8
; GFNISSE-NEXT: psllw $5, %xmm5
; GFNISSE-NEXT: movdqa %xmm5, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm8, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm8
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm10, %xmm8
; GFNISSE-NEXT: paddb %xmm5, %xmm5
; GFNISSE-NEXT: movdqa %xmm5, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm8, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm8
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm11, %xmm8
; GFNISSE-NEXT: paddb %xmm5, %xmm5
; GFNISSE-NEXT: movdqa %xmm5, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm8, %xmm1
; GFNISSE-NEXT: movdqa %xmm2, %xmm5
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm9, %xmm5
; GFNISSE-NEXT: psllw $5, %xmm6
; GFNISSE-NEXT: movdqa %xmm6, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm5, %xmm2
; GFNISSE-NEXT: movdqa %xmm2, %xmm5
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm10, %xmm5
; GFNISSE-NEXT: paddb %xmm6, %xmm6
; GFNISSE-NEXT: movdqa %xmm6, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm5, %xmm2
; GFNISSE-NEXT: movdqa %xmm2, %xmm5
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm11, %xmm5
; GFNISSE-NEXT: paddb %xmm6, %xmm6
; GFNISSE-NEXT: movdqa %xmm6, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm5, %xmm2
; GFNISSE-NEXT: movdqa %xmm3, %xmm5
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm9, %xmm5
; GFNISSE-NEXT: psllw $5, %xmm7
; GFNISSE-NEXT: movdqa %xmm7, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm5, %xmm3
; GFNISSE-NEXT: movdqa %xmm3, %xmm5
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm10, %xmm5
; GFNISSE-NEXT: paddb %xmm7, %xmm7
; GFNISSE-NEXT: movdqa %xmm7, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm5, %xmm3
; GFNISSE-NEXT: movdqa %xmm3, %xmm5
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm11, %xmm5
; GFNISSE-NEXT: paddb %xmm7, %xmm7
; GFNISSE-NEXT: movdqa %xmm7, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm5, %xmm3
; GFNISSE-NEXT: movdqa %xmm4, %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: var_lshr_v64i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; GFNIAVX1-NEXT: vmovddup {{.*#+}} xmm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
; GFNIAVX1-NEXT: # xmm4 = mem[0,0]
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm4, %xmm5, %xmm6
; GFNIAVX1-NEXT: vextractf128 $1, %ymm2, %xmm7
; GFNIAVX1-NEXT: vpsllw $5, %xmm7, %xmm7
; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm5, %xmm6
; GFNIAVX1-NEXT: vmovddup {{.*#+}} xmm5 = [0,0,128,64,32,16,8,4,0,0,128,64,32,16,8,4]
; GFNIAVX1-NEXT: # xmm5 = mem[0,0]
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm5, %xmm6, %xmm8
; GFNIAVX1-NEXT: vpaddb %xmm7, %xmm7, %xmm7
; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm8, %xmm6, %xmm8
; GFNIAVX1-NEXT: vmovddup {{.*#+}} xmm6 = [0,128,64,32,16,8,4,2,0,128,64,32,16,8,4,2]
; GFNIAVX1-NEXT: # xmm6 = mem[0,0]
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm6, %xmm8, %xmm9
; GFNIAVX1-NEXT: vpaddb %xmm7, %xmm7, %xmm7
; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm9, %xmm8, %xmm7
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm4, %xmm0, %xmm8
; GFNIAVX1-NEXT: vpsllw $5, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm8, %xmm0, %xmm0
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm5, %xmm0, %xmm8
; GFNIAVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm8, %xmm0, %xmm0
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm6, %xmm0, %xmm8
; GFNIAVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm8, %xmm0, %xmm0
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm0
; GFNIAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm4, %xmm2, %xmm7
; GFNIAVX1-NEXT: vextractf128 $1, %ymm3, %xmm8
; GFNIAVX1-NEXT: vpsllw $5, %xmm8, %xmm8
; GFNIAVX1-NEXT: vpblendvb %xmm8, %xmm7, %xmm2, %xmm2
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm5, %xmm2, %xmm7
; GFNIAVX1-NEXT: vpaddb %xmm8, %xmm8, %xmm8
; GFNIAVX1-NEXT: vpblendvb %xmm8, %xmm7, %xmm2, %xmm2
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm6, %xmm2, %xmm7
; GFNIAVX1-NEXT: vpaddb %xmm8, %xmm8, %xmm8
; GFNIAVX1-NEXT: vpblendvb %xmm8, %xmm7, %xmm2, %xmm2
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm4, %xmm1, %xmm4
; GFNIAVX1-NEXT: vpsllw $5, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm1, %xmm1
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm5, %xmm1, %xmm4
; GFNIAVX1-NEXT: vpaddb %xmm3, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm1, %xmm1
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %xmm6, %xmm1, %xmm4
; GFNIAVX1-NEXT: vpaddb %xmm3, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm1, %xmm1
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: var_lshr_v64i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm4, %ymm0, %ymm5
; GFNIAVX2-NEXT: vpsllw $5, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpbroadcastq {{.*#+}} ymm5 = [0,0,128,64,32,16,8,4,0,0,128,64,32,16,8,4,0,0,128,64,32,16,8,4,0,0,128,64,32,16,8,4]
; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm5, %ymm0, %ymm6
; GFNIAVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm6, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpbroadcastq {{.*#+}} ymm6 = [0,128,64,32,16,8,4,2,0,128,64,32,16,8,4,2,0,128,64,32,16,8,4,2,0,128,64,32,16,8,4,2]
; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm6, %ymm0, %ymm7
; GFNIAVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm7, %ymm0, %ymm0
; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm4, %ymm1, %ymm2
; GFNIAVX2-NEXT: vpsllw $5, %ymm3, %ymm3
; GFNIAVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm5, %ymm1, %ymm2
; GFNIAVX2-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; GFNIAVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm6, %ymm1, %ymm2
; GFNIAVX2-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; GFNIAVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: var_lshr_v64i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; GFNIAVX512VL-NEXT: vpbroadcastq {{.*#+}} ymm3 = [0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16,0,0,0,0,128,64,32,16]
; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, %ymm3, %ymm2, %ymm4
; GFNIAVX512VL-NEXT: vextracti64x4 $1, %zmm1, %ymm5
; GFNIAVX512VL-NEXT: vpsllw $5, %ymm5, %ymm5
; GFNIAVX512VL-NEXT: vpblendvb %ymm5, %ymm4, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpbroadcastq {{.*#+}} ymm4 = [0,0,128,64,32,16,8,4,0,0,128,64,32,16,8,4,0,0,128,64,32,16,8,4,0,0,128,64,32,16,8,4]
; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, %ymm4, %ymm2, %ymm6
; GFNIAVX512VL-NEXT: vpaddb %ymm5, %ymm5, %ymm5
; GFNIAVX512VL-NEXT: vpblendvb %ymm5, %ymm6, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpbroadcastq {{.*#+}} ymm6 = [0,128,64,32,16,8,4,2,0,128,64,32,16,8,4,2,0,128,64,32,16,8,4,2,0,128,64,32,16,8,4,2]
; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, %ymm6, %ymm2, %ymm7
; GFNIAVX512VL-NEXT: vpaddb %ymm5, %ymm5, %ymm5
; GFNIAVX512VL-NEXT: vpblendvb %ymm5, %ymm7, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, %ymm3, %ymm0, %ymm3
; GFNIAVX512VL-NEXT: vpsllw $5, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, %ymm4, %ymm0, %ymm3
; GFNIAVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vgf2p8affineqb $0, %ymm6, %ymm0, %ymm3
; GFNIAVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: var_lshr_v64i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
; GFNIAVX512BW-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 {%k1}
; GFNIAVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
; GFNIAVX512BW-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 {%k1}
; GFNIAVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
; GFNIAVX512BW-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 {%k1}
; GFNIAVX512BW-NEXT: retq
  %shift = lshr <64 x i8> %a, %b
  ret <64 x i8> %shift
}

define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNISSE-LABEL: var_ashr_v64i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: movdqa %xmm0, %xmm8
; GFNISSE-NEXT: psllw $5, %xmm4
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm8[8],xmm9[9],xmm8[9],xmm9[10],xmm8[10],xmm9[11],xmm8[11],xmm9[12],xmm8[12],xmm9[13],xmm8[13],xmm9[14],xmm8[14],xmm9[15],xmm8[15]
; GFNISSE-NEXT: movdqa %xmm9, %xmm10
; GFNISSE-NEXT: psraw $4, %xmm10
; GFNISSE-NEXT: pblendvb %xmm0, %xmm10, %xmm9
; GFNISSE-NEXT: movdqa %xmm9, %xmm10
; GFNISSE-NEXT: psraw $2, %xmm10
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm10, %xmm9
; GFNISSE-NEXT: movdqa %xmm9, %xmm10
; GFNISSE-NEXT: psraw $1, %xmm10
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm10, %xmm9
; GFNISSE-NEXT: psrlw $8, %xmm9
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3],xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
; GFNISSE-NEXT: movdqa %xmm4, %xmm8
; GFNISSE-NEXT: psraw $4, %xmm8
; GFNISSE-NEXT: pblendvb %xmm0, %xmm8, %xmm4
; GFNISSE-NEXT: movdqa %xmm4, %xmm8
; GFNISSE-NEXT: psraw $2, %xmm8
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm8, %xmm4
; GFNISSE-NEXT: movdqa %xmm4, %xmm8
; GFNISSE-NEXT: psraw $1, %xmm8
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm8, %xmm4
; GFNISSE-NEXT: psrlw $8, %xmm4
; GFNISSE-NEXT: packuswb %xmm9, %xmm4
; GFNISSE-NEXT: psllw $5, %xmm5
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15]
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm1[8],xmm8[9],xmm1[9],xmm8[10],xmm1[10],xmm8[11],xmm1[11],xmm8[12],xmm1[12],xmm8[13],xmm1[13],xmm8[14],xmm1[14],xmm8[15],xmm1[15]
; GFNISSE-NEXT: movdqa %xmm8, %xmm9
; GFNISSE-NEXT: psraw $4, %xmm9
; GFNISSE-NEXT: pblendvb %xmm0, %xmm9, %xmm8
; GFNISSE-NEXT: movdqa %xmm8, %xmm9
; GFNISSE-NEXT: psraw $2, %xmm9
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm9, %xmm8
; GFNISSE-NEXT: movdqa %xmm8, %xmm9
; GFNISSE-NEXT: psraw $1, %xmm9
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm9, %xmm8
; GFNISSE-NEXT: psrlw $8, %xmm8
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNISSE-NEXT: movdqa %xmm1, %xmm5
; GFNISSE-NEXT: psraw $4, %xmm5
; GFNISSE-NEXT: pblendvb %xmm0, %xmm5, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm5
; GFNISSE-NEXT: psraw $2, %xmm5
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm5, %xmm1
; GFNISSE-NEXT: movdqa %xmm1, %xmm5
; GFNISSE-NEXT: psraw $1, %xmm5
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm5, %xmm1
; GFNISSE-NEXT: psrlw $8, %xmm1
; GFNISSE-NEXT: packuswb %xmm8, %xmm1
; GFNISSE-NEXT: psllw $5, %xmm6
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm6[8],xmm0[9],xmm6[9],xmm0[10],xmm6[10],xmm0[11],xmm6[11],xmm0[12],xmm6[12],xmm0[13],xmm6[13],xmm0[14],xmm6[14],xmm0[15],xmm6[15]
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
; GFNISSE-NEXT: movdqa %xmm5, %xmm8
; GFNISSE-NEXT: psraw $4, %xmm8
; GFNISSE-NEXT: pblendvb %xmm0, %xmm8, %xmm5
; GFNISSE-NEXT: movdqa %xmm5, %xmm8
; GFNISSE-NEXT: psraw $2, %xmm8
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm8, %xmm5
; GFNISSE-NEXT: movdqa %xmm5, %xmm8
; GFNISSE-NEXT: psraw $1, %xmm8
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm8, %xmm5
; GFNISSE-NEXT: psrlw $8, %xmm5
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNISSE-NEXT: movdqa %xmm2, %xmm6
; GFNISSE-NEXT: psraw $4, %xmm6
; GFNISSE-NEXT: pblendvb %xmm0, %xmm6, %xmm2
; GFNISSE-NEXT: movdqa %xmm2, %xmm6
; GFNISSE-NEXT: psraw $2, %xmm6
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm6, %xmm2
; GFNISSE-NEXT: movdqa %xmm2, %xmm6
; GFNISSE-NEXT: psraw $1, %xmm6
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm6, %xmm2
; GFNISSE-NEXT: psrlw $8, %xmm2
; GFNISSE-NEXT: packuswb %xmm5, %xmm2
; GFNISSE-NEXT: psllw $5, %xmm7
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm7[8],xmm0[9],xmm7[9],xmm0[10],xmm7[10],xmm0[11],xmm7[11],xmm0[12],xmm7[12],xmm0[13],xmm7[13],xmm0[14],xmm7[14],xmm0[15],xmm7[15]
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15]
; GFNISSE-NEXT: movdqa %xmm5, %xmm6
; GFNISSE-NEXT: psraw $4, %xmm6
; GFNISSE-NEXT: pblendvb %xmm0, %xmm6, %xmm5
; GFNISSE-NEXT: movdqa %xmm5, %xmm6
; GFNISSE-NEXT: psraw $2, %xmm6
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm6, %xmm5
; GFNISSE-NEXT: movdqa %xmm5, %xmm6
; GFNISSE-NEXT: psraw $1, %xmm6
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm6, %xmm5
; GFNISSE-NEXT: psrlw $8, %xmm5
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNISSE-NEXT: movdqa %xmm3, %xmm6
; GFNISSE-NEXT: psraw $4, %xmm6
; GFNISSE-NEXT: pblendvb %xmm0, %xmm6, %xmm3
; GFNISSE-NEXT: movdqa %xmm3, %xmm6
; GFNISSE-NEXT: psraw $2, %xmm6
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm6, %xmm3
; GFNISSE-NEXT: movdqa %xmm3, %xmm6
; GFNISSE-NEXT: psraw $1, %xmm6
; GFNISSE-NEXT: paddw %xmm0, %xmm0
; GFNISSE-NEXT: pblendvb %xmm0, %xmm6, %xmm3
; GFNISSE-NEXT: psrlw $8, %xmm3
; GFNISSE-NEXT: packuswb %xmm5, %xmm3
; GFNISSE-NEXT: movdqa %xmm4, %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: var_ashr_v64i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; GFNIAVX1-NEXT: vpsllw $5, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1-NEXT: vpsraw $4, %xmm7, %xmm8
; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm8, %xmm7, %xmm7
; GFNIAVX1-NEXT: vpsraw $2, %xmm7, %xmm8
; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm8, %xmm7, %xmm7
; GFNIAVX1-NEXT: vpsraw $1, %xmm7, %xmm8
; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm8, %xmm7, %xmm5
; GFNIAVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1-NEXT: vpsraw $4, %xmm6, %xmm7
; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm6
; GFNIAVX1-NEXT: vpsraw $2, %xmm6, %xmm7
; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm6
; GFNIAVX1-NEXT: vpsraw $1, %xmm6, %xmm7
; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm4
; GFNIAVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpackuswb %xmm5, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpsllw $5, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1-NEXT: vpsraw $4, %xmm6, %xmm7
; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm7, %xmm6, %xmm6
; GFNIAVX1-NEXT: vpsraw $2, %xmm6, %xmm7
; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm7, %xmm6, %xmm6
; GFNIAVX1-NEXT: vpsraw $1, %xmm6, %xmm7
; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm7, %xmm6, %xmm5
; GFNIAVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1-NEXT: vpsraw $4, %xmm0, %xmm6
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsraw $2, %xmm0, %xmm6
; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsraw $1, %xmm0, %xmm6
; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpackuswb %xmm5, %xmm0, %xmm0
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
; GFNIAVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
; GFNIAVX1-NEXT: vpsllw $5, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1-NEXT: vpsraw $4, %xmm6, %xmm7
; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm6
; GFNIAVX1-NEXT: vpsraw $2, %xmm6, %xmm7
; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm6
; GFNIAVX1-NEXT: vpsraw $1, %xmm6, %xmm7
; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm4
; GFNIAVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1-NEXT: vpsraw $4, %xmm5, %xmm6
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpsraw $2, %xmm5, %xmm6
; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpsraw $1, %xmm5, %xmm6
; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm2
; GFNIAVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpsllw $5, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1-NEXT: vpsraw $4, %xmm5, %xmm6
; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpsraw $2, %xmm5, %xmm6
; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpsraw $1, %xmm5, %xmm6
; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm5, %xmm4
; GFNIAVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1-NEXT: vpsraw $4, %xmm1, %xmm5
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpsraw $2, %xmm1, %xmm5
; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpsraw $1, %xmm1, %xmm5
; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpackuswb %xmm4, %xmm1, %xmm1
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: var_ashr_v64i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpsllw $5, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
2132 ; GFNIAVX2-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
2133 ; GFNIAVX2-NEXT: vpsraw $4, %ymm5, %ymm6
2134 ; GFNIAVX2-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm5
2135 ; GFNIAVX2-NEXT: vpsraw $2, %ymm5, %ymm6
2136 ; GFNIAVX2-NEXT: vpaddw %ymm4, %ymm4, %ymm4
2137 ; GFNIAVX2-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm5
2138 ; GFNIAVX2-NEXT: vpsraw $1, %ymm5, %ymm6
2139 ; GFNIAVX2-NEXT: vpaddw %ymm4, %ymm4, %ymm4
2140 ; GFNIAVX2-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm4
2141 ; GFNIAVX2-NEXT: vpsrlw $8, %ymm4, %ymm4
2142 ; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
2143 ; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
2144 ; GFNIAVX2-NEXT: vpsraw $4, %ymm0, %ymm5
2145 ; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
2146 ; GFNIAVX2-NEXT: vpsraw $2, %ymm0, %ymm5
2147 ; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
2148 ; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
2149 ; GFNIAVX2-NEXT: vpsraw $1, %ymm0, %ymm5
2150 ; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
2151 ; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
2152 ; GFNIAVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
2153 ; GFNIAVX2-NEXT: vpackuswb %ymm4, %ymm0, %ymm0
2154 ; GFNIAVX2-NEXT: vpsllw $5, %ymm3, %ymm2
2155 ; GFNIAVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
2156 ; GFNIAVX2-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
2157 ; GFNIAVX2-NEXT: vpsraw $4, %ymm4, %ymm5
2158 ; GFNIAVX2-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
2159 ; GFNIAVX2-NEXT: vpsraw $2, %ymm4, %ymm5
2160 ; GFNIAVX2-NEXT: vpaddw %ymm3, %ymm3, %ymm3
2161 ; GFNIAVX2-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
2162 ; GFNIAVX2-NEXT: vpsraw $1, %ymm4, %ymm5
2163 ; GFNIAVX2-NEXT: vpaddw %ymm3, %ymm3, %ymm3
2164 ; GFNIAVX2-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
2165 ; GFNIAVX2-NEXT: vpsrlw $8, %ymm3, %ymm3
2166 ; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
2167 ; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
2168 ; GFNIAVX2-NEXT: vpsraw $4, %ymm1, %ymm4
2169 ; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
2170 ; GFNIAVX2-NEXT: vpsraw $2, %ymm1, %ymm4
2171 ; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
2172 ; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
2173 ; GFNIAVX2-NEXT: vpsraw $1, %ymm1, %ymm4
2174 ; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
2175 ; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
2176 ; GFNIAVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
2177 ; GFNIAVX2-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
2178 ; GFNIAVX2-NEXT: retq
2180 ; GFNIAVX512VL-LABEL: var_ashr_v64i8:
2181 ; GFNIAVX512VL: # %bb.0:
2182 ; GFNIAVX512VL-NEXT: vextracti64x4 $1, %zmm1, %ymm2
2183 ; GFNIAVX512VL-NEXT: vpsllw $5, %ymm2, %ymm2
2184 ; GFNIAVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
2185 ; GFNIAVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm4
2186 ; GFNIAVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
2187 ; GFNIAVX512VL-NEXT: vpsraw $4, %ymm5, %ymm6
2188 ; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm5
2189 ; GFNIAVX512VL-NEXT: vpsraw $2, %ymm5, %ymm6
2190 ; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm3
2191 ; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm5
2192 ; GFNIAVX512VL-NEXT: vpsraw $1, %ymm5, %ymm6
2193 ; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm3
2194 ; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm3
2195 ; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm3, %ymm3
2196 ; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
2197 ; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
2198 ; GFNIAVX512VL-NEXT: vpsraw $4, %ymm4, %ymm5
2199 ; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm4
2200 ; GFNIAVX512VL-NEXT: vpsraw $2, %ymm4, %ymm5
2201 ; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
2202 ; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm4
2203 ; GFNIAVX512VL-NEXT: vpsraw $1, %ymm4, %ymm5
2204 ; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
2205 ; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm2
2206 ; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
2207 ; GFNIAVX512VL-NEXT: vpackuswb %ymm3, %ymm2, %ymm2
2208 ; GFNIAVX512VL-NEXT: vpsllw $5, %ymm1, %ymm1
2209 ; GFNIAVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
2210 ; GFNIAVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
2211 ; GFNIAVX512VL-NEXT: vpsraw $4, %ymm4, %ymm5
2212 ; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
2213 ; GFNIAVX512VL-NEXT: vpsraw $2, %ymm4, %ymm5
2214 ; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm3
2215 ; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
2216 ; GFNIAVX512VL-NEXT: vpsraw $1, %ymm4, %ymm5
2217 ; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm3
2218 ; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
2219 ; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm3, %ymm3
2220 ; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
2221 ; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
2222 ; GFNIAVX512VL-NEXT: vpsraw $4, %ymm0, %ymm4
2223 ; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
2224 ; GFNIAVX512VL-NEXT: vpsraw $2, %ymm0, %ymm4
2225 ; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1
2226 ; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
2227 ; GFNIAVX512VL-NEXT: vpsraw $1, %ymm0, %ymm4
2228 ; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1
2229 ; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
2230 ; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
2231 ; GFNIAVX512VL-NEXT: vpackuswb %ymm3, %ymm0, %ymm0
2232 ; GFNIAVX512VL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
2233 ; GFNIAVX512VL-NEXT: retq
2235 ; GFNIAVX512BW-LABEL: var_ashr_v64i8:
2236 ; GFNIAVX512BW: # %bb.0:
2237 ; GFNIAVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
2238 ; GFNIAVX512BW-NEXT: vpsraw $4, %zmm2, %zmm3
2239 ; GFNIAVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
2240 ; GFNIAVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
2241 ; GFNIAVX512BW-NEXT: vpmovb2m %zmm4, %k1
2242 ; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
2243 ; GFNIAVX512BW-NEXT: vpsraw $2, %zmm2, %zmm3
2244 ; GFNIAVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm4
2245 ; GFNIAVX512BW-NEXT: vpmovb2m %zmm4, %k1
2246 ; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
2247 ; GFNIAVX512BW-NEXT: vpsraw $1, %zmm2, %zmm3
2248 ; GFNIAVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm4
2249 ; GFNIAVX512BW-NEXT: vpmovb2m %zmm4, %k1
2250 ; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
2251 ; GFNIAVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
2252 ; GFNIAVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
2253 ; GFNIAVX512BW-NEXT: vpsraw $4, %zmm0, %zmm3
2254 ; GFNIAVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
2255 ; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
2256 ; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
2257 ; GFNIAVX512BW-NEXT: vpsraw $2, %zmm0, %zmm3
2258 ; GFNIAVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm1
2259 ; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
2260 ; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
2261 ; GFNIAVX512BW-NEXT: vpsraw $1, %zmm0, %zmm3
2262 ; GFNIAVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm1
2263 ; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
2264 ; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
2265 ; GFNIAVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
2266 ; GFNIAVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
2267 ; GFNIAVX512BW-NEXT: retq
2268 %shift = ashr <64 x i8> %a, %b
2269 ret <64 x i8> %shift
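; Uniform (splat) shift amounts. The checks below expect the amount to be
; zero-extended and used with a single word-granularity shift per vector,
; plus a byte mask (built by shifting an all-ones vector by the same amount)
; that clears the bits carried across byte boundaries.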
define <64 x i8> @splatvar_shl_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNISSE-LABEL: splatvar_shl_v64i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: pmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
; GFNISSE-NEXT: psllw %xmm4, %xmm0
; GFNISSE-NEXT: pcmpeqd %xmm5, %xmm5
; GFNISSE-NEXT: psllw %xmm4, %xmm5
; GFNISSE-NEXT: pxor %xmm6, %xmm6
; GFNISSE-NEXT: pshufb %xmm6, %xmm5
; GFNISSE-NEXT: pand %xmm5, %xmm0
; GFNISSE-NEXT: psllw %xmm4, %xmm1
; GFNISSE-NEXT: pand %xmm5, %xmm1
; GFNISSE-NEXT: psllw %xmm4, %xmm2
; GFNISSE-NEXT: pand %xmm5, %xmm2
; GFNISSE-NEXT: psllw %xmm4, %xmm3
; GFNISSE-NEXT: pand %xmm5, %xmm3
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: splatvar_shl_v64i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; GFNIAVX1-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX1-NEXT: vpsllw %xmm2, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpsllw %xmm2, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpshufb %xmm5, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpsllw %xmm2, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; GFNIAVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; GFNIAVX1-NEXT: vpsllw %xmm2, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpsllw %xmm2, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: splatvar_shl_v64i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX2-NEXT: vpsllw %xmm2, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; GFNIAVX2-NEXT: vpsllw %xmm2, %xmm3, %xmm3
; GFNIAVX2-NEXT: vpbroadcastb %xmm3, %ymm3
; GFNIAVX2-NEXT: vpand %ymm3, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpsllw %xmm2, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: splatvar_shl_v64i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; GFNIAVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX512VL-NEXT: vpsllw %xmm1, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; GFNIAVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; GFNIAVX512VL-NEXT: vpsllw %xmm1, %xmm2, %xmm1
; GFNIAVX512VL-NEXT: vpbroadcastb %xmm1, %ymm1
; GFNIAVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm1, %zmm1
; GFNIAVX512VL-NEXT: vpandq %zmm1, %zmm0, %zmm0
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: splatvar_shl_v64i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; GFNIAVX512BW-NEXT: vpsllw %xmm1, %xmm2, %xmm1
; GFNIAVX512BW-NEXT: vpbroadcastb %xmm1, %zmm1
; GFNIAVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: retq
  %splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
  %shift = shl <64 x i8> %a, %splat
  ret <64 x i8> %shift
}

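; Same splat-amount pattern for logical right shifts: psrlw/vpsrlw per
; vector, with a mask clearing the high bits pulled in from the neighbouring
; byte.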
define <64 x i8> @splatvar_lshr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNISSE-LABEL: splatvar_lshr_v64i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: pmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
; GFNISSE-NEXT: psrlw %xmm4, %xmm0
; GFNISSE-NEXT: pcmpeqd %xmm5, %xmm5
; GFNISSE-NEXT: psrlw %xmm4, %xmm5
; GFNISSE-NEXT: pshufb {{.*#+}} xmm5 = xmm5[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; GFNISSE-NEXT: pand %xmm5, %xmm0
; GFNISSE-NEXT: psrlw %xmm4, %xmm1
; GFNISSE-NEXT: pand %xmm5, %xmm1
; GFNISSE-NEXT: psrlw %xmm4, %xmm2
; GFNISSE-NEXT: pand %xmm5, %xmm2
; GFNISSE-NEXT: psrlw %xmm4, %xmm3
; GFNISSE-NEXT: pand %xmm5, %xmm3
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: splatvar_lshr_v64i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; GFNIAVX1-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX1-NEXT: vpsrlw %xmm2, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpsrlw %xmm2, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; GFNIAVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpsrlw %xmm2, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; GFNIAVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; GFNIAVX1-NEXT: vpsrlw %xmm2, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpsrlw %xmm2, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: splatvar_lshr_v64i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX2-NEXT: vpsrlw %xmm2, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; GFNIAVX2-NEXT: vpsrlw %xmm2, %xmm3, %xmm3
; GFNIAVX2-NEXT: vpsrlw $8, %xmm3, %xmm3
; GFNIAVX2-NEXT: vpbroadcastb %xmm3, %ymm3
; GFNIAVX2-NEXT: vpand %ymm3, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpsrlw %xmm2, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: splatvar_lshr_v64i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; GFNIAVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX512VL-NEXT: vpsrlw %xmm1, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; GFNIAVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; GFNIAVX512VL-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
; GFNIAVX512VL-NEXT: vpsrlw $8, %xmm1, %xmm1
; GFNIAVX512VL-NEXT: vpbroadcastb %xmm1, %ymm1
; GFNIAVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm1, %zmm1
; GFNIAVX512VL-NEXT: vpandq %zmm1, %zmm0, %zmm0
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: splatvar_lshr_v64i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; GFNIAVX512BW-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
; GFNIAVX512BW-NEXT: vpsrlw $8, %xmm1, %xmm1
; GFNIAVX512BW-NEXT: vpbroadcastb %xmm1, %zmm1
; GFNIAVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: retq
  %splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
  %shift = lshr <64 x i8> %a, %splat
  ret <64 x i8> %shift
}

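; There is no word-granularity trick that preserves per-byte signs, so the
; splat-amount arithmetic shift is expected to be a logical shift followed by
; the (x ^ m) - m sign-extension idiom, where m is the per-byte sign-bit mask
; (0x80) shifted right by the same amount.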
define <64 x i8> @splatvar_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; GFNISSE-LABEL: splatvar_ashr_v64i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: pmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
; GFNISSE-NEXT: psrlw %xmm4, %xmm0
; GFNISSE-NEXT: pcmpeqd %xmm5, %xmm5
; GFNISSE-NEXT: psrlw %xmm4, %xmm5
; GFNISSE-NEXT: pshufb {{.*#+}} xmm5 = xmm5[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; GFNISSE-NEXT: pand %xmm5, %xmm0
; GFNISSE-NEXT: movdqa {{.*#+}} xmm6 = [32896,32896,32896,32896,32896,32896,32896,32896]
; GFNISSE-NEXT: psrlw %xmm4, %xmm6
; GFNISSE-NEXT: pxor %xmm6, %xmm0
; GFNISSE-NEXT: psubb %xmm6, %xmm0
; GFNISSE-NEXT: psrlw %xmm4, %xmm1
; GFNISSE-NEXT: pand %xmm5, %xmm1
; GFNISSE-NEXT: pxor %xmm6, %xmm1
; GFNISSE-NEXT: psubb %xmm6, %xmm1
; GFNISSE-NEXT: psrlw %xmm4, %xmm2
; GFNISSE-NEXT: pand %xmm5, %xmm2
; GFNISSE-NEXT: pxor %xmm6, %xmm2
; GFNISSE-NEXT: psubb %xmm6, %xmm2
; GFNISSE-NEXT: psrlw %xmm4, %xmm3
; GFNISSE-NEXT: pand %xmm5, %xmm3
; GFNISSE-NEXT: pxor %xmm6, %xmm3
; GFNISSE-NEXT: psubb %xmm6, %xmm3
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: splatvar_ashr_v64i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; GFNIAVX1-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX1-NEXT: vpsrlw %xmm2, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpsrlw %xmm2, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; GFNIAVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; GFNIAVX1-NEXT: vbroadcastss {{.*#+}} xmm5 = [32896,32896,32896,32896,32896,32896,32896,32896]
; GFNIAVX1-NEXT: vpsrlw %xmm2, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpxor %xmm5, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpsubb %xmm5, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpsrlw %xmm2, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpxor %xmm5, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsubb %xmm5, %xmm0, %xmm0
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; GFNIAVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; GFNIAVX1-NEXT: vpsrlw %xmm2, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpxor %xmm5, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpsubb %xmm5, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpsrlw %xmm2, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpxor %xmm5, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpsubb %xmm5, %xmm1, %xmm1
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: splatvar_ashr_v64i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX2-NEXT: vpsrlw %xmm2, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; GFNIAVX2-NEXT: vpsrlw %xmm2, %xmm3, %xmm3
; GFNIAVX2-NEXT: vpsrlw $8, %xmm3, %xmm3
; GFNIAVX2-NEXT: vpbroadcastb %xmm3, %ymm3
; GFNIAVX2-NEXT: vpand %ymm3, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpbroadcastb {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; GFNIAVX2-NEXT: vpsrlw %xmm2, %ymm4, %ymm4
; GFNIAVX2-NEXT: vpxor %ymm4, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpsubb %ymm4, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpsrlw %xmm2, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpxor %ymm4, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpsubb %ymm4, %ymm1, %ymm1
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: splatvar_ashr_v64i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; GFNIAVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX512VL-NEXT: vpsrlw %xmm1, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpbroadcastd {{.*#+}} ymm3 = [32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896]
; GFNIAVX512VL-NEXT: vpsrlw %xmm1, %ymm3, %ymm3
; GFNIAVX512VL-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; GFNIAVX512VL-NEXT: vpsrlw %xmm1, %xmm4, %xmm4
; GFNIAVX512VL-NEXT: vpsrlw $8, %xmm4, %xmm4
; GFNIAVX512VL-NEXT: vpbroadcastb %xmm4, %ymm4
; GFNIAVX512VL-NEXT: vpternlogq $108, %ymm4, %ymm3, %ymm2
; GFNIAVX512VL-NEXT: vpsubb %ymm3, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpternlogq $108, %ymm4, %ymm3, %ymm0
; GFNIAVX512VL-NEXT: vpsubb %ymm3, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: splatvar_ashr_v64i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; GFNIAVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpbroadcastb {{.*#+}} zmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; GFNIAVX512BW-NEXT: vpsrlw %xmm1, %zmm2, %zmm2
; GFNIAVX512BW-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; GFNIAVX512BW-NEXT: vpsrlw %xmm1, %xmm3, %xmm1
; GFNIAVX512BW-NEXT: vpsrlw $8, %xmm1, %xmm1
; GFNIAVX512BW-NEXT: vpbroadcastb %xmm1, %zmm1
; GFNIAVX512BW-NEXT: vpternlogq $108, %zmm0, %zmm2, %zmm1
; GFNIAVX512BW-NEXT: vpsubb %zmm2, %zmm1, %zmm0
; GFNIAVX512BW-NEXT: retq
  %splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
  %shift = ashr <64 x i8> %a, %splat
  ret <64 x i8> %shift
}

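; Non-uniform constant amounts: a left shift by a constant is a multiply by a
; power of two, so the even and odd byte lanes can each be handled with one
; pmaddubsw/vpmaddubsw against a constant vector and then recombined.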
define <64 x i8> @constant_shl_v64i8(<64 x i8> %a) nounwind {
; GFNISSE-LABEL: constant_shl_v64i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: pmovzxbw {{.*#+}} xmm4 = [1,4,16,64,128,32,8,2]
; GFNISSE-NEXT: movdqa %xmm0, %xmm6
; GFNISSE-NEXT: pmaddubsw %xmm4, %xmm6
; GFNISSE-NEXT: pmovzxbw {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255]
; GFNISSE-NEXT: pand %xmm5, %xmm6
; GFNISSE-NEXT: movdqa {{.*#+}} xmm7 = [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1]
; GFNISSE-NEXT: pmaddubsw %xmm7, %xmm0
; GFNISSE-NEXT: psllw $8, %xmm0
; GFNISSE-NEXT: por %xmm6, %xmm0
; GFNISSE-NEXT: movdqa %xmm1, %xmm6
; GFNISSE-NEXT: pmaddubsw %xmm4, %xmm6
; GFNISSE-NEXT: pand %xmm5, %xmm6
; GFNISSE-NEXT: pmaddubsw %xmm7, %xmm1
; GFNISSE-NEXT: psllw $8, %xmm1
; GFNISSE-NEXT: por %xmm6, %xmm1
; GFNISSE-NEXT: movdqa %xmm2, %xmm6
; GFNISSE-NEXT: pmaddubsw %xmm4, %xmm6
; GFNISSE-NEXT: pand %xmm5, %xmm6
; GFNISSE-NEXT: pmaddubsw %xmm7, %xmm2
; GFNISSE-NEXT: psllw $8, %xmm2
; GFNISSE-NEXT: por %xmm6, %xmm2
; GFNISSE-NEXT: movdqa %xmm3, %xmm6
; GFNISSE-NEXT: pmaddubsw %xmm4, %xmm6
; GFNISSE-NEXT: pand %xmm5, %xmm6
; GFNISSE-NEXT: pmaddubsw %xmm7, %xmm3
; GFNISSE-NEXT: psllw $8, %xmm3
; GFNISSE-NEXT: por %xmm6, %xmm3
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: constant_shl_v64i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; GFNIAVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = [1,4,16,64,128,32,8,2]
; GFNIAVX1-NEXT: vpmaddubsw %xmm3, %xmm2, %xmm4
; GFNIAVX1-NEXT: vbroadcastss {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255]
; GFNIAVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
; GFNIAVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1]
; GFNIAVX1-NEXT: vpmaddubsw %xmm6, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpsllw $8, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpor %xmm2, %xmm4, %xmm2
; GFNIAVX1-NEXT: vpmaddubsw %xmm3, %xmm0, %xmm4
; GFNIAVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpmaddubsw %xmm6, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsllw $8, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpor %xmm0, %xmm4, %xmm0
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; GFNIAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; GFNIAVX1-NEXT: vpmaddubsw %xmm3, %xmm2, %xmm4
; GFNIAVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpmaddubsw %xmm6, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpsllw $8, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpor %xmm2, %xmm4, %xmm2
; GFNIAVX1-NEXT: vpmaddubsw %xmm3, %xmm1, %xmm3
; GFNIAVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpmaddubsw %xmm6, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpsllw $8, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpor %xmm1, %xmm3, %xmm1
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: constant_shl_v64i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0]
; GFNIAVX2-NEXT: # ymm2 = mem[0,1,0,1]
; GFNIAVX2-NEXT: vpmaddubsw %ymm2, %ymm0, %ymm3
; GFNIAVX2-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; GFNIAVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; GFNIAVX2-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1]
; GFNIAVX2-NEXT: # ymm5 = mem[0,1,0,1]
; GFNIAVX2-NEXT: vpmaddubsw %ymm5, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpsllw $8, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpor %ymm0, %ymm3, %ymm0
; GFNIAVX2-NEXT: vpmaddubsw %ymm2, %ymm1, %ymm2
; GFNIAVX2-NEXT: vpand %ymm4, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpmaddubsw %ymm5, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpsllw $8, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: constant_shl_v64i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; GFNIAVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0]
; GFNIAVX512VL-NEXT: # ymm2 = mem[0,1,0,1]
; GFNIAVX512VL-NEXT: vpmaddubsw %ymm2, %ymm1, %ymm3
; GFNIAVX512VL-NEXT: vpmaddubsw %ymm2, %ymm0, %ymm2
; GFNIAVX512VL-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
; GFNIAVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1]
; GFNIAVX512VL-NEXT: # ymm3 = mem[0,1,0,1]
; GFNIAVX512VL-NEXT: vpmaddubsw %ymm3, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpsllw $8, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpmaddubsw %ymm3, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpsllw $8, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; GFNIAVX512VL-NEXT: vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm0
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: constant_shl_v64i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 # [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0]
; GFNIAVX512BW-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 # [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1]
; GFNIAVX512BW-NEXT: vpsllw $8, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm0
; GFNIAVX512BW-NEXT: retq
  %shift = shl <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
  ret <64 x i8> %shift
}

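; Constant logical right shifts extend each byte to a word, multiply by
; 2^(8-amount) so the result lands in the high byte, shift the high byte down
; and repack with packuswb.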
define <64 x i8> @constant_lshr_v64i8(<64 x i8> %a) nounwind {
; GFNISSE-LABEL: constant_lshr_v64i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: movdqa %xmm1, %xmm4
; GFNISSE-NEXT: movdqa %xmm0, %xmm1
; GFNISSE-NEXT: pxor %xmm6, %xmm6
; GFNISSE-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm6[8],xmm1[9],xmm6[9],xmm1[10],xmm6[10],xmm1[11],xmm6[11],xmm1[12],xmm6[12],xmm1[13],xmm6[13],xmm1[14],xmm6[14],xmm1[15],xmm6[15]
; GFNISSE-NEXT: movdqa {{.*#+}} xmm7 = [2,4,8,16,32,64,128,256]
; GFNISSE-NEXT: pmullw %xmm7, %xmm1
; GFNISSE-NEXT: psrlw $8, %xmm1
; GFNISSE-NEXT: movdqa {{.*#+}} xmm8 = [256,128,64,32,16,8,4,2]
; GFNISSE-NEXT: pmullw %xmm8, %xmm0
; GFNISSE-NEXT: psrlw $8, %xmm0
; GFNISSE-NEXT: packuswb %xmm1, %xmm0
; GFNISSE-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
; GFNISSE-NEXT: pmullw %xmm7, %xmm4
; GFNISSE-NEXT: psrlw $8, %xmm4
; GFNISSE-NEXT: pmullw %xmm8, %xmm1
; GFNISSE-NEXT: psrlw $8, %xmm1
; GFNISSE-NEXT: packuswb %xmm4, %xmm1
; GFNISSE-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm6[8],xmm2[9],xmm6[9],xmm2[10],xmm6[10],xmm2[11],xmm6[11],xmm2[12],xmm6[12],xmm2[13],xmm6[13],xmm2[14],xmm6[14],xmm2[15],xmm6[15]
; GFNISSE-NEXT: pmullw %xmm7, %xmm2
; GFNISSE-NEXT: psrlw $8, %xmm2
; GFNISSE-NEXT: pmullw %xmm8, %xmm4
; GFNISSE-NEXT: psrlw $8, %xmm4
; GFNISSE-NEXT: packuswb %xmm2, %xmm4
; GFNISSE-NEXT: pmovzxbw {{.*#+}} xmm5 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm6[8],xmm3[9],xmm6[9],xmm3[10],xmm6[10],xmm3[11],xmm6[11],xmm3[12],xmm6[12],xmm3[13],xmm6[13],xmm3[14],xmm6[14],xmm3[15],xmm6[15]
; GFNISSE-NEXT: pmullw %xmm7, %xmm3
; GFNISSE-NEXT: psrlw $8, %xmm3
; GFNISSE-NEXT: pmullw %xmm8, %xmm5
; GFNISSE-NEXT: psrlw $8, %xmm5
; GFNISSE-NEXT: packuswb %xmm3, %xmm5
; GFNISSE-NEXT: movdqa %xmm4, %xmm2
; GFNISSE-NEXT: movdqa %xmm5, %xmm3
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: constant_lshr_v64i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; GFNIAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
; GFNIAVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [2,4,8,16,32,64,128,256]
; GFNIAVX1-NEXT: vpmullw %xmm5, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; GFNIAVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [256,128,64,32,16,8,4,2]
; GFNIAVX1-NEXT: vpmullw %xmm6, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
; GFNIAVX1-NEXT: vpmullw %xmm5, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; GFNIAVX1-NEXT: vpmullw %xmm6, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpackuswb %xmm4, %xmm0, %xmm0
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; GFNIAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
; GFNIAVX1-NEXT: vpmullw %xmm5, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; GFNIAVX1-NEXT: vpmullw %xmm6, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
; GFNIAVX1-NEXT: vpmullw %xmm5, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; GFNIAVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; GFNIAVX1-NEXT: vpmullw %xmm6, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: constant_lshr_v64i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; GFNIAVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
; GFNIAVX2-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [2,4,8,16,32,64,128,256,2,4,8,16,32,64,128,256]
; GFNIAVX2-NEXT: # ymm4 = mem[0,1,0,1]
; GFNIAVX2-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; GFNIAVX2-NEXT: vpsrlw $8, %ymm3, %ymm3
; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
; GFNIAVX2-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
; GFNIAVX2-NEXT: # ymm5 = mem[0,1,0,1]
; GFNIAVX2-NEXT: vpmullw %ymm5, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpackuswb %ymm3, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31]
; GFNIAVX2-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; GFNIAVX2-NEXT: vpsrlw $8, %ymm3, %ymm3
; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23]
; GFNIAVX2-NEXT: vpmullw %ymm5, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: constant_lshr_v64i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; GFNIAVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; GFNIAVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31]
; GFNIAVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [2,4,8,16,32,64,128,256,2,4,8,16,32,64,128,256]
; GFNIAVX512VL-NEXT: # ymm4 = mem[0,1,0,1]
; GFNIAVX512VL-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm3, %ymm3
; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23]
; GFNIAVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
; GFNIAVX512VL-NEXT: # ymm5 = mem[0,1,0,1]
; GFNIAVX512VL-NEXT: vpmullw %ymm5, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
; GFNIAVX512VL-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm3, %ymm3
; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
; GFNIAVX512VL-NEXT: vpmullw %ymm5, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpackuswb %ymm3, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: constant_lshr_v64i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; GFNIAVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; GFNIAVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; GFNIAVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
; GFNIAVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; GFNIAVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: retq
  %shift = lshr <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
  ret <64 x i8> %shift
}

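; Constant arithmetic right shifts follow the same multiply-high pattern, but
; sign-extend each byte to a word (unpack then psraw $8) first.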
define <64 x i8> @constant_ashr_v64i8(<64 x i8> %a) nounwind {
; GFNISSE-LABEL: constant_ashr_v64i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: movdqa %xmm0, %xmm6
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
; GFNISSE-NEXT: psraw $8, %xmm6
; GFNISSE-NEXT: movdqa {{.*#+}} xmm4 = [2,4,8,16,32,64,128,256]
; GFNISSE-NEXT: pmullw %xmm4, %xmm6
; GFNISSE-NEXT: psrlw $8, %xmm6
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNISSE-NEXT: psraw $8, %xmm0
; GFNISSE-NEXT: movdqa {{.*#+}} xmm5 = [256,128,64,32,16,8,4,2]
; GFNISSE-NEXT: pmullw %xmm5, %xmm0
; GFNISSE-NEXT: psrlw $8, %xmm0
; GFNISSE-NEXT: packuswb %xmm6, %xmm0
; GFNISSE-NEXT: movdqa %xmm1, %xmm6
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm1[8],xmm6[9],xmm1[9],xmm6[10],xmm1[10],xmm6[11],xmm1[11],xmm6[12],xmm1[12],xmm6[13],xmm1[13],xmm6[14],xmm1[14],xmm6[15],xmm1[15]
; GFNISSE-NEXT: psraw $8, %xmm6
; GFNISSE-NEXT: pmullw %xmm4, %xmm6
; GFNISSE-NEXT: psrlw $8, %xmm6
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNISSE-NEXT: psraw $8, %xmm1
; GFNISSE-NEXT: pmullw %xmm5, %xmm1
; GFNISSE-NEXT: psrlw $8, %xmm1
; GFNISSE-NEXT: packuswb %xmm6, %xmm1
; GFNISSE-NEXT: movdqa %xmm2, %xmm6
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
; GFNISSE-NEXT: psraw $8, %xmm6
; GFNISSE-NEXT: pmullw %xmm4, %xmm6
; GFNISSE-NEXT: psrlw $8, %xmm6
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNISSE-NEXT: psraw $8, %xmm2
; GFNISSE-NEXT: pmullw %xmm5, %xmm2
; GFNISSE-NEXT: psrlw $8, %xmm2
; GFNISSE-NEXT: packuswb %xmm6, %xmm2
; GFNISSE-NEXT: movdqa %xmm3, %xmm6
; GFNISSE-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm3[8],xmm6[9],xmm3[9],xmm6[10],xmm3[10],xmm6[11],xmm3[11],xmm6[12],xmm3[12],xmm6[13],xmm3[13],xmm6[14],xmm3[14],xmm6[15],xmm3[15]
; GFNISSE-NEXT: psraw $8, %xmm6
; GFNISSE-NEXT: pmullw %xmm4, %xmm6
; GFNISSE-NEXT: psrlw $8, %xmm6
; GFNISSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNISSE-NEXT: psraw $8, %xmm3
; GFNISSE-NEXT: pmullw %xmm5, %xmm3
; GFNISSE-NEXT: psrlw $8, %xmm3
; GFNISSE-NEXT: packuswb %xmm6, %xmm3
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: constant_ashr_v64i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1-NEXT: vpsraw $8, %xmm2, %xmm4
; GFNIAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [2,4,8,16,32,64,128,256]
; GFNIAVX1-NEXT: vpmullw %xmm2, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1-NEXT: vpsraw $8, %xmm3, %xmm5
; GFNIAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [256,128,64,32,16,8,4,2]
; GFNIAVX1-NEXT: vpmullw %xmm3, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpackuswb %xmm4, %xmm5, %xmm4
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1-NEXT: vpsraw $8, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpmullw %xmm2, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1-NEXT: vpsraw $8, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpmullw %xmm3, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpackuswb %xmm5, %xmm0, %xmm0
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
; GFNIAVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1-NEXT: vpsraw $8, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpmullw %xmm2, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1-NEXT: vpsraw $8, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpmullw %xmm3, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpackuswb %xmm5, %xmm4, %xmm4
; GFNIAVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; GFNIAVX1-NEXT: vpsraw $8, %xmm5, %xmm5
; GFNIAVX1-NEXT: vpmullw %xmm2, %xmm5, %xmm2
; GFNIAVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; GFNIAVX1-NEXT: vpsraw $8, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; GFNIAVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; GFNIAVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: constant_ashr_v64i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; GFNIAVX2-NEXT: vpsraw $8, %ymm2, %ymm2
; GFNIAVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [2,4,8,16,32,64,128,256,2,4,8,16,32,64,128,256]
; GFNIAVX2-NEXT: # ymm3 = mem[0,1,0,1]
; GFNIAVX2-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; GFNIAVX2-NEXT: vpsraw $8, %ymm0, %ymm0
; GFNIAVX2-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
; GFNIAVX2-NEXT: # ymm4 = mem[0,1,0,1]
; GFNIAVX2-NEXT: vpmullw %ymm4, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; GFNIAVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; GFNIAVX2-NEXT: vpsraw $8, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; GFNIAVX2-NEXT: vpsraw $8, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpmullw %ymm4, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; GFNIAVX2-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512VL-LABEL: constant_ashr_v64i8:
; GFNIAVX512VL: # %bb.0:
; GFNIAVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; GFNIAVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; GFNIAVX512VL-NEXT: vpsraw $8, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [2,4,8,16,32,64,128,256,2,4,8,16,32,64,128,256]
; GFNIAVX512VL-NEXT: # ymm3 = mem[0,1,0,1]
; GFNIAVX512VL-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; GFNIAVX512VL-NEXT: vpsraw $8, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
; GFNIAVX512VL-NEXT: # ymm4 = mem[0,1,0,1]
; GFNIAVX512VL-NEXT: vpmullw %ymm4, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
; GFNIAVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; GFNIAVX512VL-NEXT: vpsraw $8, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; GFNIAVX512VL-NEXT: vpsraw $8, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpmullw %ymm4, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; GFNIAVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; GFNIAVX512VL-NEXT: retq
;
; GFNIAVX512BW-LABEL: constant_ashr_v64i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; GFNIAVX512BW-NEXT: vpsraw $8, %zmm1, %zmm1
; GFNIAVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; GFNIAVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
; GFNIAVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; GFNIAVX512BW-NEXT: vpsraw $8, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: retq
  %shift = ashr <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
  ret <64 x i8> %shift
}

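; Splat-constant shifts are where GFNI should win: a fixed per-byte shift is
; a linear transform over GF(2), so it can be folded into the 8x8 bit matrix
; of a single gf2p8affineqb per vector.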
define <64 x i8> @splatconstant_shl_v64i8(<64 x i8> %a) nounwind {
; GFNISSE-LABEL: splatconstant_shl_v64i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: pmovsxdq {{.*#+}} xmm4 = [66052,66052]
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm0
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm1
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm2
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm3
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: splatconstant_shl_v64i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [4,2,1,0,0,0,0,0,4,2,1,0,0,0,0,0,4,2,1,0,0,0,0,0,4,2,1,0,0,0,0,0]
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: splatconstant_shl_v64i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [4,2,1,0,0,0,0,0,4,2,1,0,0,0,0,0,4,2,1,0,0,0,0,0,4,2,1,0,0,0,0,0]
; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512-LABEL: splatconstant_shl_v64i8:
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
; GFNIAVX512-NEXT: retq
  %shift = shl <64 x i8> %a, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
  ret <64 x i8> %shift
}

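; Likewise for a uniform lshr by 7: a single affine matrix that moves the top
; bit down to bit 0, one instruction per vector.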
define <64 x i8> @splatconstant_lshr_v64i8(<64 x i8> %a) nounwind {
; GFNISSE-LABEL: splatconstant_lshr_v64i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: movdqa {{.*#+}} xmm4 = [0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128]
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm0
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm1
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm2
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm3
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: splatconstant_lshr_v64i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128]
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: splatconstant_lshr_v64i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128,0,0,0,0,0,0,0,128]
; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512-LABEL: splatconstant_lshr_v64i8:
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
; GFNIAVX512-NEXT: retq
  %shift = lshr <64 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  ret <64 x i8> %shift
}
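
; NB: The same affine trick handles the logical shift above: with a splat
; shift amount of 7 only the top bit of each byte can survive, so the
; broadcast matrix [0,0,0,0,0,0,0,128] has a single nonzero row, which moves
; bit 7 down to bit 0 and zeroes everything else (an informal reading; row
; ordering per the ISA definition).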
define <64 x i8> @splatconstant_ashr_v64i8(<64 x i8> %a) nounwind {
; GFNISSE-LABEL: splatconstant_ashr_v64i8:
; GFNISSE: # %bb.0:
; GFNISSE-NEXT: movdqa {{.*#+}} xmm4 = [128,128,64,32,16,8,4,2,128,128,64,32,16,8,4,2]
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm0
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm1
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm2
; GFNISSE-NEXT: gf2p8affineqb $0, %xmm4, %xmm3
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: splatconstant_ashr_v64i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [128,128,64,32,16,8,4,2,128,128,64,32,16,8,4,2,128,128,64,32,16,8,4,2,128,128,64,32,16,8,4,2]
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
; GFNIAVX1-NEXT: vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: splatconstant_ashr_v64i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [128,128,64,32,16,8,4,2,128,128,64,32,16,8,4,2,128,128,64,32,16,8,4,2,128,128,64,32,16,8,4,2]
; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm2, %ymm0, %ymm0
; GFNIAVX2-NEXT: vgf2p8affineqb $0, %ymm2, %ymm1, %ymm1
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512-LABEL: splatconstant_ashr_v64i8:
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
; GFNIAVX512-NEXT: retq
  %shift = ashr <64 x i8> %a, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  ret <64 x i8> %shift
}
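
; NB: The arithmetic-shift matrix above, with rows [128,128,64,32,16,8,4,2],
; can be read as a right shift by 1 with the sign row duplicated: bit 7 feeds
; both of the top two result bits, replicating the sign, while the remaining
; rows move each lower bit down by one (an informal reading; row ordering per
; the ISA definition).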
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: