; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skylake-avx512 -mattr=prefer-256-bit | FileCheck %s --check-prefixes=CHECK,CHECK-SKX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skylake-avx512 -mattr=prefer-256-bit,avx512vbmi | FileCheck %s --check-prefixes=CHECK,CHECK-SKX,CHECK-SKX-VBMI
; Make sure CPUs default to prefer-256-bit. avx512vnni isn't interesting as it just adds an isel peephole for vpmaddwd+vpaddd
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skylake-avx512 | FileCheck %s --check-prefixes=CHECK,CHECK-AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-avx512vnni -mcpu=cascadelake | FileCheck %s --check-prefixes=CHECK,CHECK-AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-avx512vnni -mcpu=cooperlake | FileCheck %s --check-prefixes=CHECK,CHECK-AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=cannonlake | FileCheck %s --check-prefixes=CHECK,CHECK-VBMI
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-avx512vnni -mcpu=icelake-client | FileCheck %s --check-prefixes=CHECK,CHECK-VBMI
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-avx512vnni -mcpu=icelake-server | FileCheck %s --check-prefixes=CHECK,CHECK-VBMI
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-avx512vnni -mcpu=tigerlake | FileCheck %s --check-prefixes=CHECK,CHECK-VBMI

; This file primarily contains tests for specific places in X86ISelLowering.cpp that needed to be made aware of the legalizer not allowing 512-bit vectors due to prefer-256-bit even though AVX512 is enabled.
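; Each test below comes in a _256 and a _512 variant that differ only in the
; function's "min-legal-vector-width" attribute: with prefer-256-bit active,
; 512-bit operations are split into two 256-bit halves unless the attribute
; requests at least 512 bits, in which case zmm instructions are used.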

define dso_local void @add256(<16 x i32>* %a, <16 x i32>* %b, <16 x i32>* %c) "min-legal-vector-width"="256" {
; CHECK-LABEL: add256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
; CHECK-NEXT:    vmovdqa 32(%rdi), %ymm1
; CHECK-NEXT:    vpaddd 32(%rsi), %ymm1, %ymm1
; CHECK-NEXT:    vpaddd (%rsi), %ymm0, %ymm0
; CHECK-NEXT:    vmovdqa %ymm0, (%rdx)
; CHECK-NEXT:    vmovdqa %ymm1, 32(%rdx)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %d = load <16 x i32>, <16 x i32>* %a
  %e = load <16 x i32>, <16 x i32>* %b
  %f = add <16 x i32> %d, %e
  store <16 x i32> %f, <16 x i32>* %c
  ret void
}

define dso_local void @add512(<16 x i32>* %a, <16 x i32>* %b, <16 x i32>* %c) "min-legal-vector-width"="512" {
; CHECK-LABEL: add512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm0
; CHECK-NEXT:    vpaddd (%rsi), %zmm0, %zmm0
; CHECK-NEXT:    vmovdqa64 %zmm0, (%rdx)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %d = load <16 x i32>, <16 x i32>* %a
  %e = load <16 x i32>, <16 x i32>* %b
  %f = add <16 x i32> %d, %e
  store <16 x i32> %f, <16 x i32>* %c
  ret void
}
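
; The zext / add 1 / add / lshr 1 / trunc sequence below is the rounding-average
; idiom the backend recognizes and matches to vpavgb.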

define dso_local void @avg_v64i8_256(<64 x i8>* %a, <64 x i8>* %b) "min-legal-vector-width"="256" {
; CHECK-LABEL: avg_v64i8_256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa (%rsi), %ymm0
; CHECK-NEXT:    vmovdqa 32(%rsi), %ymm1
; CHECK-NEXT:    vpavgb (%rdi), %ymm0, %ymm0
; CHECK-NEXT:    vpavgb 32(%rdi), %ymm1, %ymm1
; CHECK-NEXT:    vmovdqu %ymm1, (%rax)
; CHECK-NEXT:    vmovdqu %ymm0, (%rax)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %1 = load <64 x i8>, <64 x i8>* %a
  %2 = load <64 x i8>, <64 x i8>* %b
  %3 = zext <64 x i8> %1 to <64 x i32>
  %4 = zext <64 x i8> %2 to <64 x i32>
  %5 = add nuw nsw <64 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %6 = add nuw nsw <64 x i32> %5, %4
  %7 = lshr <64 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %8 = trunc <64 x i32> %7 to <64 x i8>
  store <64 x i8> %8, <64 x i8>* undef, align 4
  ret void
}

define dso_local void @avg_v64i8_512(<64 x i8>* %a, <64 x i8>* %b) "min-legal-vector-width"="512" {
; CHECK-LABEL: avg_v64i8_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm0
; CHECK-NEXT:    vpavgb (%rsi), %zmm0, %zmm0
; CHECK-NEXT:    vmovdqu64 %zmm0, (%rax)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %1 = load <64 x i8>, <64 x i8>* %a
  %2 = load <64 x i8>, <64 x i8>* %b
  %3 = zext <64 x i8> %1 to <64 x i32>
  %4 = zext <64 x i8> %2 to <64 x i32>
  %5 = add nuw nsw <64 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %6 = add nuw nsw <64 x i32> %5, %4
  %7 = lshr <64 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %8 = trunc <64 x i32> %7 to <64 x i8>
  store <64 x i8> %8, <64 x i8>* undef, align 4
  ret void
}
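
; Sign-extended i16 multiplies whose adjacent odd/even lanes are summed are
; matched to vpmaddwd.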

define dso_local void @pmaddwd_32_256(<32 x i16>* %APtr, <32 x i16>* %BPtr, <16 x i32>* %CPtr) "min-legal-vector-width"="256" {
; CHECK-LABEL: pmaddwd_32_256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
; CHECK-NEXT:    vmovdqa 32(%rdi), %ymm1
; CHECK-NEXT:    vpmaddwd 32(%rsi), %ymm1, %ymm1
; CHECK-NEXT:    vpmaddwd (%rsi), %ymm0, %ymm0
; CHECK-NEXT:    vmovdqa %ymm0, (%rdx)
; CHECK-NEXT:    vmovdqa %ymm1, 32(%rdx)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %A = load <32 x i16>, <32 x i16>* %APtr
  %B = load <32 x i16>, <32 x i16>* %BPtr
  %a = sext <32 x i16> %A to <32 x i32>
  %b = sext <32 x i16> %B to <32 x i32>
  %m = mul nsw <32 x i32> %a, %b
  %odd = shufflevector <32 x i32> %m, <32 x i32> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
  %even = shufflevector <32 x i32> %m, <32 x i32> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
  %ret = add <16 x i32> %odd, %even
  store <16 x i32> %ret, <16 x i32>* %CPtr
  ret void
}

define dso_local void @pmaddwd_32_512(<32 x i16>* %APtr, <32 x i16>* %BPtr, <16 x i32>* %CPtr) "min-legal-vector-width"="512" {
; CHECK-LABEL: pmaddwd_32_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm0
; CHECK-NEXT:    vpmaddwd (%rsi), %zmm0, %zmm0
; CHECK-NEXT:    vmovdqa64 %zmm0, (%rdx)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %A = load <32 x i16>, <32 x i16>* %APtr
  %B = load <32 x i16>, <32 x i16>* %BPtr
  %a = sext <32 x i16> %A to <32 x i32>
  %b = sext <32 x i16> %B to <32 x i32>
  %m = mul nsw <32 x i32> %a, %b
  %odd = shufflevector <32 x i32> %m, <32 x i32> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
  %even = shufflevector <32 x i32> %m, <32 x i32> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
  %ret = add <16 x i32> %odd, %even
  store <16 x i32> %ret, <16 x i32>* %CPtr
  ret void
}
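
; max(x, y) - y is the unsigned saturating-subtract idiom; it lowers to
; vpsubusb.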

define dso_local void @psubus_64i8_max_256(<64 x i8>* %xptr, <64 x i8>* %yptr, <64 x i8>* %zptr) "min-legal-vector-width"="256" {
; CHECK-LABEL: psubus_64i8_max_256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
; CHECK-NEXT:    vmovdqa 32(%rdi), %ymm1
; CHECK-NEXT:    vpsubusb 32(%rsi), %ymm1, %ymm1
; CHECK-NEXT:    vpsubusb (%rsi), %ymm0, %ymm0
; CHECK-NEXT:    vmovdqa %ymm0, (%rdx)
; CHECK-NEXT:    vmovdqa %ymm1, 32(%rdx)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %x = load <64 x i8>, <64 x i8>* %xptr
  %y = load <64 x i8>, <64 x i8>* %yptr
  %cmp = icmp ult <64 x i8> %x, %y
  %max = select <64 x i1> %cmp, <64 x i8> %y, <64 x i8> %x
  %res = sub <64 x i8> %max, %y
  store <64 x i8> %res, <64 x i8>* %zptr
  ret void
}

define dso_local void @psubus_64i8_max_512(<64 x i8>* %xptr, <64 x i8>* %yptr, <64 x i8>* %zptr) "min-legal-vector-width"="512" {
; CHECK-LABEL: psubus_64i8_max_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm0
; CHECK-NEXT:    vpsubusb (%rsi), %zmm0, %zmm0
; CHECK-NEXT:    vmovdqa64 %zmm0, (%rdx)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %x = load <64 x i8>, <64 x i8>* %xptr
  %y = load <64 x i8>, <64 x i8>* %yptr
  %cmp = icmp ult <64 x i8> %x, %y
  %max = select <64 x i1> %cmp, <64 x i8> %y, <64 x i8> %x
  %res = sub <64 x i8> %max, %y
  store <64 x i8> %res, <64 x i8>* %zptr
  ret void
}
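
; A vectorized i8 dot product: the bytes are widened with vpmovsxbw and
; accumulated via vpmaddwd+vpaddd, in ymm or zmm registers depending on the
; min-legal-vector-width attribute.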

define dso_local i32 @_Z9test_charPcS_i_256(i8* nocapture readonly, i8* nocapture readonly, i32) "min-legal-vector-width"="256" {
; CHECK-SKX-LABEL: _Z9test_charPcS_i_256:
; CHECK-SKX:       # %bb.0: # %entry
; CHECK-SKX-NEXT:    movl %edx, %eax
; CHECK-SKX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; CHECK-SKX-NEXT:    xorl %ecx, %ecx
; CHECK-SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; CHECK-SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; CHECK-SKX-NEXT:    .p2align 4, 0x90
; CHECK-SKX-NEXT:  .LBB8_1: # %vector.body
; CHECK-SKX-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-SKX-NEXT:    vpmovsxbw 16(%rdi,%rcx), %ymm3
; CHECK-SKX-NEXT:    vpmovsxbw (%rdi,%rcx), %ymm4
; CHECK-SKX-NEXT:    vpmovsxbw 16(%rsi,%rcx), %ymm5
; CHECK-SKX-NEXT:    vpmaddwd %ymm3, %ymm5, %ymm3
; CHECK-SKX-NEXT:    vpaddd %ymm2, %ymm3, %ymm2
; CHECK-SKX-NEXT:    vpmovsxbw (%rsi,%rcx), %ymm3
; CHECK-SKX-NEXT:    vpmaddwd %ymm4, %ymm3, %ymm3
; CHECK-SKX-NEXT:    vpaddd %ymm1, %ymm3, %ymm1
; CHECK-SKX-NEXT:    addq $32, %rcx
; CHECK-SKX-NEXT:    cmpq %rcx, %rax
; CHECK-SKX-NEXT:    jne .LBB8_1
; CHECK-SKX-NEXT:  # %bb.2: # %middle.block
; CHECK-SKX-NEXT:    vpaddd %ymm0, %ymm1, %ymm1
; CHECK-SKX-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
; CHECK-SKX-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
; CHECK-SKX-NEXT:    vextracti128 $1, %ymm0, %xmm1
; CHECK-SKX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-SKX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; CHECK-SKX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-SKX-NEXT:    vpsrlq $32, %xmm0, %xmm1
; CHECK-SKX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-SKX-NEXT:    vmovd %xmm0, %eax
; CHECK-SKX-NEXT:    vzeroupper
; CHECK-SKX-NEXT:    retq
;
; CHECK-AVX512-LABEL: _Z9test_charPcS_i_256:
; CHECK-AVX512:       # %bb.0: # %entry
; CHECK-AVX512-NEXT:    movl %edx, %eax
; CHECK-AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; CHECK-AVX512-NEXT:    xorl %ecx, %ecx
; CHECK-AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; CHECK-AVX512-NEXT:    .p2align 4, 0x90
; CHECK-AVX512-NEXT:  .LBB8_1: # %vector.body
; CHECK-AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-AVX512-NEXT:    vpmovsxbw 16(%rdi,%rcx), %ymm3
; CHECK-AVX512-NEXT:    vpmovsxbw (%rdi,%rcx), %ymm4
; CHECK-AVX512-NEXT:    vpmovsxbw 16(%rsi,%rcx), %ymm5
; CHECK-AVX512-NEXT:    vpmaddwd %ymm3, %ymm5, %ymm3
; CHECK-AVX512-NEXT:    vpaddd %ymm2, %ymm3, %ymm2
; CHECK-AVX512-NEXT:    vpmovsxbw (%rsi,%rcx), %ymm3
; CHECK-AVX512-NEXT:    vpmaddwd %ymm4, %ymm3, %ymm3
; CHECK-AVX512-NEXT:    vpaddd %ymm1, %ymm3, %ymm1
; CHECK-AVX512-NEXT:    addq $32, %rcx
; CHECK-AVX512-NEXT:    cmpq %rcx, %rax
; CHECK-AVX512-NEXT:    jne .LBB8_1
; CHECK-AVX512-NEXT:  # %bb.2: # %middle.block
; CHECK-AVX512-NEXT:    vpaddd %ymm0, %ymm1, %ymm1
; CHECK-AVX512-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
; CHECK-AVX512-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
; CHECK-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; CHECK-AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; CHECK-AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-AVX512-NEXT:    vpsrlq $32, %xmm0, %xmm1
; CHECK-AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-AVX512-NEXT:    vmovd %xmm0, %eax
; CHECK-AVX512-NEXT:    vzeroupper
; CHECK-AVX512-NEXT:    retq
;
; CHECK-VBMI-LABEL: _Z9test_charPcS_i_256:
; CHECK-VBMI:       # %bb.0: # %entry
; CHECK-VBMI-NEXT:    movl %edx, %eax
; CHECK-VBMI-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; CHECK-VBMI-NEXT:    xorl %ecx, %ecx
; CHECK-VBMI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; CHECK-VBMI-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; CHECK-VBMI-NEXT:    .p2align 4, 0x90
; CHECK-VBMI-NEXT:  .LBB8_1: # %vector.body
; CHECK-VBMI-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-VBMI-NEXT:    vpmovsxbw 16(%rdi,%rcx), %ymm3
; CHECK-VBMI-NEXT:    vpmovsxbw (%rdi,%rcx), %ymm4
; CHECK-VBMI-NEXT:    vpmovsxbw 16(%rsi,%rcx), %ymm5
; CHECK-VBMI-NEXT:    vpmaddwd %ymm3, %ymm5, %ymm3
; CHECK-VBMI-NEXT:    vpaddd %ymm2, %ymm3, %ymm2
; CHECK-VBMI-NEXT:    vpmovsxbw (%rsi,%rcx), %ymm3
; CHECK-VBMI-NEXT:    vpmaddwd %ymm4, %ymm3, %ymm3
; CHECK-VBMI-NEXT:    vpaddd %ymm1, %ymm3, %ymm1
; CHECK-VBMI-NEXT:    addq $32, %rcx
; CHECK-VBMI-NEXT:    cmpq %rcx, %rax
; CHECK-VBMI-NEXT:    jne .LBB8_1
; CHECK-VBMI-NEXT:  # %bb.2: # %middle.block
; CHECK-VBMI-NEXT:    vpaddd %ymm0, %ymm1, %ymm1
; CHECK-VBMI-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
; CHECK-VBMI-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
; CHECK-VBMI-NEXT:    vextracti128 $1, %ymm0, %xmm1
; CHECK-VBMI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-VBMI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; CHECK-VBMI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-VBMI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; CHECK-VBMI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-VBMI-NEXT:    vmovd %xmm0, %eax
; CHECK-VBMI-NEXT:    vzeroupper
; CHECK-VBMI-NEXT:    retq
entry:
  %3 = zext i32 %2 to i64
  br label %vector.body

vector.body:
  %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
  %vec.phi = phi <32 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
  %4 = getelementptr inbounds i8, i8* %0, i64 %index
  %5 = bitcast i8* %4 to <32 x i8>*
  %wide.load = load <32 x i8>, <32 x i8>* %5, align 1
  %6 = sext <32 x i8> %wide.load to <32 x i32>
  %7 = getelementptr inbounds i8, i8* %1, i64 %index
  %8 = bitcast i8* %7 to <32 x i8>*
  %wide.load14 = load <32 x i8>, <32 x i8>* %8, align 1
  %9 = sext <32 x i8> %wide.load14 to <32 x i32>
  %10 = mul nsw <32 x i32> %9, %6
  %11 = add nsw <32 x i32> %10, %vec.phi
  %index.next = add i64 %index, 32
  %12 = icmp eq i64 %index.next, %3
  br i1 %12, label %middle.block, label %vector.body

middle.block:
  %rdx.shuf1 = shufflevector <32 x i32> %11, <32 x i32> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx1 = add <32 x i32> %11, %rdx.shuf1
  %rdx.shuf = shufflevector <32 x i32> %bin.rdx1, <32 x i32> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx = add <32 x i32> %bin.rdx1, %rdx.shuf
  %rdx.shuf15 = shufflevector <32 x i32> %bin.rdx, <32 x i32> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx32 = add <32 x i32> %bin.rdx, %rdx.shuf15
  %rdx.shuf17 = shufflevector <32 x i32> %bin.rdx32, <32 x i32> undef, <32 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx18 = add <32 x i32> %bin.rdx32, %rdx.shuf17
  %rdx.shuf19 = shufflevector <32 x i32> %bin.rdx18, <32 x i32> undef, <32 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx20 = add <32 x i32> %bin.rdx18, %rdx.shuf19
  %13 = extractelement <32 x i32> %bin.rdx20, i32 0
  ret i32 %13
}

define dso_local i32 @_Z9test_charPcS_i_512(i8* nocapture readonly, i8* nocapture readonly, i32) "min-legal-vector-width"="512" {
; CHECK-SKX-LABEL: _Z9test_charPcS_i_512:
; CHECK-SKX:       # %bb.0: # %entry
; CHECK-SKX-NEXT:    movl %edx, %eax
; CHECK-SKX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; CHECK-SKX-NEXT:    xorl %ecx, %ecx
; CHECK-SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; CHECK-SKX-NEXT:    .p2align 4, 0x90
; CHECK-SKX-NEXT:  .LBB9_1: # %vector.body
; CHECK-SKX-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-SKX-NEXT:    vpmovsxbw (%rdi,%rcx), %zmm2
; CHECK-SKX-NEXT:    vpmovsxbw (%rsi,%rcx), %zmm3
; CHECK-SKX-NEXT:    vpmaddwd %zmm2, %zmm3, %zmm2
; CHECK-SKX-NEXT:    vpaddd %zmm1, %zmm2, %zmm1
; CHECK-SKX-NEXT:    addq $32, %rcx
; CHECK-SKX-NEXT:    cmpq %rcx, %rax
; CHECK-SKX-NEXT:    jne .LBB9_1
; CHECK-SKX-NEXT:  # %bb.2: # %middle.block
; CHECK-SKX-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
; CHECK-SKX-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; CHECK-SKX-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; CHECK-SKX-NEXT:    vextracti128 $1, %ymm0, %xmm1
; CHECK-SKX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-SKX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; CHECK-SKX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-SKX-NEXT:    vpsrlq $32, %xmm0, %xmm1
; CHECK-SKX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-SKX-NEXT:    vmovd %xmm0, %eax
; CHECK-SKX-NEXT:    vzeroupper
; CHECK-SKX-NEXT:    retq
;
; CHECK-AVX512-LABEL: _Z9test_charPcS_i_512:
; CHECK-AVX512:       # %bb.0: # %entry
; CHECK-AVX512-NEXT:    movl %edx, %eax
; CHECK-AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; CHECK-AVX512-NEXT:    xorl %ecx, %ecx
; CHECK-AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX512-NEXT:    .p2align 4, 0x90
; CHECK-AVX512-NEXT:  .LBB9_1: # %vector.body
; CHECK-AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-AVX512-NEXT:    vpmovsxbw (%rdi,%rcx), %zmm2
; CHECK-AVX512-NEXT:    vpmovsxbw (%rsi,%rcx), %zmm3
; CHECK-AVX512-NEXT:    vpmaddwd %zmm2, %zmm3, %zmm2
; CHECK-AVX512-NEXT:    vpaddd %zmm1, %zmm2, %zmm1
; CHECK-AVX512-NEXT:    addq $32, %rcx
; CHECK-AVX512-NEXT:    cmpq %rcx, %rax
; CHECK-AVX512-NEXT:    jne .LBB9_1
; CHECK-AVX512-NEXT:  # %bb.2: # %middle.block
; CHECK-AVX512-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
; CHECK-AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; CHECK-AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; CHECK-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; CHECK-AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; CHECK-AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-AVX512-NEXT:    vpsrlq $32, %xmm0, %xmm1
; CHECK-AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-AVX512-NEXT:    vmovd %xmm0, %eax
; CHECK-AVX512-NEXT:    vzeroupper
; CHECK-AVX512-NEXT:    retq
;
; CHECK-VBMI-LABEL: _Z9test_charPcS_i_512:
; CHECK-VBMI:       # %bb.0: # %entry
; CHECK-VBMI-NEXT:    movl %edx, %eax
; CHECK-VBMI-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; CHECK-VBMI-NEXT:    xorl %ecx, %ecx
; CHECK-VBMI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; CHECK-VBMI-NEXT:    .p2align 4, 0x90
; CHECK-VBMI-NEXT:  .LBB9_1: # %vector.body
; CHECK-VBMI-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-VBMI-NEXT:    vpmovsxbw (%rdi,%rcx), %zmm2
; CHECK-VBMI-NEXT:    vpmovsxbw (%rsi,%rcx), %zmm3
; CHECK-VBMI-NEXT:    vpmaddwd %zmm2, %zmm3, %zmm2
; CHECK-VBMI-NEXT:    vpaddd %zmm1, %zmm2, %zmm1
; CHECK-VBMI-NEXT:    addq $32, %rcx
; CHECK-VBMI-NEXT:    cmpq %rcx, %rax
; CHECK-VBMI-NEXT:    jne .LBB9_1
; CHECK-VBMI-NEXT:  # %bb.2: # %middle.block
; CHECK-VBMI-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
; CHECK-VBMI-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; CHECK-VBMI-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; CHECK-VBMI-NEXT:    vextracti128 $1, %ymm0, %xmm1
; CHECK-VBMI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-VBMI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; CHECK-VBMI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-VBMI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; CHECK-VBMI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-VBMI-NEXT:    vmovd %xmm0, %eax
; CHECK-VBMI-NEXT:    vzeroupper
; CHECK-VBMI-NEXT:    retq
entry:
  %3 = zext i32 %2 to i64
  br label %vector.body

vector.body:
  %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
  %vec.phi = phi <32 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
  %4 = getelementptr inbounds i8, i8* %0, i64 %index
  %5 = bitcast i8* %4 to <32 x i8>*
  %wide.load = load <32 x i8>, <32 x i8>* %5, align 1
  %6 = sext <32 x i8> %wide.load to <32 x i32>
  %7 = getelementptr inbounds i8, i8* %1, i64 %index
  %8 = bitcast i8* %7 to <32 x i8>*
  %wide.load14 = load <32 x i8>, <32 x i8>* %8, align 1
  %9 = sext <32 x i8> %wide.load14 to <32 x i32>
  %10 = mul nsw <32 x i32> %9, %6
  %11 = add nsw <32 x i32> %10, %vec.phi
  %index.next = add i64 %index, 32
  %12 = icmp eq i64 %index.next, %3
  br i1 %12, label %middle.block, label %vector.body

middle.block:
  %rdx.shuf1 = shufflevector <32 x i32> %11, <32 x i32> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx1 = add <32 x i32> %11, %rdx.shuf1
  %rdx.shuf = shufflevector <32 x i32> %bin.rdx1, <32 x i32> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx = add <32 x i32> %bin.rdx1, %rdx.shuf
  %rdx.shuf15 = shufflevector <32 x i32> %bin.rdx, <32 x i32> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx32 = add <32 x i32> %bin.rdx, %rdx.shuf15
  %rdx.shuf17 = shufflevector <32 x i32> %bin.rdx32, <32 x i32> undef, <32 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx18 = add <32 x i32> %bin.rdx32, %rdx.shuf17
  %rdx.shuf19 = shufflevector <32 x i32> %bin.rdx18, <32 x i32> undef, <32 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx20 = add <32 x i32> %bin.rdx18, %rdx.shuf19
  %13 = extractelement <32 x i32> %bin.rdx20, i32 0
  ret i32 %13
}

@a = dso_local global [1024 x i8] zeroinitializer, align 16
@b = dso_local global [1024 x i8] zeroinitializer, align 16
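
; The zext/sub/abs/accumulate pattern below is matched to vpsadbw (sum of
; absolute differences).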

define dso_local i32 @sad_16i8_256() "min-legal-vector-width"="256" {
; CHECK-SKX-LABEL: sad_16i8_256:
; CHECK-SKX:       # %bb.0: # %entry
; CHECK-SKX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; CHECK-SKX-NEXT:    movq $-1024, %rax # imm = 0xFC00
; CHECK-SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; CHECK-SKX-NEXT:    .p2align 4, 0x90
; CHECK-SKX-NEXT:  .LBB10_1: # %vector.body
; CHECK-SKX-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-SKX-NEXT:    vmovdqu a+1024(%rax), %xmm2
; CHECK-SKX-NEXT:    vpsadbw b+1024(%rax), %xmm2, %xmm2
; CHECK-SKX-NEXT:    vpaddd %ymm1, %ymm2, %ymm1
; CHECK-SKX-NEXT:    addq $4, %rax
; CHECK-SKX-NEXT:    jne .LBB10_1
; CHECK-SKX-NEXT:  # %bb.2: # %middle.block
; CHECK-SKX-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
; CHECK-SKX-NEXT:    vextracti128 $1, %ymm0, %xmm1
; CHECK-SKX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-SKX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; CHECK-SKX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-SKX-NEXT:    vpsrlq $32, %xmm0, %xmm1
; CHECK-SKX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-SKX-NEXT:    vmovd %xmm0, %eax
; CHECK-SKX-NEXT:    vzeroupper
; CHECK-SKX-NEXT:    retq
;
; CHECK-AVX512-LABEL: sad_16i8_256:
; CHECK-AVX512:       # %bb.0: # %entry
; CHECK-AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; CHECK-AVX512-NEXT:    movq $-1024, %rax # imm = 0xFC00
; CHECK-AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX512-NEXT:    .p2align 4, 0x90
; CHECK-AVX512-NEXT:  .LBB10_1: # %vector.body
; CHECK-AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-AVX512-NEXT:    vmovdqu a+1024(%rax), %xmm2
; CHECK-AVX512-NEXT:    vpsadbw b+1024(%rax), %xmm2, %xmm2
; CHECK-AVX512-NEXT:    vpaddd %ymm1, %ymm2, %ymm1
; CHECK-AVX512-NEXT:    addq $4, %rax
; CHECK-AVX512-NEXT:    jne .LBB10_1
; CHECK-AVX512-NEXT:  # %bb.2: # %middle.block
; CHECK-AVX512-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
; CHECK-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; CHECK-AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; CHECK-AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-AVX512-NEXT:    vpsrlq $32, %xmm0, %xmm1
; CHECK-AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-AVX512-NEXT:    vmovd %xmm0, %eax
; CHECK-AVX512-NEXT:    vzeroupper
; CHECK-AVX512-NEXT:    retq
;
; CHECK-VBMI-LABEL: sad_16i8_256:
; CHECK-VBMI:       # %bb.0: # %entry
; CHECK-VBMI-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; CHECK-VBMI-NEXT:    movq $-1024, %rax # imm = 0xFC00
; CHECK-VBMI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; CHECK-VBMI-NEXT:    .p2align 4, 0x90
; CHECK-VBMI-NEXT:  .LBB10_1: # %vector.body
; CHECK-VBMI-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-VBMI-NEXT:    vmovdqu a+1024(%rax), %xmm2
; CHECK-VBMI-NEXT:    vpsadbw b+1024(%rax), %xmm2, %xmm2
; CHECK-VBMI-NEXT:    vpaddd %ymm1, %ymm2, %ymm1
; CHECK-VBMI-NEXT:    addq $4, %rax
; CHECK-VBMI-NEXT:    jne .LBB10_1
; CHECK-VBMI-NEXT:  # %bb.2: # %middle.block
; CHECK-VBMI-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
; CHECK-VBMI-NEXT:    vextracti128 $1, %ymm0, %xmm1
; CHECK-VBMI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-VBMI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; CHECK-VBMI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-VBMI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; CHECK-VBMI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-VBMI-NEXT:    vmovd %xmm0, %eax
; CHECK-VBMI-NEXT:    vzeroupper
; CHECK-VBMI-NEXT:    retq
entry:
  br label %vector.body

vector.body:
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %vec.phi = phi <16 x i32> [ zeroinitializer, %entry ], [ %10, %vector.body ]
  %0 = getelementptr inbounds [1024 x i8], [1024 x i8]* @a, i64 0, i64 %index
  %1 = bitcast i8* %0 to <16 x i8>*
  %wide.load = load <16 x i8>, <16 x i8>* %1, align 4
  %2 = zext <16 x i8> %wide.load to <16 x i32>
  %3 = getelementptr inbounds [1024 x i8], [1024 x i8]* @b, i64 0, i64 %index
  %4 = bitcast i8* %3 to <16 x i8>*
  %wide.load1 = load <16 x i8>, <16 x i8>* %4, align 4
  %5 = zext <16 x i8> %wide.load1 to <16 x i32>
  %6 = sub nsw <16 x i32> %2, %5
  %7 = icmp sgt <16 x i32> %6, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
  %8 = sub nsw <16 x i32> zeroinitializer, %6
  %9 = select <16 x i1> %7, <16 x i32> %6, <16 x i32> %8
  %10 = add nsw <16 x i32> %9, %vec.phi
  %index.next = add i64 %index, 4
  %11 = icmp eq i64 %index.next, 1024
  br i1 %11, label %middle.block, label %vector.body

middle.block:
  %rdx.shuf = shufflevector <16 x i32> %10, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx = add <16 x i32> %10, %rdx.shuf
  %rdx.shuf2 = shufflevector <16 x i32> %bin.rdx, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx2 = add <16 x i32> %bin.rdx, %rdx.shuf2
  %rdx.shuf3 = shufflevector <16 x i32> %bin.rdx2, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx3 = add <16 x i32> %bin.rdx2, %rdx.shuf3
  %rdx.shuf4 = shufflevector <16 x i32> %bin.rdx3, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx4 = add <16 x i32> %bin.rdx3, %rdx.shuf4
  %12 = extractelement <16 x i32> %bin.rdx4, i32 0
  ret i32 %12
}

define dso_local i32 @sad_16i8_512() "min-legal-vector-width"="512" {
; CHECK-SKX-LABEL: sad_16i8_512:
; CHECK-SKX:       # %bb.0: # %entry
; CHECK-SKX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; CHECK-SKX-NEXT:    movq $-1024, %rax # imm = 0xFC00
; CHECK-SKX-NEXT:    .p2align 4, 0x90
; CHECK-SKX-NEXT:  .LBB11_1: # %vector.body
; CHECK-SKX-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-SKX-NEXT:    vmovdqu a+1024(%rax), %xmm1
; CHECK-SKX-NEXT:    vpsadbw b+1024(%rax), %xmm1, %xmm1
; CHECK-SKX-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
; CHECK-SKX-NEXT:    addq $4, %rax
; CHECK-SKX-NEXT:    jne .LBB11_1
; CHECK-SKX-NEXT:  # %bb.2: # %middle.block
; CHECK-SKX-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; CHECK-SKX-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; CHECK-SKX-NEXT:    vextracti128 $1, %ymm0, %xmm1
; CHECK-SKX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-SKX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; CHECK-SKX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-SKX-NEXT:    vpsrlq $32, %xmm0, %xmm1
; CHECK-SKX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-SKX-NEXT:    vmovd %xmm0, %eax
; CHECK-SKX-NEXT:    vzeroupper
; CHECK-SKX-NEXT:    retq
;
; CHECK-AVX512-LABEL: sad_16i8_512:
; CHECK-AVX512:       # %bb.0: # %entry
; CHECK-AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; CHECK-AVX512-NEXT:    movq $-1024, %rax # imm = 0xFC00
; CHECK-AVX512-NEXT:    .p2align 4, 0x90
; CHECK-AVX512-NEXT:  .LBB11_1: # %vector.body
; CHECK-AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-AVX512-NEXT:    vmovdqu a+1024(%rax), %xmm1
; CHECK-AVX512-NEXT:    vpsadbw b+1024(%rax), %xmm1, %xmm1
; CHECK-AVX512-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
; CHECK-AVX512-NEXT:    addq $4, %rax
; CHECK-AVX512-NEXT:    jne .LBB11_1
; CHECK-AVX512-NEXT:  # %bb.2: # %middle.block
; CHECK-AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; CHECK-AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; CHECK-AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; CHECK-AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; CHECK-AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-AVX512-NEXT:    vpsrlq $32, %xmm0, %xmm1
; CHECK-AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-AVX512-NEXT:    vmovd %xmm0, %eax
; CHECK-AVX512-NEXT:    vzeroupper
; CHECK-AVX512-NEXT:    retq
;
; CHECK-VBMI-LABEL: sad_16i8_512:
; CHECK-VBMI:       # %bb.0: # %entry
; CHECK-VBMI-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; CHECK-VBMI-NEXT:    movq $-1024, %rax # imm = 0xFC00
; CHECK-VBMI-NEXT:    .p2align 4, 0x90
; CHECK-VBMI-NEXT:  .LBB11_1: # %vector.body
; CHECK-VBMI-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-VBMI-NEXT:    vmovdqu a+1024(%rax), %xmm1
; CHECK-VBMI-NEXT:    vpsadbw b+1024(%rax), %xmm1, %xmm1
; CHECK-VBMI-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
; CHECK-VBMI-NEXT:    addq $4, %rax
; CHECK-VBMI-NEXT:    jne .LBB11_1
; CHECK-VBMI-NEXT:  # %bb.2: # %middle.block
; CHECK-VBMI-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; CHECK-VBMI-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; CHECK-VBMI-NEXT:    vextracti128 $1, %ymm0, %xmm1
; CHECK-VBMI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-VBMI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; CHECK-VBMI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-VBMI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; CHECK-VBMI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-VBMI-NEXT:    vmovd %xmm0, %eax
; CHECK-VBMI-NEXT:    vzeroupper
; CHECK-VBMI-NEXT:    retq
entry:
  br label %vector.body

vector.body:
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %vec.phi = phi <16 x i32> [ zeroinitializer, %entry ], [ %10, %vector.body ]
  %0 = getelementptr inbounds [1024 x i8], [1024 x i8]* @a, i64 0, i64 %index
  %1 = bitcast i8* %0 to <16 x i8>*
  %wide.load = load <16 x i8>, <16 x i8>* %1, align 4
  %2 = zext <16 x i8> %wide.load to <16 x i32>
  %3 = getelementptr inbounds [1024 x i8], [1024 x i8]* @b, i64 0, i64 %index
  %4 = bitcast i8* %3 to <16 x i8>*
  %wide.load1 = load <16 x i8>, <16 x i8>* %4, align 4
  %5 = zext <16 x i8> %wide.load1 to <16 x i32>
  %6 = sub nsw <16 x i32> %2, %5
  %7 = icmp sgt <16 x i32> %6, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
  %8 = sub nsw <16 x i32> zeroinitializer, %6
  %9 = select <16 x i1> %7, <16 x i32> %6, <16 x i32> %8
  %10 = add nsw <16 x i32> %9, %vec.phi
  %index.next = add i64 %index, 4
  %11 = icmp eq i64 %index.next, 1024
  br i1 %11, label %middle.block, label %vector.body

middle.block:
  %rdx.shuf = shufflevector <16 x i32> %10, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx = add <16 x i32> %10, %rdx.shuf
  %rdx.shuf2 = shufflevector <16 x i32> %bin.rdx, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx2 = add <16 x i32> %bin.rdx, %rdx.shuf2
  %rdx.shuf3 = shufflevector <16 x i32> %bin.rdx2, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx3 = add <16 x i32> %bin.rdx2, %rdx.shuf3
  %rdx.shuf4 = shufflevector <16 x i32> %bin.rdx3, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx4 = add <16 x i32> %bin.rdx3, %rdx.shuf4
  %12 = extractelement <16 x i32> %bin.rdx4, i32 0
  ret i32 %12
}
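
; icmp produces a <16 x i1> mask; sitofp/uitofp of that mask goes through
; vpmovw2m/vpmovm2d, split into two ymm halves at 256 bits or kept in zmm at
; 512 bits.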

define dso_local void @sbto16f32_256(<16 x i16> %a, <16 x float>* %res) "min-legal-vector-width"="256" {
; CHECK-LABEL: sbto16f32_256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpmovw2m %ymm0, %k0
; CHECK-NEXT:    kshiftrw $8, %k0, %k1
; CHECK-NEXT:    vpmovm2d %k1, %ymm0
; CHECK-NEXT:    vcvtdq2ps %ymm0, %ymm0
; CHECK-NEXT:    vpmovm2d %k0, %ymm1
; CHECK-NEXT:    vcvtdq2ps %ymm1, %ymm1
; CHECK-NEXT:    vmovaps %ymm1, (%rdi)
; CHECK-NEXT:    vmovaps %ymm0, 32(%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %mask = icmp slt <16 x i16> %a, zeroinitializer
  %1 = sitofp <16 x i1> %mask to <16 x float>
  store <16 x float> %1, <16 x float>* %res
  ret void
}

define dso_local void @sbto16f32_512(<16 x i16> %a, <16 x float>* %res) "min-legal-vector-width"="512" {
; CHECK-LABEL: sbto16f32_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpmovw2m %ymm0, %k0
; CHECK-NEXT:    vpmovm2d %k0, %zmm0
; CHECK-NEXT:    vcvtdq2ps %zmm0, %zmm0
; CHECK-NEXT:    vmovaps %zmm0, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %mask = icmp slt <16 x i16> %a, zeroinitializer
  %1 = sitofp <16 x i1> %mask to <16 x float>
  store <16 x float> %1, <16 x float>* %res
  ret void
}

define dso_local void @sbto16f64_256(<16 x i16> %a, <16 x double>* %res) "min-legal-vector-width"="256" {
; CHECK-LABEL: sbto16f64_256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpmovw2m %ymm0, %k0
; CHECK-NEXT:    kshiftrw $8, %k0, %k1
; CHECK-NEXT:    vpmovm2d %k1, %ymm0
; CHECK-NEXT:    vcvtdq2pd %xmm0, %ymm1
; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT:    vcvtdq2pd %xmm0, %ymm0
; CHECK-NEXT:    vpmovm2d %k0, %ymm2
; CHECK-NEXT:    vcvtdq2pd %xmm2, %ymm3
; CHECK-NEXT:    vextracti128 $1, %ymm2, %xmm2
; CHECK-NEXT:    vcvtdq2pd %xmm2, %ymm2
; CHECK-NEXT:    vmovaps %ymm2, 32(%rdi)
; CHECK-NEXT:    vmovaps %ymm3, (%rdi)
; CHECK-NEXT:    vmovaps %ymm0, 96(%rdi)
; CHECK-NEXT:    vmovaps %ymm1, 64(%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %mask = icmp slt <16 x i16> %a, zeroinitializer
  %1 = sitofp <16 x i1> %mask to <16 x double>
  store <16 x double> %1, <16 x double>* %res
  ret void
}

define dso_local void @sbto16f64_512(<16 x i16> %a, <16 x double>* %res) "min-legal-vector-width"="512" {
; CHECK-LABEL: sbto16f64_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpmovw2m %ymm0, %k0
; CHECK-NEXT:    vpmovm2d %k0, %zmm0
; CHECK-NEXT:    vcvtdq2pd %ymm0, %zmm1
; CHECK-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
; CHECK-NEXT:    vcvtdq2pd %ymm0, %zmm0
; CHECK-NEXT:    vmovaps %zmm0, 64(%rdi)
; CHECK-NEXT:    vmovaps %zmm1, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %mask = icmp slt <16 x i16> %a, zeroinitializer
  %1 = sitofp <16 x i1> %mask to <16 x double>
  store <16 x double> %1, <16 x double>* %res
  ret void
}

define dso_local void @ubto16f32_256(<16 x i16> %a, <16 x float>* %res) "min-legal-vector-width"="256" {
; CHECK-LABEL: ubto16f32_256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpmovw2m %ymm0, %k0
; CHECK-NEXT:    kshiftrw $8, %k0, %k1
; CHECK-NEXT:    vpmovm2d %k1, %ymm0
; CHECK-NEXT:    vpsrld $31, %ymm0, %ymm0
; CHECK-NEXT:    vcvtdq2ps %ymm0, %ymm0
; CHECK-NEXT:    vpmovm2d %k0, %ymm1
; CHECK-NEXT:    vpsrld $31, %ymm1, %ymm1
; CHECK-NEXT:    vcvtdq2ps %ymm1, %ymm1
; CHECK-NEXT:    vmovaps %ymm1, (%rdi)
; CHECK-NEXT:    vmovaps %ymm0, 32(%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %mask = icmp slt <16 x i16> %a, zeroinitializer
  %1 = uitofp <16 x i1> %mask to <16 x float>
  store <16 x float> %1, <16 x float>* %res
  ret void
}

define dso_local void @ubto16f32_512(<16 x i16> %a, <16 x float>* %res) "min-legal-vector-width"="512" {
; CHECK-LABEL: ubto16f32_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpmovw2m %ymm0, %k0
; CHECK-NEXT:    vpmovm2d %k0, %zmm0
; CHECK-NEXT:    vpsrld $31, %zmm0, %zmm0
; CHECK-NEXT:    vcvtdq2ps %zmm0, %zmm0
; CHECK-NEXT:    vmovaps %zmm0, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %mask = icmp slt <16 x i16> %a, zeroinitializer
  %1 = uitofp <16 x i1> %mask to <16 x float>
  store <16 x float> %1, <16 x float>* %res
  ret void
}

define dso_local void @ubto16f64_256(<16 x i16> %a, <16 x double>* %res) "min-legal-vector-width"="256" {
; CHECK-LABEL: ubto16f64_256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpmovw2m %ymm0, %k0
; CHECK-NEXT:    kshiftrw $8, %k0, %k1
; CHECK-NEXT:    vpmovm2d %k1, %ymm0
; CHECK-NEXT:    vpsrld $31, %ymm0, %ymm0
; CHECK-NEXT:    vcvtdq2pd %xmm0, %ymm1
; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT:    vcvtdq2pd %xmm0, %ymm0
; CHECK-NEXT:    vpmovm2d %k0, %ymm2
; CHECK-NEXT:    vpsrld $31, %ymm2, %ymm2
; CHECK-NEXT:    vcvtdq2pd %xmm2, %ymm3
; CHECK-NEXT:    vextracti128 $1, %ymm2, %xmm2
; CHECK-NEXT:    vcvtdq2pd %xmm2, %ymm2
; CHECK-NEXT:    vmovaps %ymm2, 32(%rdi)
; CHECK-NEXT:    vmovaps %ymm3, (%rdi)
; CHECK-NEXT:    vmovaps %ymm0, 96(%rdi)
; CHECK-NEXT:    vmovaps %ymm1, 64(%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %mask = icmp slt <16 x i16> %a, zeroinitializer
  %1 = uitofp <16 x i1> %mask to <16 x double>
  store <16 x double> %1, <16 x double>* %res
  ret void
}

define dso_local void @ubto16f64_512(<16 x i16> %a, <16 x double>* %res) "min-legal-vector-width"="512" {
; CHECK-LABEL: ubto16f64_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpmovw2m %ymm0, %k0
; CHECK-NEXT:    vpmovm2d %k0, %zmm0
; CHECK-NEXT:    vpsrld $31, %zmm0, %zmm0
; CHECK-NEXT:    vcvtdq2pd %ymm0, %zmm1
; CHECK-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
; CHECK-NEXT:    vcvtdq2pd %ymm0, %zmm0
; CHECK-NEXT:    vmovaps %zmm0, 64(%rdi)
; CHECK-NEXT:    vmovaps %zmm1, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %mask = icmp slt <16 x i16> %a, zeroinitializer
  %1 = uitofp <16 x i1> %mask to <16 x double>
  store <16 x double> %1, <16 x double>* %res
  ret void
}
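
; The reverse direction: fptoui/fptosi to <16 x i1> used as a select mask.
; For unsigned only bit 0 of the converted value matters (vpslld $31 before
; vpmovd2m); for signed the sign bit is tested directly.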

define <16 x i16> @test_16f32toub_256(<16 x float>* %ptr, <16 x i16> %passthru) "min-legal-vector-width"="256" {
; CHECK-LABEL: test_16f32toub_256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvttps2dq (%rdi), %ymm1
; CHECK-NEXT:    vpslld $31, %ymm1, %ymm1
; CHECK-NEXT:    vpmovd2m %ymm1, %k0
; CHECK-NEXT:    vcvttps2dq 32(%rdi), %ymm1
; CHECK-NEXT:    vpslld $31, %ymm1, %ymm1
; CHECK-NEXT:    vpmovd2m %ymm1, %k1
; CHECK-NEXT:    kunpckbw %k0, %k1, %k1
; CHECK-NEXT:    vmovdqu16 %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT:    retq
  %a = load <16 x float>, <16 x float>* %ptr
  %mask = fptoui <16 x float> %a to <16 x i1>
  %select = select <16 x i1> %mask, <16 x i16> %passthru, <16 x i16> zeroinitializer
  ret <16 x i16> %select
}

define <16 x i16> @test_16f32toub_512(<16 x float>* %ptr, <16 x i16> %passthru) "min-legal-vector-width"="512" {
; CHECK-LABEL: test_16f32toub_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvttps2dq (%rdi), %zmm1
; CHECK-NEXT:    vpslld $31, %zmm1, %zmm1
; CHECK-NEXT:    vpmovd2m %zmm1, %k1
; CHECK-NEXT:    vmovdqu16 %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT:    retq
  %a = load <16 x float>, <16 x float>* %ptr
  %mask = fptoui <16 x float> %a to <16 x i1>
  %select = select <16 x i1> %mask, <16 x i16> %passthru, <16 x i16> zeroinitializer
  ret <16 x i16> %select
}

define <16 x i16> @test_16f32tosb_256(<16 x float>* %ptr, <16 x i16> %passthru) "min-legal-vector-width"="256" {
; CHECK-LABEL: test_16f32tosb_256:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvttps2dq (%rdi), %ymm1
; CHECK-NEXT:    vpmovd2m %ymm1, %k0
; CHECK-NEXT:    vcvttps2dq 32(%rdi), %ymm1
; CHECK-NEXT:    vpmovd2m %ymm1, %k1
; CHECK-NEXT:    kunpckbw %k0, %k1, %k1
; CHECK-NEXT:    vmovdqu16 %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT:    retq
  %a = load <16 x float>, <16 x float>* %ptr
  %mask = fptosi <16 x float> %a to <16 x i1>
  %select = select <16 x i1> %mask, <16 x i16> %passthru, <16 x i16> zeroinitializer
  ret <16 x i16> %select
}

define <16 x i16> @test_16f32tosb_512(<16 x float>* %ptr, <16 x i16> %passthru) "min-legal-vector-width"="512" {
; CHECK-LABEL: test_16f32tosb_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvttps2dq (%rdi), %zmm1
; CHECK-NEXT:    vpmovd2m %zmm1, %k1
; CHECK-NEXT:    vmovdqu16 %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT:    retq
  %a = load <16 x float>, <16 x float>* %ptr
  %mask = fptosi <16 x float> %a to <16 x i1>
  %select = select <16 x i1> %mask, <16 x i16> %passthru, <16 x i16> zeroinitializer
  ret <16 x i16> %select
}
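
; There is no vector i8 multiply instruction: the bytes are unpacked to i16,
; multiplied with vpmullw, and repacked. With VBMI the repack is a single
; vpermt2b/vpermi2b; without it, vpand+vpackuswb is used.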

define dso_local void @mul256(<64 x i8>* %a, <64 x i8>* %b, <64 x i8>* %c) "min-legal-vector-width"="256" {
; CHECK-SKX-VBMI-LABEL: mul256:
; CHECK-SKX-VBMI:       # %bb.0:
; CHECK-SKX-VBMI-NEXT:    vmovdqa (%rdi), %ymm0
; CHECK-SKX-VBMI-NEXT:    vmovdqa 32(%rdi), %ymm1
; CHECK-SKX-VBMI-NEXT:    vmovdqa (%rsi), %ymm2
; CHECK-SKX-VBMI-NEXT:    vmovdqa 32(%rsi), %ymm3
; CHECK-SKX-VBMI-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; CHECK-SKX-VBMI-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; CHECK-SKX-VBMI-NEXT:    vpmullw %ymm4, %ymm5, %ymm4
; CHECK-SKX-VBMI-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; CHECK-SKX-VBMI-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; CHECK-SKX-VBMI-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
; CHECK-SKX-VBMI-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,2,4,6,8,10,12,14,32,34,36,38,40,42,44,46,16,18,20,22,24,26,28,30,48,50,52,54,56,58,60,62]
; CHECK-SKX-VBMI-NEXT:    vpermt2b %ymm4, %ymm3, %ymm1
; CHECK-SKX-VBMI-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; CHECK-SKX-VBMI-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; CHECK-SKX-VBMI-NEXT:    vpmullw %ymm4, %ymm5, %ymm4
; CHECK-SKX-VBMI-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; CHECK-SKX-VBMI-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; CHECK-SKX-VBMI-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
; CHECK-SKX-VBMI-NEXT:    vpermt2b %ymm4, %ymm3, %ymm0
; CHECK-SKX-VBMI-NEXT:    vmovdqa %ymm0, (%rdx)
; CHECK-SKX-VBMI-NEXT:    vmovdqa %ymm1, 32(%rdx)
; CHECK-SKX-VBMI-NEXT:    vzeroupper
; CHECK-SKX-VBMI-NEXT:    retq
;
; CHECK-AVX512-LABEL: mul256:
; CHECK-AVX512:       # %bb.0:
; CHECK-AVX512-NEXT:    vmovdqa (%rdi), %ymm0
; CHECK-AVX512-NEXT:    vmovdqa 32(%rdi), %ymm1
; CHECK-AVX512-NEXT:    vmovdqa (%rsi), %ymm2
; CHECK-AVX512-NEXT:    vmovdqa 32(%rsi), %ymm3
; CHECK-AVX512-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; CHECK-AVX512-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; CHECK-AVX512-NEXT:    vpmullw %ymm4, %ymm5, %ymm4
; CHECK-AVX512-NEXT:    vpbroadcastw {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; CHECK-AVX512-NEXT:    vpand %ymm5, %ymm4, %ymm4
; CHECK-AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; CHECK-AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; CHECK-AVX512-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
; CHECK-AVX512-NEXT:    vpand %ymm5, %ymm1, %ymm1
; CHECK-AVX512-NEXT:    vpackuswb %ymm4, %ymm1, %ymm1
; CHECK-AVX512-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; CHECK-AVX512-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; CHECK-AVX512-NEXT:    vpmullw %ymm3, %ymm4, %ymm3
; CHECK-AVX512-NEXT:    vpand %ymm5, %ymm3, %ymm3
; CHECK-AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; CHECK-AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; CHECK-AVX512-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
; CHECK-AVX512-NEXT:    vpand %ymm5, %ymm0, %ymm0
; CHECK-AVX512-NEXT:    vpackuswb %ymm3, %ymm0, %ymm0
; CHECK-AVX512-NEXT:    vmovdqa %ymm0, (%rdx)
; CHECK-AVX512-NEXT:    vmovdqa %ymm1, 32(%rdx)
; CHECK-AVX512-NEXT:    vzeroupper
; CHECK-AVX512-NEXT:    retq
;
; CHECK-VBMI-LABEL: mul256:
; CHECK-VBMI:       # %bb.0:
; CHECK-VBMI-NEXT:    vmovdqa (%rdi), %ymm0
; CHECK-VBMI-NEXT:    vmovdqa 32(%rdi), %ymm1
; CHECK-VBMI-NEXT:    vmovdqa (%rsi), %ymm2
; CHECK-VBMI-NEXT:    vmovdqa 32(%rsi), %ymm3
; CHECK-VBMI-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; CHECK-VBMI-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; CHECK-VBMI-NEXT:    vpmullw %ymm4, %ymm5, %ymm4
; CHECK-VBMI-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; CHECK-VBMI-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; CHECK-VBMI-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
; CHECK-VBMI-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,2,4,6,8,10,12,14,32,34,36,38,40,42,44,46,16,18,20,22,24,26,28,30,48,50,52,54,56,58,60,62]
; CHECK-VBMI-NEXT:    vpermt2b %ymm4, %ymm3, %ymm1
; CHECK-VBMI-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; CHECK-VBMI-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; CHECK-VBMI-NEXT:    vpmullw %ymm4, %ymm5, %ymm4
; CHECK-VBMI-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; CHECK-VBMI-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; CHECK-VBMI-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
; CHECK-VBMI-NEXT:    vpermt2b %ymm4, %ymm3, %ymm0
; CHECK-VBMI-NEXT:    vmovdqa %ymm0, (%rdx)
; CHECK-VBMI-NEXT:    vmovdqa %ymm1, 32(%rdx)
; CHECK-VBMI-NEXT:    vzeroupper
; CHECK-VBMI-NEXT:    retq
  %d = load <64 x i8>, <64 x i8>* %a
  %e = load <64 x i8>, <64 x i8>* %b
  %f = mul <64 x i8> %d, %e
  store <64 x i8> %f, <64 x i8>* %c
  ret void
}

define dso_local void @mul512(<64 x i8>* %a, <64 x i8>* %b, <64 x i8>* %c) "min-legal-vector-width"="512" {
; CHECK-SKX-VBMI-LABEL: mul512:
; CHECK-SKX-VBMI:       # %bb.0:
; CHECK-SKX-VBMI-NEXT:    vmovdqa64 (%rdi), %zmm0
; CHECK-SKX-VBMI-NEXT:    vmovdqa64 (%rsi), %zmm1
; CHECK-SKX-VBMI-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; CHECK-SKX-VBMI-NEXT:    vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; CHECK-SKX-VBMI-NEXT:    vpmullw %zmm2, %zmm3, %zmm2
; CHECK-SKX-VBMI-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; CHECK-SKX-VBMI-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; CHECK-SKX-VBMI-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
; CHECK-SKX-VBMI-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,2,4,6,8,10,12,14,64,66,68,70,72,74,76,78,16,18,20,22,24,26,28,30,80,82,84,86,88,90,92,94,32,34,36,38,40,42,44,46,96,98,100,102,104,106,108,110,48,50,52,54,56,58,60,62,112,114,116,118,120,122,124,126]
; CHECK-SKX-VBMI-NEXT:    vpermi2b %zmm2, %zmm0, %zmm1
; CHECK-SKX-VBMI-NEXT:    vmovdqa64 %zmm1, (%rdx)
; CHECK-SKX-VBMI-NEXT:    vzeroupper
; CHECK-SKX-VBMI-NEXT:    retq
;
; CHECK-AVX512-LABEL: mul512:
; CHECK-AVX512:       # %bb.0:
; CHECK-AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
; CHECK-AVX512-NEXT:    vmovdqa64 (%rsi), %zmm1
; CHECK-AVX512-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; CHECK-AVX512-NEXT:    vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; CHECK-AVX512-NEXT:    vpmullw %zmm2, %zmm3, %zmm2
; CHECK-AVX512-NEXT:    vpbroadcastw {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; CHECK-AVX512-NEXT:    vpandq %zmm3, %zmm2, %zmm2
; CHECK-AVX512-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; CHECK-AVX512-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; CHECK-AVX512-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
; CHECK-AVX512-NEXT:    vpandq %zmm3, %zmm0, %zmm0
; CHECK-AVX512-NEXT:    vpackuswb %zmm2, %zmm0, %zmm0
; CHECK-AVX512-NEXT:    vmovdqa64 %zmm0, (%rdx)
; CHECK-AVX512-NEXT:    vzeroupper
; CHECK-AVX512-NEXT:    retq
;
; CHECK-VBMI-LABEL: mul512:
; CHECK-VBMI:       # %bb.0:
; CHECK-VBMI-NEXT:    vmovdqa64 (%rdi), %zmm0
; CHECK-VBMI-NEXT:    vmovdqa64 (%rsi), %zmm1
; CHECK-VBMI-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; CHECK-VBMI-NEXT:    vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; CHECK-VBMI-NEXT:    vpmullw %zmm2, %zmm3, %zmm2
; CHECK-VBMI-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; CHECK-VBMI-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; CHECK-VBMI-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
; CHECK-VBMI-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,2,4,6,8,10,12,14,64,66,68,70,72,74,76,78,16,18,20,22,24,26,28,30,80,82,84,86,88,90,92,94,32,34,36,38,40,42,44,46,96,98,100,102,104,106,108,110,48,50,52,54,56,58,60,62,112,114,116,118,120,122,124,126]
; CHECK-VBMI-NEXT:    vpermi2b %zmm2, %zmm0, %zmm1
; CHECK-VBMI-NEXT:    vmovdqa64 %zmm1, (%rdx)
; CHECK-VBMI-NEXT:    vzeroupper
; CHECK-VBMI-NEXT:    retq
  %d = load <64 x i8>, <64 x i8>* %a
  %e = load <64 x i8>, <64 x i8>* %b
  %f = mul <64 x i8> %d, %e
  store <64 x i8> %f, <64 x i8>* %c
  ret void
}

; This threw an assertion at one point.
define <4 x i32> @mload_v4i32(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %dst) "min-legal-vector-width"="256" {
; CHECK-LABEL: mload_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vptestnmd %xmm0, %xmm0, %k1
; CHECK-NEXT:    vpblendmd (%rdi), %xmm1, %xmm0 {%k1}
; CHECK-NEXT:    retq
  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
  %res = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr, i32 4, <4 x i1> %mask, <4 x i32> %dst)
  ret <4 x i32> %res
}
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
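
; Truncates of 512-bit and wider sources: with a 256-bit limit the input is
; loaded as ymm halves, each half is truncated with vpmovq*/vpmovd*, and the
; results are recombined with shuffles.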

define <16 x i32> @trunc_v16i64_v16i32(<16 x i64>* %x) nounwind "min-legal-vector-width"="256" {
; CHECK-LABEL: trunc_v16i64_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
; CHECK-NEXT:    vmovdqa 32(%rdi), %ymm1
; CHECK-NEXT:    vmovdqa 64(%rdi), %ymm2
; CHECK-NEXT:    vmovdqa 96(%rdi), %ymm3
; CHECK-NEXT:    vpmovqd %ymm0, %xmm0
; CHECK-NEXT:    vpmovqd %ymm1, %xmm1
; CHECK-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT:    vpmovqd %ymm2, %xmm1
; CHECK-NEXT:    vpmovqd %ymm3, %xmm2
; CHECK-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
; CHECK-NEXT:    retq
  %a = load <16 x i64>, <16 x i64>* %x
  %b = trunc <16 x i64> %a to <16 x i32>
  ret <16 x i32> %b
}
define <16 x i8> @trunc_v16i64_v16i8(<16 x i64>* %x) nounwind "min-legal-vector-width"="256" {
; CHECK-LABEL: trunc_v16i64_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
; CHECK-NEXT:    vmovdqa 32(%rdi), %ymm1
; CHECK-NEXT:    vmovdqa 64(%rdi), %ymm2
; CHECK-NEXT:    vmovdqa 96(%rdi), %ymm3
; CHECK-NEXT:    vpmovqb %ymm3, %xmm3
; CHECK-NEXT:    vpmovqb %ymm2, %xmm2
; CHECK-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; CHECK-NEXT:    vpmovqb %ymm1, %xmm1
; CHECK-NEXT:    vpmovqb %ymm0, %xmm0
; CHECK-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %a = load <16 x i64>, <16 x i64>* %x
  %b = trunc <16 x i64> %a to <16 x i8>
  ret <16 x i8> %b
}

define <16 x i8> @trunc_v16i32_v16i8(<16 x i32>* %x) nounwind "min-legal-vector-width"="256" {
; CHECK-LABEL: trunc_v16i32_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
; CHECK-NEXT:    vmovdqa 32(%rdi), %ymm1
; CHECK-NEXT:    vpmovdb %ymm1, %xmm1
; CHECK-NEXT:    vpmovdb %ymm0, %xmm0
; CHECK-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %a = load <16 x i32>, <16 x i32>* %x
  %b = trunc <16 x i32> %a to <16 x i8>
  ret <16 x i8> %b
}

define <8 x i8> @trunc_v8i64_v8i8(<8 x i64>* %x) nounwind "min-legal-vector-width"="256" {
; CHECK-LABEL: trunc_v8i64_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
; CHECK-NEXT:    vmovdqa 32(%rdi), %ymm1
; CHECK-NEXT:    vpmovqb %ymm1, %xmm1
; CHECK-NEXT:    vpmovqb %ymm0, %xmm0
; CHECK-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %a = load <8 x i64>, <8 x i64>* %x
  %b = trunc <8 x i64> %a to <8 x i8>
  ret <8 x i8> %b
}

define <8 x i16> @trunc_v8i64_v8i16(<8 x i64>* %x) nounwind "min-legal-vector-width"="256" {
; CHECK-LABEL: trunc_v8i64_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
; CHECK-NEXT:    vmovdqa 32(%rdi), %ymm1
; CHECK-NEXT:    vpmovqw %ymm1, %xmm1
; CHECK-NEXT:    vpmovqw %ymm0, %xmm0
; CHECK-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %a = load <8 x i64>, <8 x i64>* %x
  %b = trunc <8 x i64> %a to <8 x i16>
  ret <8 x i16> %b
}

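; In the _zeroes tests the shifted-in bits are known zero, so the truncate can
; be formed with unsigned packs or a single two-source permute instead of
; per-half vpmov* truncates.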
define <8 x i32> @trunc_v8i64_v8i32_zeroes(<8 x i64>* %x) nounwind "min-legal-vector-width"="256" {
; CHECK-LABEL: trunc_v8i64_v8i32_zeroes:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsrlq $48, 32(%rdi), %ymm0
; CHECK-NEXT:    vpsrlq $48, (%rdi), %ymm1
; CHECK-NEXT:    vpackusdw %ymm0, %ymm1, %ymm0
; CHECK-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; CHECK-NEXT:    retq
  %a = load <8 x i64>, <8 x i64>* %x
  %b = lshr <8 x i64> %a, <i64 48, i64 48, i64 48, i64 48, i64 48, i64 48, i64 48, i64 48>
  %c = trunc <8 x i64> %b to <8 x i32>
  ret <8 x i32> %c
}

define <16 x i16> @trunc_v16i32_v16i16_zeroes(<16 x i32>* %x) nounwind "min-legal-vector-width"="256" {
; CHECK-LABEL: trunc_v16i32_v16i16_zeroes:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa (%rdi), %ymm1
; CHECK-NEXT:    vmovdqa {{.*#+}} ymm0 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31]
; CHECK-NEXT:    vpermi2w 32(%rdi), %ymm1, %ymm0
; CHECK-NEXT:    retq
  %a = load <16 x i32>, <16 x i32>* %x
  %b = lshr <16 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %c = trunc <16 x i32> %b to <16 x i16>
  ret <16 x i16> %c
}

define <32 x i8> @trunc_v32i16_v32i8_zeroes(<32 x i16>* %x) nounwind "min-legal-vector-width"="256" {
; CHECK-SKX-VBMI-LABEL: trunc_v32i16_v32i8_zeroes:
; CHECK-SKX-VBMI:       # %bb.0:
; CHECK-SKX-VBMI-NEXT:    vmovdqa (%rdi), %ymm1
; CHECK-SKX-VBMI-NEXT:    vmovdqa {{.*#+}} ymm0 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39,41,43,45,47,49,51,53,55,57,59,61,63]
; CHECK-SKX-VBMI-NEXT:    vpermi2b 32(%rdi), %ymm1, %ymm0
; CHECK-SKX-VBMI-NEXT:    retq
;
; CHECK-AVX512-LABEL: trunc_v32i16_v32i8_zeroes:
; CHECK-AVX512:       # %bb.0:
; CHECK-AVX512-NEXT:    vpsrlw $8, 32(%rdi), %ymm0
; CHECK-AVX512-NEXT:    vpsrlw $8, (%rdi), %ymm1
; CHECK-AVX512-NEXT:    vpackuswb %ymm0, %ymm1, %ymm0
; CHECK-AVX512-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; CHECK-AVX512-NEXT:    retq
;
; CHECK-VBMI-LABEL: trunc_v32i16_v32i8_zeroes:
; CHECK-VBMI:       # %bb.0:
; CHECK-VBMI-NEXT:    vmovdqa (%rdi), %ymm1
; CHECK-VBMI-NEXT:    vmovdqa {{.*#+}} ymm0 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39,41,43,45,47,49,51,53,55,57,59,61,63]
; CHECK-VBMI-NEXT:    vpermi2b 32(%rdi), %ymm1, %ymm0
; CHECK-VBMI-NEXT:    retq
  %a = load <32 x i16>, <32 x i16>* %x
  %b = lshr <32 x i16> %a, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %c = trunc <32 x i16> %b to <32 x i8>
  ret <32 x i8> %c
}

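; The _sign variants shift arithmetically instead, so the i64 element case can
; use a signed pack (vpackssdw); the narrower cases lower the same way as the
; _zeroes tests because truncation keeps the same byte either way.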
define <8 x i32> @trunc_v8i64_v8i32_sign(<8 x i64>* %x) nounwind "min-legal-vector-width"="256" {
; CHECK-LABEL: trunc_v8i64_v8i32_sign:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsraq $48, 32(%rdi), %ymm0
; CHECK-NEXT:    vpsraq $48, (%rdi), %ymm1
; CHECK-NEXT:    vpackssdw %ymm0, %ymm1, %ymm0
; CHECK-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; CHECK-NEXT:    retq
  %a = load <8 x i64>, <8 x i64>* %x
  %b = ashr <8 x i64> %a, <i64 48, i64 48, i64 48, i64 48, i64 48, i64 48, i64 48, i64 48>
  %c = trunc <8 x i64> %b to <8 x i32>
  ret <8 x i32> %c
}

define <16 x i16> @trunc_v16i32_v16i16_sign(<16 x i32>* %x) nounwind "min-legal-vector-width"="256" {
; CHECK-LABEL: trunc_v16i32_v16i16_sign:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa (%rdi), %ymm1
; CHECK-NEXT:    vmovdqa {{.*#+}} ymm0 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31]
; CHECK-NEXT:    vpermi2w 32(%rdi), %ymm1, %ymm0
; CHECK-NEXT:    retq
  %a = load <16 x i32>, <16 x i32>* %x
  %b = ashr <16 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %c = trunc <16 x i32> %b to <16 x i16>
  ret <16 x i16> %c
}

define <32 x i8> @trunc_v32i16_v32i8_sign(<32 x i16>* %x) nounwind "min-legal-vector-width"="256" {
; CHECK-SKX-VBMI-LABEL: trunc_v32i16_v32i8_sign:
; CHECK-SKX-VBMI:       # %bb.0:
; CHECK-SKX-VBMI-NEXT:    vmovdqa (%rdi), %ymm1
; CHECK-SKX-VBMI-NEXT:    vmovdqa {{.*#+}} ymm0 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39,41,43,45,47,49,51,53,55,57,59,61,63]
; CHECK-SKX-VBMI-NEXT:    vpermi2b 32(%rdi), %ymm1, %ymm0
; CHECK-SKX-VBMI-NEXT:    retq
;
; CHECK-AVX512-LABEL: trunc_v32i16_v32i8_sign:
; CHECK-AVX512:       # %bb.0:
; CHECK-AVX512-NEXT:    vpsrlw $8, 32(%rdi), %ymm0
; CHECK-AVX512-NEXT:    vpsrlw $8, (%rdi), %ymm1
; CHECK-AVX512-NEXT:    vpackuswb %ymm0, %ymm1, %ymm0
; CHECK-AVX512-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; CHECK-AVX512-NEXT:    retq
;
; CHECK-VBMI-LABEL: trunc_v32i16_v32i8_sign:
; CHECK-VBMI:       # %bb.0:
; CHECK-VBMI-NEXT:    vmovdqa (%rdi), %ymm1
; CHECK-VBMI-NEXT:    vmovdqa {{.*#+}} ymm0 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39,41,43,45,47,49,51,53,55,57,59,61,63]
; CHECK-VBMI-NEXT:    vpermi2b 32(%rdi), %ymm1, %ymm0
; CHECK-VBMI-NEXT:    retq
  %a = load <32 x i16>, <32 x i16>* %x
  %b = ashr <32 x i16> %a, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %c = trunc <32 x i16> %b to <32 x i8>
  ret <32 x i8> %c
}

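; Extends from <16 x i8> to <16 x i64> must likewise be emitted as four
; 256-bit pieces rather than two 512-bit ones.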
define dso_local void @zext_v16i8_v16i64(<16 x i8> %x, <16 x i64>* %y) nounwind "min-legal-vector-width"="256" {
; CHECK-LABEL: zext_v16i8_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; CHECK-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; CHECK-NEXT:    vpmovzxwq {{.*#+}} ymm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
; CHECK-NEXT:    vextracti128 $1, %ymm1, %xmm1
; CHECK-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
; CHECK-NEXT:    vpmovzxwq {{.*#+}} ymm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
; CHECK-NEXT:    vpmovzxwq {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
; CHECK-NEXT:    vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT:    vmovdqa %ymm0, (%rdi)
; CHECK-NEXT:    vmovdqa %ymm1, 64(%rdi)
; CHECK-NEXT:    vmovdqa %ymm3, 96(%rdi)
; CHECK-NEXT:    vmovdqa %ymm2, 32(%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %a = zext <16 x i8> %x to <16 x i64>
  store <16 x i64> %a, <16 x i64>* %y
  ret void
}

define dso_local void @sext_v16i8_v16i64(<16 x i8> %x, <16 x i64>* %y) nounwind "min-legal-vector-width"="256" {
; CHECK-LABEL: sext_v16i8_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpmovsxbw %xmm0, %ymm1
; CHECK-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; CHECK-NEXT:    vpmovsxwq %xmm2, %ymm2
; CHECK-NEXT:    vextracti128 $1, %ymm1, %xmm1
; CHECK-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
; CHECK-NEXT:    vpmovsxwq %xmm3, %ymm3
; CHECK-NEXT:    vpmovsxwq %xmm1, %ymm1
; CHECK-NEXT:    vpmovsxbq %xmm0, %ymm0
; CHECK-NEXT:    vmovdqa %ymm0, (%rdi)
; CHECK-NEXT:    vmovdqa %ymm1, 64(%rdi)
; CHECK-NEXT:    vmovdqa %ymm3, 96(%rdi)
; CHECK-NEXT:    vmovdqa %ymm2, 32(%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %a = sext <16 x i8> %x to <16 x i64>
  store <16 x i64> %a, <16 x i64>* %y
  ret void
}

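; Selects controlled by a compare of a narrower element type are split; the
; single compare mask is divided with kshift* to cover each 256-bit half.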
define dso_local void @vselect_split_v8i16_setcc(<8 x i16> %s, <8 x i16> %t, <8 x i64>* %p, <8 x i64>* %q, <8 x i64>* %r) "min-legal-vector-width"="256" {
; CHECK-LABEL: vselect_split_v8i16_setcc:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa (%rsi), %ymm2
; CHECK-NEXT:    vmovdqa 32(%rsi), %ymm3
; CHECK-NEXT:    vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT:    kshiftrb $4, %k1, %k2
; CHECK-NEXT:    vmovdqa64 32(%rdi), %ymm3 {%k2}
; CHECK-NEXT:    vmovdqa64 (%rdi), %ymm2 {%k1}
; CHECK-NEXT:    vmovdqa %ymm2, (%rdx)
; CHECK-NEXT:    vmovdqa %ymm3, 32(%rdx)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %x = load <8 x i64>, <8 x i64>* %p
  %y = load <8 x i64>, <8 x i64>* %q
  %a = icmp eq <8 x i16> %s, %t
  %b = select <8 x i1> %a, <8 x i64> %x, <8 x i64> %y
  store <8 x i64> %b, <8 x i64>* %r
  ret void
}

define dso_local void @vselect_split_v8i32_setcc(<8 x i32> %s, <8 x i32> %t, <8 x i64>* %p, <8 x i64>* %q, <8 x i64>* %r) "min-legal-vector-width"="256" {
; CHECK-LABEL: vselect_split_v8i32_setcc:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa (%rsi), %ymm2
; CHECK-NEXT:    vmovdqa 32(%rsi), %ymm3
; CHECK-NEXT:    vpcmpeqd %ymm1, %ymm0, %k1
; CHECK-NEXT:    kshiftrb $4, %k1, %k2
; CHECK-NEXT:    vmovdqa64 32(%rdi), %ymm3 {%k2}
; CHECK-NEXT:    vmovdqa64 (%rdi), %ymm2 {%k1}
; CHECK-NEXT:    vmovdqa %ymm2, (%rdx)
; CHECK-NEXT:    vmovdqa %ymm3, 32(%rdx)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %x = load <8 x i64>, <8 x i64>* %p
  %y = load <8 x i64>, <8 x i64>* %q
  %a = icmp eq <8 x i32> %s, %t
  %b = select <8 x i1> %a, <8 x i64> %x, <8 x i64> %y
  store <8 x i64> %b, <8 x i64>* %r
  ret void
}

define dso_local void @vselect_split_v16i8_setcc(<16 x i8> %s, <16 x i8> %t, <16 x i32>* %p, <16 x i32>* %q, <16 x i32>* %r) "min-legal-vector-width"="256" {
; CHECK-LABEL: vselect_split_v16i8_setcc:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa (%rsi), %ymm2
; CHECK-NEXT:    vmovdqa 32(%rsi), %ymm3
; CHECK-NEXT:    vpcmpeqb %xmm1, %xmm0, %k1
; CHECK-NEXT:    kshiftrw $8, %k1, %k2
; CHECK-NEXT:    vmovdqa32 32(%rdi), %ymm3 {%k2}
; CHECK-NEXT:    vmovdqa32 (%rdi), %ymm2 {%k1}
; CHECK-NEXT:    vmovdqa %ymm2, (%rdx)
; CHECK-NEXT:    vmovdqa %ymm3, 32(%rdx)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %x = load <16 x i32>, <16 x i32>* %p
  %y = load <16 x i32>, <16 x i32>* %q
  %a = icmp eq <16 x i8> %s, %t
  %b = select <16 x i1> %a, <16 x i32> %x, <16 x i32> %y
  store <16 x i32> %b, <16 x i32>* %r
  ret void
}

define dso_local void @vselect_split_v16i16_setcc(<16 x i16> %s, <16 x i16> %t, <16 x i32>* %p, <16 x i32>* %q, <16 x i32>* %r) "min-legal-vector-width"="256" {
; CHECK-LABEL: vselect_split_v16i16_setcc:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa (%rsi), %ymm2
; CHECK-NEXT:    vmovdqa 32(%rsi), %ymm3
; CHECK-NEXT:    vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT:    kshiftrw $8, %k1, %k2
; CHECK-NEXT:    vmovdqa32 32(%rdi), %ymm3 {%k2}
; CHECK-NEXT:    vmovdqa32 (%rdi), %ymm2 {%k1}
; CHECK-NEXT:    vmovdqa %ymm2, (%rdx)
; CHECK-NEXT:    vmovdqa %ymm3, 32(%rdx)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %x = load <16 x i32>, <16 x i32>* %p
  %y = load <16 x i32>, <16 x i32>* %q
  %a = icmp eq <16 x i16> %s, %t
  %b = select <16 x i1> %a, <16 x i32> %x, <16 x i32> %y
  store <16 x i32> %b, <16 x i32>* %r
  ret void
}

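; packus-style saturating truncate patterns should be recognized and lowered
; to the unsigned saturating truncate (vpmovuswb) on a 256-bit input.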
define <16 x i8> @trunc_packus_v16i32_v16i8(<16 x i32>* %p) "min-legal-vector-width"="256" {
; CHECK-LABEL: trunc_packus_v16i32_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
; CHECK-NEXT:    vpackusdw 32(%rdi), %ymm0, %ymm0
; CHECK-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; CHECK-NEXT:    vpmovuswb %ymm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %a = load <16 x i32>, <16 x i32>* %p
  %b = icmp slt <16 x i32> %a, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
  %c = select <16 x i1> %b, <16 x i32> %a, <16 x i32> <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
  %d = icmp sgt <16 x i32> %c, zeroinitializer
  %e = select <16 x i1> %d, <16 x i32> %c, <16 x i32> zeroinitializer
  %f = trunc <16 x i32> %e to <16 x i8>
  ret <16 x i8> %f
}

define dso_local void @trunc_packus_v16i32_v16i8_store(<16 x i32>* %p, <16 x i8>* %q) "min-legal-vector-width"="256" {
; CHECK-LABEL: trunc_packus_v16i32_v16i8_store:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
; CHECK-NEXT:    vpackusdw 32(%rdi), %ymm0, %ymm0
; CHECK-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; CHECK-NEXT:    vpmovuswb %ymm0, (%rsi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %a = load <16 x i32>, <16 x i32>* %p
  %b = icmp slt <16 x i32> %a, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
  %c = select <16 x i1> %b, <16 x i32> %a, <16 x i32> <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
  %d = icmp sgt <16 x i32> %c, zeroinitializer
  %e = select <16 x i1> %d, <16 x i32> %c, <16 x i32> zeroinitializer
  %f = trunc <16 x i32> %e to <16 x i8>
  store <16 x i8> %f, <16 x i8>* %q
  ret void
}

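; <64 x i1> stays legal as a mask type even though 512-bit vectors are
; disabled; the tests below cover it as an argument/return value, through a
; shuffle, and through inline asm.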
define <64 x i1> @v64i1_argument_return(<64 x i1> %x) "min-legal-vector-width"="256" {
; CHECK-LABEL: v64i1_argument_return:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  ret <64 x i1> %x
}

define dso_local void @v64i1_shuffle(<64 x i8>* %x, <64 x i8>* %y) "min-legal-vector-width"="256" {
; CHECK-LABEL: v64i1_shuffle:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmovdqa (%rdi), %ymm1
; CHECK-NEXT:    vmovdqa 32(%rdi), %ymm0
; CHECK-NEXT:    vptestnmb %ymm1, %ymm1, %k0
; CHECK-NEXT:    kshiftrd $1, %k0, %k1
; CHECK-NEXT:    kshiftlq $63, %k0, %k2
; CHECK-NEXT:    kshiftrq $62, %k2, %k2
; CHECK-NEXT:    kshiftlq $63, %k1, %k1
; CHECK-NEXT:    kshiftrq $63, %k1, %k1
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-5, %rax
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $3, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $61, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-9, %rax
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $2, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $60, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-17, %rax
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $5, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $59, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-33, %rax
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $4, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $58, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-65, %rax
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $7, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $57, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-129, %rax
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $6, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $56, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-257, %rax # imm = 0xFEFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $9, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $55, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-513, %rax # imm = 0xFDFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $8, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $54, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-1025, %rax # imm = 0xFBFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $11, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $53, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-2049, %rax # imm = 0xF7FF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $10, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $52, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-4097, %rax # imm = 0xEFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $13, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $51, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-8193, %rax # imm = 0xDFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $12, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $50, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-16385, %rax # imm = 0xBFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $15, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $49, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-32769, %rax # imm = 0xFFFF7FFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $14, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $48, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-65537, %rax # imm = 0xFFFEFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $17, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $47, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-131073, %rax # imm = 0xFFFDFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $16, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $46, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-262145, %rax # imm = 0xFFFBFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $19, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $45, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-524289, %rax # imm = 0xFFF7FFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $18, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $44, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-1048577, %rax # imm = 0xFFEFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $21, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $43, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-2097153, %rax # imm = 0xFFDFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $20, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $42, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-4194305, %rax # imm = 0xFFBFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $23, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $41, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-8388609, %rax # imm = 0xFF7FFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $22, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $40, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-16777217, %rax # imm = 0xFEFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $25, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $39, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-33554433, %rax # imm = 0xFDFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $24, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $38, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-67108865, %rax # imm = 0xFBFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $27, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $37, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-134217729, %rax # imm = 0xF7FFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $26, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $36, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-268435457, %rax # imm = 0xEFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $29, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $35, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-536870913, %rax # imm = 0xDFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $28, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $34, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movq $-1073741825, %rax # imm = 0xBFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k1
; CHECK-NEXT:    kshiftrd $31, %k0, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $33, %k2, %k2
; CHECK-NEXT:    korq %k2, %k1, %k1
; CHECK-NEXT:    movabsq $-2147483649, %rax # imm = 0xFFFFFFFF7FFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k1, %k2
; CHECK-NEXT:    vptestnmb %ymm0, %ymm0, %k1
; CHECK-NEXT:    kshiftrd $30, %k0, %k0
; CHECK-NEXT:    kshiftlq $63, %k0, %k0
; CHECK-NEXT:    kshiftrq $32, %k0, %k0
; CHECK-NEXT:    korq %k0, %k2, %k0
; CHECK-NEXT:    movabsq $-4294967297, %rax # imm = 0xFFFFFFFEFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $1, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $31, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-8589934593, %rax # imm = 0xFFFFFFFDFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftlq $63, %k1, %k2
; CHECK-NEXT:    kshiftrq $30, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-17179869185, %rax # imm = 0xFFFFFFFBFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $3, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $29, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-34359738369, %rax # imm = 0xFFFFFFF7FFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $2, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $28, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-68719476737, %rax # imm = 0xFFFFFFEFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $5, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $27, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-137438953473, %rax # imm = 0xFFFFFFDFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $4, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $26, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-274877906945, %rax # imm = 0xFFFFFFBFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $7, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $25, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-549755813889, %rax # imm = 0xFFFFFF7FFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $6, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $24, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-1099511627777, %rax # imm = 0xFFFFFEFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $9, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $23, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-2199023255553, %rax # imm = 0xFFFFFDFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $8, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $22, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-4398046511105, %rax # imm = 0xFFFFFBFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $11, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $21, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-8796093022209, %rax # imm = 0xFFFFF7FFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $10, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $20, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-17592186044417, %rax # imm = 0xFFFFEFFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $13, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $19, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-35184372088833, %rax # imm = 0xFFFFDFFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $12, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $18, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-70368744177665, %rax # imm = 0xFFFFBFFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $15, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $17, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-140737488355329, %rax # imm = 0xFFFF7FFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $14, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $16, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-281474976710657, %rax # imm = 0xFFFEFFFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $17, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $15, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-562949953421313, %rax # imm = 0xFFFDFFFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $16, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $14, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-1125899906842625, %rax # imm = 0xFFFBFFFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $19, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $13, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-2251799813685249, %rax # imm = 0xFFF7FFFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $18, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $12, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-4503599627370497, %rax # imm = 0xFFEFFFFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $21, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $11, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-9007199254740993, %rax # imm = 0xFFDFFFFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $20, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $10, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-18014398509481985, %rax # imm = 0xFFBFFFFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $23, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $9, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-36028797018963969, %rax # imm = 0xFF7FFFFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $22, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $8, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-72057594037927937, %rax # imm = 0xFEFFFFFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $25, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $7, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-144115188075855873, %rax # imm = 0xFDFFFFFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $24, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $6, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-288230376151711745, %rax # imm = 0xFBFFFFFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $27, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $5, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-576460752303423489, %rax # imm = 0xF7FFFFFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $26, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $4, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-1152921504606846977, %rax # imm = 0xEFFFFFFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $29, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $3, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-2305843009213693953, %rax # imm = 0xDFFFFFFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $28, %k1, %k2
; CHECK-NEXT:    kshiftlq $63, %k2, %k2
; CHECK-NEXT:    kshiftrq $2, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    movabsq $-4611686018427387905, %rax # imm = 0xBFFFFFFFFFFFFFFF
; CHECK-NEXT:    kmovq %rax, %k2
; CHECK-NEXT:    kandq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $31, %k1, %k2
; CHECK-NEXT:    kshiftlq $62, %k2, %k2
; CHECK-NEXT:    korq %k2, %k0, %k0
; CHECK-NEXT:    kshiftrd $30, %k1, %k1
; CHECK-NEXT:    kshiftlq $1, %k0, %k0
; CHECK-NEXT:    kshiftrq $1, %k0, %k0
; CHECK-NEXT:    kshiftlq $63, %k1, %k1
; CHECK-NEXT:    korq %k1, %k0, %k1
; CHECK-NEXT:    vmovdqu8 %ymm1, (%rsi) {%k1}
; CHECK-NEXT:    kshiftrq $32, %k1, %k1
; CHECK-NEXT:    vmovdqu8 %ymm0, 32(%rsi) {%k1}
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
entry:
  %a = load <64 x i8>, <64 x i8>* %x
  %b = icmp eq <64 x i8> %a, zeroinitializer
  %shuf = shufflevector <64 x i1> %b, <64 x i1> undef, <64 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14, i32 17, i32 16, i32 19, i32 18, i32 21, i32 20, i32 23, i32 22, i32 25, i32 24, i32 27, i32 26, i32 29, i32 28, i32 31, i32 30, i32 33, i32 32, i32 35, i32 34, i32 37, i32 36, i32 39, i32 38, i32 41, i32 40, i32 43, i32 42, i32 45, i32 44, i32 47, i32 46, i32 49, i32 48, i32 51, i32 50, i32 53, i32 52, i32 55, i32 54, i32 57, i32 56, i32 59, i32 58, i32 61, i32 60, i32 63, i32 62>
  call void @llvm.masked.store.v64i8.p0v64i8(<64 x i8> %a, <64 x i8>* %y, i32 1, <64 x i1> %shuf)
  ret void
}
declare void @llvm.masked.store.v64i8.p0v64i8(<64 x i8>, <64 x i8>*, i32, <64 x i1>)

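; The k-register inline asm constraint below forces a full 64-bit mask value
; to be moved in and out of memory with kmovq.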
@mem64_dst = dso_local global i64 0, align 8
@mem64_src = dso_local global i64 0, align 8
define dso_local i32 @v64i1_inline_asm() "min-legal-vector-width"="256" {
; CHECK-LABEL: v64i1_inline_asm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    kmovq mem64_src(%rip), %k0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    kmovq %k0, mem64_dst(%rip)
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT:    retq
  %1 = alloca i32, align 4
  %2 = load i64, i64* @mem64_src, align 8
  %3 = call i64 asm "", "=k,k,~{dirflag},~{fpsr},~{flags}"(i64 %2)
  store i64 %3, i64* @mem64_dst, align 8
  %4 = load i32, i32* %1, align 4
  ret i32 %4
}

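; A compare whose result is immediately sign/zero extended should use the ymm
; compare that leaves its result in a vector register instead of a k-register.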
define dso_local void @cmp_v8i64_sext(<8 x i64>* %xptr, <8 x i64>* %yptr, <8 x i64>* %zptr) "min-legal-vector-width"="256" {
; CHECK-LABEL: cmp_v8i64_sext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa (%rsi), %ymm0
; CHECK-NEXT:    vmovdqa 32(%rsi), %ymm1
; CHECK-NEXT:    vpcmpgtq 32(%rdi), %ymm1, %ymm1
; CHECK-NEXT:    vpcmpgtq (%rdi), %ymm0, %ymm0
; CHECK-NEXT:    vmovdqa %ymm0, (%rdx)
; CHECK-NEXT:    vmovdqa %ymm1, 32(%rdx)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %x = load <8 x i64>, <8 x i64>* %xptr
  %y = load <8 x i64>, <8 x i64>* %yptr
  %cmp = icmp slt <8 x i64> %x, %y
  %ext = sext <8 x i1> %cmp to <8 x i64>
  store <8 x i64> %ext, <8 x i64>* %zptr
  ret void
}

define dso_local void @cmp_v8i64_zext(<8 x i64>* %xptr, <8 x i64>* %yptr, <8 x i64>* %zptr) "min-legal-vector-width"="256" {
; CHECK-LABEL: cmp_v8i64_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovdqa (%rsi), %ymm0
; CHECK-NEXT:    vmovdqa 32(%rsi), %ymm1
; CHECK-NEXT:    vpcmpgtq 32(%rdi), %ymm1, %ymm1
; CHECK-NEXT:    vpcmpgtq (%rdi), %ymm0, %ymm0
; CHECK-NEXT:    vpsrlq $63, %ymm1, %ymm1
; CHECK-NEXT:    vpsrlq $63, %ymm0, %ymm0
; CHECK-NEXT:    vmovdqa %ymm0, (%rdx)
; CHECK-NEXT:    vmovdqa %ymm1, 32(%rdx)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %x = load <8 x i64>, <8 x i64>* %xptr
  %y = load <8 x i64>, <8 x i64>* %yptr
  %cmp = icmp slt <8 x i64> %x, %y
  %ext = zext <8 x i1> %cmp to <8 x i64>
  store <8 x i64> %ext, <8 x i64>* %zptr
  ret void
}

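; There are no variable byte shifts, so the i8 rotates below unpack to i16,
; shift with vpsllvw (or vpsllw for splat amounts), and repack with
; vpackuswb, staying within 256-bit registers throughout.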
define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind "min-legal-vector-width"="256" {
; CHECK-LABEL: var_rotate_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
; CHECK-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; CHECK-NEXT:    vpsllvw %xmm2, %xmm3, %xmm2
; CHECK-NEXT:    vpsrlw $8, %xmm2, %xmm2
; CHECK-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; CHECK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; CHECK-NEXT:    vpsllvw %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vpsrlw $8, %xmm0, %xmm0
; CHECK-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
; CHECK-NEXT:    retq
  %b8 = sub <16 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %b
  %shl = shl <16 x i8> %a, %b
  %lshr = lshr <16 x i8> %a, %b8
  %or = or <16 x i8> %shl, %lshr
  ret <16 x i8> %or
}

define <32 x i8> @var_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind "min-legal-vector-width"="256" {
; CHECK-LABEL: var_rotate_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm1
; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31]
; CHECK-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; CHECK-NEXT:    vpsllvw %ymm3, %ymm4, %ymm3
; CHECK-NEXT:    vpsrlw $8, %ymm3, %ymm3
; CHECK-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23]
; CHECK-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; CHECK-NEXT:    vpsllvw %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    vpsrlw $8, %ymm0, %ymm0
; CHECK-NEXT:    vpackuswb %ymm3, %ymm0, %ymm0
; CHECK-NEXT:    retq
  %b8 = sub <32 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %b
  %shl = shl <32 x i8> %a, %b
  %lshr = lshr <32 x i8> %a, %b8
  %or = or <32 x i8> %shl, %lshr
  ret <32 x i8> %or
}

define <32 x i8> @splatvar_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind "min-legal-vector-width"="256" {
; CHECK-LABEL: splatvar_rotate_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; CHECK-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; CHECK-NEXT:    vpsllw %xmm1, %ymm2, %ymm2
; CHECK-NEXT:    vpsrlw $8, %ymm2, %ymm2
; CHECK-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; CHECK-NEXT:    vpsllw %xmm1, %ymm0, %ymm0
; CHECK-NEXT:    vpsrlw $8, %ymm0, %ymm0
; CHECK-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
; CHECK-NEXT:    retq
  %splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
  %splat8 = sub <32 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %splat
  %shl = shl <32 x i8> %a, %splat
  %lshr = lshr <32 x i8> %a, %splat8
  %or = or <32 x i8> %shl, %lshr
  ret <32 x i8> %or
}

define <32 x i8> @constant_rotate_v32i8(<32 x i8> %a) nounwind "min-legal-vector-width"="256" {
; CHECK-LABEL: constant_rotate_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; CHECK-NEXT:    vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; CHECK-NEXT:    vpsrlw $8, %ymm1, %ymm1
; CHECK-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; CHECK-NEXT:    vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; CHECK-NEXT:    vpsrlw $8, %ymm0, %ymm0
; CHECK-NEXT:    vpackuswb %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    retq
  %shl = shl <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>
  %lshr = lshr <32 x i8> %a, <i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>
  %or = or <32 x i8> %shl, %lshr
  ret <32 x i8> %or
}

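; With a uniform constant amount, the rotate becomes two immediate shifts
; whose results are merged by a single vpternlogq bitwise select.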
define <32 x i8> @splatconstant_rotate_v32i8(<32 x i8> %a) nounwind "min-legal-vector-width"="256" {
; CHECK-LABEL: splatconstant_rotate_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsllw $4, %ymm0, %ymm1
; CHECK-NEXT:    vpsrlw $4, %ymm0, %ymm0
; CHECK-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm0
; CHECK-NEXT:    retq
  %shl = shl <32 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  %lshr = lshr <32 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  %or = or <32 x i8> %shl, %lshr
  ret <32 x i8> %or
}

define <32 x i8> @splatconstant_rotate_mask_v32i8(<32 x i8> %a) nounwind "min-legal-vector-width"="256" {
; CHECK-LABEL: splatconstant_rotate_mask_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsllw $4, %ymm0, %ymm1
; CHECK-NEXT:    vpsrlw $4, %ymm0, %ymm0
; CHECK-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm0
; CHECK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
; CHECK-NEXT:    retq
  %shl = shl <32 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  %lshr = lshr <32 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  %rmask = and <32 x i8> %lshr, <i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55>
  %lmask = and <32 x i8> %shl, <i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33>
  %or = or <32 x i8> %lmask, %rmask
  ret <32 x i8> %or
}