; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mcpu=skylake-avx512 < %s | FileCheck %s --check-prefix=ALL --check-prefix=SKX
; RUN: llc -mcpu=knl < %s | FileCheck %s --check-prefix=ALL --check-prefix=KNL

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
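
; Test lowering of the llvm.masked.expandload.* and llvm.masked.compressstore.*
; intrinsics to the AVX-512 expand/compress instructions (vexpandps, vpexpandq,
; vcompressps, vpcompressq, ...) on SKX and KNL.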
define <16 x float> @expandload_v16f32_const_undef(float* %base) {
; SKX-LABEL: expandload_v16f32_const_undef:
; SKX:       # %bb.0:
; SKX-NEXT:    movw $-2049, %ax # imm = 0xF7FF
; SKX-NEXT:    kmovd %eax, %k1
; SKX-NEXT:    vexpandps (%rdi), %zmm0 {%k1} {z}
; SKX-NEXT:    retq
;
; KNL-LABEL: expandload_v16f32_const_undef:
; KNL:       # %bb.0:
; KNL-NEXT:    movw $-2049, %ax # imm = 0xF7FF
; KNL-NEXT:    kmovw %eax, %k1
; KNL-NEXT:    vexpandps (%rdi), %zmm0 {%k1} {z}
; KNL-NEXT:    retq
  %res = call <16 x float> @llvm.masked.expandload.v16f32(float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true>, <16 x float> undef)
  ret <16 x float> %res
}

define <16 x float> @expandload_v16f32_const(float* %base, <16 x float> %src0) {
; SKX-LABEL: expandload_v16f32_const:
; SKX:       # %bb.0:
; SKX-NEXT:    movw $30719, %ax # imm = 0x77FF
; SKX-NEXT:    kmovd %eax, %k1
; SKX-NEXT:    vexpandps (%rdi), %zmm0 {%k1}
; SKX-NEXT:    retq
;
; KNL-LABEL: expandload_v16f32_const:
; KNL:       # %bb.0:
; KNL-NEXT:    movw $30719, %ax # imm = 0x77FF
; KNL-NEXT:    kmovw %eax, %k1
; KNL-NEXT:    vexpandps (%rdi), %zmm0 {%k1}
; KNL-NEXT:    retq
  %res = call <16 x float> @llvm.masked.expandload.v16f32(float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <16 x float> %src0)
  ret <16 x float> %res
}

define <8 x double> @expandload_v8f64_v8i1(double* %base, <8 x double> %src0, <8 x i1> %mask) {
; SKX-LABEL: expandload_v8f64_v8i1:
; SKX:       # %bb.0:
; SKX-NEXT:    vpsllw $15, %xmm1, %xmm1
; SKX-NEXT:    vpmovw2m %xmm1, %k1
; SKX-NEXT:    vexpandpd (%rdi), %zmm0 {%k1}
; SKX-NEXT:    retq
;
; KNL-LABEL: expandload_v8f64_v8i1:
; KNL:       # %bb.0:
; KNL-NEXT:    vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
; KNL-NEXT:    vpsllq $63, %zmm1, %zmm1
; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k1
; KNL-NEXT:    vexpandpd (%rdi), %zmm0 {%k1}
; KNL-NEXT:    retq
  %res = call <8 x double> @llvm.masked.expandload.v8f64(double* %base, <8 x i1> %mask, <8 x double> %src0)
  ret <8 x double> %res
}

define <4 x float> @expandload_v4f32_const(float* %base, <4 x float> %src0) {
; SKX-LABEL: expandload_v4f32_const:
; SKX:       # %bb.0:
; SKX-NEXT:    movb $7, %al
; SKX-NEXT:    kmovd %eax, %k1
; SKX-NEXT:    vexpandps (%rdi), %xmm0 {%k1}
; SKX-NEXT:    retq
;
; KNL-LABEL: expandload_v4f32_const:
; KNL:       # %bb.0:
; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT:    movw $7, %ax
; KNL-NEXT:    kmovw %eax, %k1
; KNL-NEXT:    vexpandps (%rdi), %zmm0 {%k1}
; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT:    retq
  %res = call <4 x float> @llvm.masked.expandload.v4f32(float* %base, <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x float> %src0)
  ret <4 x float> %res
}

define <2 x i64> @expandload_v2i64_const(i64* %base, <2 x i64> %src0) {
; SKX-LABEL: expandload_v2i64_const:
; SKX:       # %bb.0:
; SKX-NEXT:    movb $2, %al
; SKX-NEXT:    kmovd %eax, %k1
; SKX-NEXT:    vpexpandq (%rdi), %xmm0 {%k1}
; SKX-NEXT:    retq
;
; KNL-LABEL: expandload_v2i64_const:
; KNL:       # %bb.0:
; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT:    movb $2, %al
; KNL-NEXT:    kmovw %eax, %k1
; KNL-NEXT:    vpexpandq (%rdi), %zmm0 {%k1}
; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT:    retq
  %res = call <2 x i64> @llvm.masked.expandload.v2i64(i64* %base, <2 x i1> <i1 false, i1 true>, <2 x i64> %src0)
  ret <2 x i64> %res
}

declare <16 x float> @llvm.masked.expandload.v16f32(float*, <16 x i1>, <16 x float>)
declare <8 x double> @llvm.masked.expandload.v8f64(double*, <8 x i1>, <8 x double>)
declare <4 x float> @llvm.masked.expandload.v4f32(float*, <4 x i1>, <4 x float>)
declare <2 x i64> @llvm.masked.expandload.v2i64(i64*, <2 x i1>, <2 x i64>)

define void @compressstore_v16f32_const(float* %base, <16 x float> %V) {
; SKX-LABEL: compressstore_v16f32_const:
; SKX:       # %bb.0:
; SKX-NEXT:    movw $-2049, %ax # imm = 0xF7FF
; SKX-NEXT:    kmovd %eax, %k1
; SKX-NEXT:    vcompressps %zmm0, (%rdi) {%k1}
; SKX-NEXT:    vzeroupper
; SKX-NEXT:    retq
;
; KNL-LABEL: compressstore_v16f32_const:
; KNL:       # %bb.0:
; KNL-NEXT:    movw $-2049, %ax # imm = 0xF7FF
; KNL-NEXT:    kmovw %eax, %k1
; KNL-NEXT:    vcompressps %zmm0, (%rdi) {%k1}
; KNL-NEXT:    retq
  call void @llvm.masked.compressstore.v16f32(<16 x float> %V, float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true>)
  ret void
}

define void @compressstore_v8f32_v8i1(float* %base, <8 x float> %V, <8 x i1> %mask) {
; SKX-LABEL: compressstore_v8f32_v8i1:
; SKX:       # %bb.0:
; SKX-NEXT:    vpsllw $15, %xmm1, %xmm1
; SKX-NEXT:    vpmovw2m %xmm1, %k1
; SKX-NEXT:    vcompressps %ymm0, (%rdi) {%k1}
; SKX-NEXT:    vzeroupper
; SKX-NEXT:    retq
;
; KNL-LABEL: compressstore_v8f32_v8i1:
; KNL:       # %bb.0:
; KNL-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT:    vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
; KNL-NEXT:    vpsllq $63, %zmm1, %zmm1
; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k1
; KNL-NEXT:    vcompressps %zmm0, (%rdi) {%k1}
; KNL-NEXT:    retq
  call void @llvm.masked.compressstore.v8f32(<8 x float> %V, float* %base, <8 x i1> %mask)
  ret void
}

define void @compressstore_v8f64_v8i1(double* %base, <8 x double> %V, <8 x i1> %mask) {
; SKX-LABEL: compressstore_v8f64_v8i1:
; SKX:       # %bb.0:
; SKX-NEXT:    vpsllw $15, %xmm1, %xmm1
; SKX-NEXT:    vpmovw2m %xmm1, %k1
; SKX-NEXT:    vcompresspd %zmm0, (%rdi) {%k1}
; SKX-NEXT:    vzeroupper
; SKX-NEXT:    retq
;
; KNL-LABEL: compressstore_v8f64_v8i1:
; KNL:       # %bb.0:
; KNL-NEXT:    vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
; KNL-NEXT:    vpsllq $63, %zmm1, %zmm1
; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k1
; KNL-NEXT:    vcompresspd %zmm0, (%rdi) {%k1}
; KNL-NEXT:    retq
  call void @llvm.masked.compressstore.v8f64(<8 x double> %V, double* %base, <8 x i1> %mask)
  ret void
}

define void @compressstore_v8i64_v8i1(i64* %base, <8 x i64> %V, <8 x i1> %mask) {
; SKX-LABEL: compressstore_v8i64_v8i1:
; SKX:       # %bb.0:
; SKX-NEXT:    vpsllw $15, %xmm1, %xmm1
; SKX-NEXT:    vpmovw2m %xmm1, %k1
; SKX-NEXT:    vpcompressq %zmm0, (%rdi) {%k1}
; SKX-NEXT:    vzeroupper
; SKX-NEXT:    retq
;
; KNL-LABEL: compressstore_v8i64_v8i1:
; KNL:       # %bb.0:
; KNL-NEXT:    vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
; KNL-NEXT:    vpsllq $63, %zmm1, %zmm1
; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k1
; KNL-NEXT:    vpcompressq %zmm0, (%rdi) {%k1}
; KNL-NEXT:    retq
  call void @llvm.masked.compressstore.v8i64(<8 x i64> %V, i64* %base, <8 x i1> %mask)
  ret void
}

define void @compressstore_v4i64_v4i1(i64* %base, <4 x i64> %V, <4 x i1> %mask) {
; SKX-LABEL: compressstore_v4i64_v4i1:
; SKX:       # %bb.0:
; SKX-NEXT:    vpslld $31, %xmm1, %xmm1
; SKX-NEXT:    vpmovd2m %xmm1, %k1
; SKX-NEXT:    vpcompressq %ymm0, (%rdi) {%k1}
; SKX-NEXT:    vzeroupper
; SKX-NEXT:    retq
;
; KNL-LABEL: compressstore_v4i64_v4i1:
; KNL:       # %bb.0:
; KNL-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT:    vpslld $31, %xmm1, %xmm1
; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k0
; KNL-NEXT:    kshiftlw $12, %k0, %k0
; KNL-NEXT:    kshiftrw $12, %k0, %k1
; KNL-NEXT:    vpcompressq %zmm0, (%rdi) {%k1}
; KNL-NEXT:    retq
  call void @llvm.masked.compressstore.v4i64(<4 x i64> %V, i64* %base, <4 x i1> %mask)
  ret void
}

define void @compressstore_v2i64_v2i1(i64* %base, <2 x i64> %V, <2 x i1> %mask) {
; SKX-LABEL: compressstore_v2i64_v2i1:
; SKX:       # %bb.0:
; SKX-NEXT:    vpsllq $63, %xmm1, %xmm1
; SKX-NEXT:    vpmovq2m %xmm1, %k1
; SKX-NEXT:    vpcompressq %xmm0, (%rdi) {%k1}
; SKX-NEXT:    retq
;
; KNL-LABEL: compressstore_v2i64_v2i1:
; KNL:       # %bb.0:
; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT:    vpsllq $63, %xmm1, %xmm1
; KNL-NEXT:    vptestmq %zmm1, %zmm1, %k0
; KNL-NEXT:    kshiftlw $14, %k0, %k0
; KNL-NEXT:    kshiftrw $14, %k0, %k1
; KNL-NEXT:    vpcompressq %zmm0, (%rdi) {%k1}
; KNL-NEXT:    retq
  call void @llvm.masked.compressstore.v2i64(<2 x i64> %V, i64* %base, <2 x i1> %mask)
  ret void
}

define void @compressstore_v4f32_v4i1(float* %base, <4 x float> %V, <4 x i1> %mask) {
; SKX-LABEL: compressstore_v4f32_v4i1:
; SKX:       # %bb.0:
; SKX-NEXT:    vpslld $31, %xmm1, %xmm1
; SKX-NEXT:    vpmovd2m %xmm1, %k1
; SKX-NEXT:    vcompressps %xmm0, (%rdi) {%k1}
; SKX-NEXT:    retq
;
; KNL-LABEL: compressstore_v4f32_v4i1:
; KNL:       # %bb.0:
; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT:    vpslld $31, %xmm1, %xmm1
; KNL-NEXT:    vptestmd %zmm1, %zmm1, %k0
; KNL-NEXT:    kshiftlw $12, %k0, %k0
; KNL-NEXT:    kshiftrw $12, %k0, %k1
; KNL-NEXT:    vcompressps %zmm0, (%rdi) {%k1}
; KNL-NEXT:    retq
  call void @llvm.masked.compressstore.v4f32(<4 x float> %V, float* %base, <4 x i1> %mask)
  ret void
}

define <2 x float> @expandload_v2f32_v2i1(float* %base, <2 x float> %src0, <2 x i32> %trigger) {
; SKX-LABEL: expandload_v2f32_v2i1:
; SKX:       # %bb.0:
; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; SKX-NEXT:    vptestnmq %xmm1, %xmm1, %k1
; SKX-NEXT:    vexpandps (%rdi), %xmm0 {%k1}
; SKX-NEXT:    retq
;
; KNL-LABEL: expandload_v2f32_v2i1:
; KNL:       # %bb.0:
; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; KNL-NEXT:    vptestnmq %zmm1, %zmm1, %k0
; KNL-NEXT:    kshiftlw $14, %k0, %k0
; KNL-NEXT:    kshiftrw $14, %k0, %k1
; KNL-NEXT:    vexpandps (%rdi), %zmm0 {%k1}
; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT:    retq
  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
  %res = call <2 x float> @llvm.masked.expandload.v2f32(float* %base, <2 x i1> %mask, <2 x float> %src0)
  ret <2 x float> %res
}

define void @compressstore_v2f32_v2i32(float* %base, <2 x float> %V, <2 x i32> %trigger) {
; SKX-LABEL: compressstore_v2f32_v2i32:
; SKX:       # %bb.0:
; SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; SKX-NEXT:    vptestnmq %xmm1, %xmm1, %k1
; SKX-NEXT:    vcompressps %xmm0, (%rdi) {%k1}
; SKX-NEXT:    retq
;
; KNL-LABEL: compressstore_v2f32_v2i32:
; KNL:       # %bb.0:
; KNL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; KNL-NEXT:    vptestnmq %zmm1, %zmm1, %k0
; KNL-NEXT:    kshiftlw $14, %k0, %k0
; KNL-NEXT:    kshiftrw $14, %k0, %k1
; KNL-NEXT:    vcompressps %zmm0, (%rdi) {%k1}
; KNL-NEXT:    retq
  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
  call void @llvm.masked.compressstore.v2f32(<2 x float> %V, float* %base, <2 x i1> %mask)
  ret void
}

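; The wide tests below split operations larger than 512 bits into two zmm
; halves: each half gets its own k-register mask, and the second half's memory
; operand is offset by the popcount of the first half's mask, since expanded
; loads and compressed stores touch consecutive memory locations.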
define <32 x float> @expandload_v32f32_v32i32(float* %base, <32 x float> %src0, <32 x i32> %trigger) {
; ALL-LABEL: expandload_v32f32_v32i32:
; ALL:       # %bb.0:
; ALL-NEXT:    vptestnmd %zmm3, %zmm3, %k1
; ALL-NEXT:    vptestnmd %zmm2, %zmm2, %k2
; ALL-NEXT:    kmovw %k2, %eax
; ALL-NEXT:    popcntl %eax, %eax
; ALL-NEXT:    vexpandps (%rdi,%rax,4), %zmm1 {%k1}
; ALL-NEXT:    vexpandps (%rdi), %zmm0 {%k2}
; ALL-NEXT:    retq
  %mask = icmp eq <32 x i32> %trigger, zeroinitializer
  %res = call <32 x float> @llvm.masked.expandload.v32f32(float* %base, <32 x i1> %mask, <32 x float> %src0)
  ret <32 x float> %res
}

define <16 x double> @compressstore_v16f64_v16i32(double* %base, <16 x double> %src0, <16 x i32> %trigger) {
; SKX-LABEL: compressstore_v16f64_v16i32:
; SKX:       # %bb.0:
; SKX-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
; SKX-NEXT:    vptestnmd %ymm3, %ymm3, %k1
; SKX-NEXT:    vptestnmd %ymm2, %ymm2, %k2
; SKX-NEXT:    kmovb %k2, %eax
; SKX-NEXT:    popcntl %eax, %eax
; SKX-NEXT:    vexpandpd (%rdi,%rax,8), %zmm1 {%k1}
; SKX-NEXT:    vexpandpd (%rdi), %zmm0 {%k2}
; SKX-NEXT:    retq
;
; KNL-LABEL: compressstore_v16f64_v16i32:
; KNL:       # %bb.0:
; KNL-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
; KNL-NEXT:    vptestnmd %zmm3, %zmm3, %k1
; KNL-NEXT:    vptestnmd %zmm2, %zmm2, %k2
; KNL-NEXT:    vexpandpd (%rdi), %zmm0 {%k2}
; KNL-NEXT:    kmovw %k2, %eax
; KNL-NEXT:    movzbl %al, %eax
; KNL-NEXT:    popcntl %eax, %eax
; KNL-NEXT:    vexpandpd (%rdi,%rax,8), %zmm1 {%k1}
; KNL-NEXT:    retq
  %mask = icmp eq <16 x i32> %trigger, zeroinitializer
  %res = call <16 x double> @llvm.masked.expandload.v16f64(double* %base, <16 x i1> %mask, <16 x double> %src0)
  ret <16 x double> %res
}

define void @compressstore_v32f32_v32i32(float* %base, <32 x float> %V, <32 x i32> %trigger) {
; SKX-LABEL: compressstore_v32f32_v32i32:
; SKX:       # %bb.0:
; SKX-NEXT:    vptestnmd %zmm3, %zmm3, %k1
; SKX-NEXT:    vptestnmd %zmm2, %zmm2, %k2
; SKX-NEXT:    kmovw %k2, %eax
; SKX-NEXT:    popcntl %eax, %eax
; SKX-NEXT:    vcompressps %zmm1, (%rdi,%rax,4) {%k1}
; SKX-NEXT:    vcompressps %zmm0, (%rdi) {%k2}
; SKX-NEXT:    vzeroupper
; SKX-NEXT:    retq
;
; KNL-LABEL: compressstore_v32f32_v32i32:
; KNL:       # %bb.0:
; KNL-NEXT:    vptestnmd %zmm3, %zmm3, %k1
; KNL-NEXT:    vptestnmd %zmm2, %zmm2, %k2
; KNL-NEXT:    kmovw %k2, %eax
; KNL-NEXT:    popcntl %eax, %eax
; KNL-NEXT:    vcompressps %zmm1, (%rdi,%rax,4) {%k1}
; KNL-NEXT:    vcompressps %zmm0, (%rdi) {%k2}
; KNL-NEXT:    retq
  %mask = icmp eq <32 x i32> %trigger, zeroinitializer
  call void @llvm.masked.compressstore.v32f32(<32 x float> %V, float* %base, <32 x i1> %mask)
  ret void
}

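; Non-constant <16 x i1> mask: the mask is moved into a k-register, the upper
; half is extracted with kshiftrw $8, and the upper half's store address is
; offset by the popcount of the low 8 mask bits.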
define void @compressstore_v16f64_v16i1(double* %base, <16 x double> %V, <16 x i1> %mask) {
; SKX-LABEL: compressstore_v16f64_v16i1:
; SKX:       # %bb.0:
; SKX-NEXT:    vpsllw $7, %xmm2, %xmm2
; SKX-NEXT:    vpmovb2m %xmm2, %k1
; SKX-NEXT:    kshiftrw $8, %k1, %k2
; SKX-NEXT:    kmovb %k1, %eax
; SKX-NEXT:    popcntl %eax, %eax
; SKX-NEXT:    vcompresspd %zmm1, (%rdi,%rax,8) {%k2}
; SKX-NEXT:    vcompresspd %zmm0, (%rdi) {%k1}
; SKX-NEXT:    vzeroupper
; SKX-NEXT:    retq
;
; KNL-LABEL: compressstore_v16f64_v16i1:
; KNL:       # %bb.0:
; KNL-NEXT:    vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
; KNL-NEXT:    vpslld $31, %zmm2, %zmm2
; KNL-NEXT:    vptestmd %zmm2, %zmm2, %k1
; KNL-NEXT:    kshiftrw $8, %k1, %k2
; KNL-NEXT:    vcompresspd %zmm0, (%rdi) {%k1}
; KNL-NEXT:    kmovw %k1, %eax
; KNL-NEXT:    movzbl %al, %eax
; KNL-NEXT:    popcntl %eax, %eax
; KNL-NEXT:    vcompresspd %zmm1, (%rdi,%rax,8) {%k2}
; KNL-NEXT:    retq
  call void @llvm.masked.compressstore.v16f64(<16 x double> %V, double* %base, <16 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v16f32(<16 x float>, float* , <16 x i1>)
declare void @llvm.masked.compressstore.v8f32(<8 x float>, float* , <8 x i1>)
declare void @llvm.masked.compressstore.v8f64(<8 x double>, double* , <8 x i1>)
declare void @llvm.masked.compressstore.v16i32(<16 x i32>, i32* , <16 x i1>)
declare void @llvm.masked.compressstore.v8i32(<8 x i32>, i32* , <8 x i1>)
declare void @llvm.masked.compressstore.v8i64(<8 x i64>, i64* , <8 x i1>)
declare void @llvm.masked.compressstore.v4i32(<4 x i32>, i32* , <4 x i1>)
declare void @llvm.masked.compressstore.v4f32(<4 x float>, float* , <4 x i1>)
declare void @llvm.masked.compressstore.v4i64(<4 x i64>, i64* , <4 x i1>)
declare void @llvm.masked.compressstore.v2i64(<2 x i64>, i64* , <2 x i1>)
declare void @llvm.masked.compressstore.v2f32(<2 x float>, float* , <2 x i1>)
declare void @llvm.masked.compressstore.v32f32(<32 x float>, float* , <32 x i1>)
declare void @llvm.masked.compressstore.v16f64(<16 x double>, double* , <16 x i1>)
declare void @llvm.masked.compressstore.v32f64(<32 x double>, double* , <32 x i1>)

declare <2 x float> @llvm.masked.expandload.v2f32(float* , <2 x i1> , <2 x float> )
declare <32 x float> @llvm.masked.expandload.v32f32(float* , <32 x i1> , <32 x float> )
declare <16 x double> @llvm.masked.expandload.v16f64(double* , <16 x i1> , <16 x double> )