; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx512f,avx512bw,avx512vl < %s | FileCheck %s

; Skylake-avx512 target supports masked load/store for i8 and i16 vectors
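; In each test the <N x i1> mask arrives in a vector register: codegen moves
; each i1 into its element's sign bit with vpsllw and converts the result to
; a mask register with vpmovb2m/vpmovw2m before the masked load or store.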

define <16 x i8> @test_mask_load_16xi8(<16 x i1> %mask, ptr %addr, <16 x i8> %val) {
; CHECK-LABEL: test_mask_load_16xi8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %xmm0, %k1
; CHECK-NEXT: vmovdqu8 (%rdi), %xmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %addr, i32 4, <16 x i1> %mask, <16 x i8> undef)
  ret <16 x i8> %res
}
declare <16 x i8> @llvm.masked.load.v16i8.p0(ptr, i32, <16 x i1>, <16 x i8>)
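
; When the passthru operand is a real vector rather than undef, the masked
; load is expected to lower to a masked blend (vpblendmb/vpblendmw) of the
; loaded data with the passthru value.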

define <32 x i8> @test_mask_load_32xi8(<32 x i1> %mask, ptr %addr, <32 x i8> %val) {
; CHECK-LABEL: test_mask_load_32xi8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
; CHECK-NEXT: vpblendmb (%rdi), %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
  %res = call <32 x i8> @llvm.masked.load.v32i8.p0(ptr %addr, i32 4, <32 x i1> %mask, <32 x i8> %val)
  ret <32 x i8> %res
}
declare <32 x i8> @llvm.masked.load.v32i8.p0(ptr, i32, <32 x i1>, <32 x i8>)

define <64 x i8> @test_mask_load_64xi8(<64 x i1> %mask, ptr %addr, <64 x i8> %val) {
; CHECK-LABEL: test_mask_load_64xi8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %zmm0, %zmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k1
; CHECK-NEXT: vpblendmb (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
  %res = call <64 x i8> @llvm.masked.load.v64i8.p0(ptr %addr, i32 4, <64 x i1> %mask, <64 x i8> %val)
  ret <64 x i8> %res
}
declare <64 x i8> @llvm.masked.load.v64i8.p0(ptr, i32, <64 x i1>, <64 x i8>)

define <8 x i16> @test_mask_load_8xi16(<8 x i1> %mask, ptr %addr, <8 x i16> %val) {
; CHECK-LABEL: test_mask_load_8xi16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $15, %xmm0, %xmm0
; CHECK-NEXT: vpmovw2m %xmm0, %k1
; CHECK-NEXT: vmovdqu16 (%rdi), %xmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %addr, i32 4, <8 x i1> %mask, <8 x i16> undef)
  ret <8 x i16> %res
}
declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32, <8 x i1>, <8 x i16>)

define <16 x i16> @test_mask_load_16xi16(<16 x i1> %mask, ptr %addr, <16 x i16> %val) {
; CHECK-LABEL: test_mask_load_16xi16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %xmm0, %k1
; CHECK-NEXT: vmovdqu16 (%rdi), %ymm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <16 x i16> @llvm.masked.load.v16i16.p0(ptr %addr, i32 4, <16 x i1> %mask, <16 x i16> zeroinitializer)
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.masked.load.v16i16.p0(ptr, i32, <16 x i1>, <16 x i16>)

define <32 x i16> @test_mask_load_32xi16(<32 x i1> %mask, ptr %addr, <32 x i16> %val) {
; CHECK-LABEL: test_mask_load_32xi16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
; CHECK-NEXT: vpblendmw (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
  %res = call <32 x i16> @llvm.masked.load.v32i16.p0(ptr %addr, i32 4, <32 x i1> %mask, <32 x i16> %val)
  ret <32 x i16> %res
}
declare <32 x i16> @llvm.masked.load.v32i16.p0(ptr, i32, <32 x i1>, <32 x i16>)
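
; Masked stores use the same mask-materialization sequence and write the value
; with a masked vmovdqu8/vmovdqu16; the tests whose values live in ymm/zmm
; registers also expect a trailing vzeroupper.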

define void @test_mask_store_16xi8(<16 x i1> %mask, ptr %addr, <16 x i8> %val) {
; CHECK-LABEL: test_mask_store_16xi8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %xmm0, %k1
; CHECK-NEXT: vmovdqu8 %xmm1, (%rdi) {%k1}
; CHECK-NEXT: retq
  call void @llvm.masked.store.v16i8.p0(<16 x i8> %val, ptr %addr, i32 4, <16 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.v16i8.p0(<16 x i8>, ptr, i32, <16 x i1>)

define void @test_mask_store_32xi8(<32 x i1> %mask, ptr %addr, <32 x i8> %val) {
; CHECK-LABEL: test_mask_store_32xi8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
; CHECK-NEXT: vmovdqu8 %ymm1, (%rdi) {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
  call void @llvm.masked.store.v32i8.p0(<32 x i8> %val, ptr %addr, i32 4, <32 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.v32i8.p0(<32 x i8>, ptr, i32, <32 x i1>)

define void @test_mask_store_64xi8(<64 x i1> %mask, ptr %addr, <64 x i8> %val) {
; CHECK-LABEL: test_mask_store_64xi8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %zmm0, %zmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k1
; CHECK-NEXT: vmovdqu8 %zmm1, (%rdi) {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
  call void @llvm.masked.store.v64i8.p0(<64 x i8> %val, ptr %addr, i32 4, <64 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.v64i8.p0(<64 x i8>, ptr, i32, <64 x i1>)

define void @test_mask_store_8xi16(<8 x i1> %mask, ptr %addr, <8 x i16> %val) {
; CHECK-LABEL: test_mask_store_8xi16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $15, %xmm0, %xmm0
; CHECK-NEXT: vpmovw2m %xmm0, %k1
; CHECK-NEXT: vmovdqu16 %xmm1, (%rdi) {%k1}
; CHECK-NEXT: retq
  call void @llvm.masked.store.v8i16.p0(<8 x i16> %val, ptr %addr, i32 4, <8 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32, <8 x i1>)

define void @test_mask_store_16xi16(<16 x i1> %mask, ptr %addr, <16 x i16> %val) {
; CHECK-LABEL: test_mask_store_16xi16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %xmm0, %k1
; CHECK-NEXT: vmovdqu16 %ymm1, (%rdi) {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
  call void @llvm.masked.store.v16i16.p0(<16 x i16> %val, ptr %addr, i32 4, <16 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.v16i16.p0(<16 x i16>, ptr, i32, <16 x i1>)

define void @test_mask_store_32xi16(<32 x i1> %mask, ptr %addr, <32 x i16> %val) {
; CHECK-LABEL: test_mask_store_32xi16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
; CHECK-NEXT: vmovdqu16 %zmm1, (%rdi) {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
  call void @llvm.masked.store.v32i16.p0(<32 x i16> %val, ptr %addr, i32 4, <32 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.v32i16.p0(<32 x i16>, ptr, i32, <32 x i1>)

; Make sure we can lower masked loads of f16; with avx512bw they use a masked
; vmovdqu16 rather than being scalarized.
define <16 x half> @test_mask_load_16xf16(<16 x i1> %mask, ptr %addr) {
; CHECK-LABEL: test_mask_load_16xf16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %xmm0, %k1
; CHECK-NEXT: vmovdqu16 (%rdi), %ymm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <16 x half> @llvm.masked.load.v16f16.p0(ptr %addr, i32 4, <16 x i1> %mask, <16 x half> zeroinitializer)
  ret <16 x half> %res
}
declare <16 x half> @llvm.masked.load.v16f16.p0(ptr, i32, <16 x i1>, <16 x half>)

; Make sure we can lower masked stores of f16; with avx512bw they use a masked
; vmovdqu16 rather than being scalarized.
define void @test_mask_store_16xf16(<16 x i1> %mask, ptr %addr, <16 x half> %val) {
; CHECK-LABEL: test_mask_store_16xf16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %xmm0, %k1
; CHECK-NEXT: vmovdqu16 %ymm1, (%rdi) {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
  call void @llvm.masked.store.v16f16.p0(<16 x half> %val, ptr %addr, i32 4, <16 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.v16f16.p0(<16 x half>, ptr, i32, <16 x i1>)