; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
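
; Each test loads a mask vector from %m_ptr, compares it against zero to
; materialize the <N x i1> mask, performs the masked load from %a, and stores
; the result to %res_ptr.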

define void @masked_load_v1i8(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    vse8.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <1 x i8>, ptr %m_ptr
  %mask = icmp eq <1 x i8> %m, zeroinitializer
  %load = call <1 x i8> @llvm.masked.load.v1i8(ptr %a, i32 8, <1 x i1> %mask, <1 x i8> undef)
  store <1 x i8> %load, ptr %res_ptr
  ret void
}
declare <1 x i8> @llvm.masked.load.v1i8(ptr, i32, <1 x i1>, <1 x i8>)

define void @masked_load_v1i16(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    vse16.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <1 x i16>, ptr %m_ptr
  %mask = icmp eq <1 x i16> %m, zeroinitializer
  %load = call <1 x i16> @llvm.masked.load.v1i16(ptr %a, i32 8, <1 x i1> %mask, <1 x i16> undef)
  store <1 x i16> %load, ptr %res_ptr
  ret void
}
declare <1 x i16> @llvm.masked.load.v1i16(ptr, i32, <1 x i1>, <1 x i16>)

define void @masked_load_v1i32(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    vse32.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <1 x i32>, ptr %m_ptr
  %mask = icmp eq <1 x i32> %m, zeroinitializer
  %load = call <1 x i32> @llvm.masked.load.v1i32(ptr %a, i32 8, <1 x i1> %mask, <1 x i32> undef)
  store <1 x i32> %load, ptr %res_ptr
  ret void
}
declare <1 x i32> @llvm.masked.load.v1i32(ptr, i32, <1 x i1>, <1 x i32>)

define void @masked_load_v1i64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    vse64.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <1 x i64>, ptr %m_ptr
  %mask = icmp eq <1 x i64> %m, zeroinitializer
  %load = call <1 x i64> @llvm.masked.load.v1i64(ptr %a, i32 8, <1 x i1> %mask, <1 x i64> undef)
  store <1 x i64> %load, ptr %res_ptr
  ret void
}
declare <1 x i64> @llvm.masked.load.v1i64(ptr, i32, <1 x i1>, <1 x i64>)

define void @masked_load_v2i8(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    vse8.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <2 x i8>, ptr %m_ptr
  %mask = icmp eq <2 x i8> %m, zeroinitializer
  %load = call <2 x i8> @llvm.masked.load.v2i8(ptr %a, i32 8, <2 x i1> %mask, <2 x i8> undef)
  store <2 x i8> %load, ptr %res_ptr
  ret void
}
declare <2 x i8> @llvm.masked.load.v2i8(ptr, i32, <2 x i1>, <2 x i8>)

define void @masked_load_v2i16(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    vse16.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <2 x i16>, ptr %m_ptr
  %mask = icmp eq <2 x i16> %m, zeroinitializer
  %load = call <2 x i16> @llvm.masked.load.v2i16(ptr %a, i32 8, <2 x i1> %mask, <2 x i16> undef)
  store <2 x i16> %load, ptr %res_ptr
  ret void
}
declare <2 x i16> @llvm.masked.load.v2i16(ptr, i32, <2 x i1>, <2 x i16>)

define void @masked_load_v2i32(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    vse32.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <2 x i32>, ptr %m_ptr
  %mask = icmp eq <2 x i32> %m, zeroinitializer
  %load = call <2 x i32> @llvm.masked.load.v2i32(ptr %a, i32 8, <2 x i1> %mask, <2 x i32> undef)
  store <2 x i32> %load, ptr %res_ptr
  ret void
}
declare <2 x i32> @llvm.masked.load.v2i32(ptr, i32, <2 x i1>, <2 x i32>)

define void @masked_load_v2i64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    vse64.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <2 x i64>, ptr %m_ptr
  %mask = icmp eq <2 x i64> %m, zeroinitializer
  %load = call <2 x i64> @llvm.masked.load.v2i64(ptr %a, i32 8, <2 x i1> %mask, <2 x i64> undef)
  store <2 x i64> %load, ptr %res_ptr
  ret void
}
declare <2 x i64> @llvm.masked.load.v2i64(ptr, i32, <2 x i1>, <2 x i64>)

define void @masked_load_v4i8(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    vse8.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <4 x i8>, ptr %m_ptr
  %mask = icmp eq <4 x i8> %m, zeroinitializer
  %load = call <4 x i8> @llvm.masked.load.v4i8(ptr %a, i32 8, <4 x i1> %mask, <4 x i8> undef)
  store <4 x i8> %load, ptr %res_ptr
  ret void
}
declare <4 x i8> @llvm.masked.load.v4i8(ptr, i32, <4 x i1>, <4 x i8>)

define void @masked_load_v4i16(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    vse16.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <4 x i16>, ptr %m_ptr
  %mask = icmp eq <4 x i16> %m, zeroinitializer
  %load = call <4 x i16> @llvm.masked.load.v4i16(ptr %a, i32 8, <4 x i1> %mask, <4 x i16> undef)
  store <4 x i16> %load, ptr %res_ptr
  ret void
}
declare <4 x i16> @llvm.masked.load.v4i16(ptr, i32, <4 x i1>, <4 x i16>)

define void @masked_load_v4i32(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    vse32.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <4 x i32>, ptr %m_ptr
  %mask = icmp eq <4 x i32> %m, zeroinitializer
  %load = call <4 x i32> @llvm.masked.load.v4i32(ptr %a, i32 8, <4 x i1> %mask, <4 x i32> undef)
  store <4 x i32> %load, ptr %res_ptr
  ret void
}
declare <4 x i32> @llvm.masked.load.v4i32(ptr, i32, <4 x i1>, <4 x i32>)

define void @masked_load_v4i64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    vse64.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <4 x i64>, ptr %m_ptr
  %mask = icmp eq <4 x i64> %m, zeroinitializer
  %load = call <4 x i64> @llvm.masked.load.v4i64(ptr %a, i32 8, <4 x i1> %mask, <4 x i64> undef)
  store <4 x i64> %load, ptr %res_ptr
  ret void
}
declare <4 x i64> @llvm.masked.load.v4i64(ptr, i32, <4 x i1>, <4 x i64>)

define void @masked_load_v8i8(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    vse8.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <8 x i8>, ptr %m_ptr
  %mask = icmp eq <8 x i8> %m, zeroinitializer
  %load = call <8 x i8> @llvm.masked.load.v8i8(ptr %a, i32 8, <8 x i1> %mask, <8 x i8> undef)
  store <8 x i8> %load, ptr %res_ptr
  ret void
}
declare <8 x i8> @llvm.masked.load.v8i8(ptr, i32, <8 x i1>, <8 x i8>)

define void @masked_load_v8i16(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    vse16.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <8 x i16>, ptr %m_ptr
  %mask = icmp eq <8 x i16> %m, zeroinitializer
  %load = call <8 x i16> @llvm.masked.load.v8i16(ptr %a, i32 8, <8 x i1> %mask, <8 x i16> undef)
  store <8 x i16> %load, ptr %res_ptr
  ret void
}
declare <8 x i16> @llvm.masked.load.v8i16(ptr, i32, <8 x i1>, <8 x i16>)

define void @masked_load_v8i32(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    vse32.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <8 x i32>, ptr %m_ptr
  %mask = icmp eq <8 x i32> %m, zeroinitializer
  %load = call <8 x i32> @llvm.masked.load.v8i32(ptr %a, i32 8, <8 x i1> %mask, <8 x i32> undef)
  store <8 x i32> %load, ptr %res_ptr
  ret void
}
declare <8 x i32> @llvm.masked.load.v8i32(ptr, i32, <8 x i1>, <8 x i32>)

define void @masked_load_v8i64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    vse64.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <8 x i64>, ptr %m_ptr
  %mask = icmp eq <8 x i64> %m, zeroinitializer
  %load = call <8 x i64> @llvm.masked.load.v8i64(ptr %a, i32 8, <8 x i1> %mask, <8 x i64> undef)
  store <8 x i64> %load, ptr %res_ptr
  ret void
}
declare <8 x i64> @llvm.masked.load.v8i64(ptr, i32, <8 x i1>, <8 x i64>)

define void @masked_load_v16i8(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    vse8.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <16 x i8>, ptr %m_ptr
  %mask = icmp eq <16 x i8> %m, zeroinitializer
  %load = call <16 x i8> @llvm.masked.load.v16i8(ptr %a, i32 8, <16 x i1> %mask, <16 x i8> undef)
  store <16 x i8> %load, ptr %res_ptr
  ret void
}
declare <16 x i8> @llvm.masked.load.v16i8(ptr, i32, <16 x i1>, <16 x i8>)

define void @masked_load_v16i16(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    vse16.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <16 x i16>, ptr %m_ptr
  %mask = icmp eq <16 x i16> %m, zeroinitializer
  %load = call <16 x i16> @llvm.masked.load.v16i16(ptr %a, i32 8, <16 x i1> %mask, <16 x i16> undef)
  store <16 x i16> %load, ptr %res_ptr
  ret void
}
declare <16 x i16> @llvm.masked.load.v16i16(ptr, i32, <16 x i1>, <16 x i16>)

define void @masked_load_v16i32(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    vse32.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <16 x i32>, ptr %m_ptr
  %mask = icmp eq <16 x i32> %m, zeroinitializer
  %load = call <16 x i32> @llvm.masked.load.v16i32(ptr %a, i32 8, <16 x i1> %mask, <16 x i32> undef)
  store <16 x i32> %load, ptr %res_ptr
  ret void
}
declare <16 x i32> @llvm.masked.load.v16i32(ptr, i32, <16 x i1>, <16 x i32>)

define void @masked_load_v16i64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    vse64.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <16 x i64>, ptr %m_ptr
  %mask = icmp eq <16 x i64> %m, zeroinitializer
  %load = call <16 x i64> @llvm.masked.load.v16i64(ptr %a, i32 8, <16 x i1> %mask, <16 x i64> undef)
  store <16 x i64> %load, ptr %res_ptr
  ret void
}
declare <16 x i64> @llvm.masked.load.v16i64(ptr, i32, <16 x i1>, <16 x i64>)

define void @masked_load_v32i8(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    vse8.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <32 x i8>, ptr %m_ptr
  %mask = icmp eq <32 x i8> %m, zeroinitializer
  %load = call <32 x i8> @llvm.masked.load.v32i8(ptr %a, i32 8, <32 x i1> %mask, <32 x i8> undef)
  store <32 x i8> %load, ptr %res_ptr
  ret void
}
declare <32 x i8> @llvm.masked.load.v32i8(ptr, i32, <32 x i1>, <32 x i8>)

define void @masked_load_v32i16(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    vse16.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <32 x i16>, ptr %m_ptr
  %mask = icmp eq <32 x i16> %m, zeroinitializer
  %load = call <32 x i16> @llvm.masked.load.v32i16(ptr %a, i32 8, <32 x i1> %mask, <32 x i16> undef)
  store <32 x i16> %load, ptr %res_ptr
  ret void
}
declare <32 x i16> @llvm.masked.load.v32i16(ptr, i32, <32 x i1>, <32 x i16>)

define void @masked_load_v32i32(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    vse32.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <32 x i32>, ptr %m_ptr
  %mask = icmp eq <32 x i32> %m, zeroinitializer
  %load = call <32 x i32> @llvm.masked.load.v32i32(ptr %a, i32 8, <32 x i1> %mask, <32 x i32> undef)
  store <32 x i32> %load, ptr %res_ptr
  ret void
}
declare <32 x i32> @llvm.masked.load.v32i32(ptr, i32, <32 x i1>, <32 x i32>)

define void @masked_load_v32i64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; RV32-LABEL: masked_load_v32i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi a3, a1, 128
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vle64.v v16, (a3)
; RV32-NEXT:    vle64.v v0, (a1)
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; RV32-NEXT:    vmv.v.i v24, 0
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vmseq.vv v8, v0, v24
; RV32-NEXT:    vmseq.vv v0, v16, v24
; RV32-NEXT:    addi a1, a0, 128
; RV32-NEXT:    vle64.v v16, (a1), v0.t
; RV32-NEXT:    vmv1r.v v0, v8
; RV32-NEXT:    vle64.v v8, (a0), v0.t
; RV32-NEXT:    vse64.v v8, (a2)
; RV32-NEXT:    addi a0, a2, 128
; RV32-NEXT:    vse64.v v16, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: masked_load_v32i64:
; RV64:       # %bb.0:
; RV64-NEXT:    addi a3, a1, 128
; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT:    vle64.v v16, (a1)
; RV64-NEXT:    vle64.v v24, (a3)
; RV64-NEXT:    vmseq.vi v8, v16, 0
; RV64-NEXT:    vmseq.vi v0, v24, 0
; RV64-NEXT:    addi a1, a0, 128
; RV64-NEXT:    vle64.v v16, (a1), v0.t
; RV64-NEXT:    vmv1r.v v0, v8
; RV64-NEXT:    vle64.v v8, (a0), v0.t
; RV64-NEXT:    vse64.v v8, (a2)
; RV64-NEXT:    addi a0, a2, 128
; RV64-NEXT:    vse64.v v16, (a0)
; RV64-NEXT:    ret
  %m = load <32 x i64>, ptr %m_ptr
  %mask = icmp eq <32 x i64> %m, zeroinitializer
  %load = call <32 x i64> @llvm.masked.load.v32i64(ptr %a, i32 8, <32 x i1> %mask, <32 x i64> undef)
  store <32 x i64> %load, ptr %res_ptr
  ret void
}
declare <32 x i64> @llvm.masked.load.v32i64(ptr, i32, <32 x i1>, <32 x i64>)

define void @masked_load_v64i8(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 64
; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    vse8.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <64 x i8>, ptr %m_ptr
  %mask = icmp eq <64 x i8> %m, zeroinitializer
  %load = call <64 x i8> @llvm.masked.load.v64i8(ptr %a, i32 8, <64 x i1> %mask, <64 x i8> undef)
  store <64 x i8> %load, ptr %res_ptr
  ret void
}
declare <64 x i8> @llvm.masked.load.v64i8(ptr, i32, <64 x i1>, <64 x i8>)

define void @masked_load_v64i16(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v64i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 64
; CHECK-NEXT:    vsetvli zero, a3, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    vse16.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <64 x i16>, ptr %m_ptr
  %mask = icmp eq <64 x i16> %m, zeroinitializer
  %load = call <64 x i16> @llvm.masked.load.v64i16(ptr %a, i32 8, <64 x i1> %mask, <64 x i16> undef)
  store <64 x i16> %load, ptr %res_ptr
  ret void
}
declare <64 x i16> @llvm.masked.load.v64i16(ptr, i32, <64 x i1>, <64 x i16>)

define void @masked_load_v64i32(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v64i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a3, a1, 128
; CHECK-NEXT:    li a4, 32
; CHECK-NEXT:    vsetvli zero, a4, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v16, (a1)
; CHECK-NEXT:    vle32.v v24, (a3)
; CHECK-NEXT:    vmseq.vi v8, v16, 0
; CHECK-NEXT:    vmseq.vi v0, v24, 0
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    vle32.v v16, (a1), v0.t
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    vse32.v v8, (a2)
; CHECK-NEXT:    addi a0, a2, 128
; CHECK-NEXT:    vse32.v v16, (a0)
; CHECK-NEXT:    ret
  %m = load <64 x i32>, ptr %m_ptr
  %mask = icmp eq <64 x i32> %m, zeroinitializer
  %load = call <64 x i32> @llvm.masked.load.v64i32(ptr %a, i32 8, <64 x i1> %mask, <64 x i32> undef)
  store <64 x i32> %load, ptr %res_ptr
  ret void
}
declare <64 x i32> @llvm.masked.load.v64i32(ptr, i32, <64 x i1>, <64 x i32>)

define void @masked_load_v128i8(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 128
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    vse8.v v8, (a2)
; CHECK-NEXT:    ret
  %m = load <128 x i8>, ptr %m_ptr
  %mask = icmp eq <128 x i8> %m, zeroinitializer
  %load = call <128 x i8> @llvm.masked.load.v128i8(ptr %a, i32 8, <128 x i1> %mask, <128 x i8> undef)
  store <128 x i8> %load, ptr %res_ptr
  ret void
}
declare <128 x i8> @llvm.masked.load.v128i8(ptr, i32, <128 x i1>, <128 x i8>)

define void @masked_load_v256i8(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v256i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a3, a1, 128
; CHECK-NEXT:    li a4, 128
; CHECK-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v16, (a1)
; CHECK-NEXT:    vle8.v v24, (a3)
; CHECK-NEXT:    vmseq.vi v8, v16, 0
; CHECK-NEXT:    vmseq.vi v0, v24, 0
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    vle8.v v16, (a1), v0.t
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    vse8.v v8, (a2)
; CHECK-NEXT:    addi a0, a2, 128
; CHECK-NEXT:    vse8.v v16, (a0)
; CHECK-NEXT:    ret
  %m = load <256 x i8>, ptr %m_ptr
  %mask = icmp eq <256 x i8> %m, zeroinitializer
  %load = call <256 x i8> @llvm.masked.load.v256i8(ptr %a, i32 8, <256 x i1> %mask, <256 x i8> undef)
  store <256 x i8> %load, ptr %res_ptr
  ret void
}
declare <256 x i8> @llvm.masked.load.v256i8(ptr, i32, <256 x i1>, <256 x i8>)