; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

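; Masked loads of scalable integer vectors should lower to a single vsetvli
; plus a masked vle{8,16,32,64}.v, with the mask in v0 and the LMUL implied
; by the element type and count (mf8 through m8).
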
define <vscale x 1 x i8> @masked_load_nxv1i8(ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i8> @llvm.masked.load.nxv1i8(ptr %a, i32 1, <vscale x 1 x i1> %mask, <vscale x 1 x i8> undef)
  ret <vscale x 1 x i8> %load
}
declare <vscale x 1 x i8> @llvm.masked.load.nxv1i8(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x i8>)

define <vscale x 1 x i16> @masked_load_nxv1i16(ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i16> @llvm.masked.load.nxv1i16(ptr %a, i32 2, <vscale x 1 x i1> %mask, <vscale x 1 x i16> undef)
  ret <vscale x 1 x i16> %load
}
declare <vscale x 1 x i16> @llvm.masked.load.nxv1i16(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x i16>)

define <vscale x 1 x i32> @masked_load_nxv1i32(ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i32> @llvm.masked.load.nxv1i32(ptr %a, i32 4, <vscale x 1 x i1> %mask, <vscale x 1 x i32> undef)
  ret <vscale x 1 x i32> %load
}
declare <vscale x 1 x i32> @llvm.masked.load.nxv1i32(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x i32>)

define <vscale x 1 x i64> @masked_load_nxv1i64(ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i64> @llvm.masked.load.nxv1i64(ptr %a, i32 8, <vscale x 1 x i1> %mask, <vscale x 1 x i64> undef)
  ret <vscale x 1 x i64> %load
}
declare <vscale x 1 x i64> @llvm.masked.load.nxv1i64(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x i64>)

define <vscale x 2 x i8> @masked_load_nxv2i8(ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
  ret <vscale x 2 x i8> %load
}
declare <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)

define <vscale x 2 x i16> @masked_load_nxv2i16(ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
  ret <vscale x 2 x i16> %load
}
declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)

define <vscale x 2 x i32> @masked_load_nxv2i32(ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %a, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
  ret <vscale x 2 x i32> %load
}
declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)

define <vscale x 2 x i64> @masked_load_nxv2i64(ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
  ret <vscale x 2 x i64> %load
}
declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)

define <vscale x 4 x i8> @masked_load_nxv4i8(ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
  ret <vscale x 4 x i8> %load
}
declare <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)

define <vscale x 4 x i16> @masked_load_nxv4i16(ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
  ret <vscale x 4 x i16> %load
}
declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)

define <vscale x 4 x i32> @masked_load_nxv4i32(ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  ret <vscale x 4 x i32> %load
}
declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)

define <vscale x 4 x i64> @masked_load_nxv4i64(ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64(ptr %a, i32 8, <vscale x 4 x i1> %mask, <vscale x 4 x i64> undef)
  ret <vscale x 4 x i64> %load
}
declare <vscale x 4 x i64> @llvm.masked.load.nxv4i64(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i64>)

define <vscale x 8 x i8> @masked_load_nxv8i8(ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %a, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
  ret <vscale x 8 x i8> %load
}
declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)

define <vscale x 8 x i16> @masked_load_nxv8i16(ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
  ret <vscale x 8 x i16> %load
}
declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)

define <vscale x 8 x i32> @masked_load_nxv8i32(ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32(ptr %a, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x i32> undef)
  ret <vscale x 8 x i32> %load
}
declare <vscale x 8 x i32> @llvm.masked.load.nxv8i32(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i32>)

define <vscale x 8 x i64> @masked_load_nxv8i64(ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64(ptr %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x i64> undef)
  ret <vscale x 8 x i64> %load
}
declare <vscale x 8 x i64> @llvm.masked.load.nxv8i64(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i64>)

define <vscale x 16 x i8> @masked_load_nxv16i8(ptr %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %a, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
  ret <vscale x 16 x i8> %load
}
declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)

define <vscale x 16 x i16> @masked_load_nxv16i16(ptr %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 16 x i16> @llvm.masked.load.nxv16i16(ptr %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i16> undef)
  ret <vscale x 16 x i16> %load
}
declare <vscale x 16 x i16> @llvm.masked.load.nxv16i16(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i16>)

define <vscale x 16 x i32> @masked_load_nxv16i32(ptr %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 16 x i32> @llvm.masked.load.nxv16i32(ptr %a, i32 4, <vscale x 16 x i1> %mask, <vscale x 16 x i32> undef)
  ret <vscale x 16 x i32> %load
}
declare <vscale x 16 x i32> @llvm.masked.load.nxv16i32(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i32>)

define <vscale x 32 x i8> @masked_load_nxv32i8(ptr %a, <vscale x 32 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(ptr %a, i32 1, <vscale x 32 x i1> %mask, <vscale x 32 x i8> undef)
  ret <vscale x 32 x i8> %load
}
declare <vscale x 32 x i8> @llvm.masked.load.nxv32i8(ptr, i32, <vscale x 32 x i1>, <vscale x 32 x i8>)

define <vscale x 32 x i16> @masked_load_nxv32i16(ptr %a, <vscale x 32 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 32 x i16> @llvm.masked.load.nxv32i16(ptr %a, i32 2, <vscale x 32 x i1> %mask, <vscale x 32 x i16> undef)
  ret <vscale x 32 x i16> %load
}
declare <vscale x 32 x i16> @llvm.masked.load.nxv32i16(ptr, i32, <vscale x 32 x i1>, <vscale x 32 x i16>)

define <vscale x 64 x i8> @masked_load_nxv64i8(ptr %a, <vscale x 64 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8(ptr %a, i32 1, <vscale x 64 x i1> %mask, <vscale x 64 x i8> undef)
  ret <vscale x 64 x i8> %load
}
declare <vscale x 64 x i8> @llvm.masked.load.nxv64i8(ptr, i32, <vscale x 64 x i1>, <vscale x 64 x i8>)

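; A zeroinitializer mask disables every lane, so the whole load folds away
; and only the ret remains.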
define <vscale x 2 x i8> @masked_load_zero_mask(ptr %a) nounwind {
; CHECK-LABEL: masked_load_zero_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %a, i32 1, <vscale x 2 x i1> zeroinitializer, <vscale x 2 x i8> undef)
  ret <vscale x 2 x i8> %load
}

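; An all-ones mask makes the passthru operand dead, so this lowers to a
; plain unmasked vle8.v.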
define <vscale x 2 x i8> @masked_load_allones_mask(ptr %a, <vscale x 2 x i8> %maskedoff) nounwind {
; CHECK-LABEL: masked_load_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %a, i32 1, <vscale x 2 x i1> splat (i1 1), <vscale x 2 x i8> %maskedoff)
  ret <vscale x 2 x i8> %load
}