; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zvfbfmin,+v \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zvfbfmin,+v \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+zvfbfmin,+v \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0(ptr, <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @vpload_nxv1i8(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %load
}

define <vscale x 1 x i8> @vpload_nxv1i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i8_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %load
}

define <vscale x 1 x i8> @vpload_nxv1i8_passthru(ptr %ptr, <vscale x 1 x i1> %m, <vscale x 1 x i8> %passthru, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i8_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  %merge = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %load, <vscale x 1 x i8> %passthru, i32 %evl)
  ret <vscale x 1 x i8> %merge
}
declare <vscale x 2 x i8> @llvm.vp.load.nxv2i8.p0(ptr, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vpload_nxv2i8(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.vp.load.nxv2i8.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %load
}

declare <vscale x 3 x i8> @llvm.vp.load.nxv3i8.p0(ptr, <vscale x 3 x i1>, i32)

define <vscale x 3 x i8> @vpload_nxv3i8(ptr %ptr, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv3i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 3 x i8> @llvm.vp.load.nxv3i8.p0(ptr %ptr, <vscale x 3 x i1> %m, i32 %evl)
  ret <vscale x 3 x i8> %load
}
declare <vscale x 4 x i6> @llvm.vp.load.nxv4i6.p0(ptr, <vscale x 4 x i1>, i32)

define <vscale x 4 x i6> @vpload_nxv4i6(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4i6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i6> @llvm.vp.load.nxv4i6.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i6> %load
}
declare <vscale x 4 x i8> @llvm.vp.load.nxv4i8.p0(ptr, <vscale x 4 x i1>, i32)

define <vscale x 4 x i8> @vpload_nxv4i8(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i8> @llvm.vp.load.nxv4i8.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %load
}

declare <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr, <vscale x 8 x i1>, i32)

define <vscale x 8 x i8> @vpload_nxv8i8(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %load
}

define <vscale x 8 x i8> @vpload_nxv8i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8i8_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %load
}
declare <vscale x 1 x i16> @llvm.vp.load.nxv1i16.p0(ptr, <vscale x 1 x i1>, i32)

define <vscale x 1 x i16> @vpload_nxv1i16(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i16> @llvm.vp.load.nxv1i16.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %load
}

declare <vscale x 2 x i16> @llvm.vp.load.nxv2i16.p0(ptr, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vpload_nxv2i16(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i16> @llvm.vp.load.nxv2i16.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %load
}

define <vscale x 2 x i16> @vpload_nxv2i16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2i16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i16> @llvm.vp.load.nxv2i16.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %load
}

declare <vscale x 4 x i16> @llvm.vp.load.nxv4i16.p0(ptr, <vscale x 4 x i1>, i32)

define <vscale x 4 x i16> @vpload_nxv4i16(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i16> @llvm.vp.load.nxv4i16.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %load
}

declare <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr, <vscale x 8 x i1>, i32)

define <vscale x 8 x i16> @vpload_nxv8i16(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %load
}
declare <vscale x 1 x i32> @llvm.vp.load.nxv1i32.p0(ptr, <vscale x 1 x i1>, i32)

define <vscale x 1 x i32> @vpload_nxv1i32(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i32> @llvm.vp.load.nxv1i32.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %load
}

declare <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @vpload_nxv2i32(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %load
}

declare <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr, <vscale x 4 x i1>, i32)

define <vscale x 4 x i32> @vpload_nxv4i32(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %load
}

define <vscale x 4 x i32> @vpload_nxv4i32_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4i32_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %load
}

declare <vscale x 8 x i32> @llvm.vp.load.nxv8i32.p0(ptr, <vscale x 8 x i1>, i32)

define <vscale x 8 x i32> @vpload_nxv8i32(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i32> @llvm.vp.load.nxv8i32.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %load
}
declare <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr, <vscale x 1 x i1>, i32)

define <vscale x 1 x i64> @vpload_nxv1i64(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %load
}

define <vscale x 1 x i64> @vpload_nxv1i64_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i64_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %load
}

declare <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr, <vscale x 2 x i1>, i32)

define <vscale x 2 x i64> @vpload_nxv2i64(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %load
}

declare <vscale x 4 x i64> @llvm.vp.load.nxv4i64.p0(ptr, <vscale x 4 x i1>, i32)

define <vscale x 4 x i64> @vpload_nxv4i64(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i64> @llvm.vp.load.nxv4i64.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %load
}

declare <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0(ptr, <vscale x 8 x i1>, i32)

define <vscale x 8 x i64> @vpload_nxv8i64(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %load
}
declare <vscale x 1 x bfloat> @llvm.vp.load.nxv1bf16.p0(ptr, <vscale x 1 x i1>, i32)

define <vscale x 1 x bfloat> @vpload_nxv1bf16(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x bfloat> @llvm.vp.load.nxv1bf16.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x bfloat> %load
}

declare <vscale x 2 x bfloat> @llvm.vp.load.nxv2bf16.p0(ptr, <vscale x 2 x i1>, i32)

define <vscale x 2 x bfloat> @vpload_nxv2bf16(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x bfloat> @llvm.vp.load.nxv2bf16.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x bfloat> %load
}

define <vscale x 2 x bfloat> @vpload_nxv2bf16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2bf16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x bfloat> @llvm.vp.load.nxv2bf16.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x bfloat> %load
}

declare <vscale x 4 x bfloat> @llvm.vp.load.nxv4bf16.p0(ptr, <vscale x 4 x i1>, i32)

define <vscale x 4 x bfloat> @vpload_nxv4bf16(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x bfloat> @llvm.vp.load.nxv4bf16.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x bfloat> %load
}

declare <vscale x 8 x bfloat> @llvm.vp.load.nxv8bf16.p0(ptr, <vscale x 8 x i1>, i32)

define <vscale x 8 x bfloat> @vpload_nxv8bf16(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x bfloat> @llvm.vp.load.nxv8bf16.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x bfloat> %load
}
declare <vscale x 1 x half> @llvm.vp.load.nxv1f16.p0(ptr, <vscale x 1 x i1>, i32)

define <vscale x 1 x half> @vpload_nxv1f16(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x half> @llvm.vp.load.nxv1f16.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x half> %load
}

declare <vscale x 2 x half> @llvm.vp.load.nxv2f16.p0(ptr, <vscale x 2 x i1>, i32)

define <vscale x 2 x half> @vpload_nxv2f16(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x half> @llvm.vp.load.nxv2f16.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x half> %load
}

define <vscale x 2 x half> @vpload_nxv2f16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2f16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x half> @llvm.vp.load.nxv2f16.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x half> %load
}

declare <vscale x 4 x half> @llvm.vp.load.nxv4f16.p0(ptr, <vscale x 4 x i1>, i32)

define <vscale x 4 x half> @vpload_nxv4f16(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x half> @llvm.vp.load.nxv4f16.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x half> %load
}

declare <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr, <vscale x 8 x i1>, i32)

define <vscale x 8 x half> @vpload_nxv8f16(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x half> %load
}
declare <vscale x 1 x float> @llvm.vp.load.nxv1f32.p0(ptr, <vscale x 1 x i1>, i32)

define <vscale x 1 x float> @vpload_nxv1f32(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x float> @llvm.vp.load.nxv1f32.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x float> %load
}

declare <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0(ptr, <vscale x 2 x i1>, i32)

define <vscale x 2 x float> @vpload_nxv2f32(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x float> %load
}

declare <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr, <vscale x 4 x i1>, i32)

define <vscale x 4 x float> @vpload_nxv4f32(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x float> %load
}

declare <vscale x 8 x float> @llvm.vp.load.nxv8f32.p0(ptr, <vscale x 8 x i1>, i32)

define <vscale x 8 x float> @vpload_nxv8f32(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x float> @llvm.vp.load.nxv8f32.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x float> %load
}

define <vscale x 8 x float> @vpload_nxv8f32_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8f32_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x float> @llvm.vp.load.nxv8f32.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x float> %load
}
declare <vscale x 1 x double> @llvm.vp.load.nxv1f64.p0(ptr, <vscale x 1 x i1>, i32)

define <vscale x 1 x double> @vpload_nxv1f64(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x double> @llvm.vp.load.nxv1f64.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x double> %load
}

declare <vscale x 2 x double> @llvm.vp.load.nxv2f64.p0(ptr, <vscale x 2 x i1>, i32)

define <vscale x 2 x double> @vpload_nxv2f64(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x double> @llvm.vp.load.nxv2f64.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x double> %load
}

declare <vscale x 4 x double> @llvm.vp.load.nxv4f64.p0(ptr, <vscale x 4 x i1>, i32)

define <vscale x 4 x double> @vpload_nxv4f64(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x double> @llvm.vp.load.nxv4f64.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x double> %load
}

define <vscale x 4 x double> @vpload_nxv4f64_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4f64_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x double> @llvm.vp.load.nxv4f64.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x double> %load
}

declare <vscale x 8 x double> @llvm.vp.load.nxv8f64.p0(ptr, <vscale x 8 x i1>, i32)

define <vscale x 8 x double> @vpload_nxv8f64(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x double> @llvm.vp.load.nxv8f64.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x double> %load
}
declare <vscale x 16 x double> @llvm.vp.load.nxv16f64.p0(ptr, <vscale x 16 x i1>, i32)
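; Sketch of the expected lowering, inferred from the assertions below (not
; autogenerated): the result needs two LMUL=8 register groups, so the load is
; split in two m8 parts. The low part uses min(%evl, vlenb) elements; the high
; part uses the remainder clamped to zero, an address offset of vlenb * 8
; bytes, and the mask slid down by vlenb / 8 bytes.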
define <vscale x 16 x double> @vpload_nxv16f64(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v0
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    sub a3, a1, a2
; CHECK-NEXT:    slli a4, a2, 3
; CHECK-NEXT:    srli a5, a2, 3
; CHECK-NEXT:    vsetvli a6, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v0, a5
; CHECK-NEXT:    sltu a5, a1, a3
; CHECK-NEXT:    addi a5, a5, -1
; CHECK-NEXT:    and a3, a5, a3
; CHECK-NEXT:    add a4, a0, a4
; CHECK-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v16, (a4), v0.t
; CHECK-NEXT:    bltu a1, a2, .LBB44_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a1, a2
; CHECK-NEXT:  .LBB44_2:
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 16 x double> @llvm.vp.load.nxv16f64.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x double> %load
}
declare <vscale x 17 x double> @llvm.vp.load.nxv17f64.p0(ptr, <vscale x 17 x i1>, i32)

declare <vscale x 1 x double> @llvm.vector.extract.nxv1f64(<vscale x 17 x double> %vec, i64 %idx)
declare <vscale x 16 x double> @llvm.vector.extract.nxv16f64(<vscale x 17 x double> %vec, i64 %idx)

; Note: We can't return <vscale x 17 x double> as that introduces a vector
; store that can't yet be legalized through widening. In order to test purely
; the vp.load legalization, manually split it.

; Widen to nxv32f64 then split into 4 x nxv8f64, of which 1 is empty.
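; Sketch of the split, inferred from the assertions below: each non-empty
; nxv8f64 part loads at most vlenb e64 elements, the per-part EVLs are derived
; from %evl via the subtract-and-clamp (sltu/addi/and) sequences, and the
; <vscale x 1 x double> tail %hi lands in the third part, so a single vs1r.v
; stores it out through %out.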
define <vscale x 16 x double> @vpload_nxv17f64(ptr %ptr, ptr %out, <vscale x 17 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv17f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v0
; CHECK-NEXT:    csrr a3, vlenb
; CHECK-NEXT:    slli a5, a3, 1
; CHECK-NEXT:    mv a4, a2
; CHECK-NEXT:    bltu a2, a5, .LBB45_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a4, a5
; CHECK-NEXT:  .LBB45_2:
; CHECK-NEXT:    sub a6, a4, a3
; CHECK-NEXT:    slli a7, a3, 3
; CHECK-NEXT:    srli t0, a3, 3
; CHECK-NEXT:    sub a5, a2, a5
; CHECK-NEXT:    vsetvli t1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v8, t0
; CHECK-NEXT:    sltu t0, a4, a6
; CHECK-NEXT:    add a7, a0, a7
; CHECK-NEXT:    addi t0, t0, -1
; CHECK-NEXT:    and a6, t0, a6
; CHECK-NEXT:    vsetvli zero, a6, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v16, (a7), v0.t
; CHECK-NEXT:    sltu a2, a2, a5
; CHECK-NEXT:    addi a2, a2, -1
; CHECK-NEXT:    and a2, a2, a5
; CHECK-NEXT:    bltu a2, a3, .LBB45_4
; CHECK-NEXT:  # %bb.3:
; CHECK-NEXT:    mv a2, a3
; CHECK-NEXT:  .LBB45_4:
; CHECK-NEXT:    slli a5, a3, 4
; CHECK-NEXT:    srli a6, a3, 2
; CHECK-NEXT:    vsetvli a7, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v8, a6
; CHECK-NEXT:    add a5, a0, a5
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v24, (a5), v0.t
; CHECK-NEXT:    bltu a4, a3, .LBB45_6
; CHECK-NEXT:  # %bb.5:
; CHECK-NEXT:    mv a4, a3
; CHECK-NEXT:  .LBB45_6:
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    vs1r.v v24, (a1)
; CHECK-NEXT:    ret
  %load = call <vscale x 17 x double> @llvm.vp.load.nxv17f64.p0(ptr %ptr, <vscale x 17 x i1> %m, i32 %evl)
  %lo = call <vscale x 16 x double> @llvm.vector.extract.nxv16f64(<vscale x 17 x double> %load, i64 0)
  %hi = call <vscale x 1 x double> @llvm.vector.extract.nxv1f64(<vscale x 17 x double> %load, i64 16)
  store <vscale x 1 x double> %hi, ptr %out
  ret <vscale x 16 x double> %lo
}
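; With an all-ones mask and an EVL of vscale x 8 (VLMAX for e8/m1), the
; vp.load is expected to fold into a plain whole-register load, so no vsetvli
; is emitted.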
define <vscale x 8 x i8> @vpload_all_active_nxv8i8(ptr %ptr) {
; CHECK-LABEL: vpload_all_active_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1r.v v8, (a0)
; CHECK-NEXT:    ret
  %vscale = call i32 @llvm.vscale()
  %evl = mul i32 %vscale, 8
  %load = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %load
}