; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
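
; The vector codegen is expected to be identical on riscv32 and riscv64, so
; both RUN lines share the default CHECK prefix.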

declare <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0(ptr, <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @vpload_nxv1i8(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %load
}
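
; An all-ones mask is expected to fold to an unmasked load: no v0.t operand
; on the vle8.v below.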
define <vscale x 1 x i8> @vpload_nxv1i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i8_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %load
}
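
; vp.merge of the loaded value with a passthru is expected to fold into the
; load, switching the policy to tail-undisturbed, mask-undisturbed (tu, mu)
; so masked-off lanes keep the passthru values.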
define <vscale x 1 x i8> @vpload_nxv1i8_passthru(ptr %ptr, <vscale x 1 x i1> %m, <vscale x 1 x i8> %passthru, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i8_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  %merge = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %load, <vscale x 1 x i8> %passthru, i32 %evl)
  ret <vscale x 1 x i8> %merge
}

declare <vscale x 2 x i8> @llvm.vp.load.nxv2i8.p0(ptr, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vpload_nxv2i8(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.vp.load.nxv2i8.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %load
}
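
; nxv3i8 is not a legal type; it is expected to be widened to nxv4i8 and so
; shares the e8/mf2 vtype with the nxv4i8 test below.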
declare <vscale x 3 x i8> @llvm.vp.load.nxv3i8.p0(ptr, <vscale x 3 x i1>, i32)

define <vscale x 3 x i8> @vpload_nxv3i8(ptr %ptr, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv3i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 3 x i8> @llvm.vp.load.nxv3i8.p0(ptr %ptr, <vscale x 3 x i1> %m, i32 %evl)
  ret <vscale x 3 x i8> %load
}

declare <vscale x 4 x i8> @llvm.vp.load.nxv4i8.p0(ptr, <vscale x 4 x i1>, i32)

define <vscale x 4 x i8> @vpload_nxv4i8(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i8> @llvm.vp.load.nxv4i8.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %load
}

declare <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr, <vscale x 8 x i1>, i32)

define <vscale x 8 x i8> @vpload_nxv8i8(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %load
}

define <vscale x 8 x i8> @vpload_nxv8i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8i8_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %load
}

declare <vscale x 1 x i16> @llvm.vp.load.nxv1i16.p0(ptr, <vscale x 1 x i1>, i32)

define <vscale x 1 x i16> @vpload_nxv1i16(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i16> @llvm.vp.load.nxv1i16.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %load
}

declare <vscale x 2 x i16> @llvm.vp.load.nxv2i16.p0(ptr, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vpload_nxv2i16(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i16> @llvm.vp.load.nxv2i16.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %load
}

define <vscale x 2 x i16> @vpload_nxv2i16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2i16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i16> @llvm.vp.load.nxv2i16.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %load
}

declare <vscale x 4 x i16> @llvm.vp.load.nxv4i16.p0(ptr, <vscale x 4 x i1>, i32)

define <vscale x 4 x i16> @vpload_nxv4i16(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i16> @llvm.vp.load.nxv4i16.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %load
}

declare <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr, <vscale x 8 x i1>, i32)

define <vscale x 8 x i16> @vpload_nxv8i16(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %load
}

declare <vscale x 1 x i32> @llvm.vp.load.nxv1i32.p0(ptr, <vscale x 1 x i1>, i32)

define <vscale x 1 x i32> @vpload_nxv1i32(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i32> @llvm.vp.load.nxv1i32.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %load
}

declare <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @vpload_nxv2i32(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %load
}

declare <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr, <vscale x 4 x i1>, i32)

define <vscale x 4 x i32> @vpload_nxv4i32(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %load
}

define <vscale x 4 x i32> @vpload_nxv4i32_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4i32_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %load
}

declare <vscale x 8 x i32> @llvm.vp.load.nxv8i32.p0(ptr, <vscale x 8 x i1>, i32)

define <vscale x 8 x i32> @vpload_nxv8i32(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i32> @llvm.vp.load.nxv8i32.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %load
}

declare <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr, <vscale x 1 x i1>, i32)

define <vscale x 1 x i64> @vpload_nxv1i64(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %load
}

define <vscale x 1 x i64> @vpload_nxv1i64_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i64_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %load
}

declare <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr, <vscale x 2 x i1>, i32)

define <vscale x 2 x i64> @vpload_nxv2i64(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %load
}

declare <vscale x 4 x i64> @llvm.vp.load.nxv4i64.p0(ptr, <vscale x 4 x i1>, i32)

define <vscale x 4 x i64> @vpload_nxv4i64(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i64> @llvm.vp.load.nxv4i64.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %load
}

declare <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0(ptr, <vscale x 8 x i1>, i32)

define <vscale x 8 x i64> @vpload_nxv8i64(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %load
}

declare <vscale x 1 x half> @llvm.vp.load.nxv1f16.p0(ptr, <vscale x 1 x i1>, i32)

define <vscale x 1 x half> @vpload_nxv1f16(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x half> @llvm.vp.load.nxv1f16.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x half> %load
}

declare <vscale x 2 x half> @llvm.vp.load.nxv2f16.p0(ptr, <vscale x 2 x i1>, i32)

define <vscale x 2 x half> @vpload_nxv2f16(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x half> @llvm.vp.load.nxv2f16.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x half> %load
}

define <vscale x 2 x half> @vpload_nxv2f16_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2f16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x half> @llvm.vp.load.nxv2f16.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x half> %load
}

declare <vscale x 4 x half> @llvm.vp.load.nxv4f16.p0(ptr, <vscale x 4 x i1>, i32)

define <vscale x 4 x half> @vpload_nxv4f16(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x half> @llvm.vp.load.nxv4f16.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x half> %load
}

declare <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr, <vscale x 8 x i1>, i32)

define <vscale x 8 x half> @vpload_nxv8f16(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x half> %load
}

declare <vscale x 1 x float> @llvm.vp.load.nxv1f32.p0(ptr, <vscale x 1 x i1>, i32)

define <vscale x 1 x float> @vpload_nxv1f32(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x float> @llvm.vp.load.nxv1f32.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x float> %load
}

declare <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0(ptr, <vscale x 2 x i1>, i32)

define <vscale x 2 x float> @vpload_nxv2f32(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x float> %load
}

declare <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr, <vscale x 4 x i1>, i32)

define <vscale x 4 x float> @vpload_nxv4f32(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x float> %load
}

declare <vscale x 8 x float> @llvm.vp.load.nxv8f32.p0(ptr, <vscale x 8 x i1>, i32)

define <vscale x 8 x float> @vpload_nxv8f32(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x float> @llvm.vp.load.nxv8f32.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x float> %load
}

define <vscale x 8 x float> @vpload_nxv8f32_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8f32_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x float> @llvm.vp.load.nxv8f32.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x float> %load
}

declare <vscale x 1 x double> @llvm.vp.load.nxv1f64.p0(ptr, <vscale x 1 x i1>, i32)

define <vscale x 1 x double> @vpload_nxv1f64(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x double> @llvm.vp.load.nxv1f64.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x double> %load
}

declare <vscale x 2 x double> @llvm.vp.load.nxv2f64.p0(ptr, <vscale x 2 x i1>, i32)

define <vscale x 2 x double> @vpload_nxv2f64(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x double> @llvm.vp.load.nxv2f64.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x double> %load
}

declare <vscale x 4 x double> @llvm.vp.load.nxv4f64.p0(ptr, <vscale x 4 x i1>, i32)

define <vscale x 4 x double> @vpload_nxv4f64(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x double> @llvm.vp.load.nxv4f64.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x double> %load
}

define <vscale x 4 x double> @vpload_nxv4f64_allones_mask(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4f64_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x double> @llvm.vp.load.nxv4f64.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x double> %load
}

declare <vscale x 8 x double> @llvm.vp.load.nxv8f64.p0(ptr, <vscale x 8 x i1>, i32)

define <vscale x 8 x double> @vpload_nxv8f64(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x double> @llvm.vp.load.nxv8f64.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x double> %load
}

declare <vscale x 16 x double> @llvm.vp.load.nxv16f64.p0(ptr, <vscale x 16 x i1>, i32)
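
; nxv16f64 needs LMUL 16, which exceeds the maximum of 8, so the load is
; expected to be split into two m8 halves of vlenb elements each: the low
; half's EVL is min(evl, vlenb) (the bltu clamp) and the high half's EVL is
; usubsat(evl, vlenb) (the branchless sub/sltu/addi/and sequence).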
define <vscale x 16 x double> @vpload_nxv16f64(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v0
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    sub a3, a1, a2
; CHECK-NEXT:    sltu a4, a1, a3
; CHECK-NEXT:    addi a4, a4, -1
; CHECK-NEXT:    and a3, a4, a3
; CHECK-NEXT:    slli a4, a2, 3
; CHECK-NEXT:    srli a5, a2, 3
; CHECK-NEXT:    vsetvli a6, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v0, a5
; CHECK-NEXT:    add a4, a0, a4
; CHECK-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v16, (a4), v0.t
; CHECK-NEXT:    bltu a1, a2, .LBB38_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a1, a2
; CHECK-NEXT:  .LBB38_2:
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 16 x double> @llvm.vp.load.nxv16f64.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x double> %load
}

declare <vscale x 17 x double> @llvm.vp.load.nxv17f64.p0(ptr, <vscale x 17 x i1>, i32)

declare <vscale x 1 x double> @llvm.vector.extract.nxv1f64(<vscale x 17 x double> %vec, i64 %idx)
declare <vscale x 16 x double> @llvm.vector.extract.nxv16f64(<vscale x 17 x double> %vec, i64 %idx)

; Note: We can't return <vscale x 17 x double>, as that would introduce a
; vector store that can't yet be legalized through widening. In order to test
; purely the vp.load legalization, split it manually.

; Widen to nxv32f64 then split into 4 x nxv8f64, of which one is empty.
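
; Sketch of the expected EVL arithmetic (assuming one m8 group of f64 holds
; vlenb elements): with evl4 = min(evl, 2*vlenb),
;   part 0 gets EVL min(evl4, vlenb)
;   part 1 gets EVL usubsat(evl4, vlenb)
;   part 2 gets EVL min(usubsat(evl, 2*vlenb), vlenb)
; and the fourth nxv8f64 part is never loaded.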

define <vscale x 16 x double> @vpload_nxv17f64(ptr %ptr, ptr %out, <vscale x 17 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv17f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a3, vlenb
; CHECK-NEXT:    slli a5, a3, 1
; CHECK-NEXT:    vmv1r.v v8, v0
; CHECK-NEXT:    mv a4, a2
; CHECK-NEXT:    bltu a2, a5, .LBB39_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a4, a5
; CHECK-NEXT:  .LBB39_2:
; CHECK-NEXT:    sub a6, a4, a3
; CHECK-NEXT:    sltu a7, a4, a6
; CHECK-NEXT:    addi a7, a7, -1
; CHECK-NEXT:    and a6, a7, a6
; CHECK-NEXT:    slli a7, a3, 3
; CHECK-NEXT:    srli t0, a3, 3
; CHECK-NEXT:    vsetvli t1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v8, t0
; CHECK-NEXT:    add a7, a0, a7
; CHECK-NEXT:    vsetvli zero, a6, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v16, (a7), v0.t
; CHECK-NEXT:    sub a5, a2, a5
; CHECK-NEXT:    sltu a2, a2, a5
; CHECK-NEXT:    addi a2, a2, -1
; CHECK-NEXT:    and a2, a2, a5
; CHECK-NEXT:    bltu a2, a3, .LBB39_4
; CHECK-NEXT:  # %bb.3:
; CHECK-NEXT:    mv a2, a3
; CHECK-NEXT:  .LBB39_4:
; CHECK-NEXT:    slli a5, a3, 4
; CHECK-NEXT:    srli a6, a3, 2
; CHECK-NEXT:    vsetvli a7, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v8, a6
; CHECK-NEXT:    add a5, a0, a5
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v24, (a5), v0.t
; CHECK-NEXT:    bltu a4, a3, .LBB39_6
; CHECK-NEXT:  # %bb.5:
; CHECK-NEXT:    mv a4, a3
; CHECK-NEXT:  .LBB39_6:
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    vs1r.v v24, (a1)
; CHECK-NEXT:    ret
  %load = call <vscale x 17 x double> @llvm.vp.load.nxv17f64.p0(ptr %ptr, <vscale x 17 x i1> %m, i32 %evl)
  %lo = call <vscale x 16 x double> @llvm.vector.extract.nxv16f64(<vscale x 17 x double> %load, i64 0)
  %hi = call <vscale x 1 x double> @llvm.vector.extract.nxv1f64(<vscale x 17 x double> %load, i64 16)
  store <vscale x 1 x double> %hi, ptr %out
  ret <vscale x 16 x double> %lo
}