; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) {
  ; RV32-LABEL: name: vload_nx1i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx1i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 1 x i8>, ptr %pa
  ret <vscale x 1 x i8> %va
}

define <vscale x 2 x i8> @vload_nx2i8(ptr %pa) {
  ; RV32-LABEL: name: vload_nx2i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx2i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 2 x i8>, ptr %pa
  ret <vscale x 2 x i8> %va
}

define <vscale x 4 x i8> @vload_nx4i8(ptr %pa) {
  ; RV32-LABEL: name: vload_nx4i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx4i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 4 x i8>, ptr %pa
  ret <vscale x 4 x i8> %va
}

define <vscale x 8 x i8> @vload_nx8i8(ptr %pa) {
  ; RV32-LABEL: name: vload_nx8i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx8i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 8 x i8>, ptr %pa
  ret <vscale x 8 x i8> %va
}

define <vscale x 16 x i8> @vload_nx16i8(ptr %pa) {
  ; RV32-LABEL: name: vload_nx16i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
  ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
  ; RV32-NEXT: PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: vload_nx16i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
  ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
  ; RV64-NEXT: PseudoRET implicit $v8m2
  %va = load <vscale x 16 x i8>, ptr %pa
  ret <vscale x 16 x i8> %va
}

define <vscale x 32 x i8> @vload_nx32i8(ptr %pa) {
  ; RV32-LABEL: name: vload_nx32i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
  ; RV32-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
  ; RV32-NEXT: PseudoRET implicit $v8m4
  ;
  ; RV64-LABEL: name: vload_nx32i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
  ; RV64-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
  ; RV64-NEXT: PseudoRET implicit $v8m4
  %va = load <vscale x 32 x i8>, ptr %pa
  ret <vscale x 32 x i8> %va
}

define <vscale x 64 x i8> @vload_nx64i8(ptr %pa) {
  ; RV32-LABEL: name: vload_nx64i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
  ; RV32-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
  ; RV32-NEXT: PseudoRET implicit $v8m8
  ;
  ; RV64-LABEL: name: vload_nx64i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
  ; RV64-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
  ; RV64-NEXT: PseudoRET implicit $v8m8
  %va = load <vscale x 64 x i8>, ptr %pa
  ret <vscale x 64 x i8> %va
}

define <vscale x 1 x i16> @vload_nx1i16(ptr %pa) {
  ; RV32-LABEL: name: vload_nx1i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx1i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 1 x i16>, ptr %pa
  ret <vscale x 1 x i16> %va
}

define <vscale x 2 x i16> @vload_nx2i16(ptr %pa) {
  ; RV32-LABEL: name: vload_nx2i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx2i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 2 x i16>, ptr %pa
  ret <vscale x 2 x i16> %va
}

define <vscale x 4 x i16> @vload_nx4i16(ptr %pa) {
  ; RV32-LABEL: name: vload_nx4i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx4i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 4 x i16>, ptr %pa
  ret <vscale x 4 x i16> %va
}

define <vscale x 8 x i16> @vload_nx8i16(ptr %pa) {
  ; RV32-LABEL: name: vload_nx8i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
  ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
  ; RV32-NEXT: PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: vload_nx8i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
  ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
  ; RV64-NEXT: PseudoRET implicit $v8m2
  %va = load <vscale x 8 x i16>, ptr %pa
  ret <vscale x 8 x i16> %va
}

define <vscale x 16 x i16> @vload_nx16i16(ptr %pa) {
  ; RV32-LABEL: name: vload_nx16i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
  ; RV32-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
  ; RV32-NEXT: PseudoRET implicit $v8m4
  ;
  ; RV64-LABEL: name: vload_nx16i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
  ; RV64-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
  ; RV64-NEXT: PseudoRET implicit $v8m4
  %va = load <vscale x 16 x i16>, ptr %pa
  ret <vscale x 16 x i16> %va
}

define <vscale x 32 x i16> @vload_nx32i16(ptr %pa) {
  ; RV32-LABEL: name: vload_nx32i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
  ; RV32-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
  ; RV32-NEXT: PseudoRET implicit $v8m8
  ;
  ; RV64-LABEL: name: vload_nx32i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
  ; RV64-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
  ; RV64-NEXT: PseudoRET implicit $v8m8
  %va = load <vscale x 32 x i16>, ptr %pa
  ret <vscale x 32 x i16> %va
}

define <vscale x 1 x i32> @vload_nx1i32(ptr %pa) {
  ; RV32-LABEL: name: vload_nx1i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx1i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 1 x i32>, ptr %pa
  ret <vscale x 1 x i32> %va
}

define <vscale x 2 x i32> @vload_nx2i32(ptr %pa) {
  ; RV32-LABEL: name: vload_nx2i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx2i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 2 x i32>, ptr %pa
  ret <vscale x 2 x i32> %va
}

define <vscale x 4 x i32> @vload_nx4i32(ptr %pa) {
  ; RV32-LABEL: name: vload_nx4i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
  ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
  ; RV32-NEXT: PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: vload_nx4i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
  ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
  ; RV64-NEXT: PseudoRET implicit $v8m2
  %va = load <vscale x 4 x i32>, ptr %pa
  ret <vscale x 4 x i32> %va
}

define <vscale x 8 x i32> @vload_nx8i32(ptr %pa) {
  ; RV32-LABEL: name: vload_nx8i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
  ; RV32-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
  ; RV32-NEXT: PseudoRET implicit $v8m4
  ;
  ; RV64-LABEL: name: vload_nx8i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
  ; RV64-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
  ; RV64-NEXT: PseudoRET implicit $v8m4
  %va = load <vscale x 8 x i32>, ptr %pa
  ret <vscale x 8 x i32> %va
}

define <vscale x 16 x i32> @vload_nx16i32(ptr %pa) {
  ; RV32-LABEL: name: vload_nx16i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
  ; RV32-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
  ; RV32-NEXT: PseudoRET implicit $v8m8
  ;
  ; RV64-LABEL: name: vload_nx16i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
  ; RV64-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
  ; RV64-NEXT: PseudoRET implicit $v8m8
  %va = load <vscale x 16 x i32>, ptr %pa
  ret <vscale x 16 x i32> %va
}

define <vscale x 1 x i64> @vload_nx1i64(ptr %pa) {
  ; RV32-LABEL: name: vload_nx1i64
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx1i64
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 1 x i64>, ptr %pa
  ret <vscale x 1 x i64> %va
}

define <vscale x 2 x i64> @vload_nx2i64(ptr %pa) {
  ; RV32-LABEL: name: vload_nx2i64
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
  ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
  ; RV32-NEXT: PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: vload_nx2i64
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
  ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
  ; RV64-NEXT: PseudoRET implicit $v8m2
  %va = load <vscale x 2 x i64>, ptr %pa
  ret <vscale x 2 x i64> %va
}

define <vscale x 4 x i64> @vload_nx4i64(ptr %pa) {
  ; RV32-LABEL: name: vload_nx4i64
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
  ; RV32-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
  ; RV32-NEXT: PseudoRET implicit $v8m4
  ;
  ; RV64-LABEL: name: vload_nx4i64
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
  ; RV64-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
  ; RV64-NEXT: PseudoRET implicit $v8m4
  %va = load <vscale x 4 x i64>, ptr %pa
  ret <vscale x 4 x i64> %va
}

define <vscale x 8 x i64> @vload_nx8i64(ptr %pa) {
  ; RV32-LABEL: name: vload_nx8i64
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
  ; RV32-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
  ; RV32-NEXT: PseudoRET implicit $v8m8
  ;
  ; RV64-LABEL: name: vload_nx8i64
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
  ; RV64-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
  ; RV64-NEXT: PseudoRET implicit $v8m8
  %va = load <vscale x 8 x i64>, ptr %pa
  ret <vscale x 8 x i64> %va
}

define <vscale x 16 x i8> @vload_nx16i8_align1(ptr %pa) {
  ; RV32-LABEL: name: vload_nx16i8_align1
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
  ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
  ; RV32-NEXT: PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: vload_nx16i8_align1
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
  ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
  ; RV64-NEXT: PseudoRET implicit $v8m2
  %va = load <vscale x 16 x i8>, ptr %pa, align 1
  ret <vscale x 16 x i8> %va
}

define <vscale x 16 x i8> @vload_nx16i8_align2(ptr %pa) {
  ; RV32-LABEL: name: vload_nx16i8_align2
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
  ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
  ; RV32-NEXT: PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: vload_nx16i8_align2
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
  ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
  ; RV64-NEXT: PseudoRET implicit $v8m2
  %va = load <vscale x 16 x i8>, ptr %pa, align 2
  ret <vscale x 16 x i8> %va
}

define <vscale x 16 x i8> @vload_nx16i8_align16(ptr %pa) {
  ; RV32-LABEL: name: vload_nx16i8_align16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
  ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
  ; RV32-NEXT: PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: vload_nx16i8_align16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
  ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
  ; RV64-NEXT: PseudoRET implicit $v8m2
  %va = load <vscale x 16 x i8>, ptr %pa, align 16
  ret <vscale x 16 x i8> %va
}

define <vscale x 16 x i8> @vload_nx16i8_align64(ptr %pa) {
  ; RV32-LABEL: name: vload_nx16i8_align64
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
  ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
  ; RV32-NEXT: PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: vload_nx16i8_align64
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
  ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
  ; RV64-NEXT: PseudoRET implicit $v8m2
  %va = load <vscale x 16 x i8>, ptr %pa, align 64
  ret <vscale x 16 x i8> %va
}

define <vscale x 4 x i16> @vload_nx4i16_align1(ptr %pa) {
  ; RV32-LABEL: name: vload_nx4i16_align1
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 1)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx4i16_align1
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 1)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 4 x i16>, ptr %pa, align 1
  ret <vscale x 4 x i16> %va
}

define <vscale x 4 x i16> @vload_nx4i16_align2(ptr %pa) {
  ; RV32-LABEL: name: vload_nx4i16_align2
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx4i16_align2
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 4 x i16>, ptr %pa, align 2
  ret <vscale x 4 x i16> %va
}

define <vscale x 4 x i16> @vload_nx4i16_align4(ptr %pa) {
  ; RV32-LABEL: name: vload_nx4i16_align4
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx4i16_align4
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 4 x i16>, ptr %pa, align 4
  ret <vscale x 4 x i16> %va
}

define <vscale x 4 x i16> @vload_nx4i16_align8(ptr %pa) {
  ; RV32-LABEL: name: vload_nx4i16_align8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx4i16_align8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 4 x i16>, ptr %pa, align 8
  ret <vscale x 4 x i16> %va
}

define <vscale x 4 x i16> @vload_nx4i16_align16(ptr %pa) {
  ; RV32-LABEL: name: vload_nx4i16_align16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx4i16_align16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 4 x i16>, ptr %pa, align 16
  ret <vscale x 4 x i16> %va
}

define <vscale x 2 x i32> @vload_nx2i32_align2(ptr %pa) {
  ; RV32-LABEL: name: vload_nx2i32_align2
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 2)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx2i32_align2
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 2)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 2 x i32>, ptr %pa, align 2
  ret <vscale x 2 x i32> %va
}

define <vscale x 2 x i32> @vload_nx2i32_align4(ptr %pa) {
  ; RV32-LABEL: name: vload_nx2i32_align4
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx2i32_align4
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 2 x i32>, ptr %pa, align 4
  ret <vscale x 2 x i32> %va
}

define <vscale x 2 x i32> @vload_nx2i32_align8(ptr %pa) {
  ; RV32-LABEL: name: vload_nx2i32_align8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx2i32_align8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 2 x i32>, ptr %pa, align 8
  ret <vscale x 2 x i32> %va
}

define <vscale x 2 x i32> @vload_nx2i32_align16(ptr %pa) {
  ; RV32-LABEL: name: vload_nx2i32_align16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx2i32_align16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 2 x i32>, ptr %pa, align 16
  ret <vscale x 2 x i32> %va
}

define <vscale x 2 x i32> @vload_nx2i32_align256(ptr %pa) {
  ; RV32-LABEL: name: vload_nx2i32_align256
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx2i32_align256
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 2 x i32>, ptr %pa, align 256
  ret <vscale x 2 x i32> %va
}

define <vscale x 2 x i64> @vload_nx2i64_align4(ptr %pa) {
  ; RV32-LABEL: name: vload_nx2i64_align4
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 4)
  ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
  ; RV32-NEXT: PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: vload_nx2i64_align4
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 4)
  ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
  ; RV64-NEXT: PseudoRET implicit $v8m2
  %va = load <vscale x 2 x i64>, ptr %pa, align 4
  ret <vscale x 2 x i64> %va
}

define <vscale x 2 x i64> @vload_nx2i64_align8(ptr %pa) {
  ; RV32-LABEL: name: vload_nx2i64_align8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
  ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
  ; RV32-NEXT: PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: vload_nx2i64_align8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
  ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
  ; RV64-NEXT: PseudoRET implicit $v8m2
  %va = load <vscale x 2 x i64>, ptr %pa, align 8
  ret <vscale x 2 x i64> %va
}

define <vscale x 2 x i64> @vload_nx2i64_align16(ptr %pa) {
  ; RV32-LABEL: name: vload_nx2i64_align16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
  ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
  ; RV32-NEXT: PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: vload_nx2i64_align16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
  ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
  ; RV64-NEXT: PseudoRET implicit $v8m2
  %va = load <vscale x 2 x i64>, ptr %pa, align 16
  ret <vscale x 2 x i64> %va
}

define <vscale x 2 x i64> @vload_nx2i64_align32(ptr %pa) {
  ; RV32-LABEL: name: vload_nx2i64_align32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
  ; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
  ; RV32-NEXT: PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: vload_nx2i64_align32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
  ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
  ; RV64-NEXT: PseudoRET implicit $v8m2
  %va = load <vscale x 2 x i64>, ptr %pa, align 32
  ret <vscale x 2 x i64> %va
}

define <vscale x 1 x ptr> @vload_nx1ptr(ptr %pa) {
  ; RV32-LABEL: name: vload_nx1ptr
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx1ptr
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
  ; RV64-NEXT: PseudoRET implicit $v8
  %va = load <vscale x 1 x ptr>, ptr %pa
  ret <vscale x 1 x ptr> %va
}

define <vscale x 2 x ptr> @vload_nx2ptr(ptr %pa) {
  ; RV32-LABEL: name: vload_nx2ptr
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
  ; RV32-NEXT: PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: vload_nx2ptr
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
  ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x p0>)
  ; RV64-NEXT: PseudoRET implicit $v8m2
  %va = load <vscale x 2 x ptr>, ptr %pa
  ret <vscale x 2 x ptr> %va
}

define <vscale x 8 x ptr> @vload_nx8ptr(ptr %pa) {
  ; RV32-LABEL: name: vload_nx8ptr
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
  ; RV32-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
  ; RV32-NEXT: PseudoRET implicit $v8m4
  ;
  ; RV64-LABEL: name: vload_nx8ptr
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
  ; RV64-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x p0>)
  ; RV64-NEXT: PseudoRET implicit $v8m8
  %va = load <vscale x 8 x ptr>, ptr %pa
  ret <vscale x 8 x ptr> %va
}