; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
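
; These tests cover IRTranslator lowering of scalable-vector stores on RV32
; and RV64: each IR store should translate to a single G_STORE whose memory
; operand records the scalable vector type (plus any non-natural alignment),
; with the vector argument arriving in $v8, $v8m2, $v8m4, or $v8m8 according
; to its LMUL register grouping.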
define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx1i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx1i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 1 x i8> %b, ptr %pa
  ret void
}

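; Every test below follows the same shape: the pointer argument is copied out
; of $x10, the vector argument out of its vector register group, and the
; store becomes one G_STORE into %ir.pa.
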
define void @vstore_nx2i8(ptr %pa, <vscale x 2 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx2i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 2 x i8> %b, ptr %pa
  ret void
}

define void @vstore_nx4i8(ptr %pa, <vscale x 4 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx4i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx4i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 4 x i8> %b, ptr %pa
  ret void
}

define void @vstore_nx8i8(ptr %pa, <vscale x 8 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx8i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx8i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 8 x i8> %b, ptr %pa
  ret void
}

define void @vstore_nx16i8(ptr %pa, <vscale x 16 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx16i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx16i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 16 x i8> %b, ptr %pa
  ret void
}

define void @vstore_nx32i8(ptr %pa, <vscale x 32 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx32i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m4
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx32i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m4
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 32 x i8> %b, ptr %pa
  ret void
}

define void @vstore_nx64i8(ptr %pa, <vscale x 64 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx64i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m8
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx64i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m8
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 64 x i8> %b, ptr %pa
  ret void
}

define void @vstore_nx1i16(ptr %pa, <vscale x 1 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx1i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx1i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 1 x i16> %b, ptr %pa
  ret void
}

define void @vstore_nx2i16(ptr %pa, <vscale x 2 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx2i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 2 x i16> %b, ptr %pa
  ret void
}

define void @vstore_nx4i16(ptr %pa, <vscale x 4 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx4i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx4i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 4 x i16> %b, ptr %pa
  ret void
}

define void @vstore_nx8i16(ptr %pa, <vscale x 8 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx8i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx8i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 8 x i16> %b, ptr %pa
  ret void
}

define void @vstore_nx16i16(ptr %pa, <vscale x 16 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx16i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m4
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx16i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m4
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 16 x i16> %b, ptr %pa
  ret void
}

define void @vstore_nx32i16(ptr %pa, <vscale x 32 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx32i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m8
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx32i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m8
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 32 x i16> %b, ptr %pa
  ret void
}

define void @vstore_nx1i32(ptr %pa, <vscale x 1 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx1i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx1i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 1 x i32> %b, ptr %pa
  ret void
}

define void @vstore_nx2i32(ptr %pa, <vscale x 2 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx2i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 2 x i32> %b, ptr %pa
  ret void
}

define void @vstore_nx4i32(ptr %pa, <vscale x 4 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx4i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx4i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 4 x i32> %b, ptr %pa
  ret void
}

define void @vstore_nx8i32(ptr %pa, <vscale x 8 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx8i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m4
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx8i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m4
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 8 x i32> %b, ptr %pa
  ret void
}

define void @vstore_nx16i32(ptr %pa, <vscale x 16 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx16i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m8
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx16i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m8
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 16 x i32> %b, ptr %pa
  ret void
}

define void @vstore_nx1i64(ptr %pa, <vscale x 1 x i64> %b) {
  ; RV32-LABEL: name: vstore_nx1i64
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx1i64
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 1 x i64> %b, ptr %pa
  ret void
}

define void @vstore_nx2i64(ptr %pa, <vscale x 2 x i64> %b) {
  ; RV32-LABEL: name: vstore_nx2i64
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i64
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 2 x i64> %b, ptr %pa
  ret void
}

define void @vstore_nx4i64(ptr %pa, <vscale x 4 x i64> %b) {
  ; RV32-LABEL: name: vstore_nx4i64
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m4
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx4i64
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m4
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 4 x i64> %b, ptr %pa
  ret void
}

define void @vstore_nx8i64(ptr %pa, <vscale x 8 x i64> %b) {
  ; RV32-LABEL: name: vstore_nx8i64
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m8
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx8i64
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m8
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 8 x i64> %b, ptr %pa
  ret void
}

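; The tests below repeat selected cases with explicit source alignments. An
; alignment below or above the type's natural alignment is preserved as an
; align annotation on the G_STORE memory operand; a store at exactly the
; natural alignment drops the annotation as redundant.
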
define void @vstore_nx16i8_align1(ptr %pa, <vscale x 16 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx16i8_align1
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx16i8_align1
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
  ; RV64-NEXT: PseudoRET
  store <vscale x 16 x i8> %b, ptr %pa, align 1
  ret void
}

define void @vstore_nx16i8_align2(ptr %pa, <vscale x 16 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx16i8_align2
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx16i8_align2
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
  ; RV64-NEXT: PseudoRET
  store <vscale x 16 x i8> %b, ptr %pa, align 2
  ret void
}

define void @vstore_nx16i8_align16(ptr %pa, <vscale x 16 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx16i8_align16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx16i8_align16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 16 x i8> %b, ptr %pa, align 16
  ret void
}

define void @vstore_nx16i8_align64(ptr %pa, <vscale x 16 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx16i8_align64
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx16i8_align64
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
  ; RV64-NEXT: PseudoRET
  store <vscale x 16 x i8> %b, ptr %pa, align 64
  ret void
}

define void @vstore_nx4i16_align1(ptr %pa, <vscale x 4 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx4i16_align1
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 1)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx4i16_align1
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 1)
  ; RV64-NEXT: PseudoRET
  store <vscale x 4 x i16> %b, ptr %pa, align 1
  ret void
}

define void @vstore_nx4i16_align2(ptr %pa, <vscale x 4 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx4i16_align2
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx4i16_align2
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
  ; RV64-NEXT: PseudoRET
  store <vscale x 4 x i16> %b, ptr %pa, align 2
  ret void
}

define void @vstore_nx4i16_align4(ptr %pa, <vscale x 4 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx4i16_align4
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx4i16_align4
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
  ; RV64-NEXT: PseudoRET
  store <vscale x 4 x i16> %b, ptr %pa, align 4
  ret void
}

define void @vstore_nx4i16_align8(ptr %pa, <vscale x 4 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx4i16_align8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx4i16_align8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 4 x i16> %b, ptr %pa, align 8
  ret void
}

define void @vstore_nx4i16_align16(ptr %pa, <vscale x 4 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx4i16_align16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx4i16_align16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
  ; RV64-NEXT: PseudoRET
  store <vscale x 4 x i16> %b, ptr %pa, align 16
  ret void
}

define void @vstore_nx2i32_align2(ptr %pa, <vscale x 2 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx2i32_align2
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 2)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i32_align2
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 2)
  ; RV64-NEXT: PseudoRET
  store <vscale x 2 x i32> %b, ptr %pa, align 2
  ret void
}

define void @vstore_nx2i32_align4(ptr %pa, <vscale x 2 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx2i32_align4
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i32_align4
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
  ; RV64-NEXT: PseudoRET
  store <vscale x 2 x i32> %b, ptr %pa, align 4
  ret void
}

define void @vstore_nx2i32_align8(ptr %pa, <vscale x 2 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx2i32_align8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i32_align8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 2 x i32> %b, ptr %pa, align 8
  ret void
}

define void @vstore_nx2i32_align16(ptr %pa, <vscale x 2 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx2i32_align16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i32_align16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
  ; RV64-NEXT: PseudoRET
  store <vscale x 2 x i32> %b, ptr %pa, align 16
  ret void
}

define void @vstore_nx2i32_align256(ptr %pa, <vscale x 2 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx2i32_align256
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i32_align256
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
  ; RV64-NEXT: PseudoRET
  store <vscale x 2 x i32> %b, ptr %pa, align 256
  ret void
}

define void @vstore_nx2i64_align4(ptr %pa, <vscale x 2 x i64> %b) {
  ; RV32-LABEL: name: vstore_nx2i64_align4
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 4)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i64_align4
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 4)
  ; RV64-NEXT: PseudoRET
  store <vscale x 2 x i64> %b, ptr %pa, align 4
  ret void
}

define void @vstore_nx2i64_align8(ptr %pa, <vscale x 2 x i64> %b) {
  ; RV32-LABEL: name: vstore_nx2i64_align8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i64_align8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
  ; RV64-NEXT: PseudoRET
  store <vscale x 2 x i64> %b, ptr %pa, align 8
  ret void
}

define void @vstore_nx2i64_align16(ptr %pa, <vscale x 2 x i64> %b) {
  ; RV32-LABEL: name: vstore_nx2i64_align16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i64_align16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 2 x i64> %b, ptr %pa, align 16
  ret void
}

define void @vstore_nx2i64_align32(ptr %pa, <vscale x 2 x i64> %b) {
  ; RV32-LABEL: name: vstore_nx2i64_align32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i64_align32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
  ; RV64-NEXT: PseudoRET
  store <vscale x 2 x i64> %b, ptr %pa, align 32
  ret void
}

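; Stores of pointer vectors: p0 is 32 bits wide on RV32 and 64 bits on RV64,
; so the same <vscale x N x ptr> type can need a different LMUL grouping on
; each target (<vscale x 2 x p0> fits in $v8 on RV32 but takes $v8m2 on RV64).
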
define void @vstore_nx1ptr(ptr %pa, <vscale x 1 x ptr> %b) {
  ; RV32-LABEL: name: vstore_nx1ptr
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x p0>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx1ptr
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x p0>) = COPY $v8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 1 x ptr> %b, ptr %pa
  ret void
}

define void @vstore_nx2ptr(ptr %pa, <vscale x 2 x ptr> %b) {
  ; RV32-LABEL: name: vstore_nx2ptr
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x p0>) = COPY $v8
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2ptr
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x p0>) = COPY $v8m2
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 2 x ptr> %b, ptr %pa
  ret void
}

define void @vstore_nx8ptr(ptr %pa, <vscale x 8 x ptr> %b) {
  ; RV32-LABEL: name: vstore_nx8ptr
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT: liveins: $x10, $v8m4
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x p0>) = COPY $v8m4
  ; RV32-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
  ; RV32-NEXT: PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx8ptr
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT: liveins: $x10, $v8m8
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x p0>) = COPY $v8m8
  ; RV64-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
  ; RV64-NEXT: PseudoRET
  store <vscale x 8 x ptr> %b, ptr %pa
  ret void
}