# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
# RUN:   -simplify-mir -verify-machineinstrs %s \
# RUN:   -o - | FileCheck -check-prefix=RV32I %s
# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
# RUN:   -simplify-mir -verify-machineinstrs %s \
# RUN:   -o - | FileCheck -check-prefix=RV64I %s
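
# G_ADD on scalable vector types (nxv1i8 through nxv8i64) should be assigned
# to the vector register bank (vrb) by regbankselect on both RV32 and RV64.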
tracksRegLiveness: true
    ; RV32I-LABEL: name: vadd_vv_nxv1i8
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v9
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s8>)
    ; RV32I-NEXT: PseudoRET implicit $v8
    ; RV64I-LABEL: name: vadd_vv_nxv1i8
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v9
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s8>)
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s8>) = COPY $v8
    %1:_(<vscale x 1 x s8>) = COPY $v9
    %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 1 x s8>)
    PseudoRET implicit $v8
tracksRegLiveness: true
    ; RV32I-LABEL: name: vadd_vv_nxv2i8
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v9
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s8>)
    ; RV32I-NEXT: PseudoRET implicit $v8
    ; RV64I-LABEL: name: vadd_vv_nxv2i8
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v9
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s8>)
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s8>) = COPY $v8
    %1:_(<vscale x 2 x s8>) = COPY $v9
    %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 2 x s8>)
    PseudoRET implicit $v8
tracksRegLiveness: true
    ; RV32I-LABEL: name: vadd_vv_nxv4i8
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v9
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s8>)
    ; RV32I-NEXT: PseudoRET implicit $v8
    ; RV64I-LABEL: name: vadd_vv_nxv4i8
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v9
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s8>)
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 4 x s8>) = COPY $v8
    %1:_(<vscale x 4 x s8>) = COPY $v9
    %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 4 x s8>)
    PseudoRET implicit $v8
tracksRegLiveness: true
    ; RV32I-LABEL: name: vadd_vv_nxv8i8
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v9
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 8 x s8>)
    ; RV32I-NEXT: PseudoRET implicit $v8
    ; RV64I-LABEL: name: vadd_vv_nxv8i8
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v9
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 8 x s8>)
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 8 x s8>) = COPY $v8
    %1:_(<vscale x 8 x s8>) = COPY $v9
    %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 8 x s8>)
    PseudoRET implicit $v8
name: vadd_vv_nxv16i8
tracksRegLiveness: true
    liveins: $v8m2, $v10m2
    ; RV32I-LABEL: name: vadd_vv_nxv16i8
    ; RV32I: liveins: $v8m2, $v10m2
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10m2
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 16 x s8>)
    ; RV32I-NEXT: PseudoRET implicit $v8m2
    ; RV64I-LABEL: name: vadd_vv_nxv16i8
    ; RV64I: liveins: $v8m2, $v10m2
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v10m2
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 16 x s8>)
    ; RV64I-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 16 x s8>) = COPY $v8m2
    %1:_(<vscale x 16 x s8>) = COPY $v10m2
    %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
    $v8m2 = COPY %2(<vscale x 16 x s8>)
    PseudoRET implicit $v8m2
name: vadd_vv_nxv32i8
tracksRegLiveness: true
    liveins: $v8m4, $v12m4
    ; RV32I-LABEL: name: vadd_vv_nxv32i8
    ; RV32I: liveins: $v8m4, $v12m4
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v12m4
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 32 x s8>)
    ; RV32I-NEXT: PseudoRET implicit $v8m4
    ; RV64I-LABEL: name: vadd_vv_nxv32i8
    ; RV64I: liveins: $v8m4, $v12m4
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v12m4
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 32 x s8>)
    ; RV64I-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 32 x s8>) = COPY $v8m4
    %1:_(<vscale x 32 x s8>) = COPY $v12m4
    %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
    $v8m4 = COPY %2(<vscale x 32 x s8>)
    PseudoRET implicit $v8m4
name: vadd_vv_nxv64i8
tracksRegLiveness: true
    liveins: $v8m8, $v16m8
    ; RV32I-LABEL: name: vadd_vv_nxv64i8
    ; RV32I: liveins: $v8m8, $v16m8
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v16m8
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 64 x s8>)
    ; RV32I-NEXT: PseudoRET implicit $v8m8
    ; RV64I-LABEL: name: vadd_vv_nxv64i8
    ; RV64I: liveins: $v8m8, $v16m8
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v16m8
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 64 x s8>)
    ; RV64I-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 64 x s8>) = COPY $v8m8
    %1:_(<vscale x 64 x s8>) = COPY $v16m8
    %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
    $v8m8 = COPY %2(<vscale x 64 x s8>)
    PseudoRET implicit $v8m8
name: vadd_vv_nxv1i16
tracksRegLiveness: true
    ; RV32I-LABEL: name: vadd_vv_nxv1i16
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v9
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s16>)
    ; RV32I-NEXT: PseudoRET implicit $v8
    ; RV64I-LABEL: name: vadd_vv_nxv1i16
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v9
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s16>)
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s16>) = COPY $v8
    %1:_(<vscale x 1 x s16>) = COPY $v9
    %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 1 x s16>)
    PseudoRET implicit $v8
name: vadd_vv_nxv2i16
tracksRegLiveness: true
    ; RV32I-LABEL: name: vadd_vv_nxv2i16
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v9
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s16>)
    ; RV32I-NEXT: PseudoRET implicit $v8
    ; RV64I-LABEL: name: vadd_vv_nxv2i16
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v9
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s16>)
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s16>) = COPY $v8
    %1:_(<vscale x 2 x s16>) = COPY $v9
    %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 2 x s16>)
    PseudoRET implicit $v8
name: vadd_vv_nxv4i16
tracksRegLiveness: true
    ; RV32I-LABEL: name: vadd_vv_nxv4i16
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v9
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s16>)
    ; RV32I-NEXT: PseudoRET implicit $v8
    ; RV64I-LABEL: name: vadd_vv_nxv4i16
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v9
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s16>)
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 4 x s16>) = COPY $v8
    %1:_(<vscale x 4 x s16>) = COPY $v9
    %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 4 x s16>)
    PseudoRET implicit $v8
name: vadd_vv_nxv8i16
tracksRegLiveness: true
    liveins: $v8m2, $v10m2
    ; RV32I-LABEL: name: vadd_vv_nxv8i16
    ; RV32I: liveins: $v8m2, $v10m2
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10m2
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 8 x s16>)
    ; RV32I-NEXT: PseudoRET implicit $v8m2
    ; RV64I-LABEL: name: vadd_vv_nxv8i16
    ; RV64I: liveins: $v8m2, $v10m2
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v10m2
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 8 x s16>)
    ; RV64I-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 8 x s16>) = COPY $v8m2
    %1:_(<vscale x 8 x s16>) = COPY $v10m2
    %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
    $v8m2 = COPY %2(<vscale x 8 x s16>)
    PseudoRET implicit $v8m2
name: vadd_vv_nxv16i16
tracksRegLiveness: true
    liveins: $v8m4, $v12m4
    ; RV32I-LABEL: name: vadd_vv_nxv16i16
    ; RV32I: liveins: $v8m4, $v12m4
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v12m4
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 16 x s16>)
    ; RV32I-NEXT: PseudoRET implicit $v8m4
    ; RV64I-LABEL: name: vadd_vv_nxv16i16
    ; RV64I: liveins: $v8m4, $v12m4
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v12m4
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 16 x s16>)
    ; RV64I-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 16 x s16>) = COPY $v8m4
    %1:_(<vscale x 16 x s16>) = COPY $v12m4
    %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
    $v8m4 = COPY %2(<vscale x 16 x s16>)
    PseudoRET implicit $v8m4
name: vadd_vv_nxv32i16
tracksRegLiveness: true
    liveins: $v8m8, $v16m8
    ; RV32I-LABEL: name: vadd_vv_nxv32i16
    ; RV32I: liveins: $v8m8, $v16m8
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v16m8
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 32 x s16>)
    ; RV32I-NEXT: PseudoRET implicit $v8m8
    ; RV64I-LABEL: name: vadd_vv_nxv32i16
    ; RV64I: liveins: $v8m8, $v16m8
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v16m8
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 32 x s16>)
    ; RV64I-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 32 x s16>) = COPY $v8m8
    %1:_(<vscale x 32 x s16>) = COPY $v16m8
    %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
    $v8m8 = COPY %2(<vscale x 32 x s16>)
    PseudoRET implicit $v8m8
name: vadd_vv_nxv1i32
tracksRegLiveness: true
    ; RV32I-LABEL: name: vadd_vv_nxv1i32
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v9
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s32>)
    ; RV32I-NEXT: PseudoRET implicit $v8
    ; RV64I-LABEL: name: vadd_vv_nxv1i32
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v9
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s32>)
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s32>) = COPY $v8
    %1:_(<vscale x 1 x s32>) = COPY $v9
    %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 1 x s32>)
    PseudoRET implicit $v8
name: vadd_vv_nxv2i32
tracksRegLiveness: true
    ; RV32I-LABEL: name: vadd_vv_nxv2i32
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v9
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s32>)
    ; RV32I-NEXT: PseudoRET implicit $v8
    ; RV64I-LABEL: name: vadd_vv_nxv2i32
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v9
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s32>)
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s32>) = COPY $v8
    %1:_(<vscale x 2 x s32>) = COPY $v9
    %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 2 x s32>)
    PseudoRET implicit $v8
name: vadd_vv_nxv4i32
tracksRegLiveness: true
    liveins: $v8m2, $v10m2
    ; RV32I-LABEL: name: vadd_vv_nxv4i32
    ; RV32I: liveins: $v8m2, $v10m2
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10m2
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 4 x s32>)
    ; RV32I-NEXT: PseudoRET implicit $v8m2
    ; RV64I-LABEL: name: vadd_vv_nxv4i32
    ; RV64I: liveins: $v8m2, $v10m2
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v10m2
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 4 x s32>)
    ; RV64I-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 4 x s32>) = COPY $v8m2
    %1:_(<vscale x 4 x s32>) = COPY $v10m2
    %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
    $v8m2 = COPY %2(<vscale x 4 x s32>)
    PseudoRET implicit $v8m2
name: vadd_vv_nxv8i32
tracksRegLiveness: true
    liveins: $v8m4, $v12m4
    ; RV32I-LABEL: name: vadd_vv_nxv8i32
    ; RV32I: liveins: $v8m4, $v12m4
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v12m4
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 8 x s32>)
    ; RV32I-NEXT: PseudoRET implicit $v8m4
    ; RV64I-LABEL: name: vadd_vv_nxv8i32
    ; RV64I: liveins: $v8m4, $v12m4
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v12m4
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 8 x s32>)
    ; RV64I-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 8 x s32>) = COPY $v8m4
    %1:_(<vscale x 8 x s32>) = COPY $v12m4
    %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
    $v8m4 = COPY %2(<vscale x 8 x s32>)
    PseudoRET implicit $v8m4
name: vadd_vv_nxv16i32
tracksRegLiveness: true
    liveins: $v8m8, $v16m8
    ; RV32I-LABEL: name: vadd_vv_nxv16i32
    ; RV32I: liveins: $v8m8, $v16m8
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v16m8
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 16 x s32>)
    ; RV32I-NEXT: PseudoRET implicit $v8m8
    ; RV64I-LABEL: name: vadd_vv_nxv16i32
    ; RV64I: liveins: $v8m8, $v16m8
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v16m8
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 16 x s32>)
    ; RV64I-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 16 x s32>) = COPY $v8m8
    %1:_(<vscale x 16 x s32>) = COPY $v16m8
    %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
    $v8m8 = COPY %2(<vscale x 16 x s32>)
    PseudoRET implicit $v8m8
name: vadd_vv_nxv1i64
tracksRegLiveness: true
    ; RV32I-LABEL: name: vadd_vv_nxv1i64
    ; RV32I: liveins: $v8, $v9
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v9
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s64>)
    ; RV32I-NEXT: PseudoRET implicit $v8
    ; RV64I-LABEL: name: vadd_vv_nxv1i64
    ; RV64I: liveins: $v8, $v9
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v9
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s64>)
    ; RV64I-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s64>) = COPY $v8
    %1:_(<vscale x 1 x s64>) = COPY $v9
    %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 1 x s64>)
    PseudoRET implicit $v8
name: vadd_vv_nxv2i64
tracksRegLiveness: true
    liveins: $v8m2, $v10m2
    ; RV32I-LABEL: name: vadd_vv_nxv2i64
    ; RV32I: liveins: $v8m2, $v10m2
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10m2
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 2 x s64>)
    ; RV32I-NEXT: PseudoRET implicit $v8m2
    ; RV64I-LABEL: name: vadd_vv_nxv2i64
    ; RV64I: liveins: $v8m2, $v10m2
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v10m2
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8m2 = COPY [[ADD]](<vscale x 2 x s64>)
    ; RV64I-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 2 x s64>) = COPY $v8m2
    %1:_(<vscale x 2 x s64>) = COPY $v10m2
    %2:_(<vscale x 2 x s64>) = G_ADD %0, %1
    $v8m2 = COPY %2(<vscale x 2 x s64>)
    PseudoRET implicit $v8m2
name: vadd_vv_nxv4i64
tracksRegLiveness: true
    liveins: $v8m4, $v12m4
    ; RV32I-LABEL: name: vadd_vv_nxv4i64
    ; RV32I: liveins: $v8m4, $v12m4
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v12m4
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 4 x s64>)
    ; RV32I-NEXT: PseudoRET implicit $v8m4
    ; RV64I-LABEL: name: vadd_vv_nxv4i64
    ; RV64I: liveins: $v8m4, $v12m4
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v12m4
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8m4 = COPY [[ADD]](<vscale x 4 x s64>)
    ; RV64I-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 4 x s64>) = COPY $v8m4
    %1:_(<vscale x 4 x s64>) = COPY $v12m4
    %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
    $v8m4 = COPY %2(<vscale x 4 x s64>)
    PseudoRET implicit $v8m4
name: vadd_vv_nxv8i64
tracksRegLiveness: true
    liveins: $v8m8, $v16m8
    ; RV32I-LABEL: name: vadd_vv_nxv8i64
    ; RV32I: liveins: $v8m8, $v16m8
    ; RV32I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v16m8
    ; RV32I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
    ; RV32I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 8 x s64>)
    ; RV32I-NEXT: PseudoRET implicit $v8m8
    ; RV64I-LABEL: name: vadd_vv_nxv8i64
    ; RV64I: liveins: $v8m8, $v16m8
    ; RV64I-NEXT: [[COPY:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v16m8
    ; RV64I-NEXT: [[ADD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
    ; RV64I-NEXT: $v8m8 = COPY [[ADD]](<vscale x 8 x s64>)
    ; RV64I-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 8 x s64>) = COPY $v8m8
    %1:_(<vscale x 8 x s64>) = COPY $v16m8
    %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
    $v8m8 = COPY %2(<vscale x 8 x s64>)
    PseudoRET implicit $v8m8