1 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
2 # RUN: llc -mtriple=riscv32 -mattr=+zve32x -run-pass=legalizer %s -o - | FileCheck %s
3 # RUN: llc -mtriple=riscv64 -mattr=+zve32x -run-pass=legalizer %s -o - | FileCheck %s
; G_ADD of <vscale x 2 x s8>: fits in a single vector register (v8/v9),
; and the expected output mirrors the input — the legalizer leaves it as-is.
9 ; CHECK-LABEL: name: test_nxv2i8
10 ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
11 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
12 ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
13 ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s8>)
14 ; CHECK-NEXT: PseudoRET implicit $v8
15 %0:_(<vscale x 2 x s8>) = COPY $v8
16 %1:_(<vscale x 2 x s8>) = COPY $v9
17 %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
18 $v8 = COPY %2(<vscale x 2 x s8>)
19 PseudoRET implicit $v8
; G_ADD of <vscale x 4 x s8>: single-register operands (v8/v9); expected
; output is identical to the input, i.e. already legal.
27 ; CHECK-LABEL: name: test_nxv4i8
28 ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
29 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
30 ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
31 ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s8>)
32 ; CHECK-NEXT: PseudoRET implicit $v8
33 %0:_(<vscale x 4 x s8>) = COPY $v8
34 %1:_(<vscale x 4 x s8>) = COPY $v9
35 %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
36 $v8 = COPY %2(<vscale x 4 x s8>)
37 PseudoRET implicit $v8
; G_ADD of <vscale x 8 x s8>: single-register operands (v8/v9); expected
; output is identical to the input, i.e. already legal.
45 ; CHECK-LABEL: name: test_nxv8i8
46 ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
47 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
48 ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
49 ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 8 x s8>)
50 ; CHECK-NEXT: PseudoRET implicit $v8
51 %0:_(<vscale x 8 x s8>) = COPY $v8
52 %1:_(<vscale x 8 x s8>) = COPY $v9
53 %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
54 $v8 = COPY %2(<vscale x 8 x s8>)
55 PseudoRET implicit $v8
; G_ADD of <vscale x 16 x s8>: operands use two-register groups
; (v8m2/v10m2); expected output mirrors the input — no legalization needed.
63 ; CHECK-LABEL: name: test_nxv16i8
64 ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
65 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
66 ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
67 ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 16 x s8>)
68 ; CHECK-NEXT: PseudoRET implicit $v8m2
69 %0:_(<vscale x 16 x s8>) = COPY $v8m2
70 %1:_(<vscale x 16 x s8>) = COPY $v10m2
71 %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
72 $v8m2 = COPY %2(<vscale x 16 x s8>)
73 PseudoRET implicit $v8m2
; G_ADD of <vscale x 32 x s8>: operands use four-register groups
; (v8m4/v12m4); expected output mirrors the input — no legalization needed.
81 ; CHECK-LABEL: name: test_nxv32i8
82 ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
83 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
84 ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
85 ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 32 x s8>)
86 ; CHECK-NEXT: PseudoRET implicit $v8m4
87 %0:_(<vscale x 32 x s8>) = COPY $v8m4
88 %1:_(<vscale x 32 x s8>) = COPY $v12m4
89 %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
90 $v8m4 = COPY %2(<vscale x 32 x s8>)
91 PseudoRET implicit $v8m4
; G_ADD of <vscale x 64 x s8>: operands use eight-register groups
; (v8m8/v16m8), the widest grouping; expected output mirrors the input.
99 ; CHECK-LABEL: name: test_nxv64i8
100 ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
101 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
102 ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
103 ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 64 x s8>)
104 ; CHECK-NEXT: PseudoRET implicit $v8m8
105 %0:_(<vscale x 64 x s8>) = COPY $v8m8
106 %1:_(<vscale x 64 x s8>) = COPY $v16m8
107 %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
108 $v8m8 = COPY %2(<vscale x 64 x s8>)
109 PseudoRET implicit $v8m8
; G_ADD of <vscale x 2 x s16>: single-register operands (v8/v9); expected
; output is identical to the input, i.e. already legal.
117 ; CHECK-LABEL: name: test_nxv2i16
118 ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
119 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
120 ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
121 ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s16>)
122 ; CHECK-NEXT: PseudoRET implicit $v8
123 %0:_(<vscale x 2 x s16>) = COPY $v8
124 %1:_(<vscale x 2 x s16>) = COPY $v9
125 %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
126 $v8 = COPY %2(<vscale x 2 x s16>)
127 PseudoRET implicit $v8
; G_ADD of <vscale x 4 x s16>: single-register operands (v8/v9); expected
; output is identical to the input, i.e. already legal.
135 ; CHECK-LABEL: name: test_nxv4i16
136 ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
137 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
138 ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
139 ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s16>)
140 ; CHECK-NEXT: PseudoRET implicit $v8
141 %0:_(<vscale x 4 x s16>) = COPY $v8
142 %1:_(<vscale x 4 x s16>) = COPY $v9
143 %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
144 $v8 = COPY %2(<vscale x 4 x s16>)
145 PseudoRET implicit $v8
; G_ADD of <vscale x 8 x s16>: operands use two-register groups
; (v8m2/v10m2); expected output mirrors the input — no legalization needed.
153 ; CHECK-LABEL: name: test_nxv8i16
154 ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
155 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
156 ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
157 ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 8 x s16>)
158 ; CHECK-NEXT: PseudoRET implicit $v8m2
159 %0:_(<vscale x 8 x s16>) = COPY $v8m2
160 %1:_(<vscale x 8 x s16>) = COPY $v10m2
161 %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
162 $v8m2 = COPY %2(<vscale x 8 x s16>)
163 PseudoRET implicit $v8m2
; G_ADD of <vscale x 16 x s16>: operands use four-register groups
; (v8m4/v12m4); expected output mirrors the input — no legalization needed.
171 ; CHECK-LABEL: name: test_nxv16i16
172 ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
173 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
174 ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
175 ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 16 x s16>)
176 ; CHECK-NEXT: PseudoRET implicit $v8m4
177 %0:_(<vscale x 16 x s16>) = COPY $v8m4
178 %1:_(<vscale x 16 x s16>) = COPY $v12m4
179 %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
180 $v8m4 = COPY %2(<vscale x 16 x s16>)
181 PseudoRET implicit $v8m4
; G_ADD of <vscale x 32 x s16>: operands use eight-register groups
; (v8m8/v16m8), the widest grouping; expected output mirrors the input.
189 ; CHECK-LABEL: name: test_nxv32i16
190 ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
191 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
192 ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
193 ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 32 x s16>)
194 ; CHECK-NEXT: PseudoRET implicit $v8m8
195 %0:_(<vscale x 32 x s16>) = COPY $v8m8
196 %1:_(<vscale x 32 x s16>) = COPY $v16m8
197 %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
198 $v8m8 = COPY %2(<vscale x 32 x s16>)
199 PseudoRET implicit $v8m8
; G_ADD of <vscale x 2 x s32>: single-register operands (v8/v9) at the
; widest element type this file tests; expected output mirrors the input.
207 ; CHECK-LABEL: name: test_nxv2i32
208 ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
209 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
210 ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
211 ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s32>)
212 ; CHECK-NEXT: PseudoRET implicit $v8
213 %0:_(<vscale x 2 x s32>) = COPY $v8
214 %1:_(<vscale x 2 x s32>) = COPY $v9
215 %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
216 $v8 = COPY %2(<vscale x 2 x s32>)
217 PseudoRET implicit $v8
; G_ADD of <vscale x 4 x s32>: operands use two-register groups
; (v8m2/v10m2); expected output mirrors the input — no legalization needed.
225 ; CHECK-LABEL: name: test_nxv4i32
226 ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
227 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
228 ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
229 ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 4 x s32>)
230 ; CHECK-NEXT: PseudoRET implicit $v8m2
231 %0:_(<vscale x 4 x s32>) = COPY $v8m2
232 %1:_(<vscale x 4 x s32>) = COPY $v10m2
233 %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
234 $v8m2 = COPY %2(<vscale x 4 x s32>)
235 PseudoRET implicit $v8m2
; G_ADD of <vscale x 8 x s32>: operands use four-register groups
; (v8m4/v12m4); expected output mirrors the input — no legalization needed.
243 ; CHECK-LABEL: name: test_nxv8i32
244 ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
245 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
246 ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
247 ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 8 x s32>)
248 ; CHECK-NEXT: PseudoRET implicit $v8m4
249 %0:_(<vscale x 8 x s32>) = COPY $v8m4
250 %1:_(<vscale x 8 x s32>) = COPY $v12m4
251 %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
252 $v8m4 = COPY %2(<vscale x 8 x s32>)
253 PseudoRET implicit $v8m4
; G_ADD of <vscale x 16 x s32>: operands use eight-register groups
; (v8m8/v16m8), the widest grouping; expected output mirrors the input.
261 ; CHECK-LABEL: name: test_nxv16i32
262 ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
263 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
264 ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
265 ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 16 x s32>)
266 ; CHECK-NEXT: PseudoRET implicit $v8m8
267 %0:_(<vscale x 16 x s32>) = COPY $v8m8
268 %1:_(<vscale x 16 x s32>) = COPY $v16m8
269 %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
270 $v8m8 = COPY %2(<vscale x 16 x s32>)
271 PseudoRET implicit $v8m8