# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc %s -o - -mtriple=riscv64 -mattr=v -verify-machineinstrs \
# RUN: -run-pass=phi-node-elimination,register-coalescer,riscv-insert-vsetvli | FileCheck %s
source_filename = "vsetvli-insert.ll"
target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
target triple = "riscv64"
define <vscale x 1 x i64> @load_add_or_sub(i8 zeroext %cond, ptr %0, <vscale x 1 x i64> %1, i64 %2) #0 {
%a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, ptr %0, i64 %2)
%tobool = icmp eq i8 %cond, 0
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
%b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
if.else: ; preds = %entry
%c = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
if.end: ; preds = %if.else, %if.then
%d = phi <vscale x 1 x i64> [ %b, %if.then ], [ %c, %if.else ]
ret <vscale x 1 x i64> %d
define void @load_zext_or_sext(i8 zeroext %cond, ptr %0, ptr %1, i64 %2) #0 {
%a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, ptr %0, i64 %2)
%tobool = icmp eq i8 %cond, 0
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
%b = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> %a, i64 %2)
if.else: ; preds = %entry
%c = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> %a, i64 %2)
if.end: ; preds = %if.else, %if.then
%d = phi <vscale x 1 x i64> [ %b, %if.then ], [ %c, %if.else ]
call void @llvm.riscv.vse.nxv1i64.i64(<vscale x 1 x i64> %d, ptr %1, i64 %2)
declare i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64>) #1
define i64 @vmv_x_s(i8 zeroext %cond, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) #0 {
%tobool = icmp eq i8 %cond, 0
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
%a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2)
if.else: ; preds = %entry
%b = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %1, i64 %2)
if.end: ; preds = %if.else, %if.then
%c = phi <vscale x 1 x i64> [ %a, %if.then ], [ %b, %if.else ]
%d = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> %c)
declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #2
define <vscale x 1 x i64> @vsetvli_add_or_sub(i8 zeroext %cond, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %avl) #0 {
%vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 3, i64 0)
%tobool = icmp eq i8 %cond, 0
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
%b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %vl)
if.else: ; preds = %entry
%c = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %vl)
if.end: ; preds = %if.else, %if.then
%d = phi <vscale x 1 x i64> [ %b, %if.then ], [ %c, %if.else ]
ret <vscale x 1 x i64> %d
define void @vsetvli_vcpop() {
define void @vsetvli_loop_store() {
define void @vsetvli_loop_store2() {
define void @redusum_loop(ptr nocapture noundef readonly %a, i32 noundef signext %n, ptr nocapture noundef writeonly %res) #0 {
br label %vector.body
vector.body: ; preds = %vector.body, %entry
%lsr.iv1 = phi ptr [ %scevgep, %vector.body ], [ %a, %entry ]
%lsr.iv = phi i64 [ %lsr.iv.next, %vector.body ], [ 2048, %entry ]
%vec.phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %0, %vector.body ]
%lsr.iv12 = bitcast ptr %lsr.iv1 to ptr
%wide.load = load <4 x i32>, ptr %lsr.iv12, align 4
%0 = add <4 x i32> %wide.load, %vec.phi
%lsr.iv.next = add nsw i64 %lsr.iv, -4
%scevgep = getelementptr i32, ptr %lsr.iv1, i64 4
%1 = icmp eq i64 %lsr.iv.next, 0
br i1 %1, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
%2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %0)
store i32 %2, ptr %res, align 4
define void @vsetvli_vluxei64_regression() {
define void @if_in_loop() {
define void @pre_undemanded_vl() {
define void @clobberred_forwarded_avl() {
define void @clobberred_forwarded_phi_avl() {
declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1
declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1
declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, ptr nocapture, i64) #3
declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>, ptr nocapture, i64) #3
declare void @llvm.riscv.vse.nxv1i64.i64(<vscale x 1 x i64>, ptr nocapture, i64) #4
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1
declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1
attributes #0 = { "target-features"="+v" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }
attributes #3 = { nounwind readonly }
attributes #4 = { nounwind writeonly }
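# load_add_or_sub: a vle64 in the entry block feeds either a vadd or a vsub across
# the branch. All three instructions use the same AVL and SEW, so the checks expect
# a single PseudoVSETVLI in the entry block and none in either arm.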
name: load_add_or_sub
tracksRegLiveness: true
- { id: 0, class: vr }
- { id: 1, class: vr }
- { id: 2, class: vr }
- { id: 3, class: vr }
- { id: 4, class: gpr }
- { id: 5, class: gpr }
- { id: 6, class: vr }
- { id: 7, class: gprnox0 }
- { id: 8, class: gpr }
- { reg: '$x10', virtual-reg: '%4' }
- { reg: '$x11', virtual-reg: '%5' }
- { reg: '$v8', virtual-reg: '%6' }
- { reg: '$x12', virtual-reg: '%7' }
machineFunctionInfo: {}
; CHECK-LABEL: name: load_add_or_sub
; CHECK-NEXT: successors: %bb.2(0x30000000), %bb.1(0x50000000)
; CHECK-NEXT: liveins: $x10, $x11, $v8, $x12
; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x12
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x11
; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 undef $noreg, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: BEQ [[COPY3]], $x0, %bb.2
; CHECK-NEXT: PseudoBR %bb.1
; CHECK-NEXT: bb.1.if.then:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: PseudoBR %bb.3
; CHECK-NEXT: bb.2.if.else:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 undef $noreg, [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: bb.3.if.end:
; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
; CHECK-NEXT: PseudoRET implicit $v8
successors: %bb.2(0x30000000), %bb.1(0x50000000)
liveins: $x10, $x11, $v8, $x12
%7:gprnox0 = COPY $x12
%0:vr = PseudoVLE64_V_M1 undef $noreg, %5, %7, 6, 0
%1:vr = PseudoVADD_VV_M1 undef $noreg, %0, %6, %7, 6, 0
%2:vr = PseudoVSUB_VV_M1 undef $noreg, %0, %6, %7, 6, 0
%3:vr = PHI %1, %bb.1, %2, %bb.2
PseudoRET implicit $v8
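# load_zext_or_sext: the vle32 runs at e32/mf2 while the vzext.vf2/vsext.vf2 arms
# need e64/m1, so each arm gets a VL-preserving PseudoVSETVLIX0 before the extension.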
name: load_zext_or_sext
tracksRegLiveness: true
- { id: 0, class: vr }
- { id: 1, class: vr }
- { id: 2, class: vr }
- { id: 3, class: vr }
- { id: 4, class: gpr }
- { id: 5, class: gpr }
- { id: 6, class: gpr }
- { id: 7, class: gprnox0 }
- { id: 8, class: gpr }
- { reg: '$x10', virtual-reg: '%4' }
- { reg: '$x11', virtual-reg: '%5' }
- { reg: '$x12', virtual-reg: '%6' }
- { reg: '$x13', virtual-reg: '%7' }
machineFunctionInfo: {}
; CHECK-LABEL: name: load_zext_or_sext
; CHECK-NEXT: successors: %bb.2(0x30000000), %bb.1(0x50000000)
; CHECK-NEXT: liveins: $x10, $x11, $x12, $x13
; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x13
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x12
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x11
; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 undef $noreg, [[COPY2]], $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: BEQ [[COPY3]], $x0, %bb.2
; CHECK-NEXT: PseudoBR %bb.1
; CHECK-NEXT: bb.1.if.then:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
; CHECK-NEXT: early-clobber %9:vr = PseudoVZEXT_VF2_M1 undef $noreg, [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: PseudoBR %bb.3
; CHECK-NEXT: bb.2.if.else:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
; CHECK-NEXT: early-clobber %9:vr = PseudoVSEXT_VF2_M1 undef $noreg, [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: bb.3.if.end:
; CHECK-NEXT: PseudoVSE64_V_M1 %9, [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype
; CHECK-NEXT: PseudoRET
successors: %bb.2(0x30000000), %bb.1(0x50000000)
liveins: $x10, $x11, $x12, $x13
%7:gprnox0 = COPY $x13
%0:vr = PseudoVLE32_V_MF2 undef $noreg, %5, %7, 5, 0
early-clobber %1:vr = PseudoVZEXT_VF2_M1 undef $noreg, %0, %7, 6, 0
early-clobber %2:vr = PseudoVSEXT_VF2_M1 undef $noreg, %0, %7, 6, 0
%3:vr = PHI %1, %bb.1, %2, %bb.2
PseudoVSE64_V_M1 %3, %6, %7, 6
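# vmv_x_s: the first vector instruction on each path sits in the branch arms, so a
# PseudoVSETVLI is inserted in both bb.1 and bb.2; the vmv.x.s in the join block
# only reads $vtype and needs no additional vsetvli.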
name: vmv_x_s
tracksRegLiveness: true
- { id: 0, class: vr }
- { id: 1, class: vr }
- { id: 2, class: vr }
- { id: 3, class: gpr }
- { id: 4, class: vr }
- { id: 5, class: vr }
- { id: 6, class: gprnox0 }
- { id: 7, class: gpr }
- { id: 8, class: gpr }
- { reg: '$x10', virtual-reg: '%3' }
- { reg: '$v8', virtual-reg: '%4' }
- { reg: '$v9', virtual-reg: '%5' }
- { reg: '$x11', virtual-reg: '%6' }
machineFunctionInfo: {}
; CHECK-LABEL: name: vmv_x_s
; CHECK-NEXT: successors: %bb.2(0x30000000), %bb.1(0x50000000)
; CHECK-NEXT: liveins: $x10, $v8, $v9, $x11
; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: BEQ [[COPY3]], $x0, %bb.2
; CHECK-NEXT: PseudoBR %bb.1
; CHECK-NEXT: bb.1.if.then:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: PseudoBR %bb.3
; CHECK-NEXT: bb.2.if.else:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 undef $noreg, [[COPY1]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: bb.3.if.end:
; CHECK-NEXT: [[PseudoVMV_X_S:%[0-9]+]]:gpr = PseudoVMV_X_S [[PseudoVADD_VV_M1_]], 6 /* e64 */, implicit $vtype
; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S]]
; CHECK-NEXT: PseudoRET implicit $x10
successors: %bb.2(0x30000000), %bb.1(0x50000000)
liveins: $x10, $v8, $v9, $x11
%6:gprnox0 = COPY $x11
%0:vr = PseudoVADD_VV_M1 undef $noreg, %4, %5, %6, 6, 0
%1:vr = PseudoVSUB_VV_M1 undef $noreg, %5, %5, %6, 6, 0
%2:vr = PHI %0, %bb.1, %1, %bb.2
%8:gpr = PseudoVMV_X_S %2, 6
PseudoRET implicit $x10
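# vsetvli_add_or_sub: the entry block already contains an explicit vsetvli coming
# from llvm.riscv.vsetvli, so the checks expect no additional vsetvli before the
# vadd/vsub in either arm.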
name: vsetvli_add_or_sub
tracksRegLiveness: true
- { id: 0, class: gprnox0 }
- { id: 1, class: vr }
- { id: 2, class: vr }
- { id: 3, class: vr }
- { id: 4, class: gpr }
- { id: 5, class: vr }
- { id: 6, class: vr }
- { id: 7, class: gprnox0 }
- { id: 8, class: gpr }
- { reg: '$x10', virtual-reg: '%4' }
- { reg: '$v8', virtual-reg: '%5' }
- { reg: '$v9', virtual-reg: '%6' }
- { reg: '$x11', virtual-reg: '%7' }
machineFunctionInfo: {}
; CHECK-LABEL: name: vsetvli_add_or_sub
; CHECK-NEXT: successors: %bb.2(0x30000000), %bb.1(0x50000000)
; CHECK-NEXT: liveins: $x10, $v8, $v9, $x11
; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: dead [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: BEQ [[COPY3]], $x0, %bb.2
; CHECK-NEXT: PseudoBR %bb.1
; CHECK-NEXT: bb.1.if.then:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: PseudoBR %bb.3
; CHECK-NEXT: bb.2.if.else:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 undef $noreg, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: bb.3.if.end:
; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
; CHECK-NEXT: PseudoRET implicit $v8
successors: %bb.2(0x30000000), %bb.1(0x50000000)
liveins: $x10, $v8, $v9, $x11
%7:gprnox0 = COPY $x11
%0:gprnox0 = PseudoVSETVLI %7, 88, implicit-def dead $vl, implicit-def dead $vtype
%1:vr = PseudoVADD_VV_M1 undef $noreg, %5, %6, %0, 6, 0
%2:vr = PseudoVSUB_VV_M1 undef $noreg, %5, %6, %0, 6, 0
%3:vr = PHI %1, %bb.1, %2, %bb.2
PseudoRET implicit $v8
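# vsetvli_vcpop: the entry block mixes e64 and e32 VLMAX ops (vid.v, vmv.v.i) and the
# second block has a vmseq, a masked vle32 and a vcpop.m, exercising how vsetvlis are
# placed around mask-register producers and consumers.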
name: vsetvli_vcpop
tracksRegLiveness: true
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
- { id: 3, class: vr, preferred-register: '' }
- { id: 4, class: vrnov0, preferred-register: '' }
- { id: 5, class: vmv0, preferred-register: '' }
- { id: 6, class: vrnov0, preferred-register: '' }
- { id: 7, class: gpr, preferred-register: '' }
- { id: 8, class: gpr, preferred-register: '' }
- { id: 9, class: gpr, preferred-register: '' }
- { id: 10, class: gpr, preferred-register: '' }
- { id: 11, class: vr, preferred-register: '' }
; CHECK-LABEL: name: vsetvli_vcpop
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: liveins: $x10, $x11
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 223 /* e64, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: [[PseudoVID_V_MF2_:%[0-9]+]]:vr = PseudoVID_V_MF2 undef $noreg, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: dead [[PseudoVSETVLIX0_1:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vrnov0 = PseudoVMV_V_I_MF2 undef $noreg, 0, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.3(0x40000000)
; CHECK-NEXT: [[PseudoVMSEQ_VI_MF2_:%[0-9]+]]:vmv0 = PseudoVMSEQ_VI_MF2 [[PseudoVID_V_MF2_]], 0, -1, 5 /* e32 */, implicit $vl, implicit $vtype
; CHECK-NEXT: $v0 = COPY [[PseudoVMSEQ_VI_MF2_]]
; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 23 /* e32, mf2, tu, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl
; CHECK-NEXT: [[PseudoVLE32_V_MF2_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_MF2_MASK [[PseudoVMV_V_I_MF2_]], [[COPY]], $v0, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
; CHECK-NEXT: [[PseudoVCPOP_M_B1_:%[0-9]+]]:gpr = PseudoVCPOP_M_B1 [[PseudoVMSEQ_VI_MF2_]], -1, 0 /* e8 */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr = IMPLICIT_DEF
; CHECK-NEXT: BEQ [[PseudoVCPOP_M_B1_]], $x0, %bb.3
; CHECK-NEXT: PseudoBR %bb.2
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr = LWU [[COPY1]], 0
; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
; CHECK-NEXT: [[PseudoVADD_VX_MF2_:%[0-9]+]]:vr = nsw PseudoVADD_VX_MF2 undef $noreg, [[PseudoVLE32_V_MF2_MASK]], [[DEF]], -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: $v0 = COPY [[PseudoVADD_VX_MF2_]]
; CHECK-NEXT: PseudoRET implicit $v0
successors: %bb.1(0x80000000)
%2:gpr = IMPLICIT_DEF
%3:vr = PseudoVID_V_MF2 undef $noreg, -1, 6, 0
%4:vrnov0 = PseudoVMV_V_I_MF2 undef $noreg, 0, -1, 5, 0
successors: %bb.2(0x40000000), %bb.3(0x40000000)
%5:vmv0 = PseudoVMSEQ_VI_MF2 killed %3, 0, -1, 5
%6:vrnov0 = PseudoVLE32_V_MF2_MASK %4, killed %0, $v0, -1, 5, 0
%7:gpr = PseudoVCPOP_M_B1 %5, -1, 0
BEQ killed %7, %8, %bb.3
successors: %bb.3(0x80000000)
%10:gpr = PHI %2, %bb.1, %9, %bb.2
%11:vr = nsw PseudoVADD_VX_MF2 undef $noreg, %6, %10, -1, 5, 0
PseudoRET implicit $v0
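# vsetvli_loop_store: a vid.v computed once at VLMAX e64/m1 in the preheader is reused
# by the vadd.vx/vse32.v loop; the vse32 at e32/mf2 keeps the same SEW/LMUL ratio, so
# the checks expect no vsetvli inside the loop body.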
name: vsetvli_loop_store
tracksRegLiveness: true
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
- { id: 3, class: gpr, preferred-register: '' }
- { id: 4, class: vr, preferred-register: '' }
- { id: 5, class: gpr, preferred-register: '' }
- { id: 6, class: gpr, preferred-register: '' }
- { id: 7, class: vr, preferred-register: '' }
- { id: 8, class: gpr, preferred-register: '' }
- { id: 9, class: gpr, preferred-register: '' }
- { id: 10, class: gpr, preferred-register: '' }
; CHECK-LABEL: name: vsetvli_loop_store
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: liveins: $x10, $x11
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 undef $noreg, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK-NEXT: [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 undef $noreg, [[PseudoVID_V_M1_]], [[COPY2]], -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[COPY2]], [[SRLI]]
; CHECK-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[MUL]]
; CHECK-NEXT: PseudoVSE32_V_MF2 [[PseudoVADD_VX_M1_]], [[ADD]], -1, 5 /* e32 */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = ADDI [[COPY2]], 1
; CHECK-NEXT: BLTU [[COPY2]], [[COPY1]], %bb.1
; CHECK-NEXT: PseudoBR %bb.2
; CHECK-NEXT: PseudoRET
%1:gpr = PseudoReadVLENB
%2:gpr = SRLI %1:gpr, 3
%4:vr = PseudoVID_V_M1 undef $noreg, -1, 6, 0
successors: %bb.1, %bb.2
%6:gpr = PHI %5:gpr, %bb.0, %10:gpr, %bb.1
%7:vr = PseudoVADD_VX_M1 undef $noreg, %4:vr, %6:gpr, -1, 6, 0
%8:gpr = MUL %6:gpr, %2:gpr
%9:gpr = ADD %0:gpr, %8:gpr
PseudoVSE32_V_MF2 killed %7:vr, killed %9:gpr, -1, 5
%10:gpr = ADDI %6:gpr, 1
BLTU %10:gpr, %3:gpr, %bb.1
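# vsetvli_loop_store2: same pattern as vsetvli_loop_store, but the backedge branch
# lives in a separate latch block rather than in the loop body itself.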
name: vsetvli_loop_store2
tracksRegLiveness: true
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
- { id: 3, class: gpr, preferred-register: '' }
- { id: 4, class: vr, preferred-register: '' }
- { id: 5, class: gpr, preferred-register: '' }
- { id: 6, class: gpr, preferred-register: '' }
- { id: 7, class: vr, preferred-register: '' }
- { id: 8, class: gpr, preferred-register: '' }
- { id: 9, class: gpr, preferred-register: '' }
- { id: 10, class: gpr, preferred-register: '' }
; CHECK-LABEL: name: vsetvli_loop_store2
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: liveins: $x10, $x11
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 undef $noreg, -1, 6 /* e64 */, 3 /* ta, ma */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0
; CHECK-NEXT: successors: %bb.2(0x80000000)
; CHECK-NEXT: [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 undef $noreg, [[PseudoVID_V_M1_]], [[COPY2]], -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[COPY2]], [[SRLI]]
; CHECK-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[MUL]]
; CHECK-NEXT: PseudoVSE32_V_MF2 [[PseudoVADD_VX_M1_]], [[ADD]], -1, 5 /* e32 */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = ADDI [[COPY2]], 1
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.3(0x40000000)
; CHECK-NEXT: BLTU [[COPY2]], [[COPY1]], %bb.1
; CHECK-NEXT: PseudoBR %bb.3
; CHECK-NEXT: PseudoRET
%1:gpr = PseudoReadVLENB
%2:gpr = SRLI %1:gpr, 3
%4:vr = PseudoVID_V_M1 undef $noreg, -1, 6, 3
%6:gpr = PHI %5:gpr, %bb.0, %10:gpr, %bb.3
%7:vr = PseudoVADD_VX_M1 undef $noreg, %4:vr, %6:gpr, -1, 6, 0
%8:gpr = MUL %6:gpr, %2:gpr
%9:gpr = ADD %0:gpr, %8:gpr
PseudoVSE32_V_MF2 killed %7:vr, killed %9:gpr, -1, 5
%10:gpr = ADDI %6:gpr, 1
successors: %bb.1, %bb.2
BLTU %10:gpr, %3:gpr, %bb.1
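# redusum_loop: a fixed-length <4 x i32> reduction loop. The checks expect a single
# PseudoVSETIVLI with AVL 4 in the preheader, no vsetvli inside the loop, and a
# PseudoVSETIVLI with AVL 1 before the final vse32 of the reduced value.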
name: redusum_loop
tracksRegLiveness: true
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: vr }
- { id: 3, class: vr }
- { id: 4, class: gpr }
- { id: 5, class: gpr }
- { id: 6, class: gpr }
- { id: 7, class: gpr }
- { id: 8, class: gpr }
- { id: 9, class: gpr }
- { id: 10, class: vr }
- { id: 11, class: vr }
- { id: 12, class: vr }
- { id: 13, class: gpr }
- { id: 14, class: vr }
- { id: 15, class: vr }
- { id: 16, class: vr }
- { id: 17, class: vr }
- { id: 18, class: gpr }
- { id: 19, class: gpr }
- { id: 20, class: vr }
- { id: 21, class: vr }
- { id: 22, class: vr }
- { id: 23, class: vr }
- { id: 24, class: vr }
- { reg: '$x10', virtual-reg: '%6' }
- { reg: '$x12', virtual-reg: '%8' }
machineFunctionInfo: {}
; CHECK-LABEL: name: redusum_loop
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: liveins: $x10, $x12
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 4, 208 /* e32, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 undef $noreg, 0, 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[LUI:%[0-9]+]]:gpr = LUI 1
; CHECK-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[LUI]], -2048
; CHECK-NEXT: bb.1.vector.body:
; CHECK-NEXT: successors: %bb.2(0x04000000), %bb.1(0x7c000000)
; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 undef $noreg, [[COPY1]], 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.lsr.iv12, align 4)
; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[PseudoVLE32_V_M1_]], [[PseudoVMV_V_I_M1_]], 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[ADDIW:%[0-9]+]]:gpr = nsw ADDI [[ADDIW]], -4
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = ADDI [[COPY1]], 16
; CHECK-NEXT: BNE [[ADDIW]], $x0, %bb.1
; CHECK-NEXT: PseudoBR %bb.2
; CHECK-NEXT: bb.2.middle.block:
; CHECK-NEXT: [[PseudoVMV_S_X:%[0-9]+]]:vr = PseudoVMV_S_X undef $noreg, $x0, 1, 5 /* e32 */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 undef $noreg, [[PseudoVMV_V_I_M1_]], [[PseudoVMV_S_X]], 4, 5 /* e32 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 208 /* e32, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: PseudoVSE32_V_M1 [[PseudoVREDSUM_VS_M1_E8_]], [[COPY]], 1, 5 /* e32 */, implicit $vl, implicit $vtype :: (store (s32) into %ir.res)
; CHECK-NEXT: PseudoRET
%11:vr = PseudoVMV_V_I_M1 undef $noreg, 0, 4, 5, 0
%9:gpr = ADDIW killed %13, -2048
successors: %bb.2(0x04000000), %bb.1(0x7c000000)
%0:gpr = PHI %6, %bb.0, %5, %bb.1
%1:gpr = PHI %9, %bb.0, %4, %bb.1
%2:vr = PHI %10, %bb.0, %16, %bb.1
%14:vr = PseudoVLE32_V_M1 undef $noreg, %0, 4, 5, 0 :: (load (s128) from %ir.lsr.iv12, align 4)
%16:vr = PseudoVADD_VV_M1 undef $noreg, killed %14, %2, 4, 5, 0
%4:gpr = nsw ADDI %1, -4
%20:vr = PseudoVMV_S_X undef $noreg, %19, 1, 5
%23:vr = PseudoVREDSUM_VS_M1_E8 undef $noreg, %16, killed %20, 4, 5, 1
PseudoVSE32_V_M1 killed %23, %8, 1, 5 :: (store (s32) into %ir.res)
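# vsetvli_vluxei64_regression: a masked vluxei64 on one path and a masked vsoxei64
# after the join both consume a mask computed at VLMAX, with the mask copied into $v0
# immediately before each masked operation.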
name: vsetvli_vluxei64_regression
tracksRegLiveness: true
; CHECK-LABEL: name: vsetvli_vluxei64_regression
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: liveins: $x10, $x11, $x12, $v0, $v1, $v2, $v3
; CHECK-NEXT: %a:gpr = COPY $x10
; CHECK-NEXT: %b:gpr = COPY $x11
; CHECK-NEXT: %inaddr:gpr = COPY $x12
; CHECK-NEXT: %idxs:vr = COPY $v0
; CHECK-NEXT: %t1:vr = COPY $v1
; CHECK-NEXT: %t3:vr = COPY $v2
; CHECK-NEXT: [[COPY:%[0-9]+]]:vrnov0 = COPY $v3
; CHECK-NEXT: %t5:vrnov0 = COPY $v1
; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: %t6:vr = PseudoVMSEQ_VI_M1 %t1, 0, -1, 6 /* e64 */, implicit $vl, implicit $vtype
; CHECK-NEXT: PseudoBR %bb.1
; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.2(0x40000000)
; CHECK-NEXT: %mask:vr = PseudoVMANDN_MM_MF8 %t6, %t3, -1, 0 /* e8 */, implicit $vl, implicit $vtype
; CHECK-NEXT: BEQ %a, $x0, %bb.3
; CHECK-NEXT: PseudoBR %bb.2
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: $v0 = COPY %mask
; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 69 /* e8, mf8, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl
; CHECK-NEXT: early-clobber [[COPY]]:vrnov0 = PseudoVLUXEI64_V_M1_MF8_MASK %t5, %inaddr, %idxs, $v0, -1, 3 /* e8 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: PseudoBR %bb.3
; CHECK-NEXT: $v0 = COPY %mask
; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
; CHECK-NEXT: PseudoVSOXEI64_V_M1_MF8_MASK [[COPY]], %b, %idxs, $v0, -1, 3 /* e8 */, implicit $vl, implicit $vtype
; CHECK-NEXT: PseudoRET
liveins: $x10, $x11, $x12, $v0, $v1, $v2, $v3
%inaddr:gpr = COPY $x12
%t5:vrnov0 = COPY $v1
%t6:vr = PseudoVMSEQ_VI_M1 %t1, 0, -1, 6
successors: %bb.3, %bb.2
%mask:vr = PseudoVMANDN_MM_MF8 %t6, %t3, -1, 0
early-clobber %t0:vrnov0 = PseudoVLUXEI64_V_M1_MF8_MASK %t5, killed %inaddr, %idxs, $v0, -1, 3, 1
%stval:vr = PHI %t4, %bb.1, %ldval, %bb.2
PseudoVSOXEI64_V_M1_MF8_MASK killed %stval, killed %b, %idxs, $v0, -1, 3
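# if_in_loop: a guarded block inside a loop. The loop header computes an active-lane
# mask with vadd.vx/vmsltu.vx/vcpop.m and branches over the vle8/vadd.vi/vse8 sequence
# when no lanes are active.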
name: if_in_loop
tracksRegLiveness: true
; CHECK-LABEL: name: if_in_loop
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15
; CHECK-NEXT: %dst:gpr = COPY $x10
; CHECK-NEXT: %src:gpr = COPY $x11
; CHECK-NEXT: dead [[COPY:%[0-9]+]]:gpr = COPY $x12
; CHECK-NEXT: %tc:gpr = COPY $x13
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x14
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x15
; CHECK-NEXT: %vlenb:gpr = PseudoReadVLENB
; CHECK-NEXT: %inc:gpr = SRLI %vlenb, 3
; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 undef $noreg, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x0
; CHECK-NEXT: PseudoBR %bb.1
; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.3(0x40000000)
; CHECK-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY2]], [[COPY3]]
; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
; CHECK-NEXT: [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 undef $noreg, [[PseudoVID_V_M1_]], [[ADD]], -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[PseudoVMSLTU_VX_M1_:%[0-9]+]]:vr = PseudoVMSLTU_VX_M1 [[PseudoVADD_VX_M1_]], [[COPY1]], -1, 6 /* e64 */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[PseudoVCPOP_M_B1_:%[0-9]+]]:gpr = PseudoVCPOP_M_B1 [[PseudoVMSLTU_VX_M1_]], -1, 0 /* e8 */, implicit $vl, implicit $vtype
; CHECK-NEXT: BEQ [[PseudoVCPOP_M_B1_]], $x0, %bb.3
; CHECK-NEXT: PseudoBR %bb.2
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:gpr = ADD %src, [[COPY3]]
; CHECK-NEXT: [[PseudoVLE8_V_MF8_:%[0-9]+]]:vrnov0 = PseudoVLE8_V_MF8 undef $noreg, [[ADD1]], -1, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
; CHECK-NEXT: [[PseudoVADD_VI_MF8_:%[0-9]+]]:vrnov0 = PseudoVADD_VI_MF8 undef $noreg, [[PseudoVLE8_V_MF8_]], 4, -1, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[ADD2:%[0-9]+]]:gpr = ADD %dst, [[COPY3]]
; CHECK-NEXT: PseudoVSE8_V_MF8 [[PseudoVADD_VI_MF8_]], [[ADD2]], -1, 3 /* e8 */, implicit $vl, implicit $vtype
; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.4(0x04000000)
; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = ADD [[COPY3]], %inc
; CHECK-NEXT: BLTU [[COPY3]], %tc, %bb.1
; CHECK-NEXT: PseudoBR %bb.4
; CHECK-NEXT: PseudoRET
successors: %bb.1(0x80000000)
liveins: $x10, $x11, $x12, $x13, $x14, $x15
%vlenb:gpr = PseudoReadVLENB
%inc:gpr = SRLI killed %vlenb, 3
%10:vr = PseudoVID_V_M1 undef $noreg, -1, 6, 0
successors: %bb.2(0x40000000), %bb.3(0x40000000)
%26:gpr = PHI %59, %bb.0, %28, %bb.3
%61:gpr = ADD %12, %26
%27:vr = PseudoVADD_VX_M1 undef $noreg, %10, killed %61, -1, 6, 0
%62:vr = PseudoVMSLTU_VX_M1 %27, %11, -1, 6
%63:gpr = PseudoVCPOP_M_B1 %62, -1, 0
BEQ killed %63, %64, %bb.3
successors: %bb.3(0x80000000)
%66:gpr = ADD %src, %26
%67:vrnov0 = PseudoVLE8_V_MF8 undef $noreg, killed %66, -1, 3, 0
%76:vrnov0 = PseudoVADD_VI_MF8 undef $noreg, %67, 4, -1, 3, 0
%77:gpr = ADD %dst, %26
PseudoVSE8_V_MF8 killed %76, killed %77, -1, 3
successors: %bb.1(0x7c000000), %bb.4(0x04000000)
%28:gpr = ADD %26, %inc
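# pre_undemanded_vl: the vmv.x.s in the self-loop only demands $vtype, not $vl, so the
# inserted PseudoVSETIVLI stays in the preheader and is not repeated inside the loop.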
name: pre_undemanded_vl
; CHECK-LABEL: name: pre_undemanded_vl
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: PseudoBR %bb.1
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: dead %x:gpr = PseudoVMV_X_S undef $noreg, 6 /* e64 */, implicit $vtype
; CHECK-NEXT: PseudoBR %bb.1
%x:gpr = PseudoVMV_X_S undef $noreg, 6
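# clobberred_forwarded_avl: %avl is redefined (ADDI %avl, 1) after the vsetvli that
# consumed it, so when the pass later needs that AVL again it must read it through a
# copy taken before the clobber rather than from %avl itself.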
name: clobberred_forwarded_avl
tracksRegLiveness: true
; CHECK-LABEL: name: clobberred_forwarded_avl
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: liveins: $x10, $v8m2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %avl:gprnox0 = COPY $x10
; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY %avl
; CHECK-NEXT: dead %outvl:gprnox0 = PseudoVSETVLI %avl, 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: successors: %bb.2(0x80000000)
; CHECK-NEXT: liveins: $v8m2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: dead %avl:gprnox0 = ADDI %avl, 1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: liveins: $v8m2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: renamable $v10m2 = PseudoVADD_VV_M2 undef renamable $v10m2, renamable $v8m2, renamable $v8m2, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: renamable $v8m2 = PseudoVADD_VV_M2 undef renamable $v8m2, killed renamable $v10m2, renamable $v8m2, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: PseudoRET implicit $v8m2
liveins: $x10, $v8m2
%avl:gprnox0 = COPY $x10
%outvl:gprnox0 = PseudoVSETVLI %avl:gprnox0, 209, implicit-def dead $vl, implicit-def dead $vtype
%avl:gprnox0 = ADDI %avl:gprnox0, 1
renamable $v10m2 = PseudoVADD_VV_M2 undef renamable $v10m2, renamable $v8m2, renamable $v8m2, -1, 5, 0
renamable $v8m2 = PseudoVADD_VV_M2 undef renamable $v8m2, killed renamable $v10m2, killed renamable $v8m2, %outvl:gprnox0, 5, 0
PseudoRET implicit $v8m2
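# clobberred_forwarded_phi_avl: same as above, but the AVL is a PHI of two constants
# that gets incremented before its value is needed again, so the inserted vsetvli must
# use a copy of the PHI result taken before the clobber.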
name: clobberred_forwarded_phi_avl
tracksRegLiveness: true
; CHECK-LABEL: name: clobberred_forwarded_phi_avl
; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
; CHECK-NEXT: liveins: $x10, $x11, $v8m2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %v:vrm2 = COPY $v8m2
; CHECK-NEXT: [[ADDI:%[0-9]+]]:gprnox0 = ADDI $x0, 1
; CHECK-NEXT: %x:gpr = COPY $x10
; CHECK-NEXT: %y:gpr = COPY $x11
; CHECK-NEXT: BEQ %x, %y, %bb.2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: successors: %bb.2(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[ADDI:%[0-9]+]]:gprnox0 = ADDI $x0, 2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY [[ADDI]]
; CHECK-NEXT: dead %outvl:gprnox0 = PseudoVSETVLI [[ADDI]], 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: successors: %bb.4(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: dead [[ADDI:%[0-9]+]]:gprnox0 = ADDI [[ADDI]], 1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: renamable $v10m2 = PseudoVADD_VV_M2 undef renamable $v10m2, %v, %v, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: renamable $v8m2 = PseudoVADD_VV_M2 undef renamable $v8m2, killed renamable $v10m2, %v, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: PseudoRET implicit $v8m2
liveins: $x10, $x11, $v8m2
%v:vrm2 = COPY $v8m2
%a:gpr = ADDI $x0, 1
%b:gpr = ADDI $x0, 2
%avl:gprnox0 = PHI %a, %bb.0, %b, %bb.1
%outvl:gprnox0 = PseudoVSETVLI %avl:gprnox0, 209, implicit-def dead $vl, implicit-def dead $vtype
%avl:gprnox0 = ADDI %avl:gprnox0, 1
renamable $v10m2 = PseudoVADD_VV_M2 undef renamable $v10m2, %v, %v, -1, 5, 0
renamable $v8m2 = PseudoVADD_VV_M2 undef renamable $v8m2, killed renamable $v10m2, killed %v, %outvl:gprnox0, 5, 0
PseudoRET implicit $v8m2