; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc < %s -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel | FileCheck %s

declare <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32)
declare <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32)
declare <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr, <vscale x 2 x i1>, i32)

; Test result has chain output of true operand of merge.vvm.
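; The vp.merge whose true operand is a vp.load should be folded into a masked
; load pseudo with tail-undisturbed policy, and the trailing store must chain
; off the folded load, as the CHECK lines below verify.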
define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
  ; CHECK-LABEL: name: vpmerge_vpload_store
  ; CHECK: bb.0 (%ir-block.0):
  ; CHECK-NEXT:   liveins: $v8, $x10, $v0, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
  ; CHECK-NEXT:   [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size from %ir.p, align 8)
  ; CHECK-NEXT:   PseudoVSE32_V_M1 killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]], -1, 5 /* e32 */ :: (store (<vscale x 1 x s64>) into %ir.p)
  ; CHECK-NEXT:   PseudoRET
  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  store <vscale x 2 x i32> %b, ptr %p
  ret void
}
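
; Same peephole, but with vp.select instead of vp.merge: the vp.load is still
; expected to fold into the masked load pseudo, now with tail-agnostic policy.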
define void @vpselect_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
  ; CHECK-LABEL: name: vpselect_vpload_store
  ; CHECK: bb.0 (%ir-block.0):
  ; CHECK-NEXT:   liveins: $v8, $x10, $v0, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
  ; CHECK-NEXT:   [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 1 /* ta, mu */ :: (load unknown-size from %ir.p, align 8)
  ; CHECK-NEXT:   PseudoVSE32_V_M1 killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]], -1, 5 /* e32 */ :: (store (<vscale x 1 x s64>) into %ir.p)
  ; CHECK-NEXT:   PseudoRET
  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  store <vscale x 2 x i32> %b, ptr %p
  ret void
}