# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc %s -mtriple=riscv64 -mattr=v -riscv-v-vector-bits-min=128 -run-pass=finalize-isel -o - | FileCheck %s
# RUN: llc %s -mtriple=riscv64 -mattr=v -riscv-v-vector-bits-min=128 -passes=finalize-isel -o - | FileCheck %s

# This test makes sure we peek through the COPY instruction between the
# IMPLICIT_DEF and PseudoVLE64_V_M8_MASK in order to select the tail-agnostic
# policy. The test is working if the second argument to PseudoVSETVLI has bit 6
# set.
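#
# For reference (an assumption of this note, not something the test checks
# directly): in the RVV 1.0 vtype encoding, vlmul occupies bits [2:0], vsew
# bits [5:3], vta is bit 6, and vma is bit 7. An e64, m8, tail-agnostic,
# mask-undisturbed configuration would therefore encode as
#   vlmul(m8)=0b011 | vsew(e64)=0b011<<3 | vta=1<<6 | vma=0<<7 = 0b1011011 = 91
# so bit 6 is exactly the tail-agnostic bit referred to above.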

--- |
  ; ModuleID = 'test.ll'
  source_filename = "test.ll"
  target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
  target triple = "riscv64"

  ; Function Attrs: nounwind
  define <vscale x 8 x i64> @masked_load_nxv8i64(ptr %a, <vscale x 8 x i1> %mask) #0 {
    %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0(ptr %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x i64> undef)
    ret <vscale x 8 x i64> %load
  }

  ; Function Attrs: argmemonly nofree nosync nounwind readonly willreturn
  declare <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0(ptr, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i64>) #1

  attributes #0 = { nounwind "target-features"="+v" }
  attributes #1 = { argmemonly nofree nosync nounwind readonly willreturn "target-features"="+v" }

...
---
name: masked_load_nxv8i64
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: vr }
  - { id: 2, class: vrm8nov0 }
  - { id: 3, class: vrm8 }
  - { id: 4, class: vrm8nov0 }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
  - { reg: '$v0', virtual-reg: '%1' }
machineFunctionInfo: {}
body: |
  bb.0 (%ir-block.0):
    liveins: $x10, $v0

    ; CHECK-LABEL: name: masked_load_nxv8i64
    ; CHECK: liveins: $x10, $v0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
    ; CHECK-NEXT: $v0 = COPY [[COPY]]
    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8nov0 = COPY [[DEF]]
    ; CHECK-NEXT: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, -1, 6 /* e64 */, 1 /* ta, mu */ :: (load (s512) from %ir.a, align 8)
    ; CHECK-NEXT: $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]]
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %1:vr = COPY $v0
    %0:gpr = COPY $x10
    $v0 = COPY %1
    %3:vrm8 = IMPLICIT_DEF
    %4:vrm8nov0 = COPY %3
    %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, -1, 6, 1 :: (load (s512) from %ir.a, align 8)
    $v8m8 = COPY %2
    PseudoRET implicit $v8m8

...