; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64

%struct = type { i64, i64, ptr, i32, i32, i32, [4 x i32] }

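; The vector GEP indexes field 5 of %struct with arbitrary indices from
; %vec.ind, so no constant stride exists and the scatter must lower to an
; indexed store (vsoxei), scaling the indices by the struct size (48 bytes
; on RV32, 56 on RV64) and folding the field offset into the base pointer.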
define void @complex_gep(ptr %p, <vscale x 2 x i64> %vec.ind, <vscale x 2 x i1> %m) {
; RV32-LABEL: complex_gep:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v10, v8, 0
; RV32-NEXT:    li a1, 48
; RV32-NEXT:    vmul.vx v8, v10, a1
; RV32-NEXT:    addi a0, a0, 28
; RV32-NEXT:    vmv.v.i v9, 0
; RV32-NEXT:    vsoxei32.v v9, (a0), v8, v0.t
; RV32-NEXT:    ret
;
; RV64-LABEL: complex_gep:
; RV64:       # %bb.0:
; RV64-NEXT:    li a1, 56
; RV64-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a1
; RV64-NEXT:    addi a0, a0, 32
; RV64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; RV64-NEXT:    vmv.v.i v10, 0
; RV64-NEXT:    vsoxei64.v v10, (a0), v8, v0.t
; RV64-NEXT:    ret
  %gep = getelementptr inbounds %struct, ptr %p, <vscale x 2 x i64> %vec.ind, i32 5
  call void @llvm.masked.scatter.nxv2i32.nxv2p0(<vscale x 2 x i32> zeroinitializer, <vscale x 2 x ptr> %gep, i32 8, <vscale x 2 x i1> %m)
  ret void
}

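; Here the addresses are a stepvector-based GEP, i.e. a constant 56-byte
; stride on RV64, so the scatter can instead become a strided store
; (vsse64.v) from the field-6 base address. On RV32 it remains an indexed
; store with the indices narrowed to 32 bits.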
define void @strided_store_zero_start(i64 %n, ptr %p) {
; RV32-LABEL: strided_store_zero_start:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vid.v v8
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    li a0, 48
; RV32-NEXT:    vmul.vx v8, v8, a0
; RV32-NEXT:    addi a0, a2, 32
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; RV32-NEXT:    vmv.v.i v9, 0
; RV32-NEXT:    vsoxei32.v v9, (a0), v8
; RV32-NEXT:    ret
;
; RV64-LABEL: strided_store_zero_start:
; RV64:       # %bb.0:
; RV64-NEXT:    addi a0, a1, 36
; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV64-NEXT:    vmv.v.i v8, 0
; RV64-NEXT:    li a1, 56
; RV64-NEXT:    vsse64.v v8, (a0), a1
; RV64-NEXT:    ret
  %step = tail call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
  %gep = getelementptr inbounds %struct, ptr %p, <vscale x 1 x i64> %step, i32 6
  tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> splat (i1 true))
  ret void
}

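; Same as above but the step vector is offset by %n. The stride is unchanged,
; so on RV64 the splat folds into the scalar base address (%p + %n * 56 + 36)
; and the strided store is still used.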
define void @strided_store_offset_start(i64 %n, ptr %p) {
; RV32-LABEL: strided_store_offset_start:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT:    vid.v v8
; RV32-NEXT:    vadd.vx v8, v8, a0
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    li a0, 48
; RV32-NEXT:    vmul.vx v8, v8, a0
; RV32-NEXT:    addi a0, a2, 32
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; RV32-NEXT:    vmv.v.i v9, 0
; RV32-NEXT:    vsoxei32.v v9, (a0), v8
; RV32-NEXT:    ret
;
; RV64-LABEL: strided_store_offset_start:
; RV64:       # %bb.0:
; RV64-NEXT:    li a2, 56
; RV64-NEXT:    mul a0, a0, a2
; RV64-NEXT:    add a0, a1, a0
; RV64-NEXT:    addi a0, a0, 36
; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV64-NEXT:    vmv.v.i v8, 0
; RV64-NEXT:    vsse64.v v8, (a0), a2
; RV64-NEXT:    ret
  %step = tail call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
  %.splatinsert = insertelement <vscale x 1 x i64> poison, i64 %n, i64 0
  %.splat = shufflevector <vscale x 1 x i64> %.splatinsert, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %add = add <vscale x 1 x i64> %step, %.splat
  %gep = getelementptr inbounds %struct, ptr %p, <vscale x 1 x i64> %add, i32 6
  tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> splat (i1 true))
  ret void
}

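; A GEP over i64 elements with a plain stepvector is a contiguous access
; (stride equals the element size), so on RV64 the scatter becomes a
; unit-stride whole-register store (vs1r.v). RV32 again keeps the indexed
; store, with a shift (vsll.vi 3) in place of the multiply.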
define void @stride_one_store(i64 %n, ptr %p) {
; RV32-LABEL: stride_one_store:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vid.v v8
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    vsll.vi v8, v8, 3
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; RV32-NEXT:    vmv.v.i v9, 0
; RV32-NEXT:    vsoxei32.v v9, (a2), v8
; RV32-NEXT:    ret
;
; RV64-LABEL: stride_one_store:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vmv.v.i v8, 0
; RV64-NEXT:    vs1r.v v8, (a1)
; RV64-NEXT:    ret
  %step = tail call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
  %gep = getelementptr inbounds i64, ptr %p, <vscale x 1 x i64> %step
  tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> splat (i1 true))
  ret void
}

declare <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
declare void @llvm.masked.scatter.nxv2i32.nxv2p0(<vscale x 2 x i32>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
declare void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64>, <vscale x 1 x ptr>, i32, <vscale x 1 x i1>)