; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v | FileCheck %s

declare <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8>, ptr, i64, <32 x i1>)
declare <2 x i64> @llvm.riscv.masked.strided.load.v2i64.p0.i64(<2 x i64>, ptr, i64, <2 x i1>)

declare void @llvm.riscv.masked.strided.store.v32i8.p0.i64(<32 x i8>, ptr, i64, <32 x i1>)
declare void @llvm.riscv.masked.strided.store.v2i64.p0.i64(<2 x i64>, ptr, i64, <2 x i1>)

define <32 x i8> @strided_load_i8(ptr %p, i64 %stride, <32 x i1> %m) {
; CHECK-LABEL: strided_load_i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 %stride, <32 x i1> %m)
  ret <32 x i8> %res
}

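; A VL of 2 fits in the 5-bit vsetivli immediate, so no separate li is needed.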
define <2 x i64> @strided_load_i64(ptr %p, i64 %stride, <2 x i1> %m) {
; CHECK-LABEL: strided_load_i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %res = call <2 x i64> @llvm.riscv.masked.strided.load.v2i64.p0.i64(<2 x i64> undef, ptr %p, i64 %stride, <2 x i1> %m)
  ret <2 x i64> %res
}

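; Stride 0: every active lane loads from the same address, so x0 is used as the stride operand.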
define <32 x i8> @strided_load_i8_splat(ptr %p, <32 x i1> %m) {
; CHECK-LABEL: strided_load_i8_splat:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vlse8.v v8, (a0), zero, v0.t
; CHECK-NEXT:    ret
  %res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 0, <32 x i1> %m)
  ret <32 x i8> %res
}

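; A stride of -1 loads the elements in descending address order.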
define <32 x i8> @strided_load_i8_reverse(ptr %p, <32 x i1> %m) {
; CHECK-LABEL: strided_load_i8_reverse:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    li a2, -1
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vlse8.v v8, (a0), a2, v0.t
; CHECK-NEXT:    ret
  %res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 -1, <32 x i1> %m)
  ret <32 x i8> %res
}

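; For i8 elements a stride of 1 equals the element size, so the access is unit-stride and lowers to vle8.v.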
define <32 x i8> @strided_load_i8_nostride(ptr %p, <32 x i1> %m) {
; CHECK-LABEL: strided_load_i8_nostride:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 1, <32 x i1> %m)
  ret <32 x i8> %res
}

define void @strided_store_i8(ptr %p, <32 x i8> %v, i64 %stride, <32 x i1> %m) {
; CHECK-LABEL: strided_store_i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  call void @llvm.riscv.masked.strided.store.v32i8.p0.i64(<32 x i8> %v, ptr %p, i64 %stride, <32 x i1> %m)
  ret void
}

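; Mirrors the splat load: stride 0 maps to the x0 stride operand.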
define void @strided_store_i8_zero(ptr %p, <32 x i8> %v, <32 x i1> %m) {
; CHECK-LABEL: strided_store_i8_zero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), zero, v0.t
; CHECK-NEXT:    ret
  call void @llvm.riscv.masked.strided.store.v32i8.p0.i64(<32 x i8> %v, ptr %p, i64 0, <32 x i1> %m)
  ret void
}

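; Stride 1 on i8 elements is unit-stride, so this lowers to a plain vse8.v.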
define void @strided_store_i8_nostride(ptr %p, <32 x i8> %v, <32 x i1> %m) {
; CHECK-LABEL: strided_store_i8_nostride:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.riscv.masked.strided.store.v32i8.p0.i64(<32 x i8> %v, ptr %p, i64 1, <32 x i1> %m)
  ret void
}

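; A stride of -1 stores the elements in descending address order.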
define void @strided_store_i8_reverse(ptr %p, <32 x i8> %v, <32 x i1> %m) {
; CHECK-LABEL: strided_store_i8_reverse:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    li a2, -1
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsse8.v v8, (a0), a2, v0.t
; CHECK-NEXT:    ret
  call void @llvm.riscv.masked.strided.store.v32i8.p0.i64(<32 x i8> %v, ptr %p, i64 -1, <32 x i1> %m)
  ret void
}

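; Scalable-vector variants of the strided intrinsics.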
declare void @llvm.riscv.masked.strided.store.nxv1i64.p0.i64(<vscale x 1 x i64>, ptr, i64, <vscale x 1 x i1>)

declare <vscale x 1 x i64> @llvm.riscv.masked.strided.load.nxv1i64.p0.i64(<vscale x 1 x i64>, ptr, i64, <vscale x 1 x i1>)

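; Scalable types operate at VLMAX: vsetvli takes x0 as the AVL.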
define <vscale x 1 x i64> @strided_load_vscale_i64(ptr %p, i64 %stride, <vscale x 1 x i1> %m) {
; CHECK-LABEL: strided_load_vscale_i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 1 x i64> @llvm.riscv.masked.strided.load.nxv1i64.p0.i64(<vscale x 1 x i64> undef, ptr %p, i64 %stride, <vscale x 1 x i1> %m)
  ret <vscale x 1 x i64> %res
}

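; The scalable store likewise runs at VLMAX.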
define void @strided_store_vscale_i64(ptr %p, <vscale x 1 x i64> %v, i64 %stride, <vscale x 1 x i1> %m) {
; CHECK-LABEL: strided_store_vscale_i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  call void @llvm.riscv.masked.strided.store.nxv1i64.p0.i64(<vscale x 1 x i64> %v, ptr %p, i64 %stride, <vscale x 1 x i1> %m)
  ret void
}