; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
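
; All of the compressstore tests below lower the same way: vcompress.vm packs
; the active elements of the source vector to the front of a result register,
; vcpop.m counts the set mask bits to produce the new VL, and a unit-stride
; vse store then writes exactly that many contiguous elements to %base.
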
declare void @llvm.masked.compressstore.v1i8(<1 x i8>, ptr, <1 x i1>)
define void @compressstore_v1i8(ptr %base, <1 x i8> %v, <1 x i1> %mask) {
; CHECK-LABEL: compressstore_v1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v1i8(<1 x i8> %v, ptr %base, <1 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v2i8(<2 x i8>, ptr, <2 x i1>)
define void @compressstore_v2i8(ptr %base, <2 x i8> %v, <2 x i1> %mask) {
; CHECK-LABEL: compressstore_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v2i8(<2 x i8> %v, ptr %base, <2 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v4i8(<4 x i8>, ptr, <4 x i1>)
define void @compressstore_v4i8(ptr %base, <4 x i8> %v, <4 x i1> %mask) {
; CHECK-LABEL: compressstore_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vse8.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v4i8(<4 x i8> %v, ptr %base, <4 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v8i8(<8 x i8>, ptr, <8 x i1>)
define void @compressstore_v8i8(ptr %base, <8 x i8> %v, <8 x i1> %mask) {
; CHECK-LABEL: compressstore_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vse8.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v8i8(<8 x i8> %v, ptr %base, <8 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v1i16(<1 x i16>, ptr, <1 x i1>)
define void @compressstore_v1i16(ptr %base, <1 x i16> %v, <1 x i1> %mask) {
; CHECK-LABEL: compressstore_v1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vse16.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v1i16(<1 x i16> %v, ptr align 2 %base, <1 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v2i16(<2 x i16>, ptr, <2 x i1>)
define void @compressstore_v2i16(ptr %base, <2 x i16> %v, <2 x i1> %mask) {
; CHECK-LABEL: compressstore_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vse16.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v2i16(<2 x i16> %v, ptr align 2 %base, <2 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v4i16(<4 x i16>, ptr, <4 x i1>)
define void @compressstore_v4i16(ptr %base, <4 x i16> %v, <4 x i1> %mask) {
; CHECK-LABEL: compressstore_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vse16.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v4i16(<4 x i16> %v, ptr align 2 %base, <4 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v8i16(<8 x i16>, ptr, <8 x i1>)
define void @compressstore_v8i16(ptr %base, <8 x i16> %v, <8 x i1> %mask) {
; CHECK-LABEL: compressstore_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vse16.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v8i16(<8 x i16> %v, ptr align 2 %base, <8 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v1i32(<1 x i32>, ptr, <1 x i1>)
define void @compressstore_v1i32(ptr %base, <1 x i32> %v, <1 x i1> %mask) {
; CHECK-LABEL: compressstore_v1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v1i32(<1 x i32> %v, ptr align 4 %base, <1 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v2i32(<2 x i32>, ptr, <2 x i1>)
define void @compressstore_v2i32(ptr %base, <2 x i32> %v, <2 x i1> %mask) {
; CHECK-LABEL: compressstore_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v2i32(<2 x i32> %v, ptr align 4 %base, <2 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v4i32(<4 x i32>, ptr, <4 x i1>)
define void @compressstore_v4i32(ptr %base, <4 x i32> %v, <4 x i1> %mask) {
; CHECK-LABEL: compressstore_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vse32.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v4i32(<4 x i32> %v, ptr align 4 %base, <4 x i1> %mask)
  ret void
}

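; From LMUL=2 upward the compressed result occupies a vector register group,
; so the destination moves from v9 to the group-aligned v10 (m2) and v12 (m4).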
declare void @llvm.masked.compressstore.v8i32(<8 x i32>, ptr, <8 x i1>)
define void @compressstore_v8i32(ptr %base, <8 x i32> %v, <8 x i1> %mask) {
; CHECK-LABEL: compressstore_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vse32.v v10, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v8i32(<8 x i32> %v, ptr align 4 %base, <8 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v1i64(<1 x i64>, ptr, <1 x i1>)
define void @compressstore_v1i64(ptr %base, <1 x i64> %v, <1 x i1> %mask) {
; CHECK-LABEL: compressstore_v1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vse64.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v1i64(<1 x i64> %v, ptr align 8 %base, <1 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v2i64(<2 x i64>, ptr, <2 x i1>)
define void @compressstore_v2i64(ptr %base, <2 x i64> %v, <2 x i1> %mask) {
; CHECK-LABEL: compressstore_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vse64.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v2i64(<2 x i64> %v, ptr align 8 %base, <2 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v4i64(<4 x i64>, ptr, <4 x i1>)
define void @compressstore_v4i64(ptr %base, <4 x i64> %v, <4 x i1> %mask) {
; CHECK-LABEL: compressstore_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vse64.v v10, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v4i64(<4 x i64> %v, ptr align 8 %base, <4 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v8i64(<8 x i64>, ptr, <8 x i1>)
define void @compressstore_v8i64(ptr %base, <8 x i64> %v, <8 x i1> %mask) {
; CHECK-LABEL: compressstore_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vcompress.vm v12, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vse64.v v12, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v8i64(<8 x i64> %v, ptr align 8 %base, <8 x i1> %mask)
  ret void
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV32: {{.*}}
; RV64: {{.*}}