; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
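
; Check that masked stores of scalable integer vectors select a VL-max vsetvli
; with the SEW/LMUL implied by the vector type, plus the corresponding masked
; vse<eew>.v instruction.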

define void @masked_store_nxv1i8(<vscale x 1 x i8> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv1i8.p0(<vscale x 1 x i8> %val, ptr %a, i32 1, <vscale x 1 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv1i8.p0(<vscale x 1 x i8>, ptr, i32, <vscale x 1 x i1>)

define void @masked_store_nxv1i16(<vscale x 1 x i16> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv1i16.p0(<vscale x 1 x i16> %val, ptr %a, i32 2, <vscale x 1 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv1i16.p0(<vscale x 1 x i16>, ptr, i32, <vscale x 1 x i1>)

define void @masked_store_nxv1i32(<vscale x 1 x i32> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv1i32.p0(<vscale x 1 x i32> %val, ptr %a, i32 4, <vscale x 1 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv1i32.p0(<vscale x 1 x i32>, ptr, i32, <vscale x 1 x i1>)

define void @masked_store_nxv1i64(<vscale x 1 x i64> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv1i64.p0(<vscale x 1 x i64> %val, ptr %a, i32 8, <vscale x 1 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv1i64.p0(<vscale x 1 x i64>, ptr, i32, <vscale x 1 x i1>)

define void @masked_store_nxv2i8(<vscale x 2 x i8> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> %val, ptr %a, i32 1, <vscale x 2 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8>, ptr, i32, <vscale x 2 x i1>)

define void @masked_store_nxv2i16(<vscale x 2 x i16> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> %val, ptr %a, i32 2, <vscale x 2 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16>, ptr, i32, <vscale x 2 x i1>)

define void @masked_store_nxv2i32(<vscale x 2 x i32> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32> %val, ptr %a, i32 4, <vscale x 2 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32>, ptr, i32, <vscale x 2 x i1>)

define void @masked_store_nxv2i64(<vscale x 2 x i64> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> %val, ptr %a, i32 8, <vscale x 2 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64>, ptr, i32, <vscale x 2 x i1>)

define void @masked_store_nxv4i8(<vscale x 4 x i8> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> %val, ptr %a, i32 1, <vscale x 4 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8>, ptr, i32, <vscale x 4 x i1>)

define void @masked_store_nxv4i16(<vscale x 4 x i16> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> %val, ptr %a, i32 2, <vscale x 4 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16>, ptr, i32, <vscale x 4 x i1>)

define void @masked_store_nxv4i32(<vscale x 4 x i32> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> %val, ptr %a, i32 4, <vscale x 4 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32>, ptr, i32, <vscale x 4 x i1>)

define void @masked_store_nxv4i64(<vscale x 4 x i64> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv4i64.p0(<vscale x 4 x i64> %val, ptr %a, i32 8, <vscale x 4 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv4i64.p0(<vscale x 4 x i64>, ptr, i32, <vscale x 4 x i1>)

define void @masked_store_nxv8i8(<vscale x 8 x i8> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> %val, ptr %a, i32 1, <vscale x 8 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8>, ptr, i32, <vscale x 8 x i1>)

define void @masked_store_nxv8i16(<vscale x 8 x i16> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> %val, ptr %a, i32 2, <vscale x 8 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16>, ptr, i32, <vscale x 8 x i1>)

define void @masked_store_nxv8i32(<vscale x 8 x i32> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv8i32.p0(<vscale x 8 x i32> %val, ptr %a, i32 4, <vscale x 8 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv8i32.p0(<vscale x 8 x i32>, ptr, i32, <vscale x 8 x i1>)

define void @masked_store_nxv8i64(<vscale x 8 x i64> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv8i64.p0(<vscale x 8 x i64> %val, ptr %a, i32 8, <vscale x 8 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv8i64.p0(<vscale x 8 x i64>, ptr, i32, <vscale x 8 x i1>)

define void @masked_store_nxv16i8(<vscale x 16 x i8> %val, ptr %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> %val, ptr %a, i32 1, <vscale x 16 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8>, ptr, i32, <vscale x 16 x i1>)

define void @masked_store_nxv16i16(<vscale x 16 x i16> %val, ptr %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv16i16.p0(<vscale x 16 x i16> %val, ptr %a, i32 2, <vscale x 16 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv16i16.p0(<vscale x 16 x i16>, ptr, i32, <vscale x 16 x i1>)

define void @masked_store_nxv16i32(<vscale x 16 x i32> %val, ptr %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv16i32.p0(<vscale x 16 x i32> %val, ptr %a, i32 4, <vscale x 16 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv16i32.p0(<vscale x 16 x i32>, ptr, i32, <vscale x 16 x i1>)

define void @masked_store_nxv32i8(<vscale x 32 x i8> %val, ptr %a, <vscale x 32 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8> %val, ptr %a, i32 1, <vscale x 32 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8>, ptr, i32, <vscale x 32 x i1>)

define void @masked_store_nxv32i16(<vscale x 32 x i16> %val, ptr %a, <vscale x 32 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv32i16.p0(<vscale x 32 x i16> %val, ptr %a, i32 2, <vscale x 32 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv32i16.p0(<vscale x 32 x i16>, ptr, i32, <vscale x 32 x i1>)

define void @masked_store_nxv64i8(<vscale x 64 x i8> %val, ptr %a, <vscale x 64 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8> %val, ptr %a, i32 4, <vscale x 64 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8>, ptr, i32, <vscale x 64 x i1>)

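; A masked store whose mask is all-zero writes nothing, so the call should be
; folded away entirely and only the return remains.
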
define void @masked_store_zero_mask(<vscale x 2 x i8> %val, ptr %a) nounwind {
; CHECK-LABEL: masked_store_zero_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> %val, ptr %a, i32 1, <vscale x 2 x i1> zeroinitializer)
  ret void
}

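; An all-ones mask is equivalent to an unmasked store, so no v0.t operand
; should be emitted.
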
define void @masked_store_allones_mask(<vscale x 2 x i8> %val, ptr %a) nounwind {
; CHECK-LABEL: masked_store_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %insert = insertelement <vscale x 2 x i1> poison, i1 1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %insert, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> %val, ptr %a, i32 1, <vscale x 2 x i1> %mask)
  ret void
}