; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zvfbfmin,+v -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zvfbfmin,+v -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+zvfbfmin,+v -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s
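; Check that @llvm.masked.load of scalable bf16/f16/f32/f64 vectors lowers to a
; masked vle16.v/vle32.v/vle64.v with the SEW and LMUL matching the element type.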
define <vscale x 1 x bfloat> @masked_load_nxv1bf16(ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x bfloat> @llvm.masked.load.nxv1bf16(ptr %a, i32 2, <vscale x 1 x i1> %mask, <vscale x 1 x bfloat> undef)
  ret <vscale x 1 x bfloat> %load
}
declare <vscale x 1 x bfloat> @llvm.masked.load.nxv1bf16(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x bfloat>)

define <vscale x 1 x half> @masked_load_nxv1f16(ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x half> @llvm.masked.load.nxv1f16(ptr %a, i32 2, <vscale x 1 x i1> %mask, <vscale x 1 x half> undef)
  ret <vscale x 1 x half> %load
}
declare <vscale x 1 x half> @llvm.masked.load.nxv1f16(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x half>)

define <vscale x 1 x float> @masked_load_nxv1f32(ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x float> @llvm.masked.load.nxv1f32(ptr %a, i32 4, <vscale x 1 x i1> %mask, <vscale x 1 x float> undef)
  ret <vscale x 1 x float> %load
}
declare <vscale x 1 x float> @llvm.masked.load.nxv1f32(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x float>)

define <vscale x 1 x double> @masked_load_nxv1f64(ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x double> @llvm.masked.load.nxv1f64(ptr %a, i32 8, <vscale x 1 x i1> %mask, <vscale x 1 x double> undef)
  ret <vscale x 1 x double> %load
}
declare <vscale x 1 x double> @llvm.masked.load.nxv1f64(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x double>)

define <vscale x 2 x bfloat> @masked_load_nxv2bf16(ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x bfloat> @llvm.masked.load.nxv2bf16(ptr %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
  ret <vscale x 2 x bfloat> %load
}
declare <vscale x 2 x bfloat> @llvm.masked.load.nxv2bf16(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)

define <vscale x 2 x half> @masked_load_nxv2f16(ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(ptr %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
  ret <vscale x 2 x half> %load
}
declare <vscale x 2 x half> @llvm.masked.load.nxv2f16(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x half>)

define <vscale x 2 x float> @masked_load_nxv2f32(ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(ptr %a, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
  ret <vscale x 2 x float> %load
}
declare <vscale x 2 x float> @llvm.masked.load.nxv2f32(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x float>)

define <vscale x 2 x double> @masked_load_nxv2f64(ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(ptr %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
  ret <vscale x 2 x double> %load
}
declare <vscale x 2 x double> @llvm.masked.load.nxv2f64(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x double>)

define <vscale x 4 x bfloat> @masked_load_nxv4bf16(ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x bfloat> @llvm.masked.load.nxv4bf16(ptr %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
  ret <vscale x 4 x bfloat> %load
}
declare <vscale x 4 x bfloat> @llvm.masked.load.nxv4bf16(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)

define <vscale x 4 x half> @masked_load_nxv4f16(ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(ptr %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
  ret <vscale x 4 x half> %load
}
declare <vscale x 4 x half> @llvm.masked.load.nxv4f16(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x half>)

define <vscale x 4 x float> @masked_load_nxv4f32(ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(ptr %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
  ret <vscale x 4 x float> %load
}
declare <vscale x 4 x float> @llvm.masked.load.nxv4f32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x float>)

define <vscale x 4 x double> @masked_load_nxv4f64(ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x double> @llvm.masked.load.nxv4f64(ptr %a, i32 8, <vscale x 4 x i1> %mask, <vscale x 4 x double> undef)
  ret <vscale x 4 x double> %load
}
declare <vscale x 4 x double> @llvm.masked.load.nxv4f64(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x double>)

define <vscale x 8 x bfloat> @masked_load_nxv8bf16(ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x bfloat> undef)
  ret <vscale x 8 x bfloat> %load
}
declare <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x bfloat>)

define <vscale x 8 x half> @masked_load_nxv8f16(ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x half> undef)
  ret <vscale x 8 x half> %load
}
declare <vscale x 8 x half> @llvm.masked.load.nxv8f16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x half>)

define <vscale x 8 x float> @masked_load_nxv8f32(ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x float> @llvm.masked.load.nxv8f32(ptr %a, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x float> undef)
  ret <vscale x 8 x float> %load
}
declare <vscale x 8 x float> @llvm.masked.load.nxv8f32(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x float>)

define <vscale x 8 x double> @masked_load_nxv8f64(ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x double> @llvm.masked.load.nxv8f64(ptr %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x double> undef)
  ret <vscale x 8 x double> %load
}
declare <vscale x 8 x double> @llvm.masked.load.nxv8f64(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x double>)

define <vscale x 16 x bfloat> @masked_load_nxv16bf16(ptr %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 16 x bfloat> @llvm.masked.load.nxv16bf16(ptr %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x bfloat> undef)
  ret <vscale x 16 x bfloat> %load
}
declare <vscale x 16 x bfloat> @llvm.masked.load.nxv16bf16(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x bfloat>)

define <vscale x 16 x half> @masked_load_nxv16f16(ptr %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 16 x half> @llvm.masked.load.nxv16f16(ptr %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x half> undef)
  ret <vscale x 16 x half> %load
}
declare <vscale x 16 x half> @llvm.masked.load.nxv16f16(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x half>)

define <vscale x 16 x float> @masked_load_nxv16f32(ptr %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 16 x float> @llvm.masked.load.nxv16f32(ptr %a, i32 4, <vscale x 16 x i1> %mask, <vscale x 16 x float> undef)
  ret <vscale x 16 x float> %load
}
declare <vscale x 16 x float> @llvm.masked.load.nxv16f32(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x float>)

define <vscale x 32 x bfloat> @masked_load_nxv32bf16(ptr %a, <vscale x 32 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv32bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 32 x bfloat> @llvm.masked.load.nxv32bf16(ptr %a, i32 2, <vscale x 32 x i1> %mask, <vscale x 32 x bfloat> undef)
  ret <vscale x 32 x bfloat> %load
}
declare <vscale x 32 x bfloat> @llvm.masked.load.nxv32bf16(ptr, i32, <vscale x 32 x i1>, <vscale x 32 x bfloat>)

define <vscale x 32 x half> @masked_load_nxv32f16(ptr %a, <vscale x 32 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 32 x half> @llvm.masked.load.nxv32f16(ptr %a, i32 2, <vscale x 32 x i1> %mask, <vscale x 32 x half> undef)
  ret <vscale x 32 x half> %load
}
declare <vscale x 32 x half> @llvm.masked.load.nxv32f16(ptr, i32, <vscale x 32 x i1>, <vscale x 32 x half>)