; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s
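
; Each @llvm.masked.load of a scalable FP vector below is expected to lower to
; a single vsetvli (SEW matching the element width, LMUL matching the scalable
; element count) followed by a masked unit-stride load (vle16/vle32/vle64).
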
define <vscale x 1 x half> @masked_load_nxv1f16(<vscale x 1 x half>* %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x half> @llvm.masked.load.nxv1f16(<vscale x 1 x half>* %a, i32 2, <vscale x 1 x i1> %mask, <vscale x 1 x half> undef)
  ret <vscale x 1 x half> %load
}
declare <vscale x 1 x half> @llvm.masked.load.nxv1f16(<vscale x 1 x half>*, i32, <vscale x 1 x i1>, <vscale x 1 x half>)

define <vscale x 1 x float> @masked_load_nxv1f32(<vscale x 1 x float>* %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x float> @llvm.masked.load.nxv1f32(<vscale x 1 x float>* %a, i32 4, <vscale x 1 x i1> %mask, <vscale x 1 x float> undef)
  ret <vscale x 1 x float> %load
}
declare <vscale x 1 x float> @llvm.masked.load.nxv1f32(<vscale x 1 x float>*, i32, <vscale x 1 x i1>, <vscale x 1 x float>)

define <vscale x 1 x double> @masked_load_nxv1f64(<vscale x 1 x double>* %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x double> @llvm.masked.load.nxv1f64(<vscale x 1 x double>* %a, i32 8, <vscale x 1 x i1> %mask, <vscale x 1 x double> undef)
  ret <vscale x 1 x double> %load
}
declare <vscale x 1 x double> @llvm.masked.load.nxv1f64(<vscale x 1 x double>*, i32, <vscale x 1 x i1>, <vscale x 1 x double>)

define <vscale x 2 x half> @masked_load_nxv2f16(<vscale x 2 x half>* %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(<vscale x 2 x half>* %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
  ret <vscale x 2 x half> %load
}
declare <vscale x 2 x half> @llvm.masked.load.nxv2f16(<vscale x 2 x half>*, i32, <vscale x 2 x i1>, <vscale x 2 x half>)

define <vscale x 2 x float> @masked_load_nxv2f32(<vscale x 2 x float>* %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(<vscale x 2 x float>* %a, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
  ret <vscale x 2 x float> %load
}
declare <vscale x 2 x float> @llvm.masked.load.nxv2f32(<vscale x 2 x float>*, i32, <vscale x 2 x i1>, <vscale x 2 x float>)

define <vscale x 2 x double> @masked_load_nxv2f64(<vscale x 2 x double>* %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(<vscale x 2 x double>* %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
  ret <vscale x 2 x double> %load
}
declare <vscale x 2 x double> @llvm.masked.load.nxv2f64(<vscale x 2 x double>*, i32, <vscale x 2 x i1>, <vscale x 2 x double>)

define <vscale x 4 x half> @masked_load_nxv4f16(<vscale x 4 x half>* %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(<vscale x 4 x half>* %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
  ret <vscale x 4 x half> %load
}
declare <vscale x 4 x half> @llvm.masked.load.nxv4f16(<vscale x 4 x half>*, i32, <vscale x 4 x i1>, <vscale x 4 x half>)

define <vscale x 4 x float> @masked_load_nxv4f32(<vscale x 4 x float>* %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(<vscale x 4 x float>* %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
  ret <vscale x 4 x float> %load
}
declare <vscale x 4 x float> @llvm.masked.load.nxv4f32(<vscale x 4 x float>*, i32, <vscale x 4 x i1>, <vscale x 4 x float>)

define <vscale x 4 x double> @masked_load_nxv4f64(<vscale x 4 x double>* %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x double> @llvm.masked.load.nxv4f64(<vscale x 4 x double>* %a, i32 8, <vscale x 4 x i1> %mask, <vscale x 4 x double> undef)
  ret <vscale x 4 x double> %load
}
declare <vscale x 4 x double> @llvm.masked.load.nxv4f64(<vscale x 4 x double>*, i32, <vscale x 4 x i1>, <vscale x 4 x double>)

define <vscale x 8 x half> @masked_load_nxv8f16(<vscale x 8 x half>* %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half>* %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x half> undef)
  ret <vscale x 8 x half> %load
}
declare <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half>*, i32, <vscale x 8 x i1>, <vscale x 8 x half>)

define <vscale x 8 x float> @masked_load_nxv8f32(<vscale x 8 x float>* %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x float> @llvm.masked.load.nxv8f32(<vscale x 8 x float>* %a, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x float> undef)
  ret <vscale x 8 x float> %load
}
declare <vscale x 8 x float> @llvm.masked.load.nxv8f32(<vscale x 8 x float>*, i32, <vscale x 8 x i1>, <vscale x 8 x float>)

define <vscale x 8 x double> @masked_load_nxv8f64(<vscale x 8 x double>* %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x double> @llvm.masked.load.nxv8f64(<vscale x 8 x double>* %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x double> undef)
  ret <vscale x 8 x double> %load
}
declare <vscale x 8 x double> @llvm.masked.load.nxv8f64(<vscale x 8 x double>*, i32, <vscale x 8 x i1>, <vscale x 8 x double>)

define <vscale x 16 x half> @masked_load_nxv16f16(<vscale x 16 x half>* %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 16 x half> @llvm.masked.load.nxv16f16(<vscale x 16 x half>* %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x half> undef)
  ret <vscale x 16 x half> %load
}
declare <vscale x 16 x half> @llvm.masked.load.nxv16f16(<vscale x 16 x half>*, i32, <vscale x 16 x i1>, <vscale x 16 x half>)

define <vscale x 16 x float> @masked_load_nxv16f32(<vscale x 16 x float>* %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 16 x float> @llvm.masked.load.nxv16f32(<vscale x 16 x float>* %a, i32 4, <vscale x 16 x i1> %mask, <vscale x 16 x float> undef)
  ret <vscale x 16 x float> %load
}
declare <vscale x 16 x float> @llvm.masked.load.nxv16f32(<vscale x 16 x float>*, i32, <vscale x 16 x i1>, <vscale x 16 x float>)

define <vscale x 32 x half> @masked_load_nxv32f16(<vscale x 32 x half>* %a, <vscale x 32 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 32 x half> @llvm.masked.load.nxv32f16(<vscale x 32 x half>* %a, i32 2, <vscale x 32 x i1> %mask, <vscale x 32 x half> undef)
  ret <vscale x 32 x half> %load
}
declare <vscale x 32 x half> @llvm.masked.load.nxv32f16(<vscale x 32 x half>*, i32, <vscale x 32 x i1>, <vscale x 32 x half>)