; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
; RUN: llc -O0 -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
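
;
; LD1B
;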

define <vscale x 16 x i8> @ld1b_i8(<vscale x 16 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1b_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred, ptr %addr)
  ret <vscale x 16 x i8> %res
}
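
; ld1b into a wider element type zero-extends each loaded byte; ld1sb is the
; sign-extending form. In IR this is a narrow ld1 call followed by zext/sext.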

define <vscale x 8 x i16> @ld1b_h(<vscale x 8 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1b_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pred, ptr %addr)
  %res = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %res
}

define <vscale x 8 x i16> @ld1sb_h(<vscale x 8 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1sb_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sb { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pred, ptr %addr)
  %res = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @ld1b_s(<vscale x 4 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1b_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pred, ptr %addr)
  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @ld1sb_s(<vscale x 4 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1sb_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pred, ptr %addr)
  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @ld1b_d(<vscale x 2 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1b_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pred, ptr %addr)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @ld1sb_d(<vscale x 2 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1sb_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pred, ptr %addr)
  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}
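
;
; LD1H
;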

define <vscale x 8 x i16> @ld1h_i16(<vscale x 8 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1h_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %pred, ptr %addr)
  ret <vscale x 8 x i16> %res
}

define <vscale x 8 x half> @ld1h_f16(<vscale x 8 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1h_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1> %pred, ptr %addr)
  ret <vscale x 8 x half> %res
}
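
; The bfloat variant additionally requires the +bf16 target feature; see
; attributes #0 at the end of the file.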

define <vscale x 8 x bfloat> @ld1h_bf16(<vscale x 8 x i1> %pred, ptr %addr) #0 {
; CHECK-LABEL: ld1h_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1.nxv8bf16(<vscale x 8 x i1> %pred, ptr %addr)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 4 x i32> @ld1h_s(<vscale x 4 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1h_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pred, ptr %addr)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @ld1sh_s(<vscale x 4 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1sh_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pred, ptr %addr)
  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @ld1h_d(<vscale x 2 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1h_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pred, ptr %addr)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @ld1sh_d(<vscale x 2 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1sh_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pred, ptr %addr)
  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}
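
;
; LD1W
;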

define <vscale x 4 x i32> @ld1w_i32(<vscale x 4 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1w_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %pred, ptr %addr)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x float> @ld1w_f32(<vscale x 4 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1w_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1> %pred, ptr %addr)
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x i64> @ld1w_d(<vscale x 2 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1w_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pred, ptr %addr)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @ld1sw_d(<vscale x 2 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1sw_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pred, ptr %addr)
  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}
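
;
; LD1D
;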

define <vscale x 2 x i64> @ld1d_i64(<vscale x 2 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1d_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1> %pred, ptr %addr)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x double> @ld1d_f64(<vscale x 2 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1d_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %pred, ptr %addr)
  ret <vscale x 2 x double> %res
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, ptr)

declare <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1>, ptr)
declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1>, ptr)
declare <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1>, ptr)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1.nxv8bf16(<vscale x 8 x i1>, ptr)

declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1>, ptr)
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1>, ptr)
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1>, ptr)
declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1>, ptr)

declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1>, ptr)
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1>, ptr)
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1>, ptr)
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1>, ptr)
declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1>, ptr)

; +bf16 is required for the bfloat version.
attributes #0 = { "target-features"="+sve,+bf16" }