; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; LD1B, LD1W, LD1H, LD1D: base + 32-bit unscaled offset, sign (sxtw) or zero
; (uxtw) extended to 64 bits.
;   e.g. ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
;
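; Note: each test below gathers a narrower element type and then zero-extends
; the result to the element width of the return vector; the CHECK lines verify
; that the extend folds into the gather itself (a plain ld1b/ld1h/ld1w) rather
; than being emitted as a separate extend instruction after the load.
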
define <vscale x 4 x i32> @gld1b_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1b_s_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @gld1b_s_sxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1b_s_sxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1b_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1b_d_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1b_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1b_d_sxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x i32> @gld1h_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1h_s_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @gld1h_s_sxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1h_s_sxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1h_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1h_d_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1h_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1h_d_sxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x i32> @gld1w_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %load
}

define <vscale x 4 x i32> @gld1w_s_sxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_sxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %load
}

define <vscale x 2 x i64> @gld1w_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1w_d_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i32(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1w_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1w_d_sxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}
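
; ld1w is also used for the 32-bit floating-point gathers below: the lanes are
; loaded bit-for-bit, so no extend is involved and the intrinsic result is
; returned directly.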
define <vscale x 4 x float> @gld1w_s_uxtw_float(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_uxtw_float:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4f32(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %load
}

define <vscale x 4 x float> @gld1w_s_sxtw_float(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_sxtw_float:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4f32(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %load
}

define <vscale x 2 x i64> @gld1d_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1d_d_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i64(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x i64> @gld1d_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1d_d_sxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i64(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b)
  ret <vscale x 2 x i64> %load
}
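
; Likewise, ld1d serves for the 64-bit double gathers that follow; the element
; already fills the lane, so there is nothing to extend.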
define <vscale x 2 x double> @gld1d_d_uxtw_double(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1d_d_uxtw_double:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2f64(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b)
  ret <vscale x 2 x double> %load
}

define <vscale x 2 x double> @gld1d_d_sxtw_double(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1d_d_sxtw_double:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2f64(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b)
  ret <vscale x 2 x double> %load
}

;
; LD1SB, LD1SW, LD1SH: base + 32-bit unscaled offset, sign (sxtw) or zero
; (uxtw) extended to 64 bits.
;   e.g. ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw]
;
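; Note: same pattern as the first half of the file, but the loaded value is
; sign-extended (sext in the IR), so codegen must select the signed gather
; forms ld1sb/ld1sh/ld1sw instead.
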
define <vscale x 4 x i32> @gld1sb_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1sb_s_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b)
  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @gld1sb_s_sxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1sb_s_sxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b)
  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1sb_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1sb_d_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b)
  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1sb_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1sb_d_sxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b)
  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x i32> @gld1sh_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1sh_s_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b)
  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @gld1sh_s_sxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1sh_s_sxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b)
  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1sh_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1sh_d_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b)
  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1sh_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1sh_d_sxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b)
  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1sw_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1sw_d_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i32(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b)
  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1sw_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1sw_d_sxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b)
  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}
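
; Intrinsic declarations. Each gather takes a predicate, a scalar base
; pointer, and a vector of 32-bit offsets; the sxtw/uxtw infix in the
; intrinsic name selects how those offsets are extended to 64 bits.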
declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)

declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)

declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i32(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)

declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4f32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4f32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)

declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)

declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2f64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2f64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)