; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
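
; Test lowering of the llvm.aarch64.sve.ld1.gather.* intrinsics with 64-bit
; unscaled vector offsets, including offsets produced by the sxtw/uxtw
; intrinsics, which should fold into the gather's addressing mode.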

;
; LD1B, LD1W, LD1H, LD1D: base + 64-bit unscaled offset
;   e.g. ld1h { z0.d }, p0/z, [x0, z0.d]
;

define <vscale x 2 x i64> @gld1b_d(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1b_d:
; CHECK: ld1b { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1h_d(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1h_d:
; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1w_d(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
; CHECK-LABEL: gld1w_d:
; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1d_d(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_d:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.nxv2i64(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x double> @gld1d_d_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_d_double:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.nxv2f64(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b)
  ret <vscale x 2 x double> %load
}

;
; LD1SB, LD1SW, LD1SH: base + 64-bit unscaled offset
;   e.g. ld1sh { z0.d }, p0/z, [x0, z0.d]
;

define <vscale x 2 x i64> @gld1sb_d(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1sb_d:
; CHECK: ld1sb { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b)
  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1sh_d(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1sh_d:
; CHECK: ld1sh { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b)
  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1sw_d(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
; CHECK-LABEL: gld1sw_d:
; CHECK: ld1sw { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets)
  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

;
; LD1B, LD1W, LD1H, LD1D: base + 64-bit sxtw'd unscaled offset
;   e.g. ld1h { z0.d }, p0/z, [x0, z0.d, sxtw]
;

define <vscale x 2 x i64> @gld1b_d_sxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1b_d_sxtw:
; CHECK: ld1b { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %sxtw)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1h_d_sxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1h_d_sxtw:
; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %sxtw)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1w_d_sxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
; CHECK-LABEL: gld1w_d_sxtw:
; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %offsets)
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %sxtw)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1d_d_sxtw(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_d_sxtw:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.nxv2i64(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %sxtw)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x double> @gld1d_d_double_sxtw(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_d_double_sxtw:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.nxv2f64(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %sxtw)
  ret <vscale x 2 x double> %load
}

;
; LD1SB, LD1SW, LD1SH: base + 64-bit sxtw'd unscaled offset
;   e.g. ld1sh { z0.d }, p0/z, [x0, z0.d, sxtw]
;

define <vscale x 2 x i64> @gld1sb_d_sxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1sb_d_sxtw:
; CHECK: ld1sb { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %sxtw)
  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1sh_d_sxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1sh_d_sxtw:
; CHECK: ld1sh { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %sxtw)
  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1sw_d_sxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
; CHECK-LABEL: gld1sw_d_sxtw:
; CHECK: ld1sw { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %offsets)
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %sxtw)
  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

;
; LD1B, LD1W, LD1H, LD1D: base + 64-bit uxtw'd unscaled offset
;   e.g. ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
;

define <vscale x 2 x i64> @gld1b_d_uxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1b_d_uxtw:
; CHECK: ld1b { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %uxtw)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1h_d_uxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1h_d_uxtw:
; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %uxtw)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1w_d_uxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
; CHECK-LABEL: gld1w_d_uxtw:
; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %offsets)
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %uxtw)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1d_d_uxtw(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_d_uxtw:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.nxv2i64(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %uxtw)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x double> @gld1d_d_double_uxtw(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_d_double_uxtw:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.nxv2f64(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %uxtw)
  ret <vscale x 2 x double> %load
}

;
; LD1SB, LD1SW, LD1SH: base + 64-bit uxtw'd unscaled offset
;   e.g. ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw]
;

define <vscale x 2 x i64> @gld1sb_d_uxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1sb_d_uxtw:
; CHECK: ld1sb { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %uxtw)
  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1sh_d_uxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1sh_d_uxtw:
; CHECK: ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %uxtw)
  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1sw_d_uxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
; CHECK-LABEL: gld1sw_d_uxtw:
; CHECK: ld1sw { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %offsets)
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %uxtw)
  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1>, i8*, <vscale x 2 x i64>)
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i64>)
declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.nxv2f64(<vscale x 2 x i1>, double*, <vscale x 2 x i64>)

declare <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)