; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve,+bf16 < %s | FileCheck %s
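
; The llvm.aarch64.sve.ld1rq.* intrinsics load 128 bits and replicate them
; across the whole scalable vector; the llvm.aarch64.sve.ldnt1.* intrinsics
; are non-temporal contiguous loads. Each test below is expected to select to
; a single SVE load (plus any address materialisation).

;
; LD1RQB
;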

define <vscale x 16 x i8> @ld1rqb_i8(<vscale x 16 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqb_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, ptr %addr)
  ret <vscale x 16 x i8> %res
}

define <vscale x 16 x i8> @ld1rqb_i8_imm(<vscale x 16 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqb_i8_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0, #16]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i8, ptr %addr, i8 16
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, ptr %ptr)
  ret <vscale x 16 x i8> %res
}

define <vscale x 16 x i8> @ld1rqb_i8_scalar(<vscale x 16 x i1> %pred, ptr %addr, i64 %idx) {
; CHECK-LABEL: ld1rqb_i8_scalar:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0, x1]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i8, ptr %addr, i64 %idx
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, ptr %ptr)
  ret <vscale x 16 x i8> %res
}
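
; The immediate addressing mode of ld1rq accepts multiples of 16 in the range
; [-128, 112]. The next four tests sit exactly on and just outside those
; bounds; out-of-range offsets are expected to be materialised in a register.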

define <vscale x 16 x i8> @ld1rqb_i8_imm_lower_bound(<vscale x 16 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqb_i8_imm_lower_bound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0, #-128]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i8, ptr %addr, i8 -128
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, ptr %ptr)
  ret <vscale x 16 x i8> %res
}

define <vscale x 16 x i8> @ld1rqb_i8_imm_upper_bound(<vscale x 16 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqb_i8_imm_upper_bound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0, #112]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i8, ptr %addr, i8 112
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, ptr %ptr)
  ret <vscale x 16 x i8> %res
}

define <vscale x 16 x i8> @ld1rqb_i8_imm_out_of_lower_bound(<vscale x 16 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqb_i8_imm_out_of_lower_bound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #-129
; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0, x8]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i8, ptr %addr, i64 -129
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, ptr %ptr)
  ret <vscale x 16 x i8> %res
}

define <vscale x 16 x i8> @ld1rqb_i8_imm_out_of_upper_bound(<vscale x 16 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqb_i8_imm_out_of_upper_bound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #113
; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0, x8]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i8, ptr %addr, i64 113
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, ptr %ptr)
  ret <vscale x 16 x i8> %res
}
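
; A 16-byte fixed-length load inserted into an undef scalable vector and then
; broadcast with dupq.lane 0 is expected to fold into a single ld1rq. Note the
; folded load uses an all-true predicate (ptrue) rather than the incoming
; %pred, which these tests leave unused.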

define <vscale x 16 x i8> @ld1rqb_i8_imm_dupqlane(<vscale x 8 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqb_i8_imm_dupqlane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0, #-16]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds <16 x i8>, ptr %addr, i16 -1
  %load = load <16 x i8>, ptr %ptr
  %1 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> %load, i64 0)
  %2 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %1, i64 0)
  ret <vscale x 16 x i8> %2
}

define <vscale x 16 x i8> @ld1rqb_i8_scalar_dupqlane(<vscale x 8 x i1> %pred, ptr %addr, i64 %idx) {
; CHECK-LABEL: ld1rqb_i8_scalar_dupqlane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0, x1]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i8, ptr %addr, i64 %idx
  %load = load <16 x i8>, ptr %ptr
  %1 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> %load, i64 0)
  %2 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %1, i64 0)
  ret <vscale x 16 x i8> %2
}
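
;
; LD1RQH
;

; For 16-, 32- and 64-bit elements the scalar-index form is expected to use a
; register offset scaled by the element size (lsl #1, #2 and #3 respectively).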

define <vscale x 8 x i16> @ld1rqh_i16(<vscale x 8 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqh_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1> %pred, ptr %addr)
  ret <vscale x 8 x i16> %res
}

define <vscale x 8 x half> @ld1rqh_f16(<vscale x 8 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqh_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1rq.nxv8f16(<vscale x 8 x i1> %pred, ptr %addr)
  ret <vscale x 8 x half> %res
}

define <vscale x 8 x i16> @ld1rqh_i16_imm(<vscale x 8 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqh_i16_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, #-64]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i16, ptr %addr, i16 -32
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1> %pred, ptr %ptr)
  ret <vscale x 8 x i16> %res
}

define <vscale x 8 x half> @ld1rqh_f16_imm(<vscale x 8 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqh_f16_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, #-16]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds half, ptr %addr, i16 -8
  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1rq.nxv8f16(<vscale x 8 x i1> %pred, ptr %ptr)
  ret <vscale x 8 x half> %res
}

define <vscale x 8 x i16> @ld1rqh_i16_scalar(<vscale x 8 x i1> %pred, ptr %addr, i64 %idx) {
; CHECK-LABEL: ld1rqh_i16_scalar:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, x1, lsl #1]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i16, ptr %addr, i64 %idx
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1> %pred, ptr %ptr)
  ret <vscale x 8 x i16> %res
}

define <vscale x 8 x half> @ld1rqh_f16_scalar(<vscale x 8 x i1> %pred, ptr %addr, i64 %idx) {
; CHECK-LABEL: ld1rqh_f16_scalar:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, x1, lsl #1]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds half, ptr %addr, i64 %idx
  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1rq.nxv8f16(<vscale x 8 x i1> %pred, ptr %ptr)
  ret <vscale x 8 x half> %res
}

define <vscale x 8 x bfloat> @ld1rqh_bf16(<vscale x 8 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqh_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1rq.nxv8bf16(<vscale x 8 x i1> %pred, ptr %addr)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @ld1rqh_bf16_imm(<vscale x 8 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqh_bf16_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, #-16]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds bfloat, ptr %addr, i16 -8
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1rq.nxv8bf16(<vscale x 8 x i1> %pred, ptr %ptr)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x bfloat> @ld1rqh_bf16_scalar(<vscale x 8 x i1> %pred, ptr %addr, i64 %idx) {
; CHECK-LABEL: ld1rqh_bf16_scalar:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, x1, lsl #1]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %idx
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1rq.nxv8bf16(<vscale x 8 x i1> %pred, ptr %ptr)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x i16> @ld1rqh_i16_imm_dupqlane(<vscale x 8 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqh_i16_imm_dupqlane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, #-16]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds <8 x i16>, ptr %addr, i16 -1
  %load = load <8 x i16>, ptr %ptr
  %1 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> %load, i64 0)
  %2 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %1, i64 0)
  ret <vscale x 8 x i16> %2
}

define <vscale x 8 x i16> @ld1rqh_i16_scalar_dupqlane(<vscale x 8 x i1> %pred, ptr %addr, i64 %idx) {
; CHECK-LABEL: ld1rqh_i16_scalar_dupqlane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, x1, lsl #1]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i16, ptr %addr, i64 %idx
  %load = load <8 x i16>, ptr %ptr
  %1 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> %load, i64 0)
  %2 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %1, i64 0)
  ret <vscale x 8 x i16> %2
}

define <vscale x 8 x half> @ld1rqh_f16_imm_dupqlane(<vscale x 8 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqh_f16_imm_dupqlane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, #-16]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds <8 x half>, ptr %addr, i16 -1
  %load = load <8 x half>, ptr %ptr
  %1 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> %load, i64 0)
  %2 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %1, i64 0)
  ret <vscale x 8 x half> %2
}

define <vscale x 8 x half> @ld1rqh_f16_scalar_dupqlane(<vscale x 8 x i1> %pred, ptr %addr, i64 %idx) {
; CHECK-LABEL: ld1rqh_f16_scalar_dupqlane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, x1, lsl #1]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds half, ptr %addr, i64 %idx
  %load = load <8 x half>, ptr %ptr
  %1 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> %load, i64 0)
  %2 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %1, i64 0)
  ret <vscale x 8 x half> %2
}

define <vscale x 8 x bfloat> @ld1rqh_bf16_imm_dupqlane(<vscale x 8 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqh_bf16_imm_dupqlane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, #-16]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds <8 x bfloat>, ptr %addr, i16 -1
  %load = load <8 x bfloat>, ptr %ptr
  %1 = tail call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> %load, i64 0)
  %2 = tail call <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat> %1, i64 0)
  ret <vscale x 8 x bfloat> %2
}

define <vscale x 8 x bfloat> @ld1rqh_bf16_scalar_dupqlane(<vscale x 8 x i1> %pred, ptr %addr, i64 %idx) {
; CHECK-LABEL: ld1rqh_bf16_scalar_dupqlane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, x1, lsl #1]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %idx
  %load = load <8 x bfloat>, ptr %ptr
  %1 = tail call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> %load, i64 0)
  %2 = tail call <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat> %1, i64 0)
  ret <vscale x 8 x bfloat> %2
}

;
; LD1RQW
;

define <vscale x 4 x i32> @ld1rqw_i32(<vscale x 4 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqw_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1> %pred, ptr %addr)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x float> @ld1rqw_f32(<vscale x 4 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqw_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1rq.nxv4f32(<vscale x 4 x i1> %pred, ptr %addr)
  ret <vscale x 4 x float> %res
}

define <vscale x 4 x i32> @ld1rqw_i32_imm(<vscale x 4 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqw_i32_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, #112]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i32, ptr %addr, i32 28
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1> %pred, ptr %ptr)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x float> @ld1rqw_f32_imm(<vscale x 4 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqw_f32_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, #32]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds float, ptr %addr, i32 8
  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1rq.nxv4f32(<vscale x 4 x i1> %pred, ptr %ptr)
  ret <vscale x 4 x float> %res
}

define <vscale x 4 x i32> @ld1rqw_i32_scalar(<vscale x 4 x i1> %pred, ptr %base, i64 %idx) {
; CHECK-LABEL: ld1rqw_i32_scalar:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, x1, lsl #2]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i32, ptr %base, i64 %idx
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1> %pred, ptr %ptr)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x float> @ld1rqw_f32_scalar(<vscale x 4 x i1> %pred, ptr %base, i64 %idx) {
; CHECK-LABEL: ld1rqw_f32_scalar:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, x1, lsl #2]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds float, ptr %base, i64 %idx
  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1rq.nxv4f32(<vscale x 4 x i1> %pred, ptr %ptr)
  ret <vscale x 4 x float> %res
}

define <vscale x 4 x i32> @ld1rqw_i32_imm_dupqlane(<vscale x 4 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqw_i32_imm_dupqlane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, #16]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds <4 x i32>, ptr %addr, i32 1
  %load = load <4 x i32>, ptr %ptr
  %1 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> %load, i64 0)
  %2 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %1, i64 0)
  ret <vscale x 4 x i32> %2
}

define <vscale x 4 x i32> @ld1rqw_i32_scalar_dupqlane(<vscale x 4 x i1> %pred, ptr %addr, i64 %idx) {
; CHECK-LABEL: ld1rqw_i32_scalar_dupqlane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, x1, lsl #2]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i32, ptr %addr, i64 %idx
  %load = load <4 x i32>, ptr %ptr
  %1 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> %load, i64 0)
  %2 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %1, i64 0)
  ret <vscale x 4 x i32> %2
}

define <vscale x 4 x float> @ld1rqw_f32_imm_dupqlane(<vscale x 4 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqw_f32_imm_dupqlane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, #16]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds <4 x float>, ptr %addr, i32 1
  %load = load <4 x float>, ptr %ptr
  %1 = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %load, i64 0)
  %2 = tail call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %1, i64 0)
  ret <vscale x 4 x float> %2
}

define <vscale x 4 x float> @ld1rqw_f32_scalar_dupqlane(<vscale x 4 x i1> %pred, ptr %addr, i64 %idx) {
; CHECK-LABEL: ld1rqw_f32_scalar_dupqlane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, x1, lsl #2]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds float, ptr %addr, i64 %idx
  %load = load <4 x float>, ptr %ptr
  %1 = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %load, i64 0)
  %2 = tail call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %1, i64 0)
  ret <vscale x 4 x float> %2
}

;
; LD1RQD
;

define <vscale x 2 x i64> @ld1rqd_i64(<vscale x 2 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqd_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1> %pred, ptr %addr)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x double> @ld1rqd_f64(<vscale x 2 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqd_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1rq.nxv2f64(<vscale x 2 x i1> %pred, ptr %addr)
  ret <vscale x 2 x double> %res
}

define <vscale x 2 x i64> @ld1rqd_i64_imm(<vscale x 2 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqd_i64_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, #64]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i64, ptr %addr, i64 8
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1> %pred, ptr %ptr)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x double> @ld1rqd_f64_imm(<vscale x 2 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqd_f64_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, #-128]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds double, ptr %addr, i64 -16
  %res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1rq.nxv2f64(<vscale x 2 x i1> %pred, ptr %ptr)
  ret <vscale x 2 x double> %res
}

define <vscale x 2 x i64> @ld1rqd_i64_scalar(<vscale x 2 x i1> %pred, ptr %base, i64 %idx) {
; CHECK-LABEL: ld1rqd_i64_scalar:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, x1, lsl #3]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i64, ptr %base, i64 %idx
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1> %pred, ptr %ptr)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x double> @ld1rqd_f64_scalar(<vscale x 2 x i1> %pred, ptr %base, i64 %idx) {
; CHECK-LABEL: ld1rqd_f64_scalar:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, x1, lsl #3]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds double, ptr %base, i64 %idx
  %res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1rq.nxv2f64(<vscale x 2 x i1> %pred, ptr %ptr)
  ret <vscale x 2 x double> %res
}

define <vscale x 2 x i64> @ld1rqd_i64_imm_dupqlane(<vscale x 2 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqd_i64_imm_dupqlane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, #16]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds <2 x i64>, ptr %addr, i64 1
  %load = load <2 x i64>, ptr %ptr
  %1 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> %load, i64 0)
  %2 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %1, i64 0)
  ret <vscale x 2 x i64> %2
}

define <vscale x 2 x i64> @ld1rqd_i64_scalar_dupqlane(<vscale x 2 x i1> %pred, ptr %addr, i64 %idx) {
; CHECK-LABEL: ld1rqd_i64_scalar_dupqlane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, x1, lsl #3]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i64, ptr %addr, i64 %idx
  %load = load <2 x i64>, ptr %ptr
  %1 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> %load, i64 0)
  %2 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %1, i64 0)
  ret <vscale x 2 x i64> %2
}

define <vscale x 2 x double> @ld1rqd_f64_imm_dupqlane(<vscale x 2 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ld1rqd_f64_imm_dupqlane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, #16]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds <2 x double>, ptr %addr, i64 1
  %load = load <2 x double>, ptr %ptr
  %1 = tail call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> %load, i64 0)
  %2 = tail call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> %1, i64 0)
  ret <vscale x 2 x double> %2
}

define <vscale x 2 x double> @ld1rqd_f64_scalar_dupqlane(<vscale x 2 x i1> %pred, ptr %addr, i64 %idx) {
; CHECK-LABEL: ld1rqd_f64_scalar_dupqlane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, x1, lsl #3]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds double, ptr %addr, i64 %idx
  %load = load <2 x double>, ptr %ptr
  %1 = tail call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> %load, i64 0)
  %2 = tail call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> %1, i64 0)
  ret <vscale x 2 x double> %2
}
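
;
; LDNT1B
;

; The ldnt1 intrinsics are non-temporal contiguous loads with no additional
; addressing forms tested here; each is expected to select directly to the
; corresponding ldnt1{b,h,w,d} instruction.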

define <vscale x 16 x i8> @ldnt1b_i8(<vscale x 16 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ldnt1b_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldnt1b { z0.b }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1> %pred, ptr %addr)
  ret <vscale x 16 x i8> %res
}

;
; LDNT1H
;

define <vscale x 8 x i16> @ldnt1h_i16(<vscale x 8 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ldnt1h_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldnt1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1> %pred, ptr %addr)
  ret <vscale x 8 x i16> %res
}

define <vscale x 8 x half> @ldnt1h_f16(<vscale x 8 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ldnt1h_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldnt1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ldnt1.nxv8f16(<vscale x 8 x i1> %pred, ptr %addr)
  ret <vscale x 8 x half> %res
}

define <vscale x 8 x bfloat> @ldnt1h_bf16(<vscale x 8 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ldnt1h_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldnt1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnt1.nxv8bf16(<vscale x 8 x i1> %pred, ptr %addr)
  ret <vscale x 8 x bfloat> %res
}

;
; LDNT1W
;

define <vscale x 4 x i32> @ldnt1w_i32(<vscale x 4 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ldnt1w_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldnt1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1> %pred, ptr %addr)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x float> @ldnt1w_f32(<vscale x 4 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ldnt1w_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldnt1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.nxv4f32(<vscale x 4 x i1> %pred, ptr %addr)
  ret <vscale x 4 x float> %res
}

;
; LDNT1D
;

define <vscale x 2 x i64> @ldnt1d_i64(<vscale x 2 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ldnt1d_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldnt1d { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.nxv2i64(<vscale x 2 x i1> %pred, ptr %addr)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x double> @ldnt1d_f64(<vscale x 2 x i1> %pred, ptr %addr) {
; CHECK-LABEL: ldnt1d_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldnt1d { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.nxv2f64(<vscale x 2 x i1> %pred, ptr %addr)
  ret <vscale x 2 x double> %res
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1>, ptr)
declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1>, ptr)
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1>, ptr)
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1>, ptr)
declare <vscale x 8 x half> @llvm.aarch64.sve.ld1rq.nxv8f16(<vscale x 8 x i1>, ptr)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1rq.nxv8bf16(<vscale x 8 x i1>, ptr)
declare <vscale x 4 x float> @llvm.aarch64.sve.ld1rq.nxv4f32(<vscale x 4 x i1>, ptr)
declare <vscale x 2 x double> @llvm.aarch64.sve.ld1rq.nxv2f64(<vscale x 2 x i1>, ptr)

declare <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1>, ptr)
declare <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1>, ptr)
declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1>, ptr)
declare <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.nxv2i64(<vscale x 2 x i1>, ptr)
declare <vscale x 8 x half> @llvm.aarch64.sve.ldnt1.nxv8f16(<vscale x 8 x i1>, ptr)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnt1.nxv8bf16(<vscale x 8 x i1>, ptr)
declare <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.nxv4f32(<vscale x 4 x i1>, ptr)
declare <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.nxv2f64(<vscale x 2 x i1>, ptr)

declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64>, <2 x i64>, i64)
declare <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double>, <2 x double>, i64)
declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32>, <4 x i32>, i64)
declare <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float>, <4 x float>, i64)
declare <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16>, <8 x i16>, i64)
declare <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half>, <8 x half>, i64)
declare <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat>, <8 x bfloat>, i64)
declare <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8>, <16 x i8>, i64)

declare <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64>, i64)
declare <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double>, i64)
declare <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32>, i64)
declare <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float>, i64)
declare <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16>, i64)
declare <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half>, i64)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat>, i64)
declare <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8>, i64)