; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
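
; Range checks: the contiguous loads tested here use the scalar-plus-immediate
; addressing mode, whose signed 4-bit immediate is scaled by the vector length
; (mul vl), i.e. offsets in the range [-8, 7]. Offsets outside that range
; cannot use the immediate form.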

define <vscale x 16 x i8> @ld1b_lower_bound(<vscale x 16 x i8>* %a) {
; CHECK-LABEL: ld1b_lower_bound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, #-8, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 -8
  %load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
  ret <vscale x 16 x i8> %load
}

define <vscale x 16 x i8> @ld1b_inbound(<vscale x 16 x i8>* %a) {
; CHECK-LABEL: ld1b_inbound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 2
  %load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
  ret <vscale x 16 x i8> %load
}

define <vscale x 16 x i8> @ld1b_upper_bound(<vscale x 16 x i8>* %a) {
; CHECK-LABEL: ld1b_upper_bound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, #7, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 7
  %load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
  ret <vscale x 16 x i8> %load
}
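
; Offsets outside [-8, 7] do not fit the immediate form, so the byte offset is
; materialised with rdvl and the load falls back to reg+reg addressing.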

define <vscale x 16 x i8> @ld1b_out_of_upper_bound(<vscale x 16 x i8>* %a) {
; CHECK-LABEL: ld1b_out_of_upper_bound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    rdvl x8, #8
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 8
  %load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
  ret <vscale x 16 x i8> %load
}

define <vscale x 16 x i8> @ld1b_out_of_lower_bound(<vscale x 16 x i8>* %a) {
; CHECK-LABEL: ld1b_out_of_lower_bound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    rdvl x8, #-9
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 -9
  %load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
  ret <vscale x 16 x i8> %load
}
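
; The same immediate addressing form is available for the wider element types
; below; the offset is still expressed in multiples of the vector length.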

define <vscale x 8 x i16> @ld1h_inbound(<vscale x 8 x i16>* %a) {
; CHECK-LABEL: ld1h_inbound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, #-2, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %a, i64 -2
  %load = load <vscale x 8 x i16>, <vscale x 8 x i16>* %base
  ret <vscale x 8 x i16> %load
}

define <vscale x 4 x i32> @ld1s_inbound(<vscale x 4 x i32>* %a) {
; CHECK-LABEL: ld1s_inbound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, #4, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %a, i64 4
  %load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %base
  ret <vscale x 4 x i32> %load
}

define <vscale x 2 x i64> @ld1d_inbound(<vscale x 2 x i64>* %a) {
; CHECK-LABEL: ld1d_inbound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, #6, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %a, i64 6
  %load = load <vscale x 2 x i64>, <vscale x 2 x i64>* %base
  ret <vscale x 2 x i64> %load
}
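
; Vector types with a non-power-of-two element count (nxv6/nxv12) are not legal,
; so the volatile load is split into loads of the legal parts.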

define void @load_nxv6f16(<vscale x 6 x half>* %a) {
; CHECK-LABEL: load_nxv6f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ptrue p1.s
; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1h { z0.s }, p1/z, [x0]
; CHECK-NEXT:    ret
  %val = load volatile <vscale x 6 x half>, <vscale x 6 x half>* %a
  ret void
}

define void @load_nxv6f32(<vscale x 6 x float>* %a) {
; CHECK-LABEL: load_nxv6f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ptrue p1.s
; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1w { z0.s }, p1/z, [x0]
; CHECK-NEXT:    ret
  %val = load volatile <vscale x 6 x float>, <vscale x 6 x float>* %a
  ret void
}

define void @load_nxv12f16(<vscale x 12 x half>* %a) {
; CHECK-LABEL: load_nxv12f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ptrue p1.h
; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1h { z0.h }, p1/z, [x0]
; CHECK-NEXT:    ret
  %val = load volatile <vscale x 12 x half>, <vscale x 12 x half>* %a
  ret void
}