; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s
;
; LDNT1B, LDNT1W, LDNT1H, LDNT1D: vector base + scalar offset
;   ldnt1b { z0.s }, p0/z, [z0.s, x0]
;
; Zero-extending i8 gather into 32-bit lanes: ldnt1b with .s vector base.
define <vscale x 4 x i32> @gldnt1b_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: gldnt1b_s:
; CHECK: ldnt1b { z0.s }, p0/z, [z0.s, x0]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                             <vscale x 4 x i32> %base,
                                                                                             i64 %offset)
  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}
; Zero-extending i8 gather into 64-bit lanes: ldnt1b with .d vector base.
define <vscale x 2 x i64> @gldnt1b_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: gldnt1b_d:
; CHECK: ldnt1b { z0.d }, p0/z, [z0.d, x0]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                             <vscale x 2 x i64> %base,
                                                                                             i64 %offset)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}
; Zero-extending i16 gather into 32-bit lanes: ldnt1h with .s vector base.
; Note: intrinsic mangling corrected from "nxv416" to "nxv4i16" — overloaded
; intrinsic suffixes must match the overloaded types exactly.
define <vscale x 4 x i32> @gldnt1h_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: gldnt1h_s:
; CHECK: ldnt1h { z0.s }, p0/z, [z0.s, x0]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                               <vscale x 4 x i32> %base,
                                                                                               i64 %offset)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}
; Zero-extending i16 gather into 64-bit lanes: ldnt1h with .d vector base.
define <vscale x 2 x i64> @gldnt1h_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: gldnt1h_d:
; CHECK: ldnt1h { z0.d }, p0/z, [z0.d, x0]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                               <vscale x 2 x i64> %base,
                                                                                               i64 %offset)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}
; Full-width i32 gather: ldnt1w with .s vector base, no extension needed.
define <vscale x 4 x i32> @gldnt1w_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: gldnt1w_s:
; CHECK: ldnt1w { z0.s }, p0/z, [z0.s, x0]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                               <vscale x 4 x i32> %base,
                                                                                               i64 %offset)
  ret <vscale x 4 x i32> %load
}
; Full-width float gather: same ldnt1w encoding, floating-point result type.
define <vscale x 4 x float> @gldnt1w_s_float(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: gldnt1w_s_float:
; CHECK: ldnt1w { z0.s }, p0/z, [z0.s, x0]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                                 <vscale x 4 x i32> %base,
                                                                                                 i64 %offset)
  ret <vscale x 4 x float> %load
}
; Zero-extending i32 gather into 64-bit lanes: ldnt1w with .d vector base.
define <vscale x 2 x i64> @gldnt1w_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: gldnt1w_d:
; CHECK: ldnt1w { z0.d }, p0/z, [z0.d, x0]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                               <vscale x 2 x i64> %base,
                                                                                               i64 %offset)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}
; Full-width i64 gather: ldnt1d with .d vector base, no extension needed.
define <vscale x 2 x i64> @gldnt1d_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: gldnt1d_d:
; CHECK: ldnt1d { z0.d }, p0/z, [z0.d, x0]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                               <vscale x 2 x i64> %base,
                                                                                               i64 %offset)
  ret <vscale x 2 x i64> %load
}
; Full-width double gather: same ldnt1d encoding, floating-point result type.
define <vscale x 2 x double> @gldnt1d_d_double(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: gldnt1d_d_double:
; CHECK: ldnt1d { z0.d }, p0/z, [z0.d, x0]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                                  <vscale x 2 x i64> %base,
                                                                                                  i64 %offset)
  ret <vscale x 2 x double> %load
}
;
; LDNT1SB, LDNT1SW, LDNT1SH, LDNT1SD: vector base + scalar offset
;   ldnt1sb { z0.s }, p0/z, [z0.s, x0]
;
; Sign-extending i8 gather into 32-bit lanes: sext of the i8 load selects ldnt1sb.
define <vscale x 4 x i32> @gldnt1sb_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: gldnt1sb_s:
; CHECK: ldnt1sb { z0.s }, p0/z, [z0.s, x0]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                             <vscale x 4 x i32> %base,
                                                                                             i64 %offset)
  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}
; Sign-extending i8 gather into 64-bit lanes: sext of the i8 load selects ldnt1sb.
define <vscale x 2 x i64> @gldnt1sb_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: gldnt1sb_d:
; CHECK: ldnt1sb { z0.d }, p0/z, [z0.d, x0]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                             <vscale x 2 x i64> %base,
                                                                                             i64 %offset)
  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}
; Sign-extending i16 gather into 32-bit lanes: sext of the i16 load selects ldnt1sh.
; Note: intrinsic mangling corrected from "nxv416" to "nxv4i16" — overloaded
; intrinsic suffixes must match the overloaded types exactly.
define <vscale x 4 x i32> @gldnt1sh_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: gldnt1sh_s:
; CHECK: ldnt1sh { z0.s }, p0/z, [z0.s, x0]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                               <vscale x 4 x i32> %base,
                                                                                               i64 %offset)
  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}
; Sign-extending i16 gather into 64-bit lanes: sext of the i16 load selects ldnt1sh.
define <vscale x 2 x i64> @gldnt1sh_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: gldnt1sh_d:
; CHECK: ldnt1sh { z0.d }, p0/z, [z0.d, x0]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                               <vscale x 2 x i64> %base,
                                                                                               i64 %offset)
  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}
; Sign-extending i32 gather into 64-bit lanes: sext of the i32 load selects ldnt1sw.
define <vscale x 2 x i64> @gldnt1sw_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: gldnt1sw_d:
; CHECK: ldnt1sw { z0.d }, p0/z, [z0.d, x0]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                               <vscale x 2 x i64> %base,
                                                                                               i64 %offset)
  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}
; LDNT1B/LDNT1SB
declare <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare <vscale x 2 x i8> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

; LDNT1H/LDNT1SH
; Mangling corrected from "nxv416" to "nxv4i16" to match the overloaded
; <vscale x 4 x i16> result type.
declare <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

; LDNT1W/LDNT1SW
declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)
declare <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)

; LDNT1D
declare <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)
declare <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)