; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s

; LDNT1B, LDNT1W, LDNT1H, LDNT1D: base + 32-bit unscaled offsets, zero (uxtw)
; extended to 64 bits.
;   e.g. ldnt1h { z0.s }, p0/z, [z0.s, x0]

define <vscale x 4 x i32> @gldnt1b_s_uxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gldnt1b_s_uxtw:
; CHECK: ldnt1b { z0.s }, p0/z, [z0.s, x0]
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8(<vscale x 4 x i1> %pg,
                                                                             i8* %base,
                                                                             <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @gldnt1h_s_uxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gldnt1h_s_uxtw:
; CHECK: ldnt1h { z0.s }, p0/z, [z0.s, x0]
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %pg,
                                                                               i16* %base,
                                                                               <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @gldnt1w_s_uxtw(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gldnt1w_s_uxtw:
; CHECK: ldnt1w { z0.s }, p0/z, [z0.s, x0]
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i32(<vscale x 4 x i1> %pg,
                                                                               i32* %base,
                                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %load
}

define <vscale x 4 x float> @gldnt1w_s_uxtw_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gldnt1w_s_uxtw_float:
; CHECK: ldnt1w { z0.s }, p0/z, [z0.s, x0]
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4f32(<vscale x 4 x i1> %pg,
                                                                                 float* %base,
                                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %load
}

; LDNT1SB, LDNT1SW, LDNT1SH: base + 32-bit unscaled offsets, zero (uxtw)
; extended to 64 bits.
;   e.g. ldnt1sh { z0.s }, p0/z, [z0.s, x0]

define <vscale x 4 x i32> @gldnt1sb_s_uxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gldnt1sb_s_uxtw:
; CHECK: ldnt1sb { z0.s }, p0/z, [z0.s, x0]
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8(<vscale x 4 x i1> %pg,
                                                                             i8* %base,
                                                                             <vscale x 4 x i32> %b)
  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @gldnt1sh_s_uxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gldnt1sh_s_uxtw:
; CHECK: ldnt1sh { z0.s }, p0/z, [z0.s, x0]
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %pg,
                                                                               i16* %base,
                                                                               <vscale x 4 x i32> %b)
  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

declare <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8(<vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
declare <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.sxtw.nxv4i8(<vscale x 4 x i1>, i8*, <vscale x 4 x i32>)

declare <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.sxtw.nxv4i16(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
declare <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)

declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.sxtw.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)

declare <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.sxtw.nxv4f32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)
declare <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4f32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)