; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s

; LDNT1B, LDNT1W, LDNT1H, LDNT1D: base + 32-bit unscaled offsets, zero (uxtw)
; extended to 64 bits.
;   e.g. ldnt1h { z0.s }, p0/z, [z0.s, x0]
;
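; At the IR level these intrinsics take a scalar base pointer and a vector of
; unsigned 32-bit byte offsets, i.e. address[i] = %base + zext(offset[i]).
; The LDNT1 gather instruction form is "vector plus scalar", so selection
; relies on the addition commuting: the extended offsets land in the
; Z-register operand and the base pointer in the X register, as the CHECK
; lines below show.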
; LDNT1B
define <vscale x 4 x i32> @gldnt1b_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gldnt1b_s_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldnt1b { z0.s }, p0/z, [z0.s, x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8(<vscale x 4 x i1> %pg,
                                                                            ptr %base,
                                                                            <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

; LDNT1H
define <vscale x 4 x i32> @gldnt1h_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gldnt1h_s_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldnt1h { z0.s }, p0/z, [z0.s, x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %pg,
                                                                              ptr %base,
                                                                              <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

; LDNT1W
define <vscale x 4 x i32> @gldnt1w_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gldnt1w_s_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldnt1w { z0.s }, p0/z, [z0.s, x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i32(<vscale x 4 x i1> %pg,
                                                                              ptr %base,
                                                                              <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %load
}

define <vscale x 4 x float> @gldnt1w_s_uxtw_float(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gldnt1w_s_uxtw_float:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldnt1w { z0.s }, p0/z, [z0.s, x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4f32(<vscale x 4 x i1> %pg,
                                                                                ptr %base,
                                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %load
}

; LDNT1SB, LDNT1SW, LDNT1SH: base + 32-bit unscaled offsets, zero (uxtw)
; extended to 64 bits.
;   e.g. ldnt1sh { z0.s }, p0/z, [z0.s, x0]
;
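; As above, there is no separate "signed" intrinsic: these tests call the
; same uxtw gather intrinsics and sign-extend the loaded value instead;
; instruction selection folds that sext into the load, which is what selects
; the LDNT1SB/LDNT1SH encodings.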
; LDNT1SB
define <vscale x 4 x i32> @gldnt1sb_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gldnt1sb_s_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldnt1sb { z0.s }, p0/z, [z0.s, x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8(<vscale x 4 x i1> %pg,
                                                                            ptr %base,
                                                                            <vscale x 4 x i32> %b)
  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

; LDNT1SH
define <vscale x 4 x i32> @gldnt1sh_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gldnt1sh_s_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldnt1sh { z0.s }, p0/z, [z0.s, x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %pg,
                                                                              ptr %base,
                                                                              <vscale x 4 x i32> %b)
  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

; LDNT1B/LDNT1SB
declare <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
declare <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.sxtw.nxv4i8(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)

; LDNT1H/LDNT1SH
declare <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.sxtw.nxv4i16(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
declare <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)

; LDNT1W/LDNT1SW
declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.sxtw.nxv4i32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)

declare <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.sxtw.nxv4f32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
declare <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4f32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)