; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s

;
; STNT1B, STNT1W, STNT1H, STNT1D: base + 32-bit unscaled offset, zero (uxtw)
; extended to 64 bits.
;   e.g. stnt1h { z0.d }, p0, [z1.d, x0]
;

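; For reference (a sketch mirroring the @sstnt1w_s_uxtw test below): the
; uxtw scatter intrinsic takes the data, a predicate, a scalar base, and
; 32-bit unsigned vector offsets,
;   call void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i32(<vscale x 4 x i32> %data,
;                                                          <vscale x 4 x i1> %pg,
;                                                          ptr %base,
;                                                          <vscale x 4 x i32> %offsets)
; and is expected to select to a single non-temporal scatter store:
;   stnt1w { z0.s }, p0, [z1.s, x0]

; STNT1B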
define void @sstnt1b_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
; CHECK-LABEL: sstnt1b_s_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    stnt1b { z0.s }, p0, [z1.s, x0]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
  call void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i8(<vscale x 4 x i8> %data_trunc,
                                                        <vscale x 4 x i1> %pg,
                                                        ptr %base,
                                                        <vscale x 4 x i32> %offsets)
  ret void
}

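; STNT1H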
define void @sstnt1h_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
; CHECK-LABEL: sstnt1h_s_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    stnt1h { z0.s }, p0, [z1.s, x0]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
  call void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i16(<vscale x 4 x i16> %data_trunc,
                                                         <vscale x 4 x i1> %pg,
                                                         ptr %base,
                                                         <vscale x 4 x i32> %offsets)
  ret void
}

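; STNT1W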
define void @sstnt1w_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
; CHECK-LABEL: sstnt1w_s_uxtw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    stnt1w { z0.s }, p0, [z1.s, x0]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i32(<vscale x 4 x i32> %data,
                                                         <vscale x 4 x i1> %pg,
                                                         ptr %base,
                                                         <vscale x 4 x i32> %offsets)
  ret void
}

define void @sstnt1w_s_uxtw_float(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
; CHECK-LABEL: sstnt1w_s_uxtw_float:
; CHECK:       // %bb.0:
; CHECK-NEXT:    stnt1w { z0.s }, p0, [z1.s, x0]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4f32(<vscale x 4 x float> %data,
                                                         <vscale x 4 x i1> %pg,
                                                         ptr %base,
                                                         <vscale x 4 x i32> %offsets)
  ret void
}

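; STNT1B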
declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)

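; STNT1H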
declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)

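; STNT1W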
declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)

declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)