; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; ST1B, ST1W, ST1H, ST1D: vector base + immediate offset
;   e.g. st1h { z0.s }, p0, [z1.s, #16]
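;
; Note: the immediate in this addressing form must be a multiple of the
; element size in the range [0, 31 * esize], i.e. 0-31 for ST1B, 0-62
; for ST1H, 0-124 for ST1W and 0-248 for ST1D; anything else is lowered
; via the register-offset form tested further below.
;
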
define void @sst1b_s_imm_offset(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1b_s_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1b { z0.s }, p0, [z1.s, #16]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i8> %data_trunc,
                                                                       <vscale x 4 x i1> %pg,
                                                                       <vscale x 4 x i32> %base,
                                                                       i64 16)
  ret void
}

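; Note: the intrinsic takes the data operand at the narrow element type
; (nxv4i8 above), so the trunc is expected to fold into the truncating
; scatter store itself; no separate narrowing instructions appear in
; the checks.
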
define void @sst1b_d_imm_offset(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1b_d_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1b { z0.d }, p0, [z1.d, #16]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i8> %data_trunc,
                                                                       <vscale x 2 x i1> %pg,
                                                                       <vscale x 2 x i64> %base,
                                                                       i64 16)
  ret void
}

define void @sst1h_s_imm_offset(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1h_s_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.s }, p0, [z1.s, #16]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16> %data_trunc,
                                                                        <vscale x 4 x i1> %pg,
                                                                        <vscale x 4 x i32> %base,
                                                                        i64 16)
  ret void
}

define void @sst1h_d_imm_offset(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1h_d_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.d }, p0, [z1.d, #16]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i16> %data_trunc,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 16)
  ret void
}

define void @sst1w_s_imm_offset(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1w_s_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1w { z0.s }, p0, [z1.s, #16]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i32> %data,
                                                                        <vscale x 4 x i1> %pg,
                                                                        <vscale x 4 x i32> %base,
                                                                        i64 16)
  ret void
}

define void @sst1w_d_imm_offset(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1w_d_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1w { z0.d }, p0, [z1.d, #16]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i32> %data_trunc,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 16)
  ret void
}

define void @sst1w_s_imm_offset_float(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1w_s_imm_offset_float:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1w { z0.s }, p0, [z1.s, #16]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x float> %data,
                                                                        <vscale x 4 x i1> %pg,
                                                                        <vscale x 4 x i32> %base,
                                                                        i64 16)
  ret void
}

define void @sst1d_d_imm_offset(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1d_d_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1d { z0.d }, p0, [z1.d, #16]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> %data,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 16)
  ret void
}

define void @sst1d_d_imm_offset_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1d_d_imm_offset_double:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1d { z0.d }, p0, [z1.d, #16]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double> %data,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 16)
  ret void
}

;
; ST1B, ST1W, ST1H, ST1D: vector base + out of range immediate offset
;   e.g. st1h { z0.s }, p0, [x8, z1.s, uxtw]
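;
; Note: when the immediate cannot be encoded it is expected to be
; materialized into a scalar register, and the scalar-base-plus-vector
; form is selected instead ([x8, z1.s, uxtw] for 32-bit containers,
; [x8, z1.d] for 64-bit containers), as the checks below show.
;
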
define void @sst1b_s_imm_offset_out_of_range(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1b_s_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #32
; CHECK-NEXT:    st1b { z0.s }, p0, [x8, z1.s, uxtw]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i8> %data_trunc,
                                                                       <vscale x 4 x i1> %pg,
                                                                       <vscale x 4 x i32> %base,
                                                                       i64 32)
  ret void
}

define void @sst1b_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1b_d_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #32
; CHECK-NEXT:    st1b { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i8> %data_trunc,
                                                                       <vscale x 2 x i1> %pg,
                                                                       <vscale x 2 x i64> %base,
                                                                       i64 32)
  ret void
}

define void @sst1h_s_imm_offset_out_of_range(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1h_s_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #63
; CHECK-NEXT:    st1h { z0.s }, p0, [x8, z1.s, uxtw]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16> %data_trunc,
                                                                        <vscale x 4 x i1> %pg,
                                                                        <vscale x 4 x i32> %base,
                                                                        i64 63)
  ret void
}

define void @sst1h_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1h_d_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #63
; CHECK-NEXT:    st1h { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i16> %data_trunc,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 63)
  ret void
}

define void @sst1w_s_imm_offset_out_of_range(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1w_s_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #125
; CHECK-NEXT:    st1w { z0.s }, p0, [x8, z1.s, uxtw]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i32> %data,
                                                                        <vscale x 4 x i1> %pg,
                                                                        <vscale x 4 x i32> %base,
                                                                        i64 125)
  ret void
}

define void @sst1w_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1w_d_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #125
; CHECK-NEXT:    st1w { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i32> %data_trunc,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 125)
  ret void
}

define void @sst1w_s_imm_offset_float_out_of_range(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1w_s_imm_offset_float_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #125
; CHECK-NEXT:    st1w { z0.s }, p0, [x8, z1.s, uxtw]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x float> %data,
                                                                        <vscale x 4 x i1> %pg,
                                                                        <vscale x 4 x i32> %base,
                                                                        i64 125)
  ret void
}

define void @sst1d_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1d_d_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #249
; CHECK-NEXT:    st1d { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> %data,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 249)
  ret void
}

define void @sst1d_d_imm_offset_double_out_of_range(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1d_d_imm_offset_double_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #249
; CHECK-NEXT:    st1d { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double> %data,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 249)
  ret void
}

declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i8>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i8>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i16>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i32>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)

declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)