; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; unscaled unpacked 32-bit offsets
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
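; The nxv2 cases below keep the data in unpacked .d vectors, so each 32-bit
; offset occupies a 64-bit lane. The sext/zext of the offsets is expected to
; fold into the extended-register addressing mode of the scatter store, e.g.
; st1b { z0.d }, p0, [x0, z1.d, sxtw], rather than being widened by a separate
; extend instruction.
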
define void @masked_scatter_nxv2i8_sext_offsets(<vscale x 2 x i8> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv2i8_sext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1b { z0.d }, p0, [x0, z1.d, sxtw]
; CHECK-NEXT:    ret
  %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
  call void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv2i16_sext_offsets(<vscale x 2 x i16> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv2i16_sext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, sxtw]
; CHECK-NEXT:    ret
  %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv2i32_sext_offsets(<vscale x 2 x i32> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv2i32_sext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, sxtw]
; CHECK-NEXT:    ret
  %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv2i64_sext_offsets(<vscale x 2 x i64> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv2i64_sext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, sxtw]
; CHECK-NEXT:    ret
  %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv2f16_sext_offsets(<vscale x 2 x half> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv2f16_sext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, sxtw]
; CHECK-NEXT:    ret
  %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv2bf16_sext_offsets(<vscale x 2 x bfloat> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind #0 {
; CHECK-LABEL: masked_scatter_nxv2bf16_sext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, sxtw]
; CHECK-NEXT:    ret
  %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
  call void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv2f32_sext_offsets(<vscale x 2 x float> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv2f32_sext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, sxtw]
; CHECK-NEXT:    ret
  %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv2f64_sext_offsets(<vscale x 2 x double> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv2f64_sext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, sxtw]
; CHECK-NEXT:    ret
  %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
  ret void
}

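; Same unpacked stores with zero-extended offsets: these should select the
; uxtw form of the addressing mode.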
define void @masked_scatter_nxv2i8_zext_offsets(<vscale x 2 x i8> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv2i8_zext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1b { z0.d }, p0, [x0, z1.d, uxtw]
; CHECK-NEXT:    ret
  %offsets = zext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
  call void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv2i16_zext_offsets(<vscale x 2 x i16> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv2i16_zext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, uxtw]
; CHECK-NEXT:    ret
  %offsets = zext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv2i32_zext_offsets(<vscale x 2 x i32> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv2i32_zext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, uxtw]
; CHECK-NEXT:    ret
  %offsets = zext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv2i64_zext_offsets(<vscale x 2 x i64> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv2i64_zext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, uxtw]
; CHECK-NEXT:    ret
  %offsets = zext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv2f16_zext_offsets(<vscale x 2 x half> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv2f16_zext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, uxtw]
; CHECK-NEXT:    ret
  %offsets = zext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv2bf16_zext_offsets(<vscale x 2 x bfloat> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind #0 {
; CHECK-LABEL: masked_scatter_nxv2bf16_zext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, uxtw]
; CHECK-NEXT:    ret
  %offsets = zext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
  call void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv2f32_zext_offsets(<vscale x 2 x float> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv2f32_zext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, uxtw]
; CHECK-NEXT:    ret
  %offsets = zext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv2f64_zext_offsets(<vscale x 2 x double> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv2f64_zext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, uxtw]
; CHECK-NEXT:    ret
  %offsets = zext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
  ret void
}

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; unscaled packed 32-bit offsets
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
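; The nxv4 cases use packed .s vectors, so each 32-bit offset already occupies
; a 32-bit lane and the scatter store is expected to use the 32-bit element
; form with an extended offset register, e.g.
; st1w { z0.s }, p0, [x0, z1.s, sxtw] / [x0, z1.s, uxtw].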
define void @masked_scatter_nxv4i8_sext_offsets(<vscale x 4 x i8> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv4i8_sext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1b { z0.s }, p0, [x0, z1.s, sxtw]
; CHECK-NEXT:    ret
  %offsets = sext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv4i16_sext_offsets(<vscale x 4 x i16> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv4i16_sext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, sxtw]
; CHECK-NEXT:    ret
  %offsets = sext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv4i32_sext_offsets(<vscale x 4 x i32> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv4i32_sext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, sxtw]
; CHECK-NEXT:    ret
  %offsets = sext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv4f16_sext_offsets(<vscale x 4 x half> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv4f16_sext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, sxtw]
; CHECK-NEXT:    ret
  %offsets = sext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv4bf16_sext_offsets(<vscale x 4 x bfloat> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind #0 {
; CHECK-LABEL: masked_scatter_nxv4bf16_sext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, sxtw]
; CHECK-NEXT:    ret
  %offsets = sext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4bf16(<vscale x 4 x bfloat> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv4f32_sext_offsets(<vscale x 4 x float> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind #0 {
; CHECK-LABEL: masked_scatter_nxv4f32_sext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, sxtw]
; CHECK-NEXT:    ret
  %offsets = sext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
  ret void
}

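; Same packed stores with zero-extended offsets, which should select the uxtw
; form of the addressing mode.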
define void @masked_scatter_nxv4i8_zext_offsets(<vscale x 4 x i8> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv4i8_zext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1b { z0.s }, p0, [x0, z1.s, uxtw]
; CHECK-NEXT:    ret
  %offsets = zext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv4i16_zext_offsets(<vscale x 4 x i16> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv4i16_zext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, uxtw]
; CHECK-NEXT:    ret
  %offsets = zext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv4i32_zext_offsets(<vscale x 4 x i32> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv4i32_zext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, uxtw]
; CHECK-NEXT:    ret
  %offsets = zext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv4f16_zext_offsets(<vscale x 4 x half> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
; CHECK-LABEL: masked_scatter_nxv4f16_zext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, uxtw]
; CHECK-NEXT:    ret
  %offsets = zext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv4bf16_zext_offsets(<vscale x 4 x bfloat> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind #0 {
; CHECK-LABEL: masked_scatter_nxv4bf16_zext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, uxtw]
; CHECK-NEXT:    ret
  %offsets = zext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4bf16(<vscale x 4 x bfloat> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
  ret void
}

define void @masked_scatter_nxv4f32_zext_offsets(<vscale x 4 x float> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind #0 {
; CHECK-LABEL: masked_scatter_nxv4f32_zext_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, uxtw]
; CHECK-NEXT:    ret
  %offsets = zext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
  ret void
}

declare void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
declare void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
declare void @llvm.masked.scatter.nxv4bf16(<vscale x 4 x bfloat>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
declare void @llvm.masked.scatter.nxv4f32(<vscale x 4 x float>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
declare void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
declare void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
declare void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
declare void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
declare void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
declare void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
declare void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
declare void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
declare void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
declare void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
attributes #0 = { "target-features"="+sve,+bf16" }