; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-linux-unknown | FileCheck %s

; Ensure we use a "vscale x 4" wide scatter for the maximum supported offset.
define void @scatter_i8_index_offset_maximum(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
; CHECK-LABEL: scatter_i8_index_offset_maximum:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #33554431 // =0x1ffffff
; CHECK-NEXT: index z1.s, #0, w8
; CHECK-NEXT: add x8, x0, x1
; CHECK-NEXT: st1b { z0.s }, p0, [x8, z1.s, sxtw]
; CHECK-NEXT: ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %t2 = insertelement <vscale x 4 x i64> undef, i64 33554431, i32 0
  %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t4 = mul <vscale x 4 x i64> %t3, %step
  %t5 = add <vscale x 4 x i64> %t1, %t4
  %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
  ret void
}

; Ensure we use a "vscale x 4" wide scatter for the minimum supported offset.
define void @scatter_i16_index_offset_minimum(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i16> %data) #0 {
; CHECK-LABEL: scatter_i16_index_offset_minimum:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #-33554432 // =0xfe000000
; CHECK-NEXT: index z1.s, #0, w8
; CHECK-NEXT: add x8, x0, x1, lsl #1
; CHECK-NEXT: st1h { z0.s }, p0, [x8, z1.s, sxtw #1]
; CHECK-NEXT: ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %t2 = insertelement <vscale x 4 x i64> undef, i64 -33554432, i32 0
  %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t4 = mul <vscale x 4 x i64> %t3, %step
  %t5 = add <vscale x 4 x i64> %t1, %t4
  %t6 = getelementptr i16, ptr %base, <vscale x 4 x i64> %t5
  call void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
  ret void
}

; Ensure we use a "vscale x 4" gather for an offset within the limits of 32 bits.
define <vscale x 4 x i8> @gather_i8_index_offset_8(ptr %base, i64 %offset, <vscale x 4 x i1> %pg) #0 {
; CHECK-LABEL: gather_i8_index_offset_8:
; CHECK: // %bb.0:
; CHECK-NEXT: index z0.s, #0, #1
; CHECK-NEXT: add x8, x0, x1
; CHECK-NEXT: ld1b { z0.s }, p0/z, [x8, z0.s, sxtw]
; CHECK-NEXT: ret
  %splat.insert0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %splat0 = shufflevector <vscale x 4 x i64> %splat.insert0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %splat.insert1 = insertelement <vscale x 4 x i64> undef, i64 1, i32 0
  %splat1 = shufflevector <vscale x 4 x i64> %splat.insert1, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %t1 = mul <vscale x 4 x i64> %splat1, %step
  %t2 = add <vscale x 4 x i64> %splat0, %t1
  %t3 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t2
  %load = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %t3, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x i8> undef)
  ret <vscale x 4 x i8> %load
}

; Ensure we don't use a "vscale x 4" wide scatter because we cannot prove the
; variable stride will not wrap when shrunk to be i32 based.
define void @scatter_f16_index_offset_var(ptr %base, i64 %offset, i64 %scale, <vscale x 4 x i1> %pg, <vscale x 4 x half> %data) #0 {
; CHECK-LABEL: scatter_f16_index_offset_var:
; CHECK: // %bb.0:
; CHECK-NEXT: index z1.d, #0, #1
; CHECK-NEXT: ptrue p1.d
; CHECK-NEXT: mov z2.d, x1
; CHECK-NEXT: movprfx z4, z2
; CHECK-NEXT: mla z4.d, p1/m, z1.d, z2.d
; CHECK-NEXT: punpklo p2.h, p0.b
; CHECK-NEXT: uunpklo z3.d, z0.s
; CHECK-NEXT: punpkhi p0.h, p0.b
; CHECK-NEXT: uunpkhi z0.d, z0.s
; CHECK-NEXT: incd z1.d
; CHECK-NEXT: st1h { z3.d }, p2, [x0, z4.d, lsl #1]
; CHECK-NEXT: mad z1.d, p1/m, z2.d, z2.d
; CHECK-NEXT: st1h { z0.d }, p0, [x0, z1.d, lsl #1]
; CHECK-NEXT: ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %t2 = insertelement <vscale x 4 x i64> undef, i64 %scale, i32 0
  %t3 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t4 = mul <vscale x 4 x i64> %t3, %step
  %t5 = add <vscale x 4 x i64> %t1, %t4
  %t6 = getelementptr half, ptr %base, <vscale x 4 x i64> %t5
  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
  ret void
}

; Ensure we don't use a "vscale x 4" wide scatter when the offset is too big.
define void @scatter_i8_index_offset_maximum_plus_one(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
; CHECK-LABEL: scatter_i8_index_offset_maximum_plus_one:
; CHECK: // %bb.0:
; CHECK-NEXT: punpklo p1.h, p0.b
; CHECK-NEXT: mov w8, #33554432 // =0x2000000
; CHECK-NEXT: uunpklo z2.d, z0.s
; CHECK-NEXT: index z1.d, #0, x8
; CHECK-NEXT: rdvl x9, #1
; CHECK-NEXT: add x8, x0, x1
; CHECK-NEXT: lsr x9, x9, #4
; CHECK-NEXT: mov w10, #67108864 // =0x4000000
; CHECK-NEXT: punpkhi p0.h, p0.b
; CHECK-NEXT: uunpkhi z0.d, z0.s
; CHECK-NEXT: st1b { z2.d }, p1, [x8, z1.d]
; CHECK-NEXT: madd x8, x9, x10, x8
; CHECK-NEXT: st1b { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT: ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %t2 = insertelement <vscale x 4 x i64> undef, i64 33554432, i32 0
  %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t4 = mul <vscale x 4 x i64> %t3, %step
  %t5 = add <vscale x 4 x i64> %t1, %t4
  %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
  ret void
}

; Ensure we don't use a "vscale x 4" wide scatter when the offset is too small.
define void @scatter_i8_index_offset_minimum_minus_one(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
; CHECK-LABEL: scatter_i8_index_offset_minimum_minus_one:
; CHECK: // %bb.0:
; CHECK-NEXT: punpklo p1.h, p0.b
; CHECK-NEXT: mov x8, #-33554433 // =0xfffffffffdffffff
; CHECK-NEXT: uunpklo z2.d, z0.s
; CHECK-NEXT: index z1.d, #0, x8
; CHECK-NEXT: rdvl x9, #1
; CHECK-NEXT: mov x10, #-2 // =0xfffffffffffffffe
; CHECK-NEXT: lsr x9, x9, #4
; CHECK-NEXT: add x8, x0, x1
; CHECK-NEXT: movk x10, #64511, lsl #16
; CHECK-NEXT: punpkhi p0.h, p0.b
; CHECK-NEXT: uunpkhi z0.d, z0.s
; CHECK-NEXT: st1b { z2.d }, p1, [x8, z1.d]
; CHECK-NEXT: madd x8, x9, x10, x8
; CHECK-NEXT: st1b { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT: ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %t2 = insertelement <vscale x 4 x i64> undef, i64 -33554433, i32 0
  %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t4 = mul <vscale x 4 x i64> %t3, %step
  %t5 = add <vscale x 4 x i64> %t1, %t4
  %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
  ret void
}

; Ensure we don't use a "vscale x 4" wide scatter when the stride is too big.
define void @scatter_i8_index_stride_too_big(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
; CHECK-LABEL: scatter_i8_index_stride_too_big:
; CHECK: // %bb.0:
; CHECK-NEXT: punpklo p1.h, p0.b
; CHECK-NEXT: mov x8, #4611686018427387904 // =0x4000000000000000
; CHECK-NEXT: uunpklo z2.d, z0.s
; CHECK-NEXT: index z1.d, #0, x8
; CHECK-NEXT: rdvl x9, #1
; CHECK-NEXT: add x8, x0, x1
; CHECK-NEXT: lsr x9, x9, #4
; CHECK-NEXT: mov x10, #-9223372036854775808 // =0x8000000000000000
; CHECK-NEXT: punpkhi p0.h, p0.b
; CHECK-NEXT: uunpkhi z0.d, z0.s
; CHECK-NEXT: st1b { z2.d }, p1, [x8, z1.d]
; CHECK-NEXT: madd x8, x9, x10, x8
; CHECK-NEXT: st1b { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT: ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %t2 = insertelement <vscale x 4 x i64> undef, i64 4611686018427387904, i32 0
  %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t4 = mul <vscale x 4 x i64> %t3, %step
  %t5 = add <vscale x 4 x i64> %t1, %t4
  %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
  ret void
}

; Ensure the resulting load is "vscale x 4" wide, despite the offset giving the
; impression the gather must be split due to its <vscale x 4 x i64> offset.
; gather_i8(base, index(offset, 8 * sizeof(i8)))
define <vscale x 4 x i8> @gather_8i8_index_offset_8(ptr %base, i64 %offset, <vscale x 4 x i1> %pg) #0 {
; CHECK-LABEL: gather_8i8_index_offset_8:
; CHECK: // %bb.0:
; CHECK-NEXT: index z0.s, #0, #8
; CHECK-NEXT: add x8, x0, x1, lsl #3
; CHECK-NEXT: ld1b { z0.s }, p0/z, [x8, z0.s, sxtw]
; CHECK-NEXT: ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t2 = add <vscale x 4 x i64> %t1, %step
  %t3 = getelementptr [8 x i8], ptr %base, <vscale x 4 x i64> %t2
  %t4 = bitcast <vscale x 4 x ptr> %t3 to <vscale x 4 x ptr>
  %load = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %t4, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x i8> undef)
  ret <vscale x 4 x i8> %load
}

; Ensure the resulting load is "vscale x 4" wide, despite the offset giving the
; impression the gather must be split due to its <vscale x 4 x i64> offset.
; gather_f32(base, index(offset, 8 * sizeof(float)))
define <vscale x 4 x float> @gather_f32_index_offset_8(ptr %base, i64 %offset, <vscale x 4 x i1> %pg) #0 {
; CHECK-LABEL: gather_f32_index_offset_8:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #32 // =0x20
; CHECK-NEXT: index z0.s, #0, w8
; CHECK-NEXT: add x8, x0, x1, lsl #5
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x8, z0.s, sxtw]
; CHECK-NEXT: ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t2 = add <vscale x 4 x i64> %t1, %step
  %t3 = getelementptr [8 x float], ptr %base, <vscale x 4 x i64> %t2
  %t4 = bitcast <vscale x 4 x ptr> %t3 to <vscale x 4 x ptr>
  %load = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr> %t4, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x float> undef)
  ret <vscale x 4 x float> %load
}

; Ensure the resulting store is "vscale x 4" wide, despite the offset giving the
; impression the scatter must be split due to its <vscale x 4 x i64> offset.
; scatter_i8(base, index(offset, 8 * sizeof(i8)))
define void @scatter_i8_index_offset_8(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
; CHECK-LABEL: scatter_i8_index_offset_8:
; CHECK: // %bb.0:
; CHECK-NEXT: index z1.s, #0, #8
; CHECK-NEXT: add x8, x0, x1, lsl #3
; CHECK-NEXT: st1b { z0.s }, p0, [x8, z1.s, sxtw]
; CHECK-NEXT: ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t2 = add <vscale x 4 x i64> %t1, %step
  %t3 = getelementptr [8 x i8], ptr %base, <vscale x 4 x i64> %t2
  %t4 = bitcast <vscale x 4 x ptr> %t3 to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %t4, i32 2, <vscale x 4 x i1> %pg)
  ret void
}

; Ensure the resulting store is "vscale x 4" wide, despite the offset giving the
; impression the scatter must be split due to its <vscale x 4 x i64> offset.
; scatter_f16(base, index(offset, 8 * sizeof(half)))
define void @scatter_f16_index_offset_8(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x half> %data) #0 {
; CHECK-LABEL: scatter_f16_index_offset_8:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #16 // =0x10
; CHECK-NEXT: index z1.s, #0, w8
; CHECK-NEXT: add x8, x0, x1, lsl #4
; CHECK-NEXT: st1h { z0.s }, p0, [x8, z1.s, sxtw]
; CHECK-NEXT: ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t2 = add <vscale x 4 x i64> %t1, %step
  %t3 = getelementptr [8 x half], ptr %base, <vscale x 4 x i64> %t2
  %t4 = bitcast <vscale x 4 x ptr> %t3 to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x ptr> %t4, i32 2, <vscale x 4 x i1> %pg)
  ret void
}

; The stepvector is hidden further behind a GEP and two adds.
define void @scatter_f16_index_add_add(ptr %base, i64 %offset, i64 %offset2, <vscale x 4 x i1> %pg, <vscale x 4 x half> %data) #0 {
; CHECK-LABEL: scatter_f16_index_add_add:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #16 // =0x10
; CHECK-NEXT: add x9, x0, x2, lsl #4
; CHECK-NEXT: index z1.s, #0, w8
; CHECK-NEXT: add x8, x9, x1, lsl #4
; CHECK-NEXT: st1h { z0.s }, p0, [x8, z1.s, sxtw]
; CHECK-NEXT: ret
  %splat.offset.ins = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %splat.offset = shufflevector <vscale x 4 x i64> %splat.offset.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %splat.offset2.ins = insertelement <vscale x 4 x i64> undef, i64 %offset2, i32 0
  %splat.offset2 = shufflevector <vscale x 4 x i64> %splat.offset2.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %add1 = add <vscale x 4 x i64> %splat.offset, %step
  %add2 = add <vscale x 4 x i64> %add1, %splat.offset2
  %gep = getelementptr [8 x half], ptr %base, <vscale x 4 x i64> %add2
  %gep.bc = bitcast <vscale x 4 x ptr> %gep to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x ptr> %gep.bc, i32 2, <vscale x 4 x i1> %pg)
  ret void
}

; The stepvector is hidden further behind a GEP, two adds and a mul.
define void @scatter_f16_index_add_add_mul(ptr %base, i64 %offset, i64 %offset2, <vscale x 4 x i1> %pg, <vscale x 4 x half> %data) #0 {
; CHECK-LABEL: scatter_f16_index_add_add_mul:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #128 // =0x80
; CHECK-NEXT: add x9, x0, x2, lsl #7
; CHECK-NEXT: index z1.s, #0, w8
; CHECK-NEXT: add x8, x9, x1, lsl #7
; CHECK-NEXT: st1h { z0.s }, p0, [x8, z1.s, sxtw]
; CHECK-NEXT: ret
  %splat.offset.ins = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %splat.offset = shufflevector <vscale x 4 x i64> %splat.offset.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %splat.offset2.ins = insertelement <vscale x 4 x i64> undef, i64 %offset2, i32 0
  %splat.offset2 = shufflevector <vscale x 4 x i64> %splat.offset2.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %add1 = add <vscale x 4 x i64> %splat.offset, %step
  %add2 = add <vscale x 4 x i64> %add1, %splat.offset2
  %splat.const8.ins = insertelement <vscale x 4 x i64> undef, i64 8, i32 0
  %splat.const8 = shufflevector <vscale x 4 x i64> %splat.const8.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %mul = mul <vscale x 4 x i64> %add2, %splat.const8
  %gep = getelementptr [8 x half], ptr %base, <vscale x 4 x i64> %mul
  %gep.bc = bitcast <vscale x 4 x ptr> %gep to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x ptr> %gep.bc, i32 2, <vscale x 4 x i1> %pg)
  ret void
}

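; Gather from a constant (non-null) base address with purely vector offsets.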
define <vscale x 2 x i64> @masked_gather_nxv2i64_const_with_vec_offsets(<vscale x 2 x i64> %vector_offsets, <vscale x 2 x i1> %pg) #0 {
; CHECK-LABEL: masked_gather_nxv2i64_const_with_vec_offsets:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #8 // =0x8
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8, z0.d, lsl #3]
; CHECK-NEXT: ret
  %ptrs = getelementptr i64, ptr inttoptr (i64 8 to ptr), <vscale x 2 x i64> %vector_offsets
  %data = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg, <vscale x 2 x i64> undef)
  ret <vscale x 2 x i64> %data
}

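; Gather from a null base where the offsets are a vector plus a splatted scalar.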
define <vscale x 2 x i64> @masked_gather_nxv2i64_null_with_vec_plus_scalar_offsets(<vscale x 2 x i64> %vector_offsets, i64 %scalar_offset, <vscale x 2 x i1> %pg) #0 {
; CHECK-LABEL: masked_gather_nxv2i64_null_with_vec_plus_scalar_offsets:
; CHECK: // %bb.0:
; CHECK-NEXT: lsl x8, x0, #3
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8, z0.d, lsl #3]
; CHECK-NEXT: ret
  %scalar_offset.ins = insertelement <vscale x 2 x i64> undef, i64 %scalar_offset, i64 0
  %scalar_offset.splat = shufflevector <vscale x 2 x i64> %scalar_offset.ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %offsets = add <vscale x 2 x i64> %vector_offsets, %scalar_offset.splat
  %ptrs = getelementptr i64, ptr null, <vscale x 2 x i64> %offsets
  %data = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg, <vscale x 2 x i64> undef)
  ret <vscale x 2 x i64> %data
}

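; Gather from a null base where the offsets are a vector plus a splatted immediate.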
define <vscale x 2 x i64> @masked_gather_nxv2i64_null_with_vec_plus_imm_offsets(<vscale x 2 x i64> %vector_offsets, <vscale x 2 x i1> %pg) #0 {
; CHECK-LABEL: masked_gather_nxv2i64_null_with_vec_plus_imm_offsets:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #8 // =0x8
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8, z0.d, lsl #3]
; CHECK-NEXT: ret
  %scalar_offset.ins = insertelement <vscale x 2 x i64> undef, i64 1, i64 0
  %scalar_offset.splat = shufflevector <vscale x 2 x i64> %scalar_offset.ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %offsets = add <vscale x 2 x i64> %vector_offsets, %scalar_offset.splat
  %ptrs = getelementptr i64, ptr null, <vscale x 2 x i64> %offsets
  %data = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg, <vscale x 2 x i64> undef)
  ret <vscale x 2 x i64> %data
}

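; Gather with i8 offsets that are sign-extended to i32.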
define <vscale x 4 x i32> @masked_gather_nxv4i32_s8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv4i32_s8_offsets:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p1.s
; CHECK-NEXT: sxtb z0.s, p1/m, z0.s
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, z0.s, sxtw #2]
; CHECK-NEXT: ret
  %offsets.sext = sext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i32> %offsets.sext
  %data = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  ret <vscale x 4 x i32> %data
}

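; Gather with i8 offsets that are zero-extended to i32.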
define <vscale x 4 x i32> @masked_gather_nxv4i32_u8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv4i32_u8_offsets:
; CHECK: // %bb.0:
; CHECK-NEXT: and z0.s, z0.s, #0xff
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
; CHECK-NEXT: ret
  %offsets.zext = zext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i32> %offsets.zext
  %data = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  ret <vscale x 4 x i32> %data
}

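; Gather with i8 offsets sign-extended to i32 and then zero-extended to i64.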
define <vscale x 4 x i32> @masked_gather_nxv4i32_u32s8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv4i32_u32s8_offsets:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p1.s
; CHECK-NEXT: sxtb z0.s, p1/m, z0.s
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
; CHECK-NEXT: ret
  %offsets.sext = sext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
  %offsets.sext.zext = zext <vscale x 4 x i32> %offsets.sext to <vscale x 4 x i64>
  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i64> %offsets.sext.zext
  %data = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  ret <vscale x 4 x i32> %data
}

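; Scatter to a constant (non-null) base address with purely vector offsets.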
define void @masked_scatter_nxv2i64_const_with_vec_offsets(<vscale x 2 x i64> %vector_offsets, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %data) #0 {
; CHECK-LABEL: masked_scatter_nxv2i64_const_with_vec_offsets:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #8 // =0x8
; CHECK-NEXT: st1d { z1.d }, p0, [x8, z0.d, lsl #3]
; CHECK-NEXT: ret
  %ptrs = getelementptr i64, ptr inttoptr (i64 8 to ptr), <vscale x 2 x i64> %vector_offsets
  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg)
  ret void
}

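; Scatter to a null base where the offsets are a vector plus a splatted scalar.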
define void @masked_scatter_nxv2i64_null_with_vec_plus_scalar_offsets(<vscale x 2 x i64> %vector_offsets, i64 %scalar_offset, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %data) #0 {
; CHECK-LABEL: masked_scatter_nxv2i64_null_with_vec_plus_scalar_offsets:
; CHECK: // %bb.0:
; CHECK-NEXT: lsl x8, x0, #3
; CHECK-NEXT: st1d { z1.d }, p0, [x8, z0.d, lsl #3]
; CHECK-NEXT: ret
  %scalar_offset.ins = insertelement <vscale x 2 x i64> undef, i64 %scalar_offset, i64 0
  %scalar_offset.splat = shufflevector <vscale x 2 x i64> %scalar_offset.ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %offsets = add <vscale x 2 x i64> %vector_offsets, %scalar_offset.splat
  %ptrs = getelementptr i64, ptr null, <vscale x 2 x i64> %offsets
  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg)
  ret void
}

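; Scatter to a null base where the offsets are a vector plus a splatted immediate.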
define void @masked_scatter_nxv2i64_null_with_vec_plus_imm_offsets(<vscale x 2 x i64> %vector_offsets, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %data) #0 {
; CHECK-LABEL: masked_scatter_nxv2i64_null_with_vec_plus_imm_offsets:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #8 // =0x8
; CHECK-NEXT: st1d { z1.d }, p0, [x8, z0.d, lsl #3]
; CHECK-NEXT: ret
  %scalar_offset.ins = insertelement <vscale x 2 x i64> undef, i64 1, i64 0
  %scalar_offset.splat = shufflevector <vscale x 2 x i64> %scalar_offset.ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %offsets = add <vscale x 2 x i64> %vector_offsets, %scalar_offset.splat
  %ptrs = getelementptr i64, ptr null, <vscale x 2 x i64> %offsets
  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg)
  ret void
}

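; Scatter with i8 offsets that are sign-extended to i32.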
define void @masked_scatter_nxv4i32_s8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %data) #0 {
; CHECK-LABEL: masked_scatter_nxv4i32_s8_offsets:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p1.s
; CHECK-NEXT: sxtb z0.s, p1/m, z0.s
; CHECK-NEXT: st1w { z1.s }, p0, [x0, z0.s, sxtw #2]
; CHECK-NEXT: ret
  %offsets.sext = sext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i32> %offsets.sext
  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask)
  ret void
}

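; Scatter with i8 offsets that are zero-extended to i32.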
define void @masked_scatter_nxv4i32_u8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %data) #0 {
; CHECK-LABEL: masked_scatter_nxv4i32_u8_offsets:
; CHECK: // %bb.0:
; CHECK-NEXT: and z0.s, z0.s, #0xff
; CHECK-NEXT: st1w { z1.s }, p0, [x0, z0.s, uxtw #2]
; CHECK-NEXT: ret
  %offsets.zext = zext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i32> %offsets.zext
  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask)
  ret void
}

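; Scatter with i8 offsets sign-extended to i32 and then zero-extended to i64.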
define void @masked_scatter_nxv4i32_u32s8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %data) #0 {
; CHECK-LABEL: masked_scatter_nxv4i32_u32s8_offsets:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p1.s
; CHECK-NEXT: sxtb z0.s, p1/m, z0.s
; CHECK-NEXT: st1w { z1.s }, p0, [x0, z0.s, uxtw #2]
; CHECK-NEXT: ret
  %offsets.sext = sext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
  %offsets.sext.zext = zext <vscale x 4 x i32> %offsets.sext to <vscale x 4 x i64>
  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i64> %offsets.sext.zext
  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask)
  ret void
}

attributes #0 = { "target-features"="+sve" vscale_range(1, 16) }

declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)

declare void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
declare void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
declare void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
declare void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
declare void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)

declare <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()