; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -aarch64-enable-mgather-combine=0 -enable-misched=false < %s | FileCheck %s
; RUN: llc -aarch64-enable-mgather-combine=1 -enable-misched=false < %s | FileCheck %s

target triple = "aarch64-linux-gnu"

; Tests for multiple uses of the mgather where the s/zext should not be combined.

define <vscale x 2 x i64> @masked_sgather_sext(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask, <vscale x 2 x i8> %vals) #0 {
; CHECK-LABEL: masked_sgather_sext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z2, z0
; CHECK-NEXT:    sxtb z2.d, p0/m, z0.d
; CHECK-NEXT:    add z0.d, z0.d, z1.d
; CHECK-NEXT:    sxtb z0.d, p0/m, z0.d
; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z2.d
; CHECK-NEXT:    ret
  %ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
  %data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
  %data.sext = sext <vscale x 2 x i8> %data to <vscale x 2 x i64>
  %add = add <vscale x 2 x i8> %data, %vals
  %add.sext = sext <vscale x 2 x i8> %add to <vscale x 2 x i64>
  %mul = mul <vscale x 2 x i64> %data.sext, %add.sext
  ret <vscale x 2 x i64> %mul
}

define <vscale x 2 x i64> @masked_sgather_zext(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask, <vscale x 2 x i8> %vals) #0 {
; CHECK-LABEL: masked_sgather_zext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    add z1.d, z0.d, z1.d
; CHECK-NEXT:    and z0.d, z0.d, #0xff
; CHECK-NEXT:    and z1.d, z1.d, #0xff
; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
  %data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
  %data.zext = zext <vscale x 2 x i8> %data to <vscale x 2 x i64>
  %add = add <vscale x 2 x i8> %data, %vals
  %add.zext = zext <vscale x 2 x i8> %add to <vscale x 2 x i64>
  %mul = mul <vscale x 2 x i64> %data.zext, %add.zext
  ret <vscale x 2 x i64> %mul
}

; Tests that exercise various type legalisation scenarios for ISD::MGATHER.

; Code generate load of an illegal datatype via promotion.
define <vscale x 2 x i8> @masked_gather_nxv2i8(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv2i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.d }, p0/z, [z0.d]
; CHECK-NEXT:    ret
  %data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
  ret <vscale x 2 x i8> %data
}

; Code generate load of an illegal datatype via promotion.
define <vscale x 2 x i16> @masked_gather_nxv2i16(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv2i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.d }, p0/z, [z0.d]
; CHECK-NEXT:    ret
  %data = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
  ret <vscale x 2 x i16> %data
}

; Code generate load of an illegal datatype via promotion.
define <vscale x 2 x i32> @masked_gather_nxv2i32(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv2i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.d }, p0/z, [z0.d]
; CHECK-NEXT:    ret
  %data = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
  ret <vscale x 2 x i32> %data
}

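; An nxv4 gather does not fit the native nxv2 (64-bit element) gather form, so
; the predicate is unpacked (punpklo/punpkhi), two nxv2 gathers are emitted and
; the results are repacked with uzp1.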
define <vscale x 4 x half> @masked_gather_nxv4f16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpkhi p1.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    ld1h { z1.d }, p1/z, [z1.d]
; CHECK-NEXT:    ld1h { z0.d }, p0/z, [z0.d]
; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %data = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
  ret <vscale x 4 x half> %data
}

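; The i16 indices are sign extended to 64 bits (sxth) before feeding an
; lsl #2 scaled 64-bit gather.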
define <vscale x 2 x float> @masked_gather_nxv2f32(ptr %base, <vscale x 2 x i16> %indices, <vscale x 2 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p1.d
; CHECK-NEXT:    sxth z0.d, p1/m, z0.d
; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, lsl #2]
; CHECK-NEXT:    ret
  %ptrs = getelementptr float, ptr %base, <vscale x 2 x i16> %indices
  %data = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
  ret <vscale x 2 x float> %data
}

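; An nxv8 gather needs two levels of predicate unpacking, producing four nxv2
; gathers whose results are recombined with a tree of uzp1 instructions.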
define <vscale x 8 x half> @masked_gather_nxv8f16(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpkhi p1.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    punpkhi p2.h, p1.b
; CHECK-NEXT:    punpklo p1.h, p1.b
; CHECK-NEXT:    ld1h { z3.d }, p2/z, [z3.d]
; CHECK-NEXT:    ld1h { z2.d }, p1/z, [z2.d]
; CHECK-NEXT:    punpkhi p1.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    uzp1 z2.s, z2.s, z3.s
; CHECK-NEXT:    ld1h { z1.d }, p1/z, [z1.d]
; CHECK-NEXT:    ld1h { z0.d }, p0/z, [z0.d]
; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
; CHECK-NEXT:    uzp1 z0.h, z0.h, z2.h
; CHECK-NEXT:    ret
  %data = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x half> undef)
  ret <vscale x 8 x half> %data
}

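; With a base plus 16-bit indices the gather can stay in the 32-bit element
; form: the indices are sign extended via sunpklo/sunpkhi and fed to two
; sxtw-scaled 32-bit gathers, so only one uzp1 is needed.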
define <vscale x 8 x bfloat> @masked_gather_nxv8bf16(ptr %base, <vscale x 8 x i16> %indices, <vscale x 8 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv8bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpkhi p1.h, p0.b
; CHECK-NEXT:    sunpkhi z1.s, z0.h
; CHECK-NEXT:    sunpklo z0.s, z0.h
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    ld1h { z1.s }, p1/z, [x0, z1.s, sxtw #1]
; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
; CHECK-NEXT:    uzp1 z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %ptrs = getelementptr bfloat, ptr %base, <vscale x 8 x i16> %indices
  %data = call <vscale x 8 x bfloat> @llvm.masked.gather.nxv8bf16(<vscale x 8 x ptr> %ptrs, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x bfloat> undef)
  ret <vscale x 8 x bfloat> %data
}

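; An nxv4f64 result needs two Z registers, so the sign-extended indices are
; unpacked to 64 bits and two lsl #3 scaled gathers are emitted, one per
; predicate half.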
define <vscale x 4 x double> @masked_gather_nxv4f64(ptr %base, <vscale x 4 x i16> %indices, <vscale x 4 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv4f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p1.s
; CHECK-NEXT:    movprfx z1, z0
; CHECK-NEXT:    sxth z1.s, p1/m, z0.s
; CHECK-NEXT:    sunpklo z0.d, z1.s
; CHECK-NEXT:    punpklo p1.h, p0.b
; CHECK-NEXT:    sunpkhi z1.d, z1.s
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    ld1d { z0.d }, p1/z, [x0, z0.d, lsl #3]
; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0, z1.d, lsl #3]
; CHECK-NEXT:    ret
  %ptrs = getelementptr double, ptr %base, <vscale x 4 x i16> %indices
  %data = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x double> undef)
  ret <vscale x 4 x double> %data
}

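; Zero-extended i32 offsets map directly onto the uxtw-scaled addressing mode;
; only the predicate needs unpacking to split the gather in two.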
define <vscale x 8 x float> @masked_gather_nxv8f32(ptr %base, <vscale x 8 x i32> %offsets, <vscale x 8 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv8f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p1.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    ld1w { z0.s }, p1/z, [x0, z0.s, uxtw #2]
; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0, z1.s, uxtw #2]
; CHECK-NEXT:    ret
  %offsets.zext = zext <vscale x 8 x i32> %offsets to <vscale x 8 x i64>
  %ptrs = getelementptr float, ptr %base, <vscale x 8 x i64> %offsets.zext
  %vals = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x float> undef)
  ret <vscale x 8 x float> %vals
}

; Code generate the worst case scenario when all vector types are legal.
define <vscale x 16 x i8> @masked_gather_nxv16i8(ptr %base, <vscale x 16 x i8> %indices, <vscale x 16 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpkhi p1.h, p0.b
; CHECK-NEXT:    sunpkhi z1.h, z0.b
; CHECK-NEXT:    sunpklo z0.h, z0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    sunpkhi z2.s, z1.h
; CHECK-NEXT:    sunpklo z1.s, z1.h
; CHECK-NEXT:    punpkhi p2.h, p1.b
; CHECK-NEXT:    punpklo p1.h, p1.b
; CHECK-NEXT:    ld1b { z2.s }, p2/z, [x0, z2.s, sxtw]
; CHECK-NEXT:    ld1b { z1.s }, p1/z, [x0, z1.s, sxtw]
; CHECK-NEXT:    punpkhi p1.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    uzp1 z1.h, z1.h, z2.h
; CHECK-NEXT:    sunpkhi z2.s, z0.h
; CHECK-NEXT:    sunpklo z0.s, z0.h
; CHECK-NEXT:    ld1b { z2.s }, p1/z, [x0, z2.s, sxtw]
; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT:    uzp1 z0.h, z0.h, z2.h
; CHECK-NEXT:    uzp1 z0.b, z0.b, z1.b
; CHECK-NEXT:    ret
  %ptrs = getelementptr i8, ptr %base, <vscale x 16 x i8> %indices
  %data = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8(<vscale x 16 x ptr> %ptrs, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
  ret <vscale x 16 x i8> %data
}

; Code generate the worst case scenario when all vector types are illegal.
define <vscale x 32 x i32> @masked_gather_nxv32i32(ptr %base, <vscale x 32 x i32> %indices, <vscale x 32 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv32i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpklo p3.h, p2.b
; CHECK-NEXT:    punpkhi p2.h, p2.b
; CHECK-NEXT:    ld1w { z0.s }, p3/z, [x0, z0.s, sxtw #2]
; CHECK-NEXT:    ld1w { z1.s }, p2/z, [x0, z1.s, sxtw #2]
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    ld1w { z2.s }, p2/z, [x0, z2.s, sxtw #2]
; CHECK-NEXT:    ld1w { z3.s }, p0/z, [x0, z3.s, sxtw #2]
; CHECK-NEXT:    punpklo p0.h, p1.b
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    ld1w { z4.s }, p2/z, [x0, z4.s, sxtw #2]
; CHECK-NEXT:    ld1w { z5.s }, p0/z, [x0, z5.s, sxtw #2]
; CHECK-NEXT:    punpkhi p0.h, p1.b
; CHECK-NEXT:    punpklo p1.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    ld1w { z6.s }, p1/z, [x0, z6.s, sxtw #2]
; CHECK-NEXT:    ld1w { z7.s }, p0/z, [x0, z7.s, sxtw #2]
; CHECK-NEXT:    ret
  %ptrs = getelementptr i32, ptr %base, <vscale x 32 x i32> %indices
  %data = call <vscale x 32 x i32> @llvm.masked.gather.nxv32i32(<vscale x 32 x ptr> %ptrs, i32 4, <vscale x 32 x i1> %mask, <vscale x 32 x i32> undef)
  ret <vscale x 32 x i32> %data
}

; TODO: Currently the sign extend gets applied to the values after a 'uzp1' of
; two registers, so it doesn't get folded away. The same applies to any other
; vector-of-pointers style gather that doesn't fit in a single
; <vscale x 2 x ptr> register. Better folding is required before we can check
; those off.
define <vscale x 4 x i32> @masked_sgather_nxv4i8(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mask) #0 {
; CHECK-LABEL: masked_sgather_nxv4i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpkhi p1.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    ld1b { z1.d }, p1/z, [z1.d]
; CHECK-NEXT:    ld1b { z0.d }, p0/z, [z0.d]
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
; CHECK-NEXT:    sxtb z0.s, p0/m, z0.s
; CHECK-NEXT:    ret
  %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
  %svals = sext <vscale x 4 x i8> %vals to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %svals
}

attributes #0 = { nounwind "target-features"="+sve,+bf16" }

declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
declare <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
declare <vscale x 16 x i8> @llvm.masked.gather.nxv16i8(<vscale x 16 x ptr>, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
declare <vscale x 32 x i32> @llvm.masked.gather.nxv32i32(<vscale x 32 x ptr>, i32, <vscale x 32 x i1>, <vscale x 32 x i32>)

declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
declare <vscale x 8 x half> @llvm.masked.gather.nxv8f16(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x half>)
declare <vscale x 8 x bfloat> @llvm.masked.gather.nxv8bf16(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x bfloat>)
declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
declare <vscale x 8 x float> @llvm.masked.gather.nxv8f32(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x float>)
declare <vscale x 4 x double> @llvm.masked.gather.nxv4f64(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x double>)