1 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
2 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme < %s | FileCheck %s
; cttz.elts on an <vscale x 8 x i1> predicate, zero-is-poison = 0, under
; vscale_range(1,16) (#0). Expected lowering (per the CHECK lines below):
; build a descending index vector, mask it by the predicate, find the
; largest surviving index with UMAXV, and subtract from the element count
; to recover the index of the first active lane.
; NOTE(review): this excerpt is missing lines — the `; CHECK:` label-match
; line, the instruction that defines w9 (the element count), and the
; trailing `ret`/`}` are not visible here; confirm against the full test.
6 define i64 @ctz_nxv8i1(<vscale x 8 x i1> %a) #0 {
7 ; CHECK-LABEL: ctz_nxv8i1:
9 ; CHECK-NEXT: index z0.h, #0, #-1
10 ; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff
11 ; CHECK-NEXT: ptrue p0.h
13 ; CHECK-NEXT: inch z0.h
14 ; CHECK-NEXT: and z0.d, z0.d, z1.d
15 ; CHECK-NEXT: and z0.h, z0.h, #0xff
16 ; CHECK-NEXT: umaxv h0, p0, z0.h
17 ; CHECK-NEXT: fmov w8, s0
18 ; CHECK-NEXT: sub w8, w9, w8
19 ; CHECK-NEXT: and x0, x8, #0xff
21 %res = call i64 @llvm.experimental.cttz.elts.i64.nxv8i1(<vscale x 8 x i1> %a, i1 0)
; cttz.elts on an illegal-sized <vscale x 32 x i1> predicate (zero-is-poison
; = 0). The predicate is split with punpklo/punpkhi into four legal halves,
; each half masks its slice of the descending index vector, the slices are
; merged with a UMAX reduction tree, and UMAXV + sub produce the result.
; NOTE(review): the `; CHECK:` line and the final `ret`/`}` are not visible
; in this excerpt — presumably stripped; verify against the full test file.
25 define i32 @ctz_nxv32i1(<vscale x 32 x i1> %a) #0 {
26 ; CHECK-LABEL: ctz_nxv32i1:
28 ; CHECK-NEXT: index z0.h, #0, #-1
30 ; CHECK-NEXT: punpklo p2.h, p0.b
31 ; CHECK-NEXT: neg x8, x8
32 ; CHECK-NEXT: punpklo p3.h, p1.b
33 ; CHECK-NEXT: rdvl x9, #2
34 ; CHECK-NEXT: punpkhi p0.h, p0.b
35 ; CHECK-NEXT: mov z1.h, w8
36 ; CHECK-NEXT: rdvl x8, #-1
37 ; CHECK-NEXT: punpkhi p1.h, p1.b
38 ; CHECK-NEXT: mov z2.h, w8
39 ; CHECK-NEXT: inch z0.h, all, mul #4
40 ; CHECK-NEXT: mov z3.h, p2/z, #-1 // =0xffffffffffffffff
41 ; CHECK-NEXT: ptrue p2.h
42 ; CHECK-NEXT: mov z5.h, p3/z, #-1 // =0xffffffffffffffff
43 ; CHECK-NEXT: add z1.h, z0.h, z1.h
44 ; CHECK-NEXT: add z4.h, z0.h, z2.h
45 ; CHECK-NEXT: mov z6.h, p0/z, #-1 // =0xffffffffffffffff
46 ; CHECK-NEXT: mov z7.h, p1/z, #-1 // =0xffffffffffffffff
47 ; CHECK-NEXT: and z0.d, z0.d, z3.d
48 ; CHECK-NEXT: add z2.h, z1.h, z2.h
49 ; CHECK-NEXT: and z3.d, z4.d, z5.d
50 ; CHECK-NEXT: and z1.d, z1.d, z6.d
51 ; CHECK-NEXT: and z2.d, z2.d, z7.d
52 ; CHECK-NEXT: umax z0.h, p2/m, z0.h, z3.h
53 ; CHECK-NEXT: umax z1.h, p2/m, z1.h, z2.h
54 ; CHECK-NEXT: umax z0.h, p2/m, z0.h, z1.h
55 ; CHECK-NEXT: umaxv h0, p2, z0.h
56 ; CHECK-NEXT: fmov w8, s0
57 ; CHECK-NEXT: sub w8, w9, w8
58 ; CHECK-NEXT: and w0, w8, #0xffff
60 %res = call i32 @llvm.experimental.cttz.elts.i32.nxv32i1(<vscale x 32 x i1> %a, i1 0)
; cttz.elts on a non-i1 vector: <vscale x 4 x i32> is first converted to a
; predicate with CMPNE against zero, then lowered with the same
; index/mask/UMAXV pattern as the i1 cases above.
; NOTE(review): the `; CHECK:` line, the producer of w9, and the trailing
; `ret`/`}` are missing from this excerpt — confirm against the full test.
64 define i32 @ctz_nxv4i32(<vscale x 4 x i32> %a) #0 {
65 ; CHECK-LABEL: ctz_nxv4i32:
67 ; CHECK-NEXT: ptrue p0.s
68 ; CHECK-NEXT: index z1.s, #0, #-1
70 ; CHECK-NEXT: incw z1.s
71 ; CHECK-NEXT: cmpne p1.s, p0/z, z0.s, #0
72 ; CHECK-NEXT: mov z0.s, p1/z, #-1 // =0xffffffffffffffff
73 ; CHECK-NEXT: and z0.d, z1.d, z0.d
74 ; CHECK-NEXT: and z0.s, z0.s, #0xff
75 ; CHECK-NEXT: umaxv s0, p0, z0.s
76 ; CHECK-NEXT: fmov w8, s0
77 ; CHECK-NEXT: sub w8, w9, w8
78 ; CHECK-NEXT: and w0, w8, #0xff
80 %res = call i32 @llvm.experimental.cttz.elts.i32.nxv4i32(<vscale x 4 x i32> %a, i1 0)
84 ; VSCALE RANGE, ZERO IS POISON
; Large vscale_range(1,4096) (#1): 16 bytes/vector * 4096 can exceed the
; i8/i16 index range, so the predicate is widened (punpklo/punpkhi twice)
; and .s-element indices are used, merged with a UMAX tree before UMAXV.
; Zero-is-poison = 0 here, so the whole input being zero must still yield
; the element count.
; NOTE(review): lines between `ptrue p0.b` and `neg x8, x8` (the cntw/rdvl
; setup for x8/x9), plus the `; CHECK:` line and trailing `ret`/`}`, are
; missing from this excerpt — confirm against the full test file.
86 define i64 @vscale_4096(<vscale x 16 x i8> %a) #1 {
87 ; CHECK-LABEL: vscale_4096:
89 ; CHECK-NEXT: ptrue p0.b
92 ; CHECK-NEXT: neg x8, x8
93 ; CHECK-NEXT: mov z1.s, w8
94 ; CHECK-NEXT: neg x8, x9
95 ; CHECK-NEXT: rdvl x9, #1
96 ; CHECK-NEXT: mov z2.s, w8
97 ; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, #0
98 ; CHECK-NEXT: index z0.s, #0, #-1
99 ; CHECK-NEXT: punpklo p1.h, p0.b
100 ; CHECK-NEXT: punpkhi p0.h, p0.b
101 ; CHECK-NEXT: incw z0.s, all, mul #4
102 ; CHECK-NEXT: add z1.s, z0.s, z1.s
103 ; CHECK-NEXT: add z5.s, z0.s, z2.s
104 ; CHECK-NEXT: punpkhi p2.h, p1.b
105 ; CHECK-NEXT: punpkhi p3.h, p0.b
106 ; CHECK-NEXT: punpklo p0.h, p0.b
107 ; CHECK-NEXT: add z2.s, z1.s, z2.s
108 ; CHECK-NEXT: punpklo p1.h, p1.b
109 ; CHECK-NEXT: mov z3.s, p2/z, #-1 // =0xffffffffffffffff
110 ; CHECK-NEXT: ptrue p2.s
111 ; CHECK-NEXT: mov z4.s, p3/z, #-1 // =0xffffffffffffffff
112 ; CHECK-NEXT: mov z6.s, p0/z, #-1 // =0xffffffffffffffff
113 ; CHECK-NEXT: mov z7.s, p1/z, #-1 // =0xffffffffffffffff
114 ; CHECK-NEXT: and z1.d, z1.d, z3.d
115 ; CHECK-NEXT: and z2.d, z2.d, z4.d
116 ; CHECK-NEXT: and z3.d, z5.d, z6.d
117 ; CHECK-NEXT: and z0.d, z0.d, z7.d
118 ; CHECK-NEXT: umax z1.s, p2/m, z1.s, z2.s
119 ; CHECK-NEXT: umax z0.s, p2/m, z0.s, z3.s
120 ; CHECK-NEXT: umax z0.s, p2/m, z0.s, z1.s
121 ; CHECK-NEXT: umaxv s0, p2, z0.s
122 ; CHECK-NEXT: fmov w8, s0
123 ; CHECK-NEXT: sub w0, w9, w8
125 %res = call i64 @llvm.experimental.cttz.elts.i64.nxv16i8(<vscale x 16 x i8> %a, i1 0)
; Same input type and vscale_range(1,4096) as vscale_4096 above, but with
; zero-is-poison = 1: the all-zero case need not be handled, so the
; lowering only needs one predicate unpack level (.h elements) and a
; single UMAX merge — noticeably shorter code than the non-poison variant.
; NOTE(review): the `; CHECK:` line and trailing `ret`/`}` are missing
; from this excerpt — confirm against the full test file.
129 define i64 @vscale_4096_poison(<vscale x 16 x i8> %a) #1 {
130 ; CHECK-LABEL: vscale_4096_poison:
132 ; CHECK-NEXT: ptrue p0.b
133 ; CHECK-NEXT: cnth x8
134 ; CHECK-NEXT: rdvl x9, #1
135 ; CHECK-NEXT: neg x8, x8
136 ; CHECK-NEXT: mov z1.h, w8
137 ; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, #0
138 ; CHECK-NEXT: index z0.h, #0, #-1
139 ; CHECK-NEXT: punpkhi p1.h, p0.b
140 ; CHECK-NEXT: punpklo p0.h, p0.b
141 ; CHECK-NEXT: inch z0.h, all, mul #2
142 ; CHECK-NEXT: add z1.h, z0.h, z1.h
143 ; CHECK-NEXT: mov z2.h, p1/z, #-1 // =0xffffffffffffffff
144 ; CHECK-NEXT: mov z3.h, p0/z, #-1 // =0xffffffffffffffff
145 ; CHECK-NEXT: ptrue p0.h
146 ; CHECK-NEXT: and z1.d, z1.d, z2.d
147 ; CHECK-NEXT: and z0.d, z0.d, z3.d
148 ; CHECK-NEXT: umax z0.h, p0/m, z0.h, z1.h
149 ; CHECK-NEXT: umaxv h0, p0, z0.h
150 ; CHECK-NEXT: fmov w8, s0
151 ; CHECK-NEXT: sub w8, w9, w8
152 ; CHECK-NEXT: and x0, x8, #0xffff
154 %res = call i64 @llvm.experimental.cttz.elts.i64.nxv16i8(<vscale x 16 x i8> %a, i1 1)
; Same operation as ctz_nxv8i1 but WITHOUT a vscale_range attribute: with
; no upper bound on vscale, the i8-sized index elements used above cannot
; be proven sufficient, so the predicate is unpacked and .s-element
; indices are used instead.
; NOTE(review): the `; CHECK:` line and trailing `ret`/`}` are missing
; from this excerpt — confirm against the full test file.
160 define i32 @ctz_nxv8i1_no_range(<vscale x 8 x i1> %a) {
161 ; CHECK-LABEL: ctz_nxv8i1_no_range:
163 ; CHECK-NEXT: index z0.s, #0, #-1
164 ; CHECK-NEXT: punpklo p1.h, p0.b
165 ; CHECK-NEXT: cntw x8
166 ; CHECK-NEXT: punpkhi p0.h, p0.b
167 ; CHECK-NEXT: neg x8, x8
168 ; CHECK-NEXT: cnth x9
169 ; CHECK-NEXT: mov z1.s, w8
170 ; CHECK-NEXT: incw z0.s, all, mul #2
171 ; CHECK-NEXT: mov z2.s, p1/z, #-1 // =0xffffffffffffffff
172 ; CHECK-NEXT: mov z3.s, p0/z, #-1 // =0xffffffffffffffff
173 ; CHECK-NEXT: ptrue p0.s
174 ; CHECK-NEXT: add z1.s, z0.s, z1.s
175 ; CHECK-NEXT: and z0.d, z0.d, z2.d
176 ; CHECK-NEXT: and z1.d, z1.d, z3.d
177 ; CHECK-NEXT: umax z0.s, p0/m, z0.s, z1.s
178 ; CHECK-NEXT: umaxv s0, p0, z0.s
179 ; CHECK-NEXT: fmov w8, s0
180 ; CHECK-NEXT: sub w0, w9, w8
182 %res = call i32 @llvm.experimental.cttz.elts.i32.nxv8i1(<vscale x 8 x i1> %a, i1 0)
186 ; MATCH WITH BRKB + CNTP
; Optimal pattern (see "MATCH WITH BRKB + CNTP" header): for a full
; <vscale x 16 x i1> predicate, cttz.elts folds to BRKB (break after
; first true) + CNTP (count active lanes) — no index-vector dance.
; The %pg argument is unused by the intrinsic call here.
; NOTE(review): the `; CHECK:` line and trailing `ret`/`}` are missing
; from this excerpt — confirm against the full test file.
188 define i32 @ctz_nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a) {
189 ; CHECK-LABEL: ctz_nxv16i1:
191 ; CHECK-NEXT: ptrue p0.b
192 ; CHECK-NEXT: brkb p0.b, p0/z, p1.b
193 ; CHECK-NEXT: cntp x0, p0, p0.b
194 ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
196 %res = call i32 @llvm.experimental.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> %a, i1 0)
; Same BRKB + CNTP fold with zero-is-poison = 1; the generated code is
; identical to the non-poison variant above, since BRKB/CNTP naturally
; return the full element count for an all-false predicate.
; NOTE(review): the `; CHECK:` line and trailing `ret`/`}` are missing
; from this excerpt — confirm against the full test file.
200 define i32 @ctz_nxv16i1_poison(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a) {
201 ; CHECK-LABEL: ctz_nxv16i1_poison:
203 ; CHECK-NEXT: ptrue p0.b
204 ; CHECK-NEXT: brkb p0.b, p0/z, p1.b
205 ; CHECK-NEXT: cntp x0, p0, p0.b
206 ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
208 %res = call i32 @llvm.experimental.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> %a, i1 1)
; cttz.elts fed by a predicated compare: the icmp+select+and IR collapses
; into a single predicated CMPNE, and the count again folds to BRKB+CNTP.
; NOTE(review): the `; CHECK:` line and trailing `ret`/`}` are missing
; from this excerpt — confirm against the full test file.
212 define i32 @ctz_and_nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
213 ; CHECK-LABEL: ctz_and_nxv16i1:
215 ; CHECK-NEXT: ptrue p1.b
216 ; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, z1.b
217 ; CHECK-NEXT: brkb p0.b, p1/z, p0.b
218 ; CHECK-NEXT: cntp x0, p0, p0.b
219 ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
221 %cmp = icmp ne <vscale x 16 x i8> %a, %b
222 %select = select <vscale x 16 x i1> %pg, <vscale x 16 x i1> %cmp, <vscale x 16 x i1> zeroinitializer
223 %and = and <vscale x 16 x i1> %pg, %select
224 %res = call i32 @llvm.experimental.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> %and, i1 0)
; add(cttz.elts, %b) folds the CNTP-then-add into a single INCP on the
; incoming accumulator register (x0 = %b on AAPCS64).
; NOTE(review): the `; CHECK:` line and trailing `ret`/`}` are missing
; from this excerpt — confirm against the full test file.
228 define i64 @add_i64_ctz_nxv16i1_poison(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a, i64 %b) {
229 ; CHECK-LABEL: add_i64_ctz_nxv16i1_poison:
231 ; CHECK-NEXT: ptrue p0.b
232 ; CHECK-NEXT: brkb p0.b, p0/z, p1.b
233 ; CHECK-NEXT: incp x0, p0.b
235 %res = call i64 @llvm.experimental.cttz.elts.i64.nxv16i1(<vscale x 16 x i1> %a, i1 1)
236 %add = add i64 %res, %b
; i32 variant of the INCP fold: the i64 count is truncated to i32, so the
; 64-bit INCP result is used through w0 with kill annotations marking the
; sub-register transitions.
; NOTE(review): the `; CHECK:` line and trailing `ret`/`}` are missing
; from this excerpt — confirm against the full test file.
240 define i32 @add_i32_ctz_nxv16i1_poison(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a, i32 %b) {
241 ; CHECK-LABEL: add_i32_ctz_nxv16i1_poison:
243 ; CHECK-NEXT: ptrue p0.b
244 ; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
245 ; CHECK-NEXT: brkb p0.b, p0/z, p1.b
246 ; CHECK-NEXT: incp x0, p0.b
247 ; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
249 %res = call i64 @llvm.experimental.cttz.elts.i64.nxv16i1(<vscale x 16 x i1> %a, i1 1)
250 %trunc = trunc i64 %res to i32
251 %add = add i32 %trunc, %b
; Declarations of every llvm.experimental.cttz.elts overload exercised
; above (result type x element type), and the two attribute groups:
; #0 = narrow vscale range (1,16); #1 = wide vscale range (1,4096),
; which determines the index-element width the lowering must use.
255 declare i32 @llvm.experimental.cttz.elts.i32.nxv8i1(<vscale x 8 x i1>, i1)
256 declare i64 @llvm.experimental.cttz.elts.i64.nxv8i1(<vscale x 8 x i1>, i1)
257 declare i64 @llvm.experimental.cttz.elts.i64.nxv16i1(<vscale x 16 x i1>, i1)
258 declare i32 @llvm.experimental.cttz.elts.i32.nxv16i1(<vscale x 16 x i1>, i1)
259 declare i32 @llvm.experimental.cttz.elts.i32.nxv32i1(<vscale x 32 x i1>, i1)
260 declare i32 @llvm.experimental.cttz.elts.i32.nxv4i32(<vscale x 4 x i32>, i1)
262 declare i64 @llvm.experimental.cttz.elts.i64.nxv16i8(<vscale x 16 x i8>, i1)
264 attributes #0 = { vscale_range(1,16) }
265 attributes #1 = { vscale_range(1,4096) }