1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
; All-lanes-nonzero test: per-lane (a == 0), mask bitcast to i8, mask == 0
; i.e. true iff no byte of %a is zero; expected lowering is cmeq + umaxv + bic.
; NOTE(review): trailing ret/`}` of this function are not visible in this chunk.
4 define i1 @combine_setcc_eq_vecreduce_or_v8i1(<8 x i8> %a) {
5 ; CHECK-LABEL: combine_setcc_eq_vecreduce_or_v8i1:
7 ; CHECK-NEXT: cmeq v0.8b, v0.8b, #0
8 ; CHECK-NEXT: mov w8, #1 // =0x1
9 ; CHECK-NEXT: umaxv b0, v0.8b
10 ; CHECK-NEXT: fmov w9, s0
11 ; CHECK-NEXT: bic w0, w8, w9
13 %cmp1 = icmp eq <8 x i8> %a, zeroinitializer
14 %cast = bitcast <8 x i1> %cmp1 to i8
15 %cmp2 = icmp eq i8 %cast, zeroinitializer
; Same combine as the v8i1 case, widened to 16 bytes: true iff no byte of %a
; is zero; expected lowering is a single cmeq + umaxv + bic over v0.16b.
19 define i1 @combine_setcc_eq_vecreduce_or_v16i1(<16 x i8> %a) {
20 ; CHECK-LABEL: combine_setcc_eq_vecreduce_or_v16i1:
22 ; CHECK-NEXT: cmeq v0.16b, v0.16b, #0
23 ; CHECK-NEXT: mov w8, #1 // =0x1
24 ; CHECK-NEXT: umaxv b0, v0.16b
25 ; CHECK-NEXT: fmov w9, s0
26 ; CHECK-NEXT: bic w0, w8, w9
28 %cmp1 = icmp eq <16 x i8> %a, zeroinitializer
29 %cast = bitcast <16 x i1> %cmp1 to i16
30 %cmp2 = icmp eq i16 %cast, zeroinitializer
; 32-byte variant (two q-registers): the two per-half cmeq masks are ORed
; before a single umaxv reduction; true iff no byte of %a is zero.
34 define i1 @combine_setcc_eq_vecreduce_or_v32i1(<32 x i8> %a) {
35 ; CHECK-LABEL: combine_setcc_eq_vecreduce_or_v32i1:
37 ; CHECK-NEXT: cmeq v1.16b, v1.16b, #0
38 ; CHECK-NEXT: cmeq v0.16b, v0.16b, #0
39 ; CHECK-NEXT: mov w8, #1 // =0x1
40 ; CHECK-NEXT: orr v0.16b, v0.16b, v1.16b
41 ; CHECK-NEXT: umaxv b0, v0.16b
42 ; CHECK-NEXT: fmov w9, s0
43 ; CHECK-NEXT: bic w0, w8, w9
45 %cmp1 = icmp eq <32 x i8> %a, zeroinitializer
46 %cast = bitcast <32 x i1> %cmp1 to i32
47 %cmp2 = icmp eq i32 %cast, zeroinitializer
; 64-byte variant (four q-registers): four cmeq masks folded with an orr
; tree into one umaxv reduction; true iff no byte of %a is zero.
51 define i1 @combine_setcc_eq_vecreduce_or_v64i1(<64 x i8> %a) {
52 ; CHECK-LABEL: combine_setcc_eq_vecreduce_or_v64i1:
54 ; CHECK-NEXT: cmeq v2.16b, v2.16b, #0
55 ; CHECK-NEXT: cmeq v0.16b, v0.16b, #0
56 ; CHECK-NEXT: mov w9, #1 // =0x1
57 ; CHECK-NEXT: cmeq v3.16b, v3.16b, #0
58 ; CHECK-NEXT: cmeq v1.16b, v1.16b, #0
59 ; CHECK-NEXT: orr v0.16b, v0.16b, v2.16b
60 ; CHECK-NEXT: orr v1.16b, v1.16b, v3.16b
61 ; CHECK-NEXT: orr v0.16b, v0.16b, v1.16b
62 ; CHECK-NEXT: umaxv b0, v0.16b
63 ; CHECK-NEXT: fmov w8, s0
64 ; CHECK-NEXT: bic w0, w9, w8
66 %cmp1 = icmp eq <64 x i8> %a, zeroinitializer
67 %cast = bitcast <64 x i1> %cmp1 to i64
68 %cmp2 = icmp eq i64 %cast, zeroinitializer
; Any-lane-nonzero test: per-lane (a != 0), mask != 0 — true iff some byte of
; %a is nonzero; expected lowering is cmtst + umaxv + and with 0x1.
72 define i1 @combine_setcc_ne_vecreduce_or_v8i1(<8 x i8> %a) {
73 ; CHECK-LABEL: combine_setcc_ne_vecreduce_or_v8i1:
75 ; CHECK-NEXT: cmtst v0.8b, v0.8b, v0.8b
76 ; CHECK-NEXT: umaxv b0, v0.8b
77 ; CHECK-NEXT: fmov w8, s0
78 ; CHECK-NEXT: and w0, w8, #0x1
80 %cmp1 = icmp ne <8 x i8> %a, zeroinitializer
81 %cast = bitcast <8 x i1> %cmp1 to i8
82 %cmp2 = icmp ne i8 %cast, zeroinitializer
; 16-byte any-lane-nonzero test: same cmtst + umaxv + and lowering as the
; v8i1 case, operating on a full q-register.
86 define i1 @combine_setcc_ne_vecreduce_or_v16i1(<16 x i8> %a) {
87 ; CHECK-LABEL: combine_setcc_ne_vecreduce_or_v16i1:
89 ; CHECK-NEXT: cmtst v0.16b, v0.16b, v0.16b
90 ; CHECK-NEXT: umaxv b0, v0.16b
91 ; CHECK-NEXT: fmov w8, s0
92 ; CHECK-NEXT: and w0, w8, #0x1
94 %cmp1 = icmp ne <16 x i8> %a, zeroinitializer
95 %cast = bitcast <16 x i1> %cmp1 to i16
96 %cmp2 = icmp ne i16 %cast, zeroinitializer
; 32-byte any-lane-nonzero test: the two halves are ORed first, so only one
; cmtst + umaxv reduction is needed.
100 define i1 @combine_setcc_ne_vecreduce_or_v32i1(<32 x i8> %a) {
101 ; CHECK-LABEL: combine_setcc_ne_vecreduce_or_v32i1:
103 ; CHECK-NEXT: orr v0.16b, v0.16b, v1.16b
104 ; CHECK-NEXT: cmtst v0.16b, v0.16b, v0.16b
105 ; CHECK-NEXT: umaxv b0, v0.16b
106 ; CHECK-NEXT: fmov w8, s0
107 ; CHECK-NEXT: and w0, w8, #0x1
109 %cmp1 = icmp ne <32 x i8> %a, zeroinitializer
110 %cast = bitcast <32 x i1> %cmp1 to i32
111 %cmp2 = icmp ne i32 %cast, zeroinitializer
; 64-byte any-lane-nonzero test: all four quarters are ORed together before
; the single cmtst + umaxv reduction.
115 define i1 @combine_setcc_ne_vecreduce_or_v64i1(<64 x i8> %a) {
116 ; CHECK-LABEL: combine_setcc_ne_vecreduce_or_v64i1:
118 ; CHECK-NEXT: orr v1.16b, v1.16b, v3.16b
119 ; CHECK-NEXT: orr v0.16b, v0.16b, v2.16b
120 ; CHECK-NEXT: orr v0.16b, v0.16b, v1.16b
121 ; CHECK-NEXT: cmtst v0.16b, v0.16b, v0.16b
122 ; CHECK-NEXT: umaxv b0, v0.16b
123 ; CHECK-NEXT: fmov w8, s0
124 ; CHECK-NEXT: and w0, w8, #0x1
126 %cmp1 = icmp ne <64 x i8> %a, zeroinitializer
127 %cast = bitcast <64 x i1> %cmp1 to i64
128 %cmp2 = icmp ne i64 %cast, zeroinitializer
; All-lanes-zero test: per-lane (a == 0), mask == -1 (all bits set) — true iff
; every byte of %a is zero; expected lowering is cmeq + uminv + and with 0x1.
132 define i1 @combine_setcc_eq_vecreduce_and_v8i1(<8 x i8> %a) {
133 ; CHECK-LABEL: combine_setcc_eq_vecreduce_and_v8i1:
135 ; CHECK-NEXT: cmeq v0.8b, v0.8b, #0
136 ; CHECK-NEXT: uminv b0, v0.8b
137 ; CHECK-NEXT: fmov w8, s0
138 ; CHECK-NEXT: and w0, w8, #0x1
140 %cmp1 = icmp eq <8 x i8> %a, zeroinitializer
141 %cast = bitcast <8 x i1> %cmp1 to i8
142 %cmp2 = icmp eq i8 %cast, -1
; 16-byte all-lanes-zero test: same cmeq + uminv + and lowering as the v8i1
; case, operating on a full q-register.
146 define i1 @combine_setcc_eq_vecreduce_and_v16i1(<16 x i8> %a) {
147 ; CHECK-LABEL: combine_setcc_eq_vecreduce_and_v16i1:
149 ; CHECK-NEXT: cmeq v0.16b, v0.16b, #0
150 ; CHECK-NEXT: uminv b0, v0.16b
151 ; CHECK-NEXT: fmov w8, s0
152 ; CHECK-NEXT: and w0, w8, #0x1
154 %cmp1 = icmp eq <16 x i8> %a, zeroinitializer
155 %cast = bitcast <16 x i1> %cmp1 to i16
156 %cmp2 = icmp eq i16 %cast, -1
; 32-byte all-lanes-zero test: both halves are ORed first (all zero iff the
; OR of the halves is zero), then one cmeq + uminv reduction.
160 define i1 @combine_setcc_eq_vecreduce_and_v32i1(<32 x i8> %a) {
161 ; CHECK-LABEL: combine_setcc_eq_vecreduce_and_v32i1:
163 ; CHECK-NEXT: orr v0.16b, v0.16b, v1.16b
164 ; CHECK-NEXT: cmeq v0.16b, v0.16b, #0
165 ; CHECK-NEXT: uminv b0, v0.16b
166 ; CHECK-NEXT: fmov w8, s0
167 ; CHECK-NEXT: and w0, w8, #0x1
169 %cmp1 = icmp eq <32 x i8> %a, zeroinitializer
170 %cast = bitcast <32 x i1> %cmp1 to i32
171 %cmp2 = icmp eq i32 %cast, -1
; 64-byte all-lanes-zero test: the four quarters are ORed together before the
; single cmeq + uminv reduction.
175 define i1 @combine_setcc_eq_vecreduce_and_v64i1(<64 x i8> %a) {
176 ; CHECK-LABEL: combine_setcc_eq_vecreduce_and_v64i1:
178 ; CHECK-NEXT: orr v1.16b, v1.16b, v3.16b
179 ; CHECK-NEXT: orr v0.16b, v0.16b, v2.16b
180 ; CHECK-NEXT: orr v0.16b, v0.16b, v1.16b
181 ; CHECK-NEXT: cmeq v0.16b, v0.16b, #0
182 ; CHECK-NEXT: uminv b0, v0.16b
183 ; CHECK-NEXT: fmov w8, s0
184 ; CHECK-NEXT: and w0, w8, #0x1
186 %cmp1 = icmp eq <64 x i8> %a, zeroinitializer
187 %cast = bitcast <64 x i1> %cmp1 to i64
188 %cmp2 = icmp eq i64 %cast, -1
; Some-lane-zero test: per-lane (a != 0), mask != -1 — true iff at least one
; byte of %a is zero; expected lowering is cmtst + uminv + bic.
192 define i1 @combine_setcc_ne_vecreduce_and_v8i1(<8 x i8> %a) {
193 ; CHECK-LABEL: combine_setcc_ne_vecreduce_and_v8i1:
195 ; CHECK-NEXT: cmtst v0.8b, v0.8b, v0.8b
196 ; CHECK-NEXT: mov w8, #1 // =0x1
197 ; CHECK-NEXT: uminv b0, v0.8b
198 ; CHECK-NEXT: fmov w9, s0
199 ; CHECK-NEXT: bic w0, w8, w9
201 %cmp1 = icmp ne <8 x i8> %a, zeroinitializer
202 %cast = bitcast <8 x i1> %cmp1 to i8
203 %cmp2 = icmp ne i8 %cast, -1
; 16-byte some-lane-zero test: same cmtst + uminv + bic lowering as the v8i1
; case, operating on a full q-register.
207 define i1 @combine_setcc_ne_vecreduce_and_v16i1(<16 x i8> %a) {
208 ; CHECK-LABEL: combine_setcc_ne_vecreduce_and_v16i1:
210 ; CHECK-NEXT: cmtst v0.16b, v0.16b, v0.16b
211 ; CHECK-NEXT: mov w8, #1 // =0x1
212 ; CHECK-NEXT: uminv b0, v0.16b
213 ; CHECK-NEXT: fmov w9, s0
214 ; CHECK-NEXT: bic w0, w8, w9
216 %cmp1 = icmp ne <16 x i8> %a, zeroinitializer
217 %cast = bitcast <16 x i1> %cmp1 to i16
218 %cmp2 = icmp ne i16 %cast, -1
; 32-byte some-lane-zero test: the two halves' masks are combined with a
; vector bic (cmtst half AND NOT cmeq half) before the uminv reduction.
222 define i1 @combine_setcc_ne_vecreduce_and_v32i1(<32 x i8> %a) {
223 ; CHECK-LABEL: combine_setcc_ne_vecreduce_and_v32i1:
225 ; CHECK-NEXT: cmtst v0.16b, v0.16b, v0.16b
226 ; CHECK-NEXT: cmeq v1.16b, v1.16b, #0
227 ; CHECK-NEXT: mov w8, #1 // =0x1
228 ; CHECK-NEXT: bic v0.16b, v0.16b, v1.16b
229 ; CHECK-NEXT: uminv b0, v0.16b
230 ; CHECK-NEXT: fmov w9, s0
231 ; CHECK-NEXT: bic w0, w8, w9
233 %cmp1 = icmp ne <32 x i8> %a, zeroinitializer
234 %cast = bitcast <32 x i1> %cmp1 to i32
235 %cmp2 = icmp ne i32 %cast, -1
; 64-byte some-lane-zero test: per-pair cmtst/cmeq masks combined with vector
; bic, ANDed together, then a single uminv reduction.
239 define i1 @combine_setcc_ne_vecreduce_and_v64i1(<64 x i8> %a) {
240 ; CHECK-LABEL: combine_setcc_ne_vecreduce_and_v64i1:
242 ; CHECK-NEXT: cmtst v1.16b, v1.16b, v1.16b
243 ; CHECK-NEXT: cmtst v0.16b, v0.16b, v0.16b
244 ; CHECK-NEXT: mov w9, #1 // =0x1
245 ; CHECK-NEXT: cmeq v2.16b, v2.16b, #0
246 ; CHECK-NEXT: cmeq v3.16b, v3.16b, #0
247 ; CHECK-NEXT: bic v1.16b, v1.16b, v3.16b
248 ; CHECK-NEXT: bic v0.16b, v0.16b, v2.16b
249 ; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
250 ; CHECK-NEXT: uminv b0, v0.16b
251 ; CHECK-NEXT: fmov w8, s0
252 ; CHECK-NEXT: bic w0, w9, w8
254 %cmp1 = icmp ne <64 x i8> %a, zeroinitializer
255 %cast = bitcast <64 x i1> %cmp1 to i64
256 %cmp2 = icmp ne i64 %cast, -1
; bcmp(%a, %b, 16) == 0: the 16-byte memory compare is expected to expand to
; ldp pairs with a cmp/ccmp conjunction and a cset eq — no call to bcmp.
260 define i1 @combine_setcc_eq0_conjunction_xor_or(ptr %a, ptr %b) {
261 ; CHECK-LABEL: combine_setcc_eq0_conjunction_xor_or:
263 ; CHECK-NEXT: ldp x8, x11, [x1]
264 ; CHECK-NEXT: ldp x9, x10, [x0]
265 ; CHECK-NEXT: cmp x9, x8
266 ; CHECK-NEXT: ccmp x10, x11, #0, eq
267 ; CHECK-NEXT: cset w0, eq
269 %bcmp = tail call i32 @bcmp(ptr dereferenceable(16) %a, ptr dereferenceable(16) %b, i64 16)
270 %cmp = icmp eq i32 %bcmp, 0
; bcmp(%a, %b, 16) != 0: same ldp + cmp/ccmp expansion as the eq0 case above,
; with the final cset inverted to ne.
274 define i1 @combine_setcc_ne0_conjunction_xor_or(ptr %a, ptr %b) {
275 ; CHECK-LABEL: combine_setcc_ne0_conjunction_xor_or:
277 ; CHECK-NEXT: ldp x8, x11, [x1]
278 ; CHECK-NEXT: ldp x9, x10, [x0]
279 ; CHECK-NEXT: cmp x9, x8
280 ; CHECK-NEXT: ccmp x10, x11, #0, eq
281 ; CHECK-NEXT: cset w0, ne
283 %bcmp = tail call i32 @bcmp(ptr dereferenceable(16) %a, ptr dereferenceable(16) %b, i64 16)
284 %cmp = icmp ne i32 %bcmp, 0
288 ; Check that the combine does not increase the instruction count when the LHS has multiple uses.
; Multiple-use case: the eor/orr reduction result (w8) feeds both the cbz
; branch and the call to @use, so the combine must keep a single reduction.
; NOTE(review): several IR lines of this function (e.g. the definition of %7
; and the ret) are not visible in this chunk — do not assume their contents.
289 define i32 @combine_setcc_multiuse(i32 %0, i32 %1, i32 %2, i32 %3) {
290 ; CHECK-LABEL: combine_setcc_multiuse:
292 ; CHECK-NEXT: eor w8, w3, w2
293 ; CHECK-NEXT: eor w9, w1, w0
294 ; CHECK-NEXT: orr w8, w8, w9
295 ; CHECK-NEXT: cbz w8, .LBB18_2
296 ; CHECK-NEXT: // %bb.1:
297 ; CHECK-NEXT: mov w0, w8
299 ; CHECK-NEXT: .LBB18_2:
304 %8 = icmp eq i32 %7, 0
305 br i1 %8, label %11, label %9
308 %10 = tail call i32 @use(i32 %7) #2
312 %12 = phi i32 [ %10, %9 ], [ %0, %4 ]
316 ; The CMP/CCMP glue can constrain how ISel schedules the instructions it
317 ; creates out of the DAG, so pin down the expected single-compare lowering.
; (x == y) | (lo64(x) == lo64(y)) simplifies to the low-64-bit compare alone,
; so only a single cmp x0, x2 + cset eq is expected — no ccmp chain.
318 define i32 @combine_setcc_glue(i128 noundef %x, i128 noundef %y) {
319 ; CHECK-LABEL: combine_setcc_glue:
320 ; CHECK: // %bb.0: // %entry
321 ; CHECK-NEXT: cmp x0, x2
322 ; CHECK-NEXT: cset w0, eq
325 %cmp3 = icmp eq i128 %x, %y
326 %conv = trunc i128 %x to i64
327 %conv1 = trunc i128 %y to i64
328 %cmp = icmp eq i64 %conv, %conv1
329 %or7 = or i1 %cmp3, %cmp
330 %or = zext i1 %or7 to i32
334 ; Reduced test from https://github.com/llvm/llvm-project/issues/58675
; i128 loop with umin/umax and an i128 equality exit condition: the exit
; compare is expected to use a cmp/ccmp conjunction rather than a glued chain.
; NOTE(review): some IR lines (entry block, final br) are not visible in this
; chunk — the loop-carried values shown below may depend on elided code.
335 define [2 x i64] @PR58675(i128 %a.addr, i128 %b.addr) {
336 ; CHECK-LABEL: PR58675:
337 ; CHECK: // %bb.0: // %entry
338 ; CHECK-NEXT: mov x8, xzr
339 ; CHECK-NEXT: mov x9, xzr
340 ; CHECK-NEXT: .LBB20_1: // %do.body
341 ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
342 ; CHECK-NEXT: cmp x0, x8
343 ; CHECK-NEXT: sbcs xzr, x1, x9
344 ; CHECK-NEXT: csel x10, x0, x8, lo
345 ; CHECK-NEXT: csel x11, x1, x9, lo
346 ; CHECK-NEXT: subs x8, x2, x10
347 ; CHECK-NEXT: sbc x9, x3, x11
348 ; CHECK-NEXT: cmp x3, x11
349 ; CHECK-NEXT: ccmp x2, x10, #0, eq
350 ; CHECK-NEXT: b.ne .LBB20_1
351 ; CHECK-NEXT: // %bb.2: // %do.end
352 ; CHECK-NEXT: mov x0, xzr
353 ; CHECK-NEXT: mov x1, xzr
358 do.body: ; preds = %do.body, %entry
359 %a.addr.i1 = phi i128 [ 1, %do.body ], [ 0, %entry ]
360 %b.addr.i2 = phi i128 [ %sub, %do.body ], [ 0, %entry ]
361 %0 = tail call i128 @llvm.umin.i128(i128 %a.addr, i128 %b.addr.i2)
362 %1 = tail call i128 @llvm.umax.i128(i128 0, i128 %a.addr)
363 %sub = sub i128 %b.addr, %0
364 %cmp18.not = icmp eq i128 %b.addr, %0
365 br i1 %cmp18.not, label %do.end, label %do.body
367 do.end: ; preds = %do.body
368 ret [2 x i64] zeroinitializer
371 declare i128 @llvm.umin.i128(i128, i128)
372 declare i128 @llvm.umax.i128(i128, i128)
373 declare i32 @bcmp(ptr nocapture, ptr nocapture, i64)
374 declare i32 @use(i32 noundef)