1 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
2 # RUN: llc -mtriple=aarch64 -run-pass=aarch64-postlegalizer-lowering -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=LOWER
3 # RUN: llc -mtriple=aarch64 -global-isel -start-before=aarch64-postlegalizer-lowering -stop-after=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=SELECT
5 # Check that we swap the order of operands on comparisons when it is likely
6 # to introduce a folding opportunity.
8 # The condition code for the compare should be changed when appropriate.
10 # TODO: emitBinOp doesn't know about selectArithExtendedRegister, so some of
11 # these cases don't hit in selection yet.
15 name: swap_sextinreg_lhs
17 tracksRegLiveness: true
22 ; LOWER-LABEL: name: swap_sextinreg_lhs
23 ; LOWER: liveins: $x0, $x1
25 ; LOWER-NEXT: %reg:_(s64) = COPY $x0
26 ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SEXT_INREG %reg, 8
27 ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
28 ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %cmp_rhs(s64), %cmp_lhs
29 ; LOWER-NEXT: $w0 = COPY %cmp(s32)
30 ; LOWER-NEXT: RET_ReallyLR implicit $w0
32 ; SELECT-LABEL: name: swap_sextinreg_lhs
33 ; SELECT: liveins: $x0, $x1
35 ; SELECT-NEXT: %reg:gpr64all = COPY $x0
36 ; SELECT-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %reg.sub_32
37 ; SELECT-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
38 ; SELECT-NEXT: %cmp_rhs:gpr64sp = COPY $x1
39 ; SELECT-NEXT: [[SUBSXrx:%[0-9]+]]:gpr64 = SUBSXrx %cmp_rhs, [[COPY1]], 32, implicit-def $nzcv
40 ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
41 ; SELECT-NEXT: $w0 = COPY %cmp
42 ; SELECT-NEXT: RET_ReallyLR implicit $w0
43 %reg:_(s64) = COPY $x0
44 %cmp_lhs:_(s64) = G_SEXT_INREG %reg, 8
45 %cmp_rhs:_(s64) = COPY $x1
46 %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
48 RET_ReallyLR implicit $w0
52 name: dont_swap_more_than_one_use
54 tracksRegLiveness: true
59 ; The LHS of the compare is used in an add, and a second compare. Don't
60 ; swap, since we don't gain any folding opportunities here.
62 ; LOWER-LABEL: name: dont_swap_more_than_one_use
63 ; LOWER: liveins: $x0, $x1
65 ; LOWER-NEXT: %reg0:_(s64) = COPY $x0
66 ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SEXT_INREG %reg0, 8
67 ; LOWER-NEXT: %add:_(s64) = G_ADD %cmp_lhs, %reg0
68 ; LOWER-NEXT: %cmp2:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %add
69 ; LOWER-NEXT: $w0 = COPY %cmp2(s32)
70 ; LOWER-NEXT: RET_ReallyLR implicit $w0
72 ; SELECT-LABEL: name: dont_swap_more_than_one_use
73 ; SELECT: liveins: $x0, $x1
75 ; SELECT-NEXT: %reg0:gpr64 = COPY $x0
76 ; SELECT-NEXT: %cmp_lhs:gpr64 = SBFMXri %reg0, 0, 7
77 ; SELECT-NEXT: %add:gpr64 = ADDXrr %cmp_lhs, %reg0
78 ; SELECT-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %cmp_lhs, %add, implicit-def $nzcv
79 ; SELECT-NEXT: %cmp2:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
80 ; SELECT-NEXT: $w0 = COPY %cmp2
81 ; SELECT-NEXT: RET_ReallyLR implicit $w0
82 %reg0:_(s64) = COPY $x0
83 %cmp_lhs:_(s64) = G_SEXT_INREG %reg0, 8
84 %reg1:_(s64) = COPY $x1
85 %cmp1:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %reg1
87 %add:_(s64) = G_ADD %cmp_lhs(s64), %reg0
88 %cmp2:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %add
91 RET_ReallyLR implicit $w0
95 name: dont_swap_legal_arith_immed_on_rhs
97 tracksRegLiveness: true
101 ; Arithmetic immediates can be folded into compares. If we have one, then
102 ; don't bother changing anything.
104 ; LOWER-LABEL: name: dont_swap_legal_arith_immed_on_rhs
105 ; LOWER: liveins: $x0, $x1
107 ; LOWER-NEXT: %reg:_(s64) = COPY $x0
108 ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SEXT_INREG %reg, 8
109 ; LOWER-NEXT: %cmp_rhs:_(s64) = G_CONSTANT i64 12
110 ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
111 ; LOWER-NEXT: $w0 = COPY %cmp(s32)
112 ; LOWER-NEXT: RET_ReallyLR implicit $w0
114 ; SELECT-LABEL: name: dont_swap_legal_arith_immed_on_rhs
115 ; SELECT: liveins: $x0, $x1
116 ; SELECT-NEXT: {{ $}}
117 ; SELECT-NEXT: %reg:gpr64 = COPY $x0
118 ; SELECT-NEXT: %cmp_lhs:gpr64common = SBFMXri %reg, 0, 7
119 ; SELECT-NEXT: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri %cmp_lhs, 12, 0, implicit-def $nzcv
120 ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
121 ; SELECT-NEXT: $w0 = COPY %cmp
122 ; SELECT-NEXT: RET_ReallyLR implicit $w0
123 %reg:_(s64) = COPY $x0
124 %cmp_lhs:_(s64) = G_SEXT_INREG %reg, 8
125 %cmp_rhs:_(s64) = G_CONSTANT i64 12
126 %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
128 RET_ReallyLR implicit $w0
132 name: swap_non_arith_immed_on_rhs
134 tracksRegLiveness: true
138 ; If we have a non-arithmetic immediate on the rhs, then we can swap to get
139 ; a guaranteed folding opportunity.
141 ; LOWER-LABEL: name: swap_non_arith_immed_on_rhs
142 ; LOWER: liveins: $x0, $x1
144 ; LOWER-NEXT: %reg:_(s64) = COPY $x0
145 ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SEXT_INREG %reg, 8
146 ; LOWER-NEXT: %cmp_rhs:_(s64) = G_CONSTANT i64 1234567
147 ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %cmp_rhs(s64), %cmp_lhs
148 ; LOWER-NEXT: $w0 = COPY %cmp(s32)
149 ; LOWER-NEXT: RET_ReallyLR implicit $w0
151 ; SELECT-LABEL: name: swap_non_arith_immed_on_rhs
152 ; SELECT: liveins: $x0, $x1
153 ; SELECT-NEXT: {{ $}}
154 ; SELECT-NEXT: %reg:gpr64all = COPY $x0
155 ; SELECT-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %reg.sub_32
156 ; SELECT-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
157 ; SELECT-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1234567
158 ; SELECT-NEXT: %cmp_rhs:gpr64sp = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
159 ; SELECT-NEXT: [[SUBSXrx:%[0-9]+]]:gpr64 = SUBSXrx %cmp_rhs, [[COPY1]], 32, implicit-def $nzcv
160 ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
161 ; SELECT-NEXT: $w0 = COPY %cmp
162 ; SELECT-NEXT: RET_ReallyLR implicit $w0
163 %reg:_(s64) = COPY $x0
164 %cmp_lhs:_(s64) = G_SEXT_INREG %reg, 8
165 %cmp_rhs:_(s64) = G_CONSTANT i64 1234567
166 %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
168 RET_ReallyLR implicit $w0
172 name: swap_and_lhs_0xFF
174 tracksRegLiveness: true
178 ; LOWER-LABEL: name: swap_and_lhs_0xFF
179 ; LOWER: liveins: $x0, $x1
181 ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
182 ; LOWER-NEXT: %and_lhs:_(s64) = COPY $x0
183 ; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 255
184 ; LOWER-NEXT: %cmp_lhs:_(s64) = G_AND %and_lhs, %cst
185 ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %cmp_rhs(s64), %cmp_lhs
186 ; LOWER-NEXT: $w0 = COPY %cmp(s32)
187 ; LOWER-NEXT: RET_ReallyLR implicit $w0
189 ; SELECT-LABEL: name: swap_and_lhs_0xFF
190 ; SELECT: liveins: $x0, $x1
191 ; SELECT-NEXT: {{ $}}
192 ; SELECT-NEXT: %cmp_rhs:gpr64sp = COPY $x1
193 ; SELECT-NEXT: %and_lhs:gpr64all = COPY $x0
194 ; SELECT-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %and_lhs.sub_32
195 ; SELECT-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
196 ; SELECT-NEXT: [[SUBSXrx:%[0-9]+]]:gpr64 = SUBSXrx %cmp_rhs, [[COPY1]], 0, implicit-def $nzcv
197 ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
198 ; SELECT-NEXT: $w0 = COPY %cmp
199 ; SELECT-NEXT: RET_ReallyLR implicit $w0
200 %cmp_rhs:_(s64) = COPY $x1
202 %and_lhs:_(s64) = COPY $x0
203 %cst:_(s64) = G_CONSTANT i64 255
204 %cmp_lhs:_(s64) = G_AND %and_lhs, %cst(s64)
206 %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
208 RET_ReallyLR implicit $w0
212 name: swap_and_lhs_0xFFFF
214 tracksRegLiveness: true
218 ; LOWER-LABEL: name: swap_and_lhs_0xFFFF
219 ; LOWER: liveins: $x0, $x1
221 ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
222 ; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 65535
223 ; LOWER-NEXT: %and_lhs:_(s64) = COPY $x0
224 ; LOWER-NEXT: %cmp_lhs:_(s64) = G_AND %and_lhs, %cst
225 ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %cmp_rhs(s64), %cmp_lhs
226 ; LOWER-NEXT: $w0 = COPY %cmp(s32)
227 ; LOWER-NEXT: RET_ReallyLR implicit $w0
229 ; SELECT-LABEL: name: swap_and_lhs_0xFFFF
230 ; SELECT: liveins: $x0, $x1
231 ; SELECT-NEXT: {{ $}}
232 ; SELECT-NEXT: %cmp_rhs:gpr64sp = COPY $x1
233 ; SELECT-NEXT: %and_lhs:gpr64all = COPY $x0
234 ; SELECT-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %and_lhs.sub_32
235 ; SELECT-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
236 ; SELECT-NEXT: [[SUBSXrx:%[0-9]+]]:gpr64 = SUBSXrx %cmp_rhs, [[COPY1]], 8, implicit-def $nzcv
237 ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
238 ; SELECT-NEXT: $w0 = COPY %cmp
239 ; SELECT-NEXT: RET_ReallyLR implicit $w0
240 %cmp_rhs:_(s64) = COPY $x1
242 %cst:_(s64) = G_CONSTANT i64 65535
243 %and_lhs:_(s64) = COPY $x0
244 %cmp_lhs:_(s64) = G_AND %and_lhs, %cst(s64)
246 %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
248 RET_ReallyLR implicit $w0
252 name: swap_and_lhs_0xFFFFFFFF
254 tracksRegLiveness: true
258 ; LOWER-LABEL: name: swap_and_lhs_0xFFFFFFFF
259 ; LOWER: liveins: $x0, $x1
261 ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
262 ; LOWER-NEXT: %and_lhs:_(s64) = COPY $x0
263 ; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 4294967295
264 ; LOWER-NEXT: %cmp_lhs:_(s64) = G_AND %and_lhs, %cst
265 ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %cmp_rhs(s64), %cmp_lhs
266 ; LOWER-NEXT: $w0 = COPY %cmp(s32)
267 ; LOWER-NEXT: RET_ReallyLR implicit $w0
269 ; SELECT-LABEL: name: swap_and_lhs_0xFFFFFFFF
270 ; SELECT: liveins: $x0, $x1
271 ; SELECT-NEXT: {{ $}}
272 ; SELECT-NEXT: %cmp_rhs:gpr64sp = COPY $x1
273 ; SELECT-NEXT: %and_lhs:gpr64all = COPY $x0
274 ; SELECT-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %and_lhs.sub_32
275 ; SELECT-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
276 ; SELECT-NEXT: [[SUBSXrx:%[0-9]+]]:gpr64 = SUBSXrx %cmp_rhs, [[COPY1]], 16, implicit-def $nzcv
277 ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
278 ; SELECT-NEXT: $w0 = COPY %cmp
279 ; SELECT-NEXT: RET_ReallyLR implicit $w0
280 %cmp_rhs:_(s64) = COPY $x1
282 %and_lhs:_(s64) = COPY $x0
283 %cst:_(s64) = G_CONSTANT i64 4294967295
284 %cmp_lhs:_(s64) = G_AND %and_lhs, %cst(s64)
286 %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
288 RET_ReallyLR implicit $w0
292 name: dont_swap_and_lhs_wrong_mask
294 tracksRegLiveness: true
298 ;   7 isn't an extend mask for G_AND, so there are no folding opportunities
301 ; LOWER-LABEL: name: dont_swap_and_lhs_wrong_mask
302 ; LOWER: liveins: $x0, $x1
304 ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
305 ; LOWER-NEXT: %and_lhs:_(s64) = COPY $x0
306 ; LOWER-NEXT: %not_an_extend_mask:_(s64) = G_CONSTANT i64 7
307 ; LOWER-NEXT: %cmp_lhs:_(s64) = G_AND %and_lhs, %not_an_extend_mask
308 ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
309 ; LOWER-NEXT: $w0 = COPY %cmp(s32)
310 ; LOWER-NEXT: RET_ReallyLR implicit $w0
312 ; SELECT-LABEL: name: dont_swap_and_lhs_wrong_mask
313 ; SELECT: liveins: $x0, $x1
314 ; SELECT-NEXT: {{ $}}
315 ; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
316 ; SELECT-NEXT: %and_lhs:gpr64 = COPY $x0
317 ; SELECT-NEXT: %cmp_lhs:gpr64common = ANDXri %and_lhs, 4098
318 ; SELECT-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
319 ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
320 ; SELECT-NEXT: $w0 = COPY %cmp
321 ; SELECT-NEXT: RET_ReallyLR implicit $w0
322 %cmp_rhs:_(s64) = COPY $x1
324 %and_lhs:_(s64) = COPY $x0
325 %not_an_extend_mask:_(s64) = G_CONSTANT i64 7
326 %cmp_lhs:_(s64) = G_AND %and_lhs, %not_an_extend_mask(s64)
328 %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
330 RET_ReallyLR implicit $w0
336 tracksRegLiveness: true
341 ; LOWER-LABEL: name: swap_shl_lhs
342 ; LOWER: liveins: $x0, $x1
344 ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
345 ; LOWER-NEXT: %shl_lhs:_(s64) = COPY $x0
346 ; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 1
347 ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SHL %shl_lhs, %cst(s64)
348 ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sgt), %cmp_rhs(s64), %cmp_lhs
349 ; LOWER-NEXT: $w0 = COPY %cmp(s32)
350 ; LOWER-NEXT: RET_ReallyLR implicit $w0
352 ; SELECT-LABEL: name: swap_shl_lhs
353 ; SELECT: liveins: $x0, $x1
354 ; SELECT-NEXT: {{ $}}
355 ; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
356 ; SELECT-NEXT: %shl_lhs:gpr64 = COPY $x0
357 ; SELECT-NEXT: [[SUBSXrs:%[0-9]+]]:gpr64 = SUBSXrs %cmp_rhs, %shl_lhs, 1, implicit-def $nzcv
358 ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
359 ; SELECT-NEXT: $w0 = COPY %cmp
360 ; SELECT-NEXT: RET_ReallyLR implicit $w0
361 %cmp_rhs:_(s64) = COPY $x1
363 %shl_lhs:_(s64) = COPY $x0
364 %cst:_(s64) = G_CONSTANT i64 1
365 %cmp_lhs:_(s64) = G_SHL %shl_lhs, %cst(s64)
367 %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
369 RET_ReallyLR implicit $w0
375 tracksRegLiveness: true
380 ; LOWER-LABEL: name: swap_ashr_lhs
381 ; LOWER: liveins: $x0, $x1
383 ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
384 ; LOWER-NEXT: %ashr_lhs:_(s64) = COPY $x0
385 ; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 1
386 ; LOWER-NEXT: %cmp_lhs:_(s64) = G_ASHR %ashr_lhs, %cst(s64)
387 ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sgt), %cmp_rhs(s64), %cmp_lhs
388 ; LOWER-NEXT: $w0 = COPY %cmp(s32)
389 ; LOWER-NEXT: RET_ReallyLR implicit $w0
391 ; SELECT-LABEL: name: swap_ashr_lhs
392 ; SELECT: liveins: $x0, $x1
393 ; SELECT-NEXT: {{ $}}
394 ; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
395 ; SELECT-NEXT: %ashr_lhs:gpr64 = COPY $x0
396 ; SELECT-NEXT: [[SUBSXrs:%[0-9]+]]:gpr64 = SUBSXrs %cmp_rhs, %ashr_lhs, 129, implicit-def $nzcv
397 ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
398 ; SELECT-NEXT: $w0 = COPY %cmp
399 ; SELECT-NEXT: RET_ReallyLR implicit $w0
400 %cmp_rhs:_(s64) = COPY $x1
402 %ashr_lhs:_(s64) = COPY $x0
403 %cst:_(s64) = G_CONSTANT i64 1
404 %cmp_lhs:_(s64) = G_ASHR %ashr_lhs, %cst(s64)
406 %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
408 RET_ReallyLR implicit $w0
414 tracksRegLiveness: true
419 ; LOWER-LABEL: name: swap_lshr_lhs
420 ; LOWER: liveins: $x0, $x1
422 ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
423 ; LOWER-NEXT: %lshr_lhs:_(s64) = COPY $x0
424 ; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 1
425 ; LOWER-NEXT: %cmp_lhs:_(s64) = G_LSHR %lshr_lhs, %cst(s64)
426 ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sgt), %cmp_rhs(s64), %cmp_lhs
427 ; LOWER-NEXT: $w0 = COPY %cmp(s32)
428 ; LOWER-NEXT: RET_ReallyLR implicit $w0
430 ; SELECT-LABEL: name: swap_lshr_lhs
431 ; SELECT: liveins: $x0, $x1
432 ; SELECT-NEXT: {{ $}}
433 ; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
434 ; SELECT-NEXT: %lshr_lhs:gpr64 = COPY $x0
435 ; SELECT-NEXT: [[SUBSXrs:%[0-9]+]]:gpr64 = SUBSXrs %cmp_rhs, %lshr_lhs, 65, implicit-def $nzcv
436 ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
437 ; SELECT-NEXT: $w0 = COPY %cmp
438 ; SELECT-NEXT: RET_ReallyLR implicit $w0
439 %cmp_rhs:_(s64) = COPY $x1
441 %lshr_lhs:_(s64) = COPY $x0
442 %cst:_(s64) = G_CONSTANT i64 1
443 %cmp_lhs:_(s64) = G_LSHR %lshr_lhs, %cst(s64)
445 %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
447 RET_ReallyLR implicit $w0
451 name: dont_swap_shift_s64_cst_too_large
453 tracksRegLiveness: true
458 ; Constant for the shift must be <= 63.
460 ; LOWER-LABEL: name: dont_swap_shift_s64_cst_too_large
461 ; LOWER: liveins: $x0, $x1
463 ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
464 ; LOWER-NEXT: %shl_lhs:_(s64) = COPY $x0
465 ; LOWER-NEXT: %too_large:_(s64) = G_CONSTANT i64 64
466 ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SHL %shl_lhs, %too_large(s64)
467 ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
468 ; LOWER-NEXT: $w0 = COPY %cmp(s32)
469 ; LOWER-NEXT: RET_ReallyLR implicit $w0
471 ; SELECT-LABEL: name: dont_swap_shift_s64_cst_too_large
472 ; SELECT: liveins: $x0, $x1
473 ; SELECT-NEXT: {{ $}}
474 ; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
475 ; SELECT-NEXT: %shl_lhs:gpr64 = COPY $x0
476 ; SELECT-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 64
477 ; SELECT-NEXT: %too_large:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
478 ; SELECT-NEXT: %cmp_lhs:gpr64 = LSLVXr %shl_lhs, %too_large
479 ; SELECT-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
480 ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
481 ; SELECT-NEXT: $w0 = COPY %cmp
482 ; SELECT-NEXT: RET_ReallyLR implicit $w0
483 %cmp_rhs:_(s64) = COPY $x1
485 %shl_lhs:_(s64) = COPY $x0
486 %too_large:_(s64) = G_CONSTANT i64 64
487 %cmp_lhs:_(s64) = G_SHL %shl_lhs, %too_large(s64)
489 %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
491 RET_ReallyLR implicit $w0
496 name: dont_swap_shift_s32_cst_too_large
498 tracksRegLiveness: true
503 ;   Constant for the shift must be <= 31.
505 ; LOWER-LABEL: name: dont_swap_shift_s32_cst_too_large
506 ; LOWER: liveins: $w0, $w1
508 ; LOWER-NEXT: %cmp_rhs:_(s32) = COPY $w1
509 ; LOWER-NEXT: %shl_lhs:_(s32) = COPY $w0
510 ; LOWER-NEXT: %cst:_(s32) = G_CONSTANT i32 32
511 ; LOWER-NEXT: %cmp_lhs:_(s32) = G_SHL %shl_lhs, %cst(s32)
512 ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s32), %cmp_rhs
513 ; LOWER-NEXT: $w0 = COPY %cmp(s32)
514 ; LOWER-NEXT: RET_ReallyLR implicit $w0
516 ; SELECT-LABEL: name: dont_swap_shift_s32_cst_too_large
517 ; SELECT: liveins: $w0, $w1
518 ; SELECT-NEXT: {{ $}}
519 ; SELECT-NEXT: %cmp_rhs:gpr32 = COPY $w1
520 ; SELECT-NEXT: %shl_lhs:gpr32 = COPY $w0
521 ; SELECT-NEXT: %cst:gpr32 = MOVi32imm 32
522 ; SELECT-NEXT: %cmp_lhs:gpr32 = LSLVWr %shl_lhs, %cst
523 ; SELECT-NEXT: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
524 ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
525 ; SELECT-NEXT: $w0 = COPY %cmp
526 ; SELECT-NEXT: RET_ReallyLR implicit $w0
527 %cmp_rhs:_(s32) = COPY $w1
529 %shl_lhs:_(s32) = COPY $w0
530 %cst:_(s32) = G_CONSTANT i32 32
531 %cmp_lhs:_(s32) = G_SHL %shl_lhs, %cst(s32)
533 %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s32), %cmp_rhs
535 RET_ReallyLR implicit $w0
539 name: dont_swap_cmn_lhs_no_folding_opportunities
541 tracksRegLiveness: true
546 ; No reason to swap a CMN on the LHS when it won't introduce a constant
547 ; folding opportunity. We can recognise CMNs on the LHS and RHS, so there's
548 ; nothing to gain here.
550 ; LOWER-LABEL: name: dont_swap_cmn_lhs_no_folding_opportunities
551 ; LOWER: liveins: $x0, $x1
553 ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
554 ; LOWER-NEXT: %sub_rhs:_(s64) = COPY $x0
555 ; LOWER-NEXT: %zero:_(s64) = G_CONSTANT i64 0
556 ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SUB %zero, %sub_rhs
557 ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(ne), %cmp_lhs(s64), %cmp_rhs
558 ; LOWER-NEXT: $w0 = COPY %cmp(s32)
559 ; LOWER-NEXT: RET_ReallyLR implicit $w0
561 ; SELECT-LABEL: name: dont_swap_cmn_lhs_no_folding_opportunities
562 ; SELECT: liveins: $x0, $x1
563 ; SELECT-NEXT: {{ $}}
564 ; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
565 ; SELECT-NEXT: %sub_rhs:gpr64 = COPY $x0
566 ; SELECT-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr %sub_rhs, %cmp_rhs, implicit-def $nzcv
567 ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
568 ; SELECT-NEXT: $w0 = COPY %cmp
569 ; SELECT-NEXT: RET_ReallyLR implicit $w0
570 %cmp_rhs:_(s64) = COPY $x1
572 %sub_rhs:_(s64) = COPY $x0
573 %zero:_(s64) = G_CONSTANT i64 0
574 %cmp_lhs:_(s64) = G_SUB %zero, %sub_rhs
576 %cmp:_(s32) = G_ICMP intpred(ne), %cmp_lhs(s64), %cmp_rhs
578 RET_ReallyLR implicit $w0
584 tracksRegLiveness: true
589 ; Swap when we can see a constant folding opportunity through the sub on
593 ; LOWER-LABEL: name: swap_cmn_lhs
594 ; LOWER: liveins: $x0, $x1
596 ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
597 ; LOWER-NEXT: %shl_lhs:_(s64) = COPY $x0
598 ; LOWER-NEXT: %zero:_(s64) = G_CONSTANT i64 0
599 ; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 63
600 ; LOWER-NEXT: %sub_rhs:_(s64) = G_SHL %shl_lhs, %cst(s64)
601 ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SUB %zero, %sub_rhs
602 ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(ne), %cmp_rhs(s64), %cmp_lhs
603 ; LOWER-NEXT: $w0 = COPY %cmp(s32)
604 ; LOWER-NEXT: RET_ReallyLR implicit $w0
606 ; SELECT-LABEL: name: swap_cmn_lhs
607 ; SELECT: liveins: $x0, $x1
608 ; SELECT-NEXT: {{ $}}
609 ; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
610 ; SELECT-NEXT: %shl_lhs:gpr64 = COPY $x0
611 ; SELECT-NEXT: [[ADDSXrs:%[0-9]+]]:gpr64 = ADDSXrs %cmp_rhs, %shl_lhs, 63, implicit-def $nzcv
612 ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
613 ; SELECT-NEXT: $w0 = COPY %cmp
614 ; SELECT-NEXT: RET_ReallyLR implicit $w0
615 %cmp_rhs:_(s64) = COPY $x1
617 %shl_lhs:_(s64) = COPY $x0
618 %zero:_(s64) = G_CONSTANT i64 0
619 %cst:_(s64) = G_CONSTANT i64 63
620 %sub_rhs:_(s64) = G_SHL %shl_lhs, %cst(s64)
621 %cmp_lhs:_(s64) = G_SUB %zero, %sub_rhs
623 %cmp:_(s32) = G_ICMP intpred(ne), %cmp_lhs(s64), %cmp_rhs
625 RET_ReallyLR implicit $w0
629 name: dont_swap_cmn_lhs_when_rhs_more_profitable
631 tracksRegLiveness: true
636 ; Don't swap when the RHS's subtract offers a better constant folding
637 ; opportunity than the LHS's subtract.
639 ; In this case, the RHS has a supported extend, plus a shift with a constant
642 ; LOWER-LABEL: name: dont_swap_cmn_lhs_when_rhs_more_profitable
643 ; LOWER: liveins: $x0, $x1
645 ; LOWER-NEXT: %zero:_(s64) = G_CONSTANT i64 0
646 ; LOWER-NEXT: %reg0:_(s64) = COPY $x0
647 ; LOWER-NEXT: %shl_cst:_(s64) = G_CONSTANT i64 63
648 ; LOWER-NEXT: %shl:_(s64) = G_SHL %reg0, %shl_cst(s64)
649 ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SUB %zero, %shl
650 ; LOWER-NEXT: %reg1:_(s64) = COPY $x1
651 ; LOWER-NEXT: %sext_in_reg:_(s64) = G_SEXT_INREG %reg1, 1
652 ; LOWER-NEXT: %ashr_cst:_(s64) = G_CONSTANT i64 3
653 ; LOWER-NEXT: %ashr:_(s64) = G_ASHR %sext_in_reg, %ashr_cst(s64)
654 ; LOWER-NEXT: %cmp_rhs:_(s64) = G_SUB %zero, %ashr
655 ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(eq), %cmp_lhs(s64), %cmp_rhs
656 ; LOWER-NEXT: $w0 = COPY %cmp(s32)
657 ; LOWER-NEXT: RET_ReallyLR implicit $w0
659 ; SELECT-LABEL: name: dont_swap_cmn_lhs_when_rhs_more_profitable
660 ; SELECT: liveins: $x0, $x1
661 ; SELECT-NEXT: {{ $}}
662 ; SELECT-NEXT: %zero:gpr64 = COPY $xzr
663 ; SELECT-NEXT: %reg0:gpr64 = COPY $x0
664 ; SELECT-NEXT: %shl:gpr64 = UBFMXri %reg0, 1, 0
665 ; SELECT-NEXT: %reg1:gpr64 = COPY $x1
666 ; SELECT-NEXT: %sext_in_reg:gpr64 = SBFMXri %reg1, 0, 0
667 ; SELECT-NEXT: %cmp_rhs:gpr64 = SUBSXrs %zero, %sext_in_reg, 131, implicit-def dead $nzcv
668 ; SELECT-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr %shl, %cmp_rhs, implicit-def $nzcv
669 ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
670 ; SELECT-NEXT: $w0 = COPY %cmp
671 ; SELECT-NEXT: RET_ReallyLR implicit $w0
672 %zero:_(s64) = G_CONSTANT i64 0
674 %reg0:_(s64) = COPY $x0
675 %shl_cst:_(s64) = G_CONSTANT i64 63
676 %shl:_(s64) = G_SHL %reg0, %shl_cst(s64)
677 %cmp_lhs:_(s64) = G_SUB %zero, %shl
679 %reg1:_(s64) = COPY $x1
680 %sext_in_reg:_(s64) = G_SEXT_INREG %reg1, 1
681 %ashr_cst:_(s64) = G_CONSTANT i64 3
682 %ashr:_(s64) = G_ASHR %sext_in_reg, %ashr_cst(s64)
683 %cmp_rhs:_(s64) = G_SUB %zero, %ashr
685 %cmp:_(s32) = G_ICMP intpred(eq), %cmp_lhs(s64), %cmp_rhs
687 RET_ReallyLR implicit $w0
691 name: dont_swap_rhs_with_supported_extend
693 tracksRegLiveness: true
697 ; The RHS offers more constant folding opportunities than the LHS.
699 ; LOWER-LABEL: name: dont_swap_rhs_with_supported_extend
700 ; LOWER: liveins: $x0, $x1
702 ; LOWER-NEXT: %reg0:_(s64) = COPY $x0
703 ; LOWER-NEXT: %lhs_cst:_(s64) = G_CONSTANT i64 1
704 ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SHL %reg0, %lhs_cst(s64)
705 ; LOWER-NEXT: %reg1:_(s64) = COPY $x1
706 ; LOWER-NEXT: %and_mask:_(s64) = G_CONSTANT i64 255
707 ; LOWER-NEXT: %and:_(s64) = G_AND %reg1, %and_mask
708 ; LOWER-NEXT: %rhs_cst:_(s64) = G_CONSTANT i64 1
709 ; LOWER-NEXT: %cmp_rhs:_(s64) = G_ASHR %and, %rhs_cst(s64)
710 ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
711 ; LOWER-NEXT: $w0 = COPY %cmp(s32)
712 ; LOWER-NEXT: RET_ReallyLR implicit $w0
714 ; SELECT-LABEL: name: dont_swap_rhs_with_supported_extend
715 ; SELECT: liveins: $x0, $x1
716 ; SELECT-NEXT: {{ $}}
717 ; SELECT-NEXT: %reg0:gpr64 = COPY $x0
718 ; SELECT-NEXT: %cmp_lhs:gpr64 = UBFMXri %reg0, 63, 62
719 ; SELECT-NEXT: %reg1:gpr64 = COPY $x1
720 ; SELECT-NEXT: %and:gpr64common = ANDXri %reg1, 4103
721 ; SELECT-NEXT: [[SUBSXrs:%[0-9]+]]:gpr64 = SUBSXrs %cmp_lhs, %and, 129, implicit-def $nzcv
722 ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
723 ; SELECT-NEXT: $w0 = COPY %cmp
724 ; SELECT-NEXT: RET_ReallyLR implicit $w0
725 %reg0:_(s64) = COPY $x0
726 %lhs_cst:_(s64) = G_CONSTANT i64 1
727 %cmp_lhs:_(s64) = G_SHL %reg0, %lhs_cst(s64)
729 %reg1:_(s64) = COPY $x1
730 %and_mask:_(s64) = G_CONSTANT i64 255
731 %and:_(s64) = G_AND %reg1, %and_mask(s64)
732 %rhs_cst:_(s64) = G_CONSTANT i64 1
733 %cmp_rhs:_(s64) = G_ASHR %and, %rhs_cst(s64)
735 %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
737 RET_ReallyLR implicit $w0
742 name: swap_rhs_with_supported_extend
744 tracksRegLiveness: true
749 ; In this case, both the LHS and RHS are fed by a supported extend. However,
750 ; the LHS' shift has a constant <= 4. This makes it more profitable, so
751 ; we should swap the operands.
753 ; LOWER-LABEL: name: swap_rhs_with_supported_extend
754 ; LOWER: liveins: $x0, $x1
756 ; LOWER-NEXT: %reg0:_(s64) = COPY $x0
757 ; LOWER-NEXT: %and_mask:_(s64) = G_CONSTANT i64 255
758 ; LOWER-NEXT: %and:_(s64) = G_AND %reg0, %and_mask
759 ; LOWER-NEXT: %lhs_cst:_(s64) = G_CONSTANT i64 1
760 ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SHL %and, %lhs_cst(s64)
761 ; LOWER-NEXT: %rhs_cst:_(s64) = G_CONSTANT i64 5
762 ; LOWER-NEXT: %cmp_rhs:_(s64) = G_ASHR %and, %rhs_cst(s64)
763 ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(slt), %cmp_rhs(s64), %cmp_lhs
764 ; LOWER-NEXT: $w0 = COPY %cmp(s32)
765 ; LOWER-NEXT: RET_ReallyLR implicit $w0
767 ; SELECT-LABEL: name: swap_rhs_with_supported_extend
768 ; SELECT: liveins: $x0, $x1
769 ; SELECT-NEXT: {{ $}}
770 ; SELECT-NEXT: %reg0:gpr64 = COPY $x0
771 ; SELECT-NEXT: %and:gpr64common = ANDXri %reg0, 4103
772 ; SELECT-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %reg0.sub_32
773 ; SELECT-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
774 ; SELECT-NEXT: %cmp_rhs:gpr64common = SBFMXri %and, 5, 63
775 ; SELECT-NEXT: [[SUBSXrx:%[0-9]+]]:gpr64 = SUBSXrx %cmp_rhs, [[COPY1]], 1, implicit-def $nzcv
776 ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
777 ; SELECT-NEXT: $w0 = COPY %cmp
778 ; SELECT-NEXT: RET_ReallyLR implicit $w0
779 %reg0:_(s64) = COPY $x0
780 %and_mask:_(s64) = G_CONSTANT i64 255
781 %and:_(s64) = G_AND %reg0, %and_mask(s64)
783 %lhs_cst:_(s64) = G_CONSTANT i64 1
784 %cmp_lhs:_(s64) = G_SHL %and, %lhs_cst(s64)
786 %rhs_cst:_(s64) = G_CONSTANT i64 5
787 %cmp_rhs:_(s64) = G_ASHR %and, %rhs_cst(s64)
789 %cmp:_(s32) = G_ICMP intpred(sgt), %cmp_lhs(s64), %cmp_rhs
791 RET_ReallyLR implicit $w0