# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64 -run-pass=aarch64-postlegalizer-lowering -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=LOWER
# RUN: llc -mtriple=aarch64 -global-isel -start-before=aarch64-postlegalizer-lowering -stop-after=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=SELECT

# Check that we swap the order of operands on comparisons when it is likely
# to introduce a folding opportunity.
#
# The condition code for the compare should be changed when appropriate.
#
# TODO: emitBinOp doesn't know about selectArithExtendedRegister, so some of
# these cases don't hit in selection yet.
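#
# For example (an illustrative sketch, not checked by this test): when the
# LHS is sign-extended from 8 bits, "icmp sge %lhs, %rhs" can be rewritten as
# "icmp sle %rhs, %lhs", so the extend folds into the compare as an
# extended-register operand, e.g. "cmp x1, w0, sxtb", instead of being
# materialized by a separate SBFM.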
---
name: swap_sextinreg_lhs
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1

    ; LOWER-LABEL: name: swap_sextinreg_lhs
    ; LOWER: liveins: $x0, $x1
    ; LOWER-NEXT: {{ $}}
    ; LOWER-NEXT: %reg:_(s64) = COPY $x0
    ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SEXT_INREG %reg, 8
    ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
    ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %cmp_rhs(s64), %cmp_lhs
    ; LOWER-NEXT: $w0 = COPY %cmp(s32)
    ; LOWER-NEXT: RET_ReallyLR implicit $w0
    ;
    ; SELECT-LABEL: name: swap_sextinreg_lhs
    ; SELECT: liveins: $x0, $x1
    ; SELECT-NEXT: {{ $}}
    ; SELECT-NEXT: %reg:gpr64all = COPY $x0
    ; SELECT-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %reg.sub_32
    ; SELECT-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
    ; SELECT-NEXT: %cmp_rhs:gpr64sp = COPY $x1
    ; SELECT-NEXT: [[SUBSXrx:%[0-9]+]]:gpr64 = SUBSXrx %cmp_rhs, [[COPY1]], 32, implicit-def $nzcv
    ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
    ; SELECT-NEXT: $w0 = COPY %cmp
    ; SELECT-NEXT: RET_ReallyLR implicit $w0
    %reg:_(s64) = COPY $x0
    %cmp_lhs:_(s64) = G_SEXT_INREG %reg, 8
    %cmp_rhs:_(s64) = COPY $x1
    %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
    $w0 = COPY %cmp(s32)
    RET_ReallyLR implicit $w0
...
---
name: dont_swap_more_than_one_use
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1

    ; The LHS of the compare is used in an add, and a second compare. Don't
    ; swap, since we don't gain any folding opportunities here.
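    ;
    ; (Because %cmp_lhs also feeds the G_ADD, the G_SEXT_INREG has to be
    ; materialized in a register anyway, so folding it into one of the
    ; compares would not remove any instructions.)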
    ;
    ; LOWER-LABEL: name: dont_swap_more_than_one_use
    ; LOWER: liveins: $x0, $x1
    ; LOWER-NEXT: {{ $}}
    ; LOWER-NEXT: %reg0:_(s64) = COPY $x0
    ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SEXT_INREG %reg0, 8
    ; LOWER-NEXT: %reg1:_(s64) = COPY $x1
    ; LOWER-NEXT: %cmp1:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %reg1
    ; LOWER-NEXT: %add:_(s64) = G_ADD %cmp_lhs, %reg0
    ; LOWER-NEXT: %cmp2:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %add
    ; LOWER-NEXT: $w0 = COPY %cmp2(s32)
    ; LOWER-NEXT: $w1 = COPY %cmp1(s32)
    ; LOWER-NEXT: RET_ReallyLR implicit $w0, implicit $w1
    ;
    ; SELECT-LABEL: name: dont_swap_more_than_one_use
    ; SELECT: liveins: $x0, $x1
    ; SELECT-NEXT: {{ $}}
    ; SELECT-NEXT: %reg0:gpr64 = COPY $x0
    ; SELECT-NEXT: %cmp_lhs:gpr64 = SBFMXri %reg0, 0, 7
    ; SELECT-NEXT: %reg1:gpr64 = COPY $x1
    ; SELECT-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %cmp_lhs, %reg1, implicit-def $nzcv
    ; SELECT-NEXT: %cmp1:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
    ; SELECT-NEXT: %add:gpr64 = ADDXrr %cmp_lhs, %reg0
    ; SELECT-NEXT: [[SUBSXrr1:%[0-9]+]]:gpr64 = SUBSXrr %cmp_lhs, %add, implicit-def $nzcv
    ; SELECT-NEXT: %cmp2:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
    ; SELECT-NEXT: $w0 = COPY %cmp2
    ; SELECT-NEXT: $w1 = COPY %cmp1
    ; SELECT-NEXT: RET_ReallyLR implicit $w0, implicit $w1
    %reg0:_(s64) = COPY $x0
    %cmp_lhs:_(s64) = G_SEXT_INREG %reg0, 8
    %reg1:_(s64) = COPY $x1
    %cmp1:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %reg1

    %add:_(s64) = G_ADD %cmp_lhs(s64), %reg0
    %cmp2:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %add

    $w0 = COPY %cmp2(s32)
    $w1 = COPY %cmp1(s32)
    RET_ReallyLR implicit $w0, implicit $w1
...
---
name: dont_swap_legal_arith_immed_on_rhs
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1

    ; Arithmetic immediates can be folded into compares. If we have one, then
    ; don't bother changing anything.
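    ;
    ; (An AArch64 arithmetic immediate is a 12-bit value, optionally shifted
    ; left by 12 bits, so the 12 here folds straight into the SUBSXri below.)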
    ;
    ; LOWER-LABEL: name: dont_swap_legal_arith_immed_on_rhs
    ; LOWER: liveins: $x0, $x1
    ; LOWER-NEXT: {{ $}}
    ; LOWER-NEXT: %reg:_(s64) = COPY $x0
    ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SEXT_INREG %reg, 8
    ; LOWER-NEXT: %cmp_rhs:_(s64) = G_CONSTANT i64 12
    ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
    ; LOWER-NEXT: $w0 = COPY %cmp(s32)
    ; LOWER-NEXT: RET_ReallyLR implicit $w0
    ;
    ; SELECT-LABEL: name: dont_swap_legal_arith_immed_on_rhs
    ; SELECT: liveins: $x0, $x1
    ; SELECT-NEXT: {{ $}}
    ; SELECT-NEXT: %reg:gpr64 = COPY $x0
    ; SELECT-NEXT: %cmp_lhs:gpr64common = SBFMXri %reg, 0, 7
    ; SELECT-NEXT: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri %cmp_lhs, 12, 0, implicit-def $nzcv
    ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
    ; SELECT-NEXT: $w0 = COPY %cmp
    ; SELECT-NEXT: RET_ReallyLR implicit $w0
    %reg:_(s64) = COPY $x0
    %cmp_lhs:_(s64) = G_SEXT_INREG %reg, 8
    %cmp_rhs:_(s64) = G_CONSTANT i64 12
    %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
    $w0 = COPY %cmp(s32)
    RET_ReallyLR implicit $w0
...
---
name: swap_non_arith_immed_on_rhs
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1

    ; If we have a non-arithmetic immediate on the RHS, then we can swap to
    ; get a guaranteed folding opportunity.
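    ;
    ; (1234567 does not fit the 12-bit optionally-shifted arithmetic immediate
    ; form, so it must be materialized with a MOV either way; swapping lets
    ; the sign-extended LHS fold into the compare instead.)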
    ;
    ; LOWER-LABEL: name: swap_non_arith_immed_on_rhs
    ; LOWER: liveins: $x0, $x1
    ; LOWER-NEXT: {{ $}}
    ; LOWER-NEXT: %reg:_(s64) = COPY $x0
    ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SEXT_INREG %reg, 8
    ; LOWER-NEXT: %cmp_rhs:_(s64) = G_CONSTANT i64 1234567
    ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %cmp_rhs(s64), %cmp_lhs
    ; LOWER-NEXT: $w0 = COPY %cmp(s32)
    ; LOWER-NEXT: RET_ReallyLR implicit $w0
    ;
    ; SELECT-LABEL: name: swap_non_arith_immed_on_rhs
    ; SELECT: liveins: $x0, $x1
    ; SELECT-NEXT: {{ $}}
    ; SELECT-NEXT: %reg:gpr64all = COPY $x0
    ; SELECT-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %reg.sub_32
    ; SELECT-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
    ; SELECT-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1234567
    ; SELECT-NEXT: %cmp_rhs:gpr64sp = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
    ; SELECT-NEXT: [[SUBSXrx:%[0-9]+]]:gpr64 = SUBSXrx %cmp_rhs, [[COPY1]], 32, implicit-def $nzcv
    ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
    ; SELECT-NEXT: $w0 = COPY %cmp
    ; SELECT-NEXT: RET_ReallyLR implicit $w0
    %reg:_(s64) = COPY $x0
    %cmp_lhs:_(s64) = G_SEXT_INREG %reg, 8
    %cmp_rhs:_(s64) = G_CONSTANT i64 1234567
    %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
    $w0 = COPY %cmp(s32)
    RET_ReallyLR implicit $w0
...
---
name: swap_and_lhs_0xFF
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1

    ; LOWER-LABEL: name: swap_and_lhs_0xFF
    ; LOWER: liveins: $x0, $x1
    ; LOWER-NEXT: {{ $}}
    ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
    ; LOWER-NEXT: %and_lhs:_(s64) = COPY $x0
    ; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 255
    ; LOWER-NEXT: %cmp_lhs:_(s64) = G_AND %and_lhs, %cst
    ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %cmp_rhs(s64), %cmp_lhs
    ; LOWER-NEXT: $w0 = COPY %cmp(s32)
    ; LOWER-NEXT: RET_ReallyLR implicit $w0
    ;
    ; SELECT-LABEL: name: swap_and_lhs_0xFF
    ; SELECT: liveins: $x0, $x1
    ; SELECT-NEXT: {{ $}}
    ; SELECT-NEXT: %cmp_rhs:gpr64sp = COPY $x1
    ; SELECT-NEXT: %and_lhs:gpr64all = COPY $x0
    ; SELECT-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %and_lhs.sub_32
    ; SELECT-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
    ; SELECT-NEXT: [[SUBSXrx:%[0-9]+]]:gpr64 = SUBSXrx %cmp_rhs, [[COPY1]], 0, implicit-def $nzcv
    ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
    ; SELECT-NEXT: $w0 = COPY %cmp
    ; SELECT-NEXT: RET_ReallyLR implicit $w0
    %cmp_rhs:_(s64) = COPY $x1

    %and_lhs:_(s64) = COPY $x0
    %cst:_(s64) = G_CONSTANT i64 255
    %cmp_lhs:_(s64) = G_AND %and_lhs, %cst(s64)

    %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
    $w0 = COPY %cmp(s32)
    RET_ReallyLR implicit $w0
...
---
name: swap_and_lhs_0xFFFF
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1

    ; LOWER-LABEL: name: swap_and_lhs_0xFFFF
    ; LOWER: liveins: $x0, $x1
    ; LOWER-NEXT: {{ $}}
    ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
    ; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 65535
    ; LOWER-NEXT: %and_lhs:_(s64) = COPY $x0
    ; LOWER-NEXT: %cmp_lhs:_(s64) = G_AND %and_lhs, %cst
    ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %cmp_rhs(s64), %cmp_lhs
    ; LOWER-NEXT: $w0 = COPY %cmp(s32)
    ; LOWER-NEXT: RET_ReallyLR implicit $w0
    ;
    ; SELECT-LABEL: name: swap_and_lhs_0xFFFF
    ; SELECT: liveins: $x0, $x1
    ; SELECT-NEXT: {{ $}}
    ; SELECT-NEXT: %cmp_rhs:gpr64sp = COPY $x1
    ; SELECT-NEXT: %and_lhs:gpr64all = COPY $x0
    ; SELECT-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %and_lhs.sub_32
    ; SELECT-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
    ; SELECT-NEXT: [[SUBSXrx:%[0-9]+]]:gpr64 = SUBSXrx %cmp_rhs, [[COPY1]], 8, implicit-def $nzcv
    ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
    ; SELECT-NEXT: $w0 = COPY %cmp
    ; SELECT-NEXT: RET_ReallyLR implicit $w0
    %cmp_rhs:_(s64) = COPY $x1

    %cst:_(s64) = G_CONSTANT i64 65535
    %and_lhs:_(s64) = COPY $x0
    %cmp_lhs:_(s64) = G_AND %and_lhs, %cst(s64)

    %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
    $w0 = COPY %cmp(s32)
    RET_ReallyLR implicit $w0
...
---
name: swap_and_lhs_0xFFFFFFFF
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1

    ; LOWER-LABEL: name: swap_and_lhs_0xFFFFFFFF
    ; LOWER: liveins: $x0, $x1
    ; LOWER-NEXT: {{ $}}
    ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
    ; LOWER-NEXT: %and_lhs:_(s64) = COPY $x0
    ; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 4294967295
    ; LOWER-NEXT: %cmp_lhs:_(s64) = G_AND %and_lhs, %cst
    ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %cmp_rhs(s64), %cmp_lhs
    ; LOWER-NEXT: $w0 = COPY %cmp(s32)
    ; LOWER-NEXT: RET_ReallyLR implicit $w0
    ;
    ; SELECT-LABEL: name: swap_and_lhs_0xFFFFFFFF
    ; SELECT: liveins: $x0, $x1
    ; SELECT-NEXT: {{ $}}
    ; SELECT-NEXT: %cmp_rhs:gpr64sp = COPY $x1
    ; SELECT-NEXT: %and_lhs:gpr64all = COPY $x0
    ; SELECT-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %and_lhs.sub_32
    ; SELECT-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
    ; SELECT-NEXT: [[SUBSXrx:%[0-9]+]]:gpr64 = SUBSXrx %cmp_rhs, [[COPY1]], 16, implicit-def $nzcv
    ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
    ; SELECT-NEXT: $w0 = COPY %cmp
    ; SELECT-NEXT: RET_ReallyLR implicit $w0
    %cmp_rhs:_(s64) = COPY $x1

    %and_lhs:_(s64) = COPY $x0
    %cst:_(s64) = G_CONSTANT i64 4294967295
    %cmp_lhs:_(s64) = G_AND %and_lhs, %cst(s64)

    %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
    $w0 = COPY %cmp(s32)
    RET_ReallyLR implicit $w0
...
---
name: dont_swap_and_lhs_wrong_mask
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1

    ; 7 isn't an extend mask for G_AND, so there are no folding opportunities
    ; here.
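    ;
    ; (The masks that correspond to an extend are 0xFF, 0xFFFF, and
    ; 0xFFFFFFFF, i.e. uxtb, uxth, and uxtw respectively.)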
    ;
    ; LOWER-LABEL: name: dont_swap_and_lhs_wrong_mask
    ; LOWER: liveins: $x0, $x1
    ; LOWER-NEXT: {{ $}}
    ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
    ; LOWER-NEXT: %and_lhs:_(s64) = COPY $x0
    ; LOWER-NEXT: %not_an_extend_mask:_(s64) = G_CONSTANT i64 7
    ; LOWER-NEXT: %cmp_lhs:_(s64) = G_AND %and_lhs, %not_an_extend_mask
    ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
    ; LOWER-NEXT: $w0 = COPY %cmp(s32)
    ; LOWER-NEXT: RET_ReallyLR implicit $w0
    ;
    ; SELECT-LABEL: name: dont_swap_and_lhs_wrong_mask
    ; SELECT: liveins: $x0, $x1
    ; SELECT-NEXT: {{ $}}
    ; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
    ; SELECT-NEXT: %and_lhs:gpr64 = COPY $x0
    ; SELECT-NEXT: %cmp_lhs:gpr64common = ANDXri %and_lhs, 4098
    ; SELECT-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
    ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
    ; SELECT-NEXT: $w0 = COPY %cmp
    ; SELECT-NEXT: RET_ReallyLR implicit $w0
    %cmp_rhs:_(s64) = COPY $x1

    %and_lhs:_(s64) = COPY $x0
    %not_an_extend_mask:_(s64) = G_CONSTANT i64 7
    %cmp_lhs:_(s64) = G_AND %and_lhs, %not_an_extend_mask(s64)

    %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
    $w0 = COPY %cmp(s32)
    RET_ReallyLR implicit $w0
...
---
name: swap_shl_lhs
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1

    ; LOWER-LABEL: name: swap_shl_lhs
    ; LOWER: liveins: $x0, $x1
    ; LOWER-NEXT: {{ $}}
    ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
    ; LOWER-NEXT: %shl_lhs:_(s64) = COPY $x0
    ; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 1
    ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SHL %shl_lhs, %cst(s64)
    ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sgt), %cmp_rhs(s64), %cmp_lhs
    ; LOWER-NEXT: $w0 = COPY %cmp(s32)
    ; LOWER-NEXT: RET_ReallyLR implicit $w0
    ;
    ; SELECT-LABEL: name: swap_shl_lhs
    ; SELECT: liveins: $x0, $x1
    ; SELECT-NEXT: {{ $}}
    ; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
    ; SELECT-NEXT: %shl_lhs:gpr64 = COPY $x0
    ; SELECT-NEXT: [[SUBSXrs:%[0-9]+]]:gpr64 = SUBSXrs %cmp_rhs, %shl_lhs, 1, implicit-def $nzcv
    ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
    ; SELECT-NEXT: $w0 = COPY %cmp
    ; SELECT-NEXT: RET_ReallyLR implicit $w0
    %cmp_rhs:_(s64) = COPY $x1

    %shl_lhs:_(s64) = COPY $x0
    %cst:_(s64) = G_CONSTANT i64 1
    %cmp_lhs:_(s64) = G_SHL %shl_lhs, %cst(s64)

    %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
    $w0 = COPY %cmp(s32)
    RET_ReallyLR implicit $w0
...
---
name: swap_ashr_lhs
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1

    ; LOWER-LABEL: name: swap_ashr_lhs
    ; LOWER: liveins: $x0, $x1
    ; LOWER-NEXT: {{ $}}
    ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
    ; LOWER-NEXT: %ashr_lhs:_(s64) = COPY $x0
    ; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 1
    ; LOWER-NEXT: %cmp_lhs:_(s64) = G_ASHR %ashr_lhs, %cst(s64)
    ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sgt), %cmp_rhs(s64), %cmp_lhs
    ; LOWER-NEXT: $w0 = COPY %cmp(s32)
    ; LOWER-NEXT: RET_ReallyLR implicit $w0
    ;
    ; SELECT-LABEL: name: swap_ashr_lhs
    ; SELECT: liveins: $x0, $x1
    ; SELECT-NEXT: {{ $}}
    ; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
    ; SELECT-NEXT: %ashr_lhs:gpr64 = COPY $x0
    ; SELECT-NEXT: [[SUBSXrs:%[0-9]+]]:gpr64 = SUBSXrs %cmp_rhs, %ashr_lhs, 129, implicit-def $nzcv
    ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
    ; SELECT-NEXT: $w0 = COPY %cmp
    ; SELECT-NEXT: RET_ReallyLR implicit $w0
    %cmp_rhs:_(s64) = COPY $x1

    %ashr_lhs:_(s64) = COPY $x0
    %cst:_(s64) = G_CONSTANT i64 1
    %cmp_lhs:_(s64) = G_ASHR %ashr_lhs, %cst(s64)

    %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
    $w0 = COPY %cmp(s32)
    RET_ReallyLR implicit $w0
...
---
name: swap_lshr_lhs
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1

    ; LOWER-LABEL: name: swap_lshr_lhs
    ; LOWER: liveins: $x0, $x1
    ; LOWER-NEXT: {{ $}}
    ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
    ; LOWER-NEXT: %lshr_lhs:_(s64) = COPY $x0
    ; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 1
    ; LOWER-NEXT: %cmp_lhs:_(s64) = G_LSHR %lshr_lhs, %cst(s64)
    ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sgt), %cmp_rhs(s64), %cmp_lhs
    ; LOWER-NEXT: $w0 = COPY %cmp(s32)
    ; LOWER-NEXT: RET_ReallyLR implicit $w0
    ;
    ; SELECT-LABEL: name: swap_lshr_lhs
    ; SELECT: liveins: $x0, $x1
    ; SELECT-NEXT: {{ $}}
    ; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
    ; SELECT-NEXT: %lshr_lhs:gpr64 = COPY $x0
    ; SELECT-NEXT: [[SUBSXrs:%[0-9]+]]:gpr64 = SUBSXrs %cmp_rhs, %lshr_lhs, 65, implicit-def $nzcv
    ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
    ; SELECT-NEXT: $w0 = COPY %cmp
    ; SELECT-NEXT: RET_ReallyLR implicit $w0
    %cmp_rhs:_(s64) = COPY $x1

    %lshr_lhs:_(s64) = COPY $x0
    %cst:_(s64) = G_CONSTANT i64 1
    %cmp_lhs:_(s64) = G_LSHR %lshr_lhs, %cst(s64)

    %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
    $w0 = COPY %cmp(s32)
    RET_ReallyLR implicit $w0
...
---
name: dont_swap_shift_s64_cst_too_large
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1

    ; Constant for the shift must be <= 63.
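    ;
    ; (The shifted-register compare form encodes shift amounts 0-63 for
    ; 64-bit registers, so the shift by 64 is selected as a separate LSLVXr
    ; below rather than folding into the compare.)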
    ;
    ; LOWER-LABEL: name: dont_swap_shift_s64_cst_too_large
    ; LOWER: liveins: $x0, $x1
    ; LOWER-NEXT: {{ $}}
    ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
    ; LOWER-NEXT: %shl_lhs:_(s64) = COPY $x0
    ; LOWER-NEXT: %too_large:_(s64) = G_CONSTANT i64 64
    ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SHL %shl_lhs, %too_large(s64)
    ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
    ; LOWER-NEXT: $w0 = COPY %cmp(s32)
    ; LOWER-NEXT: RET_ReallyLR implicit $w0
    ;
    ; SELECT-LABEL: name: dont_swap_shift_s64_cst_too_large
    ; SELECT: liveins: $x0, $x1
    ; SELECT-NEXT: {{ $}}
    ; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
    ; SELECT-NEXT: %shl_lhs:gpr64 = COPY $x0
    ; SELECT-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 64
    ; SELECT-NEXT: %too_large:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
    ; SELECT-NEXT: %cmp_lhs:gpr64 = LSLVXr %shl_lhs, %too_large
    ; SELECT-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
    ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
    ; SELECT-NEXT: $w0 = COPY %cmp
    ; SELECT-NEXT: RET_ReallyLR implicit $w0
    %cmp_rhs:_(s64) = COPY $x1

    %shl_lhs:_(s64) = COPY $x0
    %too_large:_(s64) = G_CONSTANT i64 64
    %cmp_lhs:_(s64) = G_SHL %shl_lhs, %too_large(s64)

    %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
    $w0 = COPY %cmp(s32)
    RET_ReallyLR implicit $w0
...
---
name: dont_swap_shift_s32_cst_too_large
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $w0, $w1

    ; Constant for the shift must be <= 31.
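    ;
    ; (For 32-bit registers the shifted-register form encodes amounts 0-31,
    ; so the shift by 32 is selected as a separate LSLVWr below.)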
    ;
    ; LOWER-LABEL: name: dont_swap_shift_s32_cst_too_large
    ; LOWER: liveins: $w0, $w1
    ; LOWER-NEXT: {{ $}}
    ; LOWER-NEXT: %cmp_rhs:_(s32) = COPY $w1
    ; LOWER-NEXT: %shl_lhs:_(s32) = COPY $w0
    ; LOWER-NEXT: %cst:_(s32) = G_CONSTANT i32 32
    ; LOWER-NEXT: %cmp_lhs:_(s32) = G_SHL %shl_lhs, %cst(s32)
    ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s32), %cmp_rhs
    ; LOWER-NEXT: $w0 = COPY %cmp(s32)
    ; LOWER-NEXT: RET_ReallyLR implicit $w0
    ;
    ; SELECT-LABEL: name: dont_swap_shift_s32_cst_too_large
    ; SELECT: liveins: $w0, $w1
    ; SELECT-NEXT: {{ $}}
    ; SELECT-NEXT: %cmp_rhs:gpr32 = COPY $w1
    ; SELECT-NEXT: %shl_lhs:gpr32 = COPY $w0
    ; SELECT-NEXT: %cst:gpr32 = MOVi32imm 32
    ; SELECT-NEXT: %cmp_lhs:gpr32 = LSLVWr %shl_lhs, %cst
    ; SELECT-NEXT: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
    ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
    ; SELECT-NEXT: $w0 = COPY %cmp
    ; SELECT-NEXT: RET_ReallyLR implicit $w0
    %cmp_rhs:_(s32) = COPY $w1

    %shl_lhs:_(s32) = COPY $w0
    %cst:_(s32) = G_CONSTANT i32 32
    %cmp_lhs:_(s32) = G_SHL %shl_lhs, %cst(s32)

    %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s32), %cmp_rhs
    $w0 = COPY %cmp(s32)
    RET_ReallyLR implicit $w0
...
---
name: dont_swap_cmn_lhs_no_folding_opportunities
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1

    ; No reason to swap a CMN on the LHS when it won't introduce a constant
    ; folding opportunity. We can recognise CMNs on the LHS and RHS, so there's
    ; nothing to gain here.
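    ;
    ; (A compare against a negation is recognised as CMN on either side; the
    ; G_SUB from zero below simply becomes the ADDSXrr in the SELECT output.)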
    ;
    ; LOWER-LABEL: name: dont_swap_cmn_lhs_no_folding_opportunities
    ; LOWER: liveins: $x0, $x1
    ; LOWER-NEXT: {{ $}}
    ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
    ; LOWER-NEXT: %sub_rhs:_(s64) = COPY $x0
    ; LOWER-NEXT: %zero:_(s64) = G_CONSTANT i64 0
    ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SUB %zero, %sub_rhs
    ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(ne), %cmp_lhs(s64), %cmp_rhs
    ; LOWER-NEXT: $w0 = COPY %cmp(s32)
    ; LOWER-NEXT: RET_ReallyLR implicit $w0
    ;
    ; SELECT-LABEL: name: dont_swap_cmn_lhs_no_folding_opportunities
    ; SELECT: liveins: $x0, $x1
    ; SELECT-NEXT: {{ $}}
    ; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
    ; SELECT-NEXT: %sub_rhs:gpr64 = COPY $x0
    ; SELECT-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr %sub_rhs, %cmp_rhs, implicit-def $nzcv
    ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
    ; SELECT-NEXT: $w0 = COPY %cmp
    ; SELECT-NEXT: RET_ReallyLR implicit $w0
    %cmp_rhs:_(s64) = COPY $x1

    %sub_rhs:_(s64) = COPY $x0
    %zero:_(s64) = G_CONSTANT i64 0
    %cmp_lhs:_(s64) = G_SUB %zero, %sub_rhs

    %cmp:_(s32) = G_ICMP intpred(ne), %cmp_lhs(s64), %cmp_rhs
    $w0 = COPY %cmp(s32)
    RET_ReallyLR implicit $w0
...
---
name: swap_cmn_lhs
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1

    ; Swap when we can see a constant folding opportunity through the sub on
    ; the LHS.
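    ;
    ; (Here the negated, shifted LHS folds entirely into the compare; note
    ; the ADDSXrs with shift 63 in the SELECT output below.)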
    ;
    ; LOWER-LABEL: name: swap_cmn_lhs
    ; LOWER: liveins: $x0, $x1
    ; LOWER-NEXT: {{ $}}
    ; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
    ; LOWER-NEXT: %shl_lhs:_(s64) = COPY $x0
    ; LOWER-NEXT: %zero:_(s64) = G_CONSTANT i64 0
    ; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 63
    ; LOWER-NEXT: %sub_rhs:_(s64) = G_SHL %shl_lhs, %cst(s64)
    ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SUB %zero, %sub_rhs
    ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(ne), %cmp_rhs(s64), %cmp_lhs
    ; LOWER-NEXT: $w0 = COPY %cmp(s32)
    ; LOWER-NEXT: RET_ReallyLR implicit $w0
    ;
    ; SELECT-LABEL: name: swap_cmn_lhs
    ; SELECT: liveins: $x0, $x1
    ; SELECT-NEXT: {{ $}}
    ; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
    ; SELECT-NEXT: %shl_lhs:gpr64 = COPY $x0
    ; SELECT-NEXT: [[ADDSXrs:%[0-9]+]]:gpr64 = ADDSXrs %cmp_rhs, %shl_lhs, 63, implicit-def $nzcv
    ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
    ; SELECT-NEXT: $w0 = COPY %cmp
    ; SELECT-NEXT: RET_ReallyLR implicit $w0
    %cmp_rhs:_(s64) = COPY $x1

    %shl_lhs:_(s64) = COPY $x0
    %zero:_(s64) = G_CONSTANT i64 0
    %cst:_(s64) = G_CONSTANT i64 63
    %sub_rhs:_(s64) = G_SHL %shl_lhs, %cst(s64)
    %cmp_lhs:_(s64) = G_SUB %zero, %sub_rhs

    %cmp:_(s32) = G_ICMP intpred(ne), %cmp_lhs(s64), %cmp_rhs
    $w0 = COPY %cmp(s32)
    RET_ReallyLR implicit $w0
...
---
name: dont_swap_cmn_lhs_when_rhs_more_profitable
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1

    ; Don't swap when the RHS's subtract offers a better constant folding
    ; opportunity than the LHS's subtract.
    ;
    ; In this case, the RHS has a supported extend, plus a shift with a
    ; constant <= 4.
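    ;
    ; (An extend followed by a small shift can use the extended-register
    ; operand form, which the plain shl-by-63 on the LHS cannot, so the RHS
    ; is the more profitable side to keep in place.)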
    ;
    ; LOWER-LABEL: name: dont_swap_cmn_lhs_when_rhs_more_profitable
    ; LOWER: liveins: $x0, $x1
    ; LOWER-NEXT: {{ $}}
    ; LOWER-NEXT: %zero:_(s64) = G_CONSTANT i64 0
    ; LOWER-NEXT: %reg0:_(s64) = COPY $x0
    ; LOWER-NEXT: %shl_cst:_(s64) = G_CONSTANT i64 63
    ; LOWER-NEXT: %shl:_(s64) = G_SHL %reg0, %shl_cst(s64)
    ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SUB %zero, %shl
    ; LOWER-NEXT: %reg1:_(s64) = COPY $x1
    ; LOWER-NEXT: %sext_in_reg:_(s64) = G_SEXT_INREG %reg1, 1
    ; LOWER-NEXT: %ashr_cst:_(s64) = G_CONSTANT i64 3
    ; LOWER-NEXT: %ashr:_(s64) = G_ASHR %sext_in_reg, %ashr_cst(s64)
    ; LOWER-NEXT: %cmp_rhs:_(s64) = G_SUB %zero, %ashr
    ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(eq), %cmp_lhs(s64), %cmp_rhs
    ; LOWER-NEXT: $w0 = COPY %cmp(s32)
    ; LOWER-NEXT: RET_ReallyLR implicit $w0
    ;
    ; SELECT-LABEL: name: dont_swap_cmn_lhs_when_rhs_more_profitable
    ; SELECT: liveins: $x0, $x1
    ; SELECT-NEXT: {{ $}}
    ; SELECT-NEXT: %zero:gpr64 = COPY $xzr
    ; SELECT-NEXT: %reg0:gpr64 = COPY $x0
    ; SELECT-NEXT: %shl:gpr64 = UBFMXri %reg0, 1, 0
    ; SELECT-NEXT: %reg1:gpr64 = COPY $x1
    ; SELECT-NEXT: %sext_in_reg:gpr64 = SBFMXri %reg1, 0, 0
    ; SELECT-NEXT: %cmp_rhs:gpr64 = SUBSXrs %zero, %sext_in_reg, 131, implicit-def dead $nzcv
    ; SELECT-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr %shl, %cmp_rhs, implicit-def $nzcv
    ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
    ; SELECT-NEXT: $w0 = COPY %cmp
    ; SELECT-NEXT: RET_ReallyLR implicit $w0
    %zero:_(s64) = G_CONSTANT i64 0

    %reg0:_(s64) = COPY $x0
    %shl_cst:_(s64) = G_CONSTANT i64 63
    %shl:_(s64) = G_SHL %reg0, %shl_cst(s64)
    %cmp_lhs:_(s64) = G_SUB %zero, %shl

    %reg1:_(s64) = COPY $x1
    %sext_in_reg:_(s64) = G_SEXT_INREG %reg1, 1
    %ashr_cst:_(s64) = G_CONSTANT i64 3
    %ashr:_(s64) = G_ASHR %sext_in_reg, %ashr_cst(s64)
    %cmp_rhs:_(s64) = G_SUB %zero, %ashr

    %cmp:_(s32) = G_ICMP intpred(eq), %cmp_lhs(s64), %cmp_rhs
    $w0 = COPY %cmp(s32)
    RET_ReallyLR implicit $w0
...
---
name: dont_swap_rhs_with_supported_extend
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1

    ; The RHS offers more constant folding opportunities than the LHS.
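    ;
    ; (The masked-and-shifted RHS folds into the compare as a shifted
    ; register, the ASR #1 on the SUBSXrs below, so it stays on the right.)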
    ;
    ; LOWER-LABEL: name: dont_swap_rhs_with_supported_extend
    ; LOWER: liveins: $x0, $x1
    ; LOWER-NEXT: {{ $}}
    ; LOWER-NEXT: %reg0:_(s64) = COPY $x0
    ; LOWER-NEXT: %lhs_cst:_(s64) = G_CONSTANT i64 1
    ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SHL %reg0, %lhs_cst(s64)
    ; LOWER-NEXT: %reg1:_(s64) = COPY $x1
    ; LOWER-NEXT: %and_mask:_(s64) = G_CONSTANT i64 255
    ; LOWER-NEXT: %and:_(s64) = G_AND %reg1, %and_mask
    ; LOWER-NEXT: %rhs_cst:_(s64) = G_CONSTANT i64 1
    ; LOWER-NEXT: %cmp_rhs:_(s64) = G_ASHR %and, %rhs_cst(s64)
    ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
    ; LOWER-NEXT: $w0 = COPY %cmp(s32)
    ; LOWER-NEXT: RET_ReallyLR implicit $w0
    ;
    ; SELECT-LABEL: name: dont_swap_rhs_with_supported_extend
    ; SELECT: liveins: $x0, $x1
    ; SELECT-NEXT: {{ $}}
    ; SELECT-NEXT: %reg0:gpr64 = COPY $x0
    ; SELECT-NEXT: %cmp_lhs:gpr64 = UBFMXri %reg0, 63, 62
    ; SELECT-NEXT: %reg1:gpr64 = COPY $x1
    ; SELECT-NEXT: %and:gpr64common = ANDXri %reg1, 4103
    ; SELECT-NEXT: [[SUBSXrs:%[0-9]+]]:gpr64 = SUBSXrs %cmp_lhs, %and, 129, implicit-def $nzcv
    ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
    ; SELECT-NEXT: $w0 = COPY %cmp
    ; SELECT-NEXT: RET_ReallyLR implicit $w0
    %reg0:_(s64) = COPY $x0
    %lhs_cst:_(s64) = G_CONSTANT i64 1
    %cmp_lhs:_(s64) = G_SHL %reg0, %lhs_cst(s64)

    %reg1:_(s64) = COPY $x1
    %and_mask:_(s64) = G_CONSTANT i64 255
    %and:_(s64) = G_AND %reg1, %and_mask(s64)
    %rhs_cst:_(s64) = G_CONSTANT i64 1
    %cmp_rhs:_(s64) = G_ASHR %and, %rhs_cst(s64)

    %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
    $w0 = COPY %cmp(s32)
    RET_ReallyLR implicit $w0
...
---
name: swap_rhs_with_supported_extend
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1

    ; In this case, both the LHS and RHS are fed by a supported extend.
    ; However, the LHS' shift has a constant <= 4. This makes it more
    ; profitable, so we should swap the operands.
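    ;
    ; (Extended-register operands only permit a left shift of 0-4, so the
    ; shl-by-1 side can fold as the SUBSXrx below, while the ashr-by-5 side
    ; has to be materialized with an SBFM.)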
    ;
    ; LOWER-LABEL: name: swap_rhs_with_supported_extend
    ; LOWER: liveins: $x0, $x1
    ; LOWER-NEXT: {{ $}}
    ; LOWER-NEXT: %reg0:_(s64) = COPY $x0
    ; LOWER-NEXT: %and_mask:_(s64) = G_CONSTANT i64 255
    ; LOWER-NEXT: %and:_(s64) = G_AND %reg0, %and_mask
    ; LOWER-NEXT: %lhs_cst:_(s64) = G_CONSTANT i64 1
    ; LOWER-NEXT: %cmp_lhs:_(s64) = G_SHL %and, %lhs_cst(s64)
    ; LOWER-NEXT: %rhs_cst:_(s64) = G_CONSTANT i64 5
    ; LOWER-NEXT: %cmp_rhs:_(s64) = G_ASHR %and, %rhs_cst(s64)
    ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(slt), %cmp_rhs(s64), %cmp_lhs
    ; LOWER-NEXT: $w0 = COPY %cmp(s32)
    ; LOWER-NEXT: RET_ReallyLR implicit $w0
    ;
    ; SELECT-LABEL: name: swap_rhs_with_supported_extend
    ; SELECT: liveins: $x0, $x1
    ; SELECT-NEXT: {{ $}}
    ; SELECT-NEXT: %reg0:gpr64 = COPY $x0
    ; SELECT-NEXT: %and:gpr64common = ANDXri %reg0, 4103
    ; SELECT-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %reg0.sub_32
    ; SELECT-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
    ; SELECT-NEXT: %cmp_rhs:gpr64common = SBFMXri %and, 5, 63
    ; SELECT-NEXT: [[SUBSXrx:%[0-9]+]]:gpr64 = SUBSXrx %cmp_rhs, [[COPY1]], 1, implicit-def $nzcv
    ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
    ; SELECT-NEXT: $w0 = COPY %cmp
    ; SELECT-NEXT: RET_ReallyLR implicit $w0
    %reg0:_(s64) = COPY $x0
    %and_mask:_(s64) = G_CONSTANT i64 255
    %and:_(s64) = G_AND %reg0, %and_mask(s64)

    %lhs_cst:_(s64) = G_CONSTANT i64 1
    %cmp_lhs:_(s64) = G_SHL %and, %lhs_cst(s64)

    %rhs_cst:_(s64) = G_CONSTANT i64 5
    %cmp_rhs:_(s64) = G_ASHR %and, %rhs_cst(s64)

    %cmp:_(s32) = G_ICMP intpred(sgt), %cmp_lhs(s64), %cmp_rhs
    $w0 = COPY %cmp(s32)
    RET_ReallyLR implicit $w0
...