# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64-unknown-unknown -run-pass=instruction-select -global-isel-abort=1 -verify-machineinstrs %s -o - | FileCheck %s
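
# These cases check how a G_XOR feeding a test-bit branch is handled when
# selecting TBZ/TBNZ: xor-ing with a constant that has the tested bit set
# flips the branch condition (TBZ <-> TBNZ), while a constant with that bit
# clear, a cancelling xor chain, or a bit outside the xor's width leaves the
# condition alone.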
  ; CHECK-LABEL: name: flip_eq
  ; CHECK-NEXT: successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK-NEXT: liveins: $x0
  ; CHECK-NEXT: %copy:gpr64all = COPY $x0
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
  ; CHECK-NEXT: TBNZW [[COPY1]], 3, %bb.1
  ; CHECK-NEXT: B %bb.0
  ; CHECK-NEXT: RET_ReallyLR
    successors: %bb.0, %bb.1
    liveins: $x0
    %copy:gpr(s64) = COPY $x0

    %bit:gpr(s64) = G_CONSTANT i64 8
    %zero:gpr(s64) = G_CONSTANT i64 0

    ; 8 has the third bit set.
    %fold_cst:gpr(s64) = G_CONSTANT i64 8

    ; This only has the third bit set if %copy does not. So, to walk through
    ; this, we want to use a TBNZW on %copy.
    %fold_me:gpr(s64) = G_XOR %copy, %fold_cst

    %and:gpr(s64) = G_AND %fold_me, %bit
    %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s64), %zero
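
    ; Spelled out: the XOR flips bit 3, so (%copy ^ 8) & 8 == 0 exactly when
    ; bit 3 of %copy is 1. Branching on the eq result is therefore the same as
    ; branching when bit 3 of %copy is set, i.e. a TBNZW on %copy.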
  ; CHECK-LABEL: name: flip_ne
  ; CHECK-NEXT: successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK-NEXT: liveins: $x0
  ; CHECK-NEXT: %copy:gpr64all = COPY $x0
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
  ; CHECK-NEXT: TBZW [[COPY1]], 3, %bb.1
  ; CHECK-NEXT: B %bb.0
  ; CHECK-NEXT: RET_ReallyLR
    successors: %bb.0, %bb.1
    liveins: $x0

    ; Same as eq case, but we should get a TBZW instead.
    %copy:gpr(s64) = COPY $x0
    %bit:gpr(s64) = G_CONSTANT i64 8
    %zero:gpr(s64) = G_CONSTANT i64 0
    %fold_cst:gpr(s64) = G_CONSTANT i64 8
    %fold_me:gpr(s64) = G_XOR %copy, %fold_cst
    %and:gpr(s64) = G_AND %fold_me, %bit
    %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
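
    ; Here (%copy ^ 8) & 8 != 0 exactly when bit 3 of %copy is 0, so branching
    ; on the ne result is a bit-clear test on %copy, i.e. a TBZW.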
  ; CHECK-LABEL: name: dont_flip_eq
  ; CHECK-NEXT: successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK-NEXT: liveins: $x0
  ; CHECK-NEXT: %copy:gpr64all = COPY $x0
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
  ; CHECK-NEXT: TBZW [[COPY1]], 3, %bb.1
  ; CHECK-NEXT: B %bb.0
  ; CHECK-NEXT: RET_ReallyLR
    successors: %bb.0, %bb.1
    liveins: $x0
    %copy:gpr(s64) = COPY $x0

    %bit:gpr(s64) = G_CONSTANT i64 8
    %zero:gpr(s64) = G_CONSTANT i64 0

    ; 7 does not have the third bit set.
    %fold_cst:gpr(s64) = G_CONSTANT i64 7

    ; This only has the third bit set if %copy does. So, to walk through this,
    ; we should have a TBZW on %copy.
    %fold_me:gpr(s64) = G_XOR %fold_cst, %copy

    %and:gpr(s64) = G_AND %fold_me, %bit
    %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s64), %zero
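
    ; Since 7 leaves bit 3 untouched, (%copy ^ 7) & 8 == %copy & 8. Branching
    ; on the eq result is an ordinary bit-clear test on %copy, so no flip: TBZW.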
name: dont_flip_eq_zext
regBankSelected: true
tracksRegLiveness: true
  ; CHECK-LABEL: name: dont_flip_eq_zext
  ; CHECK-NEXT: successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $wzr
  ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_32
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY [[SUBREG_TO_REG]]
  ; CHECK-NEXT: TBNZX [[COPY1]], 63, %bb.1
  ; CHECK-NEXT: B %bb.0
  ; CHECK-NEXT: RET_ReallyLR
    successors: %bb.0(0x40000000), %bb.1(0x40000000)
    %1:gpr(s32) = G_CONSTANT i32 0
    %3:gpr(s32) = G_CONSTANT i32 -1
    %4:gpr(s32) = G_XOR %1, %3
    %5:gpr(s64) = G_ZEXT %4(s32)
    %15:gpr(s64) = G_CONSTANT i64 0
    %13:gpr(s32) = G_ICMP intpred(slt), %5(s64), %15
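
    ; slt against zero tests bit 63, the sign bit of the zero-extended value.
    ; That bit lies outside the 32-bit G_XOR feeding the G_ZEXT, so the
    ; condition is not flipped here; the expected selection is still a TBNZX
    ; on bit 63 of the extended value.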
regBankSelected: true
  ; CHECK-LABEL: name: dont_flip_ne
  ; CHECK-NEXT: successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK-NEXT: liveins: $x0
  ; CHECK-NEXT: %copy:gpr64all = COPY $x0
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
  ; CHECK-NEXT: TBNZW [[COPY1]], 3, %bb.1
  ; CHECK-NEXT: B %bb.0
  ; CHECK-NEXT: RET_ReallyLR
    successors: %bb.0, %bb.1
    liveins: $x0

    ; Same as eq case, but we should get a TBNZW instead.
    %copy:gpr(s64) = COPY $x0
    %bit:gpr(s64) = G_CONSTANT i64 8
    %zero:gpr(s64) = G_CONSTANT i64 0
    %fold_cst:gpr(s64) = G_CONSTANT i64 7
    %fold_me:gpr(s64) = G_XOR %fold_cst, %copy
    %and:gpr(s64) = G_AND %fold_me, %bit
    %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
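
    ; As in dont_flip_eq, bit 3 of (%copy ^ 7) equals bit 3 of %copy, so
    ; branching on the ne result is a bit-set test on %copy, i.e. a TBNZW.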
regBankSelected: true
  ; CHECK-LABEL: name: xor_chain
  ; CHECK-NEXT: successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK-NEXT: liveins: $x0
  ; CHECK-NEXT: %copy:gpr64all = COPY $x0
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
  ; CHECK-NEXT: TBZW [[COPY1]], 3, %bb.1
  ; CHECK-NEXT: B %bb.0
  ; CHECK-NEXT: RET_ReallyLR
    successors: %bb.0, %bb.1
    liveins: $x0
    %copy:gpr(s64) = COPY $x0
    %bit:gpr(s64) = G_CONSTANT i64 8
    %zero:gpr(s64) = G_CONSTANT i64 0
    %fold_cst:gpr(s64) = G_CONSTANT i64 8

    ; The G_XORs cancel each other out, so we should get a TBZW.
    %xor1:gpr(s64) = G_XOR %copy, %fold_cst
    %xor2:gpr(s64) = G_XOR %xor1, %fold_cst

    %and:gpr(s64) = G_AND %xor2, %bit
    %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s64), %zero
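
    ; Each XOR with 8 flips bit 3, so the pair restores it:
    ; ((%copy ^ 8) ^ 8) & 8 == %copy & 8, and the eq branch is an ordinary
    ; bit-clear test on %copy, i.e. a TBZW.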