1 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
2 # RUN: llc -mtriple=aarch64 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
9 tracksRegLiveness: true
14 ; This should not have an UBFMXri, since ADDWrr implicitly gives us the
17 ; CHECK-LABEL: name: fold
18 ; CHECK: liveins: $w0, $w1
19 ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
20 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
21 ; CHECK: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[COPY1]], [[COPY]]
22 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ADDWrr]], %subreg.sub_32
23 ; CHECK: $x0 = COPY [[SUBREG_TO_REG]]
24 ; CHECK: RET_ReallyLR implicit $x0
25 %0:gpr(s32) = COPY $w0
26 %1:gpr(s32) = COPY $w1
27 %2:gpr(s32) = G_ADD %1, %0
28 %3:gpr(s64) = G_ZEXT %2(s32)
30 RET_ReallyLR implicit $x0
37 tracksRegLiveness: true
42 ; We should have a UBFMXri here, because we only do this for zero extends
43 ; from 32 bits to 64 bits.
45 ; CHECK-LABEL: name: dont_fold_s16
46 ; CHECK: liveins: $w0, $w1
47 ; CHECK: [[DEF:%[0-9]+]]:gpr32 = IMPLICIT_DEF
48 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[DEF]], %subreg.sub_32
49 ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64 = UBFMXri [[SUBREG_TO_REG]], 0, 15
50 ; CHECK: $x0 = COPY [[UBFMXri]]
51 ; CHECK: RET_ReallyLR implicit $x0
52 %0:gpr(s16) = G_IMPLICIT_DEF
53 %3:gpr(s64) = G_ZEXT %0(s16)
55 RET_ReallyLR implicit $x0
62 tracksRegLiveness: true
67 ; We should have an ORRWrs here, because isDef32 disallows copies.
69 ; CHECK-LABEL: name: dont_fold_copy
71 ; CHECK: %copy:gpr32 = COPY $w0
72 ; CHECK: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, %copy, 0
73 ; CHECK: %zext:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
74 ; CHECK: $x0 = COPY %zext
75 ; CHECK: RET_ReallyLR implicit $x0
76 %copy:gpr(s32) = COPY $w0
77 %zext:gpr(s64) = G_ZEXT %copy(s32)
79 RET_ReallyLR implicit $x0
83 name: dont_fold_bitcast
86 tracksRegLiveness: true
91 ; We should have an ORRWrs here, because isDef32 disallows bitcasts.
93 ; CHECK-LABEL: name: dont_fold_bitcast
95 ; CHECK: %copy:gpr32all = COPY $w0
96 ; CHECK: %bitcast1:gpr32 = COPY %copy
97 ; CHECK: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, %bitcast1, 0
98 ; CHECK: %zext:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
99 ; CHECK: $x0 = COPY %zext
100 ; CHECK: RET_ReallyLR implicit $x0
101 %copy:gpr(s32) = COPY $w0
102 %bitcast0:gpr(<4 x s8>) = G_BITCAST %copy(s32)
103 %bitcast1:gpr(s32) = G_BITCAST %bitcast0
104 %zext:gpr(s64) = G_ZEXT %bitcast1(s32)
105 $x0 = COPY %zext(s64)
106 RET_ReallyLR implicit $x0
110 name: dont_fold_trunc
112 regBankSelected: true
113 tracksRegLiveness: true
118 ; We should have an ORRWrs here, because isDef32 disallows truncs.
120 ; CHECK-LABEL: name: dont_fold_trunc
121 ; CHECK: liveins: $x0
122 ; CHECK: %copy:gpr64sp = COPY $x0
123 ; CHECK: %trunc:gpr32common = COPY %copy.sub_32
124 ; CHECK: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, %trunc, 0
125 ; CHECK: %zext:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
126 ; CHECK: $x0 = COPY %zext
127 ; CHECK: RET_ReallyLR implicit $x0
128 %copy:gpr(s64) = COPY $x0
129 %trunc:gpr(s32) = G_TRUNC %copy(s64)
130 %zext:gpr(s64) = G_ZEXT %trunc(s32)
131 $x0 = COPY %zext(s64)
132 RET_ReallyLR implicit $x0
138 regBankSelected: true
139 tracksRegLiveness: true
141 ; CHECK-LABEL: name: dont_fold_phi
143 ; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000)
144 ; CHECK: liveins: $w0, $w1, $w2
145 ; CHECK: %copy1:gpr32all = COPY $w0
146 ; CHECK: %copy2:gpr32all = COPY $w1
147 ; CHECK: %cond_wide:gpr32 = COPY $w2
148 ; CHECK: TBNZW %cond_wide, 0, %bb.1
151 ; CHECK: successors: %bb.2(0x80000000)
153 ; CHECK: %phi:gpr32 = PHI %copy1, %bb.0, %copy2, %bb.1
154 ; CHECK: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, %phi, 0
155 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
156 ; CHECK: $x0 = COPY [[SUBREG_TO_REG]]
157 ; CHECK: RET_ReallyLR implicit $x0
158 ; We should have an ORRWrs here, because isDef32 disallows phis.
161 liveins: $w0, $w1, $w2
163 %copy1:gpr(s32) = COPY $w0
164 %copy2:gpr(s32) = COPY $w1
165 %cond_wide:gpr(s32) = COPY $w2
166 %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
167 G_BRCOND %cond(s1), %bb.1
173 %phi:gpr(s32) = G_PHI %copy1(s32), %bb.0, %copy2(s32), %bb.1
174 %5:gpr(s64) = G_ZEXT %phi(s32)
176 RET_ReallyLR implicit $x0
180 name: dont_look_through_copy
182 regBankSelected: true
183 tracksRegLiveness: true
188 ; Make sure we don't walk past the copy.
190 ; CHECK-LABEL: name: dont_look_through_copy
191 ; CHECK: liveins: $w0, $w1
192 ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
193 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
194 ; CHECK: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[COPY1]], [[COPY]]
195 ; CHECK: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[ADDWrr]], 0
196 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
197 ; CHECK: $x0 = COPY [[SUBREG_TO_REG]]
198 ; CHECK: RET_ReallyLR implicit $x0
199 %0:gpr(s32) = COPY $w0
200 %1:gpr(s32) = COPY $w1
201 %2:gpr(s32) = G_ADD %1, %0
202 %3:gpr(s32) = COPY %2(s32)
203 %4:gpr(s64) = G_ZEXT %3(s32)
205 RET_ReallyLR implicit $x0