# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64-unknown-unknown -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-FAST --allow-unused-prefixes
# RUN: llc -mtriple=aarch64-unknown-unknown -mattr=+addr-lsl-slow-14 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SLOW --allow-unused-prefixes
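# The two RUN lines differ only in -mattr=+addr-lsl-slow-14: output specific to the
# default target uses the CHECK-FAST prefix, output specific to the slow-LSL target
# uses CHECK-SLOW, and checks shared by both runs use the plain CHECK prefix.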
define void @ldrxrox_breg_oreg(ptr %addr) { ret void }
define void @ldrdrox_breg_oreg(ptr %addr) { ret void }
define void @more_than_one_use(ptr %addr) { ret void }
define void @ldrhrox_shl(ptr %addr) { ret void }
define void @ldrwrox_shl(ptr %addr) { ret void }
define void @ldrxrox_shl(ptr %addr) { ret void }
define void @ldrdrox_shl(ptr %addr) { ret void }
define void @ldrqrox_shl(ptr %addr) { ret void }
define void @ldrxrox_mul_rhs(ptr %addr) { ret void }
define void @ldrdrox_mul_rhs(ptr %addr) { ret void }
define void @ldrxrox_mul_lhs(ptr %addr) { ret void }
define void @ldrdrox_mul_lhs(ptr %addr) { ret void }
define void @mul_not_pow_2(ptr %addr) { ret void }
define void @mul_wrong_pow_2(ptr %addr) { ret void }
define void @more_than_one_use_shl_fallback(ptr %addr) { ret void }
define void @ldrxrox_more_than_one_mem_use_shl(ptr %addr) { ret void }
define void @ldrxrox_more_than_one_use_shl(ptr %addr) { ret void }
define void @ldrhrox_more_than_one_mem_use_shl(ptr %addr) { ret void }
define void @ldrhrox_more_than_one_use_shl(ptr %addr) { ret void }
define void @ldrwrox_more_than_one_use_shl(ptr %addr) { ret void }
define void @ldrqrox_more_than_one_use_shl(ptr %addr) { ret void }
define void @more_than_one_use_shl_lsl(ptr %addr) { ret void }
define void @more_than_one_use_shl_minsize(ptr %addr) #0 { ret void }
define void @ldrwrox(ptr %addr) { ret void }
define void @ldrsrox(ptr %addr) { ret void }
define void @ldrhrox(ptr %addr) { ret void }
define void @ldbbrox(ptr %addr) { ret void }
define void @ldrqrox(ptr %addr) { ret void }
attributes #0 = { optsize }
name: ldrxrox_breg_oreg
tracksRegLiveness: true
machineFunctionInfo: {}
; CHECK-LABEL: name: ldrxrox_breg_oreg
; CHECK: liveins: $x0, $x1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
; CHECK-NEXT: $x0 = COPY [[LDRXroX]]
; CHECK-NEXT: RET_ReallyLR implicit $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:gpr(s64) = G_LOAD %2(p0) :: (load (s64) from %ir.addr)
RET_ReallyLR implicit $x0
name: ldrdrox_breg_oreg
tracksRegLiveness: true
machineFunctionInfo: {}
; CHECK-LABEL: name: ldrdrox_breg_oreg
; CHECK: liveins: $d0, $x1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
; CHECK-NEXT: $d0 = COPY [[LDRDroX]]
; CHECK-NEXT: RET_ReallyLR implicit $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:fpr(s64) = G_LOAD %2(p0) :: (load (s64) from %ir.addr)
RET_ReallyLR implicit $d0
# This shouldn't be folded, since we reuse the result of the G_PTR_ADD outside
# the G_LOAD.
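# Instead, the address is materialized once with ADDXrr and the load uses the
# register-immediate form (LDRXui).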
name: more_than_one_use
tracksRegLiveness: true
machineFunctionInfo: {}
; CHECK-LABEL: name: more_than_one_use
; CHECK: liveins: $x0, $x1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY]], [[COPY1]]
; CHECK-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[LDRXui]]
; CHECK-NEXT: $x0 = COPY [[ADDXrr1]]
; CHECK-NEXT: RET_ReallyLR implicit $x0
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:gpr(s64) = G_LOAD %2(p0) :: (load (s64) from %ir.addr)
%5:gpr(s64) = G_PTRTOINT %2
%6:gpr(s64) = G_ADD %5, %4
RET_ReallyLR implicit $x0
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrhrox_shl
; CHECK: liveins: $x0, $x1, $x2, $w1, $x0
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
; CHECK-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 9, 31
; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[UBFMWri]], 0
; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
; CHECK-NEXT: [[ANDXri:%[0-9]+]]:gpr64common = ANDXri [[SUBREG_TO_REG]], 4103
; CHECK-NEXT: [[LDRHHroX:%[0-9]+]]:gpr32 = LDRHHroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s16))
; CHECK-NEXT: RET_ReallyLR implicit [[LDRHHroX]]
%0:gpr(p0) = COPY $x0
%1:gpr(s32) = COPY $w1
%15:gpr(s64) = G_CONSTANT i64 9
%3:gpr(s32) = G_LSHR %1, %15(s64)
%4:gpr(s64) = G_ZEXT %3(s32)
%5:gpr(s64) = G_CONSTANT i64 255
%6:gpr(s64) = G_AND %4, %5
%13:gpr(s64) = G_CONSTANT i64 1
%8:gpr(s64) = G_SHL %6, %13(s64)
%9:gpr(p0) = G_PTR_ADD %0, %8(s64)
%12:gpr(s32) = G_LOAD %9(p0) :: (load (s16))
RET_ReallyLR implicit %12
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrwrox_shl
; CHECK: liveins: $x0, $x1, $x2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK-NEXT: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY1]], [[COPY]], 0, 1 :: (load (s32) from %ir.addr)
; CHECK-NEXT: RET_ReallyLR implicit [[LDRWroX]]
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 2
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s32) = G_LOAD %4(p0) :: (load (s32) from %ir.addr)
RET_ReallyLR implicit %5
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrxrox_shl
; CHECK: liveins: $x0, $x1, $x2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK-NEXT: $x2 = COPY [[LDRXroX]]
; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
RET_ReallyLR implicit $x2
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: ldrdrox_shl
; CHECK: liveins: $x0, $x1, $d2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
RET_ReallyLR implicit $d2
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: ldrqrox_shl
; CHECK: liveins: $x0, $x1, $d2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK-NEXT: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY1]], [[COPY]], 0, 1 :: (load (s128) from %ir.addr)
; CHECK-NEXT: RET_ReallyLR implicit [[LDRQroX]]
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 4
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:fpr(s128) = G_LOAD %4(p0) :: (load (s128) from %ir.addr)
RET_ReallyLR implicit %5
name: ldrxrox_mul_rhs
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrxrox_mul_rhs
; CHECK: liveins: $x0, $x1, $x2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK-NEXT: $x2 = COPY [[LDRXroX]]
; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
RET_ReallyLR implicit $x2
name: ldrdrox_mul_rhs
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: ldrdrox_mul_rhs
; CHECK: liveins: $x0, $x1, $d2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
RET_ReallyLR implicit $d2
name: ldrxrox_mul_lhs
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrxrox_mul_lhs
; CHECK: liveins: $x0, $x1, $x2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK-NEXT: $x2 = COPY [[LDRXroX]]
; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %1, %0(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
RET_ReallyLR implicit $x2
name: ldrdrox_mul_lhs
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: ldrdrox_mul_lhs
; CHECK: liveins: $x0, $x1, $d2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %1, %0(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
RET_ReallyLR implicit $d2
# Show that we don't get a shifted load from a mul when we don't have a
# power of 2. (The bit isn't set on the load.)
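# A multiply by a power of two (e.g. 8 in the tests above) is equivalent to a left
# shift (by 3) and can be folded into the scaled register-offset addressing mode.
# 7 has no such shift form, so the multiply stays as a MADDXrrr and the load keeps
# an unscaled offset (the shift-amount operand is 0).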
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: mul_not_pow_2
; CHECK: liveins: $x0, $x1, $d2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 7
; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
; CHECK-NEXT: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 7
%2:gpr(s64) = G_MUL %1, %0(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
RET_ReallyLR implicit $d2
# Show that we don't get a shifted load from a mul when we don't have
# the right power of 2. (The bit isn't set on the load.)
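# For a 64-bit access the scaled offset implies LSL #3 (a scale of 8), so a multiply
# by 16 (a shift of 4) cannot be folded either: the multiply is kept as a MADDXrrr
# and the load again uses an unscaled register offset.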
name: mul_wrong_pow_2
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: mul_wrong_pow_2
; CHECK: liveins: $x0, $x1, $d2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 16
; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
; CHECK-NEXT: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 16
%2:gpr(s64) = G_MUL %1, %0(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
RET_ReallyLR implicit $d2
# Show that we can still fall back to the register-register addressing
# mode when we fail to pull in the shift.
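# The shift is still selected separately (as a UBFMXri), but the G_PTR_ADD is folded
# into the register-register form of the load with the shift-amount bit clear.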
name: more_than_one_use_shl_fallback
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_fallback
; CHECK: liveins: $x0, $x1, $x2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 62, 61
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[UBFMXri]], 0, 0 :: (load (s64) from %ir.addr)
; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 2, 0
; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
; CHECK-NEXT: $x2 = COPY [[ADDXrr]]
; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 2
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
%6:gpr(s64) = G_ADD %2, %1
%7:gpr(s64) = G_ADD %5, %6
RET_ReallyLR implicit $x2
name: ldrxrox_more_than_one_mem_use_shl
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrxrox_more_than_one_mem_use_shl
; CHECK: liveins: $x0, $x1, $x2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
; CHECK-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 9, 31
; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[UBFMWri]], 0
; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
; CHECK-NEXT: [[ANDXri:%[0-9]+]]:gpr64common = ANDXri [[SUBREG_TO_REG]], 4103
; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s64))
; CHECK-NEXT: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s64))
; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[LDRXroX1]]
; CHECK-NEXT: RET_ReallyLR implicit [[ADDXrr]]
%0:gpr(p0) = COPY $x0
%1:gpr(s32) = COPY $w1
%15:gpr(s64) = G_CONSTANT i64 9
%3:gpr(s32) = G_LSHR %1, %15(s64)
%4:gpr(s64) = G_ZEXT %3(s32)
%5:gpr(s64) = G_CONSTANT i64 255
%6:gpr(s64) = G_AND %4, %5
%13:gpr(s64) = G_CONSTANT i64 3
%8:gpr(s64) = G_SHL %6, %13(s64)
%9:gpr(p0) = G_PTR_ADD %0, %8(s64)
%12:gpr(s64) = G_LOAD %9(p0) :: (load (s64))
%17:gpr(s64) = G_LOAD %9(p0) :: (load (s64))
%18:gpr(s64) = G_ADD %12, %17
RET_ReallyLR implicit %18
# Show that when the GEP is used both inside and outside a memory op, we only fold the memory op.
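# The shifted offset folds into the LDRXroX for the memory use, while the address
# needed outside the load is rebuilt with a separate ADDXrr.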
name: ldrxrox_more_than_one_use_shl
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrxrox_more_than_one_use_shl
; CHECK: liveins: $x0, $x1, $x2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[UBFMXri]]
; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
; CHECK-NEXT: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrr]], [[ADDXrr1]]
; CHECK-NEXT: $x2 = COPY [[ADDXrr2]]
; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
%6:gpr(s64) = G_ADD %2, %1
%7:gpr(s64) = G_ADD %5, %6
%8:gpr(s64) = G_PTRTOINT %4
%9:gpr(s64) = G_ADD %8, %7
RET_ReallyLR implicit $x2
# Fold SHL into LSL for mem ops. Do not fold if the target has LSLSLOW14.
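# CHECK-FAST folds the LSL #1 into LDRHHroX for both loads; CHECK-SLOW instead forms
# the address once with ADDXrx and uses unscaled LDRHHui loads.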
name: ldrhrox_more_than_one_mem_use_shl
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $x2
; CHECK-FAST-LABEL: name: ldrhrox_more_than_one_mem_use_shl
; CHECK-FAST: liveins: $x0, $x1, $x2, $w1, $x0
; CHECK-FAST-NEXT: {{ $}}
; CHECK-FAST-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK-FAST-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
; CHECK-FAST-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 9, 31
; CHECK-FAST-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[UBFMWri]], 0
; CHECK-FAST-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
; CHECK-FAST-NEXT: [[ANDXri:%[0-9]+]]:gpr64common = ANDXri [[SUBREG_TO_REG]], 4103
; CHECK-FAST-NEXT: [[LDRHHroX:%[0-9]+]]:gpr32 = LDRHHroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s16))
; CHECK-FAST-NEXT: [[LDRHHroX1:%[0-9]+]]:gpr32 = LDRHHroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s16))
; CHECK-FAST-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[LDRHHroX]], [[LDRHHroX1]]
; CHECK-FAST-NEXT: RET_ReallyLR implicit [[ADDWrr]]
; CHECK-SLOW-LABEL: name: ldrhrox_more_than_one_mem_use_shl
; CHECK-SLOW: liveins: $x0, $x1, $x2, $w1, $x0
; CHECK-SLOW-NEXT: {{ $}}
; CHECK-SLOW-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-SLOW-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
; CHECK-SLOW-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 9, 31
; CHECK-SLOW-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[UBFMWri]], 0
; CHECK-SLOW-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
; CHECK-SLOW-NEXT: [[COPY2:%[0-9]+]]:gpr32all = COPY [[SUBREG_TO_REG]].sub_32
; CHECK-SLOW-NEXT: [[COPY3:%[0-9]+]]:gpr32 = COPY [[COPY2]]
; CHECK-SLOW-NEXT: [[COPY4:%[0-9]+]]:gpr64common = COPY [[COPY]]
; CHECK-SLOW-NEXT: [[ADDXrx:%[0-9]+]]:gpr64sp = ADDXrx [[COPY4]], [[COPY3]], 1
; CHECK-SLOW-NEXT: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[ADDXrx]], 0 :: (load (s16))
; CHECK-SLOW-NEXT: [[LDRHHui1:%[0-9]+]]:gpr32 = LDRHHui [[ADDXrx]], 0 :: (load (s16))
; CHECK-SLOW-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[LDRHHui]], [[LDRHHui1]]
; CHECK-SLOW-NEXT: RET_ReallyLR implicit [[ADDWrr]]
%0:gpr(p0) = COPY $x0
%1:gpr(s32) = COPY $w1
%15:gpr(s64) = G_CONSTANT i64 9
%3:gpr(s32) = G_LSHR %1, %15(s64)
%4:gpr(s64) = G_ZEXT %3(s32)
%5:gpr(s64) = G_CONSTANT i64 255
%6:gpr(s64) = G_AND %4, %5
%13:gpr(s64) = G_CONSTANT i64 1
%8:gpr(s64) = G_SHL %6, %13(s64)
%9:gpr(p0) = G_PTR_ADD %0, %8(s64)
%12:gpr(s32) = G_LOAD %9(p0) :: (load (s16))
%17:gpr(s32) = G_LOAD %9(p0) :: (load (s16))
%18:gpr(s32) = G_ADD %12, %17
RET_ReallyLR implicit %18
# Fold SHL into LSL for memory ops. Do not fold if the target has LSLSLOW14.
name: ldrhrox_more_than_one_use_shl
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $x2
; CHECK-FAST-LABEL: name: ldrhrox_more_than_one_use_shl
; CHECK-FAST: liveins: $x0, $x1, $x2, $w1, $x0
; CHECK-FAST-NEXT: {{ $}}
; CHECK-FAST-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK-FAST-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
; CHECK-FAST-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 9, 31
; CHECK-FAST-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[UBFMWri]], 0
; CHECK-FAST-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
; CHECK-FAST-NEXT: [[ANDXri:%[0-9]+]]:gpr64common = ANDXri [[SUBREG_TO_REG]], 4103
; CHECK-FAST-NEXT: [[LDRHHroX:%[0-9]+]]:gpr32 = LDRHHroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s16))
; CHECK-FAST-NEXT: [[LDRHHroX1:%[0-9]+]]:gpr32 = LDRHHroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s16))
; CHECK-FAST-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[LDRHHroX]], [[LDRHHroX1]]
; CHECK-FAST-NEXT: RET_ReallyLR implicit [[ADDWrr]]
; CHECK-SLOW-LABEL: name: ldrhrox_more_than_one_use_shl
; CHECK-SLOW: liveins: $x0, $x1, $x2, $w1, $x0
; CHECK-SLOW-NEXT: {{ $}}
; CHECK-SLOW-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-SLOW-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
; CHECK-SLOW-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 9, 31
; CHECK-SLOW-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[UBFMWri]], 0
; CHECK-SLOW-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
; CHECK-SLOW-NEXT: [[COPY2:%[0-9]+]]:gpr32all = COPY [[SUBREG_TO_REG]].sub_32
; CHECK-SLOW-NEXT: [[COPY3:%[0-9]+]]:gpr32 = COPY [[COPY2]]
; CHECK-SLOW-NEXT: [[COPY4:%[0-9]+]]:gpr64common = COPY [[COPY]]
; CHECK-SLOW-NEXT: [[ADDXrx:%[0-9]+]]:gpr64sp = ADDXrx [[COPY4]], [[COPY3]], 1
; CHECK-SLOW-NEXT: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[ADDXrx]], 0 :: (load (s16))
; CHECK-SLOW-NEXT: [[LDRHHui1:%[0-9]+]]:gpr32 = LDRHHui [[ADDXrx]], 0 :: (load (s16))
; CHECK-SLOW-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[LDRHHui]], [[LDRHHui1]]
; CHECK-SLOW-NEXT: RET_ReallyLR implicit [[ADDWrr]]
%0:gpr(p0) = COPY $x0
%1:gpr(s32) = COPY $w1
%15:gpr(s64) = G_CONSTANT i64 9
%3:gpr(s32) = G_LSHR %1, %15(s64)
%4:gpr(s64) = G_ZEXT %3(s32)
%5:gpr(s64) = G_CONSTANT i64 255
%6:gpr(s64) = G_AND %4, %5
%13:gpr(s64) = G_CONSTANT i64 1
%8:gpr(s64) = G_SHL %6, %13(s64)
%9:gpr(p0) = G_PTR_ADD %0, %8(s64)
%12:gpr(s32) = G_LOAD %9(p0) :: (load (s16))
%17:gpr(s32) = G_LOAD %9(p0) :: (load (s16))
%18:gpr(s32) = G_ADD %12, %17
RET_ReallyLR implicit %18
# Fold SHL into LSL for memory ops.
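# This 32-bit case (LSL #2) uses the shared CHECK prefix: the same fold is expected
# on both RUN lines, unlike the halfword and quadword cases above and below.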
name: ldrwrox_more_than_one_use_shl
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrwrox_more_than_one_use_shl
; CHECK: liveins: $x0, $x1, $x2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 62, 61
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[UBFMXri]]
; CHECK-NEXT: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY1]], [[COPY]], 0, 1 :: (load (s32) from %ir.addr)
; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[LDRWroX]], 0
; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 2, 0
; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[SUBREG_TO_REG]], [[ADDXri]]
; CHECK-NEXT: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrr]], [[ADDXrr1]]
; CHECK-NEXT: $x2 = COPY [[ADDXrr2]]
; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 2
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%20:gpr(s32) = G_LOAD %4(p0) :: (load (s32) from %ir.addr)
%5:gpr(s64) = G_ZEXT %20
%6:gpr(s64) = G_ADD %2, %1
%7:gpr(s64) = G_ADD %5, %6
%8:gpr(s64) = G_PTRTOINT %4
%9:gpr(s64) = G_ADD %8, %7
RET_ReallyLR implicit $x2
# Fold SHL into LSL for memory ops. Do not fold if the target has LSLSLOW14.
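# CHECK-FAST keeps the LSL #4 folded into LDRQroX; CHECK-SLOW computes the address
# with a plain ADDXrr and loads through the unsigned-offset form LDRQui.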
name: ldrqrox_more_than_one_use_shl
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $x2
; CHECK-FAST-LABEL: name: ldrqrox_more_than_one_use_shl
; CHECK-FAST: liveins: $x0, $x1, $x2
; CHECK-FAST-NEXT: {{ $}}
; CHECK-FAST-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-FAST-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 60, 59
; CHECK-FAST-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
; CHECK-FAST-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
; CHECK-FAST-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[UBFMXri]]
; CHECK-FAST-NEXT: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY1]], [[COPY]], 0, 1 :: (load (s128) from %ir.addr)
; CHECK-FAST-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 4, 0
; CHECK-FAST-NEXT: [[COPY3:%[0-9]+]]:fpr64 = COPY [[LDRQroX]].dsub
; CHECK-FAST-NEXT: [[COPY4:%[0-9]+]]:gpr64 = COPY [[COPY3]]
; CHECK-FAST-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY4]], [[ADDXri]]
; CHECK-FAST-NEXT: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrr]], [[ADDXrr1]]
; CHECK-FAST-NEXT: RET_ReallyLR implicit [[ADDXrr2]]
; CHECK-SLOW-LABEL: name: ldrqrox_more_than_one_use_shl
; CHECK-SLOW: liveins: $x0, $x1, $x2
; CHECK-SLOW-NEXT: {{ $}}
; CHECK-SLOW-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-SLOW-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 60, 59
; CHECK-SLOW-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK-SLOW-NEXT: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
; CHECK-SLOW-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[ADDXrr]], 0 :: (load (s128) from %ir.addr)
; CHECK-SLOW-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 4, 0
; CHECK-SLOW-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY [[LDRQui]].dsub
; CHECK-SLOW-NEXT: [[COPY3:%[0-9]+]]:gpr64 = COPY [[COPY2]]
; CHECK-SLOW-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY3]], [[ADDXri]]
; CHECK-SLOW-NEXT: [[COPY4:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
; CHECK-SLOW-NEXT: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[COPY4]], [[ADDXrr1]]
; CHECK-SLOW-NEXT: RET_ReallyLR implicit [[ADDXrr2]]
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 4
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%20:fpr(s128) = G_LOAD %4(p0) :: (load (s128) from %ir.addr)
%6:gpr(s64) = G_ADD %2, %1
%200:fpr(s64) = G_TRUNC %20
%2000:gpr(s64) = COPY %200
%7:gpr(s64) = G_ADD %2000, %6
%8:gpr(s64) = G_PTRTOINT %4
%9:gpr(s64) = G_ADD %8, %7
RET_ReallyLR implicit %9
# Show that when we have a fastpath for shift-left, we perform the folding
# if it has more than one use.
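# Both loads fold the LSL #3 into their own LDRXroX rather than materializing the
# shifted add once.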
name: more_than_one_use_shl_lsl
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_lsl
; CHECK: liveins: $x0, $x1, $x2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK-NEXT: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[LDRXroX1]]
; CHECK-NEXT: $x2 = COPY [[ADDXrr]]
; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
%6:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
%7:gpr(s64) = G_ADD %5, %6
RET_ReallyLR implicit $x2
# Show that when we're optimizing for size, we'll do the folding no matter what.
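# With optsize (attributes #0 above), the LSL #3 is folded into the LDRXroX even
# though the shifted value also feeds the ADDXrs and ADDXri below.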
name: more_than_one_use_shl_minsize
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_minsize
; CHECK: liveins: $x0, $x1, $x2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
; CHECK-NEXT: [[ADDXrs:%[0-9]+]]:gpr64 = ADDXrs [[COPY2]], [[COPY]], 3
; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrs]], [[ADDXrr]]
; CHECK-NEXT: $x2 = COPY [[ADDXrr1]]
; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
%6:gpr(s64) = G_ADD %2, %1
%7:gpr(s64) = G_ADD %5, %6
%8:gpr(s64) = G_PTRTOINT %4
%9:gpr(s64) = G_ADD %8, %7
RET_ReallyLR implicit $x2
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
; CHECK-LABEL: name: ldrwrox
; CHECK: liveins: $x0, $x1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK-NEXT: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
; CHECK-NEXT: $w2 = COPY [[LDRWroX]]
; CHECK-NEXT: RET_ReallyLR implicit $w2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:gpr(s32) = G_LOAD %2(p0) :: (load (s32) from %ir.addr)
RET_ReallyLR implicit $w2
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
; CHECK-LABEL: name: ldrsrox
; CHECK: liveins: $d0, $x1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK-NEXT: [[LDRSroX:%[0-9]+]]:fpr32 = LDRSroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
; CHECK-NEXT: $s2 = COPY [[LDRSroX]]
; CHECK-NEXT: RET_ReallyLR implicit $h2
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:fpr(s32) = G_LOAD %2(p0) :: (load (s32) from %ir.addr)
RET_ReallyLR implicit $h2
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
; CHECK-LABEL: name: ldrhrox
; CHECK: liveins: $x0, $x1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK-NEXT: [[LDRHroX:%[0-9]+]]:fpr16 = LDRHroX [[COPY]], [[COPY1]], 0, 0 :: (load (s16) from %ir.addr)
; CHECK-NEXT: $h2 = COPY [[LDRHroX]]
; CHECK-NEXT: RET_ReallyLR implicit $h2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:fpr(s16) = G_LOAD %2(p0) :: (load (s16) from %ir.addr)
RET_ReallyLR implicit $h2
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
; CHECK-LABEL: name: ldbbrox
; CHECK: liveins: $x0, $x1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK-NEXT: [[LDRBBroX:%[0-9]+]]:gpr32 = LDRBBroX [[COPY]], [[COPY1]], 0, 0 :: (load (s8) from %ir.addr)
; CHECK-NEXT: $w2 = COPY [[LDRBBroX]]
; CHECK-NEXT: RET_ReallyLR implicit $w2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:gpr(s32) = G_LOAD %2(p0) :: (load (s8) from %ir.addr)
RET_ReallyLR implicit $w2
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
; CHECK-LABEL: name: ldrqrox
; CHECK: liveins: $d0, $x1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK-NEXT: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY]], [[COPY1]], 0, 0 :: (load (<2 x s64>) from %ir.addr)
; CHECK-NEXT: $q0 = COPY [[LDRQroX]]
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:fpr(<2 x s64>) = G_LOAD %2(p0) :: (load (<2 x s64>) from %ir.addr)
$q0 = COPY %4(<2 x s64>)
RET_ReallyLR implicit $q0