1 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
2 # RUN: llc -mtriple=aarch64-unknown-unknown -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
; Stub IR functions: these exist only so the MIR bodies below can reference
; %ir.addr in their memory operands; every IR body is just `ret void`.
5 define void @ldrxrox_breg_oreg(i64* %addr) { ret void }
6 define void @ldrdrox_breg_oreg(i64* %addr) { ret void }
7 define void @more_than_one_use(i64* %addr) { ret void }
8 define void @ldrxrox_shl(i64* %addr) { ret void }
9 define void @ldrdrox_shl(i64* %addr) { ret void }
10 define void @ldrxrox_mul_rhs(i64* %addr) { ret void }
11 define void @ldrdrox_mul_rhs(i64* %addr) { ret void }
12 define void @ldrxrox_mul_lhs(i64* %addr) { ret void }
13 define void @ldrdrox_mul_lhs(i64* %addr) { ret void }
14 define void @mul_not_pow_2(i64* %addr) { ret void }
15 define void @mul_wrong_pow_2(i64* %addr) { ret void }
16 define void @more_than_one_use_shl_1(i64* %addr) { ret void }
17 define void @more_than_one_use_shl_2(i64* %addr) { ret void }
18 define void @more_than_one_use_shl_lsl_fast(i64* %addr) #1 { ret void }
19 define void @more_than_one_use_shl_lsl_slow(i64* %addr) { ret void }
20 define void @more_than_one_use_shl_minsize(i64* %addr) #0 { ret void }
21 define void @ldrwrox(i64* %addr) { ret void }
22 define void @ldrsrox(i64* %addr) { ret void }
23 define void @ldrhrox(i64* %addr) { ret void }
24 define void @ldbbrox(i64* %addr) { ret void }
25 define void @ldrqrox(i64* %addr) { ret void }
; #0 marks a function optsize; #1 enables the +addr-lsl-fast subtarget
; feature. Both change whether a shift with multiple uses is folded into the
; load's addressing mode (exercised by the more_than_one_use_shl_* tests).
26 attributes #0 = { optsize }
27 attributes #1 = { "target-features"="+addr-lsl-fast" }
# Base register + offset register: the G_PTR_ADD folds into the
# register-register (roX) addressing mode of the 64-bit GPR load.
31 name: ldrxrox_breg_oreg
35 tracksRegLiveness: true
36 machineFunctionInfo: {}
41 ; CHECK-LABEL: name: ldrxrox_breg_oreg
42 ; CHECK: liveins: $x0, $x1
43 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
44 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
45 ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
46 ; CHECK: $x0 = COPY [[LDRXroX]]
47 ; CHECK: RET_ReallyLR implicit $x0
49 %1:gpr(s64) = COPY $x1
50 %2:gpr(p0) = G_PTR_ADD %0, %1
51 %4:gpr(s64) = G_LOAD %2(p0) :: (load (s64) from %ir.addr)
53 RET_ReallyLR implicit $x0
# Same as ldrxrox_breg_oreg, but the load result is on the FPR bank, so the
# fold selects LDRDroX instead.
57 name: ldrdrox_breg_oreg
61 tracksRegLiveness: true
62 machineFunctionInfo: {}
66 ; CHECK-LABEL: name: ldrdrox_breg_oreg
67 ; CHECK: liveins: $d0, $x1
68 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
69 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
70 ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
71 ; CHECK: $d0 = COPY [[LDRDroX]]
72 ; CHECK: RET_ReallyLR implicit $d0
74 %1:gpr(s64) = COPY $x1
75 %2:gpr(p0) = G_PTR_ADD %0, %1
76 %4:fpr(s64) = G_LOAD %2(p0) :: (load (s64) from %ir.addr)
78 RET_ReallyLR implicit $d0
81 name: more_than_one_use
85 tracksRegLiveness: true
86 machineFunctionInfo: {}
90 ; This shouldn't be folded, since we reuse the result of the G_PTR_ADD outside
; of the memory operation (the G_PTRTOINT below), so the ADDXrr is needed
; regardless and folding would not save an instruction.
92 ; CHECK-LABEL: name: more_than_one_use
93 ; CHECK: liveins: $x0, $x1
94 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
95 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
96 ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY]], [[COPY1]]
97 ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
98 ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
99 ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[LDRXui]]
100 ; CHECK: $x0 = COPY [[ADDXrr1]]
101 ; CHECK: RET_ReallyLR implicit $x0
102 %0:gpr(p0) = COPY $x0
103 %1:gpr(s64) = COPY $x1
104 %2:gpr(p0) = G_PTR_ADD %0, %1
105 %4:gpr(s64) = G_LOAD %2(p0) :: (load (s64) from %ir.addr)
106 %5:gpr(s64) = G_PTRTOINT %2
107 %6:gpr(s64) = G_ADD %5, %4
109 RET_ReallyLR implicit $x0
# G_SHL by 3 (log2 of the 8-byte access size) folds into the scaled form of
# LDRXroX: compare the trailing operands `0, 1` here with the `0, 0` of the
# unscaled breg_oreg test above.
116 regBankSelected: true
117 tracksRegLiveness: true
118 machineFunctionInfo: {}
121 liveins: $x0, $x1, $x2
122 ; CHECK-LABEL: name: ldrxrox_shl
123 ; CHECK: liveins: $x0, $x1, $x2
124 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
125 ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
126 ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
127 ; CHECK: $x2 = COPY [[LDRXroX]]
128 ; CHECK: RET_ReallyLR implicit $x2
129 %0:gpr(s64) = COPY $x0
130 %1:gpr(s64) = G_CONSTANT i64 3
131 %2:gpr(s64) = G_SHL %0, %1(s64)
132 %3:gpr(p0) = COPY $x1
133 %4:gpr(p0) = G_PTR_ADD %3, %2
134 %5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
136 RET_ReallyLR implicit $x2
# FPR counterpart of ldrxrox_shl: the shift-by-3 folds into the scaled form
# of LDRDroX.
143 regBankSelected: true
144 tracksRegLiveness: true
145 machineFunctionInfo: {}
148 liveins: $x0, $x1, $d2
149 ; CHECK-LABEL: name: ldrdrox_shl
150 ; CHECK: liveins: $x0, $x1, $d2
151 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
152 ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
153 ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
154 ; CHECK: $d2 = COPY [[LDRDroX]]
155 ; CHECK: RET_ReallyLR implicit $d2
156 %0:gpr(s64) = COPY $x0
157 %1:gpr(s64) = G_CONSTANT i64 3
158 %2:gpr(s64) = G_SHL %0, %1(s64)
159 %3:gpr(p0) = COPY $x1
160 %4:gpr(p0) = G_PTR_ADD %3, %2
161 %5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
163 RET_ReallyLR implicit $d2
# G_MUL by 8 (constant on the RHS) is equivalent to a shift-left by 3 and is
# folded into the scaled addressing mode, just like ldrxrox_shl.
167 name: ldrxrox_mul_rhs
170 regBankSelected: true
171 tracksRegLiveness: true
172 machineFunctionInfo: {}
175 liveins: $x0, $x1, $x2
176 ; CHECK-LABEL: name: ldrxrox_mul_rhs
177 ; CHECK: liveins: $x0, $x1, $x2
178 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
179 ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
180 ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
181 ; CHECK: $x2 = COPY [[LDRXroX]]
182 ; CHECK: RET_ReallyLR implicit $x2
183 %0:gpr(s64) = COPY $x0
184 %1:gpr(s64) = G_CONSTANT i64 8
185 %2:gpr(s64) = G_MUL %0, %1(s64)
186 %3:gpr(p0) = COPY $x1
187 %4:gpr(p0) = G_PTR_ADD %3, %2
188 %5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
190 RET_ReallyLR implicit $x2
# FPR counterpart of ldrxrox_mul_rhs: mul-by-8 on the RHS folds into the
# scaled form of LDRDroX.
194 name: ldrdrox_mul_rhs
197 regBankSelected: true
198 tracksRegLiveness: true
199 machineFunctionInfo: {}
202 liveins: $x0, $x1, $d2
203 ; CHECK-LABEL: name: ldrdrox_mul_rhs
204 ; CHECK: liveins: $x0, $x1, $d2
205 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
206 ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
207 ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
208 ; CHECK: $d2 = COPY [[LDRDroX]]
209 ; CHECK: RET_ReallyLR implicit $d2
210 %0:gpr(s64) = COPY $x0
211 %1:gpr(s64) = G_CONSTANT i64 8
212 %2:gpr(s64) = G_MUL %0, %1(s64)
213 %3:gpr(p0) = COPY $x1
214 %4:gpr(p0) = G_PTR_ADD %3, %2
215 %5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
217 RET_ReallyLR implicit $d2
# Same as ldrxrox_mul_rhs, but with the constant 8 as the LHS of the G_MUL;
# the fold must be commutative.
221 name: ldrxrox_mul_lhs
224 regBankSelected: true
225 tracksRegLiveness: true
226 machineFunctionInfo: {}
229 liveins: $x0, $x1, $x2
230 ; CHECK-LABEL: name: ldrxrox_mul_lhs
231 ; CHECK: liveins: $x0, $x1, $x2
232 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
233 ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
234 ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
235 ; CHECK: $x2 = COPY [[LDRXroX]]
236 ; CHECK: RET_ReallyLR implicit $x2
237 %0:gpr(s64) = COPY $x0
238 %1:gpr(s64) = G_CONSTANT i64 8
239 %2:gpr(s64) = G_MUL %1, %0(s64)
240 %3:gpr(p0) = COPY $x1
241 %4:gpr(p0) = G_PTR_ADD %3, %2
242 %5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
244 RET_ReallyLR implicit $x2
# FPR counterpart of ldrxrox_mul_lhs: constant on the LHS of the G_MUL still
# folds into the scaled form of LDRDroX.
248 name: ldrdrox_mul_lhs
251 regBankSelected: true
252 tracksRegLiveness: true
253 machineFunctionInfo: {}
256 liveins: $x0, $x1, $d2
257 ; CHECK-LABEL: name: ldrdrox_mul_lhs
258 ; CHECK: liveins: $x0, $x1, $d2
259 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
260 ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
261 ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
262 ; CHECK: $d2 = COPY [[LDRDroX]]
263 ; CHECK: RET_ReallyLR implicit $d2
264 %0:gpr(s64) = COPY $x0
265 %1:gpr(s64) = G_CONSTANT i64 8
266 %2:gpr(s64) = G_MUL %1, %0(s64)
267 %3:gpr(p0) = COPY $x1
268 %4:gpr(p0) = G_PTR_ADD %3, %2
269 %5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
271 RET_ReallyLR implicit $d2
278 regBankSelected: true
279 tracksRegLiveness: true
280 machineFunctionInfo: {}
283 ; Show that we don't get a shifted load from a mul when we don't have a
284 ; power of 2. (The bit isn't set on the load.)
; 7 * x cannot be expressed as a shift, so a MADDXrrr materializes the index
; and the load uses the plain (unscaled) register-register form.
285 liveins: $x0, $x1, $d2
286 ; CHECK-LABEL: name: mul_not_pow_2
287 ; CHECK: liveins: $x0, $x1, $d2
288 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
289 ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 7
290 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
291 ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
292 ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
293 ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
294 ; CHECK: $d2 = COPY [[LDRDroX]]
295 ; CHECK: RET_ReallyLR implicit $d2
296 %0:gpr(s64) = COPY $x0
297 %1:gpr(s64) = G_CONSTANT i64 7
298 %2:gpr(s64) = G_MUL %1, %0(s64)
299 %3:gpr(p0) = COPY $x1
300 %4:gpr(p0) = G_PTR_ADD %3, %2
301 %5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
303 RET_ReallyLR implicit $d2
# 16 is a power of two, but it corresponds to a shift of 4; an 8-byte access
# can only fold a shift of 3, so a MADDXrrr is emitted instead (see the
# unscaled `0, 0` operands on the load).
307 name: mul_wrong_pow_2
310 regBankSelected: true
311 tracksRegLiveness: true
312 machineFunctionInfo: {}
315 ; Show that we don't get a shifted load from a mul when we don't have
316 ; the right power of 2. (The bit isn't set on the load.)
317 liveins: $x0, $x1, $d2
318 ; CHECK-LABEL: name: mul_wrong_pow_2
319 ; CHECK: liveins: $x0, $x1, $d2
320 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
321 ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 16
322 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
323 ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
324 ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
325 ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
326 ; CHECK: $d2 = COPY [[LDRDroX]]
327 ; CHECK: RET_ReallyLR implicit $d2
328 %0:gpr(s64) = COPY $x0
329 %1:gpr(s64) = G_CONSTANT i64 16
330 %2:gpr(s64) = G_MUL %1, %0(s64)
331 %3:gpr(p0) = COPY $x1
332 %4:gpr(p0) = G_PTR_ADD %3, %2
333 %5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
335 RET_ReallyLR implicit $d2
# The G_SHL result also feeds the G_ADD below, so the shift itself is not
# folded (it is selected separately as UBFMXri, i.e. LSL #3), but the
# G_PTR_ADD still folds into the unscaled register-register load.
339 name: more_than_one_use_shl_1
342 regBankSelected: true
343 tracksRegLiveness: true
344 machineFunctionInfo: {}
347 ; Show that we can still fall back to the register-register addressing
348 ; mode when we fail to pull in the shift.
349 liveins: $x0, $x1, $x2
350 ; CHECK-LABEL: name: more_than_one_use_shl_1
351 ; CHECK: liveins: $x0, $x1, $x2
352 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
353 ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
354 ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
355 ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[UBFMXri]], 0, 0 :: (load (s64) from %ir.addr)
356 ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
357 ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
358 ; CHECK: $x2 = COPY [[ADDXrr]]
359 ; CHECK: RET_ReallyLR implicit $x2
360 %0:gpr(s64) = COPY $x0
361 %1:gpr(s64) = G_CONSTANT i64 3
362 %2:gpr(s64) = G_SHL %0, %1(s64)
363 %3:gpr(p0) = COPY $x1
364 %4:gpr(p0) = G_PTR_ADD %3, %2
365 %5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
366 %6:gpr(s64) = G_ADD %2, %1
367 %7:gpr(s64) = G_ADD %5, %6
369 RET_ReallyLR implicit $x2
373 name: more_than_one_use_shl_2
376 regBankSelected: true
377 tracksRegLiveness: true
378 machineFunctionInfo: {}
381 ; Show that when the GEP is used outside a memory op, we don't do any
; folding at all: the G_PTR_ADD is also consumed by the G_PTRTOINT below, so
; it is selected as a plain ADDXrr and the load uses the immediate form.
383 liveins: $x0, $x1, $x2
384 ; CHECK-LABEL: name: more_than_one_use_shl_2
385 ; CHECK: liveins: $x0, $x1, $x2
386 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
387 ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
388 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
389 ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
390 ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
391 ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
392 ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[ADDXri]]
393 ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
394 ; CHECK: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[ADDXrr1]]
395 ; CHECK: $x2 = COPY [[ADDXrr2]]
396 ; CHECK: RET_ReallyLR implicit $x2
397 %0:gpr(s64) = COPY $x0
398 %1:gpr(s64) = G_CONSTANT i64 3
399 %2:gpr(s64) = G_SHL %0, %1(s64)
400 %3:gpr(p0) = COPY $x1
401 %4:gpr(p0) = G_PTR_ADD %3, %2
402 %5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
403 %6:gpr(s64) = G_ADD %2, %1
404 %7:gpr(s64) = G_ADD %5, %6
405 %8:gpr(s64) = G_PTRTOINT %4
406 %9:gpr(s64) = G_ADD %8, %7
408 RET_ReallyLR implicit $x2
# This function carries attribute #1 (+addr-lsl-fast), so the shifted
# address is folded into BOTH loads even though it has multiple uses.
412 name: more_than_one_use_shl_lsl_fast
415 regBankSelected: true
416 tracksRegLiveness: true
417 machineFunctionInfo: {}
420 ; Show that when we have a fastpath for shift-left, we perform the folding
421 ; if it has more than one use.
422 liveins: $x0, $x1, $x2
423 ; CHECK-LABEL: name: more_than_one_use_shl_lsl_fast
424 ; CHECK: liveins: $x0, $x1, $x2
425 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
426 ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
427 ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
428 ; CHECK: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
429 ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[LDRXroX1]]
430 ; CHECK: $x2 = COPY [[ADDXrr]]
431 ; CHECK: RET_ReallyLR implicit $x2
432 %0:gpr(s64) = COPY $x0
433 %1:gpr(s64) = G_CONSTANT i64 3
434 %2:gpr(s64) = G_SHL %0, %1(s64)
435 %3:gpr(p0) = COPY $x1
436 %4:gpr(p0) = G_PTR_ADD %3, %2
437 %5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
438 %6:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
439 %7:gpr(s64) = G_ADD %5, %6
441 RET_ReallyLR implicit $x2
# Same input as the lsl_fast test but without the feature: the address is
# computed once with ADDXrs and both loads use the immediate form.
445 name: more_than_one_use_shl_lsl_slow
448 regBankSelected: true
449 tracksRegLiveness: true
450 machineFunctionInfo: {}
453 ; Show that we don't fold into multiple memory ops when we don't have a
454 ; fastpath for shift-left.
455 liveins: $x0, $x1, $x2
456 ; CHECK-LABEL: name: more_than_one_use_shl_lsl_slow
457 ; CHECK: liveins: $x0, $x1, $x2
458 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
459 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
460 ; CHECK: [[ADDXrs:%[0-9]+]]:gpr64common = ADDXrs [[COPY1]], [[COPY]], 3
461 ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
462 ; CHECK: [[LDRXui1:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
463 ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[LDRXui1]]
464 ; CHECK: $x2 = COPY [[ADDXrr]]
465 ; CHECK: RET_ReallyLR implicit $x2
466 %0:gpr(s64) = COPY $x0
467 %1:gpr(s64) = G_CONSTANT i64 3
468 %2:gpr(s64) = G_SHL %0, %1(s64)
469 %3:gpr(p0) = COPY $x1
470 %4:gpr(p0) = G_PTR_ADD %3, %2
471 %5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
472 %6:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
473 %7:gpr(s64) = G_ADD %5, %6
475 RET_ReallyLR implicit $x2
# This function carries attribute #0 (optsize), so the scaled fold is
# performed for the load despite the shift having other uses.
479 name: more_than_one_use_shl_minsize
482 regBankSelected: true
483 tracksRegLiveness: true
484 machineFunctionInfo: {}
487 ; Show that when we're optimizing for size, we'll do the folding no matter
; what.
489 liveins: $x0, $x1, $x2
490 ; CHECK-LABEL: name: more_than_one_use_shl_minsize
491 ; CHECK: liveins: $x0, $x1, $x2
492 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
493 ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
494 ; CHECK: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
495 ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
496 ; CHECK: [[ADDXrs:%[0-9]+]]:gpr64 = ADDXrs [[COPY2]], [[COPY]], 3
497 ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
498 ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
499 ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
500 ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrs]], [[ADDXrr]]
501 ; CHECK: $x2 = COPY [[ADDXrr1]]
502 ; CHECK: RET_ReallyLR implicit $x2
503 %0:gpr(s64) = COPY $x0
504 %1:gpr(s64) = G_CONSTANT i64 3
505 %2:gpr(s64) = G_SHL %0, %1(s64)
506 %3:gpr(p0) = COPY $x1
507 %4:gpr(p0) = G_PTR_ADD %3, %2
508 %5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
509 %6:gpr(s64) = G_ADD %2, %1
510 %7:gpr(s64) = G_ADD %5, %6
511 %8:gpr(s64) = G_PTRTOINT %4
512 %9:gpr(s64) = G_ADD %8, %7
514 RET_ReallyLR implicit $x2
# 32-bit GPR load: base + offset folds into LDRWroX.
520 regBankSelected: true
521 tracksRegLiveness: true
522 machineFunctionInfo: {}
526 ; CHECK-LABEL: name: ldrwrox
527 ; CHECK: liveins: $x0, $x1
528 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
529 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
530 ; CHECK: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
531 ; CHECK: $w2 = COPY [[LDRWroX]]
532 ; CHECK: RET_ReallyLR implicit $w2
533 %0:gpr(p0) = COPY $x0
534 %1:gpr(s64) = COPY $x1
535 %2:gpr(p0) = G_PTR_ADD %0, %1
536 %4:gpr(s32) = G_LOAD %2(p0) :: (load (s32) from %ir.addr)
538 RET_ReallyLR implicit $w2
# 32-bit FPR load: base + offset folds into LDRSroX.
544 regBankSelected: true
545 tracksRegLiveness: true
546 machineFunctionInfo: {}
550 ; CHECK-LABEL: name: ldrsrox
551 ; CHECK: liveins: $d0, $x1
552 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
553 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
554 ; CHECK: [[LDRSroX:%[0-9]+]]:fpr32 = LDRSroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
555 ; CHECK: $s2 = COPY [[LDRSroX]]
; The RET's implicit use now names $s2, matching the register the s32 result
; is copied into. It previously said $h2 — a copy-paste from the ldrhrox
; test below; every sibling test's implicit operand matches its result copy.
556 ; CHECK: RET_ReallyLR implicit $s2
557 %0:gpr(p0) = COPY $d0
558 %1:gpr(s64) = COPY $x1
559 %2:gpr(p0) = G_PTR_ADD %0, %1
560 %4:fpr(s32) = G_LOAD %2(p0) :: (load (s32) from %ir.addr)
562 RET_ReallyLR implicit $s2
# 16-bit FPR load: base + offset folds into LDRHroX.
568 regBankSelected: true
569 tracksRegLiveness: true
570 machineFunctionInfo: {}
574 ; CHECK-LABEL: name: ldrhrox
575 ; CHECK: liveins: $x0, $x1
576 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
577 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
578 ; CHECK: [[LDRHroX:%[0-9]+]]:fpr16 = LDRHroX [[COPY]], [[COPY1]], 0, 0 :: (load (s16) from %ir.addr)
579 ; CHECK: $h2 = COPY [[LDRHroX]]
580 ; CHECK: RET_ReallyLR implicit $h2
581 %0:gpr(p0) = COPY $x0
582 %1:gpr(s64) = COPY $x1
583 %2:gpr(p0) = G_PTR_ADD %0, %1
584 %4:fpr(s16) = G_LOAD %2(p0) :: (load (s16) from %ir.addr)
586 RET_ReallyLR implicit $h2
# s8 load into a 32-bit GPR: base + offset folds into LDRBBroX.
592 regBankSelected: true
593 tracksRegLiveness: true
594 machineFunctionInfo: {}
598 ; CHECK-LABEL: name: ldbbrox
599 ; CHECK: liveins: $x0, $x1
600 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
601 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
602 ; CHECK: [[LDRBBroX:%[0-9]+]]:gpr32 = LDRBBroX [[COPY]], [[COPY1]], 0, 0 :: (load (s8) from %ir.addr)
603 ; CHECK: $w2 = COPY [[LDRBBroX]]
604 ; CHECK: RET_ReallyLR implicit $w2
605 %0:gpr(p0) = COPY $x0
606 %1:gpr(s64) = COPY $x1
607 %2:gpr(p0) = G_PTR_ADD %0, %1
608 %4:gpr(s32) = G_LOAD %2(p0) :: (load (s8) from %ir.addr)
610 RET_ReallyLR implicit $w2
# 128-bit vector load: base + offset folds into LDRQroX.
616 regBankSelected: true
617 tracksRegLiveness: true
618 machineFunctionInfo: {}
622 ; CHECK-LABEL: name: ldrqrox
623 ; CHECK: liveins: $d0, $x1
624 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
625 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
626 ; CHECK: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY]], [[COPY1]], 0, 0 :: (load (<2 x s64>) from %ir.addr)
627 ; CHECK: $q0 = COPY [[LDRQroX]]
628 ; CHECK: RET_ReallyLR implicit $q0
629 %0:gpr(p0) = COPY $d0
630 %1:gpr(s64) = COPY $x1
631 %2:gpr(p0) = G_PTR_ADD %0, %1
632 %4:fpr(<2 x s64>) = G_LOAD %2(p0) :: (load (<2 x s64>) from %ir.addr)
633 $q0 = COPY %4(<2 x s64>)
634 RET_ReallyLR implicit $q0