# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
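#
# Test that a G_GEP, and any shift or multiply that feeds it, is folded into
# the register-offset addressing modes of the AArch64 load instructions during
# instruction selection.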
# RUN: llc -mtriple=aarch64-unknown-unknown -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s

--- |
  define void @ldrxrox_breg_oreg(i64* %addr) { ret void }
  define void @ldrdrox_breg_oreg(i64* %addr) { ret void }
  define void @more_than_one_use(i64* %addr) { ret void }
  define void @ldrxrox_shl(i64* %addr) { ret void }
  define void @ldrdrox_shl(i64* %addr) { ret void }
  define void @ldrxrox_mul_rhs(i64* %addr) { ret void }
  define void @ldrdrox_mul_rhs(i64* %addr) { ret void }
  define void @ldrxrox_mul_lhs(i64* %addr) { ret void }
  define void @ldrdrox_mul_lhs(i64* %addr) { ret void }
  define void @mul_not_pow_2(i64* %addr) { ret void }
  define void @mul_wrong_pow_2(i64* %addr) { ret void }
  define void @more_than_one_use_shl_1(i64* %addr) { ret void }
  define void @more_than_one_use_shl_2(i64* %addr) { ret void }
  define void @more_than_one_use_shl_lsl_fast(i64* %addr) #1 { ret void }
  define void @more_than_one_use_shl_lsl_slow(i64* %addr) { ret void }
  define void @more_than_one_use_shl_minsize(i64* %addr) #0 { ret void }
  attributes #0 = { optsize minsize }
  attributes #1 = { "target-features"="+lsl-fast" }

...
---
name: ldrxrox_breg_oreg
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    liveins: $x0, $x1

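    ; The G_GEP of a base pointer and an offset register should be selected as
    ; a single LDRXroX, with the add folded into the addressing mode.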
    ; CHECK-LABEL: name: ldrxrox_breg_oreg
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[COPY1]], 0, 0 :: (load 8 from %ir.addr)
    ; CHECK: $x0 = COPY [[LDRXroX]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(p0) = COPY $x0
    %1:gpr(s64) = COPY $x1
    %2:gpr(p0) = G_GEP %0, %1
    %4:gpr(s64) = G_LOAD %2(p0) :: (load 8 from %ir.addr)
    $x0 = COPY %4(s64)
    RET_ReallyLR implicit $x0

...
---
name: ldrdrox_breg_oreg
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    liveins: $d0, $x1
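    ; Same fold as above, but the FPR destination means we select LDRDroX.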
    ; CHECK-LABEL: name: ldrdrox_breg_oreg
    ; CHECK: liveins: $d0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY]], [[COPY1]], 0, 0 :: (load 8 from %ir.addr)
    ; CHECK: $d0 = COPY [[LDRDroX]]
    ; CHECK: RET_ReallyLR implicit $d0
    %0:gpr(p0) = COPY $d0
    %1:gpr(s64) = COPY $x1
    %2:gpr(p0) = G_GEP %0, %1
    %4:fpr(s64) = G_LOAD %2(p0) :: (load 8 from %ir.addr)
    $d0 = COPY %4(s64)
    RET_ReallyLR implicit $d0

...
---
name: more_than_one_use
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    liveins: $x0, $x1
    ; This shouldn't be folded, since we reuse the result of the G_GEP outside
    ; the G_LOAD.
    ; CHECK-LABEL: name: more_than_one_use
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY]], [[COPY1]]
    ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load 8 from %ir.addr)
    ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
    ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[LDRXui]]
    ; CHECK: $x0 = COPY [[ADDXrr1]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(p0) = COPY $x0
    %1:gpr(s64) = COPY $x1
    %2:gpr(p0) = G_GEP %0, %1
    %4:gpr(s64) = G_LOAD %2(p0) :: (load 8 from %ir.addr)
    %5:gpr(s64) = G_PTRTOINT %2
    %6:gpr(s64) = G_ADD %5, %4
    $x0 = COPY %6(s64)
    RET_ReallyLR implicit $x0

...
---
name: ldrxrox_shl
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    liveins: $x0, $x1, $x2
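    ; The G_SHL by 3 feeding the G_GEP should also be folded away; the shift is
    ; reflected in the final operand of the LDRXroX.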
    ; CHECK-LABEL: name: ldrxrox_shl
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
    ; CHECK: $x2 = COPY [[LDRXroX]]
    ; CHECK: RET_ReallyLR implicit $x2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 3
    %2:gpr(s64) = G_SHL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_GEP %3, %2
    %5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    $x2 = COPY %5(s64)
    RET_ReallyLR implicit $x2

...
---
name: ldrdrox_shl
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    liveins: $x0, $x1, $d2
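    ; As above, but with an FPR destination, so we should select LDRDroX.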
    ; CHECK-LABEL: name: ldrdrox_shl
    ; CHECK: liveins: $x0, $x1, $d2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
    ; CHECK: $d2 = COPY [[LDRDroX]]
    ; CHECK: RET_ReallyLR implicit $d2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 3
    %2:gpr(s64) = G_SHL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_GEP %3, %2
    %5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    $d2 = COPY %5(s64)
    RET_ReallyLR implicit $d2

...
---
name: ldrxrox_mul_rhs
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    liveins: $x0, $x1, $x2
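    ; A G_MUL by 8 on the RHS is equivalent to a shift by 3, so it should fold
    ; exactly like the G_SHL above.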
    ; CHECK-LABEL: name: ldrxrox_mul_rhs
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
    ; CHECK: $x2 = COPY [[LDRXroX]]
    ; CHECK: RET_ReallyLR implicit $x2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 8
    %2:gpr(s64) = G_MUL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_GEP %3, %2
    %5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    $x2 = COPY %5(s64)
    RET_ReallyLR implicit $x2

...
---
name: ldrdrox_mul_rhs
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    liveins: $x0, $x1, $d2
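    ; Same as ldrxrox_mul_rhs, but the FPR destination selects LDRDroX.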
    ; CHECK-LABEL: name: ldrdrox_mul_rhs
    ; CHECK: liveins: $x0, $x1, $d2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
    ; CHECK: $d2 = COPY [[LDRDroX]]
    ; CHECK: RET_ReallyLR implicit $d2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 8
    %2:gpr(s64) = G_MUL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_GEP %3, %2
    %5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    $d2 = COPY %5(s64)
    RET_ReallyLR implicit $d2

...
---
name: ldrxrox_mul_lhs
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    liveins: $x0, $x1, $x2
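    ; The power-of-2 constant can appear on the LHS of the G_MUL as well.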
    ; CHECK-LABEL: name: ldrxrox_mul_lhs
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
    ; CHECK: $x2 = COPY [[LDRXroX]]
    ; CHECK: RET_ReallyLR implicit $x2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 8
    %2:gpr(s64) = G_MUL %1, %0(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_GEP %3, %2
    %5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    $x2 = COPY %5(s64)
    RET_ReallyLR implicit $x2

...
---
name: ldrdrox_mul_lhs
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    liveins: $x0, $x1, $d2
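    ; LHS-constant G_MUL with an FPR destination should still become LDRDroX.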
    ; CHECK-LABEL: name: ldrdrox_mul_lhs
    ; CHECK: liveins: $x0, $x1, $d2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
    ; CHECK: $d2 = COPY [[LDRDroX]]
    ; CHECK: RET_ReallyLR implicit $d2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 8
    %2:gpr(s64) = G_MUL %1, %0(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_GEP %3, %2
    %5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    $d2 = COPY %5(s64)
    RET_ReallyLR implicit $d2

...
---
name: mul_not_pow_2
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    ; Show that we don't get a shifted load from a mul when we don't have a
    ; power of 2. (The bit isn't set on the load.)
    liveins: $x0, $x1, $d2
    ; CHECK-LABEL: name: mul_not_pow_2
    ; CHECK: liveins: $x0, $x1, $d2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 7
    ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[MOVi64imm]], [[COPY]], $xzr
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load 8 from %ir.addr)
    ; CHECK: $d2 = COPY [[LDRDroX]]
    ; CHECK: RET_ReallyLR implicit $d2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 7
    %2:gpr(s64) = G_MUL %1, %0(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_GEP %3, %2
    %5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    $d2 = COPY %5(s64)
    RET_ReallyLR implicit $d2

...
---
name: mul_wrong_pow_2
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    ; Show that we don't get a shifted load from a mul when we don't have
    ; the right power of 2. (The bit isn't set on the load.)
    liveins: $x0, $x1, $d2
    ; CHECK-LABEL: name: mul_wrong_pow_2
    ; CHECK: liveins: $x0, $x1, $d2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 16
    ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[MOVi64imm]], [[COPY]], $xzr
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load 8 from %ir.addr)
    ; CHECK: $d2 = COPY [[LDRDroX]]
    ; CHECK: RET_ReallyLR implicit $d2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 16
    %2:gpr(s64) = G_MUL %1, %0(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_GEP %3, %2
    %5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    $d2 = COPY %5(s64)
    RET_ReallyLR implicit $d2

...
---
name: more_than_one_use_shl_1
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    ; Show that we can still fall back to the register-register addressing
    ; mode when we fail to pull in the shift.
    liveins: $x0, $x1, $x2
    ; CHECK-LABEL: name: more_than_one_use_shl_1
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[UBFMXri]], 0, 0 :: (load 8 from %ir.addr)
    ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
    ; CHECK: $x2 = COPY [[ADDXrr]]
    ; CHECK: RET_ReallyLR implicit $x2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 3
    %2:gpr(s64) = G_SHL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_GEP %3, %2
    %5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    %6:gpr(s64) = G_ADD %2, %1
    %7:gpr(s64) = G_ADD %5, %6
    $x2 = COPY %7(s64)
    RET_ReallyLR implicit $x2

...
---
name: more_than_one_use_shl_2
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    ; Show that when the GEP is used outside a memory op, we don't do any
    ; folding at all.
    liveins: $x0, $x1, $x2
    ; CHECK-LABEL: name: more_than_one_use_shl_2
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
    ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load 8 from %ir.addr)
    ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
    ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[ADDXri]]
    ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
    ; CHECK: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[ADDXrr1]]
    ; CHECK: $x2 = COPY [[ADDXrr2]]
    ; CHECK: RET_ReallyLR implicit $x2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 3
    %2:gpr(s64) = G_SHL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_GEP %3, %2
    %5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    %6:gpr(s64) = G_ADD %2, %1
    %7:gpr(s64) = G_ADD %5, %6
    %8:gpr(s64) = G_PTRTOINT %4
    %9:gpr(s64) = G_ADD %8, %7
    $x2 = COPY %9(s64)
    RET_ReallyLR implicit $x2

...
---
name: more_than_one_use_shl_lsl_fast
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    ; Show that when we have a fastpath for shift-left, we perform the folding
    ; even if the shift has more than one use.
    liveins: $x0, $x1, $x2
    ; CHECK-LABEL: name: more_than_one_use_shl_lsl_fast
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
    ; CHECK: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[LDRXroX1]]
    ; CHECK: $x2 = COPY [[ADDXrr]]
    ; CHECK: RET_ReallyLR implicit $x2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 3
    %2:gpr(s64) = G_SHL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_GEP %3, %2
    %5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    %6:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    %7:gpr(s64) = G_ADD %5, %6
    $x2 = COPY %7(s64)
    RET_ReallyLR implicit $x2

...
---
name: more_than_one_use_shl_lsl_slow
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    ; Show that we don't fold into multiple memory ops when we don't have a
    ; fastpath for shift-left.
    liveins: $x0, $x1, $x2
    ; CHECK-LABEL: name: more_than_one_use_shl_lsl_slow
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64 = UBFMXri [[COPY]], 61, 60
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
    ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load 8 from %ir.addr)
    ; CHECK: [[LDRXui1:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load 8 from %ir.addr)
    ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[LDRXui1]]
    ; CHECK: $x2 = COPY [[ADDXrr1]]
    ; CHECK: RET_ReallyLR implicit $x2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 3
    %2:gpr(s64) = G_SHL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_GEP %3, %2
    %5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    %6:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    %7:gpr(s64) = G_ADD %5, %6
    $x2 = COPY %7(s64)
    RET_ReallyLR implicit $x2

...
---
name: more_than_one_use_shl_minsize
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.0:
    ; Show that when we're optimizing for size, we'll do the folding no matter
    ; what.
    liveins: $x0, $x1, $x2
    ; CHECK-LABEL: name: more_than_one_use_shl_minsize
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
    ; CHECK: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[COPY1]], [[UBFMXri]]
    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
    ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
    ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
    ; CHECK: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrr]], [[ADDXrr1]]
    ; CHECK: $x2 = COPY [[ADDXrr2]]
    ; CHECK: RET_ReallyLR implicit $x2
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 3
    %2:gpr(s64) = G_SHL %0, %1(s64)
    %3:gpr(p0) = COPY $x1
    %4:gpr(p0) = G_GEP %3, %2
    %5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
    %6:gpr(s64) = G_ADD %2, %1
    %7:gpr(s64) = G_ADD %5, %6
    %8:gpr(s64) = G_PTRTOINT %4
    %9:gpr(s64) = G_ADD %8, %7
    $x2 = COPY %9(s64)
    RET_ReallyLR implicit $x2

...