# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64-unknown-unknown -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
define void @ldrxrox_breg_oreg(i64* %addr) { ret void }
define void @ldrdrox_breg_oreg(i64* %addr) { ret void }
define void @more_than_one_use(i64* %addr) { ret void }
define void @ldrxrox_shl(i64* %addr) { ret void }
define void @ldrdrox_shl(i64* %addr) { ret void }
define void @ldrxrox_mul_rhs(i64* %addr) { ret void }
define void @ldrdrox_mul_rhs(i64* %addr) { ret void }
define void @ldrxrox_mul_lhs(i64* %addr) { ret void }
define void @ldrdrox_mul_lhs(i64* %addr) { ret void }
define void @mul_not_pow_2(i64* %addr) { ret void }
define void @mul_wrong_pow_2(i64* %addr) { ret void }
define void @more_than_one_use_shl_1(i64* %addr) { ret void }
define void @more_than_one_use_shl_2(i64* %addr) { ret void }
define void @more_than_one_use_shl_lsl_fast(i64* %addr) #1 { ret void }
define void @more_than_one_use_shl_lsl_slow(i64* %addr) { ret void }
define void @more_than_one_use_shl_minsize(i64* %addr) #0 { ret void }
define void @ldrwrox(i64* %addr) { ret void }
define void @ldrsrox(i64* %addr) { ret void }
define void @ldrhrox(i64* %addr) { ret void }
define void @ldbbrox(i64* %addr) { ret void }
define void @ldrqrox(i64* %addr) { ret void }
attributes #0 = { optsize minsize }
attributes #1 = { "target-features"="+lsl-fast" }
name: ldrxrox_breg_oreg
tracksRegLiveness: true
machineFunctionInfo: {}
; CHECK-LABEL: name: ldrxrox_breg_oreg
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[COPY1]], 0, 0 :: (load 8 from %ir.addr)
; CHECK: $x0 = COPY [[LDRXroX]]
; CHECK: RET_ReallyLR implicit $x0
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_GEP %0, %1
%4:gpr(s64) = G_LOAD %2(p0) :: (load 8 from %ir.addr)
$x0 = COPY %4(s64)
RET_ReallyLR implicit $x0
name: ldrdrox_breg_oreg
tracksRegLiveness: true
machineFunctionInfo: {}
; CHECK-LABEL: name: ldrdrox_breg_oreg
; CHECK: liveins: $d0, $x1
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY]], [[COPY1]], 0, 0 :: (load 8 from %ir.addr)
; CHECK: $d0 = COPY [[LDRDroX]]
; CHECK: RET_ReallyLR implicit $d0
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_GEP %0, %1
%4:fpr(s64) = G_LOAD %2(p0) :: (load 8 from %ir.addr)
$d0 = COPY %4(s64)
RET_ReallyLR implicit $d0
name: more_than_one_use
tracksRegLiveness: true
machineFunctionInfo: {}
; This shouldn't be folded, since we reuse the result of the G_GEP outside
; the G_LOAD.
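; (If the G_GEP result is needed as a value elsewhere, the add must be
; materialized anyway, so folding it into the load's addressing mode saves
; nothing.)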
; CHECK-LABEL: name: more_than_one_use
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY]], [[COPY1]]
; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load 8 from %ir.addr)
; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[LDRXui]]
; CHECK: $x0 = COPY [[ADDXrr1]]
; CHECK: RET_ReallyLR implicit $x0
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_GEP %0, %1
%4:gpr(s64) = G_LOAD %2(p0) :: (load 8 from %ir.addr)
%5:gpr(s64) = G_PTRTOINT %2
%6:gpr(s64) = G_ADD %5, %4
$x0 = COPY %6(s64)
RET_ReallyLR implicit $x0
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrxrox_shl
; CHECK: liveins: $x0, $x1, $x2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
; CHECK: $x2 = COPY [[LDRXroX]]
; CHECK: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
$x2 = COPY %5(s64)
RET_ReallyLR implicit $x2
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: ldrdrox_shl
; CHECK: liveins: $x0, $x1, $d2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
; CHECK: $d2 = COPY [[LDRDroX]]
; CHECK: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
$d2 = COPY %5(s64)
RET_ReallyLR implicit $d2
name: ldrxrox_mul_rhs
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrxrox_mul_rhs
; CHECK: liveins: $x0, $x1, $x2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
; CHECK: $x2 = COPY [[LDRXroX]]
; CHECK: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
$x2 = COPY %5(s64)
RET_ReallyLR implicit $x2
name: ldrdrox_mul_rhs
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: ldrdrox_mul_rhs
; CHECK: liveins: $x0, $x1, $d2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
; CHECK: $d2 = COPY [[LDRDroX]]
; CHECK: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
$d2 = COPY %5(s64)
RET_ReallyLR implicit $d2
name: ldrxrox_mul_lhs
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrxrox_mul_lhs
; CHECK: liveins: $x0, $x1, $x2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
; CHECK: $x2 = COPY [[LDRXroX]]
; CHECK: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %1, %0(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
$x2 = COPY %5(s64)
RET_ReallyLR implicit $x2
name: ldrdrox_mul_lhs
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: ldrdrox_mul_lhs
; CHECK: liveins: $x0, $x1, $d2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
; CHECK: $d2 = COPY [[LDRDroX]]
; CHECK: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %1, %0(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
$d2 = COPY %5(s64)
RET_ReallyLR implicit $d2
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
; Show that we don't get a shifted load from a mul when we don't have a
; power of 2. (The bit isn't set on the load.)
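; (A multiply by a power of two is just a left shift, e.g. x * 8 == x << 3;
; x * 7 has no single-shift equivalent.)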
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: mul_not_pow_2
; CHECK: liveins: $x0, $x1, $d2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 7
; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[MOVi64imm]], [[COPY]], $xzr
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load 8 from %ir.addr)
; CHECK: $d2 = COPY [[LDRDroX]]
; CHECK: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 7
%2:gpr(s64) = G_MUL %1, %0(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
$d2 = COPY %5(s64)
RET_ReallyLR implicit $d2
name: mul_wrong_pow_2
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
; Show that we don't get a shifted load from a mul when we don't have
; the right power of 2. (The bit isn't set on the load.)
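; (An 8-byte load can only fold a shift of 3, i.e. a multiply by 8; a
; multiply by 16 would need a shift of 4.)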
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: mul_wrong_pow_2
; CHECK: liveins: $x0, $x1, $d2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 16
; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[MOVi64imm]], [[COPY]], $xzr
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load 8 from %ir.addr)
; CHECK: $d2 = COPY [[LDRDroX]]
; CHECK: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 16
%2:gpr(s64) = G_MUL %1, %0(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
$d2 = COPY %5(s64)
RET_ReallyLR implicit $d2
name: more_than_one_use_shl_1
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
; Show that we can still fall back to the register-register addressing
; mode when we fail to pull in the shift.
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_1
; CHECK: liveins: $x0, $x1, $x2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[UBFMXri]], 0, 0 :: (load 8 from %ir.addr)
; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
; CHECK: $x2 = COPY [[ADDXrr]]
; CHECK: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%6:gpr(s64) = G_ADD %2, %1
%7:gpr(s64) = G_ADD %5, %6
$x2 = COPY %7(s64)
RET_ReallyLR implicit $x2
name: more_than_one_use_shl_2
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
; Show that when the GEP is used outside a memory op, we don't do any
; folding at all.
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_2
; CHECK: liveins: $x0, $x1, $x2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load 8 from %ir.addr)
; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[ADDXri]]
; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
; CHECK: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[ADDXrr1]]
; CHECK: $x2 = COPY [[ADDXrr2]]
; CHECK: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%6:gpr(s64) = G_ADD %2, %1
%7:gpr(s64) = G_ADD %5, %6
%8:gpr(s64) = G_PTRTOINT %4
%9:gpr(s64) = G_ADD %8, %7
$x2 = COPY %9(s64)
RET_ReallyLR implicit $x2
name: more_than_one_use_shl_lsl_fast
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
; Show that when we have a fastpath for shift-left, we perform the folding
; even if it has more than one use.
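; (With +lsl-fast, a shifted register operand is cheap on this target, so
; repeating the shift in each load's addressing mode is still profitable.)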
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_lsl_fast
; CHECK: liveins: $x0, $x1, $x2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
; CHECK: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[LDRXroX1]]
; CHECK: $x2 = COPY [[ADDXrr]]
; CHECK: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%6:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%7:gpr(s64) = G_ADD %5, %6
$x2 = COPY %7(s64)
RET_ReallyLR implicit $x2
name: more_than_one_use_shl_lsl_slow
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
; Show that we don't fold into multiple memory ops when we don't have a
; fastpath for shift-left.
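; (Without +lsl-fast, folding the shift into each load is considered
; expensive, so the shift is materialized once with UBFM and both loads
; reuse a single address computation.)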
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_lsl_slow
; CHECK: liveins: $x0, $x1, $x2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[UBFMXri:%[0-9]+]]:gpr64 = UBFMXri [[COPY]], 61, 60
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load 8 from %ir.addr)
; CHECK: [[LDRXui1:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load 8 from %ir.addr)
; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[LDRXui1]]
; CHECK: $x2 = COPY [[ADDXrr1]]
; CHECK: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%6:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%7:gpr(s64) = G_ADD %5, %6
$x2 = COPY %7(s64)
RET_ReallyLR implicit $x2
name: more_than_one_use_shl_minsize
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
; Show that when we're optimizing for size, we'll do the folding no matter
; how many uses the shift has.
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_minsize
; CHECK: liveins: $x0, $x1, $x2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
; CHECK: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[COPY1]], [[UBFMXri]]
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
; CHECK: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrr]], [[ADDXrr1]]
; CHECK: $x2 = COPY [[ADDXrr2]]
; CHECK: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_GEP %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%6:gpr(s64) = G_ADD %2, %1
%7:gpr(s64) = G_ADD %5, %6
%8:gpr(s64) = G_PTRTOINT %4
%9:gpr(s64) = G_ADD %8, %7
$x2 = COPY %9(s64)
RET_ReallyLR implicit $x2
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
; CHECK-LABEL: name: ldrwrox
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY]], [[COPY1]], 0, 0 :: (load 4 from %ir.addr)
; CHECK: $w2 = COPY [[LDRWroX]]
; CHECK: RET_ReallyLR implicit $w2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_GEP %0, %1
%4:gpr(s32) = G_LOAD %2(p0) :: (load 4 from %ir.addr)
$w2 = COPY %4(s32)
RET_ReallyLR implicit $w2
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
; CHECK-LABEL: name: ldrsrox
; CHECK: liveins: $d0, $x1
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[LDRSroX:%[0-9]+]]:fpr32 = LDRSroX [[COPY]], [[COPY1]], 0, 0 :: (load 4 from %ir.addr)
; CHECK: $s2 = COPY [[LDRSroX]]
; CHECK: RET_ReallyLR implicit $s2
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_GEP %0, %1
%4:fpr(s32) = G_LOAD %2(p0) :: (load 4 from %ir.addr)
$s2 = COPY %4(s32)
RET_ReallyLR implicit $s2
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
; CHECK-LABEL: name: ldrhrox
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[LDRHroX:%[0-9]+]]:fpr16 = LDRHroX [[COPY]], [[COPY1]], 0, 0 :: (load 2 from %ir.addr)
; CHECK: $h2 = COPY [[LDRHroX]]
; CHECK: RET_ReallyLR implicit $h2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_GEP %0, %1
%4:fpr(s16) = G_LOAD %2(p0) :: (load 2 from %ir.addr)
$h2 = COPY %4(s16)
RET_ReallyLR implicit $h2
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
; CHECK-LABEL: name: ldbbrox
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[LDRBBroX:%[0-9]+]]:gpr32 = LDRBBroX [[COPY]], [[COPY1]], 0, 0 :: (load 1 from %ir.addr)
; CHECK: $w2 = COPY [[LDRBBroX]]
; CHECK: RET_ReallyLR implicit $w2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_GEP %0, %1
%4:gpr(s32) = G_LOAD %2(p0) :: (load 1 from %ir.addr)
$w2 = COPY %4(s32)
RET_ReallyLR implicit $w2
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
; CHECK-LABEL: name: ldrqrox
; CHECK: liveins: $d0, $x1
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY]], [[COPY1]], 0, 0 :: (load 16 from %ir.addr)
; CHECK: $q0 = COPY [[LDRQroX]]
; CHECK: RET_ReallyLR implicit $q0
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_GEP %0, %1
%4:fpr(<2 x s64>) = G_LOAD %2(p0) :: (load 16 from %ir.addr)
$q0 = COPY %4(<2 x s64>)
RET_ReallyLR implicit $q0