1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
2 ; RUN: llc < %s -o - | FileCheck %s
3 ; RUN: llc -mattr=+alu-lsl-fast --aarch64-enable-sink-fold=false < %s -o - | FileCheck %s -check-prefix=LSLFAST
4 target triple = "aarch64-linux"
8 ; Check that ADDWrs/ADDXrs with shift > 4 is considered relatively slow, thus CSE-d.
10 define void @f0(i1 %c0, i1 %c1, ptr %a, i64 %i) {
12 ; CHECK: // %bb.0: // %E
13 ; CHECK-NEXT: tbz w0, #0, .LBB0_5
14 ; CHECK-NEXT: // %bb.1: // %A
15 ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
16 ; CHECK-NEXT: .cfi_def_cfa_offset 16
17 ; CHECK-NEXT: .cfi_offset w30, -16
18 ; CHECK-NEXT: add x0, x2, x3, lsl #5
19 ; CHECK-NEXT: tbz w1, #0, .LBB0_3
20 ; CHECK-NEXT: // %bb.2: // %B
22 ; CHECK-NEXT: b .LBB0_4
23 ; CHECK-NEXT: .LBB0_3: // %C
24 ; CHECK-NEXT: mov x1, x0
26 ; CHECK-NEXT: .LBB0_4:
27 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
28 ; CHECK-NEXT: .LBB0_5: // %X
32 ; LSLFAST: // %bb.0: // %E
33 ; LSLFAST-NEXT: tbz w0, #0, .LBB0_5
34 ; LSLFAST-NEXT: // %bb.1: // %A
35 ; LSLFAST-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
36 ; LSLFAST-NEXT: .cfi_def_cfa_offset 16
37 ; LSLFAST-NEXT: .cfi_offset w30, -16
38 ; LSLFAST-NEXT: add x0, x2, x3, lsl #5
39 ; LSLFAST-NEXT: tbz w1, #0, .LBB0_3
40 ; LSLFAST-NEXT: // %bb.2: // %B
42 ; LSLFAST-NEXT: b .LBB0_4
43 ; LSLFAST-NEXT: .LBB0_3: // %C
44 ; LSLFAST-NEXT: mov x1, x0
46 ; LSLFAST-NEXT: .LBB0_4:
47 ; LSLFAST-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
48 ; LSLFAST-NEXT: .LBB0_5: // %X
51 %p0 = getelementptr {i64, i64, i64, i64}, ptr %a, i64 %i
52 br i1 %c0, label %A, label %X
55 br i1 %c1, label %B, label %C
62 %p1 = getelementptr {i64, i64, i64, i64}, ptr %a, i64 %i
63 call void @g(ptr %p1, ptr %p0)
70 ; Check that ADDWrs/ADDXrs with shift <= 4 is considered relatively fast on sub-targets
71 ; with feature +alu-lsl-fast, thus *not* CSE-d.
72 define void @f1(i1 %c0, i1 %c1, ptr %a, i64 %i) {
74 ; CHECK: // %bb.0: // %E
75 ; CHECK-NEXT: tbz w0, #0, .LBB1_5
76 ; CHECK-NEXT: // %bb.1: // %A
77 ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
78 ; CHECK-NEXT: .cfi_def_cfa_offset 16
79 ; CHECK-NEXT: .cfi_offset w30, -16
80 ; CHECK-NEXT: add x0, x2, x3, lsl #4
81 ; CHECK-NEXT: tbz w1, #0, .LBB1_3
82 ; CHECK-NEXT: // %bb.2: // %B
84 ; CHECK-NEXT: b .LBB1_4
85 ; CHECK-NEXT: .LBB1_3: // %C
86 ; CHECK-NEXT: mov x1, x0
88 ; CHECK-NEXT: .LBB1_4:
89 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
90 ; CHECK-NEXT: .LBB1_5: // %X
94 ; LSLFAST: // %bb.0: // %E
95 ; LSLFAST-NEXT: tbz w0, #0, .LBB1_5
96 ; LSLFAST-NEXT: // %bb.1: // %A
97 ; LSLFAST-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
98 ; LSLFAST-NEXT: .cfi_def_cfa_offset 16
99 ; LSLFAST-NEXT: .cfi_offset w30, -16
100 ; LSLFAST-NEXT: add x8, x2, x3, lsl #4
101 ; LSLFAST-NEXT: tbz w1, #0, .LBB1_3
102 ; LSLFAST-NEXT: // %bb.2: // %B
103 ; LSLFAST-NEXT: mov x0, x8
105 ; LSLFAST-NEXT: b .LBB1_4
106 ; LSLFAST-NEXT: .LBB1_3: // %C
107 ; LSLFAST-NEXT: add x0, x2, x3, lsl #4
108 ; LSLFAST-NEXT: mov x1, x8
110 ; LSLFAST-NEXT: .LBB1_4:
111 ; LSLFAST-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
112 ; LSLFAST-NEXT: .LBB1_5: // %X
115 %p0 = getelementptr {i64, i64}, ptr %a, i64 %i
116 br i1 %c0, label %A, label %X
119 br i1 %c1, label %B, label %C
122 call void @g(ptr %p0)
126 %p1 = getelementptr {i64, i64}, ptr %a, i64 %i
127 call void @g(ptr %p1, ptr %p0)