; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32I
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64I
; This test case tests the LocalStackSlotAllocation pass, which uses a base
; register for a frame index whose offset is out of range (for RISC-V, the
; immediate is 12 bits for load/store instructions, excluding vector
; loads/stores).
define void @use_frame_base_reg() {
; RV32I-LABEL: use_frame_base_reg:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a0, 24
; RV32I-NEXT: addi a0, a0, 1712
; RV32I-NEXT: sub sp, sp, a0
; RV32I-NEXT: .cfi_def_cfa_offset 100016
; RV32I-NEXT: lui a0, 24
; RV32I-NEXT: addi a0, a0, 1704
; RV32I-NEXT: add a0, sp, a0
; RV32I-NEXT: lbu zero, 4(a0)
; RV32I-NEXT: lbu zero, 0(a0)
; RV32I-NEXT: lui a0, 24
; RV32I-NEXT: addi a0, a0, 1712
; RV32I-NEXT: add sp, sp, a0
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: use_frame_base_reg:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a0, 24
; RV64I-NEXT: addiw a0, a0, 1712
; RV64I-NEXT: sub sp, sp, a0
; RV64I-NEXT: .cfi_def_cfa_offset 100016
; RV64I-NEXT: lui a0, 24
; RV64I-NEXT: addiw a0, a0, 1704
; RV64I-NEXT: add a0, sp, a0
; RV64I-NEXT: lbu zero, 4(a0)
; RV64I-NEXT: lbu zero, 0(a0)
; RV64I-NEXT: lui a0, 24
; RV64I-NEXT: addiw a0, a0, 1712
; RV64I-NEXT: add sp, sp, a0
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
  ; The 100000-byte array pushes %va/%va1 beyond the 12-bit load/store
  ; immediate range, forcing a materialized base register (lui+addi+add).
  %va = alloca i8, align 4
  %va1 = alloca i8, align 4
  %large = alloca [ 100000 x i8 ]
  %argp.cur = load volatile i8, ptr %va, align 4
  %argp.next = load volatile i8, ptr %va1, align 4
  ret void
}
; Test containing a load with its own local offset. Make sure isFrameOffsetLegal
; considers it and creates a virtual base register.
define void @load_with_offset() {
; RV32I-LABEL: load_with_offset:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -2048
; RV32I-NEXT: addi sp, sp, -464
; RV32I-NEXT: .cfi_def_cfa_offset 2512
; RV32I-NEXT: addi a0, sp, 2012
; RV32I-NEXT: lbu a1, 0(a0)
; RV32I-NEXT: sb a1, 0(a0)
; RV32I-NEXT: addi sp, sp, 2032
; RV32I-NEXT: addi sp, sp, 480
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: load_with_offset:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -2048
; RV64I-NEXT: addi sp, sp, -464
; RV64I-NEXT: .cfi_def_cfa_offset 2512
; RV64I-NEXT: addi a0, sp, 2012
; RV64I-NEXT: lbu a1, 0(a0)
; RV64I-NEXT: sb a1, 0(a0)
; RV64I-NEXT: addi sp, sp, 2032
; RV64I-NEXT: addi sp, sp, 480
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
  ; GEP offset of 2000 into the 2512-byte frame; the access goes through a
  ; materialized base register (addi a0, sp, 2012) rather than sp directly.
  %va = alloca [2500 x i8], align 4
  %va_gep = getelementptr [2000 x i8], ptr %va, i64 0, i64 2000
  %load = load volatile i8, ptr %va_gep, align 4
  store volatile i8 %load, ptr %va_gep, align 4
  ret void
}
; Test containing a load with its own local offset that is smaller than the
; previous test case. Make sure we don't create a virtual base register.
define void @load_with_offset2() {
; RV32I-LABEL: load_with_offset2:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -2048
; RV32I-NEXT: addi sp, sp, -464
; RV32I-NEXT: .cfi_def_cfa_offset 2512
; RV32I-NEXT: lbu a0, 1412(sp)
; RV32I-NEXT: sb a0, 1412(sp)
; RV32I-NEXT: addi sp, sp, 2032
; RV32I-NEXT: addi sp, sp, 480
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: load_with_offset2:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -2048
; RV64I-NEXT: addi sp, sp, -464
; RV64I-NEXT: .cfi_def_cfa_offset 2512
; RV64I-NEXT: lbu a0, 1412(sp)
; RV64I-NEXT: sb a0, 1412(sp)
; RV64I-NEXT: addi sp, sp, 2032
; RV64I-NEXT: addi sp, sp, 480
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
  ; GEP offset of 1400 keeps the final offset (1412) inside the 12-bit
  ; immediate range, so the access is sp-relative with no base register.
  %va = alloca [2500 x i8], align 4
  %va_gep = getelementptr [2000 x i8], ptr %va, i64 0, i64 1400
  %load = load volatile i8, ptr %va_gep, align 4
  store volatile i8 %load, ptr %va_gep, align 4
  ret void
}
; Test local-offset handling when a frame pointer is forced via
; "frame-pointer"="all": accesses are s0-relative instead of sp-relative.
define void @frame_pointer() "frame-pointer"="all" {
; RV32I-LABEL: frame_pointer:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -2032
; RV32I-NEXT: .cfi_def_cfa_offset 2032
; RV32I-NEXT: sw ra, 2028(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 2024(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: .cfi_offset s0, -8
; RV32I-NEXT: addi s0, sp, 2032
; RV32I-NEXT: .cfi_def_cfa s0, 0
; RV32I-NEXT: addi sp, sp, -480
; RV32I-NEXT: lbu a0, -1960(s0)
; RV32I-NEXT: sb a0, -1960(s0)
; RV32I-NEXT: addi sp, sp, 480
; RV32I-NEXT: .cfi_def_cfa sp, 2032
; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload
; RV32I-NEXT: .cfi_restore ra
; RV32I-NEXT: .cfi_restore s0
; RV32I-NEXT: addi sp, sp, 2032
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: frame_pointer:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -2032
; RV64I-NEXT: .cfi_def_cfa_offset 2032
; RV64I-NEXT: sd ra, 2024(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 2016(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: .cfi_offset s0, -16
; RV64I-NEXT: addi s0, sp, 2032
; RV64I-NEXT: .cfi_def_cfa s0, 0
; RV64I-NEXT: addi sp, sp, -496
; RV64I-NEXT: addi a0, s0, -1972
; RV64I-NEXT: lbu a1, 0(a0)
; RV64I-NEXT: sb a1, 0(a0)
; RV64I-NEXT: addi sp, sp, 496
; RV64I-NEXT: .cfi_def_cfa sp, 2032
; RV64I-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 2016(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: .cfi_restore s0
; RV64I-NEXT: addi sp, sp, 2032
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
  ; With a frame pointer, RV32I folds the offset into -1960(s0) directly,
  ; while RV64I materializes the address first (addi a0, s0, -1972).
  %va = alloca [2500 x i8], align 4
  %va_gep = getelementptr [2000 x i8], ptr %va, i64 0, i64 552
  %load = load volatile i8, ptr %va_gep, align 4
  store volatile i8 %load, ptr %va_gep, align 4
  ret void
}