1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32I
3 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64I
5 ; This test case tests the LocalStackSlotAllocation pass, which uses a base register
6 ; for a frame index whose offset is out of range (for RISC-V, the immediate
7 ; is 12 bits for load/store instructions, excluding vector loads/stores).
; The [100000 x i8] alloca places %va/%va1 about 100008 bytes above sp
; (frame size 100016), far beyond the 12-bit load/store immediate range, so a
; base register is materialized once (a0 = sp + 100008) and the two volatile
; loads fold their small residual offsets (4 and 0) into it.
8 define void @use_frame_base_reg() {
9 ; RV32I-LABEL: use_frame_base_reg:
11 ; RV32I-NEXT: lui a0, 24
12 ; RV32I-NEXT: addi a0, a0, 1712
13 ; RV32I-NEXT: sub sp, sp, a0
14 ; RV32I-NEXT: .cfi_def_cfa_offset 100016
15 ; RV32I-NEXT: lui a0, 24
16 ; RV32I-NEXT: addi a0, a0, 1704
17 ; RV32I-NEXT: add a0, sp, a0
18 ; RV32I-NEXT: lbu zero, 4(a0)
19 ; RV32I-NEXT: lbu zero, 0(a0)
20 ; RV32I-NEXT: lui a0, 24
21 ; RV32I-NEXT: addi a0, a0, 1712
22 ; RV32I-NEXT: add sp, sp, a0
25 ; RV64I-LABEL: use_frame_base_reg:
27 ; RV64I-NEXT: lui a0, 24
28 ; RV64I-NEXT: addiw a0, a0, 1712
29 ; RV64I-NEXT: sub sp, sp, a0
30 ; RV64I-NEXT: .cfi_def_cfa_offset 100016
31 ; RV64I-NEXT: lui a0, 24
32 ; RV64I-NEXT: addiw a0, a0, 1704
33 ; RV64I-NEXT: add a0, sp, a0
34 ; RV64I-NEXT: lbu zero, 4(a0)
35 ; RV64I-NEXT: lbu zero, 0(a0)
36 ; RV64I-NEXT: lui a0, 24
37 ; RV64I-NEXT: addiw a0, a0, 1712
38 ; RV64I-NEXT: add sp, sp, a0
; The loads are volatile so they cannot be deleted as dead code.
41 %va = alloca i8, align 4
42 %va1 = alloca i8, align 4
43 %large = alloca [ 100000 x i8 ]
44 %argp.cur = load volatile i8, ptr %va, align 4
45 %argp.next = load volatile i8, ptr %va1, align 4
49 ; Test containing a load with its own local offset. Make sure isFrameOffsetLegal
50 ; considers it and creates a virtual base register.
; The access at local offset 2000 into the 2500-byte object is out of direct
; reach, so a virtual base register is created (a0 = sp + 2012) and both the
; load and store use a zero offset from it.
52 define void @load_with_offset() {
54 ; RV32I-NEXT: addi sp, sp, -2048
55 ; RV32I-NEXT: addi sp, sp, -464
56 ; RV32I-NEXT: .cfi_def_cfa_offset 2512
57 ; RV32I-NEXT: addi a0, sp, 2012
58 ; RV32I-NEXT: lbu a1, 0(a0)
59 ; RV32I-NEXT: sb a1, 0(a0)
60 ; RV32I-NEXT: addi sp, sp, 2032
61 ; RV32I-NEXT: addi sp, sp, 480
64 ; RV64I-LABEL: load_with_offset:
66 ; RV64I-NEXT: addi sp, sp, -2048
67 ; RV64I-NEXT: addi sp, sp, -464
68 ; RV64I-NEXT: .cfi_def_cfa_offset 2512
69 ; RV64I-NEXT: addi a0, sp, 2012
70 ; RV64I-NEXT: lbu a1, 0(a0)
71 ; RV64I-NEXT: sb a1, 0(a0)
72 ; RV64I-NEXT: addi sp, sp, 2032
73 ; RV64I-NEXT: addi sp, sp, 480
; Volatile load + store keep the accesses alive and in order.
76 %va = alloca [2500 x i8], align 4
77 %va_gep = getelementptr [2000 x i8], ptr %va, i64 0, i64 2000
78 %load = load volatile i8, ptr %va_gep, align 4
79 store volatile i8 %load, ptr %va_gep, align 4
83 ; Test containing a load with its own local offset that is smaller than the
84 ; previous test case. Make sure we don't create a virtual base register.
; Same shape as load_with_offset but the access is at local offset 1400,
; which is reachable directly as 1412(sp) within the 12-bit immediate, so no
; virtual base register is created.
86 define void @load_with_offset2() {
88 ; RV32I-NEXT: addi sp, sp, -2048
89 ; RV32I-NEXT: addi sp, sp, -464
90 ; RV32I-NEXT: .cfi_def_cfa_offset 2512
91 ; RV32I-NEXT: lbu a0, 1412(sp)
92 ; RV32I-NEXT: sb a0, 1412(sp)
93 ; RV32I-NEXT: addi sp, sp, 2032
94 ; RV32I-NEXT: addi sp, sp, 480
97 ; RV64I-LABEL: load_with_offset2:
99 ; RV64I-NEXT: addi sp, sp, -2048
100 ; RV64I-NEXT: addi sp, sp, -464
101 ; RV64I-NEXT: .cfi_def_cfa_offset 2512
102 ; RV64I-NEXT: lbu a0, 1412(sp)
103 ; RV64I-NEXT: sb a0, 1412(sp)
104 ; RV64I-NEXT: addi sp, sp, 2032
105 ; RV64I-NEXT: addi sp, sp, 480
; Volatile load + store keep the accesses alive and in order.
108 %va = alloca [2500 x i8], align 4
109 %va_gep = getelementptr [2000 x i8], ptr %va, i64 0, i64 1400
110 %load = load volatile i8, ptr %va_gep, align 4
111 store volatile i8 %load, ptr %va_gep, align 4
; With "frame-pointer"="all" the access goes through s0 with a negative
; offset. RV32 folds the offset directly into the memory operands
; (-1960(s0)); RV64 first materializes the address into a0
; (addi a0, s0, -1972) and accesses 0(a0).
; NOTE(review): the reason the two targets differ here is not visible in this
; chunk — confirm against the pass's isFrameOffsetLegal logic.
116 define void @frame_pointer() "frame-pointer"="all" {
118 ; RV32I-NEXT: addi sp, sp, -2032
119 ; RV32I-NEXT: .cfi_def_cfa_offset 2032
120 ; RV32I-NEXT: sw ra, 2028(sp) # 4-byte Folded Spill
121 ; RV32I-NEXT: sw s0, 2024(sp) # 4-byte Folded Spill
122 ; RV32I-NEXT: .cfi_offset ra, -4
123 ; RV32I-NEXT: .cfi_offset s0, -8
124 ; RV32I-NEXT: addi s0, sp, 2032
125 ; RV32I-NEXT: .cfi_def_cfa s0, 0
126 ; RV32I-NEXT: addi sp, sp, -480
127 ; RV32I-NEXT: lbu a0, -1960(s0)
128 ; RV32I-NEXT: sb a0, -1960(s0)
129 ; RV32I-NEXT: addi sp, sp, 480
130 ; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
131 ; RV32I-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload
132 ; RV32I-NEXT: addi sp, sp, 2032
135 ; RV64I-LABEL: frame_pointer:
137 ; RV64I-NEXT: addi sp, sp, -2032
138 ; RV64I-NEXT: .cfi_def_cfa_offset 2032
139 ; RV64I-NEXT: sd ra, 2024(sp) # 8-byte Folded Spill
140 ; RV64I-NEXT: sd s0, 2016(sp) # 8-byte Folded Spill
141 ; RV64I-NEXT: .cfi_offset ra, -8
142 ; RV64I-NEXT: .cfi_offset s0, -16
143 ; RV64I-NEXT: addi s0, sp, 2032
144 ; RV64I-NEXT: .cfi_def_cfa s0, 0
145 ; RV64I-NEXT: addi sp, sp, -496
146 ; RV64I-NEXT: addi a0, s0, -1972
147 ; RV64I-NEXT: lbu a1, 0(a0)
148 ; RV64I-NEXT: sb a1, 0(a0)
149 ; RV64I-NEXT: addi sp, sp, 496
150 ; RV64I-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
151 ; RV64I-NEXT: ld s0, 2016(sp) # 8-byte Folded Reload
152 ; RV64I-NEXT: addi sp, sp, 2032
; Volatile load + store keep the accesses alive and in order.
155 %va = alloca [2500 x i8], align 4
156 %va_gep = getelementptr [2000 x i8], ptr %va, i64 0, i64 552
157 %load = load volatile i8, ptr %va_gep, align 4
158 store volatile i8 %load, ptr %va_gep, align 4