; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64I
; Check that memory accesses to array elements with large offsets have those
; offsets split into a base offset, plus a smaller offset that is folded into
; the memory operation. We should also only compute that base offset once,
; since it can be shared for all memory operations in this test.
define void @test1([65536 x i32]** %sp, [65536 x i32]* %t, i32 %n) {
; RV32I-LABEL: test1:
; RV32I:       # %bb.0: # %entry
; RV32I-NEXT:    lui a2, 20
; RV32I-NEXT:    addi a2, a2, -1920
; RV32I-NEXT:    lw a0, 0(a0)
; RV32I-NEXT:    add a0, a0, a2
; RV32I-NEXT:    addi a3, zero, 1
; RV32I-NEXT:    sw a3, 4(a0)
; RV32I-NEXT:    addi a4, zero, 2
; RV32I-NEXT:    sw a4, 0(a0)
; RV32I-NEXT:    add a0, a1, a2
; RV32I-NEXT:    sw a4, 4(a0)
; RV32I-NEXT:    sw a3, 0(a0)
; RV32I-NEXT:    .cfi_def_cfa_offset 0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: test1:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    lui a2, 20
; RV64I-NEXT:    addiw a2, a2, -1920
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    addi a3, zero, 1
; RV64I-NEXT:    sw a3, 4(a0)
; RV64I-NEXT:    addi a4, zero, 2
; RV64I-NEXT:    sw a4, 0(a0)
; RV64I-NEXT:    add a0, a1, a2
; RV64I-NEXT:    sw a4, 4(a0)
; RV64I-NEXT:    sw a3, 0(a0)
; RV64I-NEXT:    .cfi_def_cfa_offset 0
; RV64I-NEXT:    ret
entry:
  %s = load [65536 x i32]*, [65536 x i32]** %sp
  ; Element offset 20000 * 4 bytes = 80000 = (20 << 12) - 1920, which is why
  ; the checks expect a single shared "lui 20 / addi(w) -1920" base, with the
  ; remaining +4 for element 20001 folded into the sw immediate.
  %gep0 = getelementptr [65536 x i32], [65536 x i32]* %s, i64 0, i32 20000
  %gep1 = getelementptr [65536 x i32], [65536 x i32]* %s, i64 0, i32 20001
  %gep2 = getelementptr [65536 x i32], [65536 x i32]* %t, i64 0, i32 20000
  %gep3 = getelementptr [65536 x i32], [65536 x i32]* %t, i64 0, i32 20001
  store i32 2, i32* %gep0
  store i32 1, i32* %gep1
  store i32 1, i32* %gep2
  store i32 2, i32* %gep3
  ret void
}
; Ditto. Check it when the GEPs are not in the entry block.
57 define void @test2([65536 x i32]** %sp, [65536 x i32]* %t, i32 %n) {
; NOTE(review): same large-offset-splitting check as the first test, but here
; the GEPs are used from a loop; the CHECK lines expect the shared lui/addi(w)
; base-offset computation to be done once in %entry, outside the loop.
59 ; RV32I: # %bb.0: # %entry
60 ; RV32I-NEXT: lui a3, 20
61 ; RV32I-NEXT: addi a3, a3, -1920
62 ; RV32I-NEXT: lw a0, 0(a0)
63 ; RV32I-NEXT: add a0, a0, a3
64 ; RV32I-NEXT: add a1, a1, a3
65 ; RV32I-NEXT: mv a3, zero
66 ; RV32I-NEXT: bge a3, a2, .LBB1_2
67 ; RV32I-NEXT: .LBB1_1: # %while_body
68 ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
69 ; RV32I-NEXT: sw a3, 4(a0)
70 ; RV32I-NEXT: addi a4, a3, 1
71 ; RV32I-NEXT: sw a4, 0(a0)
72 ; RV32I-NEXT: sw a3, 4(a1)
73 ; RV32I-NEXT: sw a4, 0(a1)
74 ; RV32I-NEXT: mv a3, a4
75 ; RV32I-NEXT: blt a3, a2, .LBB1_1
76 ; RV32I-NEXT: .LBB1_2: # %while_end
77 ; RV32I-NEXT: .cfi_def_cfa_offset 0
; NOTE(review): RV64 differs from RV32 only in addiw/ld for the base pointer
; and the extra sext.w of the i32 loop counter before the signed compares.
81 ; RV64I: # %bb.0: # %entry
82 ; RV64I-NEXT: lui a3, 20
83 ; RV64I-NEXT: addiw a3, a3, -1920
84 ; RV64I-NEXT: ld a0, 0(a0)
85 ; RV64I-NEXT: add a0, a0, a3
86 ; RV64I-NEXT: add a1, a1, a3
87 ; RV64I-NEXT: sext.w a2, a2
88 ; RV64I-NEXT: mv a3, zero
89 ; RV64I-NEXT: sext.w a4, a3
90 ; RV64I-NEXT: bge a4, a2, .LBB1_2
91 ; RV64I-NEXT: .LBB1_1: # %while_body
92 ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
93 ; RV64I-NEXT: sw a3, 4(a0)
94 ; RV64I-NEXT: addi a4, a3, 1
95 ; RV64I-NEXT: sw a4, 0(a0)
96 ; RV64I-NEXT: sw a3, 4(a1)
97 ; RV64I-NEXT: sw a4, 0(a1)
98 ; RV64I-NEXT: mv a3, a4
99 ; RV64I-NEXT: sext.w a4, a3
100 ; RV64I-NEXT: blt a4, a2, .LBB1_1
101 ; RV64I-NEXT: .LBB1_2: # %while_end
102 ; RV64I-NEXT: .cfi_def_cfa_offset 0
105 %s = load [65536 x i32]*, [65536 x i32]** %sp
; NOTE(review): the %phi below references %entry and %while_body, but the
; block labels themselves (presumably while_cond:/while_body:) are not
; visible in this excerpt — the chunk appears to have dropped those lines.
108 %phi = phi i32 [ 0, %entry ], [ %i, %while_body ]
109 %gep0 = getelementptr [65536 x i32], [65536 x i32]* %s, i64 0, i32 20000
110 %gep1 = getelementptr [65536 x i32], [65536 x i32]* %s, i64 0, i32 20001
111 %gep2 = getelementptr [65536 x i32], [65536 x i32]* %t, i64 0, i32 20000
112 %gep3 = getelementptr [65536 x i32], [65536 x i32]* %t, i64 0, i32 20001
113 %cmp = icmp slt i32 %phi, %n
114 br i1 %cmp, label %while_body, label %while_end
118 store i32 %i, i32* %gep0
119 store i32 %phi, i32* %gep1
120 store i32 %i, i32* %gep2
121 store i32 %phi, i32* %gep3
; NOTE(review): excerpt ends mid-function — the %i increment, the branch back
; to the loop header, the %while_end block, ret void, and the closing brace
; are outside this view; confirm against the full file before regenerating.