; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s

; Check indexed and unindexed, sext, zext and anyext loads
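; Note: in RV64I, lb/lh/lw sign-extend the loaded value to 64 bits and
; lbu/lhu/lwu zero-extend it; an anyext load is free to select either form.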
define dso_local i64 @lb(ptr %a) nounwind {
; RV64I-LABEL: lb:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lb a1, 1(a0)
; RV64I-NEXT:    lbu zero, 0(a0)
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    ret
  %1 = getelementptr i8, ptr %a, i32 1
  %2 = load i8, ptr %1
  %3 = sext i8 %2 to i64
  ; the unused load will produce an anyext for selection
  %4 = load volatile i8, ptr %a
  ret i64 %3
}

define dso_local i64 @lh(ptr %a) nounwind {
; RV64I-LABEL: lh:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lh a1, 4(a0)
; RV64I-NEXT:    lh zero, 0(a0)
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    ret
  %1 = getelementptr i16, ptr %a, i32 2
  %2 = load i16, ptr %1
  %3 = sext i16 %2 to i64
  ; the unused load will produce an anyext for selection
  %4 = load volatile i16, ptr %a
  ret i64 %3
}

define dso_local i64 @lw(ptr %a) nounwind {
; RV64I-LABEL: lw:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lw a1, 12(a0)
; RV64I-NEXT:    lw zero, 0(a0)
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    ret
  %1 = getelementptr i32, ptr %a, i32 3
  %2 = load i32, ptr %1
  %3 = sext i32 %2 to i64
  ; the unused load will produce an anyext for selection
  %4 = load volatile i32, ptr %a
  ret i64 %3
}

define dso_local i64 @lbu(ptr %a) nounwind {
; RV64I-LABEL: lbu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lbu a1, 4(a0)
; RV64I-NEXT:    lbu a0, 0(a0)
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ret
  %1 = getelementptr i8, ptr %a, i32 4
  %2 = load i8, ptr %1
  %3 = zext i8 %2 to i64
  %4 = load volatile i8, ptr %a
  %5 = zext i8 %4 to i64
  %6 = add i64 %3, %5
  ret i64 %6
}

define dso_local i64 @lhu(ptr %a) nounwind {
; RV64I-LABEL: lhu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lhu a1, 10(a0)
; RV64I-NEXT:    lhu a0, 0(a0)
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ret
  %1 = getelementptr i16, ptr %a, i32 5
  %2 = load i16, ptr %1
  %3 = zext i16 %2 to i64
  %4 = load volatile i16, ptr %a
  %5 = zext i16 %4 to i64
  %6 = add i64 %3, %5
  ret i64 %6
}

define dso_local i64 @lwu(ptr %a) nounwind {
; RV64I-LABEL: lwu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lwu a1, 24(a0)
; RV64I-NEXT:    lwu a0, 0(a0)
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ret
  %1 = getelementptr i32, ptr %a, i32 6
  %2 = load i32, ptr %1
  %3 = zext i32 %2 to i64
  %4 = load volatile i32, ptr %a
  %5 = zext i32 %4 to i64
  %6 = add i64 %3, %5
  ret i64 %6
}

; Check indexed and unindexed stores
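; The indexed store offsets below are the GEP index scaled by the element
; size: 7*1 = 7, 8*2 = 16 and 9*4 = 36 bytes.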
define dso_local void @sb(ptr %a, i8 %b) nounwind {
; RV64I-LABEL: sb:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sb a1, 0(a0)
; RV64I-NEXT:    sb a1, 7(a0)
; RV64I-NEXT:    ret
  %1 = getelementptr i8, ptr %a, i32 7
  store i8 %b, ptr %1
  store i8 %b, ptr %a
  ret void
}

define dso_local void @sh(ptr %a, i16 %b) nounwind {
; RV64I-LABEL: sh:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sh a1, 0(a0)
; RV64I-NEXT:    sh a1, 16(a0)
; RV64I-NEXT:    ret
  %1 = getelementptr i16, ptr %a, i32 8
  store i16 %b, ptr %1
  store i16 %b, ptr %a
  ret void
}

define dso_local void @sw(ptr %a, i32 %b) nounwind {
; RV64I-LABEL: sw:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sw a1, 0(a0)
; RV64I-NEXT:    sw a1, 36(a0)
; RV64I-NEXT:    ret
  %1 = getelementptr i32, ptr %a, i32 9
  store i32 %b, ptr %1
  store i32 %b, ptr %a
  ret void
}

; 64-bit loads and stores
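; ld and sd are RV64I-only instructions; on RV32 an i64 access would be
; legalized into a pair of 32-bit accesses.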
define dso_local i64 @ld(ptr %a) nounwind {
; RV64I-LABEL: ld:
; RV64I:       # %bb.0:
; RV64I-NEXT:    ld a1, 80(a0)
; RV64I-NEXT:    ld zero, 0(a0)
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    ret
  %1 = getelementptr i64, ptr %a, i32 10
  %2 = load i64, ptr %1
  %3 = load volatile i64, ptr %a
  ret i64 %2
}

define dso_local void @sd(ptr %a, i64 %b) nounwind {
; RV64I-LABEL: sd:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sd a1, 0(a0)
; RV64I-NEXT:    sd a1, 88(a0)
; RV64I-NEXT:    ret
  %1 = getelementptr i64, ptr %a, i32 11
  store i64 %b, ptr %1
  store i64 %b, ptr %a
  ret void
}

; Check load and store to an i1 location
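; In both functions below, sext i1 yields 0/-1, i.e. -(zext i1), so adding the
; sign- and zero-extended loads lowers to the single sub in the output.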
define dso_local i64 @load_sext_zext_anyext_i1(ptr %a) nounwind {
; RV64I-LABEL: load_sext_zext_anyext_i1:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lbu a1, 1(a0)
; RV64I-NEXT:    lbu a2, 2(a0)
; RV64I-NEXT:    lbu zero, 0(a0)
; RV64I-NEXT:    sub a0, a2, a1
; RV64I-NEXT:    ret
  ; sextload i1
  %1 = getelementptr i1, ptr %a, i32 1
  %2 = load i1, ptr %1
  %3 = sext i1 %2 to i64
  ; zextload i1
  %4 = getelementptr i1, ptr %a, i32 2
  %5 = load i1, ptr %4
  %6 = zext i1 %5 to i64
  %7 = add i64 %3, %6
  ; extload i1 (anyext). Produced as the load is unused.
  %8 = load volatile i1, ptr %a
  ret i64 %7
}

define dso_local i16 @load_sext_zext_anyext_i1_i16(ptr %a) nounwind {
; RV64I-LABEL: load_sext_zext_anyext_i1_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lbu a1, 1(a0)
; RV64I-NEXT:    lbu a2, 2(a0)
; RV64I-NEXT:    lbu zero, 0(a0)
; RV64I-NEXT:    sub a0, a2, a1
; RV64I-NEXT:    ret
  ; sextload i1
  %1 = getelementptr i1, ptr %a, i32 1
  %2 = load i1, ptr %1
  %3 = sext i1 %2 to i16
  ; zextload i1
  %4 = getelementptr i1, ptr %a, i32 2
  %5 = load i1, ptr %4
  %6 = zext i1 %5 to i16
  %7 = add i16 %3, %6
  ; extload i1 (anyext). Produced as the load is unused.
  %8 = load volatile i1, ptr %a
  ret i16 %7
}

; Check load and store to a global
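; With the default (medlow) code model the global is addressed via a
; %hi(G)/%lo(G) pair: lui materializes the upper bits and the 12-bit %lo part
; folds directly into the load/store offset.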
@G = dso_local global i64 0

define dso_local i64 @ld_sd_global(i64 %a) nounwind {
; RV64I-LABEL: ld_sd_global:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a2, %hi(G)
; RV64I-NEXT:    ld a1, %lo(G)(a2)
; RV64I-NEXT:    addi a3, a2, %lo(G)
; RV64I-NEXT:    sd a0, %lo(G)(a2)
; RV64I-NEXT:    ld zero, 72(a3)
; RV64I-NEXT:    sd a0, 72(a3)
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    ret
  %1 = load volatile i64, ptr @G
  store i64 %a, ptr @G
  %2 = getelementptr i64, ptr @G, i64 9
  %3 = load volatile i64, ptr %2
  store i64 %a, ptr %2
  ret i64 %1
}

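; The near-local offset below is 257*8 = 2056, just past the 12-bit signed
; immediate range (-2048..2047), so it is materialized as addi 2047 plus a
; remaining displacement of 9 on the memory access.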
define i64 @lw_near_local(ptr %a) {
; RV64I-LABEL: lw_near_local:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi a0, a0, 2047
; RV64I-NEXT:    ld a0, 9(a0)
; RV64I-NEXT:    ret
  %1 = getelementptr inbounds i64, ptr %a, i64 257
  %2 = load volatile i64, ptr %1
  ret i64 %2
}

define void @st_near_local(ptr %a, i64 %b) {
; RV64I-LABEL: st_near_local:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi a0, a0, 2047
; RV64I-NEXT:    sd a1, 9(a0)
; RV64I-NEXT:    ret
  %1 = getelementptr inbounds i64, ptr %a, i64 257
  store i64 %b, ptr %1
  ret void
}

define i64 @lw_sw_near_local(ptr %a, i64 %b) {
; RV64I-LABEL: lw_sw_near_local:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi a2, a0, 2047
; RV64I-NEXT:    ld a0, 9(a2)
; RV64I-NEXT:    sd a1, 9(a2)
; RV64I-NEXT:    ret
  %1 = getelementptr inbounds i64, ptr %a, i64 257
  %2 = load volatile i64, ptr %1
  store i64 %b, ptr %1
  ret i64 %2
}

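; For the far-local cases the offset is 4095*8 = 32760 = (8 << 12) - 8, so it
; is materialized as lui 8 plus a -8 displacement on the memory access.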
define i64 @lw_far_local(ptr %a) {
; RV64I-LABEL: lw_far_local:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a1, 8
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld a0, -8(a0)
; RV64I-NEXT:    ret
  %1 = getelementptr inbounds i64, ptr %a, i64 4095
  %2 = load volatile i64, ptr %1
  ret i64 %2
}

define void @st_far_local(ptr %a, i64 %b) {
; RV64I-LABEL: st_far_local:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a2, 8
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    sd a1, -8(a0)
; RV64I-NEXT:    ret
  %1 = getelementptr inbounds i64, ptr %a, i64 4095
  store i64 %b, ptr %1
  ret void
}

define i64 @lw_sw_far_local(ptr %a, i64 %b) {
; RV64I-LABEL: lw_sw_far_local:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a2, 8
; RV64I-NEXT:    add a2, a0, a2
; RV64I-NEXT:    ld a0, -8(a2)
; RV64I-NEXT:    sd a1, -8(a2)
; RV64I-NEXT:    ret
  %1 = getelementptr inbounds i64, ptr %a, i64 4095
  %2 = load volatile i64, ptr %1
  store i64 %b, ptr %1
  ret i64 %2
}

; Make sure we don't fold the addiw into the load offset. The sign extend of the
; addiw is required.
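; Note on the arithmetic: 268435200*8 = 2147481600 = 0x7ffff800. lui 524288
; leaves 0x80000000 sign-extended to 0xffffffff80000000 in the register; only
; addiw's 32-bit wrap and sign extension of the -2048 addition recover the
; intended positive 0x7ffff800, so the -2048 must not be folded into the
; memory offset.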
define i64 @lw_really_far_local(ptr %a) {
; RV64I-LABEL: lw_really_far_local:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a1, 524288
; RV64I-NEXT:    addiw a1, a1, -2048
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
  %1 = getelementptr inbounds i64, ptr %a, i64 268435200
  %2 = load volatile i64, ptr %1
  ret i64 %2
}

; Make sure we don't fold the addiw into the store offset. The sign extend of
; the addiw is required.
define void @st_really_far_local(ptr %a, i64 %b) {
; RV64I-LABEL: st_really_far_local:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a2, 524288
; RV64I-NEXT:    addiw a2, a2, -2048
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    sd a1, 0(a0)
; RV64I-NEXT:    ret
  %1 = getelementptr inbounds i64, ptr %a, i64 268435200
  store i64 %b, ptr %1
  ret void
}

; Make sure we don't fold the addiw into the load/store offset. The sign extend
; of the addiw is required.
define i64 @lw_sw_really_far_local(ptr %a, i64 %b) {
; RV64I-LABEL: lw_sw_really_far_local:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a2, 524288
; RV64I-NEXT:    addiw a2, a2, -2048
; RV64I-NEXT:    add a2, a0, a2
; RV64I-NEXT:    ld a0, 0(a2)
; RV64I-NEXT:    sd a1, 0(a2)
; RV64I-NEXT:    ret
  %1 = getelementptr inbounds i64, ptr %a, i64 268435200
  %2 = load volatile i64, ptr %1
  store i64 %b, ptr %1
  ret i64 %2
}

%struct.quux = type { i32, [0 x i8] }

; Make sure we don't remove the addi and fold the C from
; (add (addi FrameIndex, C), X) into the store address.
; FrameIndex cannot be the operand of an ADD. We must keep the ADDI.
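; The zero-length trailing array starts at offset 4 inside the alloca (right
; after the i32), hence the addi a1, sp, 4 below that must be preserved.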
define void @addi_fold_crash(i64 %arg) nounwind {
; RV64I-LABEL: addi_fold_crash:
; RV64I:       # %bb.0: # %bb
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    addi a1, sp, 4
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    sb zero, 0(a0)
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    call snork@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
bb:
  %tmp = alloca %struct.quux, align 8
  %tmp1 = getelementptr inbounds %struct.quux, ptr %tmp, i64 0, i32 1
  %tmp2 = getelementptr inbounds %struct.quux, ptr %tmp, i64 0, i32 1, i64 %arg
  store i8 0, ptr %tmp2, align 1
  call void @snork(ptr %tmp1)
  ret void
}

declare void @snork(ptr)