1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=aarch64 -lsr-preferred-addressing-mode=preindexed %s -o - | FileCheck %s
4 ; In LSR for constant offsets and steps, we can generate pre-inc
5 ; accesses by having the offset equal the step and generate a reuse
6 ; formula. However, there are cases where the step results in an
7 ; illegal addressing mode.
9 ; In this test, we set the preferred addressing mode to be preindexed,
10 ; in order to test a scenario where the step results in an illegal
11 ; addressing mode and because of that it should not generate a reuse formula.
13 ; This test was created in order to reproduce a bug that was observed
14 ; when building a bootstrap build on an AArch64 machine, where the
15 ; preferred addressing mode is preindexed.
; Packed struct wrapping a 166 x 338 array of bytes. The 338-byte inner row
; size is the loop step the CHECK lines expect (add x9, x9, #338), which is
; not encodable as a pre-indexed immediate offset on AArch64 — the condition
; this test is exercising.
18 %"Type" = type <{[166 x [338 x i8]]}>
19 define void @test_lsr_pre_inc_offset_check(ptr %p) {
20 ; CHECK-LABEL: test_lsr_pre_inc_offset_check:
21 ; CHECK: // %bb.0: // %entry
22 ; CHECK-NEXT: mov w8, #165
23 ; CHECK-NEXT: add x9, x0, #339
24 ; CHECK-NEXT: mov w10, #2
25 ; CHECK-NEXT: .LBB0_1: // %main
26 ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
27 ; CHECK-NEXT: str wzr, [x9]
28 ; CHECK-NEXT: subs x8, x8, #1
29 ; CHECK-NEXT: strb w10, [x9, #1]
30 ; CHECK-NEXT: add x9, x9, #338
31 ; CHECK-NEXT: b.ne .LBB0_1
32 ; CHECK-NEXT: // %bb.2: // %exit
39 %arrayidx.i = getelementptr inbounds %"Type", ptr %p, i64 0, i32 0, i64 %indvars, i64 1
40 store i32 0, ptr %arrayidx.i, align 1
43 %arrayidx.p = getelementptr inbounds %"Type", ptr %p, i64 0, i32 0, i64 %indvars, i64 2
44 store i8 2, ptr %arrayidx.p, align 1
45 %indvars.iv.next = add nuw nsw i64 %indvars, 1
46 %add.i = add nuw i8 %begin, 1
47 %cmp.i.not = icmp eq i64 %indvars.iv.next, 166
48 br i1 %cmp.i.not, label %exit, label %main
50 %begin = phi i8 [ 1, %entry ], [ %add.i, %if.end ]
51 %indvars = phi i64 [ 1, %entry ], [ %indvars.iv.next, %if.end ]