; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -loop-reduce -mtriple=x86_64 -S | FileCheck %s -check-prefix=BOTH -check-prefix=INSN
; RUN: opt < %s -loop-reduce -mtriple=x86_64 -lsr-insns-cost=false -S | FileCheck %s -check-prefix=BOTH -check-prefix=REGS
; RUN: llc < %s -O2 -mtriple=x86_64-unknown-unknown -lsr-insns-cost | FileCheck %s

; The OPT test checks that LSR rewrites the compare against a static counter
; (1024) into a compare with 0.

; The LLC test checks that LSR optimizes the compare for a static counter.
; That means that instead of generating the following:
;   movl %ecx, (%rdx,%rax,4)
;   incq %rax
;   cmpq $1024, %rax
; LSR should optimize out the cmp by folding the constant offset into the
; addressing mode and letting the increment itself set the flags for the
; loop branch:
;   movl %ecx, 4096(%rdx,%rax)
;   addq $4, %rax
; or
;   movl %ecx, 4096(%rdx,%rax,4)
;   incq %rax
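
; For reference, the IR below implements a simple element-wise addition with
; a static trip count. A C equivalent of @foo (an illustrative sketch, not
; part of the original test) would be:
;
;   void foo(const int *x, const int *y, int *q) {
;     for (long i = 0; i < 1024; i++)
;       q[i] = x[i] + y[i];
;   }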

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

define void @foo(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* nocapture %q) {
; INSN-LABEL: @foo(
; INSN-NEXT:  entry:
; INSN-NEXT:    [[Q1:%.*]] = bitcast i32* [[Q:%.*]] to i8*
; INSN-NEXT:    [[Y3:%.*]] = bitcast i32* [[Y:%.*]] to i8*
; INSN-NEXT:    [[X7:%.*]] = bitcast i32* [[X:%.*]] to i8*
; INSN-NEXT:    br label [[FOR_BODY:%.*]]
; INSN:       for.cond.cleanup:
; INSN-NEXT:    ret void
; INSN:       for.body:
; INSN-NEXT:    [[LSR_IV:%.*]] = phi i64 [ [[LSR_IV_NEXT:%.*]], [[FOR_BODY]] ], [ -4096, [[ENTRY:%.*]] ]
; INSN-NEXT:    [[UGLYGEP8:%.*]] = getelementptr i8, i8* [[X7]], i64 [[LSR_IV]]
; INSN-NEXT:    [[UGLYGEP89:%.*]] = bitcast i8* [[UGLYGEP8]] to i32*
; INSN-NEXT:    [[SCEVGEP10:%.*]] = getelementptr i32, i32* [[UGLYGEP89]], i64 1024
; INSN-NEXT:    [[TMP:%.*]] = load i32, i32* [[SCEVGEP10]], align 4
; INSN-NEXT:    [[UGLYGEP4:%.*]] = getelementptr i8, i8* [[Y3]], i64 [[LSR_IV]]
; INSN-NEXT:    [[UGLYGEP45:%.*]] = bitcast i8* [[UGLYGEP4]] to i32*
; INSN-NEXT:    [[SCEVGEP6:%.*]] = getelementptr i32, i32* [[UGLYGEP45]], i64 1024
; INSN-NEXT:    [[TMP1:%.*]] = load i32, i32* [[SCEVGEP6]], align 4
; INSN-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP]]
; INSN-NEXT:    [[UGLYGEP:%.*]] = getelementptr i8, i8* [[Q1]], i64 [[LSR_IV]]
; INSN-NEXT:    [[UGLYGEP2:%.*]] = bitcast i8* [[UGLYGEP]] to i32*
; INSN-NEXT:    [[SCEVGEP:%.*]] = getelementptr i32, i32* [[UGLYGEP2]], i64 1024
; INSN-NEXT:    store i32 [[ADD]], i32* [[SCEVGEP]], align 4
; INSN-NEXT:    [[LSR_IV_NEXT]] = add nsw i64 [[LSR_IV]], 4
; INSN-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[LSR_IV_NEXT]], 0
; INSN-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
;
; REGS-LABEL: @foo(
; REGS-NEXT:  entry:
; REGS-NEXT:    br label [[FOR_BODY:%.*]]
; REGS:       for.cond.cleanup:
; REGS-NEXT:    ret void
; REGS:       for.body:
; REGS-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; REGS-NEXT:    [[SCEVGEP2:%.*]] = getelementptr i32, i32* [[X:%.*]], i64 [[INDVARS_IV]]
; REGS-NEXT:    [[TMP:%.*]] = load i32, i32* [[SCEVGEP2]], align 4
; REGS-NEXT:    [[SCEVGEP1:%.*]] = getelementptr i32, i32* [[Y:%.*]], i64 [[INDVARS_IV]]
; REGS-NEXT:    [[TMP1:%.*]] = load i32, i32* [[SCEVGEP1]], align 4
; REGS-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP]]
; REGS-NEXT:    [[SCEVGEP:%.*]] = getelementptr i32, i32* [[Q:%.*]], i64 [[INDVARS_IV]]
; REGS-NEXT:    store i32 [[ADD]], i32* [[SCEVGEP]], align 4
; REGS-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; REGS-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
; REGS-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
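;
; The two opt runs above differ in which induction variable LSR picks: the
; INSN run (instruction-count cost, the default) counts from -4096 up to 0,
; so the exit test becomes a compare with 0, while the REGS run
; (-lsr-insns-cost=false) keeps the canonical counter running from 0 to 1024.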
;
; CHECK-LABEL: foo:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
; CHECK-NEXT:    .p2align 4, 0x90
; CHECK-NEXT:  .LBB0_1: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    movl 4096(%rsi,%rax), %ecx
; CHECK-NEXT:    addl 4096(%rdi,%rax), %ecx
; CHECK-NEXT:    movl %ecx, 4096(%rdx,%rax)
; CHECK-NEXT:    addq $4, %rax
; CHECK-NEXT:    jne .LBB0_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    retq
entry:
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
  %tmp = load i32, i32* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %y, i64 %indvars.iv
  %tmp1 = load i32, i32* %arrayidx2, align 4
  %add = add nsw i32 %tmp1, %tmp
  %arrayidx4 = getelementptr inbounds i32, i32* %q, i64 %indvars.iv
  store i32 %add, i32* %arrayidx4, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 1024
  br i1 %exitcond, label %for.cond.cleanup, label %for.body
}