1 target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
2 ; RUN: opt < %s -alignment-from-assumptions -S | FileCheck %s
3 ; RUN: opt < %s -passes=alignment-from-assumptions -S | FileCheck %s
; @foo: the assume proves (%a & 31) == 0, i.e. %a is 32-byte aligned, so the
; pass should raise the load's alignment from 4 to 32 (checked below).
; NOTE(review): embedded numbering jumps (11 -> 15) indicate the entry label,
; ret, CHECK-LABEL and closing brace are missing from this view — confirm
; against the full file.
5 define i32 @foo(i32* nocapture %a) nounwind uwtable readonly {
7 %ptrint = ptrtoint i32* %a to i64
8 %maskedptr = and i64 %ptrint, 31
9 %maskcond = icmp eq i64 %maskedptr, 0
10 tail call void @llvm.assume(i1 %maskcond)
11 %0 = load i32, i32* %a, align 4
15 ; CHECK: load i32, i32* {{[^,]+}}, align 32
; @foo2: the assume proves (%a + 24) is 32-byte aligned, so %a == 8 (mod 32).
; The load is at %a + 2*4 = %a + 8 bytes, i.e. 16 (mod 32), so the best
; provable alignment is 16 (checked below).
; NOTE(review): interior lines (entry label, ret, CHECK-LABEL, closing brace)
; appear missing from this view per the embedded numbering gaps.
19 define i32 @foo2(i32* nocapture %a) nounwind uwtable readonly {
21 %ptrint = ptrtoint i32* %a to i64
22 %offsetptr = add i64 %ptrint, 24
23 %maskedptr = and i64 %offsetptr, 31
24 %maskcond = icmp eq i64 %maskedptr, 0
25 tail call void @llvm.assume(i1 %maskcond)
26 %arrayidx = getelementptr inbounds i32, i32* %a, i64 2
27 %0 = load i32, i32* %arrayidx, align 4
31 ; CHECK: load i32, i32* {{[^,]+}}, align 16
; @foo2a: the assume proves (%a + 28) is 32-byte aligned, so %a == 4 (mod 32).
; The load is at %a - 4 bytes, i.e. 0 (mod 32), so alignment 32 is provable
; (checked below) — a negative GEP offset still yields a useful result.
; NOTE(review): interior lines appear missing from this view per the embedded
; numbering gaps.
35 define i32 @foo2a(i32* nocapture %a) nounwind uwtable readonly {
37 %ptrint = ptrtoint i32* %a to i64
38 %offsetptr = add i64 %ptrint, 28
39 %maskedptr = and i64 %offsetptr, 31
40 %maskcond = icmp eq i64 %maskedptr, 0
41 tail call void @llvm.assume(i1 %maskcond)
42 %arrayidx = getelementptr inbounds i32, i32* %a, i64 -1
43 %0 = load i32, i32* %arrayidx, align 4
47 ; CHECK: load i32, i32* {{[^,]+}}, align 32
; @goo: same shape as @foo — assume (%a & 31) == 0 raises the load alignment
; to 32 (checked below).
; NOTE(review): interior lines appear missing from this view per the embedded
; numbering gaps.
51 define i32 @goo(i32* nocapture %a) nounwind uwtable readonly {
53 %ptrint = ptrtoint i32* %a to i64
54 %maskedptr = and i64 %ptrint, 31
55 %maskcond = icmp eq i64 %maskedptr, 0
56 tail call void @llvm.assume(i1 %maskcond)
57 %0 = load i32, i32* %a, align 4
61 ; CHECK: load i32, i32* {{[^,]+}}, align 32
; @hoo: loop case. %a is assumed 32-byte aligned; the induction variable
; starts at 0 and steps by 8 i32s = 32 bytes, so every iteration's address is
; 0 (mod 32) and the load in the loop can be given alignment 32 (checked
; below).
; NOTE(review): the entry block's terminator and the function's ret/closing
; brace appear missing from this view per the embedded numbering gaps
; (70 -> 73, 85 -> 89).
65 define i32 @hoo(i32* nocapture %a) nounwind uwtable readonly {
67 %ptrint = ptrtoint i32* %a to i64
68 %maskedptr = and i64 %ptrint, 31
69 %maskcond = icmp eq i64 %maskedptr, 0
70 tail call void @llvm.assume(i1 %maskcond)
73 for.body: ; preds = %entry, %for.body
74 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
75 %r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
76 %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
77 %0 = load i32, i32* %arrayidx, align 4
78 %add = add nsw i32 %0, %r.06
79 %indvars.iv.next = add i64 %indvars.iv, 8
80 %1 = trunc i64 %indvars.iv.next to i32
81 %cmp = icmp slt i32 %1, 2048
82 br i1 %cmp, label %for.body, label %for.end
84 for.end: ; preds = %for.body
85 %add.lcssa = phi i32 [ %add, %for.body ]
89 ; CHECK: load i32, i32* %arrayidx, align 32
90 ; CHECK: ret i32 %add.lcssa
; @joo: like @hoo but the induction variable starts at 4, so each access is at
; base + 16 bytes (mod the 32-byte stride); with %a assumed 32-byte aligned
; the best provable alignment for the loop load is 16 (checked below).
; NOTE(review): the entry block's terminator and the function's ret/closing
; brace appear missing from this view per the embedded numbering gaps.
93 define i32 @joo(i32* nocapture %a) nounwind uwtable readonly {
95 %ptrint = ptrtoint i32* %a to i64
96 %maskedptr = and i64 %ptrint, 31
97 %maskcond = icmp eq i64 %maskedptr, 0
98 tail call void @llvm.assume(i1 %maskcond)
101 for.body: ; preds = %entry, %for.body
102 %indvars.iv = phi i64 [ 4, %entry ], [ %indvars.iv.next, %for.body ]
103 %r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
104 %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
105 %0 = load i32, i32* %arrayidx, align 4
106 %add = add nsw i32 %0, %r.06
107 %indvars.iv.next = add i64 %indvars.iv, 8
108 %1 = trunc i64 %indvars.iv.next to i32
109 %cmp = icmp slt i32 %1, 2048
110 br i1 %cmp, label %for.body, label %for.end
112 for.end: ; preds = %for.body
113 %add.lcssa = phi i32 [ %add, %for.body ]
117 ; CHECK: load i32, i32* %arrayidx, align 16
118 ; CHECK: ret i32 %add.lcssa
; @koo: like @hoo but the step is 4 i32s = 16 bytes, so with a 32-byte-aligned
; base the loop load alternates between 0 and 16 (mod 32); the provable
; alignment is therefore 16 (checked below).
; NOTE(review): the entry block's terminator and the function's ret/closing
; brace appear missing from this view per the embedded numbering gaps.
121 define i32 @koo(i32* nocapture %a) nounwind uwtable readonly {
123 %ptrint = ptrtoint i32* %a to i64
124 %maskedptr = and i64 %ptrint, 31
125 %maskcond = icmp eq i64 %maskedptr, 0
126 tail call void @llvm.assume(i1 %maskcond)
129 for.body: ; preds = %entry, %for.body
130 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
131 %r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
132 %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
133 %0 = load i32, i32* %arrayidx, align 4
134 %add = add nsw i32 %0, %r.06
135 %indvars.iv.next = add i64 %indvars.iv, 4
136 %1 = trunc i64 %indvars.iv.next to i32
137 %cmp = icmp slt i32 %1, 2048
138 br i1 %cmp, label %for.body, label %for.end
140 for.end: ; preds = %for.body
141 %add.lcssa = phi i32 [ %add, %for.body ]
145 ; CHECK: load i32, i32* %arrayidx, align 16
146 ; CHECK: ret i32 %add.lcssa
; @koo2: like @koo but the induction variable starts at -4 (a -16 byte
; offset); 16-byte stride from a 32-byte-aligned base still proves alignment
; 16 for the loop load (checked below) — negative start offsets are handled.
; NOTE(review): the entry block's terminator and the function's ret/closing
; brace appear missing from this view per the embedded numbering gaps.
149 define i32 @koo2(i32* nocapture %a) nounwind uwtable readonly {
151 %ptrint = ptrtoint i32* %a to i64
152 %maskedptr = and i64 %ptrint, 31
153 %maskcond = icmp eq i64 %maskedptr, 0
154 tail call void @llvm.assume(i1 %maskcond)
157 for.body: ; preds = %entry, %for.body
158 %indvars.iv = phi i64 [ -4, %entry ], [ %indvars.iv.next, %for.body ]
159 %r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
160 %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
161 %0 = load i32, i32* %arrayidx, align 4
162 %add = add nsw i32 %0, %r.06
163 %indvars.iv.next = add i64 %indvars.iv, 4
164 %1 = trunc i64 %indvars.iv.next to i32
165 %cmp = icmp slt i32 %1, 2048
166 br i1 %cmp, label %for.body, label %for.end
168 for.end: ; preds = %for.body
169 %add.lcssa = phi i32 [ %add, %for.body ]
173 ; CHECK: load i32, i32* %arrayidx, align 16
174 ; CHECK: ret i32 %add.lcssa
; @moo: the 32-byte alignment assumption on %a should propagate to the memset
; destination, raising its align attribute from 4 to 32 (checked below).
; NOTE(review): the ret and closing brace appear missing from this view per
; the embedded numbering gaps.
177 define i32 @moo(i32* nocapture %a) nounwind uwtable {
179 %ptrint = ptrtoint i32* %a to i64
180 %maskedptr = and i64 %ptrint, 31
181 %maskcond = icmp eq i64 %maskedptr, 0
182 tail call void @llvm.assume(i1 %maskcond)
183 %0 = bitcast i32* %a to i8*
184 tail call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 64, i1 false)
188 ; CHECK: @llvm.memset.p0i8.i64(i8* align 32 %0, i8 0, i64 64, i1 false)
189 ; CHECK: ret i32 undef
; @moo2: two independent assumptions — %a is 32-byte aligned (mask 31) and %b
; is 128-byte aligned (mask 127). Both should propagate to the memcpy,
; raising the destination to align 32 and the source to align 128 (checked
; below).
; NOTE(review): the ret and closing brace appear missing from this view per
; the embedded numbering gaps.
192 define i32 @moo2(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {
194 %ptrint = ptrtoint i32* %a to i64
195 %maskedptr = and i64 %ptrint, 31
196 %maskcond = icmp eq i64 %maskedptr, 0
197 tail call void @llvm.assume(i1 %maskcond)
198 %ptrint1 = ptrtoint i32* %b to i64
199 %maskedptr3 = and i64 %ptrint1, 127
200 %maskcond4 = icmp eq i64 %maskedptr3, 0
201 tail call void @llvm.assume(i1 %maskcond4)
202 %0 = bitcast i32* %a to i8*
203 %1 = bitcast i32* %b to i8*
204 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 64, i1 false)
208 ; CHECK: @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 %0, i8* align 128 %1, i64 64, i1 false)
209 ; CHECK: ret i32 undef
; Declarations of the intrinsics exercised above: llvm.assume carries the
; alignment facts; memset/memcpy are the transfer intrinsics whose align
; attributes the pass updates.
212 declare void @llvm.assume(i1) nounwind
214 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
215 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind