; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: opt < %s -passes=infer-alignment -S | FileCheck %s

; ------------------------------------------------------------------------------
; Simple test
; ------------------------------------------------------------------------------
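
; The ptrtoint/and/icmp sequence below encodes the assumption that the low
; 5 bits of %a are zero, i.e. that %a is 32-byte aligned; infer-alignment
; should propagate that fact forward to the load and store.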
define void @simple_forwardpropagate(ptr %a) {
; CHECK-LABEL: define void @simple_forwardpropagate
; CHECK-SAME: (ptr [[A:%.*]]) {
; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT:    [[LOAD_A:%.*]] = load i32, ptr [[A]], align 32
; CHECK-NEXT:    store i32 345, ptr [[A]], align 32
; CHECK-NEXT:    ret void
;
  %ptrint = ptrtoint ptr %a to i64
  %maskedptr = and i64 %ptrint, 31
  %maskcond = icmp eq i64 %maskedptr, 0
  tail call void @llvm.assume(i1 %maskcond)

  %load.a = load i32, ptr %a, align 4
  store i32 345, ptr %a, align 4

  ret void
}
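
; Alignment information from an assume that appears after the memory
; accesses should also be applied to them (backward propagation).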
define void @simple_backpropagate(ptr %a) {
; CHECK-LABEL: define void @simple_backpropagate
; CHECK-SAME: (ptr [[A:%.*]]) {
; CHECK-NEXT:    [[LOAD_A:%.*]] = load i32, ptr [[A]], align 32
; CHECK-NEXT:    store i32 345, ptr [[A]], align 32
; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT:    ret void
;
  %load.a = load i32, ptr %a, align 4
  store i32 345, ptr %a, align 4

  %ptrint = ptrtoint ptr %a to i64
  %maskedptr = and i64 %ptrint, 31
  %maskcond = icmp eq i64 %maskedptr, 0
  tail call void @llvm.assume(i1 %maskcond)

  ret void
}
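
; Same as @simple_forwardpropagate, but the alignment assumption is given
; as an "align" operand bundle on llvm.assume rather than the
; ptrtoint/and/icmp pattern.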
define void @simple_forwardpropagate_bundle(ptr %a) {
; CHECK-LABEL: define void @simple_forwardpropagate_bundle
; CHECK-SAME: (ptr [[A:%.*]]) {
; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i32 32) ]
; CHECK-NEXT:    [[LOAD_A:%.*]] = load i32, ptr [[A]], align 32
; CHECK-NEXT:    store i32 345, ptr [[A]], align 32
; CHECK-NEXT:    ret void
;
  call void @llvm.assume(i1 true) ["align"(ptr %a, i32 32)]
  %load.a = load i32, ptr %a, align 4
  store i32 345, ptr %a, align 4
  ret void
}
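
; Operand bundle variant of @simple_backpropagate.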
define void @simple_backpropagate_bundle(ptr %a) {
; CHECK-LABEL: define void @simple_backpropagate_bundle
; CHECK-SAME: (ptr [[A:%.*]]) {
; CHECK-NEXT:    [[LOAD_A:%.*]] = load i32, ptr [[A]], align 32
; CHECK-NEXT:    store i32 345, ptr [[A]], align 32
; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i32 32) ]
; CHECK-NEXT:    ret void
;
  %load.a = load i32, ptr %a, align 4
  store i32 345, ptr %a, align 4
  call void @llvm.assume(i1 true) ["align"(ptr %a, i32 32)]
  ret void
}

; ------------------------------------------------------------------------------
; Loop test
; ------------------------------------------------------------------------------
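
; Both %a and %b are assumed to be 64-byte aligned, and %i steps by 16 i32
; elements (64 bytes) per iteration, so every address in the loop body stays
; 64-byte aligned and the pass can bump the load/store alignment to 64.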
define void @loop_forwardpropagate(ptr %a, ptr %b) {
; CHECK-LABEL: define void @loop_forwardpropagate
; CHECK-SAME: (ptr [[A:%.*]], ptr [[B:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT:    [[PTRINT2:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT:    [[MASKEDPTR2:%.*]] = and i64 [[PTRINT2]], 63
; CHECK-NEXT:    [[MASKEDCOND2:%.*]] = icmp eq i64 [[MASKEDPTR2]], 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKEDCOND2]])
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[I_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I]]
; CHECK-NEXT:    [[LOAD_B:%.*]] = load i32, ptr [[GEP_B]], align 64
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[LOAD_B]], 1
; CHECK-NEXT:    [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I]]
; CHECK-NEXT:    store i32 [[ADD]], ptr [[GEP_A]], align 64
; CHECK-NEXT:    [[I_NEXT]] = add nuw nsw i64 [[I]], 16
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[I_NEXT]], 1648
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %ptrint = ptrtoint ptr %a to i64
  %maskedptr = and i64 %ptrint, 63
  %maskcond = icmp eq i64 %maskedptr, 0
  tail call void @llvm.assume(i1 %maskcond)

  %ptrint2 = ptrtoint ptr %b to i64
  %maskedptr2 = and i64 %ptrint2, 63
  %maskedcond2 = icmp eq i64 %maskedptr2, 0
  tail call void @llvm.assume(i1 %maskedcond2)

  br label %for.body

for.body:
  %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]

  %gep.b = getelementptr inbounds i32, ptr %b, i64 %i
  %load.b = load i32, ptr %gep.b, align 4
  %add = add nsw i32 %load.b, 1

  %gep.a = getelementptr inbounds i32, ptr %a, i64 %i
  store i32 %add, ptr %gep.a, align 4

  %i.next = add nuw nsw i64 %i, 16
  %cmp = icmp slt i64 %i.next, 1648

  br i1 %cmp, label %for.body, label %for.end

for.end:
  ret void
}
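
; Same loop, with the 64-byte alignment of %a and %b asserted through
; "align" operand bundles instead of the ptrtoint/and/icmp pattern.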
define void @loop_forwardpropagate_bundle(ptr %a, ptr %b) {
; CHECK-LABEL: define void @loop_forwardpropagate_bundle
; CHECK-SAME: (ptr [[A:%.*]], ptr [[B:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    tail call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i32 64) ]
; CHECK-NEXT:    tail call void @llvm.assume(i1 true) [ "align"(ptr [[B]], i32 64) ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[I_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I]]
; CHECK-NEXT:    [[LOAD_B:%.*]] = load i32, ptr [[GEP_B]], align 64
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[LOAD_B]], 1
; CHECK-NEXT:    [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I]]
; CHECK-NEXT:    store i32 [[ADD]], ptr [[GEP_A]], align 64
; CHECK-NEXT:    [[I_NEXT]] = add nuw nsw i64 [[I]], 16
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[I_NEXT]], 1648
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  tail call void @llvm.assume(i1 true) ["align"(ptr %a, i32 64)]
  tail call void @llvm.assume(i1 true) ["align"(ptr %b, i32 64)]
  br label %for.body

for.body:
  %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]

  %gep.b = getelementptr inbounds i32, ptr %b, i64 %i
  %load.b = load i32, ptr %gep.b, align 4
  %add = add nsw i32 %load.b, 1

  %gep.a = getelementptr inbounds i32, ptr %a, i64 %i
  store i32 %add, ptr %gep.a, align 4

  %i.next = add nuw nsw i64 %i, 16
  %cmp = icmp slt i64 %i.next, 1648

  br i1 %cmp, label %for.body, label %for.end

for.end:
  ret void
}

; Check that the assume is propagated backwards through all operations
; that are guaranteed to transfer execution to their successor
; (`isGuaranteedToTransferExecutionToSuccessor`), so it reaches the load
; of %a and marks it `align 32`.
define void @complex_backpropagate(ptr %a, ptr %b, ptr %c) {
; CHECK-LABEL: define void @complex_backpropagate
; CHECK-SAME: (ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) {
; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca i64, align 8
; CHECK-NEXT:    [[LOAD_A:%.*]] = load i32, ptr [[A]], align 32
; CHECK-NEXT:    [[LOAD_B:%.*]] = load i32, ptr [[B]], align 4
; CHECK-NEXT:    store i32 [[LOAD_B]], ptr [[A]], align 32
; CHECK-NEXT:    [[OBJ_SIZE:%.*]] = call i64 @llvm.objectsize.i64.p0(ptr [[C]], i1 false, i1 false, i1 false)
; CHECK-NEXT:    store i64 [[OBJ_SIZE]], ptr [[ALLOCA]], align 8
; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT:    ret void
;
  %alloca = alloca i64
  %load.a = load i32, ptr %a, align 4

  %load.b = load i32, ptr %b
  store i32 %load.b, ptr %a

  %obj.size = call i64 @llvm.objectsize.i64.p0(ptr %c, i1 false)
  store i64 %obj.size, ptr %alloca

  %ptrint = ptrtoint ptr %a to i64
  %maskedptr = and i64 %ptrint, 31
  %maskcond = icmp eq i64 %maskedptr, 0
  tail call void @llvm.assume(i1 %maskcond)

  ret void
}
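
; Operand bundle variant of @complex_backpropagate.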
define void @complex_backpropagate_bundle(ptr %a, ptr %b, ptr %c) {
; CHECK-LABEL: define void @complex_backpropagate_bundle
; CHECK-SAME: (ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) {
; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca i64, align 8
; CHECK-NEXT:    [[LOAD_A:%.*]] = load i32, ptr [[A]], align 32
; CHECK-NEXT:    [[LOAD_B:%.*]] = load i32, ptr [[B]], align 4
; CHECK-NEXT:    store i32 [[LOAD_B]], ptr [[A]], align 32
; CHECK-NEXT:    [[OBJ_SIZE:%.*]] = call i64 @llvm.objectsize.i64.p0(ptr [[C]], i1 false, i1 false, i1 false)
; CHECK-NEXT:    store i64 [[OBJ_SIZE]], ptr [[ALLOCA]], align 8
; CHECK-NEXT:    tail call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i32 32) ]
; CHECK-NEXT:    ret void
;
  %alloca = alloca i64
  %load.a = load i32, ptr %a, align 4

  %load.b = load i32, ptr %b
  store i32 %load.b, ptr %a

  %obj.size = call i64 @llvm.objectsize.i64.p0(ptr %c, i1 false)
  store i64 %obj.size, ptr %alloca

  tail call void @llvm.assume(i1 true) ["align"(ptr %a, i32 32)]

  ret void
}

declare i64 @llvm.objectsize.i64.p0(ptr, i1)
declare void @llvm.assume(i1)