; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa-walker>' -verify-memoryssa < %s 2>&1 | FileCheck %s
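
; This file checks how MemorySSA's walker handles loads and stores tagged with
; !invariant.group and the llvm.launder.invariant.group barrier intrinsic.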
@g = external global i32
; CHECK-LABEL: define {{.*}} @global(
define i32 @global() {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i32 0
  store i32 0, ptr @g, align 4, !invariant.group !0

; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: call void @clobber
  call void @clobber(ptr @g)

; FIXME: this could be clobbered by 1 if we walked the instruction list for loads/stores to @g.
; But we can't look at the uses of @g in a function analysis.
; CHECK: MemoryUse(2) {{.*}} clobbered by 2
; CHECK-NEXT: %1 = load i32
  %1 = load i32, ptr @g, align 4, !invariant.group !0
; CHECK-LABEL: define {{.*}} @global2(
define i32 @global2() {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i32 0
  store i32 0, ptr inttoptr (i64 ptrtoint (ptr @g to i64) to ptr), align 4, !invariant.group !0

; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: call void @clobber
  call void @clobber(ptr inttoptr (i64 ptrtoint (ptr @g to i64) to ptr))

; FIXME: this could be clobbered by 1 if we walked the instruction list for loads/stores to @g.
; But we can't look at the uses of @g in a function analysis.
; CHECK: MemoryUse(2) {{.*}} clobbered by 2
; CHECK-NEXT: %1 = load i32
  %1 = load i32, ptr inttoptr (i64 ptrtoint (ptr @g to i64) to ptr), align 4, !invariant.group !0
; CHECK-LABEL: define {{.*}} @foo(
define i32 @foo(ptr %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i32 0
  store i32 0, ptr %a, align 4, !invariant.group !0

; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: store i32 1
  store i32 1, ptr @g, align 4

; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: %a8 = call ptr @llvm.launder.invariant.group.p0(ptr %a)
  %a8 = call ptr @llvm.launder.invariant.group.p0(ptr %a)
; This has to be MemoryUse(2), because we can't skip the barrier based on
; !invariant.group.
; CHECK: MemoryUse(2)
; CHECK-NEXT: %1 = load i32
  %1 = load i32, ptr %a8, align 4, !invariant.group !0
; CHECK-LABEL: define {{.*}} @volatile1(
define void @volatile1(ptr %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i32 0
  store i32 0, ptr %a, align 4, !invariant.group !0

; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: call void @clobber
  call void @clobber(ptr %a)
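
; The load below is volatile, so MemorySSA models it as a MemoryDef, and the
; walker does not use !invariant.group to step past the clobbering call.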
; CHECK: 3 = MemoryDef(2){{.*}} clobbered by 2
; CHECK-NEXT: load volatile
  %b = load volatile i32, ptr %a, align 4, !invariant.group !0
; CHECK-LABEL: define {{.*}} @volatile2(
define void @volatile2(ptr %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store volatile i32 0
  store volatile i32 0, ptr %a, align 4, !invariant.group !0

; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: call void @clobber
  call void @clobber(ptr %a)
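
; The !invariant.group store above is volatile, so the walker does not use it
; to optimize the load below past the clobbering call.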
; CHECK: MemoryUse(2){{.*}} clobbered by 2
; CHECK-NEXT: load i32
  %b = load i32, ptr %a, align 4, !invariant.group !0
; CHECK-LABEL: define {{.*}} @skipBarrier(
define i32 @skipBarrier(ptr %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i32 0
  store i32 0, ptr %a, align 4, !invariant.group !0

; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: %a8 = call ptr @llvm.launder.invariant.group.p0(ptr %a)
  %a8 = call ptr @llvm.launder.invariant.group.p0(ptr %a)

; We can skip the barrier only if the "skip" is not based on !invariant.group.
; CHECK: MemoryUse(1)
; CHECK-NEXT: %1 = load i32
  %1 = load i32, ptr %a8, align 4, !invariant.group !0
; CHECK-LABEL: define {{.*}} @skipBarrier2(
define i32 @skipBarrier2(ptr %a) {

; CHECK: MemoryUse(liveOnEntry)
; CHECK-NEXT: %v = load i32
  %v = load i32, ptr %a, align 4, !invariant.group !0

; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: %a8 = call ptr @llvm.launder.invariant.group.p0(ptr %a)
  %a8 = call ptr @llvm.launder.invariant.group.p0(ptr %a)

; We can skip the barrier only if the "skip" is not based on !invariant.group.
; CHECK: MemoryUse(liveOnEntry)
; CHECK-NEXT: %v2 = load i32
  %v2 = load i32, ptr %a8, align 4, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: store i32 1
  store i32 1, ptr @g, align 4
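
; %v2 and %v3 load the same pointer with !invariant.group, so the walker gives
; %v3 the same clobber as %v2 (liveOnEntry) and steps over the store to @g.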
; CHECK: MemoryUse(2) {{.*}} clobbered by liveOnEntry
; CHECK-NEXT: %v3 = load i32
  %v3 = load i32, ptr %a8, align 4, !invariant.group !0
  %add = add nsw i32 %v2, %v3
  %add2 = add nsw i32 %add, %v
; CHECK-LABEL: define {{.*}} @handleInvariantGroups(
define i32 @handleInvariantGroups(ptr %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i32 0
  store i32 0, ptr %a, align 4, !invariant.group !0

; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: store i32 1
  store i32 1, ptr @g, align 4
; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: %a8 = call ptr @llvm.launder.invariant.group.p0(ptr %a)
  %a8 = call ptr @llvm.launder.invariant.group.p0(ptr %a)

; CHECK: MemoryUse(2)
; CHECK-NEXT: %1 = load i32
  %1 = load i32, ptr %a8, align 4, !invariant.group !0

; CHECK: 4 = MemoryDef(3)
; CHECK-NEXT: store i32 2
  store i32 2, ptr @g, align 4
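
; %1 and %2 load the same !invariant.group pointer, so the walker gives %2 the
; same clobber as %1 (2) and steps over the second store to @g.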
; CHECK: MemoryUse(4) {{.*}} clobbered by 2
; CHECK-NEXT: %2 = load i32
  %2 = load i32, ptr %a8, align 4, !invariant.group !0
  %add = add nsw i32 %1, %2
; CHECK-LABEL: define {{.*}} @loop(
define i32 @loop(i1 %a) {
  %0 = alloca i32, align 4
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i32 4
  store i32 4, ptr %0, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: call void @clobber
  call void @clobber(ptr %0)
  br i1 %a, label %Loop.Body, label %Loop.End
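
; In both blocks of the loop the loads below are optimized to the
; !invariant.group store (1), stepping over the clobbering call (2).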
; CHECK: MemoryUse(2) {{.*}} clobbered by 1
; CHECK-NEXT: %1 = load i32
  %1 = load i32, ptr %0, !invariant.group !0
  br i1 %a, label %Loop.End, label %Loop.Body

; CHECK: MemoryUse(2) {{.*}} clobbered by 1
; CHECK-NEXT: %2 = load
  %2 = load i32, ptr %0, align 4, !invariant.group !0
  br i1 %a, label %Ret, label %Loop.Body
; CHECK-LABEL: define {{.*}} @loop2(
define i8 @loop2(ptr %p) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i8
  store i8 4, ptr %p, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: call void @clobber
  call void @clobber8(ptr %p)

; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: %after = call ptr @llvm.launder.invariant.group.p0(ptr %p)
  %after = call ptr @llvm.launder.invariant.group.p0(ptr %p)
  br i1 undef, label %Loop.Body, label %Loop.End
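
; 6 and 5 below are the MemoryPhis of Loop.Body and Loop.End; only the defs
; 1-4 appear textually in the IR.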
; CHECK: MemoryUse(6)
; CHECK-NEXT: %0 = load i8
  %0 = load i8, ptr %after, !invariant.group !0

; CHECK: MemoryUse(6) {{.*}} clobbered by 1
; CHECK-NEXT: %1 = load i8
  %1 = load i8, ptr %p, !invariant.group !0

; CHECK: 4 = MemoryDef(6)
  store i8 4, ptr %after, !invariant.group !0
  br i1 undef, label %Loop.End, label %Loop.Body

; CHECK: MemoryUse(5)
; CHECK-NEXT: %2 = load
  %2 = load i8, ptr %after, align 4, !invariant.group !0

; CHECK: MemoryUse(5) {{.*}} clobbered by 1
; CHECK-NEXT: %3 = load
  %3 = load i8, ptr %p, align 4, !invariant.group !0
  br i1 undef, label %Ret, label %Loop.Body
; CHECK-LABEL: define {{.*}} @loop3(
define i8 @loop3(ptr %p) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i8
  store i8 4, ptr %p, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: call void @clobber
  call void @clobber8(ptr %p)

; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: %after = call ptr @llvm.launder.invariant.group.p0(ptr %p)
  %after = call ptr @llvm.launder.invariant.group.p0(ptr %p)
  br i1 undef, label %Loop.Body, label %Loop.End
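
; 8 and 7 below are the MemoryPhis of Loop.Body and Loop.End; only the defs
; 1-6 appear textually in the IR.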
; CHECK: MemoryUse(8)
; CHECK-NEXT: %0 = load i8
  %0 = load i8, ptr %after, !invariant.group !0

; CHECK: 4 = MemoryDef(8)
; CHECK-NEXT: call void @clobber8
  call void @clobber8(ptr %after)

; CHECK: MemoryUse(4) {{.*}} clobbered by 8
; CHECK-NEXT: %1 = load i8
  %1 = load i8, ptr %after, !invariant.group !0

  br i1 undef, label %Loop.next, label %Loop.Body

; CHECK: 5 = MemoryDef(4)
; CHECK-NEXT: call void @clobber8
  call void @clobber8(ptr %after)

; CHECK: MemoryUse(5) {{.*}} clobbered by 8
; CHECK-NEXT: %2 = load i8
  %2 = load i8, ptr %after, !invariant.group !0

  br i1 undef, label %Loop.End, label %Loop.Body

; CHECK: MemoryUse(7)
; CHECK-NEXT: %3 = load
  %3 = load i8, ptr %after, align 4, !invariant.group !0

; CHECK: 6 = MemoryDef(7)
; CHECK-NEXT: call void @clobber8
  call void @clobber8(ptr %after)

; CHECK: MemoryUse(6) {{.*}} clobbered by 7
; CHECK-NEXT: %4 = load
  %4 = load i8, ptr %after, align 4, !invariant.group !0
  br i1 undef, label %Ret, label %Loop.Body
; CHECK-LABEL: define {{.*}} @loop4(
define i8 @loop4(ptr %p) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i8
  store i8 4, ptr %p, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: call void @clobber
  call void @clobber8(ptr %p)
; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: %after = call ptr @llvm.launder.invariant.group.p0(ptr %p)
  %after = call ptr @llvm.launder.invariant.group.p0(ptr %p)
  br i1 undef, label %Loop.Pre, label %Loop.End
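
; Loop.Pre has a single predecessor, so its load below uses 2 directly;
; 6 and 5 are the MemoryPhis of Loop.Body and Loop.End.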
; CHECK: MemoryUse(2)
; CHECK-NEXT: %0 = load i8
  %0 = load i8, ptr %after, !invariant.group !0

; CHECK: MemoryUse(6)
; CHECK-NEXT: %1 = load i8
  %1 = load i8, ptr %after, !invariant.group !0

; CHECK: MemoryUse(6) {{.*}} clobbered by 1
; CHECK-NEXT: %2 = load i8
  %2 = load i8, ptr %p, !invariant.group !0

; CHECK: 4 = MemoryDef(6)
  store i8 4, ptr %after, !invariant.group !0
  br i1 undef, label %Loop.End, label %Loop.Body

; CHECK: MemoryUse(5)
; CHECK-NEXT: %3 = load
  %3 = load i8, ptr %after, align 4, !invariant.group !0

; CHECK: MemoryUse(5) {{.*}} clobbered by 1
; CHECK-NEXT: %4 = load
  %4 = load i8, ptr %p, align 4, !invariant.group !0
  br i1 undef, label %Ret, label %Loop.Body
; In the future we would like to CSE barriers if there is no clobber between them.
; CHECK-LABEL: define {{.*}} @optimizable(
define i8 @optimizable() {
  %ptr = alloca i8
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i8 42, ptr %ptr, align 1, !invariant.group !0
  store i8 42, ptr %ptr, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: call ptr @llvm.launder.invariant.group
  %ptr2 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr)
; FIXME: This one could be CSEd.
; CHECK: 3 = MemoryDef(2)
; CHECK: call ptr @llvm.launder.invariant.group
  %ptr3 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr)
; CHECK: 4 = MemoryDef(3)
; CHECK-NEXT: call void @clobber8(ptr %ptr)
  call void @clobber8(ptr %ptr)
; CHECK: 5 = MemoryDef(4)
; CHECK-NEXT: call void @use(ptr %ptr2)
  call void @use(ptr %ptr2)
; CHECK: 6 = MemoryDef(5)
; CHECK-NEXT: call void @use(ptr %ptr3)
  call void @use(ptr %ptr3)
; CHECK: MemoryUse(6)
; CHECK-NEXT: load i8, ptr %ptr3, {{.*}}!invariant.group
  %v = load i8, ptr %ptr3, !invariant.group !0
; CHECK-LABEL: define {{.*}} @unoptimizable2()
define i8 @unoptimizable2() {
  %ptr = alloca i8
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i8 42, ptr %ptr, align 1, !invariant.group !0
  store i8 42, ptr %ptr, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: call ptr @llvm.launder.invariant.group
  %ptr2 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr)
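; Unlike in @optimizable, the store below clobbers %ptr between the two
; launder calls, so they could not be CSEd.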
; CHECK: 3 = MemoryDef(2)
  store i8 43, ptr %ptr
; CHECK: 4 = MemoryDef(3)
; CHECK-NEXT: call ptr @llvm.launder.invariant.group
  %ptr3 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr)
; CHECK: 5 = MemoryDef(4)
; CHECK-NEXT: call void @clobber8(ptr %ptr)
  call void @clobber8(ptr %ptr)
; CHECK: 6 = MemoryDef(5)
; CHECK-NEXT: call void @use(ptr %ptr2)
  call void @use(ptr %ptr2)
; CHECK: 7 = MemoryDef(6)
; CHECK-NEXT: call void @use(ptr %ptr3)
  call void @use(ptr %ptr3)
; CHECK: MemoryUse(7)
; CHECK-NEXT: %v = load i8, ptr %ptr3, align 1, !invariant.group !0
  %v = load i8, ptr %ptr3, !invariant.group !0
declare ptr @llvm.launder.invariant.group.p0(ptr)
declare void @clobber(ptr)
declare void @clobber8(ptr)
declare void @use(ptr readonly)

!0 = !{}