BuildBot fix, compiler complains about array decay to pointer
[llvm-core.git] / unittests / Analysis / MemorySSATest.cpp
blob 505e185190fde82d6d04d6e6a2d91950812f004a
1 //===- MemorySSA.cpp - Unit tests for MemorySSA ---------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 #include "llvm/Analysis/MemorySSA.h"
10 #include "llvm/Analysis/AliasAnalysis.h"
11 #include "llvm/Analysis/BasicAliasAnalysis.h"
12 #include "llvm/Analysis/MemorySSAUpdater.h"
13 #include "llvm/IR/BasicBlock.h"
14 #include "llvm/IR/DataLayout.h"
15 #include "llvm/IR/Dominators.h"
16 #include "llvm/IR/IRBuilder.h"
17 #include "llvm/IR/Instructions.h"
18 #include "llvm/IR/LLVMContext.h"
19 #include "gtest/gtest.h"
21 using namespace llvm;
23 const static char DLString[] = "e-i64:64-f80:128-n8:16:32:64-S128";
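// (Roughly: little-endian, 64-bit-aligned i64, 128-bit-aligned f80, native
// integer widths of 8/16/32/64 bits, and a 128-bit natural stack alignment.)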
25 /// There's a lot of common setup between these tests. This fixture helps reduce
26 /// that. Tests should mock up a function, store it in F, and then call
27 /// setupAnalyses().
28 class MemorySSATest : public testing::Test {
29 protected:
30 // N.B. Many of these members depend on each other (e.g. the Module depends on
31 // the Context, etc.). So, order matters here (and in TestAnalyses).
32 LLVMContext C;
33 Module M;
34 IRBuilder<> B;
35 DataLayout DL;
36 TargetLibraryInfoImpl TLII;
37 TargetLibraryInfo TLI;
38 Function *F;
40 // Things that we need to build after the function is created.
41 struct TestAnalyses {
42 DominatorTree DT;
43 AssumptionCache AC;
44 AAResults AA;
45 BasicAAResult BAA;
46 // We need to defer MSSA construction until AA is *entirely* set up, which
47 // requires calling addAAResult. Hence, we just use a pointer here.
48 std::unique_ptr<MemorySSA> MSSA;
49 MemorySSAWalker *Walker;
51 TestAnalyses(MemorySSATest &Test)
52 : DT(*Test.F), AC(*Test.F), AA(Test.TLI),
53 BAA(Test.DL, *Test.F, Test.TLI, AC, &DT) {
54 AA.addAAResult(BAA);
55 MSSA = make_unique<MemorySSA>(*Test.F, &AA, &DT);
56 Walker = MSSA->getWalker();
57 }
58 };
60 std::unique_ptr<TestAnalyses> Analyses;
62 void setupAnalyses() {
63 assert(F);
64 Analyses.reset(new TestAnalyses(*this));
65 }
67 public:
68 MemorySSATest()
69 : M("MemorySSATest", C), B(C), DL(DLString), TLI(TLII), F(nullptr) {}
72 TEST_F(MemorySSATest, CreateALoad) {
73 // We create a diamond where there is a store on one side, and then after
74 // building MemorySSA, create a load after the merge point, and use it to test
75 // updating by creating an access for the load.
76 F = Function::Create(
77 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
78 GlobalValue::ExternalLinkage, "F", &M);
79 BasicBlock *Entry(BasicBlock::Create(C, "", F));
80 BasicBlock *Left(BasicBlock::Create(C, "", F));
81 BasicBlock *Right(BasicBlock::Create(C, "", F));
82 BasicBlock *Merge(BasicBlock::Create(C, "", F));
83 B.SetInsertPoint(Entry);
84 B.CreateCondBr(B.getTrue(), Left, Right);
85 B.SetInsertPoint(Left);
86 Argument *PointerArg = &*F->arg_begin();
87 B.CreateStore(B.getInt8(16), PointerArg);
88 BranchInst::Create(Merge, Left);
89 BranchInst::Create(Merge, Right);
91 setupAnalyses();
92 MemorySSA &MSSA = *Analyses->MSSA;
93 MemorySSAUpdater Updater(&MSSA);
94 // Add the load
95 B.SetInsertPoint(Merge);
96 LoadInst *LoadInst = B.CreateLoad(PointerArg);
98 // MemoryPHI should already exist.
99 MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
100 EXPECT_NE(MP, nullptr);
102 // Create the load memory access
103 MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
104 LoadInst, MP, Merge, MemorySSA::Beginning));
105 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
106 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
107 MSSA.verifyMemorySSA();
108 }
109 TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
110 // We create a diamond, then build memoryssa with no memory accesses, and
111 // incrementally update it by inserting a store in the entry, a load in the
112 // merge point, then a store in the branch, another load in the merge point,
113 // and then a store in the entry.
114 F = Function::Create(
115 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
116 GlobalValue::ExternalLinkage, "F", &M);
117 BasicBlock *Entry(BasicBlock::Create(C, "", F));
118 BasicBlock *Left(BasicBlock::Create(C, "", F));
119 BasicBlock *Right(BasicBlock::Create(C, "", F));
120 BasicBlock *Merge(BasicBlock::Create(C, "", F));
121 B.SetInsertPoint(Entry);
122 B.CreateCondBr(B.getTrue(), Left, Right);
123 B.SetInsertPoint(Left, Left->begin());
124 Argument *PointerArg = &*F->arg_begin();
125 B.SetInsertPoint(Left);
126 B.CreateBr(Merge);
127 B.SetInsertPoint(Right);
128 B.CreateBr(Merge);
130 setupAnalyses();
131 MemorySSA &MSSA = *Analyses->MSSA;
132 MemorySSAUpdater Updater(&MSSA);
133 // Add the store
134 B.SetInsertPoint(Entry, Entry->begin());
135 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
136 MemoryAccess *EntryStoreAccess = Updater.createMemoryAccessInBB(
137 EntryStore, nullptr, Entry, MemorySSA::Beginning);
138 Updater.insertDef(cast<MemoryDef>(EntryStoreAccess));
140 // Add the load
141 B.SetInsertPoint(Merge, Merge->begin());
142 LoadInst *FirstLoad = B.CreateLoad(PointerArg);
144 // MemoryPHI should not already exist.
145 MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
146 EXPECT_EQ(MP, nullptr);
148 // Create the load memory access
149 MemoryUse *FirstLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
150 FirstLoad, nullptr, Merge, MemorySSA::Beginning));
151 Updater.insertUse(FirstLoadAccess);
152 // Should just have a load using the entry access, because it should discover
153 // the phi is trivial
154 EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), EntryStoreAccess);
156 // Create a store on the left
157 // Add the store
158 B.SetInsertPoint(Left, Left->begin());
159 StoreInst *LeftStore = B.CreateStore(B.getInt8(16), PointerArg);
160 MemoryAccess *LeftStoreAccess = Updater.createMemoryAccessInBB(
161 LeftStore, nullptr, Left, MemorySSA::Beginning);
162 Updater.insertDef(cast<MemoryDef>(LeftStoreAccess), false);
163 // We don't touch existing loads, so we need to create a new one to get a phi
164 // Add the second load
165 B.SetInsertPoint(Merge, Merge->begin());
166 LoadInst *SecondLoad = B.CreateLoad(PointerArg);
168 // MemoryPHI should not already exist.
169 MP = MSSA.getMemoryAccess(Merge);
170 EXPECT_EQ(MP, nullptr);
172 // Create the load memory access
173 MemoryUse *SecondLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
174 SecondLoad, nullptr, Merge, MemorySSA::Beginning));
175 Updater.insertUse(SecondLoadAccess);
176 // Now the load should be a phi of the entry store and the left store
177 MemoryPhi *MergePhi =
178 dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
179 EXPECT_NE(MergePhi, nullptr);
180 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
181 EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
182 // Now create a store below the existing one in the entry
183 B.SetInsertPoint(Entry, --Entry->end());
184 StoreInst *SecondEntryStore = B.CreateStore(B.getInt8(16), PointerArg);
185 MemoryAccess *SecondEntryStoreAccess = Updater.createMemoryAccessInBB(
186 SecondEntryStore, nullptr, Entry, MemorySSA::End);
187 // Insert it twice just to test renaming
188 Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), false);
189 EXPECT_NE(FirstLoadAccess->getDefiningAccess(), MergePhi);
190 Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), true);
191 EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), MergePhi);
192 // and make sure the phi below it got updated, despite being blocks away
193 MergePhi = dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
194 EXPECT_NE(MergePhi, nullptr);
195 EXPECT_EQ(MergePhi->getIncomingValue(0), SecondEntryStoreAccess);
196 EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
197 MSSA.verifyMemorySSA();
198 }
200 TEST_F(MemorySSATest, CreateALoadUpdater) {
201 // We create a diamond, then build memoryssa with no memory accesses, and
202 // incrementally update it by inserting a store in one of the branches, and a
203 // load in the merge point
204 F = Function::Create(
205 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
206 GlobalValue::ExternalLinkage, "F", &M);
207 BasicBlock *Entry(BasicBlock::Create(C, "", F));
208 BasicBlock *Left(BasicBlock::Create(C, "", F));
209 BasicBlock *Right(BasicBlock::Create(C, "", F));
210 BasicBlock *Merge(BasicBlock::Create(C, "", F));
211 B.SetInsertPoint(Entry);
212 B.CreateCondBr(B.getTrue(), Left, Right);
213 B.SetInsertPoint(Left, Left->begin());
214 Argument *PointerArg = &*F->arg_begin();
215 B.SetInsertPoint(Left);
216 B.CreateBr(Merge);
217 B.SetInsertPoint(Right);
218 B.CreateBr(Merge);
220 setupAnalyses();
221 MemorySSA &MSSA = *Analyses->MSSA;
222 MemorySSAUpdater Updater(&MSSA);
223 B.SetInsertPoint(Left, Left->begin());
224 // Add the store
225 StoreInst *SI = B.CreateStore(B.getInt8(16), PointerArg);
226 MemoryAccess *StoreAccess =
227 Updater.createMemoryAccessInBB(SI, nullptr, Left, MemorySSA::Beginning);
228 Updater.insertDef(cast<MemoryDef>(StoreAccess));
230 // Add the load
231 B.SetInsertPoint(Merge, Merge->begin());
232 LoadInst *LoadInst = B.CreateLoad(PointerArg);
234 // MemoryPHI should not already exist.
235 MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
236 EXPECT_EQ(MP, nullptr);
238 // Create the load memory access
239 MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
240 LoadInst, nullptr, Merge, MemorySSA::Beginning));
241 Updater.insertUse(LoadAccess);
242 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
243 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
244 MSSA.verifyMemorySSA();
245 }
247 TEST_F(MemorySSATest, SinkLoad) {
248 F = Function::Create(
249 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
250 GlobalValue::ExternalLinkage, "F", &M);
251 BasicBlock *Entry(BasicBlock::Create(C, "", F));
252 BasicBlock *Left(BasicBlock::Create(C, "", F));
253 BasicBlock *Right(BasicBlock::Create(C, "", F));
254 BasicBlock *Merge(BasicBlock::Create(C, "", F));
255 B.SetInsertPoint(Entry);
256 B.CreateCondBr(B.getTrue(), Left, Right);
257 B.SetInsertPoint(Left, Left->begin());
258 Argument *PointerArg = &*F->arg_begin();
259 B.SetInsertPoint(Left);
260 B.CreateBr(Merge);
261 B.SetInsertPoint(Right);
262 B.CreateBr(Merge);
264 // Load in left block
265 B.SetInsertPoint(Left, Left->begin());
266 LoadInst *LoadInst1 = B.CreateLoad(PointerArg);
267 // Store in merge block
268 B.SetInsertPoint(Merge, Merge->begin());
269 B.CreateStore(B.getInt8(16), PointerArg);
271 setupAnalyses();
272 MemorySSA &MSSA = *Analyses->MSSA;
273 MemorySSAUpdater Updater(&MSSA);
275 // Mimic sinking of a load:
276 // - clone load
277 // - insert in "exit" block
278 // - insert in mssa
279 // - remove from original block
281 LoadInst *LoadInstClone = cast<LoadInst>(LoadInst1->clone());
282 Merge->getInstList().insert(Merge->begin(), LoadInstClone);
283 MemoryAccess * NewLoadAccess =
284 Updater.createMemoryAccessInBB(LoadInstClone, nullptr,
285 LoadInstClone->getParent(),
286 MemorySSA::Beginning);
287 Updater.insertUse(cast<MemoryUse>(NewLoadAccess));
288 MSSA.verifyMemorySSA();
289 Updater.removeMemoryAccess(MSSA.getMemoryAccess(LoadInst1));
290 MSSA.verifyMemorySSA();
291 }
293 TEST_F(MemorySSATest, MoveAStore) {
294 // We create a diamond where there is a store in the entry, a store on one side, and
295 // a load at the end. After building MemorySSA, we test updating by moving
296 // the store from the side block to the entry block. This destroys the old
297 // access.
298 F = Function::Create(
299 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
300 GlobalValue::ExternalLinkage, "F", &M);
301 BasicBlock *Entry(BasicBlock::Create(C, "", F));
302 BasicBlock *Left(BasicBlock::Create(C, "", F));
303 BasicBlock *Right(BasicBlock::Create(C, "", F));
304 BasicBlock *Merge(BasicBlock::Create(C, "", F));
305 B.SetInsertPoint(Entry);
306 Argument *PointerArg = &*F->arg_begin();
307 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
308 B.CreateCondBr(B.getTrue(), Left, Right);
309 B.SetInsertPoint(Left);
310 StoreInst *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
311 BranchInst::Create(Merge, Left);
312 BranchInst::Create(Merge, Right);
313 B.SetInsertPoint(Merge);
314 B.CreateLoad(PointerArg);
315 setupAnalyses();
316 MemorySSA &MSSA = *Analyses->MSSA;
317 MemorySSAUpdater Updater(&MSSA);
318 // Move the store
319 SideStore->moveBefore(Entry->getTerminator());
320 MemoryAccess *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
321 MemoryAccess *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
322 MemoryAccess *NewStoreAccess = Updater.createMemoryAccessAfter(
323 SideStore, EntryStoreAccess, EntryStoreAccess);
324 EntryStoreAccess->replaceAllUsesWith(NewStoreAccess);
325 Updater.removeMemoryAccess(SideStoreAccess);
326 MSSA.verifyMemorySSA();
327 }
329 TEST_F(MemorySSATest, MoveAStoreUpdater) {
330 // We create a diamond where there is a store in the entry, a store on one side, and
331 // a load at the end. After building MemorySSA, we test updating by moving
332 // the store from the side block to the entry block. This destroys the old
333 // access.
334 F = Function::Create(
335 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
336 GlobalValue::ExternalLinkage, "F", &M);
337 BasicBlock *Entry(BasicBlock::Create(C, "", F));
338 BasicBlock *Left(BasicBlock::Create(C, "", F));
339 BasicBlock *Right(BasicBlock::Create(C, "", F));
340 BasicBlock *Merge(BasicBlock::Create(C, "", F));
341 B.SetInsertPoint(Entry);
342 Argument *PointerArg = &*F->arg_begin();
343 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
344 B.CreateCondBr(B.getTrue(), Left, Right);
345 B.SetInsertPoint(Left);
346 auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
347 BranchInst::Create(Merge, Left);
348 BranchInst::Create(Merge, Right);
349 B.SetInsertPoint(Merge);
350 auto *MergeLoad = B.CreateLoad(PointerArg);
351 setupAnalyses();
352 MemorySSA &MSSA = *Analyses->MSSA;
353 MemorySSAUpdater Updater(&MSSA);
355 // Move the store
356 SideStore->moveBefore(Entry->getTerminator());
357 auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
358 auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
359 auto *NewStoreAccess = Updater.createMemoryAccessAfter(
360 SideStore, EntryStoreAccess, EntryStoreAccess);
361 // Before, the load will point to a phi of the EntryStore and SideStore.
362 auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
363 EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
364 MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
365 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
366 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
367 Updater.removeMemoryAccess(SideStoreAccess);
368 Updater.insertDef(cast<MemoryDef>(NewStoreAccess));
369 // After, it's a phi of the new side store access.
370 EXPECT_EQ(MergePhi->getIncomingValue(0), NewStoreAccess);
371 EXPECT_EQ(MergePhi->getIncomingValue(1), NewStoreAccess);
372 MSSA.verifyMemorySSA();
373 }
375 TEST_F(MemorySSATest, MoveAStoreUpdaterMove) {
376 // We create a diamond where there is a store in the entry, a store on one side, and
377 // a load at the end. After building MemorySSA, we test updating by moving
378 // the store from the side block to the entry block. This does not destroy
379 // the old access.
380 F = Function::Create(
381 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
382 GlobalValue::ExternalLinkage, "F", &M);
383 BasicBlock *Entry(BasicBlock::Create(C, "", F));
384 BasicBlock *Left(BasicBlock::Create(C, "", F));
385 BasicBlock *Right(BasicBlock::Create(C, "", F));
386 BasicBlock *Merge(BasicBlock::Create(C, "", F));
387 B.SetInsertPoint(Entry);
388 Argument *PointerArg = &*F->arg_begin();
389 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
390 B.CreateCondBr(B.getTrue(), Left, Right);
391 B.SetInsertPoint(Left);
392 auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
393 BranchInst::Create(Merge, Left);
394 BranchInst::Create(Merge, Right);
395 B.SetInsertPoint(Merge);
396 auto *MergeLoad = B.CreateLoad(PointerArg);
397 setupAnalyses();
398 MemorySSA &MSSA = *Analyses->MSSA;
399 MemorySSAUpdater Updater(&MSSA);
401 // Move the store
402 auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
403 auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
404 // Before, the load will point to a phi of the EntryStore and SideStore.
405 auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
406 EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
407 MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
408 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
409 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
410 SideStore->moveBefore(*EntryStore->getParent(), ++EntryStore->getIterator());
411 Updater.moveAfter(SideStoreAccess, EntryStoreAccess);
412 // After, it's a phi of the side store.
413 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
414 EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);
416 MSSA.verifyMemorySSA();
417 }
419 TEST_F(MemorySSATest, MoveAStoreAllAround) {
420 // We create a diamond where there is a store in the entry, a store on one side, and
421 // a load at the end. After building MemorySSA, we test updating by moving
422 // the store from the side block to the entry block, then to the other side
423 // block, then to before the load. This does not destroy the old access.
424 F = Function::Create(
425 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
426 GlobalValue::ExternalLinkage, "F", &M);
427 BasicBlock *Entry(BasicBlock::Create(C, "", F));
428 BasicBlock *Left(BasicBlock::Create(C, "", F));
429 BasicBlock *Right(BasicBlock::Create(C, "", F));
430 BasicBlock *Merge(BasicBlock::Create(C, "", F));
431 B.SetInsertPoint(Entry);
432 Argument *PointerArg = &*F->arg_begin();
433 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
434 B.CreateCondBr(B.getTrue(), Left, Right);
435 B.SetInsertPoint(Left);
436 auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
437 BranchInst::Create(Merge, Left);
438 BranchInst::Create(Merge, Right);
439 B.SetInsertPoint(Merge);
440 auto *MergeLoad = B.CreateLoad(PointerArg);
441 setupAnalyses();
442 MemorySSA &MSSA = *Analyses->MSSA;
443 MemorySSAUpdater Updater(&MSSA);
445 // Move the store
446 auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
447 auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
448 // Before, the load will point to a phi of the EntryStore and SideStore.
449 auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
450 EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
451 MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
452 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
453 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
454 // Move the store before the entry store
455 SideStore->moveBefore(*EntryStore->getParent(), EntryStore->getIterator());
456 Updater.moveBefore(SideStoreAccess, EntryStoreAccess);
457 // After, it's a phi of the entry store.
458 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
459 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
460 MSSA.verifyMemorySSA();
461 // Now move the store to the right branch
462 SideStore->moveBefore(*Right, Right->begin());
463 Updater.moveToPlace(SideStoreAccess, Right, MemorySSA::Beginning);
464 MSSA.verifyMemorySSA();
465 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
466 EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);
467 // Now move it before the load
468 SideStore->moveBefore(MergeLoad);
469 Updater.moveBefore(SideStoreAccess, LoadAccess);
470 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
471 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
472 MSSA.verifyMemorySSA();
473 }
475 TEST_F(MemorySSATest, RemoveAPhi) {
476 // We create a diamond where there is a store on one side, and then a load
477 // after the merge point. This enables us to test a bunch of different
478 // removal cases.
479 F = Function::Create(
480 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
481 GlobalValue::ExternalLinkage, "F", &M);
482 BasicBlock *Entry(BasicBlock::Create(C, "", F));
483 BasicBlock *Left(BasicBlock::Create(C, "", F));
484 BasicBlock *Right(BasicBlock::Create(C, "", F));
485 BasicBlock *Merge(BasicBlock::Create(C, "", F));
486 B.SetInsertPoint(Entry);
487 B.CreateCondBr(B.getTrue(), Left, Right);
488 B.SetInsertPoint(Left);
489 Argument *PointerArg = &*F->arg_begin();
490 StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);
491 BranchInst::Create(Merge, Left);
492 BranchInst::Create(Merge, Right);
493 B.SetInsertPoint(Merge);
494 LoadInst *LoadInst = B.CreateLoad(PointerArg);
496 setupAnalyses();
497 MemorySSA &MSSA = *Analyses->MSSA;
498 MemorySSAUpdater Updater(&MSSA);
500 // Before, the load will be a use of a phi<store, liveonentry>.
501 MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst));
502 MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));
503 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
504 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
505 // Kill the store
506 Updater.removeMemoryAccess(StoreAccess);
507 MemoryPhi *MP = cast<MemoryPhi>(DefiningAccess);
508 // Verify the phi ended up as liveonentry, liveonentry
509 for (auto &Op : MP->incoming_values())
510 EXPECT_TRUE(MSSA.isLiveOnEntryDef(cast<MemoryAccess>(Op.get())));
511 // Replace the phi uses with the live on entry def
512 MP->replaceAllUsesWith(MSSA.getLiveOnEntryDef());
513 // Verify the load is now defined by liveOnEntryDef
514 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
515 // Remove the PHI
516 Updater.removeMemoryAccess(MP);
517 MSSA.verifyMemorySSA();
518 }
520 TEST_F(MemorySSATest, RemoveMemoryAccess) {
521 // We create a diamond where there is a store on one side, and then a load
522 // after the merge point. This enables us to test a bunch of different
523 // removal cases.
524 F = Function::Create(
525 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
526 GlobalValue::ExternalLinkage, "F", &M);
527 BasicBlock *Entry(BasicBlock::Create(C, "", F));
528 BasicBlock *Left(BasicBlock::Create(C, "", F));
529 BasicBlock *Right(BasicBlock::Create(C, "", F));
530 BasicBlock *Merge(BasicBlock::Create(C, "", F));
531 B.SetInsertPoint(Entry);
532 B.CreateCondBr(B.getTrue(), Left, Right);
533 B.SetInsertPoint(Left);
534 Argument *PointerArg = &*F->arg_begin();
535 StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);
536 BranchInst::Create(Merge, Left);
537 BranchInst::Create(Merge, Right);
538 B.SetInsertPoint(Merge);
539 LoadInst *LoadInst = B.CreateLoad(PointerArg);
541 setupAnalyses();
542 MemorySSA &MSSA = *Analyses->MSSA;
543 MemorySSAWalker *Walker = Analyses->Walker;
544 MemorySSAUpdater Updater(&MSSA);
546 // Before, the load will be a use of a phi<store, liveonentry>. It should be
547 // the same after.
548 MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst));
549 MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));
550 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
551 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
552 // The load is currently clobbered by one of the phi arguments, so the walker
553 // should determine the clobbering access as the phi.
554 EXPECT_EQ(DefiningAccess, Walker->getClobberingMemoryAccess(LoadInst));
555 Updater.removeMemoryAccess(StoreAccess);
556 MSSA.verifyMemorySSA();
557 // After removeMemoryAccess, let's see if we got the right accesses
558 // The load should still point to the phi ...
559 EXPECT_EQ(DefiningAccess, LoadAccess->getDefiningAccess());
560 // but we should now get live on entry for the clobbering definition of the
561 // load, since it will walk past the phi node because every argument is the
562 // same.
563 // XXX: This currently requires either removing the phi or resetting optimized
564 // on the load
566 EXPECT_FALSE(
567 MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
568 // If we reset optimized, we get live on entry.
569 LoadAccess->resetOptimized();
570 EXPECT_TRUE(
571 MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
572 // The phi should now be a two entry phi with two live on entry defs.
573 for (const auto &Op : DefiningAccess->operands()) {
574 MemoryAccess *Operand = cast<MemoryAccess>(&*Op);
575 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Operand));
576 }
578 // Now we try to remove the single valued phi
579 Updater.removeMemoryAccess(DefiningAccess);
580 MSSA.verifyMemorySSA();
581 // Now the load should be a load of live on entry.
582 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
583 }
585 // We had a bug with caching where the walker would report MemoryDef#3's clobber
586 // (below) was MemoryDef#1.
588 // define void @F(i8*) {
589 // %A = alloca i8, i8 1
590 // ; 1 = MemoryDef(liveOnEntry)
591 // store i8 0, i8* %A
592 // ; 2 = MemoryDef(1)
593 // store i8 1, i8* %A
594 // ; 3 = MemoryDef(2)
595 // store i8 2, i8* %A
596 // }
597 TEST_F(MemorySSATest, TestTripleStore) {
598 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
599 GlobalValue::ExternalLinkage, "F", &M);
600 B.SetInsertPoint(BasicBlock::Create(C, "", F));
601 Type *Int8 = Type::getInt8Ty(C);
602 Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
603 StoreInst *S1 = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
604 StoreInst *S2 = B.CreateStore(ConstantInt::get(Int8, 1), Alloca);
605 StoreInst *S3 = B.CreateStore(ConstantInt::get(Int8, 2), Alloca);
607 setupAnalyses();
608 MemorySSA &MSSA = *Analyses->MSSA;
609 MemorySSAWalker *Walker = Analyses->Walker;
611 unsigned I = 0;
612 for (StoreInst *V : {S1, S2, S3}) {
613 // Everything should be clobbered by its defining access
614 MemoryAccess *DefiningAccess = MSSA.getMemoryAccess(V)->getDefiningAccess();
615 MemoryAccess *WalkerClobber = Walker->getClobberingMemoryAccess(V);
616 EXPECT_EQ(DefiningAccess, WalkerClobber)
617 << "Store " << I << " doesn't have the correct clobbering access";
618 // EXPECT_EQ expands such that if we increment I above, it won't get
619 // incremented except when we try to print the error message.
620 ++I;
621 }
622 }
624 // ...And fixing the above bug made it obvious that, when walking, MemorySSA's
625 // walker was caching the initial node it walked. This was fine (albeit
626 // mostly redundant) unless the initial node being walked is a clobber for the
627 // query. In that case, we'd cache that the node clobbered itself.
628 TEST_F(MemorySSATest, TestStoreAndLoad) {
629 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
630 GlobalValue::ExternalLinkage, "F", &M);
631 B.SetInsertPoint(BasicBlock::Create(C, "", F));
632 Type *Int8 = Type::getInt8Ty(C);
633 Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
634 Instruction *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
635 Instruction *LI = B.CreateLoad(Alloca);
637 setupAnalyses();
638 MemorySSA &MSSA = *Analyses->MSSA;
639 MemorySSAWalker *Walker = Analyses->Walker;
641 MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LI);
642 EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SI));
643 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SI)));
644 }
646 // Another bug (related to the above two fixes): It was noted that, given the
647 // following code:
648 // ; 1 = MemoryDef(liveOnEntry)
649 // store i8 0, i8* %1
651 // ...A query to getClobberingMemoryAccess(MemoryAccess*, MemoryLocation) would
652 // hand back the store (correctly). A later call to
653 // getClobberingMemoryAccess(const Instruction*) would also hand back the store
654 // (incorrectly; it should return liveOnEntry).
656 // This test checks that repeated calls to either function return what they're
657 // meant to.
658 TEST_F(MemorySSATest, TestStoreDoubleQuery) {
659 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
660 GlobalValue::ExternalLinkage, "F", &M);
661 B.SetInsertPoint(BasicBlock::Create(C, "", F));
662 Type *Int8 = Type::getInt8Ty(C);
663 Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
664 StoreInst *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
666 setupAnalyses();
667 MemorySSA &MSSA = *Analyses->MSSA;
668 MemorySSAWalker *Walker = Analyses->Walker;
670 MemoryAccess *StoreAccess = MSSA.getMemoryAccess(SI);
671 MemoryLocation StoreLoc = MemoryLocation::get(SI);
672 MemoryAccess *Clobber =
673 Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
674 MemoryAccess *LiveOnEntry = Walker->getClobberingMemoryAccess(SI);
676 EXPECT_EQ(Clobber, StoreAccess);
677 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
679 // Try again (with entries in the cache already) for good measure...
680 Clobber = Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
681 LiveOnEntry = Walker->getClobberingMemoryAccess(SI);
682 EXPECT_EQ(Clobber, StoreAccess);
683 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
684 }
686 // Bug: During phi optimization, the walker wouldn't cache to the proper result
687 // in the farthest-walked BB.
689 // Specifically, it would assume that whatever we walked to was a clobber.
690 // "Whatever we walked to" isn't a clobber if we hit a cache entry.
692 // ...So, we need a test case that looks like:
693 // A
694 // / \
695 // B |
696 // \ /
697 // C
699 // Where, when we try to optimize a thing in 'C', a blocker is found in 'B'.
700 // The walk must determine that the blocker exists by using cache entries *while
701 // walking* 'B'.
702 TEST_F(MemorySSATest, PartialWalkerCacheWithPhis) {
703 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
704 GlobalValue::ExternalLinkage, "F", &M);
705 B.SetInsertPoint(BasicBlock::Create(C, "A", F));
706 Type *Int8 = Type::getInt8Ty(C);
707 Constant *One = ConstantInt::get(Int8, 1);
708 Constant *Zero = ConstantInt::get(Int8, 0);
709 Value *AllocA = B.CreateAlloca(Int8, One, "a");
710 Value *AllocB = B.CreateAlloca(Int8, One, "b");
711 BasicBlock *IfThen = BasicBlock::Create(C, "B", F);
712 BasicBlock *IfEnd = BasicBlock::Create(C, "C", F);
714 B.CreateCondBr(UndefValue::get(Type::getInt1Ty(C)), IfThen, IfEnd);
716 B.SetInsertPoint(IfThen);
717 Instruction *FirstStore = B.CreateStore(Zero, AllocA);
718 B.CreateStore(Zero, AllocB);
719 Instruction *ALoad0 = B.CreateLoad(AllocA, "");
720 Instruction *BStore = B.CreateStore(Zero, AllocB);
721 // Due to use optimization/etc. we make a store to A, which is removed after
722 // we build MSSA. This helps keep the test case simple-ish.
723 Instruction *KillStore = B.CreateStore(Zero, AllocA);
724 Instruction *ALoad = B.CreateLoad(AllocA, "");
725 B.CreateBr(IfEnd);
727 B.SetInsertPoint(IfEnd);
728 Instruction *BelowPhi = B.CreateStore(Zero, AllocA);
730 setupAnalyses();
731 MemorySSA &MSSA = *Analyses->MSSA;
732 MemorySSAWalker *Walker = Analyses->Walker;
733 MemorySSAUpdater Updater(&MSSA);
735 // Kill `KillStore`; it exists solely so that the load after it won't be
736 // optimized to FirstStore.
737 Updater.removeMemoryAccess(MSSA.getMemoryAccess(KillStore));
738 KillStore->eraseFromParent();
739 auto *ALoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(ALoad));
740 EXPECT_EQ(ALoadMA->getDefiningAccess(), MSSA.getMemoryAccess(BStore));
742 // Populate the cache for the store to AllocB directly after FirstStore. It
744 // should point to something in block B (so something in C can't be optimized
744 // to it).
745 MemoryAccess *Load0Clobber = Walker->getClobberingMemoryAccess(ALoad0);
746 EXPECT_EQ(MSSA.getMemoryAccess(FirstStore), Load0Clobber);
748 // If the bug exists, this will introduce a bad cache entry for %a on BStore.
749 // It will point to the store to %b after FirstStore. This only happens during
750 // phi optimization.
751 MemoryAccess *BottomClobber = Walker->getClobberingMemoryAccess(BelowPhi);
752 MemoryAccess *Phi = MSSA.getMemoryAccess(IfEnd);
753 EXPECT_EQ(BottomClobber, Phi);
755 // This query will first check the cache for {%a, BStore}. It should point to
756 // FirstStore, not to the store after FirstStore.
757 MemoryAccess *UseClobber = Walker->getClobberingMemoryAccess(ALoad);
758 EXPECT_EQ(UseClobber, MSSA.getMemoryAccess(FirstStore));
759 }
761 // Test that our walker properly handles loads with invariant load metadata.
762 // It's a bit hacky, since we add the metadata *after* building MSSA.
763 // Otherwise, the use optimizer will optimize it for us, which isn't what we
764 // want.
765 // FIXME: It may be easier/cleaner to just add an 'optimize uses?' flag to MSSA.
766 TEST_F(MemorySSATest, WalkerInvariantLoadOpt) {
767 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
768 GlobalValue::ExternalLinkage, "F", &M);
769 B.SetInsertPoint(BasicBlock::Create(C, "", F));
770 Type *Int8 = Type::getInt8Ty(C);
771 Constant *One = ConstantInt::get(Int8, 1);
772 Value *AllocA = B.CreateAlloca(Int8, One, "");
774 Instruction *Store = B.CreateStore(One, AllocA);
775 Instruction *Load = B.CreateLoad(AllocA);
777 setupAnalyses();
778 MemorySSA &MSSA = *Analyses->MSSA;
779 MemorySSAWalker *Walker = Analyses->Walker;
781 auto *LoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(Load));
782 auto *StoreMA = cast<MemoryDef>(MSSA.getMemoryAccess(Store));
783 EXPECT_EQ(LoadMA->getDefiningAccess(), StoreMA);
785 // ...At the time of writing, no cache should exist for LoadMA. Be a bit
786 // flexible to future changes.
787 Walker->invalidateInfo(LoadMA);
788 Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(C, {}));
790 MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LoadMA);
791 EXPECT_EQ(LoadClobber, MSSA.getLiveOnEntryDef());
792 }
794 // Test loads get reoptimized properly by the walker.
795 TEST_F(MemorySSATest, WalkerReopt) {
796 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
797 GlobalValue::ExternalLinkage, "F", &M);
798 B.SetInsertPoint(BasicBlock::Create(C, "", F));
799 Type *Int8 = Type::getInt8Ty(C);
800 Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
801 Instruction *SIA = B.CreateStore(ConstantInt::get(Int8, 0), AllocaA);
802 Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
803 Instruction *SIB = B.CreateStore(ConstantInt::get(Int8, 0), AllocaB);
804 Instruction *LIA = B.CreateLoad(AllocaA);
806 setupAnalyses();
807 MemorySSA &MSSA = *Analyses->MSSA;
808 MemorySSAWalker *Walker = Analyses->Walker;
809 MemorySSAUpdater Updater(&MSSA);
811 MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LIA);
812 MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LIA));
813 EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SIA));
814 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SIA)));
815 Updater.removeMemoryAccess(LoadAccess);
817 // Create the load memory access pointing to an unoptimized place.
818 MemoryUse *NewLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
819 LIA, MSSA.getMemoryAccess(SIB), LIA->getParent(), MemorySSA::End));
820 // This should cause it to be optimized
821 EXPECT_EQ(Walker->getClobberingMemoryAccess(NewLoadAccess), LoadClobber);
822 EXPECT_EQ(NewLoadAccess->getDefiningAccess(), LoadClobber);
823 }
825 // Test out MemorySSAUpdater::moveBefore
826 TEST_F(MemorySSATest, MoveAboveMemoryDef) {
827 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
828 GlobalValue::ExternalLinkage, "F", &M);
829 B.SetInsertPoint(BasicBlock::Create(C, "", F));
831 Type *Int8 = Type::getInt8Ty(C);
832 Value *A = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
833 Value *B_ = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
834 Value *C = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C");
836 StoreInst *StoreA0 = B.CreateStore(ConstantInt::get(Int8, 0), A);
837 StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 0), B_);
838 LoadInst *LoadB = B.CreateLoad(B_);
839 StoreInst *StoreA1 = B.CreateStore(ConstantInt::get(Int8, 4), A);
840 StoreInst *StoreC = B.CreateStore(ConstantInt::get(Int8, 4), C);
841 StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 4), A);
842 LoadInst *LoadC = B.CreateLoad(C);
844 setupAnalyses();
845 MemorySSA &MSSA = *Analyses->MSSA;
846 MemorySSAWalker &Walker = *Analyses->Walker;
848 MemorySSAUpdater Updater(&MSSA);
849 StoreC->moveBefore(StoreB);
850 Updater.moveBefore(cast<MemoryDef>(MSSA.getMemoryAccess(StoreC)),
851 cast<MemoryDef>(MSSA.getMemoryAccess(StoreB)));
853 MSSA.verifyMemorySSA();
855 EXPECT_EQ(MSSA.getMemoryAccess(StoreB)->getDefiningAccess(),
856 MSSA.getMemoryAccess(StoreC));
857 EXPECT_EQ(MSSA.getMemoryAccess(StoreC)->getDefiningAccess(),
858 MSSA.getMemoryAccess(StoreA0));
859 EXPECT_EQ(MSSA.getMemoryAccess(StoreA2)->getDefiningAccess(),
860 MSSA.getMemoryAccess(StoreA1));
861 EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadB),
862 MSSA.getMemoryAccess(StoreB));
863 EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadC),
864 MSSA.getMemoryAccess(StoreC));
866 // exercise block numbering
867 EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreC),
868 MSSA.getMemoryAccess(StoreB)));
869 EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreA1),
870 MSSA.getMemoryAccess(StoreA2)));
871 }
873 TEST_F(MemorySSATest, Irreducible) {
874 // Create the equivalent of
875 // x = something
876 // if (...)
877 // goto second_loop_entry
878 // while (...) {
879 // second_loop_entry:
880 // }
881 // use(x)
883 SmallVector<PHINode *, 8> Inserted;
884 IRBuilder<> B(C);
885 F = Function::Create(
886 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
887 GlobalValue::ExternalLinkage, "F", &M);
889 // Make blocks
890 BasicBlock *IfBB = BasicBlock::Create(C, "if", F);
891 BasicBlock *LoopStartBB = BasicBlock::Create(C, "loopstart", F);
892 BasicBlock *LoopMainBB = BasicBlock::Create(C, "loopmain", F);
893 BasicBlock *AfterLoopBB = BasicBlock::Create(C, "afterloop", F);
894 B.SetInsertPoint(IfBB);
895 B.CreateCondBr(B.getTrue(), LoopMainBB, LoopStartBB);
896 B.SetInsertPoint(LoopStartBB);
897 B.CreateBr(LoopMainBB);
898 B.SetInsertPoint(LoopMainBB);
899 B.CreateCondBr(B.getTrue(), LoopStartBB, AfterLoopBB);
900 B.SetInsertPoint(AfterLoopBB);
901 Argument *FirstArg = &*F->arg_begin();
902 setupAnalyses();
903 MemorySSA &MSSA = *Analyses->MSSA;
904 MemorySSAUpdater Updater(&MSSA);
905 // Create the load memory access
906 LoadInst *LoadInst = B.CreateLoad(FirstArg);
907 MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
908 LoadInst, nullptr, AfterLoopBB, MemorySSA::Beginning));
909 Updater.insertUse(LoadAccess);
910 MSSA.verifyMemorySSA();
911 }
913 TEST_F(MemorySSATest, MoveToBeforeLiveOnEntryInvalidatesCache) {
914 // Create:
915 // %1 = alloca i8
916 // ; 1 = MemoryDef(liveOnEntry)
917 // store i8 0, i8* %1
918 // ; 2 = MemoryDef(1)
919 // store i8 0, i8* %1
921 // ...And be sure that MSSA's caching doesn't give us `1` for the clobber of
922 // `2` after `1` is removed.
923 IRBuilder<> B(C);
924 F = Function::Create(
925 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
926 GlobalValue::ExternalLinkage, "F", &M);
928 BasicBlock *Entry = BasicBlock::Create(C, "if", F);
929 B.SetInsertPoint(Entry);
931 Value *A = B.CreateAlloca(B.getInt8Ty());
932 StoreInst *StoreA = B.CreateStore(B.getInt8(0), A);
933 StoreInst *StoreB = B.CreateStore(B.getInt8(0), A);
935 setupAnalyses();
937 MemorySSA &MSSA = *Analyses->MSSA;
939 auto *DefA = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
940 auto *DefB = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));
942 MemoryAccess *BClobber = MSSA.getWalker()->getClobberingMemoryAccess(DefB);
943 ASSERT_EQ(DefA, BClobber);
945 MemorySSAUpdater(&MSSA).removeMemoryAccess(DefA);
946 StoreA->eraseFromParent();
948 EXPECT_EQ(DefB->getDefiningAccess(), MSSA.getLiveOnEntryDef());
950 EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefB),
951 MSSA.getLiveOnEntryDef())
952 << "(DefA = " << DefA << ")";
955 TEST_F(MemorySSATest, RemovingDefInvalidatesCache) {
956 // Create:
957 // %x = alloca i8
958 // %y = alloca i8
959 // ; 1 = MemoryDef(liveOnEntry)
960 // store i8 0, i8* %x
961 // ; 2 = MemoryDef(1)
962 // store i8 0, i8* %y
963 // ; 3 = MemoryDef(2)
964 // store i8 0, i8* %x
966 // And be sure that MSSA's caching handles the removal of def `1`
967 // appropriately.
968 IRBuilder<> B(C);
969 F = Function::Create(
970 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
971 GlobalValue::ExternalLinkage, "F", &M);
973 BasicBlock *Entry = BasicBlock::Create(C, "if", F);
974 B.SetInsertPoint(Entry);
976 Value *X = B.CreateAlloca(B.getInt8Ty());
977 Value *Y = B.CreateAlloca(B.getInt8Ty());
978 StoreInst *StoreX1 = B.CreateStore(B.getInt8(0), X);
979 StoreInst *StoreY = B.CreateStore(B.getInt8(0), Y);
980 StoreInst *StoreX2 = B.CreateStore(B.getInt8(0), X);
982 setupAnalyses();
984 MemorySSA &MSSA = *Analyses->MSSA;
986 auto *DefX1 = cast<MemoryDef>(MSSA.getMemoryAccess(StoreX1));
987 auto *DefY = cast<MemoryDef>(MSSA.getMemoryAccess(StoreY));
988 auto *DefX2 = cast<MemoryDef>(MSSA.getMemoryAccess(StoreX2));
990 EXPECT_EQ(DefX2->getDefiningAccess(), DefY);
991 MemoryAccess *X2Clobber = MSSA.getWalker()->getClobberingMemoryAccess(DefX2);
992 ASSERT_EQ(DefX1, X2Clobber);
994 MemorySSAUpdater(&MSSA).removeMemoryAccess(DefX1);
995 StoreX1->eraseFromParent();
997 EXPECT_EQ(DefX2->getDefiningAccess(), DefY);
998 EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefX2),
999 MSSA.getLiveOnEntryDef())
1000 << "(DefX1 = " << DefX1 << ")";
1003 // Test Must alias for optimized uses
1004 TEST_F(MemorySSATest, TestLoadMustAlias) {
1005 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
1006 GlobalValue::ExternalLinkage, "F", &M);
1007 B.SetInsertPoint(BasicBlock::Create(C, "", F));
1008 Type *Int8 = Type::getInt8Ty(C);
1009 Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
1010 Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
1012 B.CreateStore(ConstantInt::get(Int8, 1), AllocaB);
1013 // Check load from LOE
1014 LoadInst *LA1 = B.CreateLoad(AllocaA, "");
1015 // Check load alias cached for second load
1016 LoadInst *LA2 = B.CreateLoad(AllocaA, "");
1018 B.CreateStore(ConstantInt::get(Int8, 1), AllocaA);
1019 // Check load from store/def
1020 LoadInst *LA3 = B.CreateLoad(AllocaA, "");
1021 // Check load alias cached for second load
1022 LoadInst *LA4 = B.CreateLoad(AllocaA, "");
1024 setupAnalyses();
1025 MemorySSA &MSSA = *Analyses->MSSA;
1027 unsigned I = 0;
1028 for (LoadInst *V : {LA1, LA2}) {
1029 MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
1030 EXPECT_EQ(MemUse->getOptimizedAccessType(), None)
1031 << "Load " << I << " doesn't have the correct alias information";
1032 // EXPECT_EQ expands such that if we increment I above, it won't get
1033 // incremented except when we try to print the error message.
1034 ++I;
1035 }
1036 for (LoadInst *V : {LA3, LA4}) {
1037 MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
1038 EXPECT_EQ(MemUse->getOptimizedAccessType(), MustAlias)
1039 << "Load " << I << " doesn't have the correct alias information";
1040 // EXPECT_EQ expands such that if we increment I above, it won't get
1041 // incremented except when we try to print the error message.
1042 ++I;
1043 }
1044 }
1046 // Test Must alias for optimized defs.
1047 TEST_F(MemorySSATest, TestStoreMustAlias) {
1048 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
1049 GlobalValue::ExternalLinkage, "F", &M);
1050 B.SetInsertPoint(BasicBlock::Create(C, "", F));
1051 Type *Int8 = Type::getInt8Ty(C);
1052 Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
1053 Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
1054 StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaA);
1055 StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaB);
1056 StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaA);
1057 StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaB);
1058 StoreInst *SA3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaA);
1059 StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaB);
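// Expected results (a summary of the checks below): SA1 and SB1 optimize to
// liveOnEntry, so their optimized access type is None; each later store
// optimizes to the previous store to the same alloca, so MustAlias.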
1061 setupAnalyses();
1062 MemorySSA &MSSA = *Analyses->MSSA;
1063 MemorySSAWalker *Walker = Analyses->Walker;
1065 unsigned I = 0;
1066 for (StoreInst *V : {SA1, SB1, SA2, SB2, SA3, SB3}) {
1067 MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
1068 EXPECT_EQ(MemDef->isOptimized(), false)
1069 << "Store " << I << " is optimized from the start?";
1070 EXPECT_EQ(MemDef->getOptimizedAccessType(), MayAlias)
1071 << "Store " << I
1072 << " has correct alias information before being optimized?";
1073 if (V == SA1)
1074 Walker->getClobberingMemoryAccess(V);
1075 else {
1076 MemoryAccess *Def = MemDef->getDefiningAccess();
1077 MemoryAccess *Clob = Walker->getClobberingMemoryAccess(V);
1078 EXPECT_NE(Def, Clob) << "Store " << I
1079 << " has Defining Access equal to Clobbering Access";
1081 EXPECT_EQ(MemDef->isOptimized(), true)
1082 << "Store " << I << " was not optimized";
1083 if (I == 0 || I == 1)
1084 EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
1085 << "Store " << I << " doesn't have the correct alias information";
1086 else
1087 EXPECT_EQ(MemDef->getOptimizedAccessType(), MustAlias)
1088 << "Store " << I << " doesn't have the correct alias information";
1089 // EXPECT_EQ expands such that if we increment I above, it won't get
1090 // incremented except when we try to print the error message.
1091 ++I;
1092 }
1093 }
1095 // Test May alias for optimized uses.
1096 TEST_F(MemorySSATest, TestLoadMayAlias) {
1097 F = Function::Create(FunctionType::get(B.getVoidTy(),
1098 {B.getInt8PtrTy(), B.getInt8PtrTy()},
1099 false),
1100 GlobalValue::ExternalLinkage, "F", &M);
1101 B.SetInsertPoint(BasicBlock::Create(C, "", F));
1102 Type *Int8 = Type::getInt8Ty(C);
1103 auto *ArgIt = F->arg_begin();
1104 Argument *PointerA = &*ArgIt;
1105 Argument *PointerB = &*(++ArgIt);
1106 B.CreateStore(ConstantInt::get(Int8, 1), PointerB);
1107 LoadInst *LA1 = B.CreateLoad(PointerA, "");
1108 B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
1109 LoadInst *LB1 = B.CreateLoad(PointerB, "");
1110 B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
1111 LoadInst *LA2 = B.CreateLoad(PointerA, "");
1112 B.CreateStore(ConstantInt::get(Int8, 0), PointerB);
1113 LoadInst *LB2 = B.CreateLoad(PointerB, "");
1115 setupAnalyses();
1116 MemorySSA &MSSA = *Analyses->MSSA;
1118 unsigned I = 0;
1119 for (LoadInst *V : {LA1, LB1}) {
1120 MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
1121 EXPECT_EQ(MemUse->getOptimizedAccessType(), MayAlias)
1122 << "Load " << I << " doesn't have the correct alias information";
1123 // EXPECT_EQ expands such that if we increment I above, it won't get
1124 // incremented except when we try to print the error message.
1125 ++I;
1126 }
1127 for (LoadInst *V : {LA2, LB2}) {
1128 MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
1129 EXPECT_EQ(MemUse->getOptimizedAccessType(), MustAlias)
1130 << "Load " << I << " doesn't have the correct alias information";
1131 // EXPECT_EQ expands such that if we increment I above, it won't get
1132 // incremented except when we try to print the error message.
1133 ++I;
1134 }
1135 }
1137 // Test May alias for optimized defs.
1138 TEST_F(MemorySSATest, TestStoreMayAlias) {
1139 F = Function::Create(FunctionType::get(B.getVoidTy(),
1140 {B.getInt8PtrTy(), B.getInt8PtrTy()},
1141 false),
1142 GlobalValue::ExternalLinkage, "F", &M);
1143 B.SetInsertPoint(BasicBlock::Create(C, "", F));
1144 Type *Int8 = Type::getInt8Ty(C);
1145 auto *ArgIt = F->arg_begin();
1146 Argument *PointerA = &*ArgIt;
1147 Argument *PointerB = &*(++ArgIt);
1148 Value *AllocaC = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C");
1149 // Store into arg1, must alias because it's LOE => must
1150 StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
1151 // Store into arg2, may alias store to arg1 => may
1152 StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), PointerB);
1153 // Store into alloca, no alias with args, so must alias LOE => must
1154 StoreInst *SC1 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaC);
1155 // Store into arg1, may alias store to arg2 => may
1156 StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 3), PointerA);
1157 // Store into arg2, may alias store to arg1 => may
1158 StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 4), PointerB);
1159 // Store into alloca, no alias with args, so must alias SC1 => must
1160 StoreInst *SC2 = B.CreateStore(ConstantInt::get(Int8, 5), AllocaC);
1161 // Store into arg2, must alias store to arg2 => must
1162 StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 6), PointerB);
1163 std::initializer_list<StoreInst *> Sts = {SA1, SB1, SC1, SA2, SB2, SC2, SB3};
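// Expected optimized access types, indexed into Sts as in the checks below:
// SA1 and SC1 optimize to liveOnEntry (None), SB1/SA2/SB2 may alias their
// clobbering store (MayAlias), and SC2/SB3 must alias theirs (MustAlias).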
1165 setupAnalyses();
1166 MemorySSA &MSSA = *Analyses->MSSA;
1167 MemorySSAWalker *Walker = Analyses->Walker;
1169 unsigned I = 0;
1170 for (StoreInst *V : Sts) {
1171 MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
1172 EXPECT_EQ(MemDef->isOptimized(), false)
1173 << "Store " << I << " is optimized from the start?";
1174 EXPECT_EQ(MemDef->getOptimizedAccessType(), MayAlias)
1175 << "Store " << I
1176 << " has correct alias information before being optimized?";
1177 ++I;
1178 }
1180 for (StoreInst *V : Sts)
1181 Walker->getClobberingMemoryAccess(V);
1183 I = 0;
1184 for (StoreInst *V : Sts) {
1185 MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
1186 EXPECT_EQ(MemDef->isOptimized(), true)
1187 << "Store " << I << " was not optimized";
1188 if (I == 1 || I == 3 || I == 4)
1189 EXPECT_EQ(MemDef->getOptimizedAccessType(), MayAlias)
1190 << "Store " << I << " doesn't have the correct alias information";
1191 else if (I == 0 || I == 2)
1192 EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
1193 << "Store " << I << " doesn't have the correct alias information";
1194 else
1195 EXPECT_EQ(MemDef->getOptimizedAccessType(), MustAlias)
1196 << "Store " << I << " doesn't have the correct alias information";
1197 // EXPECT_EQ expands such that if we increment I above, it won't get
1198 // incremented except when we try to print the error message.
1199 ++I;
1200 }
1201 }
1203 TEST_F(MemorySSATest, LifetimeMarkersAreClobbers) {
1204 // Example code:
1205 // define void @a(i8* %foo) {
1206 // %bar = getelementptr i8, i8* %foo, i64 1
1207 // store i8 0, i8* %foo
1208 // store i8 0, i8* %bar
1209 // call void @llvm.lifetime.end.p0i8(i64 8, i8* %foo)
1210 // call void @llvm.lifetime.start.p0i8(i64 8, i8* %foo)
1211 // store i8 0, i8* %foo
1212 // store i8 0, i8* %bar
1213 // ret void
1214 // }
1216 // Patterns like this are possible after inlining; the stores to %foo and %bar
1217 // should both be clobbered by the lifetime.start call if they're dominated by
1218 // it.
1220 IRBuilder<> B(C);
1221 F = Function::Create(
1222 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
1223 GlobalValue::ExternalLinkage, "F", &M);
1225 // Make blocks
1226 BasicBlock *Entry = BasicBlock::Create(C, "entry", F);
1228 B.SetInsertPoint(Entry);
1229 Value *Foo = &*F->arg_begin();
1231 Value *Bar = B.CreateGEP(Foo, B.getInt64(1), "bar");
1233 B.CreateStore(B.getInt8(0), Foo);
1234 B.CreateStore(B.getInt8(0), Bar);
1236 auto GetLifetimeIntrinsic = [&](Intrinsic::ID ID) {
1237 return Intrinsic::getDeclaration(&M, ID, {Foo->getType()});
1238 };
1240 B.CreateCall(GetLifetimeIntrinsic(Intrinsic::lifetime_end),
1241 {B.getInt64(2), Foo});
1242 Instruction *LifetimeStart = B.CreateCall(
1243 GetLifetimeIntrinsic(Intrinsic::lifetime_start), {B.getInt64(2), Foo});
1245 Instruction *FooStore = B.CreateStore(B.getInt8(0), Foo);
1246 Instruction *BarStore = B.CreateStore(B.getInt8(0), Bar);
1248 setupAnalyses();
1249 MemorySSA &MSSA = *Analyses->MSSA;
1251 MemoryAccess *LifetimeStartAccess = MSSA.getMemoryAccess(LifetimeStart);
1252 ASSERT_NE(LifetimeStartAccess, nullptr);
1254 MemoryAccess *FooAccess = MSSA.getMemoryAccess(FooStore);
1255 ASSERT_NE(FooAccess, nullptr);
1257 MemoryAccess *BarAccess = MSSA.getMemoryAccess(BarStore);
1258 ASSERT_NE(BarAccess, nullptr);
1260 MemoryAccess *FooClobber =
1261 MSSA.getWalker()->getClobberingMemoryAccess(FooAccess);
1262 EXPECT_EQ(FooClobber, LifetimeStartAccess);
1264 MemoryAccess *BarClobber =
1265 MSSA.getWalker()->getClobberingMemoryAccess(BarAccess);
1266 EXPECT_EQ(BarClobber, LifetimeStartAccess);
1267 }
1269 TEST_F(MemorySSATest, DefOptimizationsAreInvalidatedOnMoving) {
1270 IRBuilder<> B(C);
1271 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getInt1Ty()}, false),
1272 GlobalValue::ExternalLinkage, "F", &M);
1274 // Make a CFG like
1275 // entry
1276 // / \
1277 // a b
1278 // \ /
1279 // c
1281 // Put a def in A and a def in B, move the def from A -> B, observe as the
1282 // optimization is invalidated.
1283 BasicBlock *Entry = BasicBlock::Create(C, "entry", F);
1284 BasicBlock *BlockA = BasicBlock::Create(C, "a", F);
1285 BasicBlock *BlockB = BasicBlock::Create(C, "b", F);
1286 BasicBlock *BlockC = BasicBlock::Create(C, "c", F);
1288 B.SetInsertPoint(Entry);
1289 Type *Int8 = Type::getInt8Ty(C);
1290 Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "alloc");
1291 StoreInst *StoreEntry = B.CreateStore(B.getInt8(0), Alloca);
1292 B.CreateCondBr(B.getTrue(), BlockA, BlockB);
1294 B.SetInsertPoint(BlockA);
1295 StoreInst *StoreA = B.CreateStore(B.getInt8(1), Alloca);
1296 B.CreateBr(BlockC);
1298 B.SetInsertPoint(BlockB);
1299 StoreInst *StoreB = B.CreateStore(B.getInt8(2), Alloca);
1300 B.CreateBr(BlockC);
1302 B.SetInsertPoint(BlockC);
1303 B.CreateUnreachable();
1305 setupAnalyses();
1306 MemorySSA &MSSA = *Analyses->MSSA;
1308 auto *AccessEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreEntry));
1309 auto *StoreAEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
1310 auto *StoreBEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));
1312 ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry),
1313 AccessEntry);
1314 ASSERT_TRUE(StoreAEntry->isOptimized());
1316 ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreBEntry),
1317 AccessEntry);
1318 ASSERT_TRUE(StoreBEntry->isOptimized());
1320 // Note that if we did InsertionPlace::Beginning, we don't go out of our way
1321 // to invalidate the cache for StoreBEntry. If the user wants to actually do
1322 // moves like these, it's up to them to ensure that nearby cache entries are
1323 // correctly invalidated (which, in general, requires walking all instructions
1324 // that the moved instruction dominates. So we probably shouldn't be doing
1325 // moves like this in general. Still, works as a test-case. ;) )
1326 MemorySSAUpdater(&MSSA).moveToPlace(StoreAEntry, BlockB,
1327 MemorySSA::InsertionPlace::End);
1328 ASSERT_FALSE(StoreAEntry->isOptimized());
1329 ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry),
1330 StoreBEntry);
1331 }
1333 TEST_F(MemorySSATest, TestOptimizedDefsAreProperUses) {
1334 F = Function::Create(FunctionType::get(B.getVoidTy(),
1335 {B.getInt8PtrTy(), B.getInt8PtrTy()},
1336 false),
1337 GlobalValue::ExternalLinkage, "F", &M);
1338 B.SetInsertPoint(BasicBlock::Create(C, "", F));
1339 Type *Int8 = Type::getInt8Ty(C);
1340 Value *AllocA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
1341 Value *AllocB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
1343 StoreInst *StoreA = B.CreateStore(ConstantInt::get(Int8, 0), AllocA);
1344 StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 1), AllocB);
1345 StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocA);
1347 setupAnalyses();
1348 MemorySSA &MSSA = *Analyses->MSSA;
1349 MemorySSAWalker *Walker = Analyses->Walker;
1351 // If these don't hold, there's no chance of the test result being useful.
1352 ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA),
1353 MSSA.getLiveOnEntryDef());
1354 ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreB),
1355 MSSA.getLiveOnEntryDef());
1356 auto *StoreAAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
1357 auto *StoreA2Access = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA2));
1358 ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA2), StoreAAccess);
1359 ASSERT_EQ(StoreA2Access->getOptimized(), StoreAAccess);
1361 auto *StoreBAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));
1362 ASSERT_LT(StoreAAccess->getID(), StoreBAccess->getID());
1363 ASSERT_LT(StoreBAccess->getID(), StoreA2Access->getID());
  auto SortVecByID = [](std::vector<const MemoryDef *> &Defs) {
    llvm::sort(Defs, [](const MemoryDef *LHS, const MemoryDef *RHS) {
      return LHS->getID() < RHS->getID();
    });
  };

  auto SortedUserList = [&](const MemoryDef *MD) {
    std::vector<const MemoryDef *> Result;
    transform(MD->users(), std::back_inserter(Result),
              [](const User *U) { return cast<MemoryDef>(U); });
    SortVecByID(Result);
    return Result;
  };

  // Use std::vectors, since they have nice pretty-printing if the test fails.
  // Parens are necessary because EXPECT_EQ is a macro, and we have commas in
  // our init lists...
  EXPECT_EQ(SortedUserList(StoreAAccess),
            (std::vector<const MemoryDef *>{StoreBAccess, StoreA2Access}));

  EXPECT_EQ(SortedUserList(StoreBAccess),
            std::vector<const MemoryDef *>{StoreA2Access});

  // StoreAAccess should be present twice, since it uses liveOnEntry for both
  // its defining and optimized accesses. This is a bit awkward, and is not
  // relied upon anywhere at the moment. If this is painful, we can fix it.
  EXPECT_EQ(SortedUserList(cast<MemoryDef>(MSSA.getLiveOnEntryDef())),
            (std::vector<const MemoryDef *>{StoreAAccess, StoreAAccess,
                                            StoreBAccess}));
}

// entry
//   |
// header
//  /  \
// body |
//  \  /
//  exit
// header:
//  ; 1 = MemoryDef(liveOnEntry)
// body:
//  ; 2 = MemoryDef(1)
// exit:
//  ; 3 = MemoryPhi({body, 2}, {header, 1})
//  ; 4 = MemoryDef(3); optimized to 3, cannot optimize through the phi.
// Insert edge: entry -> exit; check that the MemorySSA update is correct.
TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiNotOpt) {
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  Argument *PointerArg = &*F->arg_begin();
  BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
  BasicBlock *Header(BasicBlock::Create(C, "header", F));
  BasicBlock *Body(BasicBlock::Create(C, "body", F));
  BasicBlock *Exit(BasicBlock::Create(C, "exit", F));
  B.SetInsertPoint(Entry);
  BranchInst::Create(Header, Entry);
  B.SetInsertPoint(Header);
  B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Exit, Body);
  B.SetInsertPoint(Body);
  B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Exit, Body);
  B.SetInsertPoint(Exit);
  StoreInst *S1 = B.CreateStore(B.getInt8(16), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;
  std::unique_ptr<MemorySSAUpdater> MSSAU =
      make_unique<MemorySSAUpdater>(&MSSA);

  MemoryPhi *Phi = MSSA.getMemoryAccess(Exit);
  EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1));

  // Alter CFG, add edge: entry -> exit
  Entry->getTerminator()->eraseFromParent();
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Header, Exit);
  SmallVector<CFGUpdate, 1> Updates;
  Updates.push_back({cfg::UpdateKind::Insert, Entry, Exit});
  Analyses->DT.applyUpdates(Updates);
  MSSAU->applyInsertUpdates(Updates, Analyses->DT);
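  // The Phi in exit gains an incoming liveOnEntry value from the new
  // entry -> exit edge; S1 still cannot be optimized past the Phi.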
  EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1));
}

// entry
//   |
// header
//  /  \
// body |
//  \  /
//  exit
// header:
//  ; 1 = MemoryDef(liveOnEntry)
// body:
//  ; 2 = MemoryDef(1)
// exit:
//  ; 3 = MemoryPhi({body, 2}, {header, 1})
//  ; 4 = MemoryDef(3); optimized to 1; the added edge should invalidate the
//  optimized access.
// Insert edge: entry -> exit; check that the MemorySSA update is correct.
TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiOpt) {
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  Argument *PointerArg = &*F->arg_begin();
  Type *Int8 = Type::getInt8Ty(C);
  BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
  BasicBlock *Header(BasicBlock::Create(C, "header", F));
  BasicBlock *Body(BasicBlock::Create(C, "body", F));
  BasicBlock *Exit(BasicBlock::Create(C, "exit", F));

  B.SetInsertPoint(Entry);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  BranchInst::Create(Header, Entry);

  B.SetInsertPoint(Header);
  StoreInst *S1 = B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Exit, Body);

  B.SetInsertPoint(Body);
  B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
  BranchInst::Create(Exit, Body);

  B.SetInsertPoint(Exit);
  StoreInst *S2 = B.CreateStore(B.getInt8(16), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;
  std::unique_ptr<MemorySSAUpdater> MSSAU =
      make_unique<MemorySSAUpdater>(&MSSA);
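
  // Before the CFG change, the walker optimizes S2 straight to S1's def: the
  // store to the local alloca in body does not alias PointerArg.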
  MemoryDef *DefS1 = cast<MemoryDef>(MSSA.getMemoryAccess(S1));
  EXPECT_EQ(DefS1, Walker->getClobberingMemoryAccess(S2));

  // Alter CFG, add edge: entry -> exit
  Entry->getTerminator()->eraseFromParent();
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Header, Exit);
  SmallVector<CFGUpdate, 1> Updates;
  Updates.push_back({cfg::UpdateKind::Insert, Entry, Exit});
  Analyses->DT.applyUpdates(Updates);
  MSSAU->applyInsertUpdates(Updates, Analyses->DT);
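
  // The inserted edge creates a MemoryPhi in exit and invalidates S2's
  // previously optimized clobber, so the walker must now return that Phi.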
  MemoryPhi *Phi = MSSA.getMemoryAccess(Exit);
  EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S2));
}

//     entry
//     /   \
//    a     |
//   / \    |
//  b   c   f
//   \ /    |
//    d     |
//     \   /
//       e
// f:
//  ; 1 = MemoryDef(liveOnEntry)
// e:
//  ; 2 = MemoryPhi({d, liveOnEntry}, {f, 1})
//
// Insert edge: f -> c; check that the MemorySSA update is correct.
// After update:
// f:
//  ; 1 = MemoryDef(liveOnEntry)
// c:
//  ; 3 = MemoryPhi({a, liveOnEntry}, {f, 1})
// d:
//  ; 4 = MemoryPhi({b, liveOnEntry}, {c, 3})
// e:
//  ; 2 = MemoryPhi({d, 4}, {f, 1})
TEST_F(MemorySSATest, TestAddedEdgeToBlockWithNoPhiAddNewPhis) {
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  Argument *PointerArg = &*F->arg_begin();
  BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
  BasicBlock *ABlock(BasicBlock::Create(C, "a", F));
  BasicBlock *BBlock(BasicBlock::Create(C, "b", F));
  BasicBlock *CBlock(BasicBlock::Create(C, "c", F));
  BasicBlock *DBlock(BasicBlock::Create(C, "d", F));
  BasicBlock *EBlock(BasicBlock::Create(C, "e", F));
  BasicBlock *FBlock(BasicBlock::Create(C, "f", F));

  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), ABlock, FBlock);
  B.SetInsertPoint(ABlock);
  B.CreateCondBr(B.getTrue(), BBlock, CBlock);
  B.SetInsertPoint(BBlock);
  BranchInst::Create(DBlock, BBlock);
  B.SetInsertPoint(CBlock);
  BranchInst::Create(DBlock, CBlock);
  B.SetInsertPoint(DBlock);
  BranchInst::Create(EBlock, DBlock);
  B.SetInsertPoint(FBlock);
  B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(EBlock, FBlock);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  std::unique_ptr<MemorySSAUpdater> MSSAU =
      make_unique<MemorySSAUpdater>(&MSSA);

  // Alter CFG, add edge: f -> c
  FBlock->getTerminator()->eraseFromParent();
  B.SetInsertPoint(FBlock);
  B.CreateCondBr(B.getTrue(), CBlock, EBlock);
  SmallVector<CFGUpdate, 1> Updates;
  Updates.push_back({cfg::UpdateKind::Insert, FBlock, CBlock});
  Analyses->DT.applyUpdates(Updates);
  MSSAU->applyInsertUpdates(Updates, Analyses->DT);
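
  // New Phis should have been added to c and d, and e's Phi should now take
  // d's Phi as its incoming value for the d edge.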
  MemoryPhi *MPC = MSSA.getMemoryAccess(CBlock);
  EXPECT_NE(MPC, nullptr);
  MemoryPhi *MPD = MSSA.getMemoryAccess(DBlock);
  EXPECT_NE(MPD, nullptr);
  MemoryPhi *MPE = MSSA.getMemoryAccess(EBlock);
  EXPECT_EQ(MPD, MPE->getIncomingValueForBlock(DBlock));