1 //===- MemorySSATest.cpp - Unit tests for MemorySSA -----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 #include "llvm/Analysis/MemorySSA.h"
9 #include "llvm/Analysis/AliasAnalysis.h"
10 #include "llvm/Analysis/BasicAliasAnalysis.h"
11 #include "llvm/Analysis/MemorySSAUpdater.h"
12 #include "llvm/IR/BasicBlock.h"
13 #include "llvm/IR/DataLayout.h"
14 #include "llvm/IR/Dominators.h"
15 #include "llvm/IR/IRBuilder.h"
16 #include "llvm/IR/Instructions.h"
17 #include "llvm/IR/LLVMContext.h"
18 #include "gtest/gtest.h"
20 using namespace llvm;
22 const static char DLString[] = "e-i64:64-f80:128-n8:16:32:64-S128";
24 /// There's a lot of common setup between these tests. This fixture helps reduce
25 /// that. Tests should mock up a function, store it in F, and then call
26 /// setupAnalyses().
27 class MemorySSATest : public testing::Test {
28 protected:
29 // N.B. Many of these members depend on each other (e.g. the Module depends on
30 // the Context, etc.). So, order matters here (and in TestAnalyses).
31 LLVMContext C;
32 Module M;
33 IRBuilder<> B;
34 DataLayout DL;
35 TargetLibraryInfoImpl TLII;
36 TargetLibraryInfo TLI;
37 Function *F;
39 // Things that we need to build after the function is created.
40 struct TestAnalyses {
41 DominatorTree DT;
42 AssumptionCache AC;
43 AAResults AA;
44 BasicAAResult BAA;
45 // We need to defer MSSA construction until AA is *entirely* set up, which
46 // requires calling addAAResult. Hence, we just use a pointer here.
47 std::unique_ptr<MemorySSA> MSSA;
48 MemorySSAWalker *Walker;
50 TestAnalyses(MemorySSATest &Test)
51 : DT(*Test.F), AC(*Test.F), AA(Test.TLI),
52 BAA(Test.DL, *Test.F, Test.TLI, AC, &DT) {
53 AA.addAAResult(BAA);
54 MSSA = std::make_unique<MemorySSA>(*Test.F, &AA, &DT);
55 Walker = MSSA->getWalker();
56 }
57 };
59 std::unique_ptr<TestAnalyses> Analyses;
61 void setupAnalyses() {
62 assert(F);
63 Analyses.reset(new TestAnalyses(*this));
64 }
66 public:
67 MemorySSATest()
68 : M("MemorySSATest", C), B(C), DL(DLString), TLI(TLII), F(nullptr) {}
71 TEST_F(MemorySSATest, CreateALoad) {
72 // We create a diamond where there is a store on one side, and then after
73 // building MemorySSA, create a load after the merge point, and use it to test
74 // updating by creating an access for the load.
75 F = Function::Create(
76 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
77 GlobalValue::ExternalLinkage, "F", &M);
78 BasicBlock *Entry(BasicBlock::Create(C, "", F));
79 BasicBlock *Left(BasicBlock::Create(C, "", F));
80 BasicBlock *Right(BasicBlock::Create(C, "", F));
81 BasicBlock *Merge(BasicBlock::Create(C, "", F));
82 B.SetInsertPoint(Entry);
83 B.CreateCondBr(B.getTrue(), Left, Right);
84 B.SetInsertPoint(Left);
85 Argument *PointerArg = &*F->arg_begin();
86 B.CreateStore(B.getInt8(16), PointerArg);
87 BranchInst::Create(Merge, Left);
88 BranchInst::Create(Merge, Right);
90 setupAnalyses();
91 MemorySSA &MSSA = *Analyses->MSSA;
92 MemorySSAUpdater Updater(&MSSA);
93 // Add the load
94 B.SetInsertPoint(Merge);
95 LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);
97 // MemoryPHI should already exist.
98 MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
99 EXPECT_NE(MP, nullptr);
101 // Create the load memory access
102 MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
103 LoadInst, MP, Merge, MemorySSA::Beginning));
104 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
105 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
106 MSSA.verifyMemorySSA();
107 }
108 TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
109 // We create a diamond, then build memoryssa with no memory accesses, and
110 // incrementally update it by inserting a store in the entry, a load in the
111 // merge point, then a store in the branch, another load in the merge point,
112 // and then a store in the entry.
113 F = Function::Create(
114 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
115 GlobalValue::ExternalLinkage, "F", &M);
116 BasicBlock *Entry(BasicBlock::Create(C, "", F));
117 BasicBlock *Left(BasicBlock::Create(C, "", F));
118 BasicBlock *Right(BasicBlock::Create(C, "", F));
119 BasicBlock *Merge(BasicBlock::Create(C, "", F));
120 B.SetInsertPoint(Entry);
121 B.CreateCondBr(B.getTrue(), Left, Right);
122 B.SetInsertPoint(Left, Left->begin());
123 Argument *PointerArg = &*F->arg_begin();
124 B.SetInsertPoint(Left);
125 B.CreateBr(Merge);
126 B.SetInsertPoint(Right);
127 B.CreateBr(Merge);
129 setupAnalyses();
130 MemorySSA &MSSA = *Analyses->MSSA;
131 MemorySSAUpdater Updater(&MSSA);
132 // Add the store
133 B.SetInsertPoint(Entry, Entry->begin());
134 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
135 MemoryAccess *EntryStoreAccess = Updater.createMemoryAccessInBB(
136 EntryStore, nullptr, Entry, MemorySSA::Beginning);
137 Updater.insertDef(cast<MemoryDef>(EntryStoreAccess));
139 // Add the load
140 B.SetInsertPoint(Merge, Merge->begin());
141 LoadInst *FirstLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
143 // MemoryPHI should not already exist.
144 MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
145 EXPECT_EQ(MP, nullptr);
147 // Create the load memory access
148 MemoryUse *FirstLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
149 FirstLoad, nullptr, Merge, MemorySSA::Beginning));
150 Updater.insertUse(FirstLoadAccess);
151 // Should just have a load using the entry access, because it should discover
152 // the phi is trivial
153 EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), EntryStoreAccess);
155 // Create a store on the left
156 // Add the store
157 B.SetInsertPoint(Left, Left->begin());
158 StoreInst *LeftStore = B.CreateStore(B.getInt8(16), PointerArg);
159 MemoryAccess *LeftStoreAccess = Updater.createMemoryAccessInBB(
160 LeftStore, nullptr, Left, MemorySSA::Beginning);
161 Updater.insertDef(cast<MemoryDef>(LeftStoreAccess), false);
163 // MemoryPHI should exist after adding LeftStore.
164 MP = MSSA.getMemoryAccess(Merge);
165 EXPECT_NE(MP, nullptr);
167 // Add the second load
168 B.SetInsertPoint(Merge, Merge->begin());
169 LoadInst *SecondLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
171 // Create the load memory access
172 MemoryUse *SecondLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
173 SecondLoad, nullptr, Merge, MemorySSA::Beginning));
174 Updater.insertUse(SecondLoadAccess);
175 // Now the load should be a phi of the entry store and the left store
176 MemoryPhi *MergePhi =
177 dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
178 EXPECT_NE(MergePhi, nullptr);
179 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
180 EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
181 // Now create a store below the existing one in the entry
182 B.SetInsertPoint(Entry, --Entry->end());
183 StoreInst *SecondEntryStore = B.CreateStore(B.getInt8(16), PointerArg);
184 MemoryAccess *SecondEntryStoreAccess = Updater.createMemoryAccessInBB(
185 SecondEntryStore, nullptr, Entry, MemorySSA::End);
186 // Insert it twice just to test renaming
187 Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), false);
188 EXPECT_NE(FirstLoadAccess->getDefiningAccess(), MergePhi);
189 Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), true);
190 EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), MergePhi);
191 // and make sure the phi below it got updated, despite being blocks away
192 MergePhi = dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
193 EXPECT_NE(MergePhi, nullptr);
194 EXPECT_EQ(MergePhi->getIncomingValue(0), SecondEntryStoreAccess);
195 EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
196 MSSA.verifyMemorySSA();
197 }
199 TEST_F(MemorySSATest, CreateALoadUpdater) {
200 // We create a diamond, then build memoryssa with no memory accesses, and
201 // incrementally update it by inserting a store in one of the branches, and a
202 // load in the merge point
203 F = Function::Create(
204 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
205 GlobalValue::ExternalLinkage, "F", &M);
206 BasicBlock *Entry(BasicBlock::Create(C, "", F));
207 BasicBlock *Left(BasicBlock::Create(C, "", F));
208 BasicBlock *Right(BasicBlock::Create(C, "", F));
209 BasicBlock *Merge(BasicBlock::Create(C, "", F));
210 B.SetInsertPoint(Entry);
211 B.CreateCondBr(B.getTrue(), Left, Right);
212 B.SetInsertPoint(Left, Left->begin());
213 Argument *PointerArg = &*F->arg_begin();
214 B.SetInsertPoint(Left);
215 B.CreateBr(Merge);
216 B.SetInsertPoint(Right);
217 B.CreateBr(Merge);
219 setupAnalyses();
220 MemorySSA &MSSA = *Analyses->MSSA;
221 MemorySSAUpdater Updater(&MSSA);
222 B.SetInsertPoint(Left, Left->begin());
223 // Add the store
224 StoreInst *SI = B.CreateStore(B.getInt8(16), PointerArg);
225 MemoryAccess *StoreAccess =
226 Updater.createMemoryAccessInBB(SI, nullptr, Left, MemorySSA::Beginning);
227 Updater.insertDef(cast<MemoryDef>(StoreAccess));
229 // MemoryPHI should be created when inserting the def
230 MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
231 EXPECT_NE(MP, nullptr);
233 // Add the load
234 B.SetInsertPoint(Merge, Merge->begin());
235 LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);
237 // Create the load memory access
238 MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
239 LoadInst, nullptr, Merge, MemorySSA::Beginning));
240 Updater.insertUse(LoadAccess);
241 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
242 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
243 MSSA.verifyMemorySSA();
244 }
246 TEST_F(MemorySSATest, SinkLoad) {
247 F = Function::Create(
248 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
249 GlobalValue::ExternalLinkage, "F", &M);
250 BasicBlock *Entry(BasicBlock::Create(C, "", F));
251 BasicBlock *Left(BasicBlock::Create(C, "", F));
252 BasicBlock *Right(BasicBlock::Create(C, "", F));
253 BasicBlock *Merge(BasicBlock::Create(C, "", F));
254 B.SetInsertPoint(Entry);
255 B.CreateCondBr(B.getTrue(), Left, Right);
256 B.SetInsertPoint(Left, Left->begin());
257 Argument *PointerArg = &*F->arg_begin();
258 B.SetInsertPoint(Left);
259 B.CreateBr(Merge);
260 B.SetInsertPoint(Right);
261 B.CreateBr(Merge);
263 // Load in left block
264 B.SetInsertPoint(Left, Left->begin());
265 LoadInst *LoadInst1 = B.CreateLoad(B.getInt8Ty(), PointerArg);
266 // Store in merge block
267 B.SetInsertPoint(Merge, Merge->begin());
268 B.CreateStore(B.getInt8(16), PointerArg);
270 setupAnalyses();
271 MemorySSA &MSSA = *Analyses->MSSA;
272 MemorySSAUpdater Updater(&MSSA);
274 // Mimic sinking of a load:
275 // - clone load
276 // - insert in "exit" block
277 // - insert in mssa
278 // - remove from original block
280 LoadInst *LoadInstClone = cast<LoadInst>(LoadInst1->clone());
281 Merge->getInstList().insert(Merge->begin(), LoadInstClone);
282 MemoryAccess * NewLoadAccess =
283 Updater.createMemoryAccessInBB(LoadInstClone, nullptr,
284 LoadInstClone->getParent(),
285 MemorySSA::Beginning);
286 Updater.insertUse(cast<MemoryUse>(NewLoadAccess));
287 MSSA.verifyMemorySSA();
288 Updater.removeMemoryAccess(MSSA.getMemoryAccess(LoadInst1));
289 MSSA.verifyMemorySSA();
290 }
292 TEST_F(MemorySSATest, MoveAStore) {
293 // We create a diamond where there is a store in the entry, a store on one side, and
294 // a load at the end. After building MemorySSA, we test updating by moving
295 // the store from the side block to the entry block. This destroys the old
296 // access.
297 F = Function::Create(
298 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
299 GlobalValue::ExternalLinkage, "F", &M);
300 BasicBlock *Entry(BasicBlock::Create(C, "", F));
301 BasicBlock *Left(BasicBlock::Create(C, "", F));
302 BasicBlock *Right(BasicBlock::Create(C, "", F));
303 BasicBlock *Merge(BasicBlock::Create(C, "", F));
304 B.SetInsertPoint(Entry);
305 Argument *PointerArg = &*F->arg_begin();
306 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
307 B.CreateCondBr(B.getTrue(), Left, Right);
308 B.SetInsertPoint(Left);
309 StoreInst *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
310 BranchInst::Create(Merge, Left);
311 BranchInst::Create(Merge, Right);
312 B.SetInsertPoint(Merge);
313 B.CreateLoad(B.getInt8Ty(), PointerArg);
314 setupAnalyses();
315 MemorySSA &MSSA = *Analyses->MSSA;
316 MemorySSAUpdater Updater(&MSSA);
317 // Move the store
318 SideStore->moveBefore(Entry->getTerminator());
319 MemoryAccess *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
320 MemoryAccess *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
321 MemoryAccess *NewStoreAccess = Updater.createMemoryAccessAfter(
322 SideStore, EntryStoreAccess, EntryStoreAccess);
323 EntryStoreAccess->replaceAllUsesWith(NewStoreAccess);
324 Updater.removeMemoryAccess(SideStoreAccess);
325 MSSA.verifyMemorySSA();
326 }
328 TEST_F(MemorySSATest, MoveAStoreUpdater) {
329 // We create a diamond where there is a store in the entry, a store on one side, and
330 // a load at the end. After building MemorySSA, we test updating by moving
331 // the store from the side block to the entry block. This destroys the old
332 // access.
333 F = Function::Create(
334 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
335 GlobalValue::ExternalLinkage, "F", &M);
336 BasicBlock *Entry(BasicBlock::Create(C, "", F));
337 BasicBlock *Left(BasicBlock::Create(C, "", F));
338 BasicBlock *Right(BasicBlock::Create(C, "", F));
339 BasicBlock *Merge(BasicBlock::Create(C, "", F));
340 B.SetInsertPoint(Entry);
341 Argument *PointerArg = &*F->arg_begin();
342 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
343 B.CreateCondBr(B.getTrue(), Left, Right);
344 B.SetInsertPoint(Left);
345 auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
346 BranchInst::Create(Merge, Left);
347 BranchInst::Create(Merge, Right);
348 B.SetInsertPoint(Merge);
349 auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
350 setupAnalyses();
351 MemorySSA &MSSA = *Analyses->MSSA;
352 MemorySSAUpdater Updater(&MSSA);
354 // Move the store
355 SideStore->moveBefore(Entry->getTerminator());
356 auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
357 auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
358 auto *NewStoreAccess = Updater.createMemoryAccessAfter(
359 SideStore, EntryStoreAccess, EntryStoreAccess);
360 // Before, the load will point to a phi of the EntryStore and SideStore.
361 auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
362 EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
363 MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
364 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
365 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
366 Updater.removeMemoryAccess(SideStoreAccess);
367 Updater.insertDef(cast<MemoryDef>(NewStoreAccess));
368 // After it's a phi of the new side store access.
369 EXPECT_EQ(MergePhi->getIncomingValue(0), NewStoreAccess);
370 EXPECT_EQ(MergePhi->getIncomingValue(1), NewStoreAccess);
371 MSSA.verifyMemorySSA();
372 }
374 TEST_F(MemorySSATest, MoveAStoreUpdaterMove) {
375 // We create a diamond where there is a store in the entry, a store on one side, and
376 // a load at the end. After building MemorySSA, we test updating by moving
377 // the store from the side block to the entry block. This does not destroy
378 // the old access.
379 F = Function::Create(
380 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
381 GlobalValue::ExternalLinkage, "F", &M);
382 BasicBlock *Entry(BasicBlock::Create(C, "", F));
383 BasicBlock *Left(BasicBlock::Create(C, "", F));
384 BasicBlock *Right(BasicBlock::Create(C, "", F));
385 BasicBlock *Merge(BasicBlock::Create(C, "", F));
386 B.SetInsertPoint(Entry);
387 Argument *PointerArg = &*F->arg_begin();
388 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
389 B.CreateCondBr(B.getTrue(), Left, Right);
390 B.SetInsertPoint(Left);
391 auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
392 BranchInst::Create(Merge, Left);
393 BranchInst::Create(Merge, Right);
394 B.SetInsertPoint(Merge);
395 auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
396 setupAnalyses();
397 MemorySSA &MSSA = *Analyses->MSSA;
398 MemorySSAUpdater Updater(&MSSA);
400 // Move the store
401 auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
402 auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
403 // Before, the load will point to a phi of the EntryStore and SideStore.
404 auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
405 EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
406 MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
407 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
408 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
409 SideStore->moveBefore(*EntryStore->getParent(), ++EntryStore->getIterator());
410 Updater.moveAfter(SideStoreAccess, EntryStoreAccess);
411 // After, it's a phi of the side store.
412 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
413 EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);
415 MSSA.verifyMemorySSA();
416 }
418 TEST_F(MemorySSATest, MoveAStoreAllAround) {
419 // We create a diamond where there is a store in the entry, a store on one side, and
420 // a load at the end. After building MemorySSA, we test updating by moving
421 // the store from the side block to the entry block, then to the other side
422 // block, then to before the load. This does not destroy the old access.
423 F = Function::Create(
424 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
425 GlobalValue::ExternalLinkage, "F", &M);
426 BasicBlock *Entry(BasicBlock::Create(C, "", F));
427 BasicBlock *Left(BasicBlock::Create(C, "", F));
428 BasicBlock *Right(BasicBlock::Create(C, "", F));
429 BasicBlock *Merge(BasicBlock::Create(C, "", F));
430 B.SetInsertPoint(Entry);
431 Argument *PointerArg = &*F->arg_begin();
432 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
433 B.CreateCondBr(B.getTrue(), Left, Right);
434 B.SetInsertPoint(Left);
435 auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
436 BranchInst::Create(Merge, Left);
437 BranchInst::Create(Merge, Right);
438 B.SetInsertPoint(Merge);
439 auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
440 setupAnalyses();
441 MemorySSA &MSSA = *Analyses->MSSA;
442 MemorySSAUpdater Updater(&MSSA);
444 // Move the store
445 auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
446 auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
447 // Before, the load will point to a phi of the EntryStore and SideStore.
448 auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
449 EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
450 MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
451 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
452 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
453 // Move the store before the entry store
454 SideStore->moveBefore(*EntryStore->getParent(), EntryStore->getIterator());
455 Updater.moveBefore(SideStoreAccess, EntryStoreAccess);
456 // After, it's a phi of the entry store.
457 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
458 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
459 MSSA.verifyMemorySSA();
460 // Now move the store to the right branch
461 SideStore->moveBefore(*Right, Right->begin());
462 Updater.moveToPlace(SideStoreAccess, Right, MemorySSA::Beginning);
463 MSSA.verifyMemorySSA();
464 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
465 EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);
466 // Now move it before the load
467 SideStore->moveBefore(MergeLoad);
468 Updater.moveBefore(SideStoreAccess, LoadAccess);
469 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
470 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
471 MSSA.verifyMemorySSA();
472 }
474 TEST_F(MemorySSATest, RemoveAPhi) {
475 // We create a diamond where there is a store on one side, and then a load
476 // after the merge point. This enables us to test a bunch of different
477 // removal cases.
478 F = Function::Create(
479 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
480 GlobalValue::ExternalLinkage, "F", &M);
481 BasicBlock *Entry(BasicBlock::Create(C, "", F));
482 BasicBlock *Left(BasicBlock::Create(C, "", F));
483 BasicBlock *Right(BasicBlock::Create(C, "", F));
484 BasicBlock *Merge(BasicBlock::Create(C, "", F));
485 B.SetInsertPoint(Entry);
486 B.CreateCondBr(B.getTrue(), Left, Right);
487 B.SetInsertPoint(Left);
488 Argument *PointerArg = &*F->arg_begin();
489 StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);
490 BranchInst::Create(Merge, Left);
491 BranchInst::Create(Merge, Right);
492 B.SetInsertPoint(Merge);
493 LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);
495 setupAnalyses();
496 MemorySSA &MSSA = *Analyses->MSSA;
497 MemorySSAUpdater Updater(&MSSA);
499 // Before, the load will be a use of a phi<store, liveonentry>.
500 MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst));
501 MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));
502 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
503 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
504 // Kill the store
505 Updater.removeMemoryAccess(StoreAccess);
506 MemoryPhi *MP = cast<MemoryPhi>(DefiningAccess);
507 // Verify the phi ended up as liveonentry, liveonentry
508 for (auto &Op : MP->incoming_values())
509 EXPECT_TRUE(MSSA.isLiveOnEntryDef(cast<MemoryAccess>(Op.get())));
510 // Replace the phi uses with the live on entry def
511 MP->replaceAllUsesWith(MSSA.getLiveOnEntryDef());
512 // Verify the load is now defined by liveOnEntryDef
513 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
514 // Remove the PHI
515 Updater.removeMemoryAccess(MP);
516 MSSA.verifyMemorySSA();
517 }
519 TEST_F(MemorySSATest, RemoveMemoryAccess) {
520 // We create a diamond where there is a store on one side, and then a load
521 // after the merge point. This enables us to test a bunch of different
522 // removal cases.
523 F = Function::Create(
524 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
525 GlobalValue::ExternalLinkage, "F", &M);
526 BasicBlock *Entry(BasicBlock::Create(C, "", F));
527 BasicBlock *Left(BasicBlock::Create(C, "", F));
528 BasicBlock *Right(BasicBlock::Create(C, "", F));
529 BasicBlock *Merge(BasicBlock::Create(C, "", F));
530 B.SetInsertPoint(Entry);
531 B.CreateCondBr(B.getTrue(), Left, Right);
532 B.SetInsertPoint(Left);
533 Argument *PointerArg = &*F->arg_begin();
534 StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);
535 BranchInst::Create(Merge, Left);
536 BranchInst::Create(Merge, Right);
537 B.SetInsertPoint(Merge);
538 LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);
540 setupAnalyses();
541 MemorySSA &MSSA = *Analyses->MSSA;
542 MemorySSAWalker *Walker = Analyses->Walker;
543 MemorySSAUpdater Updater(&MSSA);
545 // Before, the load will be a use of a phi<store, liveonentry>. It should be
546 // the same after.
547 MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst));
548 MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));
549 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
550 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
551 // The load is currently clobbered by one of the phi arguments, so the walker
552 // should determine the clobbering access as the phi.
553 EXPECT_EQ(DefiningAccess, Walker->getClobberingMemoryAccess(LoadInst));
554 Updater.removeMemoryAccess(StoreAccess);
555 MSSA.verifyMemorySSA();
556 // After removing the access, let's see if we got the right accesses
557 // The load should still point to the phi ...
558 EXPECT_EQ(DefiningAccess, LoadAccess->getDefiningAccess());
559 // but we should now get live on entry for the clobbering definition of the
560 // load, since the walker will look past the phi node because every argument is
561 // the same.
562 // XXX: This currently requires either removing the phi or resetting optimized
563 // on the load
565 EXPECT_FALSE(
566 MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
567 // If we reset optimized, we get live on entry.
568 LoadAccess->resetOptimized();
569 EXPECT_TRUE(
570 MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
571 // The phi should now be a two entry phi with two live on entry defs.
572 for (const auto &Op : DefiningAccess->operands()) {
573 MemoryAccess *Operand = cast<MemoryAccess>(&*Op);
574 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Operand));
575 }
577 // Now we try to remove the single valued phi
578 Updater.removeMemoryAccess(DefiningAccess);
579 MSSA.verifyMemorySSA();
580 // Now the load should be a load of live on entry.
581 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
582 }
584 // We had a bug with caching where the walker would report MemoryDef#3's clobber
585 // (below) was MemoryDef#1.
587 // define void @F(i8*) {
588 // %A = alloca i8, i8 1
589 // ; 1 = MemoryDef(liveOnEntry)
590 // store i8 0, i8* %A
591 // ; 2 = MemoryDef(1)
592 // store i8 1, i8* %A
593 // ; 3 = MemoryDef(2)
594 // store i8 2, i8* %A
595 // }
596 TEST_F(MemorySSATest, TestTripleStore) {
597 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
598 GlobalValue::ExternalLinkage, "F", &M);
599 B.SetInsertPoint(BasicBlock::Create(C, "", F));
600 Type *Int8 = Type::getInt8Ty(C);
601 Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
602 StoreInst *S1 = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
603 StoreInst *S2 = B.CreateStore(ConstantInt::get(Int8, 1), Alloca);
604 StoreInst *S3 = B.CreateStore(ConstantInt::get(Int8, 2), Alloca);
606 setupAnalyses();
607 MemorySSA &MSSA = *Analyses->MSSA;
608 MemorySSAWalker *Walker = Analyses->Walker;
610 unsigned I = 0;
611 for (StoreInst *V : {S1, S2, S3}) {
612 // Everything should be clobbered by its defining access
613 MemoryAccess *DefiningAccess = MSSA.getMemoryAccess(V)->getDefiningAccess();
614 MemoryAccess *WalkerClobber = Walker->getClobberingMemoryAccess(V);
615 EXPECT_EQ(DefiningAccess, WalkerClobber)
616 << "Store " << I << " doesn't have the correct clobbering access";
617 // EXPECT_EQ expands such that if we increment I above, it won't get
618 // incremented except when we try to print the error message.
619 ++I;
620 }
621 }
623 // ...And fixing the above bug made it obvious that, when walking, MemorySSA's
624 // walker was caching the initial node it walked. This was fine (albeit
625 // mostly redundant) unless the initial node being walked is a clobber for the
626 // query. In that case, we'd cache that the node clobbered itself.
627 TEST_F(MemorySSATest, TestStoreAndLoad) {
628 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
629 GlobalValue::ExternalLinkage, "F", &M);
630 B.SetInsertPoint(BasicBlock::Create(C, "", F));
631 Type *Int8 = Type::getInt8Ty(C);
632 Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
633 Instruction *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
634 Instruction *LI = B.CreateLoad(Int8, Alloca);
636 setupAnalyses();
637 MemorySSA &MSSA = *Analyses->MSSA;
638 MemorySSAWalker *Walker = Analyses->Walker;
640 MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LI);
641 EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SI));
642 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SI)));
643 }
645 // Another bug (related to the above two fixes): It was noted that, given the
646 // following code:
647 // ; 1 = MemoryDef(liveOnEntry)
648 // store i8 0, i8* %1
650 // ...A query to getClobberingMemoryAccess(MemoryAccess*, MemoryLocation) would
651 // hand back the store (correctly). A later call to
652 // getClobberingMemoryAccess(const Instruction*) would also hand back the store
653 // (incorrectly; it should return liveOnEntry).
655 // This test checks that repeated calls to either function return what they're
656 // meant to.
657 TEST_F(MemorySSATest, TestStoreDoubleQuery) {
658 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
659 GlobalValue::ExternalLinkage, "F", &M);
660 B.SetInsertPoint(BasicBlock::Create(C, "", F));
661 Type *Int8 = Type::getInt8Ty(C);
662 Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
663 StoreInst *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
665 setupAnalyses();
666 MemorySSA &MSSA = *Analyses->MSSA;
667 MemorySSAWalker *Walker = Analyses->Walker;
669 MemoryAccess *StoreAccess = MSSA.getMemoryAccess(SI);
670 MemoryLocation StoreLoc = MemoryLocation::get(SI);
671 MemoryAccess *Clobber =
672 Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
673 MemoryAccess *LiveOnEntry = Walker->getClobberingMemoryAccess(SI);
675 EXPECT_EQ(Clobber, StoreAccess);
676 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
678 // Try again (with entries in the cache already) for good measure...
679 Clobber = Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
680 LiveOnEntry = Walker->getClobberingMemoryAccess(SI);
681 EXPECT_EQ(Clobber, StoreAccess);
682 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
683 }
685 // Bug: During phi optimization, the walker wouldn't cache to the proper result
686 // in the farthest-walked BB.
688 // Specifically, it would assume that whatever we walked to was a clobber.
689 // "Whatever we walked to" isn't a clobber if we hit a cache entry.
691 // ...So, we need a test case that looks like:
692 // A
693 // / \
694 // B |
695 // \ /
696 // C
698 // Where, when we try to optimize a thing in 'C', a blocker is found in 'B'.
699 // The walk must determine that the blocker exists by using cache entries *while
700 // walking* 'B'.
701 TEST_F(MemorySSATest, PartialWalkerCacheWithPhis) {
702 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
703 GlobalValue::ExternalLinkage, "F", &M);
704 B.SetInsertPoint(BasicBlock::Create(C, "A", F));
705 Type *Int8 = Type::getInt8Ty(C);
706 Constant *One = ConstantInt::get(Int8, 1);
707 Constant *Zero = ConstantInt::get(Int8, 0);
708 Value *AllocA = B.CreateAlloca(Int8, One, "a");
709 Value *AllocB = B.CreateAlloca(Int8, One, "b");
710 BasicBlock *IfThen = BasicBlock::Create(C, "B", F);
711 BasicBlock *IfEnd = BasicBlock::Create(C, "C", F);
713 B.CreateCondBr(UndefValue::get(Type::getInt1Ty(C)), IfThen, IfEnd);
715 B.SetInsertPoint(IfThen);
716 Instruction *FirstStore = B.CreateStore(Zero, AllocA);
717 B.CreateStore(Zero, AllocB);
718 Instruction *ALoad0 = B.CreateLoad(Int8, AllocA, "");
719 Instruction *BStore = B.CreateStore(Zero, AllocB);
720 // Due to use optimization/etc. we make a store to A, which is removed after
721 // we build MSSA. This helps keep the test case simple-ish.
722 Instruction *KillStore = B.CreateStore(Zero, AllocA);
723 Instruction *ALoad = B.CreateLoad(Int8, AllocA, "");
724 B.CreateBr(IfEnd);
726 B.SetInsertPoint(IfEnd);
727 Instruction *BelowPhi = B.CreateStore(Zero, AllocA);
729 setupAnalyses();
730 MemorySSA &MSSA = *Analyses->MSSA;
731 MemorySSAWalker *Walker = Analyses->Walker;
732 MemorySSAUpdater Updater(&MSSA);
734 // Kill `KillStore`; it exists solely so that the load after it won't be
735 // optimized to FirstStore.
736 Updater.removeMemoryAccess(MSSA.getMemoryAccess(KillStore));
737 KillStore->eraseFromParent();
738 auto *ALoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(ALoad));
739 EXPECT_EQ(ALoadMA->getDefiningAccess(), MSSA.getMemoryAccess(BStore));
741 // Populate the cache for the store to AllocB directly after FirstStore. It
742 // should point to something in block B (so something in D can't be optimized
743 // to it).
744 MemoryAccess *Load0Clobber = Walker->getClobberingMemoryAccess(ALoad0);
745 EXPECT_EQ(MSSA.getMemoryAccess(FirstStore), Load0Clobber);
747 // If the bug exists, this will introduce a bad cache entry for %a on BStore.
748 // It will point to the store to %b after FirstStore. This only happens during
749 // phi optimization.
750 MemoryAccess *BottomClobber = Walker->getClobberingMemoryAccess(BelowPhi);
751 MemoryAccess *Phi = MSSA.getMemoryAccess(IfEnd);
752 EXPECT_EQ(BottomClobber, Phi);
754 // This query will first check the cache for {%a, BStore}. It should point to
755 // FirstStore, not to the store after FirstStore.
756 MemoryAccess *UseClobber = Walker->getClobberingMemoryAccess(ALoad);
757 EXPECT_EQ(UseClobber, MSSA.getMemoryAccess(FirstStore));
758 }
760 // Test that our walker properly handles loads with invariant.load metadata.
761 // It's a bit hacky, since we add the metadata *after* building MSSA.
762 // Otherwise, the use optimizer will optimize it for us, which
763 // isn't what we want.
764 // FIXME: It may be easier/cleaner to just add an 'optimize uses?' flag to MSSA.
765 TEST_F(MemorySSATest, WalkerInvariantLoadOpt) {
766 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
767 GlobalValue::ExternalLinkage, "F", &M);
768 B.SetInsertPoint(BasicBlock::Create(C, "", F));
769 Type *Int8 = Type::getInt8Ty(C);
770 Constant *One = ConstantInt::get(Int8, 1);
771 Value *AllocA = B.CreateAlloca(Int8, One, "");
773 Instruction *Store = B.CreateStore(One, AllocA);
774 Instruction *Load = B.CreateLoad(Int8, AllocA);
776 setupAnalyses();
777 MemorySSA &MSSA = *Analyses->MSSA;
778 MemorySSAWalker *Walker = Analyses->Walker;
780 auto *LoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(Load));
781 auto *StoreMA = cast<MemoryDef>(MSSA.getMemoryAccess(Store));
782 EXPECT_EQ(LoadMA->getDefiningAccess(), StoreMA);
784 // ...At the time of writing, no cache should exist for LoadMA. Be a bit
785 // flexible to future changes.
786 Walker->invalidateInfo(LoadMA);
787 Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(C, {}));
789 MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LoadMA);
790 EXPECT_EQ(LoadClobber, MSSA.getLiveOnEntryDef());
791 }
793 // Test loads get reoptimized properly by the walker.
794 TEST_F(MemorySSATest, WalkerReopt) {
795 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
796 GlobalValue::ExternalLinkage, "F", &M);
797 B.SetInsertPoint(BasicBlock::Create(C, "", F));
798 Type *Int8 = Type::getInt8Ty(C);
799 Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
800 Instruction *SIA = B.CreateStore(ConstantInt::get(Int8, 0), AllocaA);
801 Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
802 Instruction *SIB = B.CreateStore(ConstantInt::get(Int8, 0), AllocaB);
803 Instruction *LIA = B.CreateLoad(Int8, AllocaA);
805 setupAnalyses();
806 MemorySSA &MSSA = *Analyses->MSSA;
807 MemorySSAWalker *Walker = Analyses->Walker;
808 MemorySSAUpdater Updater(&MSSA);
810 MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LIA);
811 MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LIA));
812 EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SIA));
813 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SIA)));
814 Updater.removeMemoryAccess(LoadAccess);
816 // Create the load memory access pointing to an unoptimized place.
817 MemoryUse *NewLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
818 LIA, MSSA.getMemoryAccess(SIB), LIA->getParent(), MemorySSA::End));
819 // This should cause it to be optimized.
820 EXPECT_EQ(Walker->getClobberingMemoryAccess(NewLoadAccess), LoadClobber);
821 EXPECT_EQ(NewLoadAccess->getDefiningAccess(), LoadClobber);
822 }
824 // Test out MemorySSAUpdater::moveBefore
825 TEST_F(MemorySSATest, MoveAboveMemoryDef) {
826 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
827 GlobalValue::ExternalLinkage, "F", &M);
828 B.SetInsertPoint(BasicBlock::Create(C, "", F));
830 Type *Int8 = Type::getInt8Ty(C);
831 Value *A = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
832 Value *B_ = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
833 Value *C = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C");
835 StoreInst *StoreA0 = B.CreateStore(ConstantInt::get(Int8, 0), A);
836 StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 0), B_);
837 LoadInst *LoadB = B.CreateLoad(Int8, B_);
838 StoreInst *StoreA1 = B.CreateStore(ConstantInt::get(Int8, 4), A);
839 StoreInst *StoreC = B.CreateStore(ConstantInt::get(Int8, 4), C);
840 StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 4), A);
841 LoadInst *LoadC = B.CreateLoad(Int8, C);
843 setupAnalyses();
844 MemorySSA &MSSA = *Analyses->MSSA;
845 MemorySSAWalker &Walker = *Analyses->Walker;
847 MemorySSAUpdater Updater(&MSSA);
848 StoreC->moveBefore(StoreB);
849 Updater.moveBefore(cast<MemoryDef>(MSSA.getMemoryAccess(StoreC)),
850 cast<MemoryDef>(MSSA.getMemoryAccess(StoreB)));
852 MSSA.verifyMemorySSA();
854 EXPECT_EQ(MSSA.getMemoryAccess(StoreB)->getDefiningAccess(),
855 MSSA.getMemoryAccess(StoreC));
856 EXPECT_EQ(MSSA.getMemoryAccess(StoreC)->getDefiningAccess(),
857 MSSA.getMemoryAccess(StoreA0));
858 EXPECT_EQ(MSSA.getMemoryAccess(StoreA2)->getDefiningAccess(),
859 MSSA.getMemoryAccess(StoreA1));
860 EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadB),
861 MSSA.getMemoryAccess(StoreB));
862 EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadC),
863 MSSA.getMemoryAccess(StoreC));
865 // exercise block numbering
866 EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreC),
867 MSSA.getMemoryAccess(StoreB)));
868 EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreA1),
869 MSSA.getMemoryAccess(StoreA2)));
870 }
872 TEST_F(MemorySSATest, Irreducible) {
873 // Create the equivalent of
874 // x = something
875 // if (...)
876 // goto second_loop_entry
877 // while (...) {
878 // second_loop_entry:
879 // }
880 // use(x)
882 SmallVector<PHINode *, 8> Inserted;
883 IRBuilder<> B(C);
884 F = Function::Create(
885 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
886 GlobalValue::ExternalLinkage, "F", &M);
888 // Make blocks
889 BasicBlock *IfBB = BasicBlock::Create(C, "if", F);
890 BasicBlock *LoopStartBB = BasicBlock::Create(C, "loopstart", F);
891 BasicBlock *LoopMainBB = BasicBlock::Create(C, "loopmain", F);
892 BasicBlock *AfterLoopBB = BasicBlock::Create(C, "afterloop", F);
893 B.SetInsertPoint(IfBB);
894 B.CreateCondBr(B.getTrue(), LoopMainBB, LoopStartBB);
895 B.SetInsertPoint(LoopStartBB);
896 B.CreateBr(LoopMainBB);
897 B.SetInsertPoint(LoopMainBB);
898 B.CreateCondBr(B.getTrue(), LoopStartBB, AfterLoopBB);
899 B.SetInsertPoint(AfterLoopBB);
900 Argument *FirstArg = &*F->arg_begin();
901 setupAnalyses();
902 MemorySSA &MSSA = *Analyses->MSSA;
903 MemorySSAUpdater Updater(&MSSA);
904 // Create the load memory acccess
905 LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), FirstArg);
906 MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
907 LoadInst, nullptr, AfterLoopBB, MemorySSA::Beginning));
908 Updater.insertUse(LoadAccess);
909 MSSA.verifyMemorySSA();
910 }
912 TEST_F(MemorySSATest, MoveToBeforeLiveOnEntryInvalidatesCache) {
913 // Create:
914 // %1 = alloca i8
915 // ; 1 = MemoryDef(liveOnEntry)
916 // store i8 0, i8* %1
917 // ; 2 = MemoryDef(1)
918 // store i8 0, i8* %1
920 // ...And be sure that MSSA's caching doesn't give us `1` for the clobber of
921 // `2` after `1` is removed.
922 IRBuilder<> B(C);
923 F = Function::Create(
924 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
925 GlobalValue::ExternalLinkage, "F", &M);
927 BasicBlock *Entry = BasicBlock::Create(C, "if", F);
928 B.SetInsertPoint(Entry);
930 Value *A = B.CreateAlloca(B.getInt8Ty());
931 StoreInst *StoreA = B.CreateStore(B.getInt8(0), A);
932 StoreInst *StoreB = B.CreateStore(B.getInt8(0), A);
934 setupAnalyses();
936 MemorySSA &MSSA = *Analyses->MSSA;
938 auto *DefA = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
939 auto *DefB = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));
941 MemoryAccess *BClobber = MSSA.getWalker()->getClobberingMemoryAccess(DefB);
942 ASSERT_EQ(DefA, BClobber);
944 MemorySSAUpdater(&MSSA).removeMemoryAccess(DefA);
945 StoreA->eraseFromParent();
947 EXPECT_EQ(DefB->getDefiningAccess(), MSSA.getLiveOnEntryDef());
949 EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefB),
950 MSSA.getLiveOnEntryDef())
951 << "(DefA = " << DefA << ")";
954 TEST_F(MemorySSATest, RemovingDefInvalidatesCache) {
955 // Create:
956 // %x = alloca i8
957 // %y = alloca i8
958 // ; 1 = MemoryDef(liveOnEntry)
959 // store i8 0, i8* %x
960 // ; 2 = MemoryDef(1)
961 // store i8 0, i8* %y
962 // ; 3 = MemoryDef(2)
963 // store i8 0, i8* %x
965 // And be sure that MSSA's caching handles the removal of def `1`
966 // appropriately.
967 IRBuilder<> B(C);
968 F = Function::Create(
969 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
970 GlobalValue::ExternalLinkage, "F", &M);
972 BasicBlock *Entry = BasicBlock::Create(C, "if", F);
973 B.SetInsertPoint(Entry);
975 Value *X = B.CreateAlloca(B.getInt8Ty());
976 Value *Y = B.CreateAlloca(B.getInt8Ty());
977 StoreInst *StoreX1 = B.CreateStore(B.getInt8(0), X);
978 StoreInst *StoreY = B.CreateStore(B.getInt8(0), Y);
979 StoreInst *StoreX2 = B.CreateStore(B.getInt8(0), X);
981 setupAnalyses();
983 MemorySSA &MSSA = *Analyses->MSSA;
985 auto *DefX1 = cast<MemoryDef>(MSSA.getMemoryAccess(StoreX1));
986 auto *DefY = cast<MemoryDef>(MSSA.getMemoryAccess(StoreY));
987 auto *DefX2 = cast<MemoryDef>(MSSA.getMemoryAccess(StoreX2));
989 EXPECT_EQ(DefX2->getDefiningAccess(), DefY);
990 MemoryAccess *X2Clobber = MSSA.getWalker()->getClobberingMemoryAccess(DefX2);
991 ASSERT_EQ(DefX1, X2Clobber);
993 MemorySSAUpdater(&MSSA).removeMemoryAccess(DefX1);
994 StoreX1->eraseFromParent();
996 EXPECT_EQ(DefX2->getDefiningAccess(), DefY);
997 EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefX2),
998 MSSA.getLiveOnEntryDef())
999 << "(DefX1 = " << DefX1 << ")";
1002 // Test Must alias for optimized uses
1003 TEST_F(MemorySSATest, TestLoadMustAlias) {
1004 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
1005 GlobalValue::ExternalLinkage, "F", &M);
1006 B.SetInsertPoint(BasicBlock::Create(C, "", F));
1007 Type *Int8 = Type::getInt8Ty(C);
1008 Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
1009 Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
1011 B.CreateStore(ConstantInt::get(Int8, 1), AllocaB);
1012 // Check load from LOE
1013 LoadInst *LA1 = B.CreateLoad(Int8, AllocaA, "");
1014 // Check load alias cached for second load
1015 LoadInst *LA2 = B.CreateLoad(Int8, AllocaA, "");
1017 B.CreateStore(ConstantInt::get(Int8, 1), AllocaA);
1018 // Check load from store/def
1019 LoadInst *LA3 = B.CreateLoad(Int8, AllocaA, "");
1020 // Check load alias cached for second load
1021 LoadInst *LA4 = B.CreateLoad(Int8, AllocaA, "");
1023 setupAnalyses();
1024 MemorySSA &MSSA = *Analyses->MSSA;
1026 unsigned I = 0;
1027 for (LoadInst *V : {LA1, LA2}) {
1028 MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
1029 EXPECT_EQ(MemUse->getOptimizedAccessType(), None)
1030 << "Load " << I << " doesn't have the correct alias information";
1031 // EXPECT_EQ expands such that if we increment I above, it won't get
1032 // incremented except when we try to print the error message.
1033 ++I;
1034 }
1035 for (LoadInst *V : {LA3, LA4}) {
1036 MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
1037 EXPECT_EQ(MemUse->getOptimizedAccessType(), MustAlias)
1038 << "Load " << I << " doesn't have the correct alias information";
1039 // EXPECT_EQ expands such that if we increment I above, it won't get
1040 // incremented except when we try to print the error message.
1041 ++I;
1042 }
1043 }
1045 // Test Must alias for optimized defs.
1046 TEST_F(MemorySSATest, TestStoreMustAlias) {
1047 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
1048 GlobalValue::ExternalLinkage, "F", &M);
1049 B.SetInsertPoint(BasicBlock::Create(C, "", F));
1050 Type *Int8 = Type::getInt8Ty(C);
1051 Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
1052 Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
1053 StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaA);
1054 StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaB);
1055 StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaA);
1056 StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaB);
1057 StoreInst *SA3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaA);
1058 StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaB);
1060 setupAnalyses();
1061 MemorySSA &MSSA = *Analyses->MSSA;
1062 MemorySSAWalker *Walker = Analyses->Walker;
1064 unsigned I = 0;
1065 for (StoreInst *V : {SA1, SB1, SA2, SB2, SA3, SB3}) {
1066 MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
1067 EXPECT_EQ(MemDef->isOptimized(), false)
1068 << "Store " << I << " is optimized from the start?";
1069 EXPECT_EQ(MemDef->getOptimizedAccessType(), MayAlias)
1070 << "Store " << I
1071 << " has correct alias information before being optimized?";
1072 if (V == SA1)
1073 Walker->getClobberingMemoryAccess(V);
1074 else {
1075 MemoryAccess *Def = MemDef->getDefiningAccess();
1076 MemoryAccess *Clob = Walker->getClobberingMemoryAccess(V);
1077 EXPECT_NE(Def, Clob) << "Store " << I
1078 << " has Defining Access equal to Clobbering Access";
1080 EXPECT_EQ(MemDef->isOptimized(), true)
1081 << "Store " << I << " was not optimized";
1082 if (I == 0 || I == 1)
1083 EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
1084 << "Store " << I << " doesn't have the correct alias information";
1085 else
1086 EXPECT_EQ(MemDef->getOptimizedAccessType(), MustAlias)
1087 << "Store " << I << " doesn't have the correct alias information";
1088 // EXPECT_EQ expands such that if we increment I above, it won't get
1089 // incremented except when we try to print the error message.
1090 ++I;
1091 }
1092 }
1094 // Test May alias for optimized uses.
1095 TEST_F(MemorySSATest, TestLoadMayAlias) {
1096 F = Function::Create(FunctionType::get(B.getVoidTy(),
1097 {B.getInt8PtrTy(), B.getInt8PtrTy()},
1098 false),
1099 GlobalValue::ExternalLinkage, "F", &M);
1100 B.SetInsertPoint(BasicBlock::Create(C, "", F));
1101 Type *Int8 = Type::getInt8Ty(C);
1102 auto *ArgIt = F->arg_begin();
1103 Argument *PointerA = &*ArgIt;
1104 Argument *PointerB = &*(++ArgIt);
1105 B.CreateStore(ConstantInt::get(Int8, 1), PointerB);
1106 LoadInst *LA1 = B.CreateLoad(Int8, PointerA, "");
1107 B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
1108 LoadInst *LB1 = B.CreateLoad(Int8, PointerB, "");
1109 B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
1110 LoadInst *LA2 = B.CreateLoad(Int8, PointerA, "");
1111 B.CreateStore(ConstantInt::get(Int8, 0), PointerB);
1112 LoadInst *LB2 = B.CreateLoad(Int8, PointerB, "");
1114 setupAnalyses();
1115 MemorySSA &MSSA = *Analyses->MSSA;
1117 unsigned I = 0;
1118 for (LoadInst *V : {LA1, LB1}) {
1119 MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
1120 EXPECT_EQ(MemUse->getOptimizedAccessType(), MayAlias)
1121 << "Load " << I << " doesn't have the correct alias information";
1122 // EXPECT_EQ expands such that if we increment I above, it won't get
1123 // incremented except when we try to print the error message.
1124 ++I;
1125 }
1126 for (LoadInst *V : {LA2, LB2}) {
1127 MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
1128 EXPECT_EQ(MemUse->getOptimizedAccessType(), MustAlias)
1129 << "Load " << I << " doesn't have the correct alias information";
1130 // EXPECT_EQ expands such that if we increment I above, it won't get
1131 // incremented except when we try to print the error message.
1132 ++I;
1133 }
1134 }
1136 // Test May alias for optimized defs.
1137 TEST_F(MemorySSATest, TestStoreMayAlias) {
1138 F = Function::Create(FunctionType::get(B.getVoidTy(),
1139 {B.getInt8PtrTy(), B.getInt8PtrTy()},
1140 false),
1141 GlobalValue::ExternalLinkage, "F", &M);
1142 B.SetInsertPoint(BasicBlock::Create(C, "", F));
1143 Type *Int8 = Type::getInt8Ty(C);
1144 auto *ArgIt = F->arg_begin();
1145 Argument *PointerA = &*ArgIt;
1146 Argument *PointerB = &*(++ArgIt);
1147 Value *AllocaC = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C");
1148 // Store into arg1, must alias because it's LOE => must
1149 StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
1150 // Store into arg2, may alias store to arg1 => may
1151 StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), PointerB);
1152 // Store into alloca, no alias with args, so must alias LOE => must
1153 StoreInst *SC1 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaC);
1154 // Store into arg1, may alias store to arg2 => may
1155 StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 3), PointerA);
1156 // Store into arg2, may alias store to arg1 => may
1157 StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 4), PointerB);
1158 // Store into alloca, no alias with args, so must alias SC1 => must
1159 StoreInst *SC2 = B.CreateStore(ConstantInt::get(Int8, 5), AllocaC);
1160 // Store into arg2, must alias store to arg2 => must
1161 StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 6), PointerB);
1162 std::initializer_list<StoreInst *> Sts = {SA1, SB1, SC1, SA2, SB2, SC2, SB3};
1164 setupAnalyses();
1165 MemorySSA &MSSA = *Analyses->MSSA;
1166 MemorySSAWalker *Walker = Analyses->Walker;
1168 unsigned I = 0;
1169 for (StoreInst *V : Sts) {
1170 MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
1171 EXPECT_EQ(MemDef->isOptimized(), false)
1172 << "Store " << I << " is optimized from the start?";
1173 EXPECT_EQ(MemDef->getOptimizedAccessType(), MayAlias)
1174 << "Store " << I
1175 << " has correct alias information before being optimized?";
1176 ++I;
1177 }
1179 for (StoreInst *V : Sts)
1180 Walker->getClobberingMemoryAccess(V);
1182 I = 0;
1183 for (StoreInst *V : Sts) {
1184 MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
1185 EXPECT_EQ(MemDef->isOptimized(), true)
1186 << "Store " << I << " was not optimized";
1187 if (I == 1 || I == 3 || I == 4)
1188 EXPECT_EQ(MemDef->getOptimizedAccessType(), MayAlias)
1189 << "Store " << I << " doesn't have the correct alias information";
1190 else if (I == 0 || I == 2)
1191 EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
1192 << "Store " << I << " doesn't have the correct alias information";
1193 else
1194 EXPECT_EQ(MemDef->getOptimizedAccessType(), MustAlias)
1195 << "Store " << I << " doesn't have the correct alias information";
1196 // EXPECT_EQ expands such that if we increment I above, it won't get
1197 // incremented except when we try to print the error message.
1198 ++I;
1199 }
1200 }
1202 TEST_F(MemorySSATest, LifetimeMarkersAreClobbers) {
1203 // Example code:
1204 // define void @a(i8* %foo) {
1205 // %bar = getelementptr i8, i8* %foo, i64 1
1206 // store i8 0, i8* %foo
1207 // store i8 0, i8* %bar
1208 // call void @llvm.lifetime.end.p0i8(i64 8, i8* %foo)
1209 // call void @llvm.lifetime.start.p0i8(i64 8, i8* %foo)
1210 // store i8 0, i8* %foo
1211 // store i8 0, i8* %bar
1212 // ret void
1213 // }
1215 // Patterns like this are possible after inlining; the stores to %foo and %bar
1216 // should both be clobbered by the lifetime.start call if they're dominated by
1217 // it.
1219 IRBuilder<> B(C);
1220 F = Function::Create(
1221 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
1222 GlobalValue::ExternalLinkage, "F", &M);
1224 // Make blocks
1225 BasicBlock *Entry = BasicBlock::Create(C, "entry", F);
1227 B.SetInsertPoint(Entry);
1228 Value *Foo = &*F->arg_begin();
1230 Value *Bar = B.CreateGEP(B.getInt8Ty(), Foo, B.getInt64(1), "bar");
1232 B.CreateStore(B.getInt8(0), Foo);
1233 B.CreateStore(B.getInt8(0), Bar);
1235 auto GetLifetimeIntrinsic = [&](Intrinsic::ID ID) {
1236 return Intrinsic::getDeclaration(&M, ID, {Foo->getType()});
1237 };
1239 B.CreateCall(GetLifetimeIntrinsic(Intrinsic::lifetime_end),
1240 {B.getInt64(2), Foo});
1241 Instruction *LifetimeStart = B.CreateCall(
1242 GetLifetimeIntrinsic(Intrinsic::lifetime_start), {B.getInt64(2), Foo});
1244 Instruction *FooStore = B.CreateStore(B.getInt8(0), Foo);
1245 Instruction *BarStore = B.CreateStore(B.getInt8(0), Bar);
1247 setupAnalyses();
1248 MemorySSA &MSSA = *Analyses->MSSA;
1250 MemoryAccess *LifetimeStartAccess = MSSA.getMemoryAccess(LifetimeStart);
1251 ASSERT_NE(LifetimeStartAccess, nullptr);
1253 MemoryAccess *FooAccess = MSSA.getMemoryAccess(FooStore);
1254 ASSERT_NE(FooAccess, nullptr);
1256 MemoryAccess *BarAccess = MSSA.getMemoryAccess(BarStore);
1257 ASSERT_NE(BarAccess, nullptr);
1259 MemoryAccess *FooClobber =
1260 MSSA.getWalker()->getClobberingMemoryAccess(FooAccess);
1261 EXPECT_EQ(FooClobber, LifetimeStartAccess);
1263 MemoryAccess *BarClobber =
1264 MSSA.getWalker()->getClobberingMemoryAccess(BarAccess);
1265 EXPECT_EQ(BarClobber, LifetimeStartAccess);
1266 }
1268 TEST_F(MemorySSATest, DefOptimizationsAreInvalidatedOnMoving) {
1269 IRBuilder<> B(C);
1270 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getInt1Ty()}, false),
1271 GlobalValue::ExternalLinkage, "F", &M);
1273 // Make a CFG like
1274 // entry
1275 // / \
1276 // a b
1277 // \ /
1278 // c
1280 // Put a def in A and a def in B, move the def from A -> B, observe as the
1281 // optimization is invalidated.
1282 BasicBlock *Entry = BasicBlock::Create(C, "entry", F);
1283 BasicBlock *BlockA = BasicBlock::Create(C, "a", F);
1284 BasicBlock *BlockB = BasicBlock::Create(C, "b", F);
1285 BasicBlock *BlockC = BasicBlock::Create(C, "c", F);
1287 B.SetInsertPoint(Entry);
1288 Type *Int8 = Type::getInt8Ty(C);
1289 Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "alloc");
1290 StoreInst *StoreEntry = B.CreateStore(B.getInt8(0), Alloca);
1291 B.CreateCondBr(B.getTrue(), BlockA, BlockB);
1293 B.SetInsertPoint(BlockA);
1294 StoreInst *StoreA = B.CreateStore(B.getInt8(1), Alloca);
1295 B.CreateBr(BlockC);
1297 B.SetInsertPoint(BlockB);
1298 StoreInst *StoreB = B.CreateStore(B.getInt8(2), Alloca);
1299 B.CreateBr(BlockC);
1301 B.SetInsertPoint(BlockC);
1302 B.CreateUnreachable();
1304 setupAnalyses();
1305 MemorySSA &MSSA = *Analyses->MSSA;
1307 auto *AccessEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreEntry));
1308 auto *StoreAEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
1309 auto *StoreBEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));
1311 ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry),
1312 AccessEntry);
1313 ASSERT_TRUE(StoreAEntry->isOptimized());
1315 ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreBEntry),
1316 AccessEntry);
1317 ASSERT_TRUE(StoreBEntry->isOptimized());
1319 // Note that if we did InsertionPlace::Beginning, we don't go out of our way
1320 // to invalidate the cache for StoreBEntry. If the user wants to actually do
1321 // moves like these, it's up to them to ensure that nearby cache entries are
1322 // correctly invalidated (which, in general, requires walking all instructions
1323 // that the moved instruction dominates. So we probably shouldn't be doing
1324 // moves like this in general. Still, works as a test-case. ;) )
1325 MemorySSAUpdater(&MSSA).moveToPlace(StoreAEntry, BlockB,
1326 MemorySSA::InsertionPlace::End);
1327 ASSERT_FALSE(StoreAEntry->isOptimized());
1328 ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry),
1329 StoreBEntry);
1330 }
1332 TEST_F(MemorySSATest, TestOptimizedDefsAreProperUses) {
1333 F = Function::Create(FunctionType::get(B.getVoidTy(),
1334 {B.getInt8PtrTy(), B.getInt8PtrTy()},
1335 false),
1336 GlobalValue::ExternalLinkage, "F", &M);
1337 B.SetInsertPoint(BasicBlock::Create(C, "", F));
1338 Type *Int8 = Type::getInt8Ty(C);
1339 Value *AllocA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
1340 Value *AllocB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
1342 StoreInst *StoreA = B.CreateStore(ConstantInt::get(Int8, 0), AllocA);
1343 StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 1), AllocB);
1344 StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocA);
1346 setupAnalyses();
1347 MemorySSA &MSSA = *Analyses->MSSA;
1348 MemorySSAWalker *Walker = Analyses->Walker;
1350 // If these don't hold, there's no chance of the test result being useful.
1351 ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA),
1352 MSSA.getLiveOnEntryDef());
1353 ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreB),
1354 MSSA.getLiveOnEntryDef());
1355 auto *StoreAAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
1356 auto *StoreA2Access = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA2));
1357 ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA2), StoreAAccess);
1358 ASSERT_EQ(StoreA2Access->getOptimized(), StoreAAccess);
1360 auto *StoreBAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));
1361 ASSERT_LT(StoreAAccess->getID(), StoreBAccess->getID());
1362 ASSERT_LT(StoreBAccess->getID(), StoreA2Access->getID());
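// Sort users by ID (asserted above to follow program order here) so the
// comparisons below don't depend on use-list iteration order.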
1364 auto SortVecByID = [](std::vector<const MemoryDef *> &Defs) {
1365 llvm::sort(Defs, [](const MemoryDef *LHS, const MemoryDef *RHS) {
1366 return LHS->getID() < RHS->getID();
1367 });
1368 };
1370 auto SortedUserList = [&](const MemoryDef *MD) {
1371 std::vector<const MemoryDef *> Result;
1372 transform(MD->users(), std::back_inserter(Result),
1373 [](const User *U) { return cast<MemoryDef>(U); });
1374 SortVecByID(Result);
1375 return Result;
1376 };
1378 // Use std::vectors, since they have nice pretty-printing if the test fails.
1379 // Parens are necessary because EXPECT_EQ is a macro, and we have commas in
1380 // our init lists...
1381 EXPECT_EQ(SortedUserList(StoreAAccess),
1382 (std::vector<const MemoryDef *>{StoreBAccess, StoreA2Access}));
1384 EXPECT_EQ(SortedUserList(StoreBAccess),
1385 std::vector<const MemoryDef *>{StoreA2Access});
1387 // StoreAAccess should be present twice, since it uses liveOnEntry for both
1388 // its defining and optimized accesses. This is a bit awkward, and is not
1389 // relied upon anywhere at the moment. If this is painful, we can fix it.
1390 EXPECT_EQ(SortedUserList(cast<MemoryDef>(MSSA.getLiveOnEntryDef())),
1391 (std::vector<const MemoryDef *>{StoreAAccess, StoreAAccess,
1392 StoreBAccess}));
1393 }
1395 // entry
1396 // |
1397 // header
1398 // / \
1399 // body |
1400 // \ /
1401 // exit
1402 // header:
1403 // ; 1 = MemoryDef(liveOnEntry)
1404 // body:
1405 // ; 2 = MemoryDef(1)
1406 // exit:
1407 // ; 3 = MemoryPhi({body, 2}, {header, 1})
1408 // ; 4 = MemoryDef(3); optimized to 3, cannot optimize through the phi.
1409 // Insert edge: entry -> exit, and check that the MemorySSA update is correct.
1410 TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiNotOpt) {
1411 F = Function::Create(
1412 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
1413 GlobalValue::ExternalLinkage, "F", &M);
1414 Argument *PointerArg = &*F->arg_begin();
1415 BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
1416 BasicBlock *Header(BasicBlock::Create(C, "header", F));
1417 BasicBlock *Body(BasicBlock::Create(C, "body", F));
1418 BasicBlock *Exit(BasicBlock::Create(C, "exit", F));
1419 B.SetInsertPoint(Entry);
1420 BranchInst::Create(Header, Entry);
1421 B.SetInsertPoint(Header);
1422 B.CreateStore(B.getInt8(16), PointerArg);
1423 B.CreateCondBr(B.getTrue(), Exit, Body);
1424 B.SetInsertPoint(Body);
1425 B.CreateStore(B.getInt8(16), PointerArg);
1426 BranchInst::Create(Exit, Body);
1427 B.SetInsertPoint(Exit);
1428 StoreInst *S1 = B.CreateStore(B.getInt8(16), PointerArg);
1430 setupAnalyses();
1431 MemorySSA &MSSA = *Analyses->MSSA;
1432 MemorySSAWalker *Walker = Analyses->Walker;
1433 std::unique_ptr<MemorySSAUpdater> MSSAU =
1434 std::make_unique<MemorySSAUpdater>(&MSSA);
1436 MemoryPhi *Phi = MSSA.getMemoryAccess(Exit);
1437 EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1));
1439 // Alter CFG, add edge: entry -> exit
1440 Entry->getTerminator()->eraseFromParent();
1441 B.SetInsertPoint(Entry);
1442 B.CreateCondBr(B.getTrue(), Header, Exit);
1443 SmallVector<CFGUpdate, 1> Updates;
1444 Updates.push_back({cfg::UpdateKind::Insert, Entry, Exit});
1445 Analyses->DT.applyUpdates(Updates);
1446 MSSAU->applyInsertUpdates(Updates, Analyses->DT);
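// The new entry -> exit edge adds an incoming value to the exit MemoryPhi, but
// S1 still cannot be optimized past that Phi, so its clobber should be
// unchanged.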
1447 EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1));
1448 }
1450 // entry
1451 // |
1452 // header
1453 // / \
1454 // body |
1455 // \ /
1456 // exit
1457 // header:
1458 // ; 1 = MemoryDef(liveOnEntry)
1459 // body:
1460 // ; 2 = MemoryDef(1)
1461 // exit:
1462 // ; 3 = MemoryPhi({body, 2}, {header, 1})
1463 // ; 4 = MemoryDef(3); this currently optimizes to 1; the added edge should
1464 // invalidate that optimized access.
1465 // Insert edge: entry -> exit, and check that the MemorySSA update is correct.
1466 TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiOpt) {
1467 F = Function::Create(
1468 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
1469 GlobalValue::ExternalLinkage, "F", &M);
1470 Argument *PointerArg = &*F->arg_begin();
1471 Type *Int8 = Type::getInt8Ty(C);
1472 BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
1473 BasicBlock *Header(BasicBlock::Create(C, "header", F));
1474 BasicBlock *Body(BasicBlock::Create(C, "body", F));
1475 BasicBlock *Exit(BasicBlock::Create(C, "exit", F));
1477 B.SetInsertPoint(Entry);
1478 Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
1479 BranchInst::Create(Header, Entry);
1481 B.SetInsertPoint(Header);
1482 StoreInst *S1 = B.CreateStore(B.getInt8(16), PointerArg);
1483 B.CreateCondBr(B.getTrue(), Exit, Body);
1485 B.SetInsertPoint(Body);
1486 B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
1487 BranchInst::Create(Exit, Body);
1489 B.SetInsertPoint(Exit);
1490 StoreInst *S2 = B.CreateStore(B.getInt8(16), PointerArg);
1492 setupAnalyses();
1493 MemorySSA &MSSA = *Analyses->MSSA;
1494 MemorySSAWalker *Walker = Analyses->Walker;
1495 std::unique_ptr<MemorySSAUpdater> MSSAU =
1496 std::make_unique<MemorySSAUpdater>(&MSSA);
1498 MemoryDef *DefS1 = cast<MemoryDef>(MSSA.getMemoryAccess(S1));
1499 EXPECT_EQ(DefS1, Walker->getClobberingMemoryAccess(S2));
1501 // Alter CFG, add edge: entry -> exit
1502 Entry->getTerminator()->eraseFromParent();
1503 B.SetInsertPoint(Entry);
1504 B.CreateCondBr(B.getTrue(), Header, Exit);
1505 SmallVector<CFGUpdate, 1> Updates;
1506 Updates.push_back({cfg::UpdateKind::Insert, Entry, Exit});
1507 Analyses->DT.applyUpdates(Updates);
1508 MSSAU->applyInsertUpdates(Updates, Analyses->DT);
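// The new entry -> exit edge gives the exit MemoryPhi a liveOnEntry incoming
// value, so S2 should no longer be optimizable past the Phi; the Phi itself
// should now be reported as its clobber.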
1510 MemoryPhi *Phi = MSSA.getMemoryAccess(Exit);
1511 EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S2));
1512 }
1514 // entry
1515 // / |
1516 // a |
1517 // / \ |
1518 // b c f
1519 // \ / |
1520 // d |
1521 // \ /
1522 // e
1523 // f:
1524 // ; 1 = MemoryDef(liveOnEntry)
1525 // e:
1526 // ; 2 = MemoryPhi({d, liveOnEntry}, {f, 1})
1528 // Insert edge: f -> c, and check that the update is correct.
1529 // After update:
1530 // f:
1531 // ; 1 = MemoryDef(liveOnEntry)
1532 // c:
1533 // ; 3 = MemoryPhi({a, liveOnEntry}, {f, 1})
1534 // d:
1535 // ; 4 = MemoryPhi({b, liveOnEntry}, {c, 3})
1536 // e:
1537 // ; 2 = MemoryPhi({d, 4}, {f, 1})
1538 TEST_F(MemorySSATest, TestAddedEdgeToBlockWithNoPhiAddNewPhis) {
1539 F = Function::Create(
1540 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
1541 GlobalValue::ExternalLinkage, "F", &M);
1542 Argument *PointerArg = &*F->arg_begin();
1543 BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
1544 BasicBlock *ABlock(BasicBlock::Create(C, "a", F));
1545 BasicBlock *BBlock(BasicBlock::Create(C, "b", F));
1546 BasicBlock *CBlock(BasicBlock::Create(C, "c", F));
1547 BasicBlock *DBlock(BasicBlock::Create(C, "d", F));
1548 BasicBlock *EBlock(BasicBlock::Create(C, "e", F));
1549 BasicBlock *FBlock(BasicBlock::Create(C, "f", F));
1551 B.SetInsertPoint(Entry);
1552 B.CreateCondBr(B.getTrue(), ABlock, FBlock);
1553 B.SetInsertPoint(ABlock);
1554 B.CreateCondBr(B.getTrue(), BBlock, CBlock);
1555 B.SetInsertPoint(BBlock);
1556 BranchInst::Create(DBlock, BBlock);
1557 B.SetInsertPoint(CBlock);
1558 BranchInst::Create(DBlock, CBlock);
1559 B.SetInsertPoint(DBlock);
1560 BranchInst::Create(EBlock, DBlock);
1561 B.SetInsertPoint(FBlock);
1562 B.CreateStore(B.getInt8(16), PointerArg);
1563 BranchInst::Create(EBlock, FBlock);
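// Only block f writes memory, so before the CFG update the only MemoryDef is
// in f and the only MemoryPhi is in e (see the diagram above).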
1565 setupAnalyses();
1566 MemorySSA &MSSA = *Analyses->MSSA;
1567 std::unique_ptr<MemorySSAUpdater> MSSAU =
1568 std::make_unique<MemorySSAUpdater>(&MSSA);
1570 // Alter CFG, add edge: f -> c
1571 FBlock->getTerminator()->eraseFromParent();
1572 B.SetInsertPoint(FBlock);
1573 B.CreateCondBr(B.getTrue(), CBlock, EBlock);
1574 SmallVector<CFGUpdate, 1> Updates;
1575 Updates.push_back({cfg::UpdateKind::Insert, FBlock, CBlock});
1576 Analyses->DT.applyUpdates(Updates);
1577 MSSAU->applyInsertUpdates(Updates, Analyses->DT);
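// Per the sketch above, the update should create new MemoryPhis in c and d,
// and e's Phi should now take d's new Phi as its incoming value from d.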
1579 MemoryPhi *MPC = MSSA.getMemoryAccess(CBlock);
1580 EXPECT_NE(MPC, nullptr);
1581 MemoryPhi *MPD = MSSA.getMemoryAccess(DBlock);
1582 EXPECT_NE(MPD, nullptr);
1583 MemoryPhi *MPE = MSSA.getMemoryAccess(EBlock);
1584 EXPECT_EQ(MPD, MPE->getIncomingValueForBlock(DBlock));