1 //===- MemorySSA.cpp - Unit tests for MemorySSA ---------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 #include "llvm/Analysis/MemorySSA.h"
9 #include "llvm/Analysis/AliasAnalysis.h"
10 #include "llvm/Analysis/AssumptionCache.h"
11 #include "llvm/Analysis/BasicAliasAnalysis.h"
12 #include "llvm/Analysis/MemorySSAUpdater.h"
13 #include "llvm/Analysis/TargetLibraryInfo.h"
14 #include "llvm/AsmParser/Parser.h"
15 #include "llvm/IR/BasicBlock.h"
16 #include "llvm/IR/DataLayout.h"
17 #include "llvm/IR/Dominators.h"
18 #include "llvm/IR/IRBuilder.h"
19 #include "llvm/IR/Instructions.h"
20 #include "llvm/IR/LLVMContext.h"
21 #include "llvm/IR/Module.h"
22 #include "llvm/Support/SourceMgr.h"
23 #include "gtest/gtest.h"
25 using namespace llvm;
27 const static char DLString[] = "e-i64:64-f80:128-n8:16:32:64-S128";
29 /// There's a lot of common setup between these tests. This fixture helps reduce
30 /// that. Tests should mock up a function, store it in F, and then call
31 /// setupAnalyses().
32 class MemorySSATest : public testing::Test {
33 protected:
34 // N.B. Many of these members depend on each other (e.g. the Module depends on
35 // the Context, etc.). So, order matters here (and in TestAnalyses).
36 LLVMContext C;
37 Module M;
38 IRBuilder<> B;
39 DataLayout DL;
40 TargetLibraryInfoImpl TLII;
41 TargetLibraryInfo TLI;
42 Function *F;
44 // Things that we need to build after the function is created.
45 struct TestAnalyses {
46 DominatorTree DT;
47 AssumptionCache AC;
48 AAResults AA;
49 BasicAAResult BAA;
50 // We need to defer MSSA construction until AA is *entirely* set up, which
51 // requires calling addAAResult. Hence, we just use a pointer here.
52 std::unique_ptr<MemorySSA> MSSA;
53 MemorySSAWalker *Walker;
55 TestAnalyses(MemorySSATest &Test)
56 : DT(*Test.F), AC(*Test.F), AA(Test.TLI),
57 BAA(Test.DL, *Test.F, Test.TLI, AC, &DT) {
58 AA.addAAResult(BAA);
59 MSSA = std::make_unique<MemorySSA>(*Test.F, &AA, &DT);
60 Walker = MSSA->getWalker();
61 }
62 };
64 std::unique_ptr<TestAnalyses> Analyses;
66 void setupAnalyses() {
67 assert(F);
68 Analyses.reset(new TestAnalyses(*this));
69 }
71 public:
72 MemorySSATest()
73 : M("MemorySSATest", C), B(C), DL(DLString), TLI(TLII), F(nullptr) {}
74 };
76 TEST_F(MemorySSATest, CreateALoad) {
77 // We create a diamond where there is a store on one side, and then after
78 // building MemorySSA, create a load after the merge point, and use it to test
79 // updating by creating an access for the load.
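// A sketch of the IR built below (block and value names are illustrative; the
// test creates unnamed blocks):
//   entry:
//     br i1 true, label %left, label %right
//   left:
//     store i8 16, ptr %arg
//     br label %merge
//   right:
//     br label %merge
//   merge:
//     ; the load is created here only after MemorySSA has been built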
80 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
81 GlobalValue::ExternalLinkage, "F", &M);
82 BasicBlock *Entry(BasicBlock::Create(C, "", F));
83 BasicBlock *Left(BasicBlock::Create(C, "", F));
84 BasicBlock *Right(BasicBlock::Create(C, "", F));
85 BasicBlock *Merge(BasicBlock::Create(C, "", F));
86 B.SetInsertPoint(Entry);
87 B.CreateCondBr(B.getTrue(), Left, Right);
88 B.SetInsertPoint(Left);
89 Argument *PointerArg = &*F->arg_begin();
90 B.CreateStore(B.getInt8(16), PointerArg);
91 BranchInst::Create(Merge, Left);
92 BranchInst::Create(Merge, Right);
94 setupAnalyses();
95 MemorySSA &MSSA = *Analyses->MSSA;
96 MemorySSAUpdater Updater(&MSSA);
97 // Add the load
98 B.SetInsertPoint(Merge);
99 LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);
101 // MemoryPHI should already exist.
102 MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
103 EXPECT_NE(MP, nullptr);
105 // Create the load memory access
106 MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
107 LoadInst, MP, Merge, MemorySSA::Beginning));
108 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
109 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
110 MSSA.verifyMemorySSA();
112 TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
113 // We create a diamond, then build MemorySSA with no memory accesses, and
114 // incrementally update it by inserting a store in the entry, a load in the
115 // merge point, then a store in the branch, another load in the merge point,
116 // and then a store in the entry.
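// In update order (a sketch of what the code below exercises):
//   1. store in entry        -> insertDef
//   2. load at merge         -> insertUse  (uses the entry store; the phi is trivial)
//   3. store in left         -> insertDef  (a MemoryPhi appears at merge)
//   4. second load at merge  -> insertUse  (uses the new MemoryPhi)
//   5. second store in entry -> insertDef  (inserted twice to exercise renaming)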
117 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
118 GlobalValue::ExternalLinkage, "F", &M);
119 BasicBlock *Entry(BasicBlock::Create(C, "", F));
120 BasicBlock *Left(BasicBlock::Create(C, "", F));
121 BasicBlock *Right(BasicBlock::Create(C, "", F));
122 BasicBlock *Merge(BasicBlock::Create(C, "", F));
123 B.SetInsertPoint(Entry);
124 B.CreateCondBr(B.getTrue(), Left, Right);
125 B.SetInsertPoint(Left, Left->begin());
126 Argument *PointerArg = &*F->arg_begin();
127 B.SetInsertPoint(Left);
128 B.CreateBr(Merge);
129 B.SetInsertPoint(Right);
130 B.CreateBr(Merge);
132 setupAnalyses();
133 MemorySSA &MSSA = *Analyses->MSSA;
134 MemorySSAUpdater Updater(&MSSA);
135 // Add the store
136 B.SetInsertPoint(Entry, Entry->begin());
137 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
138 MemoryAccess *EntryStoreAccess = Updater.createMemoryAccessInBB(
139 EntryStore, nullptr, Entry, MemorySSA::Beginning);
140 Updater.insertDef(cast<MemoryDef>(EntryStoreAccess));
142 // Add the load
143 B.SetInsertPoint(Merge, Merge->begin());
144 LoadInst *FirstLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
146 // MemoryPHI should not already exist.
147 MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
148 EXPECT_EQ(MP, nullptr);
150 // Create the load memory access
151 MemoryUse *FirstLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
152 FirstLoad, nullptr, Merge, MemorySSA::Beginning));
153 Updater.insertUse(FirstLoadAccess);
154 // Should just have a load using the entry access, because it should discover
155 // the phi is trivial
156 EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), EntryStoreAccess);
158 // Create a store on the left
159 // Add the store
160 B.SetInsertPoint(Left, Left->begin());
161 StoreInst *LeftStore = B.CreateStore(B.getInt8(16), PointerArg);
162 MemoryAccess *LeftStoreAccess = Updater.createMemoryAccessInBB(
163 LeftStore, nullptr, Left, MemorySSA::Beginning);
164 Updater.insertDef(cast<MemoryDef>(LeftStoreAccess), false);
166 // MemoryPHI should exist after adding LeftStore.
167 MP = MSSA.getMemoryAccess(Merge);
168 EXPECT_NE(MP, nullptr);
170 // Add the second load
171 B.SetInsertPoint(Merge, Merge->begin());
172 LoadInst *SecondLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
174 // Create the load memory access
175 MemoryUse *SecondLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
176 SecondLoad, nullptr, Merge, MemorySSA::Beginning));
177 Updater.insertUse(SecondLoadAccess);
178 // Now the load should be a phi of the entry store and the left store
179 MemoryPhi *MergePhi =
180 dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
181 EXPECT_NE(MergePhi, nullptr);
182 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
183 EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
184 // Now create a store below the existing one in the entry
185 B.SetInsertPoint(Entry, --Entry->end());
186 StoreInst *SecondEntryStore = B.CreateStore(B.getInt8(16), PointerArg);
187 MemoryAccess *SecondEntryStoreAccess = Updater.createMemoryAccessInBB(
188 SecondEntryStore, nullptr, Entry, MemorySSA::End);
189 // Insert it twice just to test renaming
190 Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), false);
191 EXPECT_NE(FirstLoadAccess->getDefiningAccess(), MergePhi);
192 Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), true);
193 EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), MergePhi);
194 // and make sure the phi below it got updated, despite being blocks away
195 MergePhi = dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
196 EXPECT_NE(MergePhi, nullptr);
197 EXPECT_EQ(MergePhi->getIncomingValue(0), SecondEntryStoreAccess);
198 EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
199 MSSA.verifyMemorySSA();
202 TEST_F(MemorySSATest, CreateALoadUpdater) {
203 // We create a diamond, then build MemorySSA with no memory accesses, and
204 // incrementally update it by inserting a store in one of the branches, and a
205 // load in the merge point
206 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
207 GlobalValue::ExternalLinkage, "F", &M);
208 BasicBlock *Entry(BasicBlock::Create(C, "", F));
209 BasicBlock *Left(BasicBlock::Create(C, "", F));
210 BasicBlock *Right(BasicBlock::Create(C, "", F));
211 BasicBlock *Merge(BasicBlock::Create(C, "", F));
212 B.SetInsertPoint(Entry);
213 B.CreateCondBr(B.getTrue(), Left, Right);
214 B.SetInsertPoint(Left, Left->begin());
215 Argument *PointerArg = &*F->arg_begin();
216 B.SetInsertPoint(Left);
217 B.CreateBr(Merge);
218 B.SetInsertPoint(Right);
219 B.CreateBr(Merge);
221 setupAnalyses();
222 MemorySSA &MSSA = *Analyses->MSSA;
223 MemorySSAUpdater Updater(&MSSA);
224 B.SetInsertPoint(Left, Left->begin());
225 // Add the store
226 StoreInst *SI = B.CreateStore(B.getInt8(16), PointerArg);
227 MemoryAccess *StoreAccess =
228 Updater.createMemoryAccessInBB(SI, nullptr, Left, MemorySSA::Beginning);
229 Updater.insertDef(cast<MemoryDef>(StoreAccess));
231 // MemoryPHI should be created when inserting the def
232 MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
233 EXPECT_NE(MP, nullptr);
235 // Add the load
236 B.SetInsertPoint(Merge, Merge->begin());
237 LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);
239 // Create the load memory access
240 MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
241 LoadInst, nullptr, Merge, MemorySSA::Beginning));
242 Updater.insertUse(LoadAccess);
243 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
244 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
245 MSSA.verifyMemorySSA();
248 TEST_F(MemorySSATest, SinkLoad) {
249 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
250 GlobalValue::ExternalLinkage, "F", &M);
251 BasicBlock *Entry(BasicBlock::Create(C, "", F));
252 BasicBlock *Left(BasicBlock::Create(C, "", F));
253 BasicBlock *Right(BasicBlock::Create(C, "", F));
254 BasicBlock *Merge(BasicBlock::Create(C, "", F));
255 B.SetInsertPoint(Entry);
256 B.CreateCondBr(B.getTrue(), Left, Right);
257 B.SetInsertPoint(Left, Left->begin());
258 Argument *PointerArg = &*F->arg_begin();
259 B.SetInsertPoint(Left);
260 B.CreateBr(Merge);
261 B.SetInsertPoint(Right);
262 B.CreateBr(Merge);
264 // Load in left block
265 B.SetInsertPoint(Left, Left->begin());
266 LoadInst *LoadInst1 = B.CreateLoad(B.getInt8Ty(), PointerArg);
267 // Store in merge block
268 B.SetInsertPoint(Merge, Merge->begin());
269 B.CreateStore(B.getInt8(16), PointerArg);
271 setupAnalyses();
272 MemorySSA &MSSA = *Analyses->MSSA;
273 MemorySSAUpdater Updater(&MSSA);
275 // Mimic sinking of a load:
276 // - clone load
277 // - insert in "exit" block
278 // - insert in mssa
279 // - remove from original block
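// At the MemorySSA level this is: createMemoryAccessInBB gives the cloned load
// a MemoryUse, insertUse wires up its defining access, and removeMemoryAccess
// drops the access of the original load (the instruction itself is left in
// place here).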
281 LoadInst *LoadInstClone = cast<LoadInst>(LoadInst1->clone());
282 LoadInstClone->insertInto(Merge, Merge->begin());
283 MemoryAccess * NewLoadAccess =
284 Updater.createMemoryAccessInBB(LoadInstClone, nullptr,
285 LoadInstClone->getParent(),
286 MemorySSA::Beginning);
287 Updater.insertUse(cast<MemoryUse>(NewLoadAccess));
288 MSSA.verifyMemorySSA();
289 Updater.removeMemoryAccess(MSSA.getMemoryAccess(LoadInst1));
290 MSSA.verifyMemorySSA();
293 TEST_F(MemorySSATest, MoveAStore) {
294 // We create a diamond with a store in the entry, a store on one side, and
295 // a load at the end. After building MemorySSA, we test updating by moving
296 // the store from the side block to the entry block. This destroys the old
297 // access.
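// A sketch of the IR built below (names illustrative; blocks are unnamed):
//   entry:  store i8 16, ptr %arg ; br i1 true, %left, %right
//   left:   store i8 16, ptr %arg ; br %merge
//   right:  br %merge
//   merge:  load i8, ptr %arg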
298 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
299 GlobalValue::ExternalLinkage, "F", &M);
300 BasicBlock *Entry(BasicBlock::Create(C, "", F));
301 BasicBlock *Left(BasicBlock::Create(C, "", F));
302 BasicBlock *Right(BasicBlock::Create(C, "", F));
303 BasicBlock *Merge(BasicBlock::Create(C, "", F));
304 B.SetInsertPoint(Entry);
305 Argument *PointerArg = &*F->arg_begin();
306 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
307 B.CreateCondBr(B.getTrue(), Left, Right);
308 B.SetInsertPoint(Left);
309 StoreInst *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
310 BranchInst::Create(Merge, Left);
311 BranchInst::Create(Merge, Right);
312 B.SetInsertPoint(Merge);
313 B.CreateLoad(B.getInt8Ty(), PointerArg);
314 setupAnalyses();
315 MemorySSA &MSSA = *Analyses->MSSA;
316 MemorySSAUpdater Updater(&MSSA);
317 // Move the store
318 SideStore->moveBefore(Entry->getTerminator());
319 MemoryAccess *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
320 MemoryAccess *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
321 MemoryAccess *NewStoreAccess = Updater.createMemoryAccessAfter(
322 SideStore, EntryStoreAccess, EntryStoreAccess);
323 EntryStoreAccess->replaceAllUsesWith(NewStoreAccess);
324 Updater.removeMemoryAccess(SideStoreAccess);
325 MSSA.verifyMemorySSA();
328 TEST_F(MemorySSATest, MoveAStoreUpdater) {
329 // We create a diamond with a store in the entry, a store on one side, and
330 // a load at the end. After building MemorySSA, we test updating by moving
331 // the store from the side block to the entry block. This destroys the old
332 // access.
333 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
334 GlobalValue::ExternalLinkage, "F", &M);
335 BasicBlock *Entry(BasicBlock::Create(C, "", F));
336 BasicBlock *Left(BasicBlock::Create(C, "", F));
337 BasicBlock *Right(BasicBlock::Create(C, "", F));
338 BasicBlock *Merge(BasicBlock::Create(C, "", F));
339 B.SetInsertPoint(Entry);
340 Argument *PointerArg = &*F->arg_begin();
341 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
342 B.CreateCondBr(B.getTrue(), Left, Right);
343 B.SetInsertPoint(Left);
344 auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
345 BranchInst::Create(Merge, Left);
346 BranchInst::Create(Merge, Right);
347 B.SetInsertPoint(Merge);
348 auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
349 setupAnalyses();
350 MemorySSA &MSSA = *Analyses->MSSA;
351 MemorySSAUpdater Updater(&MSSA);
353 // Move the store
354 SideStore->moveBefore(Entry->getTerminator());
355 auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
356 auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
357 auto *NewStoreAccess = Updater.createMemoryAccessAfter(
358 SideStore, EntryStoreAccess, EntryStoreAccess);
359 // Before, the load will point to a phi of the EntryStore and SideStore.
360 auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
361 EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
362 MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
363 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
364 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
365 Updater.removeMemoryAccess(SideStoreAccess);
366 Updater.insertDef(cast<MemoryDef>(NewStoreAccess));
367 // After it's a phi of the new side store access.
368 EXPECT_EQ(MergePhi->getIncomingValue(0), NewStoreAccess);
369 EXPECT_EQ(MergePhi->getIncomingValue(1), NewStoreAccess);
370 MSSA.verifyMemorySSA();
373 TEST_F(MemorySSATest, MoveAStoreUpdaterMove) {
374 // We create a diamond with a store in the entry, a store on one side, and
375 // a load at the end. After building MemorySSA, we test updating by moving
376 // the store from the side block to the entry block. This does not destroy
377 // the old access.
378 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
379 GlobalValue::ExternalLinkage, "F", &M);
380 BasicBlock *Entry(BasicBlock::Create(C, "", F));
381 BasicBlock *Left(BasicBlock::Create(C, "", F));
382 BasicBlock *Right(BasicBlock::Create(C, "", F));
383 BasicBlock *Merge(BasicBlock::Create(C, "", F));
384 B.SetInsertPoint(Entry);
385 Argument *PointerArg = &*F->arg_begin();
386 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
387 B.CreateCondBr(B.getTrue(), Left, Right);
388 B.SetInsertPoint(Left);
389 auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
390 BranchInst::Create(Merge, Left);
391 BranchInst::Create(Merge, Right);
392 B.SetInsertPoint(Merge);
393 auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
394 setupAnalyses();
395 MemorySSA &MSSA = *Analyses->MSSA;
396 MemorySSAUpdater Updater(&MSSA);
398 // Move the store
399 auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
400 auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
401 // Before, the load will point to a phi of the EntryStore and SideStore.
402 auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
403 EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
404 MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
405 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
406 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
407 SideStore->moveBefore(*EntryStore->getParent(), ++EntryStore->getIterator());
408 Updater.moveAfter(SideStoreAccess, EntryStoreAccess);
409 // After, it's a phi of the side store.
410 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
411 EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);
413 MSSA.verifyMemorySSA();
416 TEST_F(MemorySSATest, MoveAStoreAllAround) {
417 // We create a diamond with a store in the entry, a store on one side, and
418 // a load at the end. After building MemorySSA, we test updating by moving
419 // the store from the side block to the entry block, then to the other side
420 // block, then to before the load. This does not destroy the old access.
421 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
422 GlobalValue::ExternalLinkage, "F", &M);
423 BasicBlock *Entry(BasicBlock::Create(C, "", F));
424 BasicBlock *Left(BasicBlock::Create(C, "", F));
425 BasicBlock *Right(BasicBlock::Create(C, "", F));
426 BasicBlock *Merge(BasicBlock::Create(C, "", F));
427 B.SetInsertPoint(Entry);
428 Argument *PointerArg = &*F->arg_begin();
429 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
430 B.CreateCondBr(B.getTrue(), Left, Right);
431 B.SetInsertPoint(Left);
432 auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
433 BranchInst::Create(Merge, Left);
434 BranchInst::Create(Merge, Right);
435 B.SetInsertPoint(Merge);
436 auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
437 setupAnalyses();
438 MemorySSA &MSSA = *Analyses->MSSA;
439 MemorySSAUpdater Updater(&MSSA);
441 // Move the store
442 auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
443 auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
444 // Before, the load will point to a phi of the EntryStore and SideStore.
445 auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
446 EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
447 MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
448 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
449 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
450 // Move the store before the entry store
451 SideStore->moveBefore(*EntryStore->getParent(), EntryStore->getIterator());
452 Updater.moveBefore(SideStoreAccess, EntryStoreAccess);
453 // After, it's a phi of the entry store.
454 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
455 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
456 MSSA.verifyMemorySSA();
457 // Now move the store to the right branch
458 SideStore->moveBefore(*Right, Right->begin());
459 Updater.moveToPlace(SideStoreAccess, Right, MemorySSA::Beginning);
460 MSSA.verifyMemorySSA();
461 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
462 EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);
463 // Now move it before the load
464 SideStore->moveBefore(MergeLoad);
465 Updater.moveBefore(SideStoreAccess, LoadAccess);
466 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
467 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
468 MSSA.verifyMemorySSA();
471 TEST_F(MemorySSATest, RemoveAPhi) {
472 // We create a diamond where there is a store on one side, and then a load
473 // after the merge point. This enables us to test a bunch of different
474 // removal cases.
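// Expected MemorySSA shape for the IR below (a sketch; exact numbering and
// phi operand order depend on construction):
//   left:   MemoryDef(liveOnEntry)                        store i8 16, ptr %arg
//   merge:  MemoryPhi({left, def}, {right, liveOnEntry})
//           MemoryUse(phi)                                load i8, ptr %arg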
475 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
476 GlobalValue::ExternalLinkage, "F", &M);
477 BasicBlock *Entry(BasicBlock::Create(C, "", F));
478 BasicBlock *Left(BasicBlock::Create(C, "", F));
479 BasicBlock *Right(BasicBlock::Create(C, "", F));
480 BasicBlock *Merge(BasicBlock::Create(C, "", F));
481 B.SetInsertPoint(Entry);
482 B.CreateCondBr(B.getTrue(), Left, Right);
483 B.SetInsertPoint(Left);
484 Argument *PointerArg = &*F->arg_begin();
485 StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);
486 BranchInst::Create(Merge, Left);
487 BranchInst::Create(Merge, Right);
488 B.SetInsertPoint(Merge);
489 LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);
491 setupAnalyses();
492 MemorySSA &MSSA = *Analyses->MSSA;
493 MemorySSAUpdater Updater(&MSSA);
495 // Before, the load will be a use of a phi<store, liveonentry>.
496 MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst));
497 MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));
498 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
499 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
500 // Kill the store
501 Updater.removeMemoryAccess(StoreAccess);
502 MemoryPhi *MP = cast<MemoryPhi>(DefiningAccess);
503 // Verify the phi ended up as liveonentry, liveonentry
504 for (auto &Op : MP->incoming_values())
505 EXPECT_TRUE(MSSA.isLiveOnEntryDef(cast<MemoryAccess>(Op.get())));
506 // Replace the phi uses with the live on entry def
507 MP->replaceAllUsesWith(MSSA.getLiveOnEntryDef());
508 // Verify the load is now defined by liveOnEntryDef
509 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
510 // Remove the PHI
511 Updater.removeMemoryAccess(MP);
512 MSSA.verifyMemorySSA();
515 TEST_F(MemorySSATest, RemoveMemoryAccess) {
516 // We create a diamond where there is a store on one side, and then a load
517 // after the merge point. This enables us to test a bunch of different
518 // removal cases.
519 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
520 GlobalValue::ExternalLinkage, "F", &M);
521 BasicBlock *Entry(BasicBlock::Create(C, "", F));
522 BasicBlock *Left(BasicBlock::Create(C, "", F));
523 BasicBlock *Right(BasicBlock::Create(C, "", F));
524 BasicBlock *Merge(BasicBlock::Create(C, "", F));
525 B.SetInsertPoint(Entry);
526 B.CreateCondBr(B.getTrue(), Left, Right);
527 B.SetInsertPoint(Left);
528 Argument *PointerArg = &*F->arg_begin();
529 StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);
530 BranchInst::Create(Merge, Left);
531 BranchInst::Create(Merge, Right);
532 B.SetInsertPoint(Merge);
533 LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);
535 setupAnalyses();
536 MemorySSA &MSSA = *Analyses->MSSA;
537 MemorySSAWalker *Walker = Analyses->Walker;
538 MemorySSAUpdater Updater(&MSSA);
540 // Before, the load will be a use of a phi<store, liveonentry>. It should be
541 // the same after.
542 MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst));
543 MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));
544 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
545 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
546 // The load is currently clobbered by one of the phi arguments, so the walker
547 // should determine the clobbering access as the phi.
548 EXPECT_EQ(DefiningAccess, Walker->getClobberingMemoryAccess(LoadInst));
549 Updater.removeMemoryAccess(StoreAccess);
550 MSSA.verifyMemorySSA();
551 // After the removeMemoryAccess call, let's see if we got the right accesses.
552 // The load should still point to the phi ...
553 EXPECT_EQ(DefiningAccess, LoadAccess->getDefiningAccess());
554 // but we should now get live on entry for the clobbering definition of the
555 // load, since the walk can look past the phi node because every argument is
556 // the same.
557 // XXX: This currently requires either removing the phi or resetting optimized
558 // on the load
560 EXPECT_FALSE(
561 MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
562 // If we reset optimized, we get live on entry.
563 LoadAccess->resetOptimized();
564 EXPECT_TRUE(
565 MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
566 // The phi should now be a two entry phi with two live on entry defs.
567 for (const auto &Op : DefiningAccess->operands()) {
568 MemoryAccess *Operand = cast<MemoryAccess>(&*Op);
569 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Operand));
572 // Now we try to remove the single valued phi
573 Updater.removeMemoryAccess(DefiningAccess);
574 MSSA.verifyMemorySSA();
575 // Now the load should be a load of live on entry.
576 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
579 // We had a bug with caching where the walker would report MemoryDef#3's clobber
580 // (below) was MemoryDef#1.
582 // define void @F(i8*) {
583 // %A = alloca i8, i8 1
584 // ; 1 = MemoryDef(liveOnEntry)
585 // store i8 0, i8* %A
586 // ; 2 = MemoryDef(1)
587 // store i8 1, i8* %A
588 // ; 3 = MemoryDef(2)
589 // store i8 2, i8* %A
590 // }
591 TEST_F(MemorySSATest, TestTripleStore) {
592 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
593 GlobalValue::ExternalLinkage, "F", &M);
594 B.SetInsertPoint(BasicBlock::Create(C, "", F));
595 Type *Int8 = Type::getInt8Ty(C);
596 Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
597 StoreInst *S1 = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
598 StoreInst *S2 = B.CreateStore(ConstantInt::get(Int8, 1), Alloca);
599 StoreInst *S3 = B.CreateStore(ConstantInt::get(Int8, 2), Alloca);
601 setupAnalyses();
602 MemorySSA &MSSA = *Analyses->MSSA;
603 MemorySSAWalker *Walker = Analyses->Walker;
605 unsigned I = 0;
606 for (StoreInst *V : {S1, S2, S3}) {
607 // Everything should be clobbered by its defining access
608 MemoryAccess *DefiningAccess = MSSA.getMemoryAccess(V)->getDefiningAccess();
609 MemoryAccess *WalkerClobber = Walker->getClobberingMemoryAccess(V);
610 EXPECT_EQ(DefiningAccess, WalkerClobber)
611 << "Store " << I << " doesn't have the correct clobbering access";
612 // EXPECT_EQ expands such that if we increment I above, it won't get
613 // incremented except when we try to print the error message.
614 ++I;
618 // ...And fixing the above bug made it obvious that, when walking, MemorySSA's
619 // walker was caching the initial node it walked. This was fine (albeit
620 // mostly redundant) unless the initial node being walked is a clobber for the
621 // query. In that case, we'd cache that the node clobbered itself.
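// The IR below boils down to (a sketch):
//   %A = alloca i8
//   ; 1 = MemoryDef(liveOnEntry)
//   store i8 0, ptr %A
//   ; MemoryUse(1)
//   %x = load i8, ptr %A
// The store's clobber is liveOnEntry, and the load's clobber is the store.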
622 TEST_F(MemorySSATest, TestStoreAndLoad) {
623 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
624 GlobalValue::ExternalLinkage, "F", &M);
625 B.SetInsertPoint(BasicBlock::Create(C, "", F));
626 Type *Int8 = Type::getInt8Ty(C);
627 Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
628 Instruction *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
629 Instruction *LI = B.CreateLoad(Int8, Alloca);
631 setupAnalyses();
632 MemorySSA &MSSA = *Analyses->MSSA;
633 MemorySSAWalker *Walker = Analyses->Walker;
635 MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LI);
636 EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SI));
637 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SI)));
640 // Another bug (related to the above two fixes): It was noted that, given the
641 // following code:
642 // ; 1 = MemoryDef(liveOnEntry)
643 // store i8 0, i8* %1
645 // ...A query to getClobberingMemoryAccess(MemoryAccess*, MemoryLocation) would
646 // hand back the store (correctly). A later call to
647 // getClobberingMemoryAccess(const Instruction*) would also hand back the store
648 // (incorrectly; it should return liveOnEntry).
650 // This test checks that repeated calls to either function return what they're
651 // meant to.
652 TEST_F(MemorySSATest, TestStoreDoubleQuery) {
653 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
654 GlobalValue::ExternalLinkage, "F", &M);
655 B.SetInsertPoint(BasicBlock::Create(C, "", F));
656 Type *Int8 = Type::getInt8Ty(C);
657 Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
658 StoreInst *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
660 setupAnalyses();
661 MemorySSA &MSSA = *Analyses->MSSA;
662 MemorySSAWalker *Walker = Analyses->Walker;
664 MemoryAccess *StoreAccess = MSSA.getMemoryAccess(SI);
665 MemoryLocation StoreLoc = MemoryLocation::get(SI);
666 MemoryAccess *Clobber =
667 Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
668 MemoryAccess *LiveOnEntry = Walker->getClobberingMemoryAccess(SI);
670 EXPECT_EQ(Clobber, StoreAccess);
671 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
673 // Try again (with entries in the cache already) for good measure...
674 Clobber = Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
675 LiveOnEntry = Walker->getClobberingMemoryAccess(SI);
676 EXPECT_EQ(Clobber, StoreAccess);
677 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
680 // Bug: During phi optimization, the walker wouldn't cache to the proper result
681 // in the farthest-walked BB.
683 // Specifically, it would assume that whatever we walked to was a clobber.
684 // "Whatever we walked to" isn't a clobber if we hit a cache entry.
686 // ...So, we need a test case that looks like:
687 //    A
688 //   / \
689 //  B   |
690 //   \ /
691 //    C
693 // Where, when we try to optimize a thing in 'C', a blocker is found in 'B'.
694 // The walk must determine that the blocker exists by using cache entries *while
695 // walking* 'B'.
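// The relevant accesses in the test below (names as in the code):
//   B: FirstStore (store %a), store %b, ALoad0 (load %a), BStore (store %b),
//      KillStore (store %a, removed after MSSA is built), ALoad (load %a)
//   C: MemoryPhi, BelowPhi (store %a)
// After KillStore's access is removed, ALoad's defining access is BStore but
// its clobber is FirstStore; a stale cache entry must not make the walker
// return the store to %b instead.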
696 TEST_F(MemorySSATest, PartialWalkerCacheWithPhis) {
697 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
698 GlobalValue::ExternalLinkage, "F", &M);
699 B.SetInsertPoint(BasicBlock::Create(C, "A", F));
700 Type *Int8 = Type::getInt8Ty(C);
701 Constant *One = ConstantInt::get(Int8, 1);
702 Constant *Zero = ConstantInt::get(Int8, 0);
703 Value *AllocA = B.CreateAlloca(Int8, One, "a");
704 Value *AllocB = B.CreateAlloca(Int8, One, "b");
705 BasicBlock *IfThen = BasicBlock::Create(C, "B", F);
706 BasicBlock *IfEnd = BasicBlock::Create(C, "C", F);
708 B.CreateCondBr(PoisonValue::get(Type::getInt1Ty(C)), IfThen, IfEnd);
710 B.SetInsertPoint(IfThen);
711 Instruction *FirstStore = B.CreateStore(Zero, AllocA);
712 B.CreateStore(Zero, AllocB);
713 Instruction *ALoad0 = B.CreateLoad(Int8, AllocA, "");
714 Instruction *BStore = B.CreateStore(Zero, AllocB);
715 // Due to use optimization/etc. we make a store to A, which is removed after
716 // we build MSSA. This helps keep the test case simple-ish.
717 Instruction *KillStore = B.CreateStore(Zero, AllocA);
718 Instruction *ALoad = B.CreateLoad(Int8, AllocA, "");
719 B.CreateBr(IfEnd);
721 B.SetInsertPoint(IfEnd);
722 Instruction *BelowPhi = B.CreateStore(Zero, AllocA);
724 setupAnalyses();
725 MemorySSA &MSSA = *Analyses->MSSA;
726 MemorySSAWalker *Walker = Analyses->Walker;
727 MemorySSAUpdater Updater(&MSSA);
729 // Kill `KillStore`; it exists solely so that the load after it won't be
730 // optimized to FirstStore.
731 Updater.removeMemoryAccess(MSSA.getMemoryAccess(KillStore));
732 KillStore->eraseFromParent();
733 auto *ALoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(ALoad));
734 EXPECT_EQ(ALoadMA->getDefiningAccess(), MSSA.getMemoryAccess(BStore));
736 // Populate the cache for the store to AllocB directly after FirstStore. It
737 // should point to something in block B (so something in C can't be optimized
738 // to it).
739 MemoryAccess *Load0Clobber = Walker->getClobberingMemoryAccess(ALoad0);
740 EXPECT_EQ(MSSA.getMemoryAccess(FirstStore), Load0Clobber);
742 // If the bug exists, this will introduce a bad cache entry for %a on BStore.
743 // It will point to the store to %b after FirstStore. This only happens during
744 // phi optimization.
745 MemoryAccess *BottomClobber = Walker->getClobberingMemoryAccess(BelowPhi);
746 MemoryAccess *Phi = MSSA.getMemoryAccess(IfEnd);
747 EXPECT_EQ(BottomClobber, Phi);
749 // This query will first check the cache for {%a, BStore}. It should point to
750 // FirstStore, not to the store after FirstStore.
751 MemoryAccess *UseClobber = Walker->getClobberingMemoryAccess(ALoad);
752 EXPECT_EQ(UseClobber, MSSA.getMemoryAccess(FirstStore));
755 // Test that our walker properly handles loads with !invariant.load metadata.
756 // It's a bit hacky, since we add the metadata *after*
757 // building MSSA. Otherwise, the use optimizer will optimize it for us, which
758 // isn't what we want.
759 // FIXME: It may be easier/cleaner to just add an 'optimize uses?' flag to MSSA.
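// The test below: a store to an alloca followed by a load of it. The
// !invariant.load metadata is attached to the load only after MemorySSA is
// built, and the walker is then expected to report liveOnEntry as the load's
// clobber.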
760 TEST_F(MemorySSATest, WalkerInvariantLoadOpt) {
761 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
762 GlobalValue::ExternalLinkage, "F", &M);
763 B.SetInsertPoint(BasicBlock::Create(C, "", F));
764 Type *Int8 = Type::getInt8Ty(C);
765 Constant *One = ConstantInt::get(Int8, 1);
766 Value *AllocA = B.CreateAlloca(Int8, One, "");
768 Instruction *Store = B.CreateStore(One, AllocA);
769 Instruction *Load = B.CreateLoad(Int8, AllocA);
771 setupAnalyses();
772 MemorySSA &MSSA = *Analyses->MSSA;
773 MemorySSAWalker *Walker = Analyses->Walker;
775 auto *LoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(Load));
776 auto *StoreMA = cast<MemoryDef>(MSSA.getMemoryAccess(Store));
777 EXPECT_EQ(LoadMA->getDefiningAccess(), StoreMA);
779 // ...At the time of writing, no cache should exist for LoadMA. Be a bit
780 // flexible to future changes.
781 Walker->invalidateInfo(LoadMA);
782 Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(C, {}));
784 MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LoadMA);
785 EXPECT_EQ(LoadClobber, MSSA.getLiveOnEntryDef());
788 // Test loads get reoptimized properly by the walker.
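// Sketch of the test below: store to %A, store to %B, load of %A. The load's
// access is removed and recreated with an unoptimized defining access (the
// store to %B); querying the walker should re-optimize it to the store to %A.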
789 TEST_F(MemorySSATest, WalkerReopt) {
790 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
791 GlobalValue::ExternalLinkage, "F", &M);
792 B.SetInsertPoint(BasicBlock::Create(C, "", F));
793 Type *Int8 = Type::getInt8Ty(C);
794 Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
795 Instruction *SIA = B.CreateStore(ConstantInt::get(Int8, 0), AllocaA);
796 Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
797 Instruction *SIB = B.CreateStore(ConstantInt::get(Int8, 0), AllocaB);
798 Instruction *LIA = B.CreateLoad(Int8, AllocaA);
800 setupAnalyses();
801 MemorySSA &MSSA = *Analyses->MSSA;
802 MemorySSAWalker *Walker = Analyses->Walker;
803 MemorySSAUpdater Updater(&MSSA);
805 MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LIA);
806 MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LIA));
807 EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SIA));
808 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SIA)));
809 Updater.removeMemoryAccess(LoadAccess);
811 // Create the load memory access pointing to an unoptimized place.
812 MemoryUse *NewLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
813 LIA, MSSA.getMemoryAccess(SIB), LIA->getParent(), MemorySSA::End));
814 // This should cause it to be optimized.
815 EXPECT_EQ(Walker->getClobberingMemoryAccess(NewLoadAccess), LoadClobber);
816 EXPECT_EQ(NewLoadAccess->getDefiningAccess(), LoadClobber);
819 // Test out MemorySSAUpdater::moveBefore
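// Sketch of the block built below (values named as in the test):
//   store A0; store B; load B; store A1; store C; store A2; load C
// StoreC is then hoisted above StoreB, and Updater.moveBefore is expected to
// keep the defining accesses and walker results consistent.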
820 TEST_F(MemorySSATest, MoveAboveMemoryDef) {
821 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
822 GlobalValue::ExternalLinkage, "F", &M);
823 B.SetInsertPoint(BasicBlock::Create(C, "", F));
825 Type *Int8 = Type::getInt8Ty(C);
826 Value *A = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
827 Value *B_ = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
828 Value *C = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C");
830 StoreInst *StoreA0 = B.CreateStore(ConstantInt::get(Int8, 0), A);
831 StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 0), B_);
832 LoadInst *LoadB = B.CreateLoad(Int8, B_);
833 StoreInst *StoreA1 = B.CreateStore(ConstantInt::get(Int8, 4), A);
834 StoreInst *StoreC = B.CreateStore(ConstantInt::get(Int8, 4), C);
835 StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 4), A);
836 LoadInst *LoadC = B.CreateLoad(Int8, C);
838 setupAnalyses();
839 MemorySSA &MSSA = *Analyses->MSSA;
840 MemorySSAWalker &Walker = *Analyses->Walker;
842 MemorySSAUpdater Updater(&MSSA);
843 StoreC->moveBefore(StoreB);
844 Updater.moveBefore(cast<MemoryDef>(MSSA.getMemoryAccess(StoreC)),
845 cast<MemoryDef>(MSSA.getMemoryAccess(StoreB)));
847 MSSA.verifyMemorySSA();
849 EXPECT_EQ(MSSA.getMemoryAccess(StoreB)->getDefiningAccess(),
850 MSSA.getMemoryAccess(StoreC));
851 EXPECT_EQ(MSSA.getMemoryAccess(StoreC)->getDefiningAccess(),
852 MSSA.getMemoryAccess(StoreA0));
853 EXPECT_EQ(MSSA.getMemoryAccess(StoreA2)->getDefiningAccess(),
854 MSSA.getMemoryAccess(StoreA1));
855 EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadB),
856 MSSA.getMemoryAccess(StoreB));
857 EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadC),
858 MSSA.getMemoryAccess(StoreC));
860 // exercise block numbering
861 EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreC),
862 MSSA.getMemoryAccess(StoreB)));
863 EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreA1),
864 MSSA.getMemoryAccess(StoreA2)));
867 TEST_F(MemorySSATest, Irreducible) {
868 // Create the equivalent of
869 // x = something
870 // if (...)
871 // goto second_loop_entry
872 // while (...) {
873 // second_loop_entry:
874 // }
875 // use(x)
877 SmallVector<PHINode *, 8> Inserted;
878 IRBuilder<> B(C);
879 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
880 GlobalValue::ExternalLinkage, "F", &M);
882 // Make blocks
883 BasicBlock *IfBB = BasicBlock::Create(C, "if", F);
884 BasicBlock *LoopStartBB = BasicBlock::Create(C, "loopstart", F);
885 BasicBlock *LoopMainBB = BasicBlock::Create(C, "loopmain", F);
886 BasicBlock *AfterLoopBB = BasicBlock::Create(C, "afterloop", F);
887 B.SetInsertPoint(IfBB);
888 B.CreateCondBr(B.getTrue(), LoopMainBB, LoopStartBB);
889 B.SetInsertPoint(LoopStartBB);
890 B.CreateBr(LoopMainBB);
891 B.SetInsertPoint(LoopMainBB);
892 B.CreateCondBr(B.getTrue(), LoopStartBB, AfterLoopBB);
893 B.SetInsertPoint(AfterLoopBB);
894 Argument *FirstArg = &*F->arg_begin();
895 setupAnalyses();
896 MemorySSA &MSSA = *Analyses->MSSA;
897 MemorySSAUpdater Updater(&MSSA);
898 // Create the load memory access
899 LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), FirstArg);
900 MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
901 LoadInst, nullptr, AfterLoopBB, MemorySSA::Beginning));
902 Updater.insertUse(LoadAccess);
903 MSSA.verifyMemorySSA();
906 TEST_F(MemorySSATest, MoveToBeforeLiveOnEntryInvalidatesCache) {
907 // Create:
908 // %1 = alloca i8
909 // ; 1 = MemoryDef(liveOnEntry)
910 // store i8 0, i8* %1
911 // ; 2 = MemoryDef(1)
912 // store i8 0, i8* %1
914 // ...And be sure that MSSA's caching doesn't give us `1` for the clobber of
915 // `2` after `1` is removed.
916 IRBuilder<> B(C);
917 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
918 GlobalValue::ExternalLinkage, "F", &M);
920 BasicBlock *Entry = BasicBlock::Create(C, "if", F);
921 B.SetInsertPoint(Entry);
923 Value *A = B.CreateAlloca(B.getInt8Ty());
924 StoreInst *StoreA = B.CreateStore(B.getInt8(0), A);
925 StoreInst *StoreB = B.CreateStore(B.getInt8(0), A);
927 setupAnalyses();
929 MemorySSA &MSSA = *Analyses->MSSA;
931 auto *DefA = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
932 auto *DefB = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));
934 MemoryAccess *BClobber = MSSA.getWalker()->getClobberingMemoryAccess(DefB);
935 ASSERT_EQ(DefA, BClobber);
937 MemorySSAUpdater(&MSSA).removeMemoryAccess(DefA);
938 StoreA->eraseFromParent();
940 EXPECT_EQ(DefB->getDefiningAccess(), MSSA.getLiveOnEntryDef());
942 EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefB),
943 MSSA.getLiveOnEntryDef())
944 << "(DefA = " << DefA << ")";
947 TEST_F(MemorySSATest, RemovingDefInvalidatesCache) {
948 // Create:
949 // %x = alloca i8
950 // %y = alloca i8
951 // ; 1 = MemoryDef(liveOnEntry)
952 // store i8 0, i8* %x
953 // ; 2 = MemoryDef(1)
954 // store i8 0, i8* %y
955 // ; 3 = MemoryDef(2)
956 // store i8 0, i8* %x
958 // And be sure that MSSA's caching handles the removal of def `1`
959 // appropriately.
960 IRBuilder<> B(C);
961 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
962 GlobalValue::ExternalLinkage, "F", &M);
964 BasicBlock *Entry = BasicBlock::Create(C, "if", F);
965 B.SetInsertPoint(Entry);
967 Value *X = B.CreateAlloca(B.getInt8Ty());
968 Value *Y = B.CreateAlloca(B.getInt8Ty());
969 StoreInst *StoreX1 = B.CreateStore(B.getInt8(0), X);
970 StoreInst *StoreY = B.CreateStore(B.getInt8(0), Y);
971 StoreInst *StoreX2 = B.CreateStore(B.getInt8(0), X);
973 setupAnalyses();
975 MemorySSA &MSSA = *Analyses->MSSA;
977 auto *DefX1 = cast<MemoryDef>(MSSA.getMemoryAccess(StoreX1));
978 auto *DefY = cast<MemoryDef>(MSSA.getMemoryAccess(StoreY));
979 auto *DefX2 = cast<MemoryDef>(MSSA.getMemoryAccess(StoreX2));
981 EXPECT_EQ(DefX2->getDefiningAccess(), DefY);
982 MemoryAccess *X2Clobber = MSSA.getWalker()->getClobberingMemoryAccess(DefX2);
983 ASSERT_EQ(DefX1, X2Clobber);
985 MemorySSAUpdater(&MSSA).removeMemoryAccess(DefX1);
986 StoreX1->eraseFromParent();
988 EXPECT_EQ(DefX2->getDefiningAccess(), DefY);
989 EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefX2),
990 MSSA.getLiveOnEntryDef())
991 << "(DefX1 = " << DefX1 << ")";
994 // Test Must alias for optimized defs.
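// A sketch of the straight-line IR below, with its expected MemorySSA form:
//   ; 1 = MemoryDef(liveOnEntry)   store i8 1, ptr %A
//   ; 2 = MemoryDef(1)             store i8 1, ptr %B
//   ; 3 = MemoryDef(2)             store i8 2, ptr %A
//   ; 4 = MemoryDef(3)             store i8 2, ptr %B
//   ; 5 = MemoryDef(4)             store i8 3, ptr %A
//   ; 6 = MemoryDef(5)             store i8 3, ptr %B
// Each store's clobber, as found by the walker, is the previous store to the
// same alloca (or liveOnEntry for the first one), not its defining access.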
995 TEST_F(MemorySSATest, TestStoreMustAlias) {
996 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
997 GlobalValue::ExternalLinkage, "F", &M);
998 B.SetInsertPoint(BasicBlock::Create(C, "", F));
999 Type *Int8 = Type::getInt8Ty(C);
1000 Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
1001 Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
1002 StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaA);
1003 StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaB);
1004 StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaA);
1005 StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaB);
1006 StoreInst *SA3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaA);
1007 StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaB);
1009 setupAnalyses();
1010 MemorySSA &MSSA = *Analyses->MSSA;
1011 MemorySSAWalker *Walker = Analyses->Walker;
1013 unsigned I = 0;
1014 for (StoreInst *V : {SA1, SB1, SA2, SB2, SA3, SB3}) {
1015 MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
1016 EXPECT_EQ(MemDef->isOptimized(), false)
1017 << "Store " << I << " is optimized from the start?";
1018 if (V == SA1)
1019 Walker->getClobberingMemoryAccess(V);
1020 else {
1021 MemoryAccess *Def = MemDef->getDefiningAccess();
1022 MemoryAccess *Clob = Walker->getClobberingMemoryAccess(V);
1023 EXPECT_NE(Def, Clob) << "Store " << I
1024 << " has Defining Access equal to Clobbering Access";
1026 EXPECT_EQ(MemDef->isOptimized(), true)
1027 << "Store " << I << " was not optimized";
1028 // EXPECT_EQ expands such that if we increment I above, it won't get
1029 // incremented except when we try to print the error message.
1030 ++I;
1034 // Test May alias for optimized defs.
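// Sketch: the stores below alternate between two pointer arguments (which may
// alias each other) and a local alloca (which aliases neither); after one
// walker query per store, every MemoryDef should be marked optimized.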
1035 TEST_F(MemorySSATest, TestStoreMayAlias) {
1036 F = Function::Create(
1037 FunctionType::get(B.getVoidTy(), {B.getPtrTy(), B.getPtrTy()}, false),
1038 GlobalValue::ExternalLinkage, "F", &M);
1039 B.SetInsertPoint(BasicBlock::Create(C, "", F));
1040 Type *Int8 = Type::getInt8Ty(C);
1041 auto *ArgIt = F->arg_begin();
1042 Argument *PointerA = &*ArgIt;
1043 Argument *PointerB = &*(++ArgIt);
1044 Value *AllocaC = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C");
1045 // Store into arg1, must alias because it's LOE => must
1046 StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
1047 // Store into arg2, may alias store to arg1 => may
1048 StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), PointerB);
1049 // Store into alloca, no alias with args, so must alias LOE => must
1050 StoreInst *SC1 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaC);
1051 // Store into arg1, may alias store to arg2 => may
1052 StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 3), PointerA);
1053 // Store into arg2, may alias store to arg1 => may
1054 StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 4), PointerB);
1055 // Store into alloca, no alias with args, so must alias SC1 => must
1056 StoreInst *SC2 = B.CreateStore(ConstantInt::get(Int8, 5), AllocaC);
1057 // Store into arg2, must alias store to arg2 => must
1058 StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 6), PointerB);
1059 std::initializer_list<StoreInst *> Sts = {SA1, SB1, SC1, SA2, SB2, SC2, SB3};
1061 setupAnalyses();
1062 MemorySSA &MSSA = *Analyses->MSSA;
1063 MemorySSAWalker *Walker = Analyses->Walker;
1065 unsigned I = 0;
1066 for (StoreInst *V : Sts) {
1067 MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
1068 EXPECT_EQ(MemDef->isOptimized(), false)
1069 << "Store " << I << " is optimized from the start?";
1070 ++I;
1073 for (StoreInst *V : Sts)
1074 Walker->getClobberingMemoryAccess(V);
1076 I = 0;
1077 for (StoreInst *V : Sts) {
1078 MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
1079 EXPECT_EQ(MemDef->isOptimized(), true)
1080 << "Store " << I << " was not optimized";
1081 // EXPECT_EQ expands such that if we increment I above, it won't get
1082 // incremented except when we try to print the error message.
1083 ++I;
1087 TEST_F(MemorySSATest, LifetimeMarkersAreClobbers) {
1088 // Example code:
1089 // define void @a(i8* %foo) {
1090 // %bar = getelementptr i8, i8* %foo, i64 1
1091 // %baz = getelementptr i8, i8* %foo, i64 2
1092 // store i8 0, i8* %foo
1093 // store i8 0, i8* %bar
1094 // call void @llvm.lifetime.end.p0i8(i64 3, i8* %foo)
1095 // call void @llvm.lifetime.start.p0i8(i64 3, i8* %foo)
1096 // store i8 0, i8* %foo
1097 // store i8 0, i8* %bar
1098 // call void @llvm.memset.p0i8(i8* %baz, i8 0, i64 1)
1099 // ret void
1100 // }
1102 // Patterns like this are possible after inlining; the stores to %foo and %bar
1103 // should both be clobbered by the lifetime.start call if they're dominated by
1104 // it.
1106 IRBuilder<> B(C);
1107 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
1108 GlobalValue::ExternalLinkage, "F", &M);
1110 // Make blocks
1111 BasicBlock *Entry = BasicBlock::Create(C, "entry", F);
1113 B.SetInsertPoint(Entry);
1114 Value *Foo = &*F->arg_begin();
1116 Value *Bar = B.CreatePtrAdd(Foo, B.getInt64(1), "bar");
1117 Value *Baz = B.CreatePtrAdd(Foo, B.getInt64(2), "baz");
1119 B.CreateStore(B.getInt8(0), Foo);
1120 B.CreateStore(B.getInt8(0), Bar);
1122 auto GetLifetimeIntrinsic = [&](Intrinsic::ID ID) {
1123 return Intrinsic::getOrInsertDeclaration(&M, ID, {Foo->getType()});
1126 B.CreateCall(GetLifetimeIntrinsic(Intrinsic::lifetime_end),
1127 {B.getInt64(3), Foo});
1128 Instruction *LifetimeStart = B.CreateCall(
1129 GetLifetimeIntrinsic(Intrinsic::lifetime_start), {B.getInt64(3), Foo});
1131 Instruction *FooStore = B.CreateStore(B.getInt8(0), Foo);
1132 Instruction *BarStore = B.CreateStore(B.getInt8(0), Bar);
1133 Instruction *BazMemSet = B.CreateMemSet(Baz, B.getInt8(0), 1, Align(1));
1135 setupAnalyses();
1136 MemorySSA &MSSA = *Analyses->MSSA;
1138 MemoryAccess *LifetimeStartAccess = MSSA.getMemoryAccess(LifetimeStart);
1139 ASSERT_NE(LifetimeStartAccess, nullptr);
1141 MemoryAccess *FooAccess = MSSA.getMemoryAccess(FooStore);
1142 ASSERT_NE(FooAccess, nullptr);
1144 MemoryAccess *BarAccess = MSSA.getMemoryAccess(BarStore);
1145 ASSERT_NE(BarAccess, nullptr);
1147 MemoryAccess *BazAccess = MSSA.getMemoryAccess(BazMemSet);
1148 ASSERT_NE(BazAccess, nullptr);
1150 MemoryAccess *FooClobber =
1151 MSSA.getWalker()->getClobberingMemoryAccess(FooAccess);
1152 EXPECT_EQ(FooClobber, LifetimeStartAccess);
1154 MemoryAccess *BarClobber =
1155 MSSA.getWalker()->getClobberingMemoryAccess(BarAccess);
1156 EXPECT_EQ(BarClobber, LifetimeStartAccess);
1158 MemoryAccess *BazClobber =
1159 MSSA.getWalker()->getClobberingMemoryAccess(BazAccess);
1160 EXPECT_EQ(BazClobber, LifetimeStartAccess);
1162 MemoryAccess *LifetimeStartClobber =
1163 MSSA.getWalker()->getClobberingMemoryAccess(
1164 LifetimeStartAccess, MemoryLocation::getAfter(Foo));
1165 EXPECT_EQ(LifetimeStartClobber, LifetimeStartAccess);
1168 TEST_F(MemorySSATest, DefOptimizationsAreInvalidatedOnMoving) {
1169 IRBuilder<> B(C);
1170 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getInt1Ty()}, false),
1171 GlobalValue::ExternalLinkage, "F", &M);
1173 // Make a CFG like
1174 //   entry
1175 //   /   \
1176 //  a     b
1177 //   \   /
1178 //     c
1180 // Put a def in A and a def in B, move the def from A -> B, observe as the
1181 // optimization is invalidated.
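// Expected state before the move (a sketch; numbering illustrative):
//   entry: ; 1 = MemoryDef(liveOnEntry)  store i8 0, ptr %alloc
//   a:     ; 2 = MemoryDef(1)            store i8 1, ptr %alloc  (optimized to 1)
//   b:     ; 3 = MemoryDef(1)            store i8 2, ptr %alloc  (optimized to 1)
// Moving the store in 'a' to the end of 'b' must drop its optimized access,
// which then re-resolves to the store that was already in 'b'.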
1182 BasicBlock *Entry = BasicBlock::Create(C, "entry", F);
1183 BasicBlock *BlockA = BasicBlock::Create(C, "a", F);
1184 BasicBlock *BlockB = BasicBlock::Create(C, "b", F);
1185 BasicBlock *BlockC = BasicBlock::Create(C, "c", F);
1187 B.SetInsertPoint(Entry);
1188 Type *Int8 = Type::getInt8Ty(C);
1189 Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "alloc");
1190 StoreInst *StoreEntry = B.CreateStore(B.getInt8(0), Alloca);
1191 B.CreateCondBr(B.getTrue(), BlockA, BlockB);
1193 B.SetInsertPoint(BlockA);
1194 StoreInst *StoreA = B.CreateStore(B.getInt8(1), Alloca);
1195 B.CreateBr(BlockC);
1197 B.SetInsertPoint(BlockB);
1198 StoreInst *StoreB = B.CreateStore(B.getInt8(2), Alloca);
1199 B.CreateBr(BlockC);
1201 B.SetInsertPoint(BlockC);
1202 B.CreateUnreachable();
1204 setupAnalyses();
1205 MemorySSA &MSSA = *Analyses->MSSA;
1207 auto *AccessEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreEntry));
1208 auto *StoreAEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
1209 auto *StoreBEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));
1211 ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry),
1212 AccessEntry);
1213 ASSERT_TRUE(StoreAEntry->isOptimized());
1215 ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreBEntry),
1216 AccessEntry);
1217 ASSERT_TRUE(StoreBEntry->isOptimized());
1219 // Note that if we did InsertionPlace::Beginning, we don't go out of our way
1220 // to invalidate the cache for StoreBEntry. If the user wants to actually do
1221 // moves like these, it's up to them to ensure that nearby cache entries are
1222 // correctly invalidated (which, in general, requires walking all instructions
1223 // that the moved instruction dominates). So we probably shouldn't be doing
1224 // moves like this in general. Still, it works as a test case. ;)
1225 MemorySSAUpdater(&MSSA).moveToPlace(StoreAEntry, BlockB,
1226 MemorySSA::InsertionPlace::End);
1227 ASSERT_FALSE(StoreAEntry->isOptimized());
1228 ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry),
1229 StoreBEntry);
1232 TEST_F(MemorySSATest, TestOptimizedDefsAreProperUses) {
1233 F = Function::Create(
1234 FunctionType::get(B.getVoidTy(), {B.getPtrTy(), B.getPtrTy()}, false),
1235 GlobalValue::ExternalLinkage, "F", &M);
1236 B.SetInsertPoint(BasicBlock::Create(C, "", F));
1237 Type *Int8 = Type::getInt8Ty(C);
1238 Value *AllocA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
1239 Value *AllocB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
1241 StoreInst *StoreA = B.CreateStore(ConstantInt::get(Int8, 0), AllocA);
1242 StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 1), AllocB);
1243 StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocA);
1245 setupAnalyses();
1246 MemorySSA &MSSA = *Analyses->MSSA;
1247 MemorySSAWalker *Walker = Analyses->Walker;
1249 // If these don't hold, there's no chance of the test result being useful.
1250 ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA),
1251 MSSA.getLiveOnEntryDef());
1252 ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreB),
1253 MSSA.getLiveOnEntryDef());
1254 auto *StoreAAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
1255 auto *StoreA2Access = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA2));
1256 ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA2), StoreAAccess);
1257 ASSERT_EQ(StoreA2Access->getOptimized(), StoreAAccess);
1259 auto *StoreBAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));
1260 ASSERT_LT(StoreAAccess->getID(), StoreBAccess->getID());
1261 ASSERT_LT(StoreBAccess->getID(), StoreA2Access->getID());
1263 auto SortVecByID = [](std::vector<const MemoryDef *> &Defs) {
1264 llvm::sort(Defs, [](const MemoryDef *LHS, const MemoryDef *RHS) {
1265 return LHS->getID() < RHS->getID();
1269 auto SortedUserList = [&](const MemoryDef *MD) {
1270 std::vector<const MemoryDef *> Result;
1271 transform(MD->users(), std::back_inserter(Result),
1272 [](const User *U) { return cast<MemoryDef>(U); });
1273 SortVecByID(Result);
1274 return Result;
1277 // Use std::vectors, since they have nice pretty-printing if the test fails.
1278 // Parens are necessary because EXPECT_EQ is a macro, and we have commas in
1279 // our init lists...
1280 EXPECT_EQ(SortedUserList(StoreAAccess),
1281 (std::vector<const MemoryDef *>{StoreBAccess, StoreA2Access}));
1283 EXPECT_EQ(SortedUserList(StoreBAccess),
1284 std::vector<const MemoryDef *>{StoreA2Access});
1286 // StoreAAccess should be present twice, since it uses liveOnEntry for both
1287 // its defining and optimized accesses. This is a bit awkward, and is not
1288 // relied upon anywhere at the moment. If this is painful, we can fix it.
1289 EXPECT_EQ(SortedUserList(cast<MemoryDef>(MSSA.getLiveOnEntryDef())),
1290 (std::vector<const MemoryDef *>{StoreAAccess, StoreAAccess,
1291 StoreBAccess}));
1294 //   entry
1295 //     |
1296 //   header
1297 //    /  \
1298 // body   |
1299 //    \  /
1300 //    exit
1301 // header:
1302 // ; 1 = MemoryDef(liveOnEntry)
1303 // body:
1304 // ; 2 = MemoryDef(1)
1305 // exit:
1306 // ; 3 = MemoryPhi({body, 2}, {header, 1})
1307 // ; 4 = MemoryDef(3); optimized to 3, cannot optimize through phi.
1308 // Insert edge: entry -> exit, check that the MemorySSA update is correct.
1309 TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiNotOpt) {
1310 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
1311 GlobalValue::ExternalLinkage, "F", &M);
1312 Argument *PointerArg = &*F->arg_begin();
1313 BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
1314 BasicBlock *Header(BasicBlock::Create(C, "header", F));
1315 BasicBlock *Body(BasicBlock::Create(C, "body", F));
1316 BasicBlock *Exit(BasicBlock::Create(C, "exit", F));
1317 B.SetInsertPoint(Entry);
1318 BranchInst::Create(Header, Entry);
1319 B.SetInsertPoint(Header);
1320 B.CreateStore(B.getInt8(16), PointerArg);
1321 B.CreateCondBr(B.getTrue(), Exit, Body);
1322 B.SetInsertPoint(Body);
1323 B.CreateStore(B.getInt8(16), PointerArg);
1324 BranchInst::Create(Exit, Body);
1325 B.SetInsertPoint(Exit);
1326 StoreInst *S1 = B.CreateStore(B.getInt8(16), PointerArg);
1328 setupAnalyses();
1329 MemorySSA &MSSA = *Analyses->MSSA;
1330 MemorySSAWalker *Walker = Analyses->Walker;
1331 std::unique_ptr<MemorySSAUpdater> MSSAU =
1332 std::make_unique<MemorySSAUpdater>(&MSSA);
1334 MemoryPhi *Phi = MSSA.getMemoryAccess(Exit);
1335 EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1));
1337 // Alter CFG, add edge: entry -> exit
1338 Entry->getTerminator()->eraseFromParent();
1339 B.SetInsertPoint(Entry);
1340 B.CreateCondBr(B.getTrue(), Header, Exit);
1341 SmallVector<CFGUpdate, 1> Updates;
1342 Updates.push_back({cfg::UpdateKind::Insert, Entry, Exit});
1343 Analyses->DT.applyUpdates(Updates);
1344 MSSAU->applyInsertUpdates(Updates, Analyses->DT);
1345 EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1));
1348 //   entry
1349 //     |
1350 //   header
1351 //    /  \
1352 // body   |
1353 //    \  /
1354 //    exit
1355 // header:
1356 // ; 1 = MemoryDef(liveOnEntry)
1357 // body:
1358 // ; 2 = MemoryDef(1)
1359 // exit:
1360 // ; 3 = MemoryPhi({body, 2}, {header, 1})
1361 // ; 4 = MemoryDef(3); optimized to 1 for now; the added edge should invalidate
1362 // the optimized access.
1363 // Insert edge: entry -> exit, check that the MemorySSA update is correct.
1364 TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiOpt) {
1365 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
1366 GlobalValue::ExternalLinkage, "F", &M);
1367 Argument *PointerArg = &*F->arg_begin();
1368 Type *Int8 = Type::getInt8Ty(C);
1369 BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
1370 BasicBlock *Header(BasicBlock::Create(C, "header", F));
1371 BasicBlock *Body(BasicBlock::Create(C, "body", F));
1372 BasicBlock *Exit(BasicBlock::Create(C, "exit", F));
1374 B.SetInsertPoint(Entry);
1375 Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
1376 BranchInst::Create(Header, Entry);
1378 B.SetInsertPoint(Header);
1379 StoreInst *S1 = B.CreateStore(B.getInt8(16), PointerArg);
1380 B.CreateCondBr(B.getTrue(), Exit, Body);
1382 B.SetInsertPoint(Body);
1383 B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
1384 BranchInst::Create(Exit, Body);
1386 B.SetInsertPoint(Exit);
1387 StoreInst *S2 = B.CreateStore(B.getInt8(16), PointerArg);
1389 setupAnalyses();
1390 MemorySSA &MSSA = *Analyses->MSSA;
1391 MemorySSAWalker *Walker = Analyses->Walker;
1392 std::unique_ptr<MemorySSAUpdater> MSSAU =
1393 std::make_unique<MemorySSAUpdater>(&MSSA);
1395 MemoryDef *DefS1 = cast<MemoryDef>(MSSA.getMemoryAccess(S1));
1396 EXPECT_EQ(DefS1, Walker->getClobberingMemoryAccess(S2));
1398 // Alter CFG, add edge: entry -> exit
1399 Entry->getTerminator()->eraseFromParent();
1400 B.SetInsertPoint(Entry);
1401 B.CreateCondBr(B.getTrue(), Header, Exit);
1402 SmallVector<CFGUpdate, 1> Updates;
1403 Updates.push_back({cfg::UpdateKind::Insert, Entry, Exit});
1404 Analyses->DT.applyUpdates(Updates);
1405 MSSAU->applyInsertUpdates(Updates, Analyses->DT);
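  // With the new entry -> exit edge, liveOnEntry flows into the exit MemoryPhi,
  // so S2's previously optimized clobber (the store in %header) is no longer
  // valid and the walker must now stop at the phi.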
1407 MemoryPhi *Phi = MSSA.getMemoryAccess(Exit);
1408   EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S2));
1409 }
1411 // entry
1412 // / |
1413 // a |
1414 // / \ |
1415 // b c f
1416 // \ / |
1417 // d |
1418 // \ /
1419 // e
1420 // f:
1421 // ; 1 = MemoryDef(liveOnEntry)
1422 // e:
1423 // ; 2 = MemoryPhi({d, liveOnEntry}, {f, 1})
1425 // Insert edge: f -> c, then check that the update is correct.
1426 // After update:
1427 // f:
1428 // ; 1 = MemoryDef(liveOnEntry)
1429 // c:
1430 // ; 3 = MemoryPhi({a, liveOnEntry}, {f, 1})
1431 // d:
1432 // ; 4 = MemoryPhi({b, liveOnEntry}, {c, 3})
1433 // e:
1434 // ; 2 = MemoryPhi({d, 4}, {f, 1})
1435 TEST_F(MemorySSATest, TestAddedEdgeToBlockWithNoPhiAddNewPhis) {
1436 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
1437 GlobalValue::ExternalLinkage, "F", &M);
1438 Argument *PointerArg = &*F->arg_begin();
1439 BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
1440 BasicBlock *ABlock(BasicBlock::Create(C, "a", F));
1441 BasicBlock *BBlock(BasicBlock::Create(C, "b", F));
1442 BasicBlock *CBlock(BasicBlock::Create(C, "c", F));
1443 BasicBlock *DBlock(BasicBlock::Create(C, "d", F));
1444 BasicBlock *EBlock(BasicBlock::Create(C, "e", F));
1445 BasicBlock *FBlock(BasicBlock::Create(C, "f", F));
1447 B.SetInsertPoint(Entry);
1448 B.CreateCondBr(B.getTrue(), ABlock, FBlock);
1449 B.SetInsertPoint(ABlock);
1450 B.CreateCondBr(B.getTrue(), BBlock, CBlock);
1451 B.SetInsertPoint(BBlock);
1452 BranchInst::Create(DBlock, BBlock);
1453 B.SetInsertPoint(CBlock);
1454 BranchInst::Create(DBlock, CBlock);
1455 B.SetInsertPoint(DBlock);
1456 BranchInst::Create(EBlock, DBlock);
1457 B.SetInsertPoint(FBlock);
1458 B.CreateStore(B.getInt8(16), PointerArg);
1459 BranchInst::Create(EBlock, FBlock);
1461 setupAnalyses();
1462 MemorySSA &MSSA = *Analyses->MSSA;
1463 std::unique_ptr<MemorySSAUpdater> MSSAU =
1464 std::make_unique<MemorySSAUpdater>(&MSSA);
1466 // Alter CFG, add edge: f -> c
1467 FBlock->getTerminator()->eraseFromParent();
1468 B.SetInsertPoint(FBlock);
1469 B.CreateCondBr(B.getTrue(), CBlock, EBlock);
1470 SmallVector<CFGUpdate, 1> Updates;
1471 Updates.push_back({cfg::UpdateKind::Insert, FBlock, CBlock});
1472 Analyses->DT.applyUpdates(Updates);
1473 MSSAU->applyInsertUpdates(Updates, Analyses->DT);
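  // The inserted f -> c edge carries MemoryDef 1 into %c, so the updater is
  // expected to create new MemoryPhis in %c and %d and to rewire %e's phi to
  // take %d's new phi as its incoming value.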
1475 MemoryPhi *MPC = MSSA.getMemoryAccess(CBlock);
1476 EXPECT_NE(MPC, nullptr);
1477 MemoryPhi *MPD = MSSA.getMemoryAccess(DBlock);
1478 EXPECT_NE(MPD, nullptr);
1479 MemoryPhi *MPE = MSSA.getMemoryAccess(EBlock);
1480   EXPECT_EQ(MPD, MPE->getIncomingValueForBlock(DBlock));
1481 }
1483 TEST_F(MemorySSATest, TestCallClobber) {
1484 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
1485 GlobalValue::ExternalLinkage, "F", &M);
1487 Value *Pointer1 = &*F->arg_begin();
1488 BasicBlock *Entry(BasicBlock::Create(C, "", F));
1489 B.SetInsertPoint(Entry);
1490 Value *Pointer2 = B.CreatePtrAdd(Pointer1, B.getInt64(1));
1491 Instruction *StorePointer1 = B.CreateStore(B.getInt8(0), Pointer1);
1492 Instruction *StorePointer2 = B.CreateStore(B.getInt8(0), Pointer2);
1493 Instruction *MemSet = B.CreateMemSet(Pointer2, B.getInt8(0), 1, Align(1));
1495 setupAnalyses();
1496 MemorySSA &MSSA = *Analyses->MSSA;
1497 MemorySSAWalker *Walker = Analyses->Walker;
1499 MemoryUseOrDef *Store1Access = MSSA.getMemoryAccess(StorePointer1);
1500 MemoryUseOrDef *Store2Access = MSSA.getMemoryAccess(StorePointer2);
1501 MemoryUseOrDef *MemSetAccess = MSSA.getMemoryAccess(MemSet);
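  // Location-based queries: the memset does not touch Pointer1, so its clobber
  // for Pointer1 is the first store; for Pointer2 the memset itself is
  // reported, while the plain defining-access query yields the second store.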
1503 MemoryAccess *Pointer1Clobber = Walker->getClobberingMemoryAccess(
1504 MemSetAccess, MemoryLocation(Pointer1, LocationSize::precise(1)));
1505 EXPECT_EQ(Pointer1Clobber, Store1Access);
1507 MemoryAccess *Pointer2Clobber = Walker->getClobberingMemoryAccess(
1508 MemSetAccess, MemoryLocation(Pointer2, LocationSize::precise(1)));
1509 EXPECT_EQ(Pointer2Clobber, MemSetAccess);
1511 MemoryAccess *MemSetClobber = Walker->getClobberingMemoryAccess(MemSetAccess);
1512   EXPECT_EQ(MemSetClobber, Store2Access);
1513 }
1515 TEST_F(MemorySSATest, TestLoadClobber) {
1516 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
1517 GlobalValue::ExternalLinkage, "F", &M);
1519 Value *Pointer1 = &*F->arg_begin();
1520 BasicBlock *Entry(BasicBlock::Create(C, "", F));
1521 B.SetInsertPoint(Entry);
1522 Value *Pointer2 = B.CreatePtrAdd(Pointer1, B.getInt64(1));
1523 Instruction *LoadPointer1 =
1524 B.CreateLoad(B.getInt8Ty(), Pointer1, /* Volatile */ true);
1525 Instruction *LoadPointer2 =
1526 B.CreateLoad(B.getInt8Ty(), Pointer2, /* Volatile */ true);
1528 setupAnalyses();
1529 MemorySSA &MSSA = *Analyses->MSSA;
1530 MemorySSAWalker *Walker = Analyses->Walker;
1532 MemoryUseOrDef *Load1Access = MSSA.getMemoryAccess(LoadPointer1);
1533 MemoryUseOrDef *Load2Access = MSSA.getMemoryAccess(LoadPointer2);
1535 // When providing a memory location, we should never return a load as the
1536 // clobber.
1537 MemoryAccess *Pointer1Clobber = Walker->getClobberingMemoryAccess(
1538 Load2Access, MemoryLocation(Pointer1, LocationSize::precise(1)));
1539 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Pointer1Clobber));
1541 MemoryAccess *Pointer2Clobber = Walker->getClobberingMemoryAccess(
1542 Load2Access, MemoryLocation(Pointer2, LocationSize::precise(1)));
1543 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Pointer2Clobber));
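  // Without an explicit location, the walker may report the earlier volatile
  // load as the clobbering access of the second load.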
1545 MemoryAccess *Load2Clobber = Walker->getClobberingMemoryAccess(Load2Access);
1546   EXPECT_EQ(Load2Clobber, Load1Access);
1547 }
1549 // We want to test whether the location information is retained
1550 // when the IsGuaranteedLoopInvariant function handles a
1551 // memory access referring to a pointer defined in the entry
1552 // block, and hence automatically guaranteed to be loop invariant.
1553 TEST_F(MemorySSATest, TestLoopInvariantEntryBlockPointer) {
1554 SMDiagnostic E;
1555 auto LocalM =
1556 parseAssemblyString("define void @test(i64 %a0, i8* %a1, i1* %a2) {\n"
1557 "entry:\n"
1558 "%v0 = getelementptr i8, i8* %a1, i64 %a0\n"
1559 "%v1 = bitcast i8* %v0 to i64*\n"
1560 "%v2 = bitcast i8* %v0 to i32*\n"
1561 "%v3 = load i1, i1* %a2\n"
1562 "br i1 %v3, label %body, label %exit\n"
1563 "body:\n"
1564 "store i32 1, i32* %v2\n"
1565 "br label %exit\n"
1566 "exit:\n"
1567 "store i64 0, i64* %v1\n"
1568 "ret void\n"
1569 "}",
1570 E, C);
1571 ASSERT_TRUE(LocalM);
1572 F = LocalM->getFunction("test");
1573 ASSERT_TRUE(F);
1574 // Setup the analysis
1575 setupAnalyses();
1576 MemorySSA &MSSA = *Analyses->MSSA;
1577 // Find the exit block
1578 for (auto &BB : *F) {
1579 if (BB.getName() == "exit") {
1580 // Get the store instruction
1581 auto *SI = BB.getFirstNonPHI();
1582 // Get the memory access and location
1583 MemoryAccess *MA = MSSA.getMemoryAccess(SI);
1584 MemoryLocation ML = MemoryLocation::get(SI);
1585 // Use the 'upward_defs_iterator' which internally calls
1586 // IsGuaranteedLoopInvariant
1587 auto ItA = upward_defs_begin({MA, ML}, MSSA.getDomTree());
1588 auto ItB =
1589 upward_defs_begin({ItA->first, ItA->second}, MSSA.getDomTree());
1590       // Check that the location information has been retained
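      // The store in %exit writes an i64, so the propagated location should
      // stay precise with a size of 8 bytes.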
1591 EXPECT_TRUE(ItB->second.Size.isPrecise());
1592 EXPECT_TRUE(ItB->second.Size.hasValue());
1593       EXPECT_TRUE(ItB->second.Size.getValue() == 8);
1594     }
1595   }
1596 }
1598 TEST_F(MemorySSATest, TestInvariantGroup) {
1599 SMDiagnostic E;
1600 auto M = parseAssemblyString("declare void @f(i8*)\n"
1601 "define i8 @test(i8* %p) {\n"
1602 "entry:\n"
1603 " store i8 42, i8* %p, !invariant.group !0\n"
1604 " call void @f(i8* %p)\n"
1605 " %v = load i8, i8* %p, !invariant.group !0\n"
1606 " ret i8 %v\n"
1607 "}\n"
1608 "!0 = !{}",
1609 E, C);
1610 ASSERT_TRUE(M);
1611 F = M->getFunction("test");
1612 ASSERT_TRUE(F);
1613 setupAnalyses();
1614 MemorySSA &MSSA = *Analyses->MSSA;
1615 MemorySSAWalker *Walker = Analyses->Walker;
1617 auto &BB = F->getEntryBlock();
1618 auto &SI = cast<StoreInst>(*BB.begin());
1619 auto &Call = cast<CallBase>(*std::next(BB.begin()));
1620   auto &LI = cast<LoadInst>(*std::next(std::next(BB.begin())));
1622   {
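    // Thanks to !invariant.group on the store and the load, the walker can
    // return the store as the load's clobber even though a call separates
    // them; the store itself has no earlier clobber (liveOnEntry).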
1623 MemoryAccess *SAccess = MSSA.getMemoryAccess(&SI);
1624 MemoryAccess *LAccess = MSSA.getMemoryAccess(&LI);
1625 MemoryAccess *SClobber = Walker->getClobberingMemoryAccess(SAccess);
1626 EXPECT_TRUE(MSSA.isLiveOnEntryDef(SClobber));
1627 MemoryAccess *LClobber = Walker->getClobberingMemoryAccess(LAccess);
1628     EXPECT_EQ(SAccess, LClobber);
1629   }
1631   // Remove the store and verify that the memory accesses still make sense.
1632 MemorySSAUpdater Updater(&MSSA);
1633 Updater.removeMemoryAccess(&SI);
1634   SI.eraseFromParent();
1636   {
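    // Once the store is gone, the invariant.group load can no longer be
    // matched to it, so its clobbering access falls back to the call's
    // MemoryDef.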
1637 MemoryAccess *CallAccess = MSSA.getMemoryAccess(&Call);
1638 MemoryAccess *LAccess = MSSA.getMemoryAccess(&LI);
1639 MemoryAccess *LClobber = Walker->getClobberingMemoryAccess(LAccess);
1640     EXPECT_EQ(CallAccess, LClobber);
1641   }
1642 }
1644 static BasicBlock *getBasicBlockByName(Function &F, StringRef Name) {
1645 for (BasicBlock &BB : F)
1646 if (BB.getName() == Name)
1647 return &BB;
1648   llvm_unreachable("Expected to find basic block!");
1649 }
1651 static Instruction *getInstructionByName(Function &F, StringRef Name) {
1652 for (BasicBlock &BB : F)
1653 for (Instruction &I : BB)
1654 if (I.getName() == Name)
1655 return &I;
1656   llvm_unreachable("Expected to find instruction!");
1657 }
1659 TEST_F(MemorySSATest, TestVisitedBlocks) {
1660 SMDiagnostic E;
1661 auto M = parseAssemblyString(
1662 "define void @test(i64* noalias %P, i64 %N) {\n"
1663 "preheader.n:\n"
1664 " br label %header.n\n"
1665 "header.n:\n"
1666 " %n = phi i64 [ 0, %preheader.n ], [ %inc.n, %latch.n ]\n"
1667 " %guard.cond.i = icmp slt i64 0, %N\n"
1668 " br i1 %guard.cond.i, label %header.i.check, label %other.i\n"
1669 "header.i.check:\n"
1670 " br label %preheader.i\n"
1671 "preheader.i:\n"
1672 " br label %header.i\n"
1673 "header.i:\n"
1674 " %i = phi i64 [ 0, %preheader.i ], [ %inc.i, %header.i ]\n"
1675 " %v1 = load i64, i64* %P, align 8\n"
1676 " %v2 = load i64, i64* %P, align 8\n"
1677 " %inc.i = add nsw i64 %i, 1\n"
1678 " %cmp.i = icmp slt i64 %inc.i, %N\n"
1679 " br i1 %cmp.i, label %header.i, label %exit.i\n"
1680 "exit.i:\n"
1681 " br label %commonexit\n"
1682 "other.i:\n"
1683 " br label %commonexit\n"
1684 "commonexit:\n"
1685 " br label %latch.n\n"
1686 "latch.n:\n"
1687 " %inc.n = add nsw i64 %n, 1\n"
1688 " %cmp.n = icmp slt i64 %inc.n, %N\n"
1689 " br i1 %cmp.n, label %header.n, label %exit.n\n"
1690 "exit.n:\n"
1691 " ret void\n"
1692 "}\n",
1693 E, C);
1694 ASSERT_TRUE(M);
1695 F = M->getFunction("test");
1696 ASSERT_TRUE(F);
1697 setupAnalyses();
1698 MemorySSA &MSSA = *Analyses->MSSA;
1699   MemorySSAUpdater Updater(&MSSA);
1701   {
1702 // Move %v1 before the terminator of %header.i.check
1703 BasicBlock *BB = getBasicBlockByName(*F, "header.i.check");
1704 Instruction *LI = getInstructionByName(*F, "v1");
1705 LI->moveBefore(BB->getTerminator());
1706 if (MemoryUseOrDef *MUD = MSSA.getMemoryAccess(LI))
1707 Updater.moveToPlace(MUD, BB, MemorySSA::BeforeTerminator);
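    // This first moveToPlace() populates the updater's VisitedBlocks set,
    // which matters for the scenario described below.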
1709     // Change the terminator of %header.i.check to `br i1 true, label
1710     // %preheader.i, label %other.i`
1711 BB->getTerminator()->eraseFromParent();
1712 ConstantInt *BoolTrue = ConstantInt::getTrue(F->getContext());
1713 BranchInst::Create(getBasicBlockByName(*F, "preheader.i"),
1714 getBasicBlockByName(*F, "other.i"), BoolTrue, BB);
1715 SmallVector<DominatorTree::UpdateType, 4> DTUpdates;
1716 DTUpdates.push_back(DominatorTree::UpdateType(
1717 DominatorTree::Insert, BB, getBasicBlockByName(*F, "other.i")));
1718     Updater.applyUpdates(DTUpdates, Analyses->DT, true);
1719   }
1721   // After the first moveToPlace(), %other.i is in VisitedBlocks even though a
1722   // new edge to %other.i has been added, which makes the second moveToPlace()
1723   // traverse incorrectly.
1724   {
1725 // Move %v2 before the terminator of %preheader.i
1726 BasicBlock *BB = getBasicBlockByName(*F, "preheader.i");
1727 Instruction *LI = getInstructionByName(*F, "v2");
1728 LI->moveBefore(BB->getTerminator());
1729 // Check that there is no assertion of "Incomplete phi during partial
1730 // rename"
1731 if (MemoryUseOrDef *MUD = MSSA.getMemoryAccess(LI))
1732       Updater.moveToPlace(MUD, BB, MemorySSA::BeforeTerminator);
1733   }
1734 }
1736 TEST_F(MemorySSATest, TestNoDbgInsts) {
1737 SMDiagnostic E;
1738 auto M = parseAssemblyString(R"(
1739 define void @test() presplitcoroutine {
1740 entry:
1741 %i = alloca i32
1742 call void @llvm.dbg.declare(metadata ptr %i, metadata !6, metadata !DIExpression()), !dbg !10
1743 call void @llvm.dbg.value(metadata ptr %i, metadata !6, metadata !DIExpression()), !dbg !10
1744   ret void
1745 }
1747 declare void @llvm.dbg.declare(metadata, metadata, metadata) nocallback nofree nosync nounwind readnone speculatable willreturn
1748 declare void @llvm.dbg.value(metadata, metadata, metadata) nocallback nofree nosync nounwind readnone speculatable willreturn
1750 !llvm.dbg.cu = !{!0}
1752 !0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !1, producer: "clang version 15.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !2, splitDebugInlining: false, nameTableKind: None)
1753 !1 = !DIFile(filename: "repro.cpp", directory: ".")
1754 !2 = !{}
1755 !3 = !{i32 7, !"Dwarf Version", i32 4}
1756 !4 = !{i32 2, !"Debug Info Version", i32 3}
1757 !5 = !{!"clang version 15.0.0"}
1758 !6 = !DILocalVariable(name: "i", scope: !7, file: !1, line: 24, type: !10)
1759 !7 = distinct !DILexicalBlock(scope: !8, file: !1, line: 23, column: 12)
1760 !8 = distinct !DISubprogram(name: "foo", linkageName: "_Z3foov", scope: !1, file: !1, line: 23, type: !9, scopeLine: 23, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !2)
1761 !9 = !DISubroutineType(types: !2)
1762 !10 = !DILocation(line: 24, column: 7, scope: !7)
1763 )",
1764 E, C);
1765 ASSERT_TRUE(M);
1766 F = M->getFunction("test");
1767 ASSERT_TRUE(F);
1768 setupAnalyses();
1769 MemorySSA &MSSA = *Analyses->MSSA;
1770 MemorySSAUpdater Updater(&MSSA);
1772 BasicBlock &Entry = F->front();
1773 auto I = Entry.begin();
1774 Instruction *DbgDeclare = cast<Instruction>(I++);
1775 Instruction *DbgValue = cast<Instruction>(I++);
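  // Debug intrinsics neither read nor write memory, so MemorySSA should not
  // create memory accesses for them.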
1776 ASSERT_EQ(MSSA.getMemoryAccess(DbgDeclare), nullptr);
1777 ASSERT_EQ(MSSA.getMemoryAccess(DbgValue), nullptr);