//===--- ExpandMemCmp.cpp - Expand memcmp() to load/stores ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass tries to expand memcmp() calls into optimally-sized loads and
// compares for the target.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "expandmemcmp"
STATISTIC(NumMemCmpCalls, "Number of memcmp calls");
STATISTIC(NumMemCmpNotConstant, "Number of memcmp calls without constant size");
STATISTIC(NumMemCmpGreaterThanMax,
          "Number of memcmp calls with size greater than max size");
STATISTIC(NumMemCmpInlined, "Number of inlined memcmp calls");
static cl::opt<unsigned> MemCmpEqZeroNumLoadsPerBlock(
    "memcmp-num-loads-per-block", cl::Hidden, cl::init(1),
    cl::desc("The number of loads per basic block for inline expansion of "
             "memcmp that is only being compared against zero."));

static cl::opt<unsigned> MaxLoadsPerMemcmp(
    "max-loads-per-memcmp", cl::Hidden,
    cl::desc("Set maximum number of loads used in expanded memcmp"));

static cl::opt<unsigned> MaxLoadsPerMemcmpOptSize(
    "max-loads-per-memcmp-opt-size", cl::Hidden,
    cl::desc("Set maximum number of loads used in expanded memcmp for -Os/Oz"));
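// These options are cl::Hidden, so they do not show up in -help (only in
// -help-hidden) but are still accepted on the command line; for example,
// -max-loads-per-memcmp=4 caps the expansion at four loads regardless of the
// target's default budget.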
namespace {

// This class provides helper functions to expand a memcmp library call into an
// inline expansion.
class MemCmpExpansion {
  struct ResultBlock {
    BasicBlock *BB = nullptr;
    PHINode *PhiSrc1 = nullptr;
    PHINode *PhiSrc2 = nullptr;

    ResultBlock() = default;
  };

  CallInst *const CI = nullptr;
  ResultBlock ResBlock;
  const uint64_t Size;
  unsigned MaxLoadSize = 0;
  uint64_t NumLoadsNonOneByte = 0;
  const uint64_t NumLoadsPerBlockForZeroCmp;
  std::vector<BasicBlock *> LoadCmpBlocks;
  BasicBlock *EndBlock = nullptr;
  PHINode *PhiRes = nullptr;
  const bool IsUsedForZeroCmp;
  const DataLayout &DL;
  IRBuilder<> Builder;
  DomTreeUpdater *DTU = nullptr;
  // Represents the decomposition in blocks of the expansion. For example,
  // comparing 33 bytes on X86+sse can be done with 2x16-byte loads and
  // 1x1-byte load, which would be represented as [{16, 0}, {16, 16}, {1, 32}].
  struct LoadEntry {
    LoadEntry(unsigned LoadSize, uint64_t Offset)
        : LoadSize(LoadSize), Offset(Offset) {}

    // The size of the load for this block, in bytes.
    unsigned LoadSize;
    // The offset of this load from the base pointer, in bytes.
    uint64_t Offset;
  };
  using LoadEntryVector = SmallVector<LoadEntry, 8>;
  LoadEntryVector LoadSequence;
  void createLoadCmpBlocks();
  void createResultBlock();
  void setupResultBlockPHINodes();
  void setupEndBlockPHINodes();
  Value *getCompareLoadPairs(unsigned BlockIndex, unsigned &LoadIndex);
  void emitLoadCompareBlock(unsigned BlockIndex);
  void emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
                                         unsigned &LoadIndex);
  void emitLoadCompareByteBlock(unsigned BlockIndex, unsigned OffsetBytes);
  void emitMemCmpResultBlock();
  Value *getMemCmpExpansionZeroCase();
  Value *getMemCmpEqZeroOneBlock();
  Value *getMemCmpOneBlock();

  struct LoadPair {
    Value *Lhs = nullptr;
    Value *Rhs = nullptr;
  };
  LoadPair getLoadPair(Type *LoadSizeType, bool NeedsBSwap, Type *CmpSizeType,
                       unsigned OffsetBytes);

  static LoadEntryVector
  computeGreedyLoadSequence(uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes,
                            unsigned MaxNumLoads, unsigned &NumLoadsNonOneByte);
  static LoadEntryVector
  computeOverlappingLoadSequence(uint64_t Size, unsigned MaxLoadSize,
                                 unsigned MaxNumLoads,
                                 unsigned &NumLoadsNonOneByte);

public:
  MemCmpExpansion(CallInst *CI, uint64_t Size,
                  const TargetTransformInfo::MemCmpExpansionOptions &Options,
                  const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout,
                  DomTreeUpdater *DTU);

  unsigned getNumBlocks();
  uint64_t getNumLoads() const { return LoadSequence.size(); }

  Value *getMemCmpExpansion();
};
MemCmpExpansion::LoadEntryVector MemCmpExpansion::computeGreedyLoadSequence(
    uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes,
    const unsigned MaxNumLoads, unsigned &NumLoadsNonOneByte) {
  NumLoadsNonOneByte = 0;
  LoadEntryVector LoadSequence;
  uint64_t Offset = 0;
  while (Size && !LoadSizes.empty()) {
    const unsigned LoadSize = LoadSizes.front();
    const uint64_t NumLoadsForThisSize = Size / LoadSize;
    if (LoadSequence.size() + NumLoadsForThisSize > MaxNumLoads) {
      // Do not expand if the total number of loads is larger than what the
      // target allows. Note that it's important that we exit before completing
      // the expansion to avoid using a ton of memory to store the expansion for
      // large sizes.
      return {};
    }
    if (NumLoadsForThisSize > 0) {
      for (uint64_t I = 0; I < NumLoadsForThisSize; ++I) {
        LoadSequence.push_back({LoadSize, Offset});
        Offset += LoadSize;
      }
      if (LoadSize > 1)
        ++NumLoadsNonOneByte;
      Size = Size % LoadSize;
    }
    LoadSizes = LoadSizes.drop_front();
  }
  return LoadSequence;
}
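// A worked example of the greedy decomposition: with Size == 7 and
// LoadSizes == {4, 2, 1}, the loop above emits {4, 0}, then {2, 4}, then
// {1, 6}, and NumLoadsNonOneByte ends up as 2 (the 4-byte and 2-byte loads).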
MemCmpExpansion::LoadEntryVector
MemCmpExpansion::computeOverlappingLoadSequence(uint64_t Size,
                                                const unsigned MaxLoadSize,
                                                const unsigned MaxNumLoads,
                                                unsigned &NumLoadsNonOneByte) {
  // These are already handled by the greedy approach.
  if (Size < 2 || MaxLoadSize < 2)
    return {};

  // We try to do as many non-overlapping loads as possible starting from the
  // beginning.
  const uint64_t NumNonOverlappingLoads = Size / MaxLoadSize;
  assert(NumNonOverlappingLoads && "there must be at least one load");
  // There remain 0 to (MaxLoadSize - 1) bytes to load, this will be done with
  // an overlapping load.
  Size = Size - NumNonOverlappingLoads * MaxLoadSize;
  // Bail if we do not need an overlapping load, this is already handled by
  // the greedy approach.
  if (Size == 0)
    return {};
  // Bail if the number of loads (non-overlapping + potential overlapping one)
  // is larger than the max allowed.
  if ((NumNonOverlappingLoads + 1) > MaxNumLoads)
    return {};

  // Add non-overlapping loads.
  LoadEntryVector LoadSequence;
  uint64_t Offset = 0;
  for (uint64_t I = 0; I < NumNonOverlappingLoads; ++I) {
    LoadSequence.push_back({MaxLoadSize, Offset});
    Offset += MaxLoadSize;
  }

  // Add the last overlapping load.
  assert(Size > 0 && Size < MaxLoadSize && "broken invariant");
  LoadSequence.push_back({MaxLoadSize, Offset - (MaxLoadSize - Size)});
  NumLoadsNonOneByte = 1;
  return LoadSequence;
}
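// For example, Size == 7 with MaxLoadSize == 4 yields [{4, 0}, {4, 3}]: one
// load at the start plus a final load shifted back so that it ends exactly at
// byte 7, re-reading byte 3 instead of issuing separate 2-byte and 1-byte
// tail loads as the greedy sequence would.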
// Initialize the basic block structure required for expansion of memcmp call
// with given maximum load size and memcmp size parameter.
// This structure includes:
// 1. A list of load compare blocks - LoadCmpBlocks.
// 2. An EndBlock, split from original instruction point, which is the block to
// return from.
// 3. ResultBlock, block to branch to for early exit when a
// LoadCmpBlock finds a difference.
MemCmpExpansion::MemCmpExpansion(
    CallInst *const CI, uint64_t Size,
    const TargetTransformInfo::MemCmpExpansionOptions &Options,
    const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout,
    DomTreeUpdater *DTU)
    : CI(CI), Size(Size), NumLoadsPerBlockForZeroCmp(Options.NumLoadsPerBlock),
      IsUsedForZeroCmp(IsUsedForZeroCmp), DL(TheDataLayout), DTU(DTU),
      Builder(CI) {
  assert(Size > 0 && "zero blocks");
  // Scale the max size down if the target can load more bytes than we need.
  llvm::ArrayRef<unsigned> LoadSizes(Options.LoadSizes);
  while (!LoadSizes.empty() && LoadSizes.front() > Size) {
    LoadSizes = LoadSizes.drop_front();
  }
  assert(!LoadSizes.empty() && "cannot load Size bytes");
  MaxLoadSize = LoadSizes.front();
  // Compute the decomposition.
  unsigned GreedyNumLoadsNonOneByte = 0;
  LoadSequence = computeGreedyLoadSequence(Size, LoadSizes, Options.MaxNumLoads,
                                           GreedyNumLoadsNonOneByte);
  NumLoadsNonOneByte = GreedyNumLoadsNonOneByte;
  assert(LoadSequence.size() <= Options.MaxNumLoads && "broken invariant");
  // If we allow overlapping loads and the load sequence is not already optimal,
  // use overlapping loads.
  if (Options.AllowOverlappingLoads &&
      (LoadSequence.empty() || LoadSequence.size() > 2)) {
    unsigned OverlappingNumLoadsNonOneByte = 0;
    auto OverlappingLoads = computeOverlappingLoadSequence(
        Size, MaxLoadSize, Options.MaxNumLoads, OverlappingNumLoadsNonOneByte);
    if (!OverlappingLoads.empty() &&
        (LoadSequence.empty() ||
         OverlappingLoads.size() < LoadSequence.size())) {
      LoadSequence = OverlappingLoads;
      NumLoadsNonOneByte = OverlappingNumLoadsNonOneByte;
    }
  }
  assert(LoadSequence.size() <= Options.MaxNumLoads && "broken invariant");
}
unsigned MemCmpExpansion::getNumBlocks() {
  if (IsUsedForZeroCmp)
    return getNumLoads() / NumLoadsPerBlockForZeroCmp +
           (getNumLoads() % NumLoadsPerBlockForZeroCmp != 0 ? 1 : 0);
  return getNumLoads();
}
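// The zero-equality case packs NumLoadsPerBlockForZeroCmp loads into each
// block, so the expression above is a ceiling division: e.g. 5 loads at 2 per
// block need 3 blocks.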
void MemCmpExpansion::createLoadCmpBlocks() {
  for (unsigned i = 0; i < getNumBlocks(); i++) {
    BasicBlock *BB = BasicBlock::Create(CI->getContext(), "loadbb",
                                        EndBlock->getParent(), EndBlock);
    LoadCmpBlocks.push_back(BB);
  }
}

void MemCmpExpansion::createResultBlock() {
  ResBlock.BB = BasicBlock::Create(CI->getContext(), "res_block",
                                   EndBlock->getParent(), EndBlock);
}
MemCmpExpansion::LoadPair MemCmpExpansion::getLoadPair(Type *LoadSizeType,
                                                       bool NeedsBSwap,
                                                       Type *CmpSizeType,
                                                       unsigned OffsetBytes) {
  // Get the memory source at offset `OffsetBytes`.
  Value *LhsSource = CI->getArgOperand(0);
  Value *RhsSource = CI->getArgOperand(1);
  Align LhsAlign = LhsSource->getPointerAlignment(DL);
  Align RhsAlign = RhsSource->getPointerAlignment(DL);
  if (OffsetBytes > 0) {
    auto *ByteType = Type::getInt8Ty(CI->getContext());
    LhsSource = Builder.CreateConstGEP1_64(ByteType, LhsSource, OffsetBytes);
    RhsSource = Builder.CreateConstGEP1_64(ByteType, RhsSource, OffsetBytes);
    LhsAlign = commonAlignment(LhsAlign, OffsetBytes);
    RhsAlign = commonAlignment(RhsAlign, OffsetBytes);
  }

  // Create a constant or a load from the source.
  Value *Lhs = nullptr;
  if (auto *C = dyn_cast<Constant>(LhsSource))
    Lhs = ConstantFoldLoadFromConstPtr(C, LoadSizeType, DL);
  if (!Lhs)
    Lhs = Builder.CreateAlignedLoad(LoadSizeType, LhsSource, LhsAlign);

  Value *Rhs = nullptr;
  if (auto *C = dyn_cast<Constant>(RhsSource))
    Rhs = ConstantFoldLoadFromConstPtr(C, LoadSizeType, DL);
  if (!Rhs)
    Rhs = Builder.CreateAlignedLoad(LoadSizeType, RhsSource, RhsAlign);

  // Swap bytes if required.
  if (NeedsBSwap) {
    Function *Bswap = Intrinsic::getDeclaration(CI->getModule(),
                                                Intrinsic::bswap, LoadSizeType);
    Lhs = Builder.CreateCall(Bswap, Lhs);
    Rhs = Builder.CreateCall(Bswap, Rhs);
  }

  // Zero extend if required.
  if (CmpSizeType != nullptr && CmpSizeType != LoadSizeType) {
    Lhs = Builder.CreateZExt(Lhs, CmpSizeType);
    Rhs = Builder.CreateZExt(Rhs, CmpSizeType);
  }
  return {Lhs, Rhs};
}
// This function creates the IR instructions for loading and comparing 1 byte.
// It loads 1 byte from each source of the memcmp parameters at the given
// OffsetBytes. It then subtracts the two loaded values and adds this result to
// the final phi node for selecting the memcmp result.
void MemCmpExpansion::emitLoadCompareByteBlock(unsigned BlockIndex,
                                               unsigned OffsetBytes) {
  BasicBlock *BB = LoadCmpBlocks[BlockIndex];
  Builder.SetInsertPoint(BB);
  const LoadPair Loads =
      getLoadPair(Type::getInt8Ty(CI->getContext()), /*NeedsBSwap=*/false,
                  Type::getInt32Ty(CI->getContext()), OffsetBytes);
  Value *Diff = Builder.CreateSub(Loads.Lhs, Loads.Rhs);

  PhiRes->addIncoming(Diff, BB);

  if (BlockIndex < (LoadCmpBlocks.size() - 1)) {
    // Early exit branch if difference found to EndBlock. Otherwise, continue
    // to the next LoadCmpBlock.
    Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_NE, Diff,
                                    ConstantInt::get(Diff->getType(), 0));
    BranchInst *CmpBr =
        BranchInst::Create(EndBlock, LoadCmpBlocks[BlockIndex + 1], Cmp);
    Builder.Insert(CmpBr);
    if (DTU)
      DTU->applyUpdates(
          {{DominatorTree::Insert, BB, EndBlock},
           {DominatorTree::Insert, BB, LoadCmpBlocks[BlockIndex + 1]}});
  } else {
    // The last block has an unconditional branch to EndBlock.
    BranchInst *CmpBr = BranchInst::Create(EndBlock);
    Builder.Insert(CmpBr);
    if (DTU)
      DTU->applyUpdates({{DominatorTree::Insert, BB, EndBlock}});
  }
}
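// Because both bytes are zero-extended to i32 by getLoadPair before the
// subtraction, the difference cannot overflow: e.g. bytes 0x01 vs 0xFF give
// 1 - 255 == -254, a validly negative memcmp result.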
/// Generate an equality comparison for one or more pairs of loaded values.
/// This is used in the case where the memcmp() call is compared equal or not
/// equal to zero.
Value *MemCmpExpansion::getCompareLoadPairs(unsigned BlockIndex,
                                            unsigned &LoadIndex) {
  assert(LoadIndex < getNumLoads() &&
         "getCompareLoadPairs() called with no remaining loads");
  std::vector<Value *> XorList, OrList;
  Value *Diff = nullptr;

  const unsigned NumLoads =
      std::min(getNumLoads() - LoadIndex, NumLoadsPerBlockForZeroCmp);

  // For a single-block expansion, start inserting before the memcmp call.
  if (LoadCmpBlocks.empty())
    Builder.SetInsertPoint(CI);
  else
    Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);

  Value *Cmp = nullptr;
  // If we have multiple loads per block, we need to generate a composite
  // comparison using xor+or. The type for the combinations is the largest load
  // type.
  IntegerType *const MaxLoadType =
      NumLoads == 1 ? nullptr
                    : IntegerType::get(CI->getContext(), MaxLoadSize * 8);
  for (unsigned i = 0; i < NumLoads; ++i, ++LoadIndex) {
    const LoadEntry &CurLoadEntry = LoadSequence[LoadIndex];
    const LoadPair Loads = getLoadPair(
        IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8),
        /*NeedsBSwap=*/false, MaxLoadType, CurLoadEntry.Offset);

    if (NumLoads != 1) {
      // If we have multiple loads per block, we need to generate a composite
      // comparison using xor+or.
      Diff = Builder.CreateXor(Loads.Lhs, Loads.Rhs);
      Diff = Builder.CreateZExt(Diff, MaxLoadType);
      XorList.push_back(Diff);
    } else {
      // If there's only one load per block, we just compare the loaded values.
      Cmp = Builder.CreateICmpNE(Loads.Lhs, Loads.Rhs);
    }
  }

  auto pairWiseOr = [&](std::vector<Value *> &InList) -> std::vector<Value *> {
    std::vector<Value *> OutList;
    for (unsigned i = 0; i < InList.size() - 1; i = i + 2) {
      Value *Or = Builder.CreateOr(InList[i], InList[i + 1]);
      OutList.push_back(Or);
    }
    if (InList.size() % 2 != 0)
      OutList.push_back(InList.back());
    return OutList;
  };

  if (!Cmp) {
    // Pairwise OR the XOR results.
    OrList = pairWiseOr(XorList);

    // Pairwise OR the OR results until one result left.
    while (OrList.size() != 1) {
      OrList = pairWiseOr(OrList);
    }

    assert(Diff && "Failed to find comparison diff");
    Cmp = Builder.CreateICmpNE(OrList[0], ConstantInt::get(Diff->getType(), 0));
  }

  return Cmp;
}
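// The reduction above builds a balanced OR tree. With three loads, the XOR
// results [x0, x1, x2] become [x0|x1, x2] after one pass and then
// [(x0|x1)|x2], so a single icmp against zero decides the whole block.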
void MemCmpExpansion::emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
                                                        unsigned &LoadIndex) {
  Value *Cmp = getCompareLoadPairs(BlockIndex, LoadIndex);

  BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1))
                           ? EndBlock
                           : LoadCmpBlocks[BlockIndex + 1];
  // Early exit branch if difference found to ResultBlock. Otherwise,
  // continue to next LoadCmpBlock or EndBlock.
  BasicBlock *BB = Builder.GetInsertBlock();
  BranchInst *CmpBr = BranchInst::Create(ResBlock.BB, NextBB, Cmp);
  Builder.Insert(CmpBr);
  if (DTU)
    DTU->applyUpdates({{DominatorTree::Insert, BB, ResBlock.BB},
                       {DominatorTree::Insert, BB, NextBB}});

  // Add a phi edge for the last LoadCmpBlock to EndBlock with a value of 0
  // since early exit to ResultBlock was not taken (no difference was found in
  // any of the bytes).
  if (BlockIndex == LoadCmpBlocks.size() - 1) {
    Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
    PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]);
  }
}
// This function creates the IR instructions for loading and comparing using
// the given LoadSize. It loads the number of bytes specified by LoadSize from
// each source of the memcmp parameters. It then compares the loaded values to
// see if there was a difference. If a difference is found, it branches with an
// early exit to the ResultBlock for calculating which source was larger.
// Otherwise, it falls through to either the next LoadCmpBlock or the EndBlock
// if this is the last LoadCmpBlock. Loading 1 byte is handled with a special
// case through emitLoadCompareByteBlock. The special handling can simply
// subtract the loaded values and add it to the result phi node.
void MemCmpExpansion::emitLoadCompareBlock(unsigned BlockIndex) {
  // There is one load per block in this case, BlockIndex == LoadIndex.
  const LoadEntry &CurLoadEntry = LoadSequence[BlockIndex];

  if (CurLoadEntry.LoadSize == 1) {
    MemCmpExpansion::emitLoadCompareByteBlock(BlockIndex, CurLoadEntry.Offset);
    return;
  }

  Type *LoadSizeType =
      IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8);
  Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8);
  assert(CurLoadEntry.LoadSize <= MaxLoadSize && "Unexpected load type");

  Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);

  const LoadPair Loads =
      getLoadPair(LoadSizeType, /*NeedsBSwap=*/DL.isLittleEndian(), MaxLoadType,
                  CurLoadEntry.Offset);

  // Add the loaded values to the phi nodes for calculating memcmp result only
  // if result is not used in a zero equality.
  if (!IsUsedForZeroCmp) {
    ResBlock.PhiSrc1->addIncoming(Loads.Lhs, LoadCmpBlocks[BlockIndex]);
    ResBlock.PhiSrc2->addIncoming(Loads.Rhs, LoadCmpBlocks[BlockIndex]);
  }

  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Loads.Lhs, Loads.Rhs);
  BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1))
                           ? EndBlock
                           : LoadCmpBlocks[BlockIndex + 1];
  // Early exit branch if difference found to ResultBlock. Otherwise, continue
  // to next LoadCmpBlock or EndBlock.
  BasicBlock *BB = Builder.GetInsertBlock();
  BranchInst *CmpBr = BranchInst::Create(NextBB, ResBlock.BB, Cmp);
  Builder.Insert(CmpBr);
  if (DTU)
    DTU->applyUpdates({{DominatorTree::Insert, BB, NextBB},
                       {DominatorTree::Insert, BB, ResBlock.BB}});

  // Add a phi edge for the last LoadCmpBlock to EndBlock with a value of 0
  // since early exit to ResultBlock was not taken (no difference was found in
  // any of the bytes).
  if (BlockIndex == LoadCmpBlocks.size() - 1) {
    Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
    PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]);
  }
}
// This function populates the ResultBlock with a sequence to calculate the
// memcmp result. It compares the two loaded source values and returns -1 if
// src1 < src2 and 1 if src1 > src2.
void MemCmpExpansion::emitMemCmpResultBlock() {
  // Special case: if memcmp result is used in a zero equality, result does not
  // need to be calculated and can simply return 1.
  if (IsUsedForZeroCmp) {
    BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
    Builder.SetInsertPoint(ResBlock.BB, InsertPt);
    Value *Res = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 1);
    PhiRes->addIncoming(Res, ResBlock.BB);
    BranchInst *NewBr = BranchInst::Create(EndBlock);
    Builder.Insert(NewBr);
    if (DTU)
      DTU->applyUpdates({{DominatorTree::Insert, ResBlock.BB, EndBlock}});
    return;
  }
  BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
  Builder.SetInsertPoint(ResBlock.BB, InsertPt);

  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_ULT, ResBlock.PhiSrc1,
                                  ResBlock.PhiSrc2);
  Value *Res =
      Builder.CreateSelect(Cmp, ConstantInt::get(Builder.getInt32Ty(), -1),
                           ConstantInt::get(Builder.getInt32Ty(), 1));

  PhiRes->addIncoming(Res, ResBlock.BB);
  BranchInst *NewBr = BranchInst::Create(EndBlock);
  Builder.Insert(NewBr);
  if (DTU)
    DTU->applyUpdates({{DominatorTree::Insert, ResBlock.BB, EndBlock}});
}
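// Reaching res_block means some pair of loads differed, so an unsigned
// compare of the values (byte-swapped on little-endian targets, hence
// lexicographic) is enough: PhiSrc1 == 0x00AB vs PhiSrc2 == 0x00BA selects
// -1, matching memcmp's negative result for a smaller first buffer.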
void MemCmpExpansion::setupResultBlockPHINodes() {
  Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8);
  Builder.SetInsertPoint(ResBlock.BB);
  // Note: this assumes one load per block.
  ResBlock.PhiSrc1 =
      Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src1");
  ResBlock.PhiSrc2 =
      Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src2");
}
void MemCmpExpansion::setupEndBlockPHINodes() {
  Builder.SetInsertPoint(&EndBlock->front());
  PhiRes = Builder.CreatePHI(Type::getInt32Ty(CI->getContext()), 2, "phi.res");
}
Value *MemCmpExpansion::getMemCmpExpansionZeroCase() {
  unsigned LoadIndex = 0;
  // This loop populates each of the LoadCmpBlocks with the IR sequence to
  // handle multiple loads per block.
  for (unsigned I = 0; I < getNumBlocks(); ++I) {
    emitLoadCompareBlockMultipleLoads(I, LoadIndex);
  }

  emitMemCmpResultBlock();
  return PhiRes;
}
/// A memcmp expansion that compares equality with 0 and only has one block of
/// load and compare can bypass the compare, branch, and phi IR that is required
/// in the general case.
Value *MemCmpExpansion::getMemCmpEqZeroOneBlock() {
  unsigned LoadIndex = 0;
  Value *Cmp = getCompareLoadPairs(0, LoadIndex);
  assert(LoadIndex == getNumLoads() && "some entries were not consumed");
  return Builder.CreateZExt(Cmp, Type::getInt32Ty(CI->getContext()));
}
/// A memcmp expansion that only has one block of load and compare can bypass
/// the compare, branch, and phi IR that is required in the general case.
Value *MemCmpExpansion::getMemCmpOneBlock() {
  Type *LoadSizeType = IntegerType::get(CI->getContext(), Size * 8);
  bool NeedsBSwap = DL.isLittleEndian() && Size != 1;

  // The i8 and i16 cases don't need compares. We zext the loaded values and
  // subtract them to get the suitable negative, zero, or positive i32 result.
  if (Size < 4) {
    const LoadPair Loads =
        getLoadPair(LoadSizeType, NeedsBSwap, Builder.getInt32Ty(),
                    /*Offset*/ 0);
    return Builder.CreateSub(Loads.Lhs, Loads.Rhs);
  }

  const LoadPair Loads = getLoadPair(LoadSizeType, NeedsBSwap, LoadSizeType,
                                     /*Offset*/ 0);
  // The result of memcmp is negative, zero, or positive, so produce that by
  // subtracting 2 extended compare bits: sub (ugt, ult).
  // If a target prefers to use selects to get -1/0/1, they should be able
  // to transform this later. The inverse transform (going from selects to math)
  // may not be possible in the DAG because the selects got converted into
  // branches before we got there.
  Value *CmpUGT = Builder.CreateICmpUGT(Loads.Lhs, Loads.Rhs);
  Value *CmpULT = Builder.CreateICmpULT(Loads.Lhs, Loads.Rhs);
  Value *ZextUGT = Builder.CreateZExt(CmpUGT, Builder.getInt32Ty());
  Value *ZextULT = Builder.CreateZExt(CmpULT, Builder.getInt32Ty());
  return Builder.CreateSub(ZextUGT, ZextULT);
}
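// The sub(zext(ugt), zext(ult)) trick covers all three outcomes branchlessly:
// Lhs < Rhs gives 0 - 1 == -1, equality gives 0 - 0 == 0, and Lhs > Rhs gives
// 1 - 0 == 1.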
// This function expands the memcmp call into an inline expansion and returns
// the memcmp result.
Value *MemCmpExpansion::getMemCmpExpansion() {
  // Create the basic block framework for a multi-block expansion.
  if (getNumBlocks() != 1) {
    BasicBlock *StartBlock = CI->getParent();
    EndBlock = SplitBlock(StartBlock, CI, DTU, /*LI=*/nullptr,
                          /*MSSAU=*/nullptr, "endblock");
    setupEndBlockPHINodes();
    createResultBlock();

    // If return value of memcmp is not used in a zero equality, we need to
    // calculate which source was larger. The calculation requires the
    // two loaded source values of each load compare block.
    // These will be saved in the phi nodes created by setupResultBlockPHINodes.
    if (!IsUsedForZeroCmp) setupResultBlockPHINodes();

    // Create the number of required load compare basic blocks.
    createLoadCmpBlocks();

    // Update the terminator added by SplitBlock to branch to the first
    // LoadCmpBlock.
    StartBlock->getTerminator()->setSuccessor(0, LoadCmpBlocks[0]);
    if (DTU)
      DTU->applyUpdates({{DominatorTree::Insert, StartBlock, LoadCmpBlocks[0]},
                         {DominatorTree::Delete, StartBlock, EndBlock}});
  }

  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  if (IsUsedForZeroCmp)
    return getNumBlocks() == 1 ? getMemCmpEqZeroOneBlock()
                               : getMemCmpExpansionZeroCase();

  if (getNumBlocks() == 1)
    return getMemCmpOneBlock();

  for (unsigned I = 0; I < getNumBlocks(); ++I) {
    emitLoadCompareBlock(I);
  }

  emitMemCmpResultBlock();
  return PhiRes;
}
// This function checks to see if an expansion of memcmp can be generated.
// It checks for constant compare size that is less than the max inline size.
// If an expansion cannot occur, returns false to leave as a library call.
// Otherwise, the library call is replaced with a new IR instruction sequence.
/// We want to transform:
/// %call = call signext i32 @memcmp(i8* %0, i8* %1, i64 15)
/// To:
/// loadbb:
///  %0 = bitcast i32* %buffer2 to i8*
///  %1 = bitcast i32* %buffer1 to i8*
///  %2 = bitcast i8* %1 to i64*
///  %3 = bitcast i8* %0 to i64*
///  %4 = load i64, i64* %2
///  %5 = load i64, i64* %3
///  %6 = call i64 @llvm.bswap.i64(i64 %4)
///  %7 = call i64 @llvm.bswap.i64(i64 %5)
///  %8 = sub i64 %6, %7
///  %9 = icmp ne i64 %8, 0
///  br i1 %9, label %res_block, label %loadbb1
/// res_block:                                        ; preds = %loadbb2,
/// %loadbb1, %loadbb
///  %phi.src1 = phi i64 [ %6, %loadbb ], [ %22, %loadbb1 ], [ %36, %loadbb2 ]
///  %phi.src2 = phi i64 [ %7, %loadbb ], [ %23, %loadbb1 ], [ %37, %loadbb2 ]
///  %10 = icmp ult i64 %phi.src1, %phi.src2
///  %11 = select i1 %10, i32 -1, i32 1
///  br label %endblock
/// loadbb1:                                          ; preds = %loadbb
///  %12 = bitcast i32* %buffer2 to i8*
///  %13 = bitcast i32* %buffer1 to i8*
///  %14 = bitcast i8* %13 to i32*
///  %15 = bitcast i8* %12 to i32*
///  %16 = getelementptr i32, i32* %14, i32 2
///  %17 = getelementptr i32, i32* %15, i32 2
///  %18 = load i32, i32* %16
///  %19 = load i32, i32* %17
///  %20 = call i32 @llvm.bswap.i32(i32 %18)
///  %21 = call i32 @llvm.bswap.i32(i32 %19)
///  %22 = zext i32 %20 to i64
///  %23 = zext i32 %21 to i64
///  %24 = sub i64 %22, %23
///  %25 = icmp ne i64 %24, 0
///  br i1 %25, label %res_block, label %loadbb2
/// loadbb2:                                          ; preds = %loadbb1
///  %26 = bitcast i32* %buffer2 to i8*
///  %27 = bitcast i32* %buffer1 to i8*
///  %28 = bitcast i8* %27 to i16*
///  %29 = bitcast i8* %26 to i16*
///  %30 = getelementptr i16, i16* %28, i16 6
///  %31 = getelementptr i16, i16* %29, i16 6
///  %32 = load i16, i16* %30
///  %33 = load i16, i16* %31
///  %34 = call i16 @llvm.bswap.i16(i16 %32)
///  %35 = call i16 @llvm.bswap.i16(i16 %33)
///  %36 = zext i16 %34 to i64
///  %37 = zext i16 %35 to i64
///  %38 = sub i64 %36, %37
///  %39 = icmp ne i64 %38, 0
///  br i1 %39, label %res_block, label %loadbb3
/// loadbb3:                                          ; preds = %loadbb2
///  %40 = bitcast i32* %buffer2 to i8*
///  %41 = bitcast i32* %buffer1 to i8*
///  %42 = getelementptr i8, i8* %41, i8 14
///  %43 = getelementptr i8, i8* %40, i8 14
///  %44 = load i8, i8* %42
///  %45 = load i8, i8* %43
///  %46 = zext i8 %44 to i32
///  %47 = zext i8 %45 to i32
///  %48 = sub i32 %46, %47
///  br label %endblock
/// endblock:                                         ; preds = %res_block,
/// %loadbb3
///  %phi.res = phi i32 [ %48, %loadbb3 ], [ %11, %res_block ]
///  ret i32 %phi.res
static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI,
                         const TargetLowering *TLI, const DataLayout *DL,
                         ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI,
                         DomTreeUpdater *DTU, const bool IsBCmp) {
  NumMemCmpCalls++;

  // Early exit from expansion if -Oz.
  if (CI->getFunction()->hasMinSize())
    return false;

  // Early exit from expansion if size is not a constant.
  ConstantInt *SizeCast = dyn_cast<ConstantInt>(CI->getArgOperand(2));
  if (!SizeCast) {
    NumMemCmpNotConstant++;
    return false;
  }
  const uint64_t SizeVal = SizeCast->getZExtValue();

  if (SizeVal == 0) {
    return false;
  }
  // TTI call to check if target would like to expand memcmp. Also, get the
  // available load sizes.
  const bool IsUsedForZeroCmp =
      IsBCmp || isOnlyUsedInZeroEqualityComparison(CI);
  bool OptForSize = CI->getFunction()->hasOptSize() ||
                    llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
  auto Options = TTI->enableMemCmpExpansion(OptForSize, IsUsedForZeroCmp);
  if (!Options) return false;

  if (MemCmpEqZeroNumLoadsPerBlock.getNumOccurrences())
    Options.NumLoadsPerBlock = MemCmpEqZeroNumLoadsPerBlock;

  if (OptForSize && MaxLoadsPerMemcmpOptSize.getNumOccurrences())
    Options.MaxNumLoads = MaxLoadsPerMemcmpOptSize;

  if (!OptForSize && MaxLoadsPerMemcmp.getNumOccurrences())
    Options.MaxNumLoads = MaxLoadsPerMemcmp;

  MemCmpExpansion Expansion(CI, SizeVal, Options, IsUsedForZeroCmp, *DL, DTU);

  // Don't expand if this will require more loads than desired by the target.
  if (Expansion.getNumLoads() == 0) {
    NumMemCmpGreaterThanMax++;
    return false;
  }

  NumMemCmpInlined++;

  Value *Res = Expansion.getMemCmpExpansion();

  // Replace call with result of expansion and erase call.
  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();

  return true;
}
class ExpandMemCmpPass : public FunctionPass {
public:
  static char ID;

  ExpandMemCmpPass() : FunctionPass(ID) {
    initializeExpandMemCmpPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F)) return false;

    auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
    if (!TPC) {
      return false;
    }
    const TargetLowering *TL =
        TPC->getTM<TargetMachine>().getSubtargetImpl(F)->getTargetLowering();

    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    const TargetTransformInfo *TTI =
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
    auto *BFI = (PSI && PSI->hasProfileSummary())
                    ? &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI()
                    : nullptr;
    DominatorTree *DT = nullptr;
    if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
      DT = &DTWP->getDomTree();
    auto PA = runImpl(F, TLI, TTI, TL, PSI, BFI, DT);
    return !PA.areAllPreserved();
  }

private:
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
    FunctionPass::getAnalysisUsage(AU);
  }

  PreservedAnalyses runImpl(Function &F, const TargetLibraryInfo *TLI,
                            const TargetTransformInfo *TTI,
                            const TargetLowering *TL, ProfileSummaryInfo *PSI,
                            BlockFrequencyInfo *BFI, DominatorTree *DT);
  // Returns true if a change was made.
  bool runOnBlock(BasicBlock &BB, const TargetLibraryInfo *TLI,
                  const TargetTransformInfo *TTI, const TargetLowering *TL,
                  const DataLayout &DL, ProfileSummaryInfo *PSI,
                  BlockFrequencyInfo *BFI, DomTreeUpdater *DTU);
};
bool ExpandMemCmpPass::runOnBlock(BasicBlock &BB, const TargetLibraryInfo *TLI,
                                  const TargetTransformInfo *TTI,
                                  const TargetLowering *TL,
                                  const DataLayout &DL, ProfileSummaryInfo *PSI,
                                  BlockFrequencyInfo *BFI,
                                  DomTreeUpdater *DTU) {
  for (Instruction &I : BB) {
    CallInst *CI = dyn_cast<CallInst>(&I);
    if (!CI) {
      continue;
    }
    LibFunc Func;
    if (TLI->getLibFunc(*CI, Func) &&
        (Func == LibFunc_memcmp || Func == LibFunc_bcmp) &&
        expandMemCmp(CI, TTI, TL, &DL, PSI, BFI, DTU, Func == LibFunc_bcmp)) {
      return true;
    }
  }
  return false;
}
PreservedAnalyses
ExpandMemCmpPass::runImpl(Function &F, const TargetLibraryInfo *TLI,
                          const TargetTransformInfo *TTI,
                          const TargetLowering *TL, ProfileSummaryInfo *PSI,
                          BlockFrequencyInfo *BFI, DominatorTree *DT) {
  std::optional<DomTreeUpdater> DTU;
  if (DT)
    DTU.emplace(DT, DomTreeUpdater::UpdateStrategy::Lazy);

  const DataLayout &DL = F.getParent()->getDataLayout();
  bool MadeChanges = false;
  for (auto BBIt = F.begin(); BBIt != F.end();) {
    if (runOnBlock(*BBIt, TLI, TTI, TL, DL, PSI, BFI, DTU ? &*DTU : nullptr)) {
      MadeChanges = true;
      // If changes were made, restart the function from the beginning, since
      // the structure of the function was changed.
      BBIt = F.begin();
    } else {
      ++BBIt;
    }
  }
  if (MadeChanges)
    for (BasicBlock &BB : F)
      SimplifyInstructionsInBlock(&BB);
  if (!MadeChanges)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  return PA;
}
} // namespace

char ExpandMemCmpPass::ID = 0;
INITIALIZE_PASS_BEGIN(ExpandMemCmpPass, "expandmemcmp",
                      "Expand memcmp() to load/stores", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(ExpandMemCmpPass, "expandmemcmp",
                    "Expand memcmp() to load/stores", false, false)

FunctionPass *llvm::createExpandMemCmpPass() {
  return new ExpandMemCmpPass();
}