//===- AddrModeMatcher.cpp - Addressing mode matching facility -*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the target addressing mode matcher class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/CallSite.h"

using namespace llvm;
using namespace llvm::PatternMatch;

void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (BaseGV) {
    OS << (NeedPlus ? " + " : "")
       << "GV:";
    WriteAsOperand(OS, BaseGV, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs)
    OS << (NeedPlus ? " + " : "") << BaseOffs, NeedPlus = true;

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "")
       << "Base:";
    WriteAsOperand(OS, BaseReg, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "")
       << Scale << "*";
    WriteAsOperand(OS, ScaledReg, /*PrintType=*/false);
    NeedPlus = true;
  }

  OS << ']';
}

void ExtAddrMode::dump() const {
  print(dbgs());
  dbgs() << '\n';
}

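// Example (illustrative): a mode with BaseGV @g, BaseOffs 4, BaseReg %p, and
// ScaledReg %i at Scale 2 prints as "[GV:@g + 4 + Base:%p + 2*%i]".
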
/// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
                                             unsigned Depth) {
  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
  // mode.  Just process that directly.
  if (Scale == 1)
    return MatchAddr(ScaleReg, Depth);

  // If the scale is 0, it takes nothing to add this.
  if (Scale == 0)
    return true;

  // If we already have a scale of this value, we can add to it, otherwise, we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode TestAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
  // [A+B + A*7] -> [B+A*8].
  TestAddrMode.Scale += Scale;
  TestAddrMode.ScaledReg = ScaleReg;

  // If the new address isn't legal, bail out.
  if (!TLI.isLegalAddressingMode(TestAddrMode, AccessTy))
    return false;

  // It was legal, so commit it.
  AddrMode = TestAddrMode;

  // Okay, we decided that we can add ScaleReg+Scale to AddrMode.  Check now
  // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
  // X*Scale + C*Scale to addr mode.
  ConstantInt *CI = 0; Value *AddLHS = 0;
  if (isa<Instruction>(ScaleReg) &&  // not a constant expr.
      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
    TestAddrMode.ScaledReg = AddLHS;
    TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;

    // If this addressing mode is legal, commit it and remember that we folded
    // this instruction.
    if (TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) {
      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
      AddrMode = TestAddrMode;
      return true;
    }
  }

  // Otherwise, not (x+c)*scale, just return what we have.
  return true;
}

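// Example (illustrative): if AddrMode already scales %x by 3 and we match %x
// at Scale 4, TestAddrMode.Scale becomes 7; if %x is itself "add %y, 12", the
// mode can further become %y at Scale 7 with BaseOffs increased by 84.
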
/// MightBeFoldableInst - This is a little filter, which returns true if an
/// addressing computation involving I might be folded into a load/store
/// accessing it. This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that MatchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}

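// Illustrative: "mul i64 %x, 5" passes this filter (constant RHS), while
// "mul i64 %x, %y" is rejected up front, since MatchOperationAddr could never
// fold a variable scale.
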
/// MatchOperationAddr - Given an instruction or constant expr, see if we can
/// fold the operation into the addressing mode. If so, update the addressing
/// mode and return true, otherwise return false without modifying AddrMode.
bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
                                               unsigned Depth) {
  // Avoid exponential behavior on extremely deep expression trees.
  if (Depth >= 5) return false;

  switch (Opcode) {
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return MatchAddr(AddrInst->getOperand(0), Depth);
  case Instruction::IntToPtr:
    // This inttoptr is a no-op if the integer type is pointer sized.
    if (TLI.getValueType(AddrInst->getOperand(0)->getType()) ==
        TLI.getPointerTy())
      return MatchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::BitCast:
    // BitCast is always a noop, and we can handle it as long as it is
    // int->int or pointer->pointer (we don't want int<->fp or something).
    if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
         AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
        // Don't touch identity bitcasts.  These were probably put here by LSR,
        // and we don't want to mess around with them.  Assume it knows what it
        // is doing.
        AddrInst->getOperand(0)->getType() != AddrInst->getType())
      return MatchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::Add: {
    // Check to see if we can merge in the RHS then the LHS.  If so, we win.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();
    if (MatchAddr(AddrInst->getOperand(1), Depth+1) &&
        MatchAddr(AddrInst->getOperand(0), Depth+1))
      return true;

    // Restore the old addr mode info.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);

    // Otherwise this was over-aggressive.  Try merging in the LHS then the RHS.
    if (MatchAddr(AddrInst->getOperand(0), Depth+1) &&
        MatchAddr(AddrInst->getOperand(1), Depth+1))
      return true;

    // Otherwise we definitely can't merge the ADD in.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    break;
  }
  //case Instruction::Or:
  // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
  //break;
  case Instruction::Mul:
  case Instruction::Shl: {
    // Can only handle X*C and X << C.
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS) return false;
    int64_t Scale = RHS->getSExtValue();
    if (Opcode == Instruction::Shl)
      Scale = 1LL << Scale;

    return MatchScaledValue(AddrInst->getOperand(0), Scale, Depth);
  }
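  // Illustrative: "shl i64 %x, 3" reaches MatchScaledValue as %x with
  // Scale == 8 (1 << 3), exactly as "mul i64 %x, 8" would.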
  case Instruction::GetElementPtr: {
    // Scan the GEP.  We handle it if it contains constant offsets and at most
    // one variable offset.
    int VariableOperand = -1;
    unsigned VariableScale = 0;

    int64_t ConstantOffset = 0;
    const TargetData *TD = TLI.getTargetData();
    gep_type_iterator GTI = gep_type_begin(AddrInst);
    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD->getStructLayout(STy);
        unsigned Idx =
          cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
        ConstantOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t TypeSize = TD->getTypeAllocSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
          ConstantOffset += CI->getSExtValue()*TypeSize;
        } else if (TypeSize) {  // Scales of zero don't do anything.
          // We only allow one variable index at the moment.
          if (VariableOperand != -1)
            return false;

          // Remember the variable index.
          VariableOperand = i;
          VariableScale = TypeSize;
        }
      }
    }
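    // Illustrative: for "getelementptr {i32, [40 x i32]}* %P, i32 0, i32 1,
    // i32 %i", the scan yields ConstantOffset == 4 (field 1 of the struct)
    // and one variable index %i with VariableScale == 4 (assuming 32-bit i32).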
    // A common case is for the GEP to only do a constant offset.  In this case,
    // just add it to the disp field and check validity.
    if (VariableOperand == -1) {
      AddrMode.BaseOffs += ConstantOffset;
      if (ConstantOffset == 0 || TLI.isLegalAddressingMode(AddrMode, AccessTy)){
        // Check to see if we can fold the base pointer in too.
        if (MatchAddr(AddrInst->getOperand(0), Depth+1))
          return true;
      }
      AddrMode.BaseOffs -= ConstantOffset;
      return false;
    }
    // Save the valid addressing mode in case we can't match.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // See if the scale and offset amount is valid for this target.
    AddrMode.BaseOffs += ConstantOffset;

    // Match the base operand of the GEP.
    if (!MatchAddr(AddrInst->getOperand(0), Depth+1)) {
      // If it couldn't be matched, just stuff the value in a register.
      if (AddrMode.HasBaseReg) {
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
    }

    // Match the remaining variable portion of the GEP.
    if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
                          Depth)) {
      // If it couldn't be matched, try stuffing the base into a register
      // instead of matching it, and retrying the match of the scale.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      if (AddrMode.HasBaseReg)
        return false;
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
      AddrMode.BaseOffs += ConstantOffset;
      if (!MatchScaledValue(AddrInst->getOperand(VariableOperand),
                            VariableScale, Depth)) {
        // If even that didn't work, bail.
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
    }

    return true;
  }
  }
  return false;
}

/// MatchAddr - If we can, try to add the value of 'Addr' into the current
/// addressing mode.  If Addr can't be added to AddrMode this returns false and
/// leaves AddrMode unmodified.  This assumes that Addr is either a pointer type
/// or intptr_t for the target.
///
bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
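  // Illustrative: for a load of "getelementptr (i32* @g, i64 1)", MatchAddr
  // recurses through the ConstantExpr GEP and can fold the whole address to
  // BaseGV == @g with BaseOffs == 4 (assuming 32-bit i32 elements).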
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
    // Fold in immediates if legal for the target.
    AddrMode.BaseOffs += CI->getSExtValue();
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.BaseOffs -= CI->getSExtValue();
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    // If this is a global variable, try to fold it into the addressing mode.
    if (AddrMode.BaseGV == 0) {
      AddrMode.BaseGV = GV;
      if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
        return true;
      AddrMode.BaseGV = 0;
    }
  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // Check to see if it is possible to fold this operation.
    if (MatchOperationAddr(I, I->getOpcode(), Depth)) {
      // Okay, it's possible to fold this.  Check to see if it is actually
      // *profitable* to do so.  We use a simple cost model to avoid increasing
      // register pressure too much.
      if (I->hasOneUse() ||
          IsProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
        AddrModeInsts.push_back(I);
        return true;
      }

      // It isn't profitable to do this, roll back.
      //cerr << "NOT FOLDING: " << *I;
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (MatchOperationAddr(CE, CE->getOpcode(), Depth))
      return true;
  } else if (isa<ConstantPointerNull>(Addr)) {
    // Null pointer gets folded without affecting the addressing mode.
    return true;
  }

  // Worst case, the target should support [reg] addressing modes. :)
  if (!AddrMode.HasBaseReg) {
    AddrMode.HasBaseReg = true;
    AddrMode.BaseReg = Addr;
    // Still check for legality in case the target supports [imm] but not [i+r].
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.HasBaseReg = false;
    AddrMode.BaseReg = 0;
  }

  // If the base register is already taken, see if we can do [r+r].
  if (AddrMode.Scale == 0) {
    AddrMode.Scale = 1;
    AddrMode.ScaledReg = Addr;
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.Scale = 0;
    AddrMode.ScaledReg = 0;
  }
  // Couldn't match.
  return false;
}

/// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified
/// inline asm call are due to memory operands.  If so, return true, otherwise
/// return false.
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetLowering &TLI) {
  TargetLowering::AsmOperandInfoVector TargetConstraints =
    TLI.ParseConstraints(ImmutableCallSite(CI));
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, SDValue());

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it!
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}

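// Illustrative (assuming this constraint syntax): in
//   call void asm "movl $1, $0", "=*m,r"(i32* %p, i32 %x)
// %p is an indirect memory operand ("=*m") and is foldable here, while %x is
// bound to a register ("r"), so an address computation feeding it is not.
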
/// FindAllMemoryUses - Recursively walk all the uses of I until we find a
/// memory use.  If we find an obviously non-foldable instruction, return true.
/// Add the ultimately found memory instructions to MemoryUses.
static bool FindAllMemoryUses(Instruction *I,
                SmallVectorImpl<std::pair<Instruction*,unsigned> > &MemoryUses,
                              SmallPtrSet<Instruction*, 16> &ConsideredInsts,
                              const TargetLowering &TLI) {
  // If we already considered this instruction, we're done.
  if (!ConsideredInsts.insert(I))
    return false;

  // If this is an obviously unfoldable instruction, bail out.
  if (!MightBeFoldableInst(I))
    return true;

  // Loop over all the uses, recursively processing them.
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    User *U = *UI;

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      MemoryUses.push_back(std::make_pair(LI, UI.getOperandNo()));
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      unsigned opNo = UI.getOperandNo();
      if (opNo == 0) return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(SI, opNo));
      continue;
    }

    if (CallInst *CI = dyn_cast<CallInst>(U)) {
      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
      if (!IA) return true;

      // If this is a memory operand, we're cool, otherwise bail out.
      if (!IsOperandAMemoryOperand(CI, IA, I, TLI))
        return true;
      continue;
    }

    if (FindAllMemoryUses(cast<Instruction>(U), MemoryUses, ConsideredInsts,
                          TLI))
      return true;
  }

  return false;
}

/// ValueAlreadyLiveAtInst - Return true if Val is already known to be live at
/// the use site that we're folding it into.  If so, there is no cost to
/// include it in the addressing mode.  KnownLive1 and KnownLive2 are two values
/// that we know are live at the instruction already.
bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
                                                   Value *KnownLive2) {
  // If Val is either of the known-live values, we know it is live!
  if (Val == 0 || Val == KnownLive1 || Val == KnownLive2)
    return true;

  // All values other than instructions and arguments (e.g. constants) are live.
  if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;

  // If Val is a constant sized alloca in the entry block, it is live, this is
  // true because it is just a reference to the stack/frame pointer, which is
  // live for the whole function.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
    if (AI->isStaticAlloca())
      return true;

  // Check to see if this value is already used in the memory instruction's
  // block.  If so, it's already live into the block at the very least, so we
  // can reasonably fold it.
  BasicBlock *MemBB = MemoryInst->getParent();
  for (Value::use_iterator UI = Val->use_begin(), E = Val->use_end();
       UI != E; ++UI)
    // We know that uses of arguments and instructions have to be instructions.
    if (cast<Instruction>(*UI)->getParent() == MemBB)
      return true;

  return false;
}

/// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing
/// mode of the machine to fold the specified instruction into a load or store
/// that ultimately uses it.  However, the specified instruction has multiple
/// uses.  Given this, it may actually increase register pressure to fold it
/// into the load.  For example, consider this code:
///
///     X = ...
///     Y = X+1
///     use(Y)   -> nonload/store
///     Z = Y+1
///     load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1" to
/// be live at the use(Y) line.  If we don't fold Y into load Z, we use one
/// fewer register.  Since Y can't be folded into "use(Y)" we don't increase the
/// number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
/// X was live across 'load Z' for other reasons, we actually *would* want to
/// fold the addressing mode in the Z case.  This would make Y die earlier.
bool AddressingModeMatcher::
IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
                                     ExtAddrMode &AMAfter) {
  if (IgnoreProfitability) return true;

  // AMBefore is the addressing mode before this instruction was folded into it,
  // and AMAfter is the addressing mode after the instruction was folded.  Get
  // the set of registers referenced by AMAfter and subtract out those
  // referenced by AMBefore: this is the set of values which folding in this
  // address extends the lifetime of.
  //
  // Note that there are only two potential values being referenced here,
  // BaseReg and ScaleReg (global addresses are always available, as are any
  // folded immediates).
  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;

  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
  // lifetime wasn't extended by adding this instruction.
  if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    BaseReg = 0;
  if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    ScaledReg = 0;

  // If folding this instruction (and its subexprs) didn't extend any live
  // ranges, we're ok with it.
  if (BaseReg == 0 && ScaledReg == 0)
    return true;

  // If all uses of this instruction are ultimately load/store/inlineasm's,
  // check to see if their addressing modes will include this instruction.  If
  // so, we can fold it into all uses, so it doesn't matter if it has multiple
  // uses.
  SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
  SmallPtrSet<Instruction*, 16> ConsideredInsts;
  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI))
    return false;  // Has a non-memory, non-foldable use!

  // Now that we know that all uses of this instruction are part of a chain of
  // computation involving only operations that could theoretically be folded
  // into a memory use, loop over each of these uses and see if they could
  // *actually* fold the instruction.
  SmallVector<Instruction*, 32> MatchedAddrModeInsts;
  for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
    Instruction *User = MemoryUses[i].first;
    unsigned OpNo = MemoryUses[i].second;

    // Get the access type of this use.  If the use isn't a pointer, we don't
    // know what it accesses.
    Value *Address = User->getOperand(OpNo);
    if (!Address->getType()->isPointerTy())
      return false;
    const Type *AddressAccessTy =
      cast<PointerType>(Address->getType())->getElementType();

    // Do a match against the root of this address, ignoring profitability. This
    // will tell us if the addressing mode for the memory operation will
    // *actually* cover the shared instruction.
    ExtAddrMode Result;
    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy,
                                  MemoryInst, Result);
    Matcher.IgnoreProfitability = true;
    bool Success = Matcher.MatchAddr(Address, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");

    // If the match didn't cover I, then it won't be shared by it.
    if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
                  I) == MatchedAddrModeInsts.end())
      return false;

    MatchedAddrModeInsts.clear();
  }

  return true;
}