//===- AddrModeMatcher.cpp - Addressing mode matching facility --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the target addressing mode matcher class.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace llvm::PatternMatch;
void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (BaseGV) {
    OS << (NeedPlus ? " + " : "")
       << "GV:";
    WriteAsOperand(OS, BaseGV, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs)
    OS << (NeedPlus ? " + " : "") << BaseOffs, NeedPlus = true;

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "")
       << "Base:";
    WriteAsOperand(OS, BaseReg, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "")
       << Scale << "*";
    WriteAsOperand(OS, ScaledReg, /*PrintType=*/false);
    NeedPlus = true;
  }

  OS << ']';
}

void ExtAddrMode::dump() const {
  print(errs());
  errs() << '\n';
}
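
// For illustration, with hypothetical values: an ExtAddrMode with BaseGV=@G,
// BaseOffs=12, BaseReg=%p, Scale=4 and ScaledReg=%i prints as
//   [GV:@G + 12 + Base:%p + 4*%i]
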
/// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
                                             unsigned Depth) {
  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
  // mode.  Just process that directly.
  if (Scale == 1)
    return MatchAddr(ScaleReg, Depth);

  // If the scale is 0, it takes nothing to add this.
  if (Scale == 0)
    return true;

  // If we already have a scale of this value, we can add to it, otherwise, we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode TestAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
  // [A+B + A*7] -> [B+A*8].
  TestAddrMode.Scale += Scale;
  TestAddrMode.ScaledReg = ScaleReg;

  // If the new address isn't legal, bail out.
  if (!TLI.isLegalAddressingMode(TestAddrMode, AccessTy))
    return false;

  // It was legal, so commit it.
  AddrMode = TestAddrMode;

  // Okay, we decided that we can add ScaleReg*Scale to AddrMode.  Check now
  // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
  // X*Scale + C*Scale to addr mode.
  ConstantInt *CI = 0; Value *AddLHS = 0;
  if (isa<Instruction>(ScaleReg) &&  // not a constant expr.
      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)),
            MemoryInst->getContext())) {
    TestAddrMode.ScaledReg = AddLHS;
    TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;

    // If this addressing mode is legal, commit it and remember that we folded
    // this instruction.
    if (TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) {
      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
      AddrMode = TestAddrMode;
      return true;
    }
  }

  // Otherwise, not (x+c)*scale, just return what we have.
  return true;
}
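
// A sketch of the (X+C)*Scale rewrite above, using hypothetical IR names:
// matching "%t = add i32 %i, 3" at Scale 4 first commits {ScaledReg=%t,
// Scale=4}, then additionally tries {ScaledReg=%i, Scale=4, BaseOffs+=12}
// and keeps that stronger form if the target accepts it.
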
/// MightBeFoldableInst - This is a little filter, which returns true if an
/// addressing computation involving I might be folded into a load/store
/// accessing it.  This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that MatchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return isa<PointerType>(I->getType()) || isa<IntegerType>(I->getType());
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}
/// MatchOperationAddr - Given an instruction or constant expr, see if we can
/// fold the operation into the addressing mode.  If so, update the addressing
/// mode and return true, otherwise return false without modifying AddrMode.
bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
                                               unsigned Depth) {
  // Avoid exponential behavior on extremely deep expression trees.
  if (Depth >= 5) return false;

  switch (Opcode) {
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return MatchAddr(AddrInst->getOperand(0), Depth);
  case Instruction::IntToPtr:
    // This inttoptr is a no-op if the integer type is pointer sized.
    if (TLI.getValueType(AddrInst->getOperand(0)->getType()) ==
        TLI.getPointerTy())
      return MatchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::BitCast:
    // BitCast is always a noop, and we can handle it as long as it is
    // int->int or pointer->pointer (we don't want int<->fp or something).
    if ((isa<PointerType>(AddrInst->getOperand(0)->getType()) ||
         isa<IntegerType>(AddrInst->getOperand(0)->getType())) &&
        // Don't touch identity bitcasts.  These were probably put here by LSR,
        // and we don't want to mess around with them.  Assume it knows what it
        // is doing.
        AddrInst->getOperand(0)->getType() != AddrInst->getType())
      return MatchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::Add: {
    // Check to see if we can merge in the RHS then the LHS.  If so, we win.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();
    if (MatchAddr(AddrInst->getOperand(1), Depth+1) &&
        MatchAddr(AddrInst->getOperand(0), Depth+1))
      return true;

    // Restore the old addr mode info.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);

    // Otherwise this was over-aggressive.  Try merging in the LHS then the RHS.
    if (MatchAddr(AddrInst->getOperand(0), Depth+1) &&
        MatchAddr(AddrInst->getOperand(1), Depth+1))
      return true;

    // Otherwise we definitely can't merge the ADD in.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    break;
  }
  //case Instruction::Or:
  // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
  //break;
  case Instruction::Mul:
  case Instruction::Shl: {
    // Can only handle X*C and X << C.
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS) return false;
    int64_t Scale = RHS->getSExtValue();
    if (Opcode == Instruction::Shl)
      Scale = 1LL << Scale;   // X << C is handled as X * (1 << C).

    return MatchScaledValue(AddrInst->getOperand(0), Scale, Depth);
  }
  case Instruction::GetElementPtr: {
    // Scan the GEP.  We handle it if it contains constant offsets and at most
    // one variable offset.
    int VariableOperand = -1;
    unsigned VariableScale = 0;

    int64_t ConstantOffset = 0;
    const TargetData *TD = TLI.getTargetData();
    gep_type_iterator GTI = gep_type_begin(AddrInst);
    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD->getStructLayout(STy);
        unsigned Idx =
          cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
        ConstantOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t TypeSize = TD->getTypeAllocSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
          ConstantOffset += CI->getSExtValue()*TypeSize;
        } else if (TypeSize) {  // Scales of zero don't do anything.
          // We only allow one variable index at the moment.
          if (VariableOperand != -1)
            return false;

          // Remember the variable index.
          VariableOperand = i;
          VariableScale = TypeSize;
        }
      }
    }

    // A common case is for the GEP to only do a constant offset.  In this case,
    // just add it to the disp field and check validity.
    if (VariableOperand == -1) {
      AddrMode.BaseOffs += ConstantOffset;
      if (ConstantOffset == 0 || TLI.isLegalAddressingMode(AddrMode, AccessTy)){
        // Check to see if we can fold the base pointer in too.
        if (MatchAddr(AddrInst->getOperand(0), Depth+1))
          return true;
      }
      AddrMode.BaseOffs -= ConstantOffset;
      return false;
    }

    // Save the valid addressing mode in case we can't match.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // See if the scale and offset amount is valid for this target.
    AddrMode.BaseOffs += ConstantOffset;

    // Match the base operand of the GEP.
    if (!MatchAddr(AddrInst->getOperand(0), Depth+1)) {
      // If it couldn't be matched, just stuff the value in a register.
      if (AddrMode.HasBaseReg) {
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
    }

    // Match the remaining variable portion of the GEP.
    if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
                          Depth)) {
      // If it couldn't be matched, try stuffing the base into a register
      // instead of matching it, and retrying the match of the scale.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      if (AddrMode.HasBaseReg)
        return false;
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
      AddrMode.BaseOffs += ConstantOffset;
      if (!MatchScaledValue(AddrInst->getOperand(VariableOperand),
                            VariableScale, Depth)) {
        // If even that didn't work, bail.
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
    }

    return true;
  }
  }

  return false;
}
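
// A worked GEP example with hypothetical IR (i32 has alloc size 4 here):
// for "getelementptr i32* %base, i64 %i", the index contributes TypeSize 4,
// so the matcher tries BaseReg=%base with ScaledReg=%i at Scale 4; a
// constant index would instead be folded directly into BaseOffs.
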
/// MatchAddr - If we can, try to add the value of 'Addr' into the current
/// addressing mode.  If Addr can't be added to AddrMode this returns false and
/// leaves AddrMode unmodified.  This assumes that Addr is either a pointer type
/// or intptr_t for the target.
bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
    // Fold in immediates if legal for the target.
    AddrMode.BaseOffs += CI->getSExtValue();
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.BaseOffs -= CI->getSExtValue();
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    // If this is a global variable, try to fold it into the addressing mode.
    if (AddrMode.BaseGV == 0) {
      AddrMode.BaseGV = GV;
      if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
        return true;
      AddrMode.BaseGV = 0;
    }
  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // Check to see if it is possible to fold this operation.
    if (MatchOperationAddr(I, I->getOpcode(), Depth)) {
      // Okay, it's possible to fold this.  Check to see if it is actually
      // *profitable* to do so.  We use a simple cost model to avoid increasing
      // register pressure too much.
      if (I->hasOneUse() ||
          IsProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
        AddrModeInsts.push_back(I);
        return true;
      }

      // It isn't profitable to do this, roll back.
      //cerr << "NOT FOLDING: " << *I;
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (MatchOperationAddr(CE, CE->getOpcode(), Depth))
      return true;
  } else if (isa<ConstantPointerNull>(Addr)) {
    // Null pointer gets folded without affecting the addressing mode.
    return true;
  }

  // Worst case, the target should support [reg] addressing modes. :)
  if (!AddrMode.HasBaseReg) {
    AddrMode.HasBaseReg = true;
    AddrMode.BaseReg = Addr;
    // Still check for legality in case the target supports [imm] but not [i+r].
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.HasBaseReg = false;
    AddrMode.BaseReg = 0;
  }

  // If the base register is already taken, see if we can do [r+r].
  if (AddrMode.Scale == 0) {
    AddrMode.Scale = 1;
    AddrMode.ScaledReg = Addr;
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.Scale = 0;
    AddrMode.ScaledReg = 0;
  }
  // Couldn't match.
  return false;
}
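
// The fallback behavior above, illustrated with a hypothetical pointer %p:
// if nothing else matched and the base register is free, MatchAddr tries
// {BaseReg=%p}; if the base is taken but the scale slot is free, it tries
// {ScaledReg=%p, Scale=1}, i.e. an [r+r] form.
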
/// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified
/// inline asm call are due to memory operands.  If so, return true, otherwise
/// return false.
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetLowering &TLI) {
  std::vector<InlineAsm::ConstraintInfo>
    Constraints = IA->ParseConstraints();

  unsigned ArgNo = 1;  // Operand index into the CallInst; operand 0 is the callee.
  for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo OpInfo(Constraints[i]);

    // Compute the value type for each operand.
    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.isIndirect)
        OpInfo.CallOperandVal = CI->getOperand(ArgNo++);
      break;
    case InlineAsm::isInput:
      OpInfo.CallOperandVal = CI->getOperand(ArgNo++);
      break;
    case InlineAsm::isClobber:
      // Nothing to do.
      break;
    }

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, SDValue(),
                             OpInfo.ConstraintType == TargetLowering::C_Memory);

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it!
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}
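
// Example with hypothetical IR: in
//   call void asm "...", "=*m,r"(i32* %p, i32 %v)
// %p satisfies an indirect memory constraint and so is foldable as an
// address, while %v binds to the "r" register constraint; if %v were OpVal,
// this function would return false.
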
/// FindAllMemoryUses - Recursively walk all the uses of I until we find a
/// memory use.  If we find an obviously non-foldable instruction, return true.
/// Add the ultimately found memory instructions to MemoryUses.
static bool FindAllMemoryUses(Instruction *I,
                SmallVectorImpl<std::pair<Instruction*,unsigned> > &MemoryUses,
                              SmallPtrSet<Instruction*, 16> &ConsideredInsts,
                              const TargetLowering &TLI) {
  // If we already considered this instruction, we're done.
  if (!ConsideredInsts.insert(I))
    return false;

  // If this is an obviously unfoldable instruction, bail out.
  if (!MightBeFoldableInst(I))
    return true;

  // Loop over all the uses, recursively processing them.
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
      MemoryUses.push_back(std::make_pair(LI, UI.getOperandNo()));
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
      if (UI.getOperandNo() == 0) return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(SI, UI.getOperandNo()));
      continue;
    }

    if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
      if (IA == 0) return true;

      // If this is a memory operand, we're cool, otherwise bail out.
      if (!IsOperandAMemoryOperand(CI, IA, I, TLI))
        return true;
      continue;
    }

    if (FindAllMemoryUses(cast<Instruction>(*UI), MemoryUses, ConsideredInsts,
                          TLI))
      return true;
  }

  return false;
}
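
// A sketch of what gets recorded, with hypothetical IR: if %a is the pointer
// of "load i32* %a" and of "store i32 %v, i32* %a", MemoryUses receives
// {load, 0} and {store, 1} (a store's pointer is operand 1); using %a as the
// *stored value* aborts the walk instead.
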
/// ValueAlreadyLiveAtInst - Return true if Val is already known to be live at
/// the use site that we're folding it into.  If so, there is no cost to
/// include it in the addressing mode.  KnownLive1 and KnownLive2 are two values
/// that we know are live at the instruction already.
bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
                                                   Value *KnownLive2) {
  // If Val is either of the known-live values, we know it is live!
  if (Val == 0 || Val == KnownLive1 || Val == KnownLive2)
    return true;

  // All values other than instructions and arguments (e.g. constants) are live.
  if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;

  // If Val is a constant sized alloca in the entry block, it is live; this is
  // true because it is just a reference to the stack/frame pointer, which is
  // live for the whole function.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
    if (AI->isStaticAlloca())
      return true;

  // Check to see if this value is already used in the memory instruction's
  // block.  If so, it's already live into the block at the very least, so we
  // can reasonably fold it.
  BasicBlock *MemBB = MemoryInst->getParent();
  for (Value::use_iterator UI = Val->use_begin(), E = Val->use_end();
       UI != E; ++UI)
    // We know that uses of arguments and instructions have to be instructions.
    if (cast<Instruction>(*UI)->getParent() == MemBB)
      return true;

  return false;
}
/// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing
/// mode of the machine to fold the specified instruction into a load or store
/// that ultimately uses it.  However, the specified instruction has multiple
/// uses.  Given this, it may actually increase register pressure to fold it
/// into the load.  For example, consider this code:
///
///     X = ...
///     Y = X+1
///     use(Y)   -> nonload/store
///     Z = Y+1
///     load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1" to
/// be live at the use(Y) line.  If we don't fold Y into load Z, we use one
/// fewer register.  Since Y can't be folded into "use(Y)" we don't increase the
/// number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
/// X was live across 'load Z' for other reasons, we actually *would* want to
/// fold the addressing mode in the Z case.  This would make Y die earlier.
bool AddressingModeMatcher::
IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
                                     ExtAddrMode &AMAfter) {
  if (IgnoreProfitability) return true;

  // AMBefore is the addressing mode before this instruction was folded into it,
  // and AMAfter is the addressing mode after the instruction was folded.  Get
  // the set of registers referenced by AMAfter and subtract out those
  // referenced by AMBefore: this is the set of values which folding in this
  // address extends the lifetime of.
  //
  // Note that there are only two potential values being referenced here,
  // BaseReg and ScaleReg (global addresses are always available, as are any
  // folded immediates).
  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;

  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
  // lifetime wasn't extended by adding this instruction.
  if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    BaseReg = 0;
  if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    ScaledReg = 0;

  // If folding this instruction (and its subexprs) didn't extend any live
  // ranges, we're ok with it.
  if (BaseReg == 0 && ScaledReg == 0)
    return true;

  // If all uses of this instruction are ultimately load/store/inlineasm's,
  // check to see if their addressing modes will include this instruction.  If
  // so, we can fold it into all uses, so it doesn't matter if it has multiple
  // uses.
  SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
  SmallPtrSet<Instruction*, 16> ConsideredInsts;
  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI))
    return false;  // Has a non-memory, non-foldable use!

  // Now that we know that all uses of this instruction are part of a chain of
  // computation involving only operations that could theoretically be folded
  // into a memory use, loop over each of these uses and see if they could
  // *actually* fold the instruction.
  SmallVector<Instruction*, 32> MatchedAddrModeInsts;
  for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
    Instruction *User = MemoryUses[i].first;
    unsigned OpNo = MemoryUses[i].second;

    // Get the access type of this use.  If the use isn't a pointer, we don't
    // know what it accesses.
    Value *Address = User->getOperand(OpNo);
    if (!isa<PointerType>(Address->getType()))
      return false;
    const Type *AddressAccessTy =
      cast<PointerType>(Address->getType())->getElementType();

    // Do a match against the root of this address, ignoring profitability. This
    // will tell us if the addressing mode for the memory operation will
    // *actually* cover the shared instruction.
    ExtAddrMode Result;
    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy,
                                  MemoryInst, Result);
    Matcher.IgnoreProfitability = true;
    bool Success = Matcher.MatchAddr(Address, 0);
    // The self-assignment keeps Success "used" in NDEBUG builds, where the
    // assert compiles away.
    Success = Success; assert(Success && "Couldn't select *anything*?");

    // If the match didn't cover I, then it won't be shared by it.
    if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
                  I) == MatchedAddrModeInsts.end())
      return false;

    MatchedAddrModeInsts.clear();
  }

  return true;
}