[ARM] Better patterns for fp <> predicate vectors
[llvm-complete.git] / lib / Target / ARM / ARMCodeGenPrepare.cpp
blob2fc5f4aaab50f10e681db02c0ac8be981d75100a
1 //===----- ARMCodeGenPrepare.cpp ------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This pass inserts intrinsics to handle small types that would otherwise be
11 /// promoted during legalization. Here we can manually promote types or insert
12 /// intrinsics which can handle narrow types that aren't supported by the
13 /// register classes.
15 //===----------------------------------------------------------------------===//
17 #include "ARM.h"
18 #include "ARMSubtarget.h"
19 #include "ARMTargetMachine.h"
20 #include "llvm/ADT/StringRef.h"
21 #include "llvm/CodeGen/Passes.h"
22 #include "llvm/CodeGen/TargetPassConfig.h"
23 #include "llvm/IR/Attributes.h"
24 #include "llvm/IR/BasicBlock.h"
25 #include "llvm/IR/IRBuilder.h"
26 #include "llvm/IR/Constants.h"
27 #include "llvm/IR/InstrTypes.h"
28 #include "llvm/IR/Instruction.h"
29 #include "llvm/IR/Instructions.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/IR/Intrinsics.h"
32 #include "llvm/IR/Type.h"
33 #include "llvm/IR/Value.h"
34 #include "llvm/IR/Verifier.h"
35 #include "llvm/Pass.h"
36 #include "llvm/Support/Casting.h"
37 #include "llvm/Support/CommandLine.h"
39 #define DEBUG_TYPE "arm-codegenprepare"
41 using namespace llvm;
// Master switch for the pass; promotion is opt-in (disabled by default)
// while the transformation matures.
static cl::opt<bool>
DisableCGP("arm-disable-cgp", cl::Hidden, cl::init(true),
           cl::desc("Disable ARM specific CodeGenPrepare pass"));

// Allow unsafe adds/subs to be replaced by scalar uses of the parallel DSP
// intrinsics (uadd8/uadd16/usub8/usub16) instead of blocking promotion.
static cl::opt<bool>
EnableDSP("arm-enable-scalar-dsp", cl::Hidden, cl::init(false),
          cl::desc("Use DSP instructions for scalar operations"));

// DSP instructions take no immediate operands, so using them with constant
// operands costs an extra materialisation; gate that case separately.
static cl::opt<bool>
EnableDSPWithImms("arm-enable-scalar-dsp-imms", cl::Hidden, cl::init(false),
   cl::desc("Use DSP instructions for scalar operations\
            with immediate operands"));
56 // The goal of this pass is to enable more efficient code generation for
57 // operations on narrow types (i.e. types with < 32-bits) and this is a
58 // motivating IR code example:
60 // define hidden i32 @cmp(i8 zeroext) {
61 // %2 = add i8 %0, -49
62 // %3 = icmp ult i8 %2, 3
63 // ..
64 // }
66 // The issue here is that i8 is type-legalized to i32 because i8 is not a
67 // legal type. Thus, arithmetic is done in integer-precision, but then the
68 // byte value is masked out as follows:
70 // t19: i32 = add t4, Constant:i32<-49>
71 // t24: i32 = and t19, Constant:i32<255>
73 // Consequently, we generate code like this:
75 // subs r0, #49
76 // uxtb r1, r0
77 // cmp r1, #3
79 // This shows that masking out the byte value results in generation of
80 // the UXTB instruction. This is not optimal as r0 already contains the byte
81 // value we need, and so instead we can just generate:
83 // sub.w r1, r0, #49
84 // cmp r1, #3
86 // We achieve this by type promoting the IR to i32 like so for this example:
88 // define i32 @cmp(i8 zeroext %c) {
89 // %0 = zext i8 %c to i32
90 // %c.off = add i32 %0, -49
91 // %1 = icmp ult i32 %c.off, 3
92 // ..
93 // }
95 // For this to be valid and legal, we need to prove that the i32 add is
96 // producing the same value as the i8 addition, and that e.g. no overflow
97 // happens.
99 // A brief sketch of the algorithm and some terminology.
100 // We pattern match interesting IR patterns:
101 // - which have "sources": instructions producing narrow values (i8, i16), and
102 // - they have "sinks": instructions consuming these narrow values.
104 // We collect all instructions connecting sources and sinks in a worklist, so
105 // that we can mutate these instructions and perform type promotion when it is
106 // legal to do so.
namespace {
// Performs the IR mutation for a use-def tree that ARMCodeGenPrepare has
// decided is legal to promote: sources are zero-extended, constants widened,
// sinks receive truncs, and redundant casts are removed afterwards.
class IRPromoter {
  SmallPtrSet<Value*, 8> NewInsts;             // Instructions created here.
  SmallPtrSet<Instruction*, 4> InstsToRemove;  // Dead after replacement.
  // Original operand types of sinks/truncs, needed to re-create truncs.
  DenseMap<Value*, SmallVector<Type*, 4>> TruncTysMap;
  SmallPtrSet<Value*, 8> Promoted;             // Values mutated to ExtTy.
  Module *M = nullptr;
  LLVMContext &Ctx;
  // The type we promote to: always i32
  IntegerType *ExtTy = nullptr;
  // The type of the value that the search began from, either i8 or i16.
  // This defines the max range of the values that we allow in the promoted
  // tree.
  IntegerType *OrigTy = nullptr;
  // Non-owning views of the caller's (ARMCodeGenPrepare's) search results;
  // valid only for the duration of Mutate().
  SetVector<Value*> *Visited;
  SmallPtrSetImpl<Value*> *Sources;
  SmallPtrSetImpl<Instruction*> *Sinks;
  SmallPtrSetImpl<Instruction*> *SafeToPromote;
  SmallPtrSetImpl<Instruction*> *SafeWrap;

  void ReplaceAllUsersOfWith(Value *From, Value *To);
  void PrepareWrappingAdds(void);
  void ExtendSources(void);
  void ConvertTruncs(void);
  void PromoteTree(void);
  void TruncateSinks(void);
  void Cleanup(void);

public:
  IRPromoter(Module *M) : M(M), Ctx(M->getContext()),
                          ExtTy(Type::getInt32Ty(Ctx)) { }

  // Entry point: promote the given use-def tree from OrigTy to i32.
  void Mutate(Type *OrigTy,
              SetVector<Value*> &Visited,
              SmallPtrSetImpl<Value*> &Sources,
              SmallPtrSetImpl<Instruction*> &Sinks,
              SmallPtrSetImpl<Instruction*> &SafeToPromote,
              SmallPtrSetImpl<Instruction*> &SafeWrap);
};
// The pass itself: searches up from icmps for narrow (i8/i16) use-def
// chains and, when legal, hands them to IRPromoter to widen to i32.
class ARMCodeGenPrepare : public FunctionPass {
  const ARMSubtarget *ST = nullptr;
  IRPromoter *Promoter = nullptr;         // Owned; created/destroyed in do{Init,Final}ization.
  std::set<Value*> AllVisited;            // Values seen across all searches in a function.
  SmallPtrSet<Instruction*, 8> SafeToPromote;
  SmallPtrSet<Instruction*, 4> SafeWrap;  // Instructions allowed to wrap (see isSafeWrap).

  bool isSafeWrap(Instruction *I);
  bool isSupportedValue(Value *V);
  bool isLegalToPromote(Value *V);
  bool TryToPromote(Value *V);

public:
  static char ID;
  // Bit width (8 or 16) of the value the current search started from;
  // global so the free-function predicates below can read it.
  static unsigned TypeSize;
  Type *OrigTy = nullptr;

  ARMCodeGenPrepare() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
  }

  StringRef getPassName() const override { return "ARM IR optimizations"; }

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;
  bool doFinalization(Module &M) override;
};

} // end anonymous namespace
181 static bool GenerateSignBits(Value *V) {
182 if (auto *Arg = dyn_cast<Argument>(V))
183 return Arg->hasSExtAttr();
185 if (!isa<Instruction>(V))
186 return false;
188 unsigned Opc = cast<Instruction>(V)->getOpcode();
189 return Opc == Instruction::AShr || Opc == Instruction::SDiv ||
190 Opc == Instruction::SRem || Opc == Instruction::SExt ||
191 Opc == Instruction::SIToFP;
/// Return true if V's scalar width equals the width (8 or 16) that the
/// current promotion search began from.
static bool EqualTypeSize(Value *V) {
  return V->getType()->getScalarSizeInBits() == ARMCodeGenPrepare::TypeSize;
}
/// Return true if V's scalar width is no wider than the width that the
/// current promotion search began from.
static bool LessOrEqualTypeSize(Value *V) {
  return V->getType()->getScalarSizeInBits() <= ARMCodeGenPrepare::TypeSize;
}
/// Return true if V's scalar width is strictly wider than the width that
/// the current promotion search began from.
static bool GreaterThanTypeSize(Value *V) {
  return V->getType()->getScalarSizeInBits() > ARMCodeGenPrepare::TypeSize;
}
/// Return true if V's scalar width is strictly narrower than the width that
/// the current promotion search began from.
static bool LessThanTypeSize(Value *V) {
  return V->getType()->getScalarSizeInBits() < ARMCodeGenPrepare::TypeSize;
}
/// Some instructions can use 8- and 16-bit operands, and we don't need to
/// promote anything larger. We disallow booleans to make life easier when
/// dealing with icmps but allow any other integer that is <= 16 bits. Void
/// types are accepted so we can handle switches.
static bool isSupportedType(Value *V) {
  Type *Ty = V->getType();

  // Allow voids and pointers, these won't be promoted.
  if (Ty->isVoidTy() || Ty->isPointerTy())
    return true;

  // For loads, inspect the type actually loaded from memory.
  if (auto *Ld = dyn_cast<LoadInst>(V))
    Ty = cast<PointerType>(Ld->getPointerOperandType())->getElementType();

  // Reject non-integers and i1 (booleans).
  // NOTE(review): the bitwidth check reads V->getType() rather than Ty; for
  // a load these coincide (load type == pointee type), but confirm if the
  // load handling above ever diverges from that.
  if (!isa<IntegerType>(Ty) ||
      cast<IntegerType>(V->getType())->getBitWidth() == 1)
    return false;

  return LessOrEqualTypeSize(V);
}
231 /// Return true if the given value is a source in the use-def chain, producing
232 /// a narrow 'TypeSize' value. These values will be zext to start the promotion
233 /// of the tree to i32. We guarantee that these won't populate the upper bits
234 /// of the register. ZExt on the loads will be free, and the same for call
235 /// return values because we only accept ones that guarantee a zeroext ret val.
236 /// Many arguments will have the zeroext attribute too, so those would be free
237 /// too.
238 static bool isSource(Value *V) {
239 if (!isa<IntegerType>(V->getType()))
240 return false;
242 // TODO Allow zext to be sources.
243 if (isa<Argument>(V))
244 return true;
245 else if (isa<LoadInst>(V))
246 return true;
247 else if (isa<BitCastInst>(V))
248 return true;
249 else if (auto *Call = dyn_cast<CallInst>(V))
250 return Call->hasRetAttr(Attribute::AttrKind::ZExt);
251 else if (auto *Trunc = dyn_cast<TruncInst>(V))
252 return EqualTypeSize(Trunc);
253 return false;
256 /// Return true if V will require any promoted values to be truncated for the
257 /// the IR to remain valid. We can't mutate the value type of these
258 /// instructions.
259 static bool isSink(Value *V) {
260 // TODO The truncate also isn't actually necessary because we would already
261 // proved that the data value is kept within the range of the original data
262 // type.
264 // Sinks are:
265 // - points where the value in the register is being observed, such as an
266 // icmp, switch or store.
267 // - points where value types have to match, such as calls and returns.
268 // - zext are included to ease the transformation and are generally removed
269 // later on.
270 if (auto *Store = dyn_cast<StoreInst>(V))
271 return LessOrEqualTypeSize(Store->getValueOperand());
272 if (auto *Return = dyn_cast<ReturnInst>(V))
273 return LessOrEqualTypeSize(Return->getReturnValue());
274 if (auto *ZExt = dyn_cast<ZExtInst>(V))
275 return GreaterThanTypeSize(ZExt);
276 if (auto *Switch = dyn_cast<SwitchInst>(V))
277 return LessThanTypeSize(Switch->getCondition());
278 if (auto *ICmp = dyn_cast<ICmpInst>(V))
279 return ICmp->isSigned() || LessThanTypeSize(ICmp->getOperand(0));
281 return isa<CallInst>(V);
/// Return whether this instruction can safely wrap.
bool ARMCodeGenPrepare::isSafeWrap(Instruction *I) {
  // We can support a, potentially, wrapping instruction (I) if:
  // - It is only used by an unsigned icmp.
  // - The icmp uses a constant.
  // - The wrapping value (I) is decreasing, i.e would underflow - wrapping
  //   around zero to become a larger number than before.
  // - The wrapping instruction (I) also uses a constant.
  //
  // We can then use the two constants to calculate whether the result would
  // wrap in respect to itself in the original bitwidth. If it doesn't wrap,
  // just underflows the range, the icmp would give the same result whether the
  // result has been truncated or not. We calculate this by:
  // - Zero extending both constants, if needed, to 32-bits.
  // - Take the absolute value of I's constant, adding this to the icmp const.
  // - Check that this value is not out of range for small type. If it is, it
  //   means that it has underflowed enough to wrap around the icmp constant.
  //
  // For example:
  //
  // %sub = sub i8 %a, 2
  // %cmp = icmp ule i8 %sub, 254
  //
  // If %a = 0, %sub = -2 == FE == 254
  // But if this is evaluated as a i32
  // %sub = -2 == FF FF FF FE == 4294967294
  // So the unsigned compares (i8 and i32) would not yield the same result.
  //
  // Another way to look at it is:
  // %a - 2 <= 254
  // %a + 2 <= 254 + 2
  // %a <= 256
  // And we can't represent 256 in the i8 format, so we don't support it.
  //
  // Whereas:
  //
  // %sub i8 %a, 1
  // %cmp = icmp ule i8 %sub, 254
  //
  // If %a = 0, %sub = -1 == FF == 255
  // As i32:
  // %sub = -1 == FF FF FF FF == 4294967295
  //
  // In this case, the unsigned compare results would be the same and this
  // would also be true for ult, uge and ugt:
  // - (255 < 254) == (0xFFFFFFFF < 254) == false
  // - (255 <= 254) == (0xFFFFFFFF <= 254) == false
  // - (255 > 254) == (0xFFFFFFFF > 254) == true
  // - (255 >= 254) == (0xFFFFFFFF >= 254) == true
  //
  // To demonstrate why we can't handle increasing values:
  //
  // %add = add i8 %a, 2
  // %cmp = icmp ult i8 %add, 127
  //
  // If %a = 254, %add = 256 == (i8 1)
  // As i32:
  // %add = 256
  //
  // (1 < 127) != (256 < 127)

  // Only add/sub can wrap in a way we can reason about here.
  unsigned Opc = I->getOpcode();
  if (Opc != Instruction::Add && Opc != Instruction::Sub)
    return false;

  // Require the single-user-unsigned-icmp + constant-operand shape described
  // above.
  if (!I->hasOneUse() ||
      !isa<ICmpInst>(*I->user_begin()) ||
      !isa<ConstantInt>(I->getOperand(1)))
    return false;

  ConstantInt *OverflowConst = cast<ConstantInt>(I->getOperand(1));
  bool NegImm = OverflowConst->isNegative();
  // Decreasing == "sub positive" or "add negative".
  bool IsDecreasing = ((Opc == Instruction::Sub) && !NegImm) ||
                       ((Opc == Instruction::Add) && NegImm);
  if (!IsDecreasing)
    return false;

  // Don't support an icmp that deals with sign bits.
  auto *CI = cast<ICmpInst>(*I->user_begin());
  if (CI->isSigned() || CI->isEquality())
    return false;

  // The icmp constant may be on either side of the compare.
  ConstantInt *ICmpConst = nullptr;
  if (auto *Const = dyn_cast<ConstantInt>(CI->getOperand(0)))
    ICmpConst = Const;
  else if (auto *Const = dyn_cast<ConstantInt>(CI->getOperand(1)))
    ICmpConst = Const;
  else
    return false;

  // Now check that the result can't wrap on itself.
  APInt Total = ICmpConst->getValue().getBitWidth() < 32 ?
    ICmpConst->getValue().zext(32) : ICmpConst->getValue();

  Total += OverflowConst->getValue().getBitWidth() < 32 ?
    OverflowConst->getValue().abs().zext(32) : OverflowConst->getValue().abs();

  APInt Max = APInt::getAllOnesValue(ARMCodeGenPrepare::TypeSize);

  // Compare Total against Max after equalising bit widths; wrapping past the
  // narrow type's range makes the transform unsound.
  if (Total.getBitWidth() > Max.getBitWidth()) {
    if (Total.ugt(Max.zext(Total.getBitWidth())))
      return false;
  } else if (Max.getBitWidth() > Total.getBitWidth()) {
    if (Total.zext(Max.getBitWidth()).ugt(Max))
      return false;
  } else if (Total.ugt(Max))
    return false;

  LLVM_DEBUG(dbgs() << "ARM CGP: Allowing safe overflow for " << *I << "\n");
  SafeWrap.insert(I);
  return true;
}
397 static bool shouldPromote(Value *V) {
398 if (!isa<IntegerType>(V->getType()) || isSink(V))
399 return false;
401 if (isSource(V))
402 return true;
404 auto *I = dyn_cast<Instruction>(V);
405 if (!I)
406 return false;
408 if (isa<ICmpInst>(I))
409 return false;
411 return true;
414 /// Return whether we can safely mutate V's type to ExtTy without having to be
415 /// concerned with zero extending or truncation.
416 static bool isPromotedResultSafe(Value *V) {
417 if (GenerateSignBits(V))
418 return false;
420 if (!isa<Instruction>(V))
421 return true;
423 if (!isa<OverflowingBinaryOperator>(V))
424 return true;
426 return cast<Instruction>(V)->hasNoUnsignedWrap();
429 /// Return the intrinsic for the instruction that can perform the same
430 /// operation but on a narrow type. This is using the parallel dsp intrinsics
431 /// on scalar values.
432 static Intrinsic::ID getNarrowIntrinsic(Instruction *I) {
433 // Whether we use the signed or unsigned versions of these intrinsics
434 // doesn't matter because we're not using the GE bits that they set in
435 // the APSR.
436 switch(I->getOpcode()) {
437 default:
438 break;
439 case Instruction::Add:
440 return ARMCodeGenPrepare::TypeSize == 16 ? Intrinsic::arm_uadd16 :
441 Intrinsic::arm_uadd8;
442 case Instruction::Sub:
443 return ARMCodeGenPrepare::TypeSize == 16 ? Intrinsic::arm_usub16 :
444 Intrinsic::arm_usub8;
446 llvm_unreachable("unhandled opcode for narrow intrinsic");
// Redirect all users of From to To, except a user that is an instruction
// identical to To (that user is the replacement itself). If every use was
// replaced, From becomes dead and is queued for removal.
void IRPromoter::ReplaceAllUsersOfWith(Value *From, Value *To) {
  SmallVector<Instruction*, 4> Users;
  Instruction *InstTo = dyn_cast<Instruction>(To);
  bool ReplacedAll = true;

  LLVM_DEBUG(dbgs() << "ARM CGP: Replacing " << *From << " with " << *To
             << "\n");

  // Collect users first: we can't modify the use list while iterating it.
  for (Use &U : From->uses()) {
    auto *User = cast<Instruction>(U.getUser());
    if (InstTo && User->isIdenticalTo(InstTo)) {
      // Skip the replacement instruction itself to avoid a self-reference.
      ReplacedAll = false;
      continue;
    }
    Users.push_back(User);
  }

  for (auto *U : Users)
    U->replaceUsesOfWith(From, To);

  // From is dead only if no user was skipped above.
  if (ReplacedAll)
    if (auto *I = dyn_cast<Instruction>(From))
      InstsToRemove.insert(I);
}
// Rewrite safe-wrapping 'add x, -C' instructions as 'sub x, C' so that all
// remaining constants in the tree are non-negative and can be zext'd later.
void IRPromoter::PrepareWrappingAdds() {
  LLVM_DEBUG(dbgs() << "ARM CGP: Prepare underflowing adds.\n");
  IRBuilder<> Builder{Ctx};

  // For adds that safely wrap and use a negative immediate as operand 1, we
  // create an equivalent instruction using a positive immediate.
  // That positive immediate can then be zext along with all the other
  // immediates later.
  for (auto *I : *SafeWrap) {
    if (I->getOpcode() != Instruction::Add)
      continue;

    LLVM_DEBUG(dbgs() << "ARM CGP: Adjusting " << *I << "\n");
    // isSafeWrap only admits add/sub with a ConstantInt second operand.
    assert((isa<ConstantInt>(I->getOperand(1)) &&
            cast<ConstantInt>(I->getOperand(1))->isNegative()) &&
           "Wrapping should have a negative immediate as the second operand");

    auto Const = cast<ConstantInt>(I->getOperand(1));
    auto *NewConst = ConstantInt::get(Ctx, Const->getValue().abs());
    Builder.SetInsertPoint(I);
    Value *NewVal = Builder.CreateSub(I->getOperand(0), NewConst);
    if (auto *NewInst = dyn_cast<Instruction>(NewVal)) {
      // Preserve nuw/nsw etc. from the original add.
      NewInst->copyIRFlags(I);
      NewInsts.insert(NewInst);
    }
    InstsToRemove.insert(I);
    I->replaceAllUsesWith(NewVal);
    LLVM_DEBUG(dbgs() << "ARM CGP: New equivalent: " << *NewVal << "\n");
  }
  // The replacement subs are part of the tree now, so visit them too.
  for (auto *I : NewInsts)
    Visited->insert(I);
}
// Insert a zext (to i32) after each source and redirect the source's users
// to the extended value, beginning the promotion of the tree.
void IRPromoter::ExtendSources() {
  IRBuilder<> Builder{Ctx};

  auto InsertZExt = [&](Value *V, Instruction *InsertPt) {
    assert(V->getType() != ExtTy && "zext already extends to i32");
    LLVM_DEBUG(dbgs() << "ARM CGP: Inserting ZExt for " << *V << "\n");
    Builder.SetInsertPoint(InsertPt);
    if (auto *I = dyn_cast<Instruction>(V))
      Builder.SetCurrentDebugLocation(I->getDebugLoc());

    Value *ZExt = Builder.CreateZExt(V, ExtTy);
    if (auto *I = dyn_cast<Instruction>(ZExt)) {
      // For an argument the zext goes at the function entry (before the
      // insertion point); for an instruction it must follow its operand.
      if (isa<Argument>(V))
        I->moveBefore(InsertPt);
      else
        I->moveAfter(InsertPt);
      NewInsts.insert(I);
    }

    ReplaceAllUsersOfWith(V, ZExt);
  };

  // Now, insert extending instructions between the sources and their users.
  LLVM_DEBUG(dbgs() << "ARM CGP: Promoting sources:\n");
  for (auto V : *Sources) {
    LLVM_DEBUG(dbgs() << " - " << *V << "\n");
    if (auto *I = dyn_cast<Instruction>(V))
      InsertZExt(I, I);
    else if (auto *Arg = dyn_cast<Argument>(V)) {
      BasicBlock &BB = Arg->getParent()->front();
      InsertZExt(Arg, &*BB.getFirstInsertionPt());
    } else {
      llvm_unreachable("unhandled source that needs extending");
    }
    Promoted.insert(V);
  }
}
// Mutate the types of the visited (non-source, non-sink) instructions to
// i32, widening their constant/undef operands, then replace any instruction
// that could not be safely mutated with a narrow DSP intrinsic call.
void IRPromoter::PromoteTree() {
  LLVM_DEBUG(dbgs() << "ARM CGP: Mutating the tree..\n");

  IRBuilder<> Builder{Ctx};

  // Mutate the types of the instructions within the tree. Here we handle
  // constant operands.
  for (auto *V : *Visited) {
    if (Sources->count(V))
      continue;

    auto *I = cast<Instruction>(V);
    if (Sinks->count(I))
      continue;

    for (unsigned i = 0, e = I->getNumOperands(); i < e; ++i) {
      Value *Op = I->getOperand(i);
      // Skip operands already at i32 or that aren't integers at all.
      if ((Op->getType() == ExtTy) || !isa<IntegerType>(Op->getType()))
        continue;

      if (auto *Const = dyn_cast<ConstantInt>(Op)) {
        // Constants are zext'd; PrepareWrappingAdds already removed the
        // negative immediates this would be wrong for.
        Constant *NewConst = ConstantExpr::getZExt(Const, ExtTy);
        I->setOperand(i, NewConst);
      } else if (isa<UndefValue>(Op))
        I->setOperand(i, UndefValue::get(ExtTy));
    }

    if (shouldPromote(I)) {
      I->mutateType(ExtTy);
      Promoted.insert(I);
    }
  }

  // Finally, any instructions that should be promoted but haven't yet been,
  // need to be handled using intrinsics.
  for (auto *V : *Visited) {
    auto *I = dyn_cast<Instruction>(V);
    if (!I)
      continue;

    if (Sources->count(I) || Sinks->count(I))
      continue;

    if (!shouldPromote(I) || SafeToPromote->count(I) || NewInsts.count(I))
      continue;

    assert(EnableDSP && "DSP intrinisc insertion not enabled!");

    // Replace unsafe instructions with appropriate intrinsic calls.
    LLVM_DEBUG(dbgs() << "ARM CGP: Inserting DSP intrinsic for "
               << *I << "\n");
    Function *DSPInst =
      Intrinsic::getDeclaration(M, getNarrowIntrinsic(I));
    Builder.SetInsertPoint(I);
    Builder.SetCurrentDebugLocation(I->getDebugLoc());
    Value *Args[] = { I->getOperand(0), I->getOperand(1) };
    CallInst *Call = Builder.CreateCall(DSPInst, Args);
    NewInsts.insert(Call);
    ReplaceAllUsersOfWith(I, Call);
  }
}
// Insert truncs in front of the sinks so that each sink still sees an
// operand of its original (narrow) type after the tree has been widened.
void IRPromoter::TruncateSinks() {
  LLVM_DEBUG(dbgs() << "ARM CGP: Fixing up the sinks:\n");

  IRBuilder<> Builder{Ctx};

  // Create a trunc of V to TruncTy, but only for values that were actually
  // promoted (or newly created) and aren't sources. Returns null otherwise.
  auto InsertTrunc = [&](Value *V, Type *TruncTy) -> Instruction* {
    if (!isa<Instruction>(V) || !isa<IntegerType>(V->getType()))
      return nullptr;

    if ((!Promoted.count(V) && !NewInsts.count(V)) || Sources->count(V))
      return nullptr;

    LLVM_DEBUG(dbgs() << "ARM CGP: Creating " << *TruncTy << " Trunc for "
               << *V << "\n");
    Builder.SetInsertPoint(cast<Instruction>(V));
    auto *Trunc = dyn_cast<Instruction>(Builder.CreateTrunc(V, TruncTy));
    if (Trunc)
      NewInsts.insert(Trunc);
    return Trunc;
  };

  // Fix up any stores or returns that use the results of the promoted
  // chain.
  for (auto I : *Sinks) {
    LLVM_DEBUG(dbgs() << "ARM CGP: For Sink: " << *I << "\n");

    // Handle calls separately as we need to iterate over arg operands.
    if (auto *Call = dyn_cast<CallInst>(I)) {
      for (unsigned i = 0; i < Call->getNumArgOperands(); ++i) {
        Value *Arg = Call->getArgOperand(i);
        // TruncTysMap was filled in Mutate() before any types changed.
        Type *Ty = TruncTysMap[Call][i];
        if (Instruction *Trunc = InsertTrunc(Arg, Ty)) {
          Trunc->moveBefore(Call);
          Call->setArgOperand(i, Trunc);
        }
      }
      continue;
    }

    // Special case switches because we need to truncate the condition.
    if (auto *Switch = dyn_cast<SwitchInst>(I)) {
      Type *Ty = TruncTysMap[Switch][0];
      if (Instruction *Trunc = InsertTrunc(Switch->getCondition(), Ty)) {
        Trunc->moveBefore(Switch);
        Switch->setCondition(Trunc);
      }
      continue;
    }

    // Now handle the others.
    for (unsigned i = 0; i < I->getNumOperands(); ++i) {
      Type *Ty = TruncTysMap[I][i];
      if (Instruction *Trunc = InsertTrunc(I->getOperand(i), Ty)) {
        Trunc->moveBefore(I);
        I->setOperand(i, Trunc);
      }
    }
  }
}
// Remove casts made redundant by the promotion, erase the replaced
// instructions, and reset all per-tree state for the next Mutate() call.
void IRPromoter::Cleanup() {
  LLVM_DEBUG(dbgs() << "ARM CGP: Cleanup..\n");
  // Some zexts will now have become redundant, along with their trunc
  // operands, so remove them
  for (auto V : *Visited) {
    if (!isa<ZExtInst>(V))
      continue;

    auto ZExt = cast<ZExtInst>(V);
    if (ZExt->getDestTy() != ExtTy)
      continue;

    Value *Src = ZExt->getOperand(0);
    // After mutation the zext's source may already be i32: the cast is a
    // no-op, so forward the source directly.
    if (ZExt->getSrcTy() == ZExt->getDestTy()) {
      LLVM_DEBUG(dbgs() << "ARM CGP: Removing unnecessary cast: " << *ZExt
                 << "\n");
      ReplaceAllUsersOfWith(ZExt, Src);
      continue;
    }

    // Unless they produce a value that is narrower than ExtTy, we can
    // replace the result of the zext with the input of a newly inserted
    // trunc.
    if (NewInsts.count(Src) && isa<TruncInst>(Src) &&
        Src->getType() == OrigTy) {
      auto *Trunc = cast<TruncInst>(Src);
      assert(Trunc->getOperand(0)->getType() == ExtTy &&
             "expected inserted trunc to be operating on i32");
      ReplaceAllUsersOfWith(ZExt, Trunc->getOperand(0));
    }
  }

  for (auto *I : InstsToRemove) {
    LLVM_DEBUG(dbgs() << "ARM CGP: Removing " << *I << "\n");
    // Drop references first so mutually-referencing dead instructions can
    // be erased in any order.
    I->dropAllReferences();
    I->eraseFromParent();
  }

  // Reset state for the next promotion attempt.
  InstsToRemove.clear();
  NewInsts.clear();
  TruncTysMap.clear();
  Promoted.clear();
  SafeToPromote->clear();
  SafeWrap->clear();
}
// Replace non-source truncs with an AND mask of the destination type's
// maximum value, which preserves the observable (narrow) value while
// keeping the whole chain at the wide type.
void IRPromoter::ConvertTruncs() {
  LLVM_DEBUG(dbgs() << "ARM CGP: Converting truncs..\n");
  IRBuilder<> Builder{Ctx};

  for (auto *V : *Visited) {
    if (!isa<TruncInst>(V) || Sources->count(V))
      continue;

    auto *Trunc = cast<TruncInst>(V);
    Builder.SetInsertPoint(Trunc);
    IntegerType *SrcTy = cast<IntegerType>(Trunc->getOperand(0)->getType());
    // The original destination type was cached in Mutate() before mutation.
    IntegerType *DestTy = cast<IntegerType>(TruncTysMap[Trunc][0]);

    // Mask with (1 << NumBits) - 1, i.e. all-ones at the narrow width.
    unsigned NumBits = DestTy->getScalarSizeInBits();
    ConstantInt *Mask =
      ConstantInt::get(SrcTy, APInt::getMaxValue(NumBits).getZExtValue());
    Value *Masked = Builder.CreateAnd(Trunc->getOperand(0), Mask);

    if (auto *I = dyn_cast<Instruction>(Masked))
      NewInsts.insert(I);

    ReplaceAllUsersOfWith(Trunc, Masked);
  }
}
// Drive the promotion of one use-def tree: cache the original sink/trunc
// types, then run the rewrite phases in order. All set/vector parameters are
// borrowed from the caller and only valid for the duration of this call.
void IRPromoter::Mutate(Type *OrigTy,
                        SetVector<Value*> &Visited,
                        SmallPtrSetImpl<Value*> &Sources,
                        SmallPtrSetImpl<Instruction*> &Sinks,
                        SmallPtrSetImpl<Instruction*> &SafeToPromote,
                        SmallPtrSetImpl<Instruction*> &SafeWrap) {
  LLVM_DEBUG(dbgs() << "ARM CGP: Promoting use-def chains to from "
             << ARMCodeGenPrepare::TypeSize << " to 32-bits\n");

  assert(isa<IntegerType>(OrigTy) && "expected integer type");
  this->OrigTy = cast<IntegerType>(OrigTy);
  assert(OrigTy->getPrimitiveSizeInBits() < ExtTy->getPrimitiveSizeInBits() &&
         "original type not smaller than extended type");

  this->Visited = &Visited;
  this->Sources = &Sources;
  this->Sinks = &Sinks;
  this->SafeToPromote = &SafeToPromote;
  this->SafeWrap = &SafeWrap;

  // Cache original types of the values that will likely need truncating
  for (auto *I : Sinks) {
    if (auto *Call = dyn_cast<CallInst>(I)) {
      for (unsigned i = 0; i < Call->getNumArgOperands(); ++i) {
        Value *Arg = Call->getArgOperand(i);
        TruncTysMap[Call].push_back(Arg->getType());
      }
    } else if (auto *Switch = dyn_cast<SwitchInst>(I))
      TruncTysMap[I].push_back(Switch->getCondition()->getType());
    else {
      for (unsigned i = 0; i < I->getNumOperands(); ++i)
        TruncTysMap[I].push_back(I->getOperand(i)->getType());
    }
  }
  // Also record the destination types of non-source truncs, needed by
  // ConvertTruncs after mutation has changed the IR types.
  for (auto *V : Visited) {
    if (!isa<TruncInst>(V) || Sources.count(V))
      continue;
    auto *Trunc = cast<TruncInst>(V);
    TruncTysMap[Trunc].push_back(Trunc->getDestTy());
  }

  // Convert adds using negative immediates to equivalent instructions that use
  // positive constants.
  PrepareWrappingAdds();

  // Insert zext instructions between sources and their users.
  ExtendSources();

  // Promote visited instructions, mutating their types in place. Also insert
  // DSP intrinsics, if enabled, for adds and subs which would be unsafe to
  // promote.
  PromoteTree();

  // Convert any truncs, that aren't sources, into AND masks.
  ConvertTruncs();

  // Insert trunc instructions for use by calls, stores etc...
  TruncateSinks();

  // Finally, remove unnecessary zexts and truncs, delete old instructions and
  // clear the data structures.
  Cleanup();

  LLVM_DEBUG(dbgs() << "ARM CGP: Mutation complete\n");
}
/// We accept most instructions, as well as Arguments and ConstantInsts. We
/// Disallow casts other than zext and truncs and only allow calls if their
/// return value is zeroext. We don't allow opcodes that can introduce sign
/// bits.
bool ARMCodeGenPrepare::isSupportedValue(Value *V) {
  if (auto *I = dyn_cast<ICmpInst>(V)) {
    // Now that we allow smaller types than TypeSize, only allow icmp of
    // TypeSize because they will require a trunc to be legalised.
    // TODO: Allow icmp of smaller types, and calculate at the end
    // whether the transform would be beneficial.
    if (isa<PointerType>(I->getOperand(0)->getType()))
      return true;
    return EqualTypeSize(I->getOperand(0));
  }

  if (GenerateSignBits(V)) {
    LLVM_DEBUG(dbgs() << "ARM CGP: No, instruction can generate sign bits.\n");
    return false;
  }

  // Memory instructions
  if (isa<StoreInst>(V) || isa<GetElementPtrInst>(V))
    return true;

  // Branches and targets.
  if (isa<BranchInst>(V) || isa<SwitchInst>(V) || isa<BasicBlock>(V))
    return true;

  // Non-instruction values that we can handle.
  if ((isa<Constant>(V) && !isa<ConstantExpr>(V)) || isa<Argument>(V))
    return isSupportedType(V);

  if (isa<PHINode>(V) || isa<SelectInst>(V) || isa<ReturnInst>(V) ||
      isa<LoadInst>(V))
    return isSupportedType(V);

  // Casts are fine when either side is a supported (narrow) type.
  if (auto *Cast = dyn_cast<CastInst>(V))
    return isSupportedType(Cast) || isSupportedType(Cast->getOperand(0));

  // Special cases for calls as we need to check for zeroext
  // TODO We should accept calls even if they don't have zeroext, as they can
  // still be sinks.
  if (auto *Call = dyn_cast<CallInst>(V))
    return isSupportedType(Call) &&
           Call->hasRetAttr(Attribute::AttrKind::ZExt);

  // Everything else must be a binary operator of a supported narrow type.
  if (!isa<BinaryOperator>(V))
    return false;

  if (!isSupportedType(V))
    return false;

  return true;
}
/// Check that the type of V would be promoted and that the original type is
/// smaller than the targeted promoted type. Check that we're not trying to
/// promote something larger than our base 'TypeSize' type.
bool ARMCodeGenPrepare::isLegalToPromote(Value *V) {
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return true;

  if (SafeToPromote.count(I))
    return true;

  // Safe when the i32 result provably equals the narrow result (no wrap,
  // no sign bits), or when the wrap has been shown to be benign.
  if (isPromotedResultSafe(V) || isSafeWrap(I)) {
    SafeToPromote.insert(I);
    return true;
  }

  // Only add/sub have narrow DSP intrinsic equivalents (uadd8/16, usub8/16).
  if (I->getOpcode() != Instruction::Add && I->getOpcode() != Instruction::Sub)
    return false;

  // If promotion is not safe, can we use a DSP instruction to natively
  // handle the narrow type?
  if (!ST->hasDSP() || !EnableDSP || !isSupportedType(I))
    return false;

  // The parallel DSP instructions require Thumb-2 on Thumb targets.
  if (ST->isThumb() && !ST->hasThumb2())
    return false;

  // TODO
  // Would it be profitable? For Thumb code, these parallel DSP instructions
  // are only Thumb-2, so we wouldn't be able to dual issue on Cortex-M33. For
  // Cortex-A, specifically Cortex-A72, the latency is double and throughput is
  // halved. They also do not take immediates as operands.
  for (auto &Op : I->operands()) {
    if (isa<Constant>(Op)) {
      if (!EnableDSPWithImms)
        return false;
    }
  }
  LLVM_DEBUG(dbgs() << "ARM CGP: Will use an intrinsic for: " << *I << "\n");
  return true;
}
// From a narrow (i8/i16) starting value, explore the connected use-def tree,
// classifying values as sources/sinks, and hand the tree to the IRPromoter
// if the whole tree is supported, legal, and worth promoting.
bool ARMCodeGenPrepare::TryToPromote(Value *V) {
  OrigTy = V->getType();
  TypeSize = OrigTy->getPrimitiveSizeInBits();
  // Only i8..i16 starting widths are handled.
  if (TypeSize > 16 || TypeSize < 8)
    return false;

  SafeToPromote.clear();
  SafeWrap.clear();

  if (!isSupportedValue(V) || !shouldPromote(V) || !isLegalToPromote(V))
    return false;

  LLVM_DEBUG(dbgs() << "ARM CGP: TryToPromote: " << *V << ", TypeSize = "
             << TypeSize << "\n");

  SetVector<Value*> WorkList;
  SmallPtrSet<Value*, 8> Sources;
  SmallPtrSet<Instruction*, 4> Sinks;
  SetVector<Value*> CurrentVisited;
  WorkList.insert(V);

  // Return true if V was added to the worklist as a supported instruction,
  // if it was already visited, or if we don't need to explore it (e.g.
  // pointer values and GEPs), and false otherwise.
  auto AddLegalInst = [&](Value *V) {
    if (CurrentVisited.count(V))
      return true;

    // Ignore GEPs because they don't need promoting and the constant indices
    // will prevent the transformation.
    if (isa<GetElementPtrInst>(V))
      return true;

    if (!isSupportedValue(V) || (shouldPromote(V) && !isLegalToPromote(V))) {
      LLVM_DEBUG(dbgs() << "ARM CGP: Can't handle: " << *V << "\n");
      return false;
    }

    WorkList.insert(V);
    return true;
  };

  // Iterate through, and add to, a tree of operands and users in the use-def.
  while (!WorkList.empty()) {
    Value *V = WorkList.back();
    WorkList.pop_back();
    if (CurrentVisited.count(V))
      continue;

    // Ignore non-instructions, other than arguments.
    if (!isa<Instruction>(V) && !isSource(V))
      continue;

    // If we've already visited this value from somewhere, bail now because
    // the tree has already been explored.
    // TODO: This could limit the transform, ie if we try to promote something
    // from an i8 and fail first, before trying an i16.
    if (AllVisited.count(V))
      return false;

    CurrentVisited.insert(V);
    AllVisited.insert(V);

    // Calls can be both sources and sinks.
    if (isSink(V))
      Sinks.insert(cast<Instruction>(V));

    if (isSource(V))
      Sources.insert(V);

    if (!isSink(V) && !isSource(V)) {
      if (auto *I = dyn_cast<Instruction>(V)) {
        // Visit operands of any instruction visited.
        for (auto &U : I->operands()) {
          if (!AddLegalInst(U))
            return false;
        }
      }
    }

    // Don't visit users of a node which isn't going to be mutated unless its a
    // source.
    if (isSource(V) || shouldPromote(V)) {
      for (Use &U : V->uses()) {
        if (!AddLegalInst(U.getUser()))
          return false;
      }
    }
  }

  LLVM_DEBUG(dbgs() << "ARM CGP: Visited nodes:\n";
             for (auto *I : CurrentVisited)
               I->dump();
             );

  // Count the values that would actually be mutated; sources and sinks keep
  // their types. Skip trivially small trees.
  unsigned ToPromote = 0;
  for (auto *V : CurrentVisited) {
    if (Sources.count(V))
      continue;
    if (Sinks.count(cast<Instruction>(V)))
      continue;
    ++ToPromote;
  }

  if (ToPromote < 2)
    return false;

  Promoter->Mutate(OrigTy, CurrentVisited, Sources, Sinks, SafeToPromote,
                   SafeWrap);
  return true;
}
// Create the per-module IRPromoter; released in doFinalization. Returns
// false as the module is not modified here.
bool ARMCodeGenPrepare::doInitialization(Module &M) {
  Promoter = new IRPromoter(&M);
  return false;
}
// Scan every unsigned, integer-typed icmp in the function and try to promote
// the use-def trees of its operands. Returns true if any IR was changed.
bool ARMCodeGenPrepare::runOnFunction(Function &F) {
  if (skipFunction(F) || DisableCGP)
    return false;

  auto *TPC = &getAnalysis<TargetPassConfig>();
  // NOTE(review): the address of a getAnalysis<> result can never be null,
  // so this check is always false — candidate for removal upstream.
  if (!TPC)
    return false;

  const TargetMachine &TM = TPC->getTM<TargetMachine>();
  ST = &TM.getSubtarget<ARMSubtarget>(F);
  bool MadeChange = false;
  LLVM_DEBUG(dbgs() << "ARM CGP: Running on " << F.getName() << "\n");

  // Search up from icmps to try to promote their operands.
  for (BasicBlock &BB : F) {
    auto &Insts = BB.getInstList();
    for (auto &I : Insts) {
      // Skip anything already explored by a previous promotion attempt.
      if (AllVisited.count(&I))
        continue;

      if (isa<ICmpInst>(I)) {
        auto &CI = cast<ICmpInst>(I);

        // Skip signed or pointer compares
        if (CI.isSigned() || !isa<IntegerType>(CI.getOperand(0)->getType()))
          continue;

        LLVM_DEBUG(dbgs() << "ARM CGP: Searching from: " << CI << "\n");

        for (auto &Op : CI.operands()) {
          if (auto *I = dyn_cast<Instruction>(Op))
            MadeChange |= TryToPromote(I);
        }
      }
    }
  }
  // In debug builds, verify we didn't break the function.
  LLVM_DEBUG(if (verifyFunction(F, &dbgs())) {
               dbgs() << F;
               report_fatal_error("Broken function after type promotion");
             });
  if (MadeChange)
    LLVM_DEBUG(dbgs() << "After ARMCodeGenPrepare: " << F << "\n");

  return MadeChange;
}
// Destroy the IRPromoter allocated in doInitialization. Returns false as
// the module is not modified here.
bool ARMCodeGenPrepare::doFinalization(Module &M) {
  delete Promoter;
  return false;
}
// Register the pass with the legacy pass manager.
INITIALIZE_PASS_BEGIN(ARMCodeGenPrepare, DEBUG_TYPE,
                      "ARM IR optimizations", false, false)
INITIALIZE_PASS_END(ARMCodeGenPrepare, DEBUG_TYPE, "ARM IR optimizations",
                    false, false)

// Static member definitions (declared in the class above).
char ARMCodeGenPrepare::ID = 0;
unsigned ARMCodeGenPrepare::TypeSize = 0;

// Factory used by the ARM target to add this pass to the pipeline.
FunctionPass *llvm::createARMCodeGenPreparePass() {
  return new ARMCodeGenPrepare();
}