//===- ARMParallelDSP.cpp - Parallel DSP Pass -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Armv6 introduced instructions to perform 32-bit SIMD operations. The
/// purpose of this pass is to do some IR pattern matching to create ACLE
/// DSP intrinsics, which map onto these 32-bit SIMD operations.
/// This pass runs only when unaligned accesses are supported/enabled.
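///
/// A minimal sketch (illustrative, not taken from this file) of the kind of
/// loop the pass targets, assuming int16_t input arrays:
///
///   for (int i = 0; i < N; i += 2)
///     acc += a[i] * b[i] + a[i + 1] * b[i + 1];
///
/// where each pair of 16x16-bit multiplies can become a single smlad.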
//
//===----------------------------------------------------------------------===//
#include "ARM.h"
#include "ARMSubtarget.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/PassRegistry.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "arm-parallel-dsp"

STATISTIC(NumSMLAD, "Number of smlad instructions generated");

static cl::opt<bool>
DisableParallelDSP("disable-arm-parallel-dsp", cl::Hidden, cl::init(false),
                   cl::desc("Disable the ARM Parallel DSP pass"));

static cl::opt<unsigned>
NumLoadLimit("arm-parallel-dsp-load-limit", cl::Hidden, cl::init(16),
             cl::desc("Limit the number of loads analysed"));
namespace {
  struct MulCandidate;
  class Reduction;

  using MulCandList = SmallVector<std::unique_ptr<MulCandidate>, 8>;
  using MemInstList = SmallVectorImpl<LoadInst*>;
  using MulPairList = SmallVector<std::pair<MulCandidate*, MulCandidate*>, 8>;

  // 'MulCandidate' holds the multiplication instructions that are candidates
  // for parallel execution.
  struct MulCandidate {
    Instruction   *Root;
    Value*        LHS;
    Value*        RHS;
    bool          Exchange = false;
    bool          ReadOnly = true;
    bool          Paired = false;
    SmallVector<LoadInst*, 2> VecLd;    // Container for loads to widen.

    MulCandidate(Instruction *I, Value *lhs, Value *rhs) :
      Root(I), LHS(lhs), RHS(rhs) { }

    bool HasTwoLoadInputs() const {
      return isa<LoadInst>(LHS) && isa<LoadInst>(RHS);
    }

    LoadInst *getBaseLoad() const {
      return VecLd.front();
    }
  };
  /// Represent a sequence of multiply-accumulate operations with the aim to
  /// perform the multiplications in parallel.
  class Reduction {
    Instruction     *Root = nullptr;
    Value           *Acc = nullptr;
    MulCandList     Muls;
    MulPairList     MulPairs;
    SetVector<Instruction*> Adds;

  public:
    Reduction() = delete;

    Reduction(Instruction *Add) : Root(Add) { }

    /// Record an Add instruction that is a part of this reduction.
    void InsertAdd(Instruction *I) { Adds.insert(I); }

    /// Create MulCandidates, each rooted at a Mul instruction, that are part
    /// of this reduction.
    void InsertMuls() {
      auto GetMulOperand = [](Value *V) -> Instruction* {
        if (auto *SExt = dyn_cast<SExtInst>(V)) {
          if (auto *I = dyn_cast<Instruction>(SExt->getOperand(0)))
            if (I->getOpcode() == Instruction::Mul)
              return I;
        } else if (auto *I = dyn_cast<Instruction>(V)) {
          if (I->getOpcode() == Instruction::Mul)
            return I;
        }
        return nullptr;
      };

      auto InsertMul = [this](Instruction *I) {
        Value *LHS = cast<Instruction>(I->getOperand(0))->getOperand(0);
        Value *RHS = cast<Instruction>(I->getOperand(1))->getOperand(0);
        Muls.push_back(std::make_unique<MulCandidate>(I, LHS, RHS));
      };

      for (auto *Add : Adds) {
        if (Add == Acc)
          continue;
        if (auto *Mul = GetMulOperand(Add->getOperand(0)))
          InsertMul(Mul);
        if (auto *Mul = GetMulOperand(Add->getOperand(1)))
          InsertMul(Mul);
      }
    }

    /// Add the incoming accumulator value; return true if a value has not
    /// already been added. Returning false signals to the user that this
    /// reduction already has a value to initialise the accumulator.
    bool InsertAcc(Value *V) {
      if (Acc)
        return false;
      Acc = V;
      return true;
    }

    /// Set two MulCandidates, rooted at muls, that can be executed as a
    /// single parallel operation.
    void AddMulPair(MulCandidate *Mul0, MulCandidate *Mul1,
                    bool Exchange = false) {
      LLVM_DEBUG(dbgs() << "Pairing:\n"
                 << *Mul0->Root << "\n"
                 << *Mul1->Root << "\n");
      Mul0->Paired = true;
      Mul1->Paired = true;
      if (Exchange)
        Mul1->Exchange = true;
      MulPairs.push_back(std::make_pair(Mul0, Mul1));
    }

    /// Return true if enough mul operations are found that can be executed in
    /// parallel.
    bool CreateParallelPairs();

    /// Return the add instruction which is the root of the reduction.
    Instruction *getRoot() { return Root; }

    bool is64Bit() const { return Root->getType()->isIntegerTy(64); }

    Type *getType() const { return Root->getType(); }

    /// Return the incoming value to be accumulated. This may be null.
    Value *getAccumulator() { return Acc; }

    /// Return the set of adds that comprise the reduction.
    SetVector<Instruction*> &getAdds() { return Adds; }

    /// Return the MulCandidates, rooted at mul instructions, that comprise
    /// the reduction.
    MulCandList &getMuls() { return Muls; }

    /// Return the MulCandidates, rooted at mul instructions, that have been
    /// paired for parallel execution.
    MulPairList &getMulPairs() { return MulPairs; }

    /// To finalise, replace the uses of the root with the intrinsic call.
    void UpdateRoot(Instruction *SMLAD) {
      Root->replaceAllUsesWith(SMLAD);
    }

    void dump() {
      LLVM_DEBUG(dbgs() << "Reduction:\n";
        for (auto *Add : Adds)
          LLVM_DEBUG(dbgs() << *Add << "\n");
        for (auto &Mul : Muls)
          LLVM_DEBUG(dbgs() << *Mul->Root << "\n"
                     << "  " << *Mul->LHS << "\n"
                     << "  " << *Mul->RHS << "\n");
        LLVM_DEBUG(if (Acc) dbgs() << "Acc in: " << *Acc << "\n")
      );
    }
  };
  class WidenedLoad {
    LoadInst *NewLd = nullptr;
    SmallVector<LoadInst*, 4> Loads;

  public:
    WidenedLoad(SmallVectorImpl<LoadInst*> &Lds, LoadInst *Wide)
      : NewLd(Wide) {
      append_range(Loads, Lds);
    }
    LoadInst *getLoad() {
      return NewLd;
    }
  };
  class ARMParallelDSP : public FunctionPass {
    ScalarEvolution   *SE;
    AliasAnalysis     *AA;
    TargetLibraryInfo *TLI;
    DominatorTree     *DT;
    const DataLayout  *DL;
    Module            *M;
    std::map<LoadInst*, LoadInst*> LoadPairs;
    SmallPtrSet<LoadInst*, 4> OffsetLoads;
    std::map<LoadInst*, std::unique_ptr<WidenedLoad>> WideLoads;

    template<unsigned>
    bool IsNarrowSequence(Value *V);
    bool Search(Value *V, BasicBlock *BB, Reduction &R);
    bool RecordMemoryOps(BasicBlock *BB);
    void InsertParallelMACs(Reduction &Reduction);
    bool AreSequentialLoads(LoadInst *Ld0, LoadInst *Ld1, MemInstList &VecMem);
    LoadInst* CreateWideLoad(MemInstList &Loads, IntegerType *LoadTy);
    bool CreateParallelPairs(Reduction &R);

    /// Try to match and generate: SMLAD, SMLADX - Signed Multiply Accumulate
    /// Dual performs two signed 16x16-bit multiplications. It adds the
    /// products to a 32-bit accumulate operand. Optionally, the instruction
    /// can exchange the halfwords of the second operand before performing the
    /// arithmetic.
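    ///
    /// Roughly (a sketch of the semantics, not a formal definition):
    ///   smlad Rd, Rn, Rm, Ra  computes
    ///     Rd = Ra + sext(Rn[15:0])  * sext(Rm[15:0])
    ///             + sext(Rn[31:16]) * sext(Rm[31:16])
    /// while smladx swaps the halfwords of Rm before multiplying.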
    bool MatchSMLAD(Function &F);

  public:
    static char ID;

    ARMParallelDSP() : FunctionPass(ID) { }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      FunctionPass::getAnalysisUsage(AU);
      AU.addRequired<AssumptionCacheTracker>();
      AU.addRequired<ScalarEvolutionWrapperPass>();
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<TargetPassConfig>();
      AU.addPreserved<ScalarEvolutionWrapperPass>();
      AU.addPreserved<GlobalsAAWrapperPass>();
      AU.setPreservesCFG();
    }
    bool runOnFunction(Function &F) override {
      if (DisableParallelDSP)
        return false;
      if (skipFunction(F))
        return false;

      SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
      AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
      TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
      DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
      auto &TPC = getAnalysis<TargetPassConfig>();

      M = F.getParent();
      DL = &M->getDataLayout();

      auto &TM = TPC.getTM<TargetMachine>();
      auto *ST = &TM.getSubtarget<ARMSubtarget>(F);

      if (!ST->allowsUnalignedMem()) {
        LLVM_DEBUG(dbgs() << "Unaligned memory access not supported: not "
                             "running pass ARMParallelDSP\n");
        return false;
      }

      if (!ST->hasDSP()) {
        LLVM_DEBUG(dbgs() << "DSP extension not enabled: not running pass "
                             "ARMParallelDSP\n");
        return false;
      }

      if (!ST->isLittle()) {
        LLVM_DEBUG(dbgs() << "Only supporting little endian: not running pass "
                          << "ARMParallelDSP\n");
        return false;
      }

      LLVM_DEBUG(dbgs() << "\n== Parallel DSP pass ==\n");
      LLVM_DEBUG(dbgs() << " - " << F.getName() << "\n\n");

      bool Changes = MatchSMLAD(F);
      return Changes;
    }
  };
}
bool ARMParallelDSP::AreSequentialLoads(LoadInst *Ld0, LoadInst *Ld1,
                                        MemInstList &VecMem) {
  if (!Ld0 || !Ld1)
    return false;

  if (!LoadPairs.count(Ld0) || LoadPairs[Ld0] != Ld1)
    return false;

  LLVM_DEBUG(dbgs() << "Loads are sequential and valid:\n";
             dbgs() << "Ld0:"; Ld0->dump();
             dbgs() << "Ld1:"; Ld1->dump();
            );

  VecMem.clear();
  VecMem.push_back(Ld0);
  VecMem.push_back(Ld1);
  return true;
}
// MaxBitWidth: the maximum supported bitwidth of the elements in the DSP
// instructions, which is set to 16. So here we should collect all i8 and i16
// narrow operations.
// TODO: we currently only collect i16, and will support i8 later, so that's
// why we check that types are equal to MaxBitWidth, and not <= MaxBitWidth.
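//
// For illustration, a narrow sequence that this accepts is a sign-extended,
// pairable load (an assumed snippet, not from a test case):
//   %ld   = load i16, i16* %addr
//   %sext = sext i16 %ld to i32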
template<unsigned MaxBitWidth>
bool ARMParallelDSP::IsNarrowSequence(Value *V) {
  if (auto *SExt = dyn_cast<SExtInst>(V)) {
    if (SExt->getSrcTy()->getIntegerBitWidth() != MaxBitWidth)
      return false;

    if (auto *Ld = dyn_cast<LoadInst>(SExt->getOperand(0))) {
      // Check that this load could be paired.
      return LoadPairs.count(Ld) || OffsetLoads.count(Ld);
    }
  }
  return false;
}
/// Iterate through the block and record base, offset pairs of loads which can
/// be widened into a single load.
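///
/// For example (illustrative), two i16 loads from p[0] and p[1] are recorded
/// as a (base, offset) pair so they can later become one i32 load.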
bool ARMParallelDSP::RecordMemoryOps(BasicBlock *BB) {
  SmallVector<LoadInst*, 8> Loads;
  SmallVector<Instruction*, 8> Writes;
  LoadPairs.clear();
  WideLoads.clear();

  // Collect loads and instructions that may write to memory. For now we only
  // record loads which are simple, sign-extended and have a single user.
  // TODO: Allow zero-extended loads.
  for (auto &I : *BB) {
    if (I.mayWriteToMemory())
      Writes.push_back(&I);
    auto *Ld = dyn_cast<LoadInst>(&I);
    if (!Ld || !Ld->isSimple() ||
        !Ld->hasOneUse() || !isa<SExtInst>(Ld->user_back()))
      continue;
    Loads.push_back(Ld);
  }

  if (Loads.empty() || Loads.size() > NumLoadLimit)
    return false;

  using InstSet = std::set<Instruction*>;
  using DepMap = std::map<Instruction*, InstSet>;
  DepMap RAWDeps;

  // Record any writes that may alias a load.
  const auto Size = LocationSize::beforeOrAfterPointer();
  for (auto Write : Writes) {
    for (auto Read : Loads) {
      MemoryLocation ReadLoc =
        MemoryLocation(Read->getPointerOperand(), Size);

      if (!isModOrRefSet(intersectModRef(AA->getModRefInfo(Write, ReadLoc),
                                         ModRefInfo::ModRef)))
        continue;
      if (Write->comesBefore(Read))
        RAWDeps[Read].insert(Write);
    }
  }

  // Check that there isn't a write between the two loads which would prevent
  // them from being safely merged.
  auto SafeToPair = [&](LoadInst *Base, LoadInst *Offset) {
    bool BaseFirst = Base->comesBefore(Offset);
    LoadInst *Dominator = BaseFirst ? Base : Offset;
    LoadInst *Dominated = BaseFirst ? Offset : Base;

    if (RAWDeps.count(Dominated)) {
      InstSet &WritesBefore = RAWDeps[Dominated];

      for (auto Before : WritesBefore) {
        // We can't move the second load backward, past a write, to merge
        // with the first load.
        if (Dominator->comesBefore(Before))
          return false;
      }
    }
    return true;
  };

  // Record base, offset load pairs.
  for (auto *Base : Loads) {
    for (auto *Offset : Loads) {
      if (Base == Offset || OffsetLoads.count(Offset))
        continue;

      if (isConsecutiveAccess(Base, Offset, *DL, *SE) &&
          SafeToPair(Base, Offset)) {
        LoadPairs[Base] = Offset;
        OffsetLoads.insert(Offset);
        break;
      }
    }
  }

  LLVM_DEBUG(if (!LoadPairs.empty()) {
               dbgs() << "Consecutive load pairs:\n";
               for (auto &MapIt : LoadPairs) {
                 LLVM_DEBUG(dbgs() << *MapIt.first << ", "
                            << *MapIt.second << "\n");
               }
             });
  return LoadPairs.size() > 1;
}
// Search recursively back through the operands to find a tree of values that
// form a multiply-accumulate chain. The search records the Add and Mul
// instructions that form the reduction and allows us to find a single value
// to be used as the initial input to the accumulator.
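//
// For example (a sketch), searching from 'acc1 = add(add(mul0, acc0), mul1)'
// records both adds, recurses into mul0 and mul1, and takes acc0 as the
// incoming accumulator value.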
bool ARMParallelDSP::Search(Value *V, BasicBlock *BB, Reduction &R) {
  // If we find a non-instruction, try to use it as the initial accumulator
  // value. This may have already been found during the search, in which case
  // this function will return false, signaling a search fail.
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return R.InsertAcc(V);

  if (I->getParent() != BB)
    return false;

  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::PHI:
    // Could be the accumulator value.
    return R.InsertAcc(V);
  case Instruction::Add: {
    // Adds should be adding together two muls, or another add and a mul to
    // be within the mac chain. One of the operands may also be the
    // accumulator value, at which point we should stop searching.
    R.InsertAdd(I);
    Value *LHS = I->getOperand(0);
    Value *RHS = I->getOperand(1);
    bool ValidLHS = Search(LHS, BB, R);
    bool ValidRHS = Search(RHS, BB, R);

    if (ValidLHS && ValidRHS)
      return true;

    return R.InsertAcc(I);
  }
  case Instruction::Mul: {
    Value *MulOp0 = I->getOperand(0);
    Value *MulOp1 = I->getOperand(1);
    return IsNarrowSequence<16>(MulOp0) && IsNarrowSequence<16>(MulOp1);
  }
  case Instruction::SExt:
    return Search(I->getOperand(0), BB, R);
  }
  return false;
}
// The pass needs to identify integer add/sub reductions of 16-bit vector
// multiplications.
// To use SMLAD:
// 1) we first need to find an integer add, then look for this pattern:
//
//      acc0 = ...
//      ld0 = load i16
//      sext0 = sext i16 %ld0 to i32
//      ld1 = load i16
//      sext1 = sext i16 %ld1 to i32
//      mul0 = mul %sext0, %sext1
//      ld2 = load i16
//      sext2 = sext i16 %ld2 to i32
//      ld3 = load i16
//      sext3 = sext i16 %ld3 to i32
//      mul1 = mul i32 %sext2, %sext3
//      add0 = add i32 %mul0, %acc0
//      acc1 = add i32 %add0, %mul1
//
// Which can be selected to:
//
//      ldr r0
//      ldr r1
//      smlad r2, r0, r1, r2
//
// If constants are used instead of loads, these will need to be hoisted
// out and into a register.
//
// If loop invariants are used instead of loads, these need to be packed
// before the loop begins.
//
bool ARMParallelDSP::MatchSMLAD(Function &F) {
  bool Changed = false;

  for (auto &BB : F) {
    SmallPtrSet<Instruction*, 4> AllAdds;
    if (!RecordMemoryOps(&BB))
      continue;

    for (Instruction &I : reverse(BB)) {
      if (I.getOpcode() != Instruction::Add)
        continue;

      if (AllAdds.count(&I))
        continue;

      const auto *Ty = I.getType();
      if (!Ty->isIntegerTy(32) && !Ty->isIntegerTy(64))
        continue;

      Reduction R(&I);
      if (!Search(&I, &BB, R))
        continue;

      R.InsertMuls();
      LLVM_DEBUG(dbgs() << "After search, Reduction:\n"; R.dump());

      if (!CreateParallelPairs(R))
        continue;

      InsertParallelMACs(R);
      Changed = true;
      AllAdds.insert(R.getAdds().begin(), R.getAdds().end());
    }
  }

  return Changed;
}
bool ARMParallelDSP::CreateParallelPairs(Reduction &R) {

  // Not enough mul operations to make a pair.
  if (R.getMuls().size() < 2)
    return false;

  // Check that the muls operate directly upon sign extended loads.
  for (auto &MulCand : R.getMuls()) {
    if (!MulCand->HasTwoLoadInputs())
      return false;
  }

  auto CanPair = [&](Reduction &R, MulCandidate *PMul0, MulCandidate *PMul1) {
    // The first elements of each vector should be loads with sexts. If we
    // find that they're two pairs of consecutive loads, then these can be
    // transformed into two wider loads and the users can be replaced with
    // DSP intrinsics.
    auto Ld0 = static_cast<LoadInst*>(PMul0->LHS);
    auto Ld1 = static_cast<LoadInst*>(PMul1->LHS);
    auto Ld2 = static_cast<LoadInst*>(PMul0->RHS);
    auto Ld3 = static_cast<LoadInst*>(PMul1->RHS);

    // Check that each mul is operating on two different loads.
    if (Ld0 == Ld2 || Ld1 == Ld3)
      return false;

    if (AreSequentialLoads(Ld0, Ld1, PMul0->VecLd)) {
      if (AreSequentialLoads(Ld2, Ld3, PMul1->VecLd)) {
        LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
        R.AddMulPair(PMul0, PMul1);
        return true;
      } else if (AreSequentialLoads(Ld3, Ld2, PMul1->VecLd)) {
        LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
        LLVM_DEBUG(dbgs() << "    exchanging Ld2 and Ld3\n");
        R.AddMulPair(PMul0, PMul1, true);
        return true;
      }
    } else if (AreSequentialLoads(Ld1, Ld0, PMul0->VecLd) &&
               AreSequentialLoads(Ld2, Ld3, PMul1->VecLd)) {
      LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
      LLVM_DEBUG(dbgs() << "    exchanging Ld0 and Ld1\n");
      LLVM_DEBUG(dbgs() << "    and swapping muls\n");
      // Only the second operand can be exchanged, so swap the muls.
      R.AddMulPair(PMul1, PMul0, true);
      return true;
    }
    return false;
  };

  MulCandList &Muls = R.getMuls();
  const unsigned Elems = Muls.size();
  for (unsigned i = 0; i < Elems; ++i) {
    MulCandidate *PMul0 = static_cast<MulCandidate*>(Muls[i].get());
    if (PMul0->Paired)
      continue;

    for (unsigned j = 0; j < Elems; ++j) {
      if (i == j)
        continue;

      MulCandidate *PMul1 = static_cast<MulCandidate*>(Muls[j].get());
      if (PMul1->Paired)
        continue;

      const Instruction *Mul0 = PMul0->Root;
      const Instruction *Mul1 = PMul1->Root;
      if (Mul0 == Mul1)
        continue;

      assert(PMul0 != PMul1 && "expected different chains");

      if (CanPair(R, PMul0, PMul1))
        break;
    }
  }
  return !R.getMulPairs().empty();
}
void ARMParallelDSP::InsertParallelMACs(Reduction &R) {

  auto CreateSMLAD = [&](LoadInst* WideLd0, LoadInst *WideLd1,
                         Value *Acc, bool Exchange,
                         Instruction *InsertAfter) {
    // Replace the reduction chain with an intrinsic call.
    Value* Args[] = { WideLd0, WideLd1, Acc };
    Function *SMLAD = nullptr;
    if (Exchange)
      SMLAD = Acc->getType()->isIntegerTy(32) ?
        Intrinsic::getDeclaration(M, Intrinsic::arm_smladx) :
        Intrinsic::getDeclaration(M, Intrinsic::arm_smlaldx);
    else
      SMLAD = Acc->getType()->isIntegerTy(32) ?
        Intrinsic::getDeclaration(M, Intrinsic::arm_smlad) :
        Intrinsic::getDeclaration(M, Intrinsic::arm_smlald);

    IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
                                BasicBlock::iterator(InsertAfter));
    Instruction *Call = Builder.CreateCall(SMLAD, Args);
    NumSMLAD++;
    return Call;
  };

  // Return the instruction after the dominated instruction.
  auto GetInsertPoint = [this](Value *A, Value *B) {
    assert((isa<Instruction>(A) || isa<Instruction>(B)) &&
           "expected at least one instruction");

    Value *V = nullptr;
    if (!isa<Instruction>(A))
      V = B;
    else if (!isa<Instruction>(B))
      V = A;
    else
      V = DT->dominates(cast<Instruction>(A), cast<Instruction>(B)) ? B : A;

    return &*++BasicBlock::iterator(cast<Instruction>(V));
  };

  Value *Acc = R.getAccumulator();

  // For any muls that were discovered but not paired, accumulate their values
  // as before.
  IRBuilder<NoFolder> Builder(R.getRoot()->getParent());
  MulCandList &MulCands = R.getMuls();
  for (auto &MulCand : MulCands) {
    if (MulCand->Paired)
      continue;

    Instruction *Mul = cast<Instruction>(MulCand->Root);
    LLVM_DEBUG(dbgs() << "Accumulating unpaired mul: " << *Mul << "\n");

    if (R.getType() != Mul->getType()) {
      assert(R.is64Bit() && "expected 64-bit result");
      Builder.SetInsertPoint(&*++BasicBlock::iterator(Mul));
      Mul = cast<Instruction>(Builder.CreateSExt(Mul, R.getRoot()->getType()));
    }

    if (!Acc) {
      Acc = Mul;
      continue;
    }

    // If Acc is the original incoming value to the reduction, it could be a
    // phi. But the phi will dominate Mul, meaning that Mul will be the
    // insertion point.
    Builder.SetInsertPoint(GetInsertPoint(Mul, Acc));
    Acc = Builder.CreateAdd(Mul, Acc);
  }

  if (!Acc) {
    Acc = R.is64Bit() ?
      ConstantInt::get(IntegerType::get(M->getContext(), 64), 0) :
      ConstantInt::get(IntegerType::get(M->getContext(), 32), 0);
  } else if (Acc->getType() != R.getType()) {
    Builder.SetInsertPoint(R.getRoot());
    Acc = Builder.CreateSExt(Acc, R.getType());
  }

  // Roughly sort the mul pairs in their program order.
  llvm::sort(R.getMulPairs(), [](auto &PairA, auto &PairB) {
    const Instruction *A = PairA.first->Root;
    const Instruction *B = PairB.first->Root;
    return A->comesBefore(B);
  });

  IntegerType *Ty = IntegerType::get(M->getContext(), 32);
  for (auto &Pair : R.getMulPairs()) {
    MulCandidate *LHSMul = Pair.first;
    MulCandidate *RHSMul = Pair.second;
    LoadInst *BaseLHS = LHSMul->getBaseLoad();
    LoadInst *BaseRHS = RHSMul->getBaseLoad();
    LoadInst *WideLHS = WideLoads.count(BaseLHS) ?
      WideLoads[BaseLHS]->getLoad() : CreateWideLoad(LHSMul->VecLd, Ty);
    LoadInst *WideRHS = WideLoads.count(BaseRHS) ?
      WideLoads[BaseRHS]->getLoad() : CreateWideLoad(RHSMul->VecLd, Ty);

    Instruction *InsertAfter = GetInsertPoint(WideLHS, WideRHS);
    InsertAfter = GetInsertPoint(InsertAfter, Acc);
    Acc = CreateSMLAD(WideLHS, WideRHS, Acc, RHSMul->Exchange, InsertAfter);
  }
  R.UpdateRoot(cast<Instruction>(Acc));
}
LoadInst* ARMParallelDSP::CreateWideLoad(MemInstList &Loads,
                                         IntegerType *LoadTy) {
  assert(Loads.size() == 2 && "currently only support widening two loads");

  LoadInst *Base = Loads[0];
  LoadInst *Offset = Loads[1];

  Instruction *BaseSExt = dyn_cast<SExtInst>(Base->user_back());
  Instruction *OffsetSExt = dyn_cast<SExtInst>(Offset->user_back());

  assert((BaseSExt && OffsetSExt)
         && "Loads should have a single, extending, user");

  std::function<void(Value*, Value*)> MoveBefore =
    [&](Value *A, Value *B) -> void {
      if (!isa<Instruction>(A) || !isa<Instruction>(B))
        return;

      auto *Source = cast<Instruction>(A);
      auto *Sink = cast<Instruction>(B);

      if (DT->dominates(Source, Sink) ||
          Source->getParent() != Sink->getParent() ||
          isa<PHINode>(Source) || isa<PHINode>(Sink))
        return;

      Source->moveBefore(Sink);
      for (auto &Op : Source->operands())
        MoveBefore(Op, Source);
    };

  // Insert the load at the point of the original dominating load.
  LoadInst *DomLoad = DT->dominates(Base, Offset) ? Base : Offset;
  IRBuilder<NoFolder> IRB(DomLoad->getParent(),
                          ++BasicBlock::iterator(DomLoad));

  // Bitcast the pointer to a wider type and create the wide load, while making
  // sure to maintain the original alignment as this prevents ldrd from being
  // generated when it could be illegal due to memory alignment.
  const unsigned AddrSpace = DomLoad->getPointerAddressSpace();
  Value *VecPtr = IRB.CreateBitCast(Base->getPointerOperand(),
                                    LoadTy->getPointerTo(AddrSpace));
  LoadInst *WideLoad = IRB.CreateAlignedLoad(LoadTy, VecPtr, Base->getAlign());

  // Make sure everything is in the correct order in the basic block.
  MoveBefore(Base->getPointerOperand(), VecPtr);
  MoveBefore(VecPtr, WideLoad);

  // From the wide load, create two values that equal the original two loads.
  // Loads[0] needs trunc while Loads[1] needs a lshr and trunc.
  // TODO: Support big-endian as well.
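  //
  // Illustrative IR (assuming i16 elements widened to an i32 load):
  //   %wide   = load i32, i32* %vecptr, align 2
  //   %bottom = trunc i32 %wide to i16        ; replaces Loads[0]
  //   %shr    = lshr i32 %wide, 16
  //   %top    = trunc i32 %shr to i16         ; replaces Loads[1]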
  Value *Bottom = IRB.CreateTrunc(WideLoad, Base->getType());
  Value *NewBaseSExt = IRB.CreateSExt(Bottom, BaseSExt->getType());
  BaseSExt->replaceAllUsesWith(NewBaseSExt);

  IntegerType *OffsetTy = cast<IntegerType>(Offset->getType());
  Value *ShiftVal = ConstantInt::get(LoadTy, OffsetTy->getBitWidth());
  Value *Top = IRB.CreateLShr(WideLoad, ShiftVal);
  Value *Trunc = IRB.CreateTrunc(Top, OffsetTy);
  Value *NewOffsetSExt = IRB.CreateSExt(Trunc, OffsetSExt->getType());
  OffsetSExt->replaceAllUsesWith(NewOffsetSExt);

  LLVM_DEBUG(dbgs() << "From Base and Offset:\n"
             << *Base << "\n" << *Offset << "\n"
             << "Created Wide Load:\n"
             << *WideLoad << "\n"
             << *Bottom << "\n"
             << *NewBaseSExt << "\n"
             << *Top << "\n"
             << *Trunc << "\n"
             << *NewOffsetSExt << "\n");
  WideLoads.emplace(std::make_pair(Base,
      std::make_unique<WidenedLoad>(Loads, WideLoad)));
  return WideLoad;
}
Pass *llvm::createARMParallelDSPPass() {
  return new ARMParallelDSP();
}

char ARMParallelDSP::ID = 0;

INITIALIZE_PASS_BEGIN(ARMParallelDSP, "arm-parallel-dsp",
                "Transform functions to use DSP intrinsics", false, false)
INITIALIZE_PASS_END(ARMParallelDSP, "arm-parallel-dsp",
                "Transform functions to use DSP intrinsics", false, false)