//===- Scalarizer.cpp - Scalarize vector operations -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass converts vector operations into scalar operations, in order
// to expose optimization opportunities on the individual scalar operations.
// It is mainly intended for targets that do not have vector units, but it
// may also be useful for revectorizing code to different vector widths.
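//
// As an illustrative sketch (the value names are hypothetical, but the
// ".i<N>" suffix matches what this pass emits), a vector add such as
//   %r = fadd <4 x float> %a, %b
// is rewritten into one scalar operation per element:
//   %r.i0 = fadd float %a.i0, %b.i0
//   ...
//   %r.i3 = fadd float %a.i3, %b.i3
// with the original %r rebuilt from InsertElements only if it is still used.
//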
//===----------------------------------------------------------------------===//

#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Options.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/Scalarizer.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "scalarizer"

// This is disabled by default because having separate loads and stores
// makes it more likely that the -combiner-alias-analysis limits will be
// reached.
static cl::opt<bool>
    ScalarizeLoadStore("scalarize-load-store", cl::init(false), cl::Hidden,
                       cl::desc("Allow the scalarizer pass to scalarize loads and stores"));

namespace {

// Used to store the scattered form of a vector.
using ValueVector = SmallVector<Value *, 8>;

// Used to map a vector Value to its scattered form. We use std::map
// because we want iterators to persist across insertion and because the
// values are relatively large.
using ScatterMap = std::map<Value *, ValueVector>;

// Lists Instructions that have been replaced with scalar implementations,
// along with a pointer to their scattered forms.
using GatherList = SmallVector<std::pair<Instruction *, ValueVector *>, 16>;

// Provides a very limited vector-like interface for lazily accessing one
// component of a scattered vector or vector pointer.
class Scatterer {
public:
  Scatterer() = default;

  // Scatter V into Size components. If new instructions are needed,
  // insert them before BBI in BB. If Cache is nonnull, use it to cache
  // the results.
  Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
            ValueVector *cachePtr = nullptr);

  // Return component I, creating a new Value for it if necessary.
  Value *operator[](unsigned I);

  // Return the number of components.
  unsigned size() const { return Size; }

private:
  BasicBlock *BB;
  BasicBlock::iterator BBI;
  Value *V;
  ValueVector *CachePtr;
  PointerType *PtrTy;
  ValueVector Tmp;
  unsigned Size;
};

// FCmpSplitter(FCI)(Builder, X, Y, Name) uses Builder to create an FCmp
// called Name that compares X and Y in the same way as FCI.
struct FCmpSplitter {
  FCmpSplitter(FCmpInst &fci) : FCI(fci) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateFCmp(FCI.getPredicate(), Op0, Op1, Name);
  }

  FCmpInst &FCI;
};

// ICmpSplitter(ICI)(Builder, X, Y, Name) uses Builder to create an ICmp
// called Name that compares X and Y in the same way as ICI.
struct ICmpSplitter {
  ICmpSplitter(ICmpInst &ici) : ICI(ici) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateICmp(ICI.getPredicate(), Op0, Op1, Name);
  }

  ICmpInst &ICI;
};

// UnarySplitter(UO)(Builder, X, Name) uses Builder to create
// a unary operator like UO called Name with operand X.
struct UnarySplitter {
  UnarySplitter(UnaryOperator &uo) : UO(uo) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op, const Twine &Name) const {
    return Builder.CreateUnOp(UO.getOpcode(), Op, Name);
  }

  UnaryOperator &UO;
};

// BinarySplitter(BO)(Builder, X, Y, Name) uses Builder to create
// a binary operator like BO called Name with operands X and Y.
struct BinarySplitter {
  BinarySplitter(BinaryOperator &bo) : BO(bo) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateBinOp(BO.getOpcode(), Op0, Op1, Name);
  }

  BinaryOperator &BO;
};

// Information about a load or store that we're scalarizing.
struct VectorLayout {
  VectorLayout() = default;

  // Return the alignment of element I.
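  // As a worked example (illustrative values): for a <4 x float> with
  // VecAlign == 16 and ElemSize == 4, elements 0..3 sit at byte offsets
  // 0, 4, 8 and 12, giving alignments 16, 4, 8 and 4 respectively.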
  uint64_t getElemAlign(unsigned I) {
    return MinAlign(VecAlign, I * ElemSize);
  }

  // The type of the vector.
  VectorType *VecTy = nullptr;

  // The type of each element.
  Type *ElemTy = nullptr;

  // The alignment of the vector.
  uint64_t VecAlign = 0;

  // The size of each element.
  uint64_t ElemSize = 0;
};

class ScalarizerVisitor : public InstVisitor<ScalarizerVisitor, bool> {
public:
  ScalarizerVisitor(unsigned ParallelLoopAccessMDKind)
    : ParallelLoopAccessMDKind(ParallelLoopAccessMDKind) {
  }

  bool visit(Function &F);

  // InstVisitor methods. They return true if the instruction was scalarized,
  // false if nothing changed.
  bool visitInstruction(Instruction &I) { return false; }
  bool visitSelectInst(SelectInst &SI);
  bool visitICmpInst(ICmpInst &ICI);
  bool visitFCmpInst(FCmpInst &FCI);
  bool visitUnaryOperator(UnaryOperator &UO);
  bool visitBinaryOperator(BinaryOperator &BO);
  bool visitGetElementPtrInst(GetElementPtrInst &GEPI);
  bool visitCastInst(CastInst &CI);
  bool visitBitCastInst(BitCastInst &BCI);
  bool visitShuffleVectorInst(ShuffleVectorInst &SVI);
  bool visitPHINode(PHINode &PHI);
  bool visitLoadInst(LoadInst &LI);
  bool visitStoreInst(StoreInst &SI);
  bool visitCallInst(CallInst &CI);

private:
  Scatterer scatter(Instruction *Point, Value *V);
  void gather(Instruction *Op, const ValueVector &CV);
  bool canTransferMetadata(unsigned Kind);
  void transferMetadataAndIRFlags(Instruction *Op, const ValueVector &CV);
  bool getVectorLayout(Type *Ty, unsigned Alignment, VectorLayout &Layout,
                       const DataLayout &DL);
  bool finish();

  template<typename T> bool splitUnary(Instruction &, const T &);
  template<typename T> bool splitBinary(Instruction &, const T &);

  bool splitCall(CallInst &CI);

  ScatterMap Scattered;
  GatherList Gathered;

  unsigned ParallelLoopAccessMDKind;
};

class ScalarizerLegacyPass : public FunctionPass {
public:
  static char ID;

  ScalarizerLegacyPass() : FunctionPass(ID) {
    initializeScalarizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;
};

} // end anonymous namespace

char ScalarizerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(ScalarizerLegacyPass, "scalarizer",
                      "Scalarize vector operations", false, false)
INITIALIZE_PASS_END(ScalarizerLegacyPass, "scalarizer",
                    "Scalarize vector operations", false, false)

Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
                     ValueVector *cachePtr)
    : BB(bb), BBI(bbi), V(v), CachePtr(cachePtr) {
  Type *Ty = V->getType();
  PtrTy = dyn_cast<PointerType>(Ty);
  if (PtrTy)
    Ty = PtrTy->getElementType();
  Size = Ty->getVectorNumElements();
  if (!CachePtr)
    Tmp.resize(Size, nullptr);
  else if (CachePtr->empty())
    CachePtr->resize(Size, nullptr);
  else
    assert(Size == CachePtr->size() && "Inconsistent vector sizes");
}

// Return component I, creating a new Value for it if necessary.
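// As an illustrative sketch (%p is a hypothetical pointer to <4 x i32>),
// component 0 of a pointer is materialized as a bitcast to the element
// pointer type, and component I > 0 as a constant GEP off that bitcast:
//   %p.i0 = bitcast <4 x i32>* %p to i32*
//   %p.i1 = getelementptr i32, i32* %p.i0, i32 1
// For vector values, the chain of InsertElementInsts feeding V is searched
// first, so an ExtractElement is only created as a last resort.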
Value *Scatterer::operator[](unsigned I) {
  ValueVector &CV = (CachePtr ? *CachePtr : Tmp);
  // Try to reuse a previous value.
  if (CV[I])
    return CV[I];
  IRBuilder<> Builder(BB, BBI);
  if (PtrTy) {
    Type *ElTy = PtrTy->getElementType()->getVectorElementType();
    if (!CV[0]) {
      Type *NewPtrTy = PointerType::get(ElTy, PtrTy->getAddressSpace());
      CV[0] = Builder.CreateBitCast(V, NewPtrTy, V->getName() + ".i0");
    }
    if (I != 0)
      CV[I] = Builder.CreateConstGEP1_32(ElTy, CV[0], I,
                                         V->getName() + ".i" + Twine(I));
  } else {
    // Search through a chain of InsertElementInsts looking for element I.
    // Record other elements in the cache. The new V is still suitable
    // for all uncached indices.
    while (true) {
      InsertElementInst *Insert = dyn_cast<InsertElementInst>(V);
      if (!Insert)
        break;
      ConstantInt *Idx = dyn_cast<ConstantInt>(Insert->getOperand(2));
      if (!Idx)
        break;
      unsigned J = Idx->getZExtValue();
      V = Insert->getOperand(0);
      if (I == J) {
        CV[J] = Insert->getOperand(1);
        return CV[J];
      } else if (!CV[J]) {
        // Only cache the first entry we find for each index we're not actively
        // searching for. This prevents us from going too far up the chain and
        // caching incorrect entries.
        CV[J] = Insert->getOperand(1);
      }
    }
    CV[I] = Builder.CreateExtractElement(V, Builder.getInt32(I),
                                         V->getName() + ".i" + Twine(I));
  }
  return CV[I];
}

bool ScalarizerLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  Module &M = *F.getParent();
  unsigned ParallelLoopAccessMDKind =
      M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  ScalarizerVisitor Impl(ParallelLoopAccessMDKind);
  return Impl.visit(F);
}

FunctionPass *llvm::createScalarizerPass() {
  return new ScalarizerLegacyPass();
}

bool ScalarizerVisitor::visit(Function &F) {
  assert(Gathered.empty() && Scattered.empty());

  // To ensure we replace gathered components correctly we need to do an
  // ordered traversal of the basic blocks in the function.
  ReversePostOrderTraversal<BasicBlock *> RPOT(&F.getEntryBlock());
  for (BasicBlock *BB : RPOT) {
    for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
      Instruction *I = &*II;
      bool Done = InstVisitor::visit(I);
      ++II;
      if (Done && I->getType()->isVoidTy())
        I->eraseFromParent();
    }
  }
  return finish();
}

// Return a scattered form of V that can be accessed by Point. V must be a
// vector or a pointer to a vector.
Scatterer ScalarizerVisitor::scatter(Instruction *Point, Value *V) {
  if (Argument *VArg = dyn_cast<Argument>(V)) {
    // Put the scattered form of arguments in the entry block,
    // so that it can be used everywhere.
    Function *F = VArg->getParent();
    BasicBlock *BB = &F->getEntryBlock();
    return Scatterer(BB, BB->begin(), V, &Scattered[V]);
  }
  if (Instruction *VOp = dyn_cast<Instruction>(V)) {
    // Put the scattered form of an instruction directly after the
    // instruction.
    BasicBlock *BB = VOp->getParent();
    return Scatterer(BB, std::next(BasicBlock::iterator(VOp)),
                     V, &Scattered[V]);
  }
  // In the fallback case, just put the scattered form before Point and
  // keep the result local to Point.
  return Scatterer(Point->getParent(), Point->getIterator(), V);
}

// Replace Op with the gathered form of the components in CV. Defer the
// deletion of Op and creation of the gathered form to the end of the pass,
// so that we can avoid creating the gathered form if all uses of Op are
// replaced with uses of CV.
void ScalarizerVisitor::gather(Instruction *Op, const ValueVector &CV) {
  // Since we're not deleting Op yet, stub out its operands, so that it
  // doesn't make anything live unnecessarily.
  for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I)
    Op->setOperand(I, UndefValue::get(Op->getOperand(I)->getType()));

  transferMetadataAndIRFlags(Op, CV);

  // If we already have a scattered form of Op (created from ExtractElements
  // of Op itself), replace them with the new form.
  ValueVector &SV = Scattered[Op];
  if (!SV.empty()) {
    for (unsigned I = 0, E = SV.size(); I != E; ++I) {
      Value *V = SV[I];
      if (V == nullptr)
        continue;

      Instruction *Old = cast<Instruction>(V);
      CV[I]->takeName(Old);
      Old->replaceAllUsesWith(CV[I]);
      Old->eraseFromParent();
    }
  }
  SV = CV;
  Gathered.push_back(GatherList::value_type(Op, &SV));
}

// Return true if it is safe to transfer the given metadata tag from
// vector to scalar instructions.
bool ScalarizerVisitor::canTransferMetadata(unsigned Tag) {
  return (Tag == LLVMContext::MD_tbaa
          || Tag == LLVMContext::MD_fpmath
          || Tag == LLVMContext::MD_tbaa_struct
          || Tag == LLVMContext::MD_invariant_load
          || Tag == LLVMContext::MD_alias_scope
          || Tag == LLVMContext::MD_noalias
          || Tag == ParallelLoopAccessMDKind
          || Tag == LLVMContext::MD_access_group);
}

// Transfer metadata, IR flags and the debug location from Op to the
// instructions in CV, where it is known to be safe to do so.
void ScalarizerVisitor::transferMetadataAndIRFlags(Instruction *Op,
                                                   const ValueVector &CV) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
  Op->getAllMetadataOtherThanDebugLoc(MDs);
  for (unsigned I = 0, E = CV.size(); I != E; ++I) {
    if (Instruction *New = dyn_cast<Instruction>(CV[I])) {
      for (const auto &MD : MDs)
        if (canTransferMetadata(MD.first))
          New->setMetadata(MD.first, MD.second);
      New->copyIRFlags(Op);
      if (Op->getDebugLoc() && !New->getDebugLoc())
        New->setDebugLoc(Op->getDebugLoc());
    }
  }
}

// Try to fill in Layout from Ty, returning true on success. Alignment is
// the alignment of the vector, or 0 if the ABI default should be used.
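// For example (hedged): a <8 x i1> vector is rejected, because an i1's type
// size (1 bit) differs from its store size (1 byte), so per-element memory
// accesses would not add up to the original vector access.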
bool ScalarizerVisitor::getVectorLayout(Type *Ty, unsigned Alignment,
                                        VectorLayout &Layout, const DataLayout &DL) {
  // Make sure we're dealing with a vector.
  Layout.VecTy = dyn_cast<VectorType>(Ty);
  if (!Layout.VecTy)
    return false;

  // Check that we're dealing with full-byte elements.
  Layout.ElemTy = Layout.VecTy->getElementType();
  if (!DL.typeSizeEqualsStoreSize(Layout.ElemTy))
    return false;

  if (Alignment)
    Layout.VecAlign = Alignment;
  else
    Layout.VecAlign = DL.getABITypeAlignment(Layout.VecTy);
  Layout.ElemSize = DL.getTypeStoreSize(Layout.ElemTy);
  return true;
}

// Scalarize one-operand instruction I, using Split(Builder, X, Name)
// to create an instruction like I with operand X and name Name.
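// For instance (illustrative IR), %n = fneg <2 x double> %x becomes
//   %n.i0 = fneg double %x.i0
//   %n.i1 = fneg double %x.i1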
template<typename Splitter>
bool ScalarizerVisitor::splitUnary(Instruction &I, const Splitter &Split) {
  VectorType *VT = dyn_cast<VectorType>(I.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&I);
  Scatterer Op = scatter(&I, I.getOperand(0));
  assert(Op.size() == NumElems && "Mismatched unary operation");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned Elem = 0; Elem < NumElems; ++Elem)
    Res[Elem] = Split(Builder, Op[Elem], I.getName() + ".i" + Twine(Elem));
  gather(&I, Res);
  return true;
}

// Scalarize two-operand instruction I, using Split(Builder, X, Y, Name)
// to create an instruction like I with operands X and Y and name Name.
template<typename Splitter>
bool ScalarizerVisitor::splitBinary(Instruction &I, const Splitter &Split) {
  VectorType *VT = dyn_cast<VectorType>(I.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&I);
  Scatterer Op0 = scatter(&I, I.getOperand(0));
  Scatterer Op1 = scatter(&I, I.getOperand(1));
  assert(Op0.size() == NumElems && "Mismatched binary operation");
  assert(Op1.size() == NumElems && "Mismatched binary operation");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned Elem = 0; Elem < NumElems; ++Elem)
    Res[Elem] = Split(Builder, Op0[Elem], Op1[Elem],
                      I.getName() + ".i" + Twine(Elem));
  gather(&I, Res);
  return true;
}

static bool isTriviallyScalariable(Intrinsic::ID ID) {
  return isTriviallyVectorizable(ID);
}

// All of the current scalarizable intrinsics only have one mangled type.
static Function *getScalarIntrinsicDeclaration(Module *M,
                                               Intrinsic::ID ID,
                                               VectorType *Ty) {
  return Intrinsic::getDeclaration(M, ID, { Ty->getScalarType() });
}

/// If CI is a call to a vector-typed intrinsic, split it into one scalar call
/// per element, where the intrinsic allows it.
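/// For example (illustrative IR),
///   %s = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %v)
/// becomes four calls to @llvm.sqrt.f32, one per element. Operands that the
/// intrinsic requires to be scalar (e.g. the i32 exponent of llvm.powi) are
/// passed through unchanged to every scalar call.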
bool ScalarizerVisitor::splitCall(CallInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getType());
  if (!VT)
    return false;

  Function *F = CI.getCalledFunction();
  if (!F)
    return false;

  Intrinsic::ID ID = F->getIntrinsicID();
  if (ID == Intrinsic::not_intrinsic || !isTriviallyScalariable(ID))
    return false;

  unsigned NumElems = VT->getNumElements();
  unsigned NumArgs = CI.getNumArgOperands();

  ValueVector ScalarOperands(NumArgs);
  SmallVector<Scatterer, 8> Scattered(NumArgs);

  // Assumes that any vector operand has the same number of elements as the
  // return vector type, which is true for all current intrinsics.
  for (unsigned I = 0; I != NumArgs; ++I) {
    Value *OpI = CI.getOperand(I);
    if (OpI->getType()->isVectorTy()) {
      Scattered[I] = scatter(&CI, OpI);
      assert(Scattered[I].size() == NumElems && "mismatched call operands");
    } else {
      ScalarOperands[I] = OpI;
    }
  }

  ValueVector Res(NumElems);
  ValueVector ScalarCallOps(NumArgs);

  Function *NewIntrin = getScalarIntrinsicDeclaration(F->getParent(), ID, VT);
  IRBuilder<> Builder(&CI);

  // Perform actual scalarization, taking care to preserve any scalar operands.
  for (unsigned Elem = 0; Elem < NumElems; ++Elem) {
    ScalarCallOps.clear();

    for (unsigned J = 0; J != NumArgs; ++J) {
      if (hasVectorInstrinsicScalarOpd(ID, J))
        ScalarCallOps.push_back(ScalarOperands[J]);
      else
        ScalarCallOps.push_back(Scattered[J][Elem]);
    }

    Res[Elem] = Builder.CreateCall(NewIntrin, ScalarCallOps,
                                   CI.getName() + ".i" + Twine(Elem));
  }

  gather(&CI, Res);
  return true;
}

bool ScalarizerVisitor::visitSelectInst(SelectInst &SI) {
  VectorType *VT = dyn_cast<VectorType>(SI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer Op1 = scatter(&SI, SI.getOperand(1));
  Scatterer Op2 = scatter(&SI, SI.getOperand(2));
  assert(Op1.size() == NumElems && "Mismatched select");
  assert(Op2.size() == NumElems && "Mismatched select");
  ValueVector Res;
  Res.resize(NumElems);

  if (SI.getOperand(0)->getType()->isVectorTy()) {
    Scatterer Op0 = scatter(&SI, SI.getOperand(0));
    assert(Op0.size() == NumElems && "Mismatched select");
    for (unsigned I = 0; I < NumElems; ++I)
      Res[I] = Builder.CreateSelect(Op0[I], Op1[I], Op2[I],
                                    SI.getName() + ".i" + Twine(I));
  } else {
    Value *Op0 = SI.getOperand(0);
    for (unsigned I = 0; I < NumElems; ++I)
      Res[I] = Builder.CreateSelect(Op0, Op1[I], Op2[I],
                                    SI.getName() + ".i" + Twine(I));
  }
  gather(&SI, Res);
  return true;
}

bool ScalarizerVisitor::visitICmpInst(ICmpInst &ICI) {
  return splitBinary(ICI, ICmpSplitter(ICI));
}

bool ScalarizerVisitor::visitFCmpInst(FCmpInst &FCI) {
  return splitBinary(FCI, FCmpSplitter(FCI));
}

bool ScalarizerVisitor::visitUnaryOperator(UnaryOperator &UO) {
  return splitUnary(UO, UnarySplitter(UO));
}

bool ScalarizerVisitor::visitBinaryOperator(BinaryOperator &BO) {
  return splitBinary(BO, BinarySplitter(BO));
}

bool ScalarizerVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  VectorType *VT = dyn_cast<VectorType>(GEPI.getType());
  if (!VT)
    return false;

  IRBuilder<> Builder(&GEPI);
  unsigned NumElems = VT->getNumElements();
  unsigned NumIndices = GEPI.getNumIndices();

  // The base pointer might be scalar even if it's a vector GEP. In those cases,
  // splat the pointer into a vector value, and scatter that vector.
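  // An illustrative case: getelementptr i32, i32* %base, <4 x i64> %offs
  // has a scalar base but still produces a <4 x i32*> result.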
  Value *Op0 = GEPI.getOperand(0);
  if (!Op0->getType()->isVectorTy())
    Op0 = Builder.CreateVectorSplat(NumElems, Op0);
  Scatterer Base = scatter(&GEPI, Op0);

  SmallVector<Scatterer, 8> Ops;
  Ops.resize(NumIndices);
  for (unsigned I = 0; I < NumIndices; ++I) {
    Value *Op = GEPI.getOperand(I + 1);

    // The indices might be scalars even if it's a vector GEP. In those cases,
    // splat the scalar into a vector value, and scatter that vector.
    if (!Op->getType()->isVectorTy())
      Op = Builder.CreateVectorSplat(NumElems, Op);

    Ops[I] = scatter(&GEPI, Op);
  }

  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    SmallVector<Value *, 8> Indices;
    Indices.resize(NumIndices);
    for (unsigned J = 0; J < NumIndices; ++J)
      Indices[J] = Ops[J][I];
    Res[I] = Builder.CreateGEP(GEPI.getSourceElementType(), Base[I], Indices,
                               GEPI.getName() + ".i" + Twine(I));
    if (GEPI.isInBounds())
      if (GetElementPtrInst *NewGEPI = dyn_cast<GetElementPtrInst>(Res[I]))
        NewGEPI->setIsInBounds();
  }
  gather(&GEPI, Res);
  return true;
}

bool ScalarizerVisitor::visitCastInst(CastInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getDestTy());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&CI);
  Scatterer Op0 = scatter(&CI, CI.getOperand(0));
  assert(Op0.size() == NumElems && "Mismatched cast");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateCast(CI.getOpcode(), Op0[I], VT->getElementType(),
                                CI.getName() + ".i" + Twine(I));
  gather(&CI, Res);
  return true;
}

bool ScalarizerVisitor::visitBitCastInst(BitCastInst &BCI) {
  VectorType *DstVT = dyn_cast<VectorType>(BCI.getDestTy());
  VectorType *SrcVT = dyn_cast<VectorType>(BCI.getSrcTy());
  if (!DstVT || !SrcVT)
    return false;

  unsigned DstNumElems = DstVT->getNumElements();
  unsigned SrcNumElems = SrcVT->getNumElements();
  IRBuilder<> Builder(&BCI);
  Scatterer Op0 = scatter(&BCI, BCI.getOperand(0));
  ValueVector Res;
  Res.resize(DstNumElems);

  if (DstNumElems == SrcNumElems) {
    for (unsigned I = 0; I < DstNumElems; ++I)
      Res[I] = Builder.CreateBitCast(Op0[I], DstVT->getElementType(),
                                     BCI.getName() + ".i" + Twine(I));
  } else if (DstNumElems > SrcNumElems) {
    // <M x t1> -> <N*M x t2>. Convert each t1 to <N x t2> and copy the
    // individual elements to the destination.
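    // For example (illustrative): <2 x i64> -> <4 x i32> converts each i64
    // element to <2 x i32> and scatters that pair into the result.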
    unsigned FanOut = DstNumElems / SrcNumElems;
    Type *MidTy = VectorType::get(DstVT->getElementType(), FanOut);
    unsigned ResI = 0;
    for (unsigned Op0I = 0; Op0I < SrcNumElems; ++Op0I) {
      Value *V = Op0[Op0I];
      Instruction *VI;
      // Look through any existing bitcasts before converting to <N x t2>.
      // In the best case, the resulting conversion might be a no-op.
      while ((VI = dyn_cast<Instruction>(V)) &&
             VI->getOpcode() == Instruction::BitCast)
        V = VI->getOperand(0);
      V = Builder.CreateBitCast(V, MidTy, V->getName() + ".cast");
      Scatterer Mid = scatter(&BCI, V);
      for (unsigned MidI = 0; MidI < FanOut; ++MidI)
        Res[ResI++] = Mid[MidI];
    }
  } else {
    // <N*M x t1> -> <M x t2>. Convert each group of <N x t1> into a t2.
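    // For example (illustrative): <4 x i32> -> <2 x i64> reassembles each
    // pair of i32 elements into a <2 x i32> and bitcasts that to i64.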
    unsigned FanIn = SrcNumElems / DstNumElems;
    Type *MidTy = VectorType::get(SrcVT->getElementType(), FanIn);
    unsigned Op0I = 0;
    for (unsigned ResI = 0; ResI < DstNumElems; ++ResI) {
      Value *V = UndefValue::get(MidTy);
      for (unsigned MidI = 0; MidI < FanIn; ++MidI)
        V = Builder.CreateInsertElement(V, Op0[Op0I++], Builder.getInt32(MidI),
                                        BCI.getName() + ".i" + Twine(ResI)
                                        + ".upto" + Twine(MidI));
      Res[ResI] = Builder.CreateBitCast(V, DstVT->getElementType(),
                                        BCI.getName() + ".i" + Twine(ResI));
    }
  }
  gather(&BCI, Res);
  return true;
}

bool ScalarizerVisitor::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
  VectorType *VT = dyn_cast<VectorType>(SVI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  Scatterer Op0 = scatter(&SVI, SVI.getOperand(0));
  Scatterer Op1 = scatter(&SVI, SVI.getOperand(1));
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I) {
    int Selector = SVI.getMaskValue(I);
    if (Selector < 0)
      Res[I] = UndefValue::get(VT->getElementType());
    else if (unsigned(Selector) < Op0.size())
      Res[I] = Op0[Selector];
    else
      Res[I] = Op1[Selector - Op0.size()];
  }
  gather(&SVI, Res);
  return true;
}

bool ScalarizerVisitor::visitPHINode(PHINode &PHI) {
  VectorType *VT = dyn_cast<VectorType>(PHI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&PHI);
  ValueVector Res;
  Res.resize(NumElems);

  unsigned NumOps = PHI.getNumOperands();
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreatePHI(VT->getElementType(), NumOps,
                               PHI.getName() + ".i" + Twine(I));

  for (unsigned I = 0; I < NumOps; ++I) {
    Scatterer Op = scatter(&PHI, PHI.getIncomingValue(I));
    BasicBlock *IncomingBlock = PHI.getIncomingBlock(I);
    for (unsigned J = 0; J < NumElems; ++J)
      cast<PHINode>(Res[J])->addIncoming(Op[J], IncomingBlock);
  }
  gather(&PHI, Res);
  return true;
}

bool ScalarizerVisitor::visitLoadInst(LoadInst &LI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!LI.isSimple())
    return false;

  VectorLayout Layout;
  if (!getVectorLayout(LI.getType(), LI.getAlignment(), Layout,
                       LI.getModule()->getDataLayout()))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(&LI);
  Scatterer Ptr = scatter(&LI, LI.getPointerOperand());
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateAlignedLoad(Layout.VecTy->getElementType(), Ptr[I],
                                       Layout.getElemAlign(I),
                                       LI.getName() + ".i" + Twine(I));
  gather(&LI, Res);
  return true;
}

bool ScalarizerVisitor::visitStoreInst(StoreInst &SI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!SI.isSimple())
    return false;

  VectorLayout Layout;
  Value *FullValue = SI.getValueOperand();
  if (!getVectorLayout(FullValue->getType(), SI.getAlignment(), Layout,
                       SI.getModule()->getDataLayout()))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer Ptr = scatter(&SI, SI.getPointerOperand());
  Scatterer Val = scatter(&SI, FullValue);

  ValueVector Stores;
  Stores.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    unsigned Align = Layout.getElemAlign(I);
    Stores[I] = Builder.CreateAlignedStore(Val[I], Ptr[I], Align);
  }
  transferMetadataAndIRFlags(&SI, Stores);
  return true;
}

bool ScalarizerVisitor::visitCallInst(CallInst &CI) {
  return splitCall(CI);
}

// Delete the instructions that we scalarized. If a full vector result
// is still needed, recreate it using InsertElements.
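// A sketch of the rebuild (names illustrative): a still-used <4 x i32> %r
// becomes
//   %r.upto0 = insertelement <4 x i32> undef, i32 %r.i0, i32 0
//   %r.upto1 = insertelement <4 x i32> %r.upto0, i32 %r.i1, i32 1
//   %r.upto2 = insertelement <4 x i32> %r.upto1, i32 %r.i2, i32 2
//   %r       = insertelement <4 x i32> %r.upto2, i32 %r.i3, i32 3
// where the last InsertElement takes over the original name via takeName.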
bool ScalarizerVisitor::finish() {
  // The presence of data in Gathered or Scattered indicates changes
  // made to the Function.
  if (Gathered.empty() && Scattered.empty())
    return false;
  for (const auto &GMI : Gathered) {
    Instruction *Op = GMI.first;
    ValueVector &CV = *GMI.second;
    if (!Op->use_empty()) {
      // The value is still needed, so recreate it using a series of
      // InsertElements.
      Type *Ty = Op->getType();
      Value *Res = UndefValue::get(Ty);
      BasicBlock *BB = Op->getParent();
      unsigned Count = Ty->getVectorNumElements();
      IRBuilder<> Builder(Op);
      if (isa<PHINode>(Op))
        Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
      for (unsigned I = 0; I < Count; ++I)
        Res = Builder.CreateInsertElement(Res, CV[I], Builder.getInt32(I),
                                          Op->getName() + ".upto" + Twine(I));
      Res->takeName(Op);
      Op->replaceAllUsesWith(Res);
    }
    Op->eraseFromParent();
  }
  Gathered.clear();
  Scattered.clear();
  return true;
}

PreservedAnalyses ScalarizerPass::run(Function &F, FunctionAnalysisManager &AM) {
  Module &M = *F.getParent();
  unsigned ParallelLoopAccessMDKind =
      M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  ScalarizerVisitor Impl(ParallelLoopAccessMDKind);
  bool Changed = Impl.visit(F);
  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}