//===- Scalarizer.cpp - Scalarize vector operations -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass converts vector operations into scalar operations, in order
// to expose optimization opportunities on the individual scalar operations.
// It is mainly intended for targets that do not have vector units, but it
// may also be useful for revectorizing code to different vector widths.
//
//===----------------------------------------------------------------------===//
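//
// For example, a vector add such as
//
//   %res = add <4 x i32> %a, %b
//
// is (roughly) rewritten as four scalar adds on extracted elements:
//
//   %a.i0 = extractelement <4 x i32> %a, i32 0
//   %b.i0 = extractelement <4 x i32> %b, i32 0
//   %res.i0 = add i32 %a.i0, %b.i0
//   ...
//
// and the full vector result is recreated with insertelements only if some
// user still needs it as a vector.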
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Options.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/Scalarizer.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "scalarizer"

// This is disabled by default because having separate loads and stores
// makes it more likely that the -combiner-alias-analysis limits will be
// reached.
static cl::opt<bool> ScalarizeLoadStore
  ("scalarize-load-store", cl::init(false), cl::Hidden,
   cl::desc("Allow the scalarizer pass to scalarize loads and stores"));
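// For example, with this option enabled (e.g. "opt -scalarizer
// -scalarize-load-store"), a load of <2 x i32> is split into two scalar
// i32 loads from adjacent addresses.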

namespace {

// Used to store the scattered form of a vector.
using ValueVector = SmallVector<Value *, 8>;

// Used to map a vector Value to its scattered form.  We use std::map
// because we want iterators to persist across insertion and because the
// values are relatively large.
using ScatterMap = std::map<Value *, ValueVector>;

// Lists Instructions that have been replaced with scalar implementations,
// along with a pointer to their scattered forms.
using GatherList = SmallVector<std::pair<Instruction *, ValueVector *>, 16>;

// Provides a very limited vector-like interface for lazily accessing one
// component of a scattered vector or vector pointer.
class Scatterer {
public:
  Scatterer() = default;

  // Scatter V into Size components.  If new instructions are needed,
  // insert them before BBI in BB.  If Cache is nonnull, use it to cache
  // the results.
  Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
            ValueVector *cachePtr = nullptr);

  // Return component I, creating a new Value for it if necessary.
  Value *operator[](unsigned I);

  // Return the number of components.
  unsigned size() const { return Size; }

private:
  BasicBlock *BB;
  BasicBlock::iterator BBI;
  Value *V;
  ValueVector *CachePtr;
  PointerType *PtrTy;
  ValueVector Tmp;
  unsigned Size;
};
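
// For example, scattering a value %v of type <2 x float> lazily creates
// the components
//
//   %v.i0 = extractelement <2 x float> %v, i32 0
//   %v.i1 = extractelement <2 x float> %v, i32 1
//
// unless %v is itself a chain of insertelements, in which case the inserted
// scalars are reused directly.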
// FCmpSplitter(FCI)(Builder, X, Y, Name) uses Builder to create an FCmp
// called Name that compares X and Y in the same way as FCI.
struct FCmpSplitter {
  FCmpSplitter(FCmpInst &fci) : FCI(fci) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateFCmp(FCI.getPredicate(), Op0, Op1, Name);
  }

  FCmpInst &FCI;
};

// ICmpSplitter(ICI)(Builder, X, Y, Name) uses Builder to create an ICmp
// called Name that compares X and Y in the same way as ICI.
struct ICmpSplitter {
  ICmpSplitter(ICmpInst &ici) : ICI(ici) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateICmp(ICI.getPredicate(), Op0, Op1, Name);
  }

  ICmpInst &ICI;
};

// BinarySplitter(BO)(Builder, X, Y, Name) uses Builder to create
// a binary operator like BO called Name with operands X and Y.
struct BinarySplitter {
  BinarySplitter(BinaryOperator &bo) : BO(bo) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateBinOp(BO.getOpcode(), Op0, Op1, Name);
  }

  BinaryOperator &BO;
};

// Information about a load or store that we're scalarizing.
struct VectorLayout {
  VectorLayout() = default;

  // Return the alignment of element I.
  uint64_t getElemAlign(unsigned I) {
    return MinAlign(VecAlign, I * ElemSize);
  }

  // The type of the vector.
  VectorType *VecTy = nullptr;

  // The type of each element.
  Type *ElemTy = nullptr;

  // The alignment of the vector.
  uint64_t VecAlign = 0;

  // The size of each element.
  uint64_t ElemSize = 0;
};
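
// For example, for a <4 x i32> with VecAlign == 8 and ElemSize == 4,
// getElemAlign returns 8, 4, 8 and 4 for elements 0 through 3, since
// MinAlign(8, 4) == MinAlign(8, 12) == 4.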

class ScalarizerVisitor : public InstVisitor<ScalarizerVisitor, bool> {
public:
  ScalarizerVisitor(unsigned ParallelLoopAccessMDKind)
    : ParallelLoopAccessMDKind(ParallelLoopAccessMDKind) {
  }

  bool visit(Function &F);

  // InstVisitor methods.  They return true if the instruction was scalarized,
  // false if nothing changed.
  bool visitInstruction(Instruction &I) { return false; }
  bool visitSelectInst(SelectInst &SI);
  bool visitICmpInst(ICmpInst &ICI);
  bool visitFCmpInst(FCmpInst &FCI);
  bool visitBinaryOperator(BinaryOperator &BO);
  bool visitGetElementPtrInst(GetElementPtrInst &GEPI);
  bool visitCastInst(CastInst &CI);
  bool visitBitCastInst(BitCastInst &BCI);
  bool visitShuffleVectorInst(ShuffleVectorInst &SVI);
  bool visitPHINode(PHINode &PHI);
  bool visitLoadInst(LoadInst &LI);
  bool visitStoreInst(StoreInst &SI);
  bool visitCallInst(CallInst &ICI);

private:
  Scatterer scatter(Instruction *Point, Value *V);
  void gather(Instruction *Op, const ValueVector &CV);
  bool canTransferMetadata(unsigned Kind);
  void transferMetadata(Instruction *Op, const ValueVector &CV);
  bool getVectorLayout(Type *Ty, unsigned Alignment, VectorLayout &Layout,
                       const DataLayout &DL);
  bool finish();

  template<typename T> bool splitBinary(Instruction &, const T &);

  bool splitCall(CallInst &CI);

  ScatterMap Scattered;
  GatherList Gathered;

  unsigned ParallelLoopAccessMDKind;
};

class ScalarizerLegacyPass : public FunctionPass {
public:
  static char ID;

  ScalarizerLegacyPass() : FunctionPass(ID) {
    initializeScalarizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;
};

} // end anonymous namespace

char ScalarizerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(ScalarizerLegacyPass, "scalarizer",
                      "Scalarize vector operations", false, false)
INITIALIZE_PASS_END(ScalarizerLegacyPass, "scalarizer",
                    "Scalarize vector operations", false, false)

Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
                     ValueVector *cachePtr)
  : BB(bb), BBI(bbi), V(v), CachePtr(cachePtr) {
  Type *Ty = V->getType();
  PtrTy = dyn_cast<PointerType>(Ty);
  if (PtrTy)
    Ty = PtrTy->getElementType();
  Size = Ty->getVectorNumElements();
  if (!CachePtr)
    Tmp.resize(Size, nullptr);
  else if (CachePtr->empty())
    CachePtr->resize(Size, nullptr);
  else
    assert(Size == CachePtr->size() && "Inconsistent vector sizes");
}

// Return component I, creating a new Value for it if necessary.
Value *Scatterer::operator[](unsigned I) {
  ValueVector &CV = (CachePtr ? *CachePtr : Tmp);
  // Try to reuse a previous value.
  if (CV[I])
    return CV[I];
  IRBuilder<> Builder(BB, BBI);
  if (PtrTy) {
    Type *ElTy = PtrTy->getElementType()->getVectorElementType();
    if (!CV[0]) {
      Type *NewPtrTy = PointerType::get(ElTy, PtrTy->getAddressSpace());
      CV[0] = Builder.CreateBitCast(V, NewPtrTy, V->getName() + ".i0");
    }
    if (I != 0)
      CV[I] = Builder.CreateConstGEP1_32(ElTy, CV[0], I,
                                         V->getName() + ".i" + Twine(I));
  } else {
    // Search through a chain of InsertElementInsts looking for element I.
    // Record other elements in the cache.  The new V is still suitable
    // for all uncached indices.
    while (true) {
      InsertElementInst *Insert = dyn_cast<InsertElementInst>(V);
      if (!Insert)
        break;
      ConstantInt *Idx = dyn_cast<ConstantInt>(Insert->getOperand(2));
      if (!Idx)
        break;
      unsigned J = Idx->getZExtValue();
      V = Insert->getOperand(0);
      if (I == J) {
        CV[J] = Insert->getOperand(1);
        return CV[J];
      }
      if (!CV[J]) {
        // Only cache the first entry we find for each index we're not actively
        // searching for.  This prevents us from going too far up the chain and
        // caching incorrect entries.
        CV[J] = Insert->getOperand(1);
      }
    }
    CV[I] = Builder.CreateExtractElement(V, Builder.getInt32(I),
                                         V->getName() + ".i" + Twine(I));
  }
  return CV[I];
}

bool ScalarizerLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  Module &M = *F.getParent();
  unsigned ParallelLoopAccessMDKind =
      M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  ScalarizerVisitor Impl(ParallelLoopAccessMDKind);
  return Impl.visit(F);
}

FunctionPass *llvm::createScalarizerPass() {
  return new ScalarizerLegacyPass();
}

bool ScalarizerVisitor::visit(Function &F) {
  assert(Gathered.empty() && Scattered.empty());

  // To ensure we replace gathered components correctly we need to do an
  // ordered traversal of the basic blocks in the function.
  ReversePostOrderTraversal<BasicBlock *> RPOT(&F.getEntryBlock());
  for (BasicBlock *BB : RPOT) {
    for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
      Instruction *I = &*II;
      bool Done = InstVisitor::visit(I);
      ++II;
      if (Done && I->getType()->isVoidTy())
        I->eraseFromParent();
    }
  }
  return finish();
}

// Return a scattered form of V that can be accessed by Point.  V must be a
// vector or a pointer to a vector.
Scatterer ScalarizerVisitor::scatter(Instruction *Point, Value *V) {
  if (Argument *VArg = dyn_cast<Argument>(V)) {
    // Put the scattered form of arguments in the entry block,
    // so that it can be used everywhere.
    Function *F = VArg->getParent();
    BasicBlock *BB = &F->getEntryBlock();
    return Scatterer(BB, BB->begin(), V, &Scattered[V]);
  }
  if (Instruction *VOp = dyn_cast<Instruction>(V)) {
    // Put the scattered form of an instruction directly after the
    // instruction itself.
    BasicBlock *BB = VOp->getParent();
    return Scatterer(BB, std::next(BasicBlock::iterator(VOp)),
                     V, &Scattered[V]);
  }
  // In the fallback case, just put the scattered form before Point and
  // keep the result local to Point.
  return Scatterer(Point->getParent(), Point->getIterator(), V);
}

// Replace Op with the gathered form of the components in CV.  Defer the
// deletion of Op and creation of the gathered form to the end of the pass,
// so that we can avoid creating the gathered form if all uses of Op are
// replaced with uses of CV.
void ScalarizerVisitor::gather(Instruction *Op, const ValueVector &CV) {
  // Since we're not deleting Op yet, stub out its operands, so that it
  // doesn't make anything live unnecessarily.
  for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I)
    Op->setOperand(I, UndefValue::get(Op->getOperand(I)->getType()));

  transferMetadata(Op, CV);

  // If we already have a scattered form of Op (created from ExtractElements
  // of Op itself), replace them with the new form.
  ValueVector &SV = Scattered[Op];
  if (!SV.empty()) {
    for (unsigned I = 0, E = SV.size(); I != E; ++I) {
      Value *V = SV[I];
      if (V == nullptr || SV[I] == CV[I])
        continue;

      Instruction *Old = cast<Instruction>(V);
      CV[I]->takeName(Old);
      Old->replaceAllUsesWith(CV[I]);
      Old->eraseFromParent();
    }
  }
  SV = CV;
  Gathered.push_back(GatherList::value_type(Op, &SV));
}

// Return true if it is safe to transfer the given metadata tag from
// vector to scalar instructions.
bool ScalarizerVisitor::canTransferMetadata(unsigned Tag) {
  return (Tag == LLVMContext::MD_tbaa
          || Tag == LLVMContext::MD_fpmath
          || Tag == LLVMContext::MD_tbaa_struct
          || Tag == LLVMContext::MD_invariant_load
          || Tag == LLVMContext::MD_alias_scope
          || Tag == LLVMContext::MD_noalias
          || Tag == ParallelLoopAccessMDKind
          || Tag == LLVMContext::MD_access_group);
}
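
// For example, !fpmath metadata on a vector fdiv applies equally to each
// scalar fdiv split out of it, so it is safe to copy; tags not on the list
// above are conservatively dropped.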

// Transfer metadata from Op to the instructions in CV if it is known
// to be safe to do so.
void ScalarizerVisitor::transferMetadata(Instruction *Op,
                                         const ValueVector &CV) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
  Op->getAllMetadataOtherThanDebugLoc(MDs);
  for (unsigned I = 0, E = CV.size(); I != E; ++I) {
    if (Instruction *New = dyn_cast<Instruction>(CV[I])) {
      for (const auto &MD : MDs)
        if (canTransferMetadata(MD.first))
          New->setMetadata(MD.first, MD.second);
      if (Op->getDebugLoc() && !New->getDebugLoc())
        New->setDebugLoc(Op->getDebugLoc());
    }
  }
}

// Try to fill in Layout from Ty, returning true on success.  Alignment is
// the alignment of the vector, or 0 if the ABI default should be used.
bool ScalarizerVisitor::getVectorLayout(Type *Ty, unsigned Alignment,
                                        VectorLayout &Layout,
                                        const DataLayout &DL) {
  // Make sure we're dealing with a vector.
  Layout.VecTy = dyn_cast<VectorType>(Ty);
  if (!Layout.VecTy)
    return false;

  // Check that we're dealing with full-byte elements.
  Layout.ElemTy = Layout.VecTy->getElementType();
  if (DL.getTypeSizeInBits(Layout.ElemTy) !=
      DL.getTypeStoreSizeInBits(Layout.ElemTy))
    return false;

  if (Alignment)
    Layout.VecAlign = Alignment;
  else
    Layout.VecAlign = DL.getABITypeAlignment(Layout.VecTy);
  Layout.ElemSize = DL.getTypeStoreSize(Layout.ElemTy);
  return true;
}

// Scalarize two-operand instruction I, using Split(Builder, X, Y, Name)
// to create an instruction like I with operands X and Y and name Name.
template<typename Splitter>
bool ScalarizerVisitor::splitBinary(Instruction &I, const Splitter &Split) {
  VectorType *VT = dyn_cast<VectorType>(I.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&I);
  Scatterer Op0 = scatter(&I, I.getOperand(0));
  Scatterer Op1 = scatter(&I, I.getOperand(1));
  assert(Op0.size() == NumElems && "Mismatched binary operation");
  assert(Op1.size() == NumElems && "Mismatched binary operation");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned Elem = 0; Elem < NumElems; ++Elem)
    Res[Elem] = Split(Builder, Op0[Elem], Op1[Elem],
                      I.getName() + ".i" + Twine(Elem));
  gather(&I, Res);
  return true;
}
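
// For example, "%p = fmul <2 x double> %x, %y" becomes, via BinarySplitter:
//
//   %p.i0 = fmul double %x.i0, %y.i0
//   %p.i1 = fmul double %x.i1, %y.i1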

static bool isTriviallyScalariable(Intrinsic::ID ID) {
  return isTriviallyVectorizable(ID);
}

// All of the current scalarizable intrinsics only have one mangled type.
static Function *getScalarIntrinsicDeclaration(Module *M,
                                               Intrinsic::ID ID,
                                               VectorType *Ty) {
  return Intrinsic::getDeclaration(M, ID, { Ty->getScalarType() });
}

/// If a call to a vector typed intrinsic function, split into a scalar call per
/// element if possible for the intrinsic.
bool ScalarizerVisitor::splitCall(CallInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getType());
  if (!VT)
    return false;

  Function *F = CI.getCalledFunction();
  if (!F)
    return false;

  Intrinsic::ID ID = F->getIntrinsicID();
  if (ID == Intrinsic::not_intrinsic || !isTriviallyScalariable(ID))
    return false;

  unsigned NumElems = VT->getNumElements();
  unsigned NumArgs = CI.getNumArgOperands();

  ValueVector ScalarOperands(NumArgs);
  SmallVector<Scatterer, 8> Scattered(NumArgs);

  Scattered.resize(NumArgs);

  // Assumes that any vector type has the same number of elements as the return
  // vector type, which is true for all current intrinsics.
  for (unsigned I = 0; I != NumArgs; ++I) {
    Value *OpI = CI.getOperand(I);
    if (OpI->getType()->isVectorTy()) {
      Scattered[I] = scatter(&CI, OpI);
      assert(Scattered[I].size() == NumElems && "mismatched call operands");
    } else {
      ScalarOperands[I] = OpI;
    }
  }

  ValueVector Res(NumElems);
  ValueVector ScalarCallOps(NumArgs);

  Function *NewIntrin = getScalarIntrinsicDeclaration(F->getParent(), ID, VT);
  IRBuilder<> Builder(&CI);

  // Perform actual scalarization, taking care to preserve any scalar operands.
  for (unsigned Elem = 0; Elem < NumElems; ++Elem) {
    ScalarCallOps.clear();

    for (unsigned J = 0; J != NumArgs; ++J) {
      if (hasVectorInstrinsicScalarOpd(ID, J))
        ScalarCallOps.push_back(ScalarOperands[J]);
      else
        ScalarCallOps.push_back(Scattered[J][Elem]);
    }

    Res[Elem] = Builder.CreateCall(NewIntrin, ScalarCallOps,
                                   CI.getName() + ".i" + Twine(Elem));
  }

  gather(&CI, Res);
  return true;
}
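
// For example, a call such as
//
//   %r = call <2 x float> @llvm.fma.v2f32(<2 x float> %a, <2 x float> %b,
//                                         <2 x float> %c)
//
// becomes two calls to the scalar declaration @llvm.fma.f32, one per lane.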

bool ScalarizerVisitor::visitSelectInst(SelectInst &SI) {
  VectorType *VT = dyn_cast<VectorType>(SI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer Op1 = scatter(&SI, SI.getOperand(1));
  Scatterer Op2 = scatter(&SI, SI.getOperand(2));
  assert(Op1.size() == NumElems && "Mismatched select");
  assert(Op2.size() == NumElems && "Mismatched select");
  ValueVector Res;
  Res.resize(NumElems);
  if (SI.getOperand(0)->getType()->isVectorTy()) {
    Scatterer Op0 = scatter(&SI, SI.getOperand(0));
    assert(Op0.size() == NumElems && "Mismatched select");
    for (unsigned I = 0; I < NumElems; ++I)
      Res[I] = Builder.CreateSelect(Op0[I], Op1[I], Op2[I],
                                    SI.getName() + ".i" + Twine(I));
  } else {
    Value *Op0 = SI.getOperand(0);
    for (unsigned I = 0; I < NumElems; ++I)
      Res[I] = Builder.CreateSelect(Op0, Op1[I], Op2[I],
                                    SI.getName() + ".i" + Twine(I));
  }
  gather(&SI, Res);
  return true;
}

bool ScalarizerVisitor::visitICmpInst(ICmpInst &ICI) {
  return splitBinary(ICI, ICmpSplitter(ICI));
}

bool ScalarizerVisitor::visitFCmpInst(FCmpInst &FCI) {
  return splitBinary(FCI, FCmpSplitter(FCI));
}

bool ScalarizerVisitor::visitBinaryOperator(BinaryOperator &BO) {
  return splitBinary(BO, BinarySplitter(BO));
}

bool ScalarizerVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  VectorType *VT = dyn_cast<VectorType>(GEPI.getType());
  if (!VT)
    return false;

  IRBuilder<> Builder(&GEPI);
  unsigned NumElems = VT->getNumElements();
  unsigned NumIndices = GEPI.getNumIndices();

  // The base pointer might be scalar even if it's a vector GEP. In those cases,
  // splat the pointer into a vector value, and scatter that vector.
  Value *Op0 = GEPI.getOperand(0);
  if (!Op0->getType()->isVectorTy())
    Op0 = Builder.CreateVectorSplat(NumElems, Op0);
  Scatterer Base = scatter(&GEPI, Op0);

  SmallVector<Scatterer, 8> Ops;
  Ops.resize(NumIndices);
  for (unsigned I = 0; I < NumIndices; ++I) {
    Value *Op = GEPI.getOperand(I + 1);

    // The indices might be scalars even if it's a vector GEP. In those cases,
    // splat the scalar into a vector value, and scatter that vector.
    if (!Op->getType()->isVectorTy())
      Op = Builder.CreateVectorSplat(NumElems, Op);

    Ops[I] = scatter(&GEPI, Op);
  }

  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    SmallVector<Value *, 8> Indices;
    Indices.resize(NumIndices);
    for (unsigned J = 0; J < NumIndices; ++J)
      Indices[J] = Ops[J][I];
    Res[I] = Builder.CreateGEP(GEPI.getSourceElementType(), Base[I], Indices,
                               GEPI.getName() + ".i" + Twine(I));
    if (GEPI.isInBounds())
      if (GetElementPtrInst *NewGEPI = dyn_cast<GetElementPtrInst>(Res[I]))
        NewGEPI->setIsInBounds();
  }

  gather(&GEPI, Res);
  return true;
}
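
// For example, a vector GEP with a scalar base pointer, such as
//
//   %p = getelementptr i32, i32* %base, <2 x i64> %idx
//
// first splats %base across the two lanes and then emits one scalar GEP
// per lane.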

bool ScalarizerVisitor::visitCastInst(CastInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getDestTy());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&CI);
  Scatterer Op0 = scatter(&CI, CI.getOperand(0));
  assert(Op0.size() == NumElems && "Mismatched cast");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateCast(CI.getOpcode(), Op0[I], VT->getElementType(),
                                CI.getName() + ".i" + Twine(I));
  gather(&CI, Res);
  return true;
}

bool ScalarizerVisitor::visitBitCastInst(BitCastInst &BCI) {
  VectorType *DstVT = dyn_cast<VectorType>(BCI.getDestTy());
  VectorType *SrcVT = dyn_cast<VectorType>(BCI.getSrcTy());
  if (!DstVT || !SrcVT)
    return false;

  unsigned DstNumElems = DstVT->getNumElements();
  unsigned SrcNumElems = SrcVT->getNumElements();
  IRBuilder<> Builder(&BCI);
  Scatterer Op0 = scatter(&BCI, BCI.getOperand(0));
  ValueVector Res;
  Res.resize(DstNumElems);

  if (DstNumElems == SrcNumElems) {
    for (unsigned I = 0; I < DstNumElems; ++I)
      Res[I] = Builder.CreateBitCast(Op0[I], DstVT->getElementType(),
                                     BCI.getName() + ".i" + Twine(I));
  } else if (DstNumElems > SrcNumElems) {
    // <M x t1> -> <N*M x t2>.  Convert each t1 to <N x t2> and copy the
    // individual elements to the destination.
    unsigned FanOut = DstNumElems / SrcNumElems;
    Type *MidTy = VectorType::get(DstVT->getElementType(), FanOut);
    unsigned ResI = 0;
    for (unsigned Op0I = 0; Op0I < SrcNumElems; ++Op0I) {
      Value *V = Op0[Op0I];
      Instruction *VI;
      // Look through any existing bitcasts before converting to <N x t2>.
      // In the best case, the resulting conversion might be a no-op.
      while ((VI = dyn_cast<Instruction>(V)) &&
             VI->getOpcode() == Instruction::BitCast)
        V = VI->getOperand(0);
      V = Builder.CreateBitCast(V, MidTy, V->getName() + ".cast");
      Scatterer Mid = scatter(&BCI, V);
      for (unsigned MidI = 0; MidI < FanOut; ++MidI)
        Res[ResI++] = Mid[MidI];
    }
  } else {
    // <N*M x t1> -> <M x t2>.  Convert each group of <N x t1> into a t2.
    unsigned FanIn = SrcNumElems / DstNumElems;
    Type *MidTy = VectorType::get(SrcVT->getElementType(), FanIn);
    unsigned Op0I = 0;
    for (unsigned ResI = 0; ResI < DstNumElems; ++ResI) {
      Value *V = UndefValue::get(MidTy);
      for (unsigned MidI = 0; MidI < FanIn; ++MidI)
        V = Builder.CreateInsertElement(V, Op0[Op0I++], Builder.getInt32(MidI),
                                        BCI.getName() + ".i" + Twine(ResI)
                                        + ".upto" + Twine(MidI));
      Res[ResI] = Builder.CreateBitCast(V, DstVT->getElementType(),
                                        BCI.getName() + ".i" + Twine(ResI));
    }
  }
  gather(&BCI, Res);
  return true;
}
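
// For example, bitcasting <2 x i64> to <4 x i32> (FanOut == 2) converts each
// i64 lane to a <2 x i32> and copies out its two halves, while the opposite
// direction (FanIn == 2) reassembles pairs of i32 lanes into a <2 x i32>
// before converting each pair to an i64.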

bool ScalarizerVisitor::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
  VectorType *VT = dyn_cast<VectorType>(SVI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  Scatterer Op0 = scatter(&SVI, SVI.getOperand(0));
  Scatterer Op1 = scatter(&SVI, SVI.getOperand(1));
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I) {
    int Selector = SVI.getMaskValue(I);
    if (Selector < 0)
      Res[I] = UndefValue::get(VT->getElementType());
    else if (unsigned(Selector) < Op0.size())
      Res[I] = Op0[Selector];
    else
      Res[I] = Op1[Selector - Op0.size()];
  }
  gather(&SVI, Res);
  return true;
}
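
// For example, "%s = shufflevector <2 x i32> %a, <2 x i32> %b,
//               <2 x i32> <i32 1, i32 2>" simply forwards the existing
// scalars %a.i1 and %b.i0; the shuffle itself needs no new instructions.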

bool ScalarizerVisitor::visitPHINode(PHINode &PHI) {
  VectorType *VT = dyn_cast<VectorType>(PHI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&PHI);
  ValueVector Res;
  Res.resize(NumElems);

  unsigned NumOps = PHI.getNumOperands();
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreatePHI(VT->getElementType(), NumOps,
                               PHI.getName() + ".i" + Twine(I));

  for (unsigned I = 0; I < NumOps; ++I) {
    Scatterer Op = scatter(&PHI, PHI.getIncomingValue(I));
    BasicBlock *IncomingBlock = PHI.getIncomingBlock(I);
    for (unsigned J = 0; J < NumElems; ++J)
      cast<PHINode>(Res[J])->addIncoming(Op[J], IncomingBlock);
  }
  gather(&PHI, Res);
  return true;
}

bool ScalarizerVisitor::visitLoadInst(LoadInst &LI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!LI.isSimple())
    return false;

  VectorLayout Layout;
  if (!getVectorLayout(LI.getType(), LI.getAlignment(), Layout,
                       LI.getModule()->getDataLayout()))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(&LI);
  Scatterer Ptr = scatter(&LI, LI.getPointerOperand());
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateAlignedLoad(Layout.VecTy->getElementType(), Ptr[I],
                                       Layout.getElemAlign(I),
                                       LI.getName() + ".i" + Twine(I));
  gather(&LI, Res);
  return true;
}
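
// For example, with -scalarize-load-store enabled, "load <2 x i32>,
// <2 x i32>* %p" becomes a bitcast of %p to i32*, a GEP to element 1, and
// two scalar i32 loads whose alignments are derived from the vector's.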

bool ScalarizerVisitor::visitStoreInst(StoreInst &SI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!SI.isSimple())
    return false;

  VectorLayout Layout;
  Value *FullValue = SI.getValueOperand();
  if (!getVectorLayout(FullValue->getType(), SI.getAlignment(), Layout,
                       SI.getModule()->getDataLayout()))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer Ptr = scatter(&SI, SI.getPointerOperand());
  Scatterer Val = scatter(&SI, FullValue);

  ValueVector Stores;
  Stores.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    unsigned Align = Layout.getElemAlign(I);
    Stores[I] = Builder.CreateAlignedStore(Val[I], Ptr[I], Align);
  }
  transferMetadata(&SI, Stores);
  return true;
}

bool ScalarizerVisitor::visitCallInst(CallInst &CI) {
  return splitCall(CI);
}

// Delete the instructions that we scalarized.  If a full vector result
// is still needed, recreate it using InsertElements.
bool ScalarizerVisitor::finish() {
  // The presence of data in Gathered or Scattered indicates changes
  // made to the Function.
  if (Gathered.empty() && Scattered.empty())
    return false;
  for (const auto &GMI : Gathered) {
    Instruction *Op = GMI.first;
    ValueVector &CV = *GMI.second;
    if (!Op->use_empty()) {
      // The value is still needed, so recreate it using a series of
      // InsertElements.
      Type *Ty = Op->getType();
      Value *Res = UndefValue::get(Ty);
      BasicBlock *BB = Op->getParent();
      unsigned Count = Ty->getVectorNumElements();
      IRBuilder<> Builder(Op);
      if (isa<PHINode>(Op))
        Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
      for (unsigned I = 0; I < Count; ++I)
        Res = Builder.CreateInsertElement(Res, CV[I], Builder.getInt32(I),
                                          Op->getName() + ".upto" + Twine(I));
      Res->takeName(Op);
      Op->replaceAllUsesWith(Res);
    }
    Op->eraseFromParent();
  }
  Gathered.clear();
  Scattered.clear();
  return true;
}
ScalarizerPass::run(Function
&F
, FunctionAnalysisManager
&AM
) {
817 Module
&M
= *F
.getParent();
818 unsigned ParallelLoopAccessMDKind
=
819 M
.getContext().getMDKindID("llvm.mem.parallel_loop_access");
820 ScalarizerVisitor
Impl(ParallelLoopAccessMDKind
);
821 bool Changed
= Impl
.visit(F
);
822 return Changed
? PreservedAnalyses::none() : PreservedAnalyses::all();