//===- ScalarizeMaskedMemIntrin.cpp - Scalarize unsupported masked mem ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass replaces masked memory intrinsics - when unsupported by the target
// - with a chain of basic blocks that deal with the elements one by one if the
// appropriate mask bit is set.
//
//===----------------------------------------------------------------------===//
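
// For illustration (a sketch, not part of the pass logic): a call such as
//   %r = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %p, i32 4,
//                                                       <4 x i1> %m, <4 x i32> %pt)
// (exact name mangling depends on the LLVM version) is expanded by this pass
// into per-element conditional loads when the target reports the masked form
// as not legal.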

#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "scalarize-masked-mem-intrin"

namespace {

class ScalarizeMaskedMemIntrin : public FunctionPass {
  const TargetTransformInfo *TTI = nullptr;

public:
  static char ID; // Pass identification, replacement for typeid

  explicit ScalarizeMaskedMemIntrin() : FunctionPass(ID) {
    initializeScalarizeMaskedMemIntrinPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override {
    return "Scalarize Masked Memory Intrinsics";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetTransformInfoWrapperPass>();
  }

private:
  bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT);
  bool optimizeCallInst(CallInst *CI, bool &ModifiedDT);
};

} // end anonymous namespace

char ScalarizeMaskedMemIntrin::ID = 0;

INITIALIZE_PASS(ScalarizeMaskedMemIntrin, DEBUG_TYPE,
                "Scalarize unsupported masked memory intrinsics", false, false)

FunctionPass *llvm::createScalarizeMaskedMemIntrinPass() {
  return new ScalarizeMaskedMemIntrin();
}
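
// A sketch of typical use (not shown in this file): a target's pass pipeline
// adds the pass before instruction selection, e.g.
//   addPass(createScalarizeMaskedMemIntrinPass());
// so that any masked load/store/gather/scatter the target cannot lower
// natively has already been expanded into plain memory operations and
// branches by the time ISel runs.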

static bool isConstantIntVector(Value *Mask) {
  Constant *C = dyn_cast<Constant>(Mask);
  if (!C)
    return false;

  unsigned NumElts = Mask->getType()->getVectorNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *CElt = C->getAggregateElement(i);
    if (!CElt || !isa<ConstantInt>(CElt))
      return false;
  }

  return true;
}

// Translate a masked load intrinsic like
// <16 x i32 > @llvm.masked.load( <16 x i32>* %addr, i32 align,
//                               <16 x i1> %mask, <16 x i32> %passthru)
// to a chain of basic blocks, loading the elements one by one if
// the appropriate mask bit is set
//
//  %1 = bitcast i8* %addr to i32*
//  %2 = extractelement <16 x i1> %mask, i32 0
//  br i1 %2, label %cond.load, label %else
//
// cond.load:                                        ; preds = %0
//  %3 = getelementptr i32* %1, i32 0
//  %4 = load i32* %3
//  %5 = insertelement <16 x i32> %passthru, i32 %4, i32 0
//  br label %else
//
// else:                                             ; preds = %0, %cond.load
//  %res.phi.else = phi <16 x i32> [ %5, %cond.load ], [ undef, %0 ]
//  %6 = extractelement <16 x i1> %mask, i32 1
//  br i1 %6, label %cond.load1, label %else2
//
// cond.load1:                                       ; preds = %else
//  %7 = getelementptr i32* %1, i32 1
//  %8 = load i32* %7
//  %9 = insertelement <16 x i32> %res.phi.else, i32 %8, i32 1
//  br label %else2
//
// else2:                                          ; preds = %else, %cond.load1
//  %res.phi.else3 = phi <16 x i32> [ %9, %cond.load1 ], [ %res.phi.else, %else ]
//  %10 = extractelement <16 x i1> %mask, i32 2
//  br i1 %10, label %cond.load4, label %else5
//
static void scalarizeMaskedLoad(CallInst *CI) {
  Value *Ptr = CI->getArgOperand(0);
  Value *Alignment = CI->getArgOperand(1);
  Value *Mask = CI->getArgOperand(2);
  Value *Src0 = CI->getArgOperand(3);

  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
  VectorType *VecType = cast<VectorType>(CI->getType());

  Type *EltTy = VecType->getElementType();

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();
  BasicBlock *CondBlock = nullptr;
  BasicBlock *PrevIfBlock = CI->getParent();

  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  // Short-cut if the mask is all-true.
  if (isa<Constant>(Mask) && cast<Constant>(Mask)->isAllOnesValue()) {
    Value *NewI = Builder.CreateAlignedLoad(Ptr, AlignVal);
    CI->replaceAllUsesWith(NewI);
    CI->eraseFromParent();
    return;
  }

  // Adjust alignment for the scalar instruction.
  AlignVal = MinAlign(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
  // Bitcast %addr from i8* to EltTy*.
  Type *NewPtrType =
      EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
  Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
  unsigned VectorWidth = VecType->getNumElements();

  // The result vector, starting as the pass-through value.
  Value *VResult = Src0;
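
  // If every lane of the mask is a compile-time constant, no control flow is
  // needed: load only the lanes whose mask bit is known to be set and insert
  // them into the pass-through vector.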
  if (isConstantIntVector(Mask)) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
      Value *Gep =
          Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
      LoadInst *Load = Builder.CreateAlignedLoad(Gep, AlignVal);
      VResult =
          Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx));
    }
    CI->replaceAllUsesWith(VResult);
    CI->eraseFromParent();
    return;
  }

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    // Fill the "else" block, created in the previous iteration
    //
    //  %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ]
    //  %mask_1 = extractelement <16 x i1> %mask, i32 Idx
    //  br i1 %mask_1, label %cond.load, label %else
    //
    Value *Predicate =
        Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
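    // Predicate is the scalar i1 mask bit for this lane; it becomes the
    // condition of the branch created below that guards the per-lane load.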

    // Create "cond" block
    //
    //  %EltAddr = getelementptr i32* %1, i32 0
    //  %Elt = load i32* %EltAddr
    //  VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx
    //
    CondBlock = IfBlock->splitBasicBlock(InsertPt->getIterator(), "cond.load");
    Builder.SetInsertPoint(InsertPt);

    Value *Gep =
        Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
    LoadInst *Load = Builder.CreateAlignedLoad(Gep, AlignVal);
    Value *NewVResult =
        Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx));

    // Create "else" block, fill it in the next iteration
    BasicBlock *NewIfBlock =
        CondBlock->splitBasicBlock(InsertPt->getIterator(), "else");
    Builder.SetInsertPoint(InsertPt);
    Instruction *OldBr = IfBlock->getTerminator();
    BranchInst::Create(CondBlock, NewIfBlock, Predicate, OldBr);
    OldBr->eraseFromParent();
    PrevIfBlock = IfBlock;
    IfBlock = NewIfBlock;

    // Create the phi to join the new and previous value.
    PHINode *Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
    Phi->addIncoming(NewVResult, CondBlock);
    Phi->addIncoming(VResult, PrevIfBlock);
    VResult = Phi;
  }

  CI->replaceAllUsesWith(VResult);
  CI->eraseFromParent();
}

// Translate a masked store intrinsic, like
// void @llvm.masked.store(<16 x i32> %src, <16 x i32>* %addr, i32 align,
//                               <16 x i1> %mask)
// to a chain of basic blocks, storing the elements one by one if
// the appropriate mask bit is set
//
//   %1 = bitcast i8* %addr to i32*
//   %2 = extractelement <16 x i1> %mask, i32 0
//   br i1 %2, label %cond.store, label %else
//
// cond.store:                                       ; preds = %0
//   %3 = extractelement <16 x i32> %val, i32 0
//   %4 = getelementptr i32* %1, i32 0
//   store i32 %3, i32* %4
//   br label %else
//
// else:                                             ; preds = %0, %cond.store
//   %5 = extractelement <16 x i1> %mask, i32 1
//   br i1 %5, label %cond.store1, label %else2
//
// cond.store1:                                      ; preds = %else
//   %6 = extractelement <16 x i32> %val, i32 1
//   %7 = getelementptr i32* %1, i32 1
//   store i32 %6, i32* %7
//   br label %else2
//
static void scalarizeMaskedStore(CallInst *CI) {
  Value *Src = CI->getArgOperand(0);
  Value *Ptr = CI->getArgOperand(1);
  Value *Alignment = CI->getArgOperand(2);
  Value *Mask = CI->getArgOperand(3);

  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
  VectorType *VecType = cast<VectorType>(Src->getType());

  Type *EltTy = VecType->getElementType();

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();
  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  // Short-cut if the mask is all-true.
  if (isa<Constant>(Mask) && cast<Constant>(Mask)->isAllOnesValue()) {
    Builder.CreateAlignedStore(Src, Ptr, AlignVal);
    CI->eraseFromParent();
    return;
  }

  // Adjust alignment for the scalar instruction.
  AlignVal = MinAlign(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
  // Bitcast %addr from i8* to EltTy*.
  Type *NewPtrType =
      EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
  Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
  unsigned VectorWidth = VecType->getNumElements();
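
  // As with the load case, a compile-time-constant mask needs no control flow:
  // store only the lanes whose mask bit is known to be set.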
  if (isConstantIntVector(Mask)) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
      Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
      Value *Gep =
          Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
      Builder.CreateAlignedStore(OneElt, Gep, AlignVal);
    }
    CI->eraseFromParent();
    return;
  }

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    // Fill the "else" block, created in the previous iteration
    //
    //  %mask_1 = extractelement <16 x i1> %mask, i32 Idx
    //  br i1 %mask_1, label %cond.store, label %else
    //
    Value *Predicate =
        Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));

    // Create "cond" block
    //
    //  %OneElt = extractelement <16 x i32> %Src, i32 Idx
    //  %EltAddr = getelementptr i32* %1, i32 0
    //  %store i32 %OneElt, i32* %EltAddr
    //
    BasicBlock *CondBlock =
        IfBlock->splitBasicBlock(InsertPt->getIterator(), "cond.store");
    Builder.SetInsertPoint(InsertPt);

    Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
    Value *Gep =
        Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
    Builder.CreateAlignedStore(OneElt, Gep, AlignVal);

    // Create "else" block, fill it in the next iteration
    BasicBlock *NewIfBlock =
        CondBlock->splitBasicBlock(InsertPt->getIterator(), "else");
    Builder.SetInsertPoint(InsertPt);
    Instruction *OldBr = IfBlock->getTerminator();
    BranchInst::Create(CondBlock, NewIfBlock, Predicate, OldBr);
    OldBr->eraseFromParent();
    IfBlock = NewIfBlock;
  }

  CI->eraseFromParent();
}

// Translate a masked gather intrinsic like
// <16 x i32 > @llvm.masked.gather.v16i32( <16 x i32*> %Ptrs, i32 4,
//                               <16 x i1> %Mask, <16 x i32> %Src)
// to a chain of basic blocks, loading the elements one by one if
// the appropriate mask bit is set
//
// %Ptrs = getelementptr i32, i32* %base, <16 x i64> %ind
// %Mask0 = extractelement <16 x i1> %Mask, i32 0
// br i1 %Mask0, label %cond.load, label %else
//
// cond.load:
// %Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0
// %Load0 = load i32, i32* %Ptr0, align 4
// %Res0 = insertelement <16 x i32> undef, i32 %Load0, i32 0
// br label %else
//
// else:
// %res.phi.else = phi <16 x i32>[%Res0, %cond.load], [undef, %0]
// %Mask1 = extractelement <16 x i1> %Mask, i32 1
// br i1 %Mask1, label %cond.load1, label %else2
//
// cond.load1:
// %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
// %Load1 = load i32, i32* %Ptr1, align 4
// %Res1 = insertelement <16 x i32> %res.phi.else, i32 %Load1, i32 1
// br label %else2
//
// %Result = select <16 x i1> %Mask, <16 x i32> %res.phi.select, <16 x i32> %Src
// ret <16 x i32> %Result
static void scalarizeMaskedGather(CallInst *CI) {
  Value *Ptrs = CI->getArgOperand(0);
  Value *Alignment = CI->getArgOperand(1);
  Value *Mask = CI->getArgOperand(2);
  Value *Src0 = CI->getArgOperand(3);

  VectorType *VecType = cast<VectorType>(CI->getType());

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();
  BasicBlock *CondBlock = nullptr;
  BasicBlock *PrevIfBlock = CI->getParent();
  Builder.SetInsertPoint(InsertPt);
  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();

  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  Value *VResult = Src0;
  unsigned VectorWidth = VecType->getNumElements();

  // Shorten the way if the mask is a vector of constants.
  if (isConstantIntVector(Mask)) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
      Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
                                                "Ptr" + Twine(Idx));
      LoadInst *Load =
          Builder.CreateAlignedLoad(Ptr, AlignVal, "Load" + Twine(Idx));
      VResult = Builder.CreateInsertElement(
          VResult, Load, Builder.getInt32(Idx), "Res" + Twine(Idx));
    }
    CI->replaceAllUsesWith(VResult);
    CI->eraseFromParent();
    return;
  }
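
  // The mask is only known at run time: emit one conditional load block per
  // lane, chained by phis that merge each newly produced vector with the
  // previous one.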
  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    // Fill the "else" block, created in the previous iteration
    //
    //  %Mask1 = extractelement <16 x i1> %Mask, i32 1
    //  br i1 %Mask1, label %cond.load, label %else
    //
    Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx),
                                                    "Mask" + Twine(Idx));

    // Create "cond" block
    //
    //  %EltAddr = getelementptr i32* %1, i32 0
    //  %Elt = load i32* %EltAddr
    //  VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx
    //
    CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.load");
    Builder.SetInsertPoint(InsertPt);

    Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
                                              "Ptr" + Twine(Idx));
    LoadInst *Load =
        Builder.CreateAlignedLoad(Ptr, AlignVal, "Load" + Twine(Idx));
    Value *NewVResult = Builder.CreateInsertElement(VResult, Load,
                                                    Builder.getInt32(Idx),
                                                    "Res" + Twine(Idx));

    // Create "else" block, fill it in the next iteration
    BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else");
    Builder.SetInsertPoint(InsertPt);
    Instruction *OldBr = IfBlock->getTerminator();
    BranchInst::Create(CondBlock, NewIfBlock, Predicate, OldBr);
    OldBr->eraseFromParent();
    PrevIfBlock = IfBlock;
    IfBlock = NewIfBlock;

    // Create the phi to join the new and previous value.
    PHINode *Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
    Phi->addIncoming(NewVResult, CondBlock);
    Phi->addIncoming(VResult, PrevIfBlock);
    VResult = Phi;
  }

  CI->replaceAllUsesWith(VResult);
  CI->eraseFromParent();
}

// Translate a masked scatter intrinsic, like
// void @llvm.masked.scatter.v16i32(<16 x i32> %Src, <16 x i32*> %Ptrs, i32 4,
//                                  <16 x i1> %Mask)
// to a chain of basic blocks, storing the elements one by one if
// the appropriate mask bit is set.
//
// %Ptrs = getelementptr i32, i32* %ptr, <16 x i64> %ind
// %Mask0 = extractelement <16 x i1> %Mask, i32 0
// br i1 %Mask0, label %cond.store, label %else
//
// cond.store:
// %Elt0 = extractelement <16 x i32> %Src, i32 0
// %Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0
// store i32 %Elt0, i32* %Ptr0, align 4
// br label %else
//
// else:
// %Mask1 = extractelement <16 x i1> %Mask, i32 1
// br i1 %Mask1, label %cond.store1, label %else2
//
// cond.store1:
// %Elt1 = extractelement <16 x i32> %Src, i32 1
// %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
// store i32 %Elt1, i32* %Ptr1, align 4
//
static void scalarizeMaskedScatter(CallInst *CI) {
  Value *Src = CI->getArgOperand(0);
  Value *Ptrs = CI->getArgOperand(1);
  Value *Alignment = CI->getArgOperand(2);
  Value *Mask = CI->getArgOperand(3);

  assert(isa<VectorType>(Src->getType()) &&
         "Unexpected data type in masked scatter intrinsic");
  assert(isa<VectorType>(Ptrs->getType()) &&
         isa<PointerType>(Ptrs->getType()->getVectorElementType()) &&
         "Vector of pointers is expected in masked scatter intrinsic");

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();
  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
  unsigned VectorWidth = Src->getType()->getVectorNumElements();

  // Shorten the way if the mask is a vector of constants.
  if (isConstantIntVector(Mask)) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
      Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx),
                                                   "Elt" + Twine(Idx));
      Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
                                                "Ptr" + Twine(Idx));
      Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);
    }
    CI->eraseFromParent();
    return;
  }
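
  // The mask is only known at run time: emit one conditional store block per
  // lane.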
  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    // Fill the "else" block, created in the previous iteration
    //
    //  %Mask1 = extractelement <16 x i1> %Mask, i32 Idx
    //  br i1 %Mask1, label %cond.store, label %else
    //
    Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx),
                                                    "Mask" + Twine(Idx));

    // Create "cond" block
    //
    //  %Elt1 = extractelement <16 x i32> %Src, i32 1
    //  %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
    //  %store i32 %Elt1, i32* %Ptr1
    //
    BasicBlock *CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.store");
    Builder.SetInsertPoint(InsertPt);

    Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx),
                                                 "Elt" + Twine(Idx));
    Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
                                              "Ptr" + Twine(Idx));
    Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);

    // Create "else" block, fill it in the next iteration
    BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else");
    Builder.SetInsertPoint(InsertPt);
    Instruction *OldBr = IfBlock->getTerminator();
    BranchInst::Create(CondBlock, NewIfBlock, Predicate, OldBr);
    OldBr->eraseFromParent();
    IfBlock = NewIfBlock;
  }

  CI->eraseFromParent();
}

bool ScalarizeMaskedMemIntrin::runOnFunction(Function &F) {
  bool EverMadeChange = false;

  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator I = F.begin(); I != F.end();) {
      BasicBlock *BB = &*I++;
      bool ModifiedDTOnIteration = false;
      MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);

      // Restart BB iteration if the dominator tree of the Function was changed
      if (ModifiedDTOnIteration)
        break;
    }

    EverMadeChange |= MadeChange;
  }

  return EverMadeChange;
}
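
// Scan a basic block and hand every call instruction to optimizeCallInst. The
// iterator is advanced before the callee runs because scalarization may erase
// the current instruction and split the block.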
bool ScalarizeMaskedMemIntrin::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) {
  bool MadeChange = false;

  BasicBlock::iterator CurInstIterator = BB.begin();
  while (CurInstIterator != BB.end()) {
    if (CallInst *CI = dyn_cast<CallInst>(&*CurInstIterator++))
      MadeChange |= optimizeCallInst(CI, ModifiedDT);

    if (ModifiedDT)
      return true;
  }

  return MadeChange;
}
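
// Dispatch on the masked intrinsic kind and scalarize it only when TTI reports
// that the target cannot lower the masked operation natively.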
bool ScalarizeMaskedMemIntrin::optimizeCallInst(CallInst *CI,
                                                bool &ModifiedDT) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II) {
    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::masked_load:
      // Scalarize unsupported vector masked load
      if (!TTI->isLegalMaskedLoad(CI->getType())) {
        scalarizeMaskedLoad(CI);
        ModifiedDT = true;
        return true;
      }
      break;
    case Intrinsic::masked_store:
      if (!TTI->isLegalMaskedStore(CI->getArgOperand(0)->getType())) {
        scalarizeMaskedStore(CI);
        ModifiedDT = true;
        return true;
      }
      break;
    case Intrinsic::masked_gather:
      if (!TTI->isLegalMaskedGather(CI->getType())) {
        scalarizeMaskedGather(CI);
        ModifiedDT = true;
        return true;
      }
      break;
    case Intrinsic::masked_scatter:
      if (!TTI->isLegalMaskedScatter(CI->getArgOperand(0)->getType())) {
        scalarizeMaskedScatter(CI);
        ModifiedDT = true;
        return true;
      }
      break;
    }
  }

  return false;
}