//===- InstCombineVectorOps.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements instcombine for ExtractElement, InsertElement and
// ShuffleVector.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

#define DEBUG_TYPE "instcombine"

using namespace llvm;
using namespace PatternMatch;

STATISTIC(NumAggregateReconstructionsSimplified,
          "Number of aggregate reconstructions turned into reuse of the "
          "original aggregate");

/// Return true if the value is cheaper to scalarize than it is to leave as a
/// vector operation. If the extract index \p EI is a constant integer then
/// some operations may be cheap to scalarize.
///
/// FIXME: It's possible to create more instructions than previously existed.
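///
/// Illustrative example (hypothetical IR, in the shorthand used by the
/// comments below): with a constant extract index,
///   extelt (add <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>), 1
/// is cheap to scalarize when the add has one use, because the constant
/// operand extracts for free:
///   add i32 (extelt %x, 1), 2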
static bool cheapToScalarize(Value *V, Value *EI) {
  ConstantInt *CEI = dyn_cast<ConstantInt>(EI);

  // If we can pick a scalar constant value out of a vector, that is free.
  if (auto *C = dyn_cast<Constant>(V))
    return CEI || C->getSplatValue();

  if (CEI && match(V, m_Intrinsic<Intrinsic::experimental_stepvector>())) {
    ElementCount EC = cast<VectorType>(V->getType())->getElementCount();
    // The index needs to be lower than the minimum size of the vector,
    // because for a scalable vector the actual size is only known at run time.
    return CEI->getValue().ult(EC.getKnownMinValue());
  }

  // An insertelement to the same constant index as our extract will simplify
  // to the scalar inserted element. An insertelement to a different constant
  // index is irrelevant to our extract.
  if (match(V, m_InsertElt(m_Value(), m_Value(), m_ConstantInt())))
    return CEI;

  if (match(V, m_OneUse(m_Load(m_Value()))))
    return true;

  if (match(V, m_OneUse(m_UnOp())))
    return true;

  Value *V0, *V1;
  if (match(V, m_OneUse(m_BinOp(m_Value(V0), m_Value(V1)))))
    if (cheapToScalarize(V0, EI) || cheapToScalarize(V1, EI))
      return true;

  CmpInst::Predicate UnusedPred;
  if (match(V, m_OneUse(m_Cmp(UnusedPred, m_Value(V0), m_Value(V1)))))
    if (cheapToScalarize(V0, EI) || cheapToScalarize(V1, EI))
      return true;

  return false;
}

// If we have a PHI node with a vector type that is only used to feed
// itself and be an operand of extractelement at a constant location,
// try to replace the PHI of the vector type with a PHI of a scalar type.
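//
// Illustrative sketch (hypothetical IR): a vector accumulator that is only
// read at a constant lane,
//   %v = phi <4 x i32> [ %init, %entry ], [ %v.next, %loop ]
//   %v.next = add <4 x i32> %v, <i32 1, i32 1, i32 1, i32 1>
//   %e = extractelement <4 x i32> %v, i64 0
// becomes a scalar PHI plus a scalar add:
//   %s = phi i32 [ %init.elt, %entry ], [ %s.next, %loop ]
//   %s.next = add i32 %s, 1
// where %init.elt is an extract of lane 0 inserted in the predecessor.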
Instruction *InstCombinerImpl::scalarizePHI(ExtractElementInst &EI,
                                            PHINode *PN) {
  SmallVector<Instruction *, 2> Extracts;
  // The users we want the PHI to have are:
  // 1) The EI ExtractElement (we already know this)
  // 2) Possibly more ExtractElements with the same index.
  // 3) Another operand, which will feed back into the PHI.
  Instruction *PHIUser = nullptr;
  for (auto *U : PN->users()) {
    if (ExtractElementInst *EU = dyn_cast<ExtractElementInst>(U)) {
      if (EI.getIndexOperand() == EU->getIndexOperand())
        Extracts.push_back(EU);
      else
        return nullptr;
    } else if (!PHIUser) {
      PHIUser = cast<Instruction>(U);
    } else {
      return nullptr;
    }
  }

  if (!PHIUser)
    return nullptr;

  // Verify that this PHI user has one use, which is the PHI itself,
  // and that it is a binary operation which is cheap to scalarize.
  // Otherwise, return nullptr.
  if (!PHIUser->hasOneUse() || !(PHIUser->user_back() == PN) ||
      !(isa<BinaryOperator>(PHIUser)) ||
      !cheapToScalarize(PHIUser, EI.getIndexOperand()))
    return nullptr;

  // Create a scalar PHI node that will replace the vector PHI node
  // just before the current PHI node.
  PHINode *scalarPHI = cast<PHINode>(InsertNewInstWith(
      PHINode::Create(EI.getType(), PN->getNumIncomingValues(), ""),
      PN->getIterator()));
  // Scalarize each PHI operand.
  for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
    Value *PHIInVal = PN->getIncomingValue(i);
    BasicBlock *inBB = PN->getIncomingBlock(i);
    Value *Elt = EI.getIndexOperand();
    // If the operand is the PHI induction variable:
    if (PHIInVal == PHIUser) {
      // Scalarize the binary operation. Its first operand is the
      // scalar PHI, and the second operand is extracted from the other
      // vector operand.
      BinaryOperator *B0 = cast<BinaryOperator>(PHIUser);
      unsigned opId = (B0->getOperand(0) == PN) ? 1 : 0;
      Value *Op = InsertNewInstWith(
          ExtractElementInst::Create(B0->getOperand(opId), Elt,
                                     B0->getOperand(opId)->getName() + ".Elt"),
          B0->getIterator());
      Value *newPHIUser = InsertNewInstWith(
          BinaryOperator::CreateWithCopiedFlags(B0->getOpcode(), scalarPHI, Op,
                                                B0),
          B0->getIterator());
      scalarPHI->addIncoming(newPHIUser, inBB);
    } else {
      // Scalarize PHI input:
      Instruction *newEI = ExtractElementInst::Create(PHIInVal, Elt, "");
      // Insert the new instruction into the predecessor basic block.
      Instruction *pos = dyn_cast<Instruction>(PHIInVal);
      BasicBlock::iterator InsertPos;
      if (pos && !isa<PHINode>(pos)) {
        InsertPos = ++pos->getIterator();
      } else {
        InsertPos = inBB->getFirstInsertionPt();
      }

      InsertNewInstWith(newEI, InsertPos);

      scalarPHI->addIncoming(newEI, inBB);
    }
  }

  for (auto *E : Extracts) {
    replaceInstUsesWith(*E, scalarPHI);
    // Add old extract to worklist for DCE.
    addToWorklist(E);
  }

  return &EI;
}

Instruction *InstCombinerImpl::foldBitcastExtElt(ExtractElementInst &Ext) {
  Value *X;
  uint64_t ExtIndexC;
  if (!match(Ext.getVectorOperand(), m_BitCast(m_Value(X))) ||
      !match(Ext.getIndexOperand(), m_ConstantInt(ExtIndexC)))
    return nullptr;

  ElementCount NumElts =
      cast<VectorType>(Ext.getVectorOperandType())->getElementCount();
  Type *DestTy = Ext.getType();
  unsigned DestWidth = DestTy->getPrimitiveSizeInBits();
  bool IsBigEndian = DL.isBigEndian();

  // If we are casting an integer to vector and extracting a portion, that is
  // a shift-right and truncate.
  if (X->getType()->isIntegerTy()) {
    assert(isa<FixedVectorType>(Ext.getVectorOperand()->getType()) &&
           "Expected fixed vector type for bitcast from scalar integer");

    // Big endian requires adjusting the extract index since MSB is at index 0.
    // LittleEndian: extelt (bitcast i32 X to v4i8), 0 -> trunc i32 X to i8
    // BigEndian: extelt (bitcast i32 X to v4i8), 0 -> trunc i32 (X >> 24) to i8
    if (IsBigEndian)
      ExtIndexC = NumElts.getKnownMinValue() - 1 - ExtIndexC;
    unsigned ShiftAmountC = ExtIndexC * DestWidth;
    if (!ShiftAmountC ||
        (isDesirableIntType(X->getType()->getPrimitiveSizeInBits()) &&
         Ext.getVectorOperand()->hasOneUse())) {
      if (ShiftAmountC)
        X = Builder.CreateLShr(X, ShiftAmountC, "extelt.offset");
      if (DestTy->isFloatingPointTy()) {
        Type *DstIntTy = IntegerType::getIntNTy(X->getContext(), DestWidth);
        Value *Trunc = Builder.CreateTrunc(X, DstIntTy);
        return new BitCastInst(Trunc, DestTy);
      }
      return new TruncInst(X, DestTy);
    }
  }

  if (!X->getType()->isVectorTy())
    return nullptr;

  // If this extractelement is using a bitcast from a vector of the same number
  // of elements, see if we can find the source element from the source vector:
  // extelt (bitcast VecX), IndexC --> bitcast X[IndexC]
  auto *SrcTy = cast<VectorType>(X->getType());
  ElementCount NumSrcElts = SrcTy->getElementCount();
  if (NumSrcElts == NumElts)
    if (Value *Elt = findScalarElement(X, ExtIndexC))
      return new BitCastInst(Elt, DestTy);

  assert(NumSrcElts.isScalable() == NumElts.isScalable() &&
         "Src and Dst must be the same sort of vector type");

  // If the source elements are wider than the destination, try to shift and
  // truncate a subset of scalar bits of an insert op.
  if (NumSrcElts.getKnownMinValue() < NumElts.getKnownMinValue()) {
    Value *Scalar;
    Value *Vec;
    uint64_t InsIndexC;
    if (!match(X, m_InsertElt(m_Value(Vec), m_Value(Scalar),
                              m_ConstantInt(InsIndexC))))
      return nullptr;

    // The extract must be from the subset of vector elements that we inserted
    // into. Example: if we inserted element 1 of a <2 x i64> and we are
    // extracting an i16 (narrowing ratio = 4), then this extract must be from 1
    // of elements 4-7 of the bitcasted vector.
    unsigned NarrowingRatio =
        NumElts.getKnownMinValue() / NumSrcElts.getKnownMinValue();

    if (ExtIndexC / NarrowingRatio != InsIndexC) {
      // Remove the insertelement, if we don't use the inserted element.
      // extractelement (bitcast (insertelement (Vec, b)), a) ->
      // extractelement (bitcast (Vec), a)
      // FIXME: this should be moved to SimplifyDemandedVectorElts,
      // once scalable vectors are supported.
      if (X->hasOneUse() && Ext.getVectorOperand()->hasOneUse()) {
        Value *NewBC = Builder.CreateBitCast(Vec, Ext.getVectorOperandType());
        return ExtractElementInst::Create(NewBC, Ext.getIndexOperand());
      }
      return nullptr;
    }

    // We are extracting part of the original scalar. How that scalar is
    // inserted into the vector depends on the endian-ness. Example:
    //              Vector Byte Elt Index:    0  1  2  3  4  5  6  7
    //                                       +--+--+--+--+--+--+--+--+
    // inselt <2 x i32> V, <i32> S, 1:       |V0|V1|V2|V3|S0|S1|S2|S3|
    // extelt <4 x i16> V', 3:               |                 |S2|S3|
    //                                       +--+--+--+--+--+--+--+--+
    // If this is little-endian, S2|S3 are the MSB of the 32-bit 'S' value.
    // If this is big-endian, S2|S3 are the LSB of the 32-bit 'S' value.
    // In this example, we must right-shift little-endian. Big-endian is just a
    // truncate.
    unsigned Chunk = ExtIndexC % NarrowingRatio;
    if (IsBigEndian)
      Chunk = NarrowingRatio - 1 - Chunk;

    // Bail out if this is an FP vector to FP vector sequence. That would take
    // more instructions than we started with unless there is no shift, and it
    // may not be handled as well in the backend.
    bool NeedSrcBitcast = SrcTy->getScalarType()->isFloatingPointTy();
    bool NeedDestBitcast = DestTy->isFloatingPointTy();
    if (NeedSrcBitcast && NeedDestBitcast)
      return nullptr;

    unsigned SrcWidth = SrcTy->getScalarSizeInBits();
    unsigned ShAmt = Chunk * DestWidth;

    // TODO: This limitation is more strict than necessary. We could sum the
    // number of new instructions and subtract the number eliminated to know if
    // we can proceed.
    if (!X->hasOneUse() || !Ext.getVectorOperand()->hasOneUse())
      if (NeedSrcBitcast || NeedDestBitcast)
        return nullptr;

    if (NeedSrcBitcast) {
      Type *SrcIntTy = IntegerType::getIntNTy(Scalar->getContext(), SrcWidth);
      Scalar = Builder.CreateBitCast(Scalar, SrcIntTy);
    }

    if (ShAmt) {
      // Bail out if we could end with more instructions than we started with.
      if (!Ext.getVectorOperand()->hasOneUse())
        return nullptr;
      Scalar = Builder.CreateLShr(Scalar, ShAmt);
    }

    if (NeedDestBitcast) {
      Type *DestIntTy = IntegerType::getIntNTy(Scalar->getContext(), DestWidth);
      return new BitCastInst(Builder.CreateTrunc(Scalar, DestIntTy), DestTy);
    }
    return new TruncInst(Scalar, DestTy);
  }

  return nullptr;
}

/// Find the elements of V demanded by UserInstr.
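///
/// For example (illustrative): a user `extractelement <4 x i32> %v, i64 2`
/// demands only element 2 of %v, while a shufflevector user demands exactly
/// the lanes of %v that its mask selects.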
static APInt findDemandedEltsBySingleUser(Value *V, Instruction *UserInstr) {
  unsigned VWidth = cast<FixedVectorType>(V->getType())->getNumElements();

  // Conservatively assume that all elements are needed.
  APInt UsedElts(APInt::getAllOnes(VWidth));

  switch (UserInstr->getOpcode()) {
  case Instruction::ExtractElement: {
    ExtractElementInst *EEI = cast<ExtractElementInst>(UserInstr);
    assert(EEI->getVectorOperand() == V);
    ConstantInt *EEIIndexC = dyn_cast<ConstantInt>(EEI->getIndexOperand());
    if (EEIIndexC && EEIIndexC->getValue().ult(VWidth)) {
      UsedElts = APInt::getOneBitSet(VWidth, EEIIndexC->getZExtValue());
    }
    break;
  }
  case Instruction::ShuffleVector: {
    ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(UserInstr);
    unsigned MaskNumElts =
        cast<FixedVectorType>(UserInstr->getType())->getNumElements();

    UsedElts = APInt(VWidth, 0);
    for (unsigned i = 0; i < MaskNumElts; i++) {
      unsigned MaskVal = Shuffle->getMaskValue(i);
      if (MaskVal == -1u || MaskVal >= 2 * VWidth)
        continue;
      if (Shuffle->getOperand(0) == V && (MaskVal < VWidth))
        UsedElts.setBit(MaskVal);
      if (Shuffle->getOperand(1) == V &&
          ((MaskVal >= VWidth) && (MaskVal < 2 * VWidth)))
        UsedElts.setBit(MaskVal - VWidth);
    }
    break;
  }
  default:
    break;
  }
  return UsedElts;
}

/// Find the union of the elements of V demanded by all its users.
/// If it is known by querying findDemandedEltsBySingleUser that
/// no user demands an element of V, then the corresponding bit
/// remains unset in the returned value.
static APInt findDemandedEltsByAllUsers(Value *V) {
  unsigned VWidth = cast<FixedVectorType>(V->getType())->getNumElements();

  APInt UnionUsedElts(VWidth, 0);
  for (const Use &U : V->uses()) {
    if (Instruction *I = dyn_cast<Instruction>(U.getUser())) {
      UnionUsedElts |= findDemandedEltsBySingleUser(V, I);
    } else {
      UnionUsedElts = APInt::getAllOnes(VWidth);
      break;
    }

    if (UnionUsedElts.isAllOnes())
      break;
  }

  return UnionUsedElts;
}

/// Given a constant index for an extractelement or insertelement instruction,
/// return it with the canonical type if it isn't already canonical. We
/// arbitrarily pick 64 bit as our canonical type. The actual bitwidth doesn't
/// matter; we just want a consistent type to simplify CSE.
static ConstantInt *getPreferredVectorIndex(ConstantInt *IndexC) {
  const unsigned IndexBW = IndexC->getBitWidth();
  if (IndexBW == 64 || IndexC->getValue().getActiveBits() > 64)
    return nullptr;
  return ConstantInt::get(IndexC->getContext(),
                          IndexC->getValue().zextOrTrunc(64));
}

Instruction *InstCombinerImpl::visitExtractElementInst(ExtractElementInst &EI) {
  Value *SrcVec = EI.getVectorOperand();
  Value *Index = EI.getIndexOperand();
  if (Value *V = simplifyExtractElementInst(SrcVec, Index,
                                            SQ.getWithInstruction(&EI)))
    return replaceInstUsesWith(EI, V);

  // extractelt (select %x, %vec1, %vec2), %const ->
  // select %x, %vec1[%const], %vec2[%const]
  // TODO: Support constant folding of multiple select operands:
  // extractelt (select %x, %vec1, %vec2), (select %x, %c1, %c2)
  // If the extractelement will for instance try to do out of bounds accesses
  // because of the values of %c1 and/or %c2, the sequence could be optimized
  // early. This is currently not possible because constant folding will reach
  // an unreachable assertion if it doesn't find a constant operand.
  if (SelectInst *SI = dyn_cast<SelectInst>(EI.getVectorOperand()))
    if (SI->getCondition()->getType()->isIntegerTy() &&
        isa<Constant>(EI.getIndexOperand()))
      if (Instruction *R = FoldOpIntoSelect(EI, SI))
        return R;

  // If extracting a specified index from the vector, see if we can recursively
  // find a previously computed scalar that was inserted into the vector.
  auto *IndexC = dyn_cast<ConstantInt>(Index);
  if (IndexC) {
    // Canonicalize the type of constant indices to i64 to simplify CSE.
    if (auto *NewIdx = getPreferredVectorIndex(IndexC))
      return replaceOperand(EI, 1, NewIdx);

    ElementCount EC = EI.getVectorOperandType()->getElementCount();
    unsigned NumElts = EC.getKnownMinValue();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(SrcVec)) {
      Intrinsic::ID IID = II->getIntrinsicID();
      // The index needs to be lower than the minimum size of the vector,
      // because for a scalable vector the actual size is only known at run
      // time.
      if (IID == Intrinsic::experimental_stepvector &&
          IndexC->getValue().ult(NumElts)) {
        Type *Ty = EI.getType();
        unsigned BitWidth = Ty->getIntegerBitWidth();
        Value *Idx;
        // Return the index when its value does not exceed the allowed limit
        // for the element type of the vector; otherwise return poison.
        if (IndexC->getValue().getActiveBits() <= BitWidth)
          Idx = ConstantInt::get(Ty, IndexC->getValue().zextOrTrunc(BitWidth));
        else
          Idx = PoisonValue::get(Ty);
        return replaceInstUsesWith(EI, Idx);
      }
    }

    // InstSimplify should handle cases where the index is invalid.
    // For a fixed-length vector, it's invalid to extract an out-of-range
    // element.
    if (!EC.isScalable() && IndexC->getValue().uge(NumElts))
      return nullptr;
  }

  if (Instruction *I = foldBitcastExtElt(EI))
    return I;

  // If there's a vector PHI feeding a scalar use through this extractelement
  // instruction, try to scalarize the PHI.
  if (auto *Phi = dyn_cast<PHINode>(SrcVec))
    if (Instruction *ScalarPHI = scalarizePHI(EI, Phi))
      return ScalarPHI;

  // TODO: come up with an n-ary matcher that subsumes both the unary and
  // binary matchers.
  UnaryOperator *UO;
  if (match(SrcVec, m_UnOp(UO)) && cheapToScalarize(SrcVec, Index)) {
    // extelt (unop X), Index --> unop (extelt X, Index)
    Value *X = UO->getOperand(0);
    Value *E = Builder.CreateExtractElement(X, Index);
    return UnaryOperator::CreateWithCopiedFlags(UO->getOpcode(), E, UO);
  }

  BinaryOperator *BO;
  if (match(SrcVec, m_BinOp(BO)) && cheapToScalarize(SrcVec, Index)) {
    // extelt (binop X, Y), Index --> binop (extelt X, Index), (extelt Y, Index)
    Value *X = BO->getOperand(0), *Y = BO->getOperand(1);
    Value *E0 = Builder.CreateExtractElement(X, Index);
    Value *E1 = Builder.CreateExtractElement(Y, Index);
    return BinaryOperator::CreateWithCopiedFlags(BO->getOpcode(), E0, E1, BO);
  }

  Value *X, *Y;
  CmpInst::Predicate Pred;
  if (match(SrcVec, m_Cmp(Pred, m_Value(X), m_Value(Y))) &&
      cheapToScalarize(SrcVec, Index)) {
    // extelt (cmp X, Y), Index --> cmp (extelt X, Index), (extelt Y, Index)
    Value *E0 = Builder.CreateExtractElement(X, Index);
    Value *E1 = Builder.CreateExtractElement(Y, Index);
    return CmpInst::Create(cast<CmpInst>(SrcVec)->getOpcode(), Pred, E0, E1);
  }

  if (auto *I = dyn_cast<Instruction>(SrcVec)) {
    if (auto *IE = dyn_cast<InsertElementInst>(I)) {
      // InstSimplify already handled the case where the indices are constants
      // and equal. So if both indices are constants here, they must differ,
      // and we can extract from the pre-inserted value instead.
      if (isa<Constant>(IE->getOperand(2)) && IndexC)
        return replaceOperand(EI, 0, IE->getOperand(0));
    } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
      auto *VecType = cast<VectorType>(GEP->getType());
      ElementCount EC = VecType->getElementCount();
      uint64_t IdxVal = IndexC ? IndexC->getZExtValue() : 0;
      if (IndexC && IdxVal < EC.getKnownMinValue() && GEP->hasOneUse()) {
        // Find out why we have a vector result - these are a few examples:
        //  1. We have a scalar pointer and a vector of indices, or
        //  2. We have a vector of pointers and a scalar index, or
        //  3. We have a vector of pointers and a vector of indices, etc.
        // Here we only consider combining when there is exactly one vector
        // operand, since the optimization is less obviously a win due to
        // needing more than one extractelement.

        unsigned VectorOps =
            llvm::count_if(GEP->operands(), [](const Value *V) {
              return isa<VectorType>(V->getType());
            });
        if (VectorOps == 1) {
          Value *NewPtr = GEP->getPointerOperand();
          if (isa<VectorType>(NewPtr->getType()))
            NewPtr = Builder.CreateExtractElement(NewPtr, IndexC);

          SmallVector<Value *> NewOps;
          for (unsigned I = 1; I != GEP->getNumOperands(); ++I) {
            Value *Op = GEP->getOperand(I);
            if (isa<VectorType>(Op->getType()))
              NewOps.push_back(Builder.CreateExtractElement(Op, IndexC));
            else
              NewOps.push_back(Op);
          }

          GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
              GEP->getSourceElementType(), NewPtr, NewOps);
          NewGEP->setIsInBounds(GEP->isInBounds());
          return NewGEP;
        }
      }
    } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
      // If this is extracting an element from a shufflevector, figure out
      // where it came from and extract from the appropriate input element
      // instead. Restrict the following transformation to fixed-length
      // vectors.
      if (isa<FixedVectorType>(SVI->getType()) && isa<ConstantInt>(Index)) {
        int SrcIdx =
            SVI->getMaskValue(cast<ConstantInt>(Index)->getZExtValue());
        Value *Src;
        unsigned LHSWidth = cast<FixedVectorType>(SVI->getOperand(0)->getType())
                                ->getNumElements();

        if (SrcIdx < 0)
          return replaceInstUsesWith(EI, PoisonValue::get(EI.getType()));
        if (SrcIdx < (int)LHSWidth)
          Src = SVI->getOperand(0);
        else {
          SrcIdx -= LHSWidth;
          Src = SVI->getOperand(1);
        }
        Type *Int64Ty = Type::getInt64Ty(EI.getContext());
        return ExtractElementInst::Create(
            Src, ConstantInt::get(Int64Ty, SrcIdx, false));
      }
    } else if (auto *CI = dyn_cast<CastInst>(I)) {
      // Canonicalize extractelement(cast) -> cast(extractelement).
      // Bitcasts can change the number of vector elements, and they cost
      // nothing.
      if (CI->hasOneUse() && (CI->getOpcode() != Instruction::BitCast)) {
        Value *EE = Builder.CreateExtractElement(CI->getOperand(0), Index);
        return CastInst::Create(CI->getOpcode(), EE, EI.getType());
      }
    }
  }

  // Run demanded elements after other transforms, as this can drop flags on
  // binops. If there are two paths to the same final result, we prefer the
  // one which doesn't force us to drop flags.
  if (IndexC) {
    ElementCount EC = EI.getVectorOperandType()->getElementCount();
    unsigned NumElts = EC.getKnownMinValue();
    // This instruction only demands the single element from the input vector.
    // Skip for scalable types; the number of elements is unknown at
    // compile-time.
    if (!EC.isScalable() && NumElts != 1) {
      // If the input vector has a single use, simplify it based on this use
      // property.
      if (SrcVec->hasOneUse()) {
        APInt PoisonElts(NumElts, 0);
        APInt DemandedElts(NumElts, 0);
        DemandedElts.setBit(IndexC->getZExtValue());
        if (Value *V =
                SimplifyDemandedVectorElts(SrcVec, DemandedElts, PoisonElts))
          return replaceOperand(EI, 0, V);
      } else {
        // If the input vector has multiple uses, simplify it based on a union
        // of all elements used.
        APInt DemandedElts = findDemandedEltsByAllUsers(SrcVec);
        if (!DemandedElts.isAllOnes()) {
          APInt PoisonElts(NumElts, 0);
          if (Value *V = SimplifyDemandedVectorElts(
                  SrcVec, DemandedElts, PoisonElts, 0 /* Depth */,
                  true /* AllowMultipleUsers */)) {
            if (V != SrcVec) {
              Worklist.addValue(SrcVec);
              SrcVec->replaceAllUsesWith(V);
              return &EI;
            }
          }
        }
      }
    }
  }
  return nullptr;
}

/// If V is a shuffle of values that ONLY returns elements from either LHS or
/// RHS, return the shuffle mask and true. Otherwise, return false.
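///
/// Illustrative example (hypothetical IR): with LHS = %a and RHS = %b
/// (both <4 x i32>),
///   %e = extractelement <4 x i32> %b, i64 3
///   %v = insertelement <4 x i32> %a, i32 %e, i64 1
/// is a single shuffle of %a and %b with mask <0, 7, 2, 3>.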
static bool collectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
                                         SmallVectorImpl<int> &Mask) {
  assert(LHS->getType() == RHS->getType() &&
         "Invalid CollectSingleShuffleElements");
  unsigned NumElts = cast<FixedVectorType>(V->getType())->getNumElements();

  if (match(V, m_Undef())) {
    Mask.assign(NumElts, -1);
    return true;
  }

  if (V == LHS) {
    for (unsigned i = 0; i != NumElts; ++i)
      Mask.push_back(i);
    return true;
  }

  if (V == RHS) {
    for (unsigned i = 0; i != NumElts; ++i)
      Mask.push_back(i + NumElts);
    return true;
  }

  if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert of an extract from some other vector, include it.
    Value *VecOp = IEI->getOperand(0);
    Value *ScalarOp = IEI->getOperand(1);
    Value *IdxOp = IEI->getOperand(2);

    if (!isa<ConstantInt>(IdxOp))
      return false;
    unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();

    if (isa<PoisonValue>(ScalarOp)) { // inserting poison into vector.
      // We can handle this if the vector we are inserting into is
      // transitively ok.
      if (collectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
        // If so, update the mask to reflect the inserted poison.
        Mask[InsertedIdx] = -1;
        return true;
      }
    } else if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
      if (isa<ConstantInt>(EI->getOperand(1))) {
        unsigned ExtractedIdx =
            cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
        unsigned NumLHSElts =
            cast<FixedVectorType>(LHS->getType())->getNumElements();

        // This must be extracting from either LHS or RHS.
        if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
          // We can handle this if the vector we are inserting into is
          // transitively ok.
          if (collectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
            // If so, update the mask to reflect the inserted value.
            if (EI->getOperand(0) == LHS) {
              Mask[InsertedIdx % NumElts] = ExtractedIdx;
            } else {
              assert(EI->getOperand(0) == RHS);
              Mask[InsertedIdx % NumElts] = ExtractedIdx + NumLHSElts;
            }
            return true;
          }
        }
      }
    }
  }

  return false;
}

/// If we have insertion into a vector that is wider than the vector that we
/// are extracting from, try to widen the source vector to allow a single
/// shufflevector to replace one or more insert/extract pairs.
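///
/// Illustrative example (hypothetical IR): extracts from a <2 x i8> %small
/// that feed inserts into a <4 x i8> can instead extract from
///   %wide = shufflevector <2 x i8> %small, <2 x i8> poison, mask <0, 1, -1, -1>
/// so that the whole insert/extract chain can later become a single shuffle.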
static bool replaceExtractElements(InsertElementInst *InsElt,
                                   ExtractElementInst *ExtElt,
                                   InstCombinerImpl &IC) {
  auto *InsVecType = cast<FixedVectorType>(InsElt->getType());
  auto *ExtVecType = cast<FixedVectorType>(ExtElt->getVectorOperandType());
  unsigned NumInsElts = InsVecType->getNumElements();
  unsigned NumExtElts = ExtVecType->getNumElements();

  // The inserted-to vector must be wider than the extracted-from vector.
  if (InsVecType->getElementType() != ExtVecType->getElementType() ||
      NumExtElts >= NumInsElts)
    return false;

  // Create a shuffle mask to widen the extracted-from vector using poison
  // values. The mask selects all of the values of the original vector followed
  // by as many poison values as needed to create a vector of the same length
  // as the inserted-to vector.
  SmallVector<int, 16> ExtendMask;
  for (unsigned i = 0; i < NumExtElts; ++i)
    ExtendMask.push_back(i);
  for (unsigned i = NumExtElts; i < NumInsElts; ++i)
    ExtendMask.push_back(-1);

  Value *ExtVecOp = ExtElt->getVectorOperand();
  auto *ExtVecOpInst = dyn_cast<Instruction>(ExtVecOp);
  BasicBlock *InsertionBlock = (ExtVecOpInst && !isa<PHINode>(ExtVecOpInst))
                                   ? ExtVecOpInst->getParent()
                                   : ExtElt->getParent();

  // TODO: This restriction matches the basic block check below when creating
  // new extractelement instructions. If that limitation is removed, this one
  // could also be removed. But for now, we just bail out to ensure that we
  // will replace the extractelement instruction that is feeding our
  // insertelement instruction. This allows the insertelement to then be
  // replaced by a shufflevector. If the insertelement is not replaced, we can
  // induce infinite looping because there's an optimization for extractelement
  // that will delete our widening shuffle. This would trigger another attempt
  // here to create that shuffle, and we spin forever.
  if (InsertionBlock != InsElt->getParent())
    return false;

  // TODO: This restriction matches the check in visitInsertElementInst() and
  // prevents an infinite loop caused by not turning the extract/insert pair
  // into a shuffle. We really should not need either check, but we're lacking
  // folds for shufflevectors because we're afraid to generate shuffle masks
  // that the backend can't handle.
  if (InsElt->hasOneUse() && isa<InsertElementInst>(InsElt->user_back()))
    return false;

  auto *WideVec = new ShuffleVectorInst(ExtVecOp, ExtendMask);

  // Insert the new shuffle after the vector operand of the extract is defined
  // (as long as it's not a PHI) or at the start of the basic block of the
  // extract, so any subsequent extracts in the same basic block can use it.
  // TODO: Insert before the earliest ExtractElementInst that is replaced.
  if (ExtVecOpInst && !isa<PHINode>(ExtVecOpInst))
    WideVec->insertAfter(ExtVecOpInst);
  else
    IC.InsertNewInstWith(WideVec, ExtElt->getParent()->getFirstInsertionPt());

  // Replace extracts from the original narrow vector with extracts from the
  // new wide vector.
  for (User *U : ExtVecOp->users()) {
    ExtractElementInst *OldExt = dyn_cast<ExtractElementInst>(U);
    if (!OldExt || OldExt->getParent() != WideVec->getParent())
      continue;
    auto *NewExt = ExtractElementInst::Create(WideVec, OldExt->getOperand(1));
    IC.InsertNewInstWith(NewExt, OldExt->getIterator());
    IC.replaceInstUsesWith(*OldExt, NewExt);
    // Add the old extracts to the worklist for DCE. We can't remove the
    // extracts directly, because they may still be used by the calling code.
    IC.addToWorklist(OldExt);
  }

  return true;
}

/// We are building a shuffle to create V, which is a sequence of
/// insertelement/extractelement pairs. If PermittedRHS is set, then we must
/// either use it or not rely on the second vector source. Return a std::pair
/// containing the left and right vectors of the proposed shuffle (or 0), and
/// set the Mask parameter as required.
///
/// Note: we intentionally don't try to fold earlier shuffles since they have
/// often been chosen carefully to be efficiently implementable on the target.
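///
/// Illustrative example (hypothetical IR): the chain
///   %e0 = extractelement <4 x i32> %a, i64 0
///   %i0 = insertelement <4 x i32> poison, i32 %e0, i64 0
///   %e1 = extractelement <4 x i32> %b, i64 1
///   %i1 = insertelement <4 x i32> %i0, i32 %e1, i64 1
/// collects to the operand pair (%a, %b) with mask <0, 5, -1, -1>.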
using ShuffleOps = std::pair<Value *, Value *>;

static ShuffleOps collectShuffleElements(Value *V, SmallVectorImpl<int> &Mask,
                                         Value *PermittedRHS,
                                         InstCombinerImpl &IC, bool &Rerun) {
  assert(V->getType()->isVectorTy() && "Invalid shuffle!");
  unsigned NumElts = cast<FixedVectorType>(V->getType())->getNumElements();

  if (match(V, m_Poison())) {
    Mask.assign(NumElts, -1);
    return std::make_pair(
        PermittedRHS ? PoisonValue::get(PermittedRHS->getType()) : V, nullptr);
  }

  if (isa<ConstantAggregateZero>(V)) {
    Mask.assign(NumElts, 0);
    return std::make_pair(V, nullptr);
  }

  if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert of an extract from some other vector, include it.
    Value *VecOp = IEI->getOperand(0);
    Value *ScalarOp = IEI->getOperand(1);
    Value *IdxOp = IEI->getOperand(2);

    if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
      if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp)) {
        unsigned ExtractedIdx =
            cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
        unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();

        // Either the extracted-from or inserted-into vector must be RHSVec,
        // otherwise we'd end up with a shuffle of three inputs.
        if (EI->getOperand(0) == PermittedRHS || PermittedRHS == nullptr) {
          Value *RHS = EI->getOperand(0);
          ShuffleOps LR = collectShuffleElements(VecOp, Mask, RHS, IC, Rerun);
          assert(LR.second == nullptr || LR.second == RHS);

          if (LR.first->getType() != RHS->getType()) {
            // Although we are giving up for now, see if we can create extracts
            // that match the inserts for another round of combining.
            if (replaceExtractElements(IEI, EI, IC))
              Rerun = true;

            // We tried our best, but we can't find anything compatible with
            // RHS further up the chain. Return a trivial shuffle.
            for (unsigned i = 0; i < NumElts; ++i)
              Mask[i] = i;
            return std::make_pair(V, nullptr);
          }

          unsigned NumLHSElts =
              cast<FixedVectorType>(RHS->getType())->getNumElements();
          Mask[InsertedIdx % NumElts] = NumLHSElts + ExtractedIdx;
          return std::make_pair(LR.first, RHS);
        }

        if (VecOp == PermittedRHS) {
          // We've gone as far as we can: anything on the other side of the
          // extractelement will already have been converted into a shuffle.
          unsigned NumLHSElts =
              cast<FixedVectorType>(EI->getOperand(0)->getType())
                  ->getNumElements();
          for (unsigned i = 0; i != NumElts; ++i)
            Mask.push_back(i == InsertedIdx ? ExtractedIdx : NumLHSElts + i);
          return std::make_pair(EI->getOperand(0), PermittedRHS);
        }

        // If this insertelement is a chain that comes from exactly these two
        // vectors, return the vector and the effective shuffle.
        if (EI->getOperand(0)->getType() == PermittedRHS->getType() &&
            collectSingleShuffleElements(IEI, EI->getOperand(0), PermittedRHS,
                                         Mask))
          return std::make_pair(EI->getOperand(0), PermittedRHS);
      }
    }
  }

  // Otherwise, we can't do anything fancy. Return an identity vector.
  for (unsigned i = 0; i != NumElts; ++i)
    Mask.push_back(i);
  return std::make_pair(V, nullptr);
}

/// Look for a chain of insertvalue's that fully define an aggregate, and trace
/// back the values inserted; see if they all were extractvalue'd from
/// the same source aggregate at the exact same element indices.
/// If they were, just reuse the source aggregate.
/// This potentially deals with PHI indirections.
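///
/// Illustrative example (hypothetical IR):
///   %e0 = extractvalue { i8, i32 } %agg, 0
///   %e1 = extractvalue { i8, i32 } %agg, 1
///   %i0 = insertvalue { i8, i32 } undef, i8 %e0, 0
///   %i1 = insertvalue { i8, i32 } %i0, i32 %e1, 1
/// Here %i1 reconstructs %agg element-by-element, so it can be replaced by
/// %agg itself.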
Instruction *InstCombinerImpl::foldAggregateConstructionIntoAggregateReuse(
    InsertValueInst &OrigIVI) {
  Type *AggTy = OrigIVI.getType();
  unsigned NumAggElts;
  switch (AggTy->getTypeID()) {
  case Type::StructTyID:
    NumAggElts = AggTy->getStructNumElements();
    break;
  case Type::ArrayTyID:
    NumAggElts = AggTy->getArrayNumElements();
    break;
  default:
    llvm_unreachable("Unhandled aggregate type?");
  }

  // Arbitrary aggregate size cut-off. The motivation for a limit of 2 is to
  // be able to handle the clang C++ exception struct (which is hardcoded as
  // {i8*, i32}).
  // FIXME: any interesting patterns to be caught with larger limit?
  assert(NumAggElts > 0 && "Aggregate should have elements.");
  if (NumAggElts > 2)
    return nullptr;

  static constexpr auto NotFound = std::nullopt;
  static constexpr auto FoundMismatch = nullptr;

  // Try to find a value of each element of an aggregate.
  // FIXME: deal with more complex, not one-dimensional, aggregate types
  SmallVector<std::optional<Instruction *>, 2> AggElts(NumAggElts, NotFound);

  // Do we know values for each element of the aggregate?
  auto KnowAllElts = [&AggElts]() {
    return !llvm::is_contained(AggElts, NotFound);
  };

  int Depth = 0;

  // Arbitrary `insertvalue` visitation depth limit. Let's be okay with
  // every element being overwritten twice, which should never happen.
  static const int DepthLimit = 2 * NumAggElts;

  // Recurse up the chain of `insertvalue` aggregate operands until either
  // we've reconstructed the full initializer or can't visit any more
  // `insertvalue`'s.
  for (InsertValueInst *CurrIVI = &OrigIVI;
       Depth < DepthLimit && CurrIVI && !KnowAllElts();
       CurrIVI = dyn_cast<InsertValueInst>(CurrIVI->getAggregateOperand()),
                         ++Depth) {
    auto *InsertedValue =
        dyn_cast<Instruction>(CurrIVI->getInsertedValueOperand());
    if (!InsertedValue)
      return nullptr; // Inserted value must be produced by an instruction.

    ArrayRef<unsigned int> Indices = CurrIVI->getIndices();

    // Don't bother with more than single-level aggregates.
    if (Indices.size() != 1)
      return nullptr; // FIXME: deal with more complex aggregates?

    // Now, we may have already previously recorded the value for this element
    // of an aggregate. If we did, that means the CurrIVI will later be
    // overwritten with the already-recorded value. But if not, let's record it!
    std::optional<Instruction *> &Elt = AggElts[Indices.front()];
    Elt = Elt.value_or(InsertedValue);

    // FIXME: should we handle chain-terminating undef base operand?
  }

  // Was that sufficient to deduce the full initializer for the aggregate?
  if (!KnowAllElts())
    return nullptr; // Give up then.

  // We now want to find the source[s] of the aggregate elements we've found.
  // And with "source" we mean the original aggregate[s] from which
  // the inserted elements were extracted. This may require PHI translation.

  enum class AggregateDescription {
    /// When analyzing the value that was inserted into an aggregate, we did
    /// not manage to find a defining `extractvalue` instruction to analyze.
    NotFound,
    /// When analyzing the value that was inserted into an aggregate, we did
    /// manage to find defining `extractvalue` instruction[s], and everything
    /// matched perfectly - aggregate type, element insertion/extraction index.
    Found,
    /// When analyzing the value that was inserted into an aggregate, we did
    /// manage to find a defining `extractvalue` instruction, but there was
    /// a mismatch: either the source type of the extraction didn't match the
    /// aggregate type of the insertion,
    /// or the extraction/insertion channels mismatched,
    /// or different elements had different source aggregates.
    FoundMismatch
  };
  auto Describe = [](std::optional<Value *> SourceAggregate) {
    if (SourceAggregate == NotFound)
      return AggregateDescription::NotFound;
    if (*SourceAggregate == FoundMismatch)
      return AggregateDescription::FoundMismatch;
    return AggregateDescription::Found;
  };

  // Given the value \p Elt that was being inserted into element \p EltIdx of
  // an aggregate AggTy, see if \p Elt was originally defined by an
  // appropriate extractvalue (same element index, same aggregate type).
  // If found, return the source aggregate from which the extraction was.
  // If \p PredBB is provided, does PHI translation of an \p Elt first.
  auto FindSourceAggregate =
      [&](Instruction *Elt, unsigned EltIdx, std::optional<BasicBlock *> UseBB,
          std::optional<BasicBlock *> PredBB) -> std::optional<Value *> {
    // For now(?), only deal with, at most, a single level of PHI indirection.
    if (UseBB && PredBB)
      Elt = dyn_cast<Instruction>(Elt->DoPHITranslation(*UseBB, *PredBB));
    // FIXME: deal with multiple levels of PHI indirection?

    // Did we find an extraction?
    auto *EVI = dyn_cast_or_null<ExtractValueInst>(Elt);
    if (!EVI)
      return NotFound;

    Value *SourceAggregate = EVI->getAggregateOperand();

    // Is the extraction from the same type into which the insertion was?
    if (SourceAggregate->getType() != AggTy)
      return FoundMismatch;
    // And the element index doesn't change between extraction and insertion?
    if (EVI->getNumIndices() != 1 || EltIdx != EVI->getIndices().front())
      return FoundMismatch;

    return SourceAggregate; // AggregateDescription::Found
  };

  // Given elements AggElts that were constructing an aggregate OrigIVI,
  // see if we can find an appropriate source aggregate for each of the
  // elements, and see that it's the same aggregate for each element. If so,
  // return it.
  auto FindCommonSourceAggregate =
      [&](std::optional<BasicBlock *> UseBB,
          std::optional<BasicBlock *> PredBB) -> std::optional<Value *> {
    std::optional<Value *> SourceAggregate;

    for (auto I : enumerate(AggElts)) {
      assert(Describe(SourceAggregate) != AggregateDescription::FoundMismatch &&
             "We don't store nullptr in SourceAggregate!");
      assert((Describe(SourceAggregate) == AggregateDescription::Found) ==
                 (I.index() != 0) &&
             "SourceAggregate should be valid after the first element.");

      // For this element, is there a plausible source aggregate?
      // FIXME: we could special-case undef element, IFF we know that in the
      // source aggregate said element isn't poison.
      std::optional<Value *> SourceAggregateForElement =
          FindSourceAggregate(*I.value(), I.index(), UseBB, PredBB);

      // Okay, what have we found? Does that correlate with previous findings?

      // Regardless of whether or not we have previously found a source
      // aggregate for previous elements (if any), if we didn't find one for
      // this element, passthrough whatever we have just found.
      if (Describe(SourceAggregateForElement) != AggregateDescription::Found)
        return SourceAggregateForElement;

      // Okay, we have found a source aggregate for this element.
      // Let's see what we already know from previous elements, if any.
      switch (Describe(SourceAggregate)) {
      case AggregateDescription::NotFound:
        // This is apparently the first element that we have examined.
        SourceAggregate = SourceAggregateForElement; // Record the aggregate!
        continue; // Great, now look at next element.
      case AggregateDescription::Found:
        // We have previously already successfully examined other elements.
        // Is this the same source aggregate we've found for other elements?
        if (*SourceAggregateForElement != *SourceAggregate)
          return FoundMismatch;
        continue; // Still the same aggregate, look at next element.
      case AggregateDescription::FoundMismatch:
        llvm_unreachable("Can't happen. We would have early-exited then.");
      }
    }

    assert(Describe(SourceAggregate) == AggregateDescription::Found &&
           "Must be a valid Value");
    return *SourceAggregate;
  };

  std::optional<Value *> SourceAggregate;

  // Can we find the source aggregate without looking at predecessors?
  SourceAggregate = FindCommonSourceAggregate(/*UseBB=*/std::nullopt,
                                              /*PredBB=*/std::nullopt);
  if (Describe(SourceAggregate) != AggregateDescription::NotFound) {
    if (Describe(SourceAggregate) == AggregateDescription::FoundMismatch)
      return nullptr; // Conflicting source aggregates!
    ++NumAggregateReconstructionsSimplified;
    return replaceInstUsesWith(OrigIVI, *SourceAggregate);
  }

  // Okay, apparently we need to look at predecessors.

  // We should be smart about picking the "use" basic block, which will be the
  // merge point for the aggregate, where we'll insert the final PHI that will
  // be used instead of OrigIVI. The basic block of OrigIVI is *not* the right
  // choice. We should look at the blocks in which each of the AggElts is
  // defined; they should all be defined in the same basic block.
  BasicBlock *UseBB = nullptr;

  for (const std::optional<Instruction *> &I : AggElts) {
    BasicBlock *BB = (*I)->getParent();
    // If it's the first instruction we've encountered, record the basic block.
    if (!UseBB) {
      UseBB = BB;
      continue;
    }
    // Otherwise, this must be the same basic block we've seen previously.
    if (UseBB != BB)
      return nullptr;
  }

  // If *all* of the elements are basic-block-independent, meaning they are
  // either function arguments, or constant expressions, then if we didn't
  // handle them without predecessor-aware handling, we won't handle them now.
  if (!UseBB)
    return nullptr;

  // If we didn't manage to find the source aggregate without looking at
  // predecessors, and there are no predecessors to look at, then we're done.
  if (pred_empty(UseBB))
    return nullptr;

  // Arbitrary predecessor count limit.
  static const int PredCountLimit = 64;

  // Cache the (non-uniquified!) list of predecessors in a vector,
  // checking the limit at the same time for efficiency.
  SmallVector<BasicBlock *, 4> Preds; // May have duplicates!
  for (BasicBlock *Pred : predecessors(UseBB)) {
    // Don't bother if there are too many predecessors.
    if (Preds.size() >= PredCountLimit) // FIXME: only count duplicates once?
      return nullptr;
    Preds.emplace_back(Pred);
  }

  // For each predecessor, what is the source aggregate
  // from which all the elements were originally extracted?
  // Note that we want the map to have a stable iteration order!
  SmallDenseMap<BasicBlock *, Value *, 4> SourceAggregates;
  for (BasicBlock *Pred : Preds) {
    std::pair<decltype(SourceAggregates)::iterator, bool> IV =
        SourceAggregates.insert({Pred, nullptr});
    // Did we already evaluate this predecessor?
    if (!IV.second)
      continue;

    // Let's hope that when coming from predecessor Pred, all elements of the
    // aggregate produced by OrigIVI must have been originally extracted from
    // the same aggregate. Is that so? Can we find said original aggregate?
    SourceAggregate = FindCommonSourceAggregate(UseBB, Pred);
    if (Describe(SourceAggregate) != AggregateDescription::Found)
      return nullptr; // Give up.
    IV.first->second = *SourceAggregate;
  }

  // All good! Now we just need to thread the source aggregates here.
  // Note that we have to insert the new PHI here, ourselves, because we can't
  // rely on InstCombinerImpl::run() inserting it into the right basic block.
  // Note that the same block can be a predecessor more than once,
  // and we need to preserve that invariant for the PHI node.
  BuilderTy::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(UseBB, UseBB->getFirstNonPHIIt());
  auto *PHI =
      Builder.CreatePHI(AggTy, Preds.size(), OrigIVI.getName() + ".merged");
  for (BasicBlock *Pred : Preds)
    PHI->addIncoming(SourceAggregates[Pred], Pred);

  ++NumAggregateReconstructionsSimplified;
  return replaceInstUsesWith(OrigIVI, PHI);
}

/// Try to find redundant insertvalue instructions, like the following ones:
///  %0 = insertvalue { i8, i32 } undef, i8 %x, 0
///  %1 = insertvalue { i8, i32 } %0, i8 %y, 0
/// Here the second instruction inserts values at the same indices as the
/// first one, making the first one redundant.
/// It should be transformed to:
///  %0 = insertvalue { i8, i32 } undef, i8 %y, 0
Instruction *InstCombinerImpl::visitInsertValueInst(InsertValueInst &I) {
  if (Value *V = simplifyInsertValueInst(
          I.getAggregateOperand(), I.getInsertedValueOperand(), I.getIndices(),
          SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  bool IsRedundant = false;
  ArrayRef<unsigned int> FirstIndices = I.getIndices();

  // If there is a chain of insertvalue instructions (each of them except the
  // last one has only one use and it's another insertvalue insn from this
  // chain), check if any of the 'children' uses the same indices as the first
  // instruction. In this case, the first one is redundant.
  Value *V = &I;
  unsigned Depth = 0;
  while (V->hasOneUse() && Depth < 10) {
    User *U = V->user_back();
    auto *UserInsInst = dyn_cast<InsertValueInst>(U);
    if (!UserInsInst || U->getOperand(0) != V)
      break;
    if (UserInsInst->getIndices() == FirstIndices) {
      IsRedundant = true;
      break;
    }
    V = UserInsInst;
    Depth++;
  }

  if (IsRedundant)
    return replaceInstUsesWith(I, I.getOperand(0));

  if (Instruction *NewI = foldAggregateConstructionIntoAggregateReuse(I))
    return NewI;

  return nullptr;
}
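
// A shuffle is "equivalent to a select" when every lane stays in place: each
// mask element either keeps the lane from the first operand, takes the same
// lane from the second operand, or is undefined. Illustrative example: for
// 4-element operands, mask <0, 5, 2, 7> acts like a vector select with
// condition <true, false, true, false>.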
static bool isShuffleEquivalentToSelect(ShuffleVectorInst &Shuf) {
  // Cannot analyze a scalable type; the number of elements is not a
  // compile-time constant.
  if (isa<ScalableVectorType>(Shuf.getOperand(0)->getType()))
    return false;

  int MaskSize = Shuf.getShuffleMask().size();
  int VecSize =
      cast<FixedVectorType>(Shuf.getOperand(0)->getType())->getNumElements();

  // A vector select does not change the size of the operands.
  if (MaskSize != VecSize)
    return false;

  // Each mask element must be undefined or choose a vector element from one of
  // the source operands without crossing vector lanes.
  for (int i = 0; i != MaskSize; ++i) {
    int Elt = Shuf.getMaskValue(i);
    if (Elt != -1 && Elt != i && Elt != i + VecSize)
      return false;
  }

  return true;
}

/// Turn a chain of inserts that splats a value into an insert + shuffle:
/// insertelt(insertelt(insertelt(insertelt X, %k, 0), %k, 1), %k, 2) ... ->
/// shufflevector(insertelt(X, %k, 0), poison, zero)
static Instruction *foldInsSequenceIntoSplat(InsertElementInst &InsElt) {
  // We are interested in the last insert in a chain. So if this insert has a
  // single user and that user is an insert, bail.
  if (InsElt.hasOneUse() && isa<InsertElementInst>(InsElt.user_back()))
    return nullptr;

  VectorType *VecTy = InsElt.getType();
  // Cannot handle a scalable type; the number of elements is not a
  // compile-time constant.
  if (isa<ScalableVectorType>(VecTy))
    return nullptr;
  unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();

  // Do not try to do this for a one-element vector, since that's a no-op
  // and will cause an infinite loop.
  if (NumElements == 1)
    return nullptr;

  Value *SplatVal = InsElt.getOperand(1);
  InsertElementInst *CurrIE = &InsElt;
  SmallBitVector ElementPresent(NumElements, false);
  InsertElementInst *FirstIE = nullptr;

  // Walk the chain backwards, keeping track of which indices we inserted into,
  // until we hit something that isn't an insert of the splatted value.
  while (CurrIE) {
    auto *Idx = dyn_cast<ConstantInt>(CurrIE->getOperand(2));
    if (!Idx || CurrIE->getOperand(1) != SplatVal)
      return nullptr;

    auto *NextIE = dyn_cast<InsertElementInst>(CurrIE->getOperand(0));
    // Check that none of the intermediate steps have any additional uses,
    // except for the root insertelement instruction, which can be re-used if
    // it inserts at position 0.
    if (CurrIE != &InsElt &&
        (!CurrIE->hasOneUse() && (NextIE != nullptr || !Idx->isZero())))
      return nullptr;

    ElementPresent[Idx->getZExtValue()] = true;
    FirstIE = CurrIE;
    CurrIE = NextIE;
  }

  // If this is just a single insertelement (not a sequence), we are done.
  if (FirstIE == &InsElt)
    return nullptr;

  // If we are not inserting into a poison vector, make sure we've seen an
  // insert into every element.
  // TODO: If the base vector is not undef, it might be better to create a splat
  //       and then a select-shuffle (blend) with the base vector.
  if (!match(FirstIE->getOperand(0), m_Poison()))
    if (!ElementPresent.all())
      return nullptr;

  // Create the insert + shuffle.
  Type *Int64Ty = Type::getInt64Ty(InsElt.getContext());
  PoisonValue *PoisonVec = PoisonValue::get(VecTy);
  Constant *Zero = ConstantInt::get(Int64Ty, 0);
  if (!cast<ConstantInt>(FirstIE->getOperand(2))->isZero())
    FirstIE = InsertElementInst::Create(PoisonVec, SplatVal, Zero, "", &InsElt);

  // Splat from element 0, but replace absent elements with poison in the mask.
  SmallVector<int, 16> Mask(NumElements, 0);
  for (unsigned i = 0; i != NumElements; ++i)
    if (!ElementPresent[i])
      Mask[i] = -1;

  return new ShuffleVectorInst(FirstIE, Mask);
}

/// Try to fold an insert element into an existing splat shuffle by changing
/// the shuffle's mask to include the index of this insert element.
static Instruction *foldInsEltIntoSplat(InsertElementInst &InsElt) {
  // Check if the vector operand of this insert is a canonical splat shuffle.
  auto *Shuf = dyn_cast<ShuffleVectorInst>(InsElt.getOperand(0));
  if (!Shuf || !Shuf->isZeroEltSplat())
    return nullptr;

  // Bail out early if the shuffle has a scalable type. The number of elements
  // in the shuffle mask is unknown at compile-time.
  if (isa<ScalableVectorType>(Shuf->getType()))
    return nullptr;

  // Check for a constant insertion index.
  uint64_t IdxC;
  if (!match(InsElt.getOperand(2), m_ConstantInt(IdxC)))
    return nullptr;

  // Check if the splat shuffle's input is the same as this insert's scalar op.
  Value *X = InsElt.getOperand(1);
  Value *Op0 = Shuf->getOperand(0);
  if (!match(Op0, m_InsertElt(m_Undef(), m_Specific(X), m_ZeroInt())))
    return nullptr;

  // Replace the shuffle mask element at the index of this insert with a zero.
  // For example:
  // inselt (shuf (inselt undef, X, 0), _, <0,undef,0,undef>), X, 1
  //   --> shuf (inselt undef, X, 0), poison, <0,0,0,undef>
  unsigned NumMaskElts =
      cast<FixedVectorType>(Shuf->getType())->getNumElements();
  SmallVector<int, 16> NewMask(NumMaskElts);
  for (unsigned i = 0; i != NumMaskElts; ++i)
    NewMask[i] = i == IdxC ? 0 : Shuf->getMaskValue(i);

  return new ShuffleVectorInst(Op0, NewMask);
}

/// Try to fold an extract+insert element into an existing identity shuffle by
/// changing the shuffle's mask to include the index of this insert element.
static Instruction *foldInsEltIntoIdentityShuffle(InsertElementInst &InsElt) {
  // Check if the vector operand of this insert is an identity shuffle.
  auto *Shuf = dyn_cast<ShuffleVectorInst>(InsElt.getOperand(0));
  if (!Shuf || !match(Shuf->getOperand(1), m_Undef()) ||
      !(Shuf->isIdentityWithExtract() || Shuf->isIdentityWithPadding()))
    return nullptr;

  // Bail out early if the shuffle has a scalable type. The number of elements
  // in the shuffle mask is unknown at compile-time.
  if (isa<ScalableVectorType>(Shuf->getType()))
    return nullptr;

  // Check for a constant insertion index.
  uint64_t IdxC;
  if (!match(InsElt.getOperand(2), m_ConstantInt(IdxC)))
    return nullptr;

  // Check if this insert's scalar op is extracted from the identity shuffle's
  // input vector.
  Value *Scalar = InsElt.getOperand(1);
  Value *X = Shuf->getOperand(0);
  if (!match(Scalar, m_ExtractElt(m_Specific(X), m_SpecificInt(IdxC))))
    return nullptr;

  // Replace the shuffle mask element at the index of this extract+insert with
  // that same index value.
  // For example:
  // inselt (shuf X, IdMask), (extelt X, IdxC), IdxC --> shuf X, IdMask'
  unsigned NumMaskElts =
      cast<FixedVectorType>(Shuf->getType())->getNumElements();
  SmallVector<int, 16> NewMask(NumMaskElts);
  ArrayRef<int> OldMask = Shuf->getShuffleMask();
  for (unsigned i = 0; i != NumMaskElts; ++i) {
    if (i != IdxC) {
      // All mask elements besides the inserted element remain the same.
      NewMask[i] = OldMask[i];
    } else if (OldMask[i] == (int)IdxC) {
      // If the mask element was already set, there's nothing to do
      // (demanded elements analysis may unset it later).
      return nullptr;
    } else {
      assert(OldMask[i] == PoisonMaskElem &&
             "Unexpected shuffle mask element for identity shuffle");
      NewMask[i] = IdxC;
    }
  }

  return new ShuffleVectorInst(X, Shuf->getOperand(1), NewMask);
}

/// If we have an insertelement instruction feeding into another insertelement
/// and the 2nd is inserting a constant into the vector, canonicalize that
/// constant insertion before the insertion of a variable:
///
/// insertelement (insertelement X, Y, IdxC1), ScalarC, IdxC2 -->
/// insertelement (insertelement X, ScalarC, IdxC2), Y, IdxC1
///
/// This has the potential of eliminating the 2nd insertelement instruction
/// via constant folding of the scalar constant into a vector constant.
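///
/// For example (illustrative), if X is itself a constant vector, the hoisted
/// insert constant-folds away:
///   inselt (inselt <4 x i32> zeroinitializer, i32 %y, 1), i32 42, 0
///   --> inselt <i32 42, i32 0, i32 0, i32 0>, i32 %y, 1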
1375 static Instruction *hoistInsEltConst(InsertElementInst &InsElt2,
1376 InstCombiner::BuilderTy &Builder) {
1377 auto *InsElt1 = dyn_cast<InsertElementInst>(InsElt2.getOperand(0));
1378 if (!InsElt1 || !InsElt1->hasOneUse())
1379 return nullptr;
1381 Value *X, *Y;
1382 Constant *ScalarC;
1383 ConstantInt *IdxC1, *IdxC2;
1384 if (match(InsElt1->getOperand(0), m_Value(X)) &&
1385 match(InsElt1->getOperand(1), m_Value(Y)) && !isa<Constant>(Y) &&
1386 match(InsElt1->getOperand(2), m_ConstantInt(IdxC1)) &&
1387 match(InsElt2.getOperand(1), m_Constant(ScalarC)) &&
1388 match(InsElt2.getOperand(2), m_ConstantInt(IdxC2)) && IdxC1 != IdxC2) {
1389 Value *NewInsElt1 = Builder.CreateInsertElement(X, ScalarC, IdxC2);
1390 return InsertElementInst::Create(NewInsElt1, Y, IdxC1);
1391 }
1393 return nullptr;
1394 }
1396 /// insertelt (shufflevector X, CVec, Mask|insertelt X, C1, CIndex1), C, CIndex
1397 /// --> shufflevector X, CVec', Mask'
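/// Example (illustrative values, not from the original comments):
///   inselt (shuf X, <5, 6>, <0, 3>), 7, 1 --> shuf X, <5, 7>, <0, 3>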
1398 static Instruction *foldConstantInsEltIntoShuffle(InsertElementInst &InsElt) {
1399 auto *Inst = dyn_cast<Instruction>(InsElt.getOperand(0));
1400 // Bail out if the parent has more than one use. In that case, we'd be
1401 // replacing the insertelt with a shuffle, and that's not a clear win.
1402 if (!Inst || !Inst->hasOneUse())
1403 return nullptr;
1404 if (auto *Shuf = dyn_cast<ShuffleVectorInst>(InsElt.getOperand(0))) {
1405 // The shuffle must have a constant vector operand. The insertelt must have
1406 // a constant scalar being inserted at a constant position in the vector.
1407 Constant *ShufConstVec, *InsEltScalar;
1408 uint64_t InsEltIndex;
1409 if (!match(Shuf->getOperand(1), m_Constant(ShufConstVec)) ||
1410 !match(InsElt.getOperand(1), m_Constant(InsEltScalar)) ||
1411 !match(InsElt.getOperand(2), m_ConstantInt(InsEltIndex)))
1412 return nullptr;
1414 // Adding an element to an arbitrary shuffle could be expensive, but a
1415 // shuffle that selects elements from vectors without crossing lanes is
1416 // assumed cheap.
1417 // If we're just adding a constant into that shuffle, it will still be
1418 // cheap.
1419 if (!isShuffleEquivalentToSelect(*Shuf))
1420 return nullptr;
1422 // From the above 'select' check, we know that the mask has the same number
1423 // of elements as the vector input operands. We also know that each constant
1424 // input element is used in its lane and cannot be used more than once by
1425 // the shuffle. Therefore, replace the constant in the shuffle's constant
1426 // vector with the insertelt constant. Replace the constant in the shuffle's
1427 // mask vector with the insertelt index plus the length of the vector
1428 // (because the constant vector operand of a shuffle is always the 2nd
1429 // operand).
1430 ArrayRef<int> Mask = Shuf->getShuffleMask();
1431 unsigned NumElts = Mask.size();
1432 SmallVector<Constant *, 16> NewShufElts(NumElts);
1433 SmallVector<int, 16> NewMaskElts(NumElts);
1434 for (unsigned I = 0; I != NumElts; ++I) {
1435 if (I == InsEltIndex) {
1436 NewShufElts[I] = InsEltScalar;
1437 NewMaskElts[I] = InsEltIndex + NumElts;
1438 } else {
1439 // Copy over the existing values.
1440 NewShufElts[I] = ShufConstVec->getAggregateElement(I);
1441 NewMaskElts[I] = Mask[I];
1442 }
1444 // Bail if we failed to find an element.
1445 if (!NewShufElts[I])
1446 return nullptr;
1447 }
1449 // Create new operands for a shuffle that includes the constant of the
1450 // original insertelt. The old shuffle will be dead now.
1451 return new ShuffleVectorInst(Shuf->getOperand(0),
1452 ConstantVector::get(NewShufElts), NewMaskElts);
1453 } else if (auto *IEI = dyn_cast<InsertElementInst>(Inst)) {
1454 // Transform sequences of insertelements ops with constant data/indexes into
1455 // a single shuffle op.
1456 // Cannot handle a scalable type; the number of elements needed to create
1457 // the shuffle mask is not a compile-time constant.
1458 if (isa<ScalableVectorType>(InsElt.getType()))
1459 return nullptr;
1460 unsigned NumElts =
1461 cast<FixedVectorType>(InsElt.getType())->getNumElements();
1463 uint64_t InsertIdx[2];
1464 Constant *Val[2];
1465 if (!match(InsElt.getOperand(2), m_ConstantInt(InsertIdx[0])) ||
1466 !match(InsElt.getOperand(1), m_Constant(Val[0])) ||
1467 !match(IEI->getOperand(2), m_ConstantInt(InsertIdx[1])) ||
1468 !match(IEI->getOperand(1), m_Constant(Val[1])))
1469 return nullptr;
1470 SmallVector<Constant *, 16> Values(NumElts);
1471 SmallVector<int, 16> Mask(NumElts);
1472 auto ValI = std::begin(Val);
1473 // Generate new constant vector and mask.
1474 // We have 2 values/masks from the insertelements instructions. Insert them
1475 // into new value/mask vectors.
1476 for (uint64_t I : InsertIdx) {
1477 if (!Values[I]) {
1478 Values[I] = *ValI;
1479 Mask[I] = NumElts + I;
1480 }
1481 ++ValI;
1482 }
1483 // Remaining values are filled with 'poison' values.
1484 for (unsigned I = 0; I < NumElts; ++I) {
1485 if (!Values[I]) {
1486 Values[I] = PoisonValue::get(InsElt.getType()->getElementType());
1487 Mask[I] = I;
1488 }
1489 }
1490 // Create new operands for a shuffle that includes the constant of the
1491 // original insertelt.
1492 return new ShuffleVectorInst(IEI->getOperand(0),
1493 ConstantVector::get(Values), Mask);
1494 }
1495 return nullptr;
1496 }
1498 /// If both the base vector and the inserted element are extended from the same
1499 /// type, do the insert element in the narrow source type followed by extend.
1500 /// TODO: This can be extended to include other cast opcodes, but particularly
1501 /// if we create a wider insertelement, make sure codegen is not harmed.
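/// Example (illustrative types, not from the original comments):
///   inselt (fpext <2 x float> %v to <2 x double>), (fpext float %s to double), 1
///   --> fpext (inselt <2 x float> %v, float %s, 1) to <2 x double>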
1502 static Instruction *narrowInsElt(InsertElementInst &InsElt,
1503 InstCombiner::BuilderTy &Builder) {
1504 // We are creating a vector extend. If the original vector extend has another
1505 // use, that would mean we end up with 2 vector extends, so avoid that.
1506 // TODO: We could ease the use-clause to "if at least one op has one use"
1507 // (assuming that the source types match - see next TODO comment).
1508 Value *Vec = InsElt.getOperand(0);
1509 if (!Vec->hasOneUse())
1510 return nullptr;
1512 Value *Scalar = InsElt.getOperand(1);
1513 Value *X, *Y;
1514 CastInst::CastOps CastOpcode;
1515 if (match(Vec, m_FPExt(m_Value(X))) && match(Scalar, m_FPExt(m_Value(Y))))
1516 CastOpcode = Instruction::FPExt;
1517 else if (match(Vec, m_SExt(m_Value(X))) && match(Scalar, m_SExt(m_Value(Y))))
1518 CastOpcode = Instruction::SExt;
1519 else if (match(Vec, m_ZExt(m_Value(X))) && match(Scalar, m_ZExt(m_Value(Y))))
1520 CastOpcode = Instruction::ZExt;
1521 else
1522 return nullptr;
1524 // TODO: We can allow mismatched types by creating an intermediate cast.
1525 if (X->getType()->getScalarType() != Y->getType())
1526 return nullptr;
1528 // inselt (ext X), (ext Y), Index --> ext (inselt X, Y, Index)
1529 Value *NewInsElt = Builder.CreateInsertElement(X, Y, InsElt.getOperand(2));
1530 return CastInst::Create(CastOpcode, NewInsElt, InsElt.getType());
1531 }
1533 /// If we are inserting 2 halves of a value into adjacent elements of a vector,
1534 /// try to convert to a single insert with appropriate bitcasts.
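/// Example (illustrative, little endian, an i32 split into 2 x i16; values
/// are not from the original comments):
///   inselt (inselt undef, (trunc i32 %x to i16), 0),
///          (trunc (lshr i32 %x, 16) to i16), 1
///   --> bitcast (inselt <1 x i32> undef, i32 %x, 0) to <2 x i16>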
1535 static Instruction *foldTruncInsEltPair(InsertElementInst &InsElt,
1536 bool IsBigEndian,
1537 InstCombiner::BuilderTy &Builder) {
1538 Value *VecOp = InsElt.getOperand(0);
1539 Value *ScalarOp = InsElt.getOperand(1);
1540 Value *IndexOp = InsElt.getOperand(2);
1542 // Pattern depends on endianness because we expect the lower index to be inserted first.
1543 // Big endian:
1544 // inselt (inselt BaseVec, (trunc (lshr X, BW/2)), Index0), (trunc X), Index1
1545 // Little endian:
1546 // inselt (inselt BaseVec, (trunc X), Index0), (trunc (lshr X, BW/2)), Index1
1547 // Note: It is not safe to do this transform with an arbitrary base vector
1548 // because the bitcast of that vector to fewer/larger elements could
1549 // allow poison to spill into an element that was not poison before.
1550 // TODO: Detect smaller fractions of the scalar.
1551 // TODO: One-use checks are conservative.
1552 auto *VTy = dyn_cast<FixedVectorType>(InsElt.getType());
1553 Value *Scalar0, *BaseVec;
1554 uint64_t Index0, Index1;
1555 if (!VTy || (VTy->getNumElements() & 1) ||
1556 !match(IndexOp, m_ConstantInt(Index1)) ||
1557 !match(VecOp, m_InsertElt(m_Value(BaseVec), m_Value(Scalar0),
1558 m_ConstantInt(Index0))) ||
1559 !match(BaseVec, m_Undef()))
1560 return nullptr;
1562 // The first insert must be to the index one less than this one, and
1563 // that index must be even.
1564 if (Index0 + 1 != Index1 || Index0 & 1)
1565 return nullptr;
1567 // For big endian, the high half of the value should be inserted first.
1568 // For little endian, the low half of the value should be inserted first.
1569 Value *X;
1570 uint64_t ShAmt;
1571 if (IsBigEndian) {
1572 if (!match(ScalarOp, m_Trunc(m_Value(X))) ||
1573 !match(Scalar0, m_Trunc(m_LShr(m_Specific(X), m_ConstantInt(ShAmt)))))
1574 return nullptr;
1575 } else {
1576 if (!match(Scalar0, m_Trunc(m_Value(X))) ||
1577 !match(ScalarOp, m_Trunc(m_LShr(m_Specific(X), m_ConstantInt(ShAmt)))))
1578 return nullptr;
1579 }
1581 Type *SrcTy = X->getType();
1582 unsigned ScalarWidth = SrcTy->getScalarSizeInBits();
1583 unsigned VecEltWidth = VTy->getScalarSizeInBits();
1584 if (ScalarWidth != VecEltWidth * 2 || ShAmt != VecEltWidth)
1585 return nullptr;
1587 // Bitcast the base vector to a vector type with the source element type.
1588 Type *CastTy = FixedVectorType::get(SrcTy, VTy->getNumElements() / 2);
1589 Value *CastBaseVec = Builder.CreateBitCast(BaseVec, CastTy);
1591 // Scale the insert index for a vector with half as many elements.
1592 // bitcast (inselt (bitcast BaseVec), X, NewIndex)
1593 uint64_t NewIndex = IsBigEndian ? Index1 / 2 : Index0 / 2;
1594 Value *NewInsert = Builder.CreateInsertElement(CastBaseVec, X, NewIndex);
1595 return new BitCastInst(NewInsert, VTy);
1596 }
1598 Instruction *InstCombinerImpl::visitInsertElementInst(InsertElementInst &IE) {
1599 Value *VecOp = IE.getOperand(0);
1600 Value *ScalarOp = IE.getOperand(1);
1601 Value *IdxOp = IE.getOperand(2);
1603 if (auto *V = simplifyInsertElementInst(
1604 VecOp, ScalarOp, IdxOp, SQ.getWithInstruction(&IE)))
1605 return replaceInstUsesWith(IE, V);
1607 // Canonicalize type of constant indices to i64 to simplify CSE
1608 if (auto *IndexC = dyn_cast<ConstantInt>(IdxOp)) {
1609 if (auto *NewIdx = getPreferredVectorIndex(IndexC))
1610 return replaceOperand(IE, 2, NewIdx);
1612 Value *BaseVec, *OtherScalar;
1613 uint64_t OtherIndexVal;
1614 if (match(VecOp, m_OneUse(m_InsertElt(m_Value(BaseVec),
1615 m_Value(OtherScalar),
1616 m_ConstantInt(OtherIndexVal)))) &&
1617 !isa<Constant>(OtherScalar) && OtherIndexVal > IndexC->getZExtValue()) {
1618 Value *NewIns = Builder.CreateInsertElement(BaseVec, ScalarOp, IdxOp);
1619 return InsertElementInst::Create(NewIns, OtherScalar,
1620 Builder.getInt64(OtherIndexVal));
1621 }
1622 }
1624 // If the scalar is bitcast and inserted into undef, do the insert in the
1625 // source type followed by bitcast.
1626 // TODO: Generalize for insert into any constant, not just undef?
1627 Value *ScalarSrc;
1628 if (match(VecOp, m_Undef()) &&
1629 match(ScalarOp, m_OneUse(m_BitCast(m_Value(ScalarSrc)))) &&
1630 (ScalarSrc->getType()->isIntegerTy() ||
1631 ScalarSrc->getType()->isFloatingPointTy())) {
1632 // inselt undef, (bitcast ScalarSrc), IdxOp -->
1633 // bitcast (inselt undef, ScalarSrc, IdxOp)
1634 Type *ScalarTy = ScalarSrc->getType();
1635 Type *VecTy = VectorType::get(ScalarTy, IE.getType()->getElementCount());
1636 Constant *NewUndef = isa<PoisonValue>(VecOp) ? PoisonValue::get(VecTy)
1637 : UndefValue::get(VecTy);
1638 Value *NewInsElt = Builder.CreateInsertElement(NewUndef, ScalarSrc, IdxOp);
1639 return new BitCastInst(NewInsElt, IE.getType());
1640 }
1642 // If the vector and scalar are both bitcast from the same element type, do
1643 // the insert in that source type followed by bitcast.
1644 Value *VecSrc;
1645 if (match(VecOp, m_BitCast(m_Value(VecSrc))) &&
1646 match(ScalarOp, m_BitCast(m_Value(ScalarSrc))) &&
1647 (VecOp->hasOneUse() || ScalarOp->hasOneUse()) &&
1648 VecSrc->getType()->isVectorTy() && !ScalarSrc->getType()->isVectorTy() &&
1649 cast<VectorType>(VecSrc->getType())->getElementType() ==
1650 ScalarSrc->getType()) {
1651 // inselt (bitcast VecSrc), (bitcast ScalarSrc), IdxOp -->
1652 // bitcast (inselt VecSrc, ScalarSrc, IdxOp)
1653 Value *NewInsElt = Builder.CreateInsertElement(VecSrc, ScalarSrc, IdxOp);
1654 return new BitCastInst(NewInsElt, IE.getType());
1655 }
1657 // If the inserted element was extracted from some other fixed-length vector
1658 // and both indexes are valid constants, try to turn this into a shuffle.
1659 // Cannot handle a scalable vector type; the number of elements needed to
1660 // create the shuffle mask is not a compile-time constant.
1661 uint64_t InsertedIdx, ExtractedIdx;
1662 Value *ExtVecOp;
1663 if (isa<FixedVectorType>(IE.getType()) &&
1664 match(IdxOp, m_ConstantInt(InsertedIdx)) &&
1665 match(ScalarOp,
1666 m_ExtractElt(m_Value(ExtVecOp), m_ConstantInt(ExtractedIdx))) &&
1667 isa<FixedVectorType>(ExtVecOp->getType()) &&
1668 ExtractedIdx <
1669 cast<FixedVectorType>(ExtVecOp->getType())->getNumElements()) {
1670 // TODO: Looking at the user(s) to determine if this insert is a
1671 // fold-to-shuffle opportunity does not match the usual instcombine
1672 // constraints. We should decide if the transform is worthy based only
1673 // on this instruction and its operands, but that may not work currently.
1675 // Here, we are trying to avoid creating shuffles before reaching
1676 // the end of a chain of extract-insert pairs. This is complicated because
1677 // we do not generally form arbitrary shuffle masks in instcombine
1678 // (because those may codegen poorly), but collectShuffleElements() does
1679 // exactly that.
1681 // The rules for determining what is an acceptable target-independent
1682 // shuffle mask are fuzzy because they evolve based on the backend's
1683 // capabilities and real-world impact.
1684 auto isShuffleRootCandidate = [](InsertElementInst &Insert) {
1685 if (!Insert.hasOneUse())
1686 return true;
1687 auto *InsertUser = dyn_cast<InsertElementInst>(Insert.user_back());
1688 if (!InsertUser)
1689 return true;
1690 return false;
1691 };
1693 // Try to form a shuffle from a chain of extract-insert ops.
1694 if (isShuffleRootCandidate(IE)) {
1695 bool Rerun = true;
1696 while (Rerun) {
1697 Rerun = false;
1699 SmallVector<int, 16> Mask;
1700 ShuffleOps LR =
1701 collectShuffleElements(&IE, Mask, nullptr, *this, Rerun);
1703 // The proposed shuffle may be trivial, in which case we shouldn't
1704 // perform the combine.
1705 if (LR.first != &IE && LR.second != &IE) {
1706 // We now have a shuffle of LHS, RHS, Mask.
1707 if (LR.second == nullptr)
1708 LR.second = PoisonValue::get(LR.first->getType());
1709 return new ShuffleVectorInst(LR.first, LR.second, Mask);
1710 }
1711 }
1712 }
1713 }
1715 if (auto VecTy = dyn_cast<FixedVectorType>(VecOp->getType())) {
1716 unsigned VWidth = VecTy->getNumElements();
1717 APInt PoisonElts(VWidth, 0);
1718 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
1719 if (Value *V = SimplifyDemandedVectorElts(&IE, AllOnesEltMask,
1720 PoisonElts)) {
1721 if (V != &IE)
1722 return replaceInstUsesWith(IE, V);
1723 return &IE;
1724 }
1725 }
1727 if (Instruction *Shuf = foldConstantInsEltIntoShuffle(IE))
1728 return Shuf;
1730 if (Instruction *NewInsElt = hoistInsEltConst(IE, Builder))
1731 return NewInsElt;
1733 if (Instruction *Broadcast = foldInsSequenceIntoSplat(IE))
1734 return Broadcast;
1736 if (Instruction *Splat = foldInsEltIntoSplat(IE))
1737 return Splat;
1739 if (Instruction *IdentityShuf = foldInsEltIntoIdentityShuffle(IE))
1740 return IdentityShuf;
1742 if (Instruction *Ext = narrowInsElt(IE, Builder))
1743 return Ext;
1745 if (Instruction *Ext = foldTruncInsEltPair(IE, DL.isBigEndian(), Builder))
1746 return Ext;
1748 return nullptr;
1749 }
1751 /// Return true if we can evaluate the specified expression tree if the vector
1752 /// elements were shuffled in a different order.
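/// For example (illustrative), "add <2 x i32> %x, <i32 1, i32 2>" can be
/// evaluated under the reversed mask <1, 0> by instead computing
/// "add <2 x i32> %x.shuffled, <i32 2, i32 1>".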
1753 static bool canEvaluateShuffled(Value *V, ArrayRef<int> Mask,
1754 unsigned Depth = 5) {
1755 // We can always reorder the elements of a constant.
1756 if (isa<Constant>(V))
1757 return true;
1759 // We won't reorder vector arguments. No IPO here.
1760 Instruction *I = dyn_cast<Instruction>(V);
1761 if (!I) return false;
1763 // Two users may expect different orders of the elements. Don't try it.
1764 if (!I->hasOneUse())
1765 return false;
1767 if (Depth == 0) return false;
1769 switch (I->getOpcode()) {
1770 case Instruction::UDiv:
1771 case Instruction::SDiv:
1772 case Instruction::URem:
1773 case Instruction::SRem:
1774 // Propagating an undefined shuffle mask element to integer div/rem is not
1775 // allowed because those opcodes can create immediate undefined behavior
1776 // from an undefined element in an operand.
1777 if (llvm::is_contained(Mask, -1))
1778 return false;
1779 [[fallthrough]];
1780 case Instruction::Add:
1781 case Instruction::FAdd:
1782 case Instruction::Sub:
1783 case Instruction::FSub:
1784 case Instruction::Mul:
1785 case Instruction::FMul:
1786 case Instruction::FDiv:
1787 case Instruction::FRem:
1788 case Instruction::Shl:
1789 case Instruction::LShr:
1790 case Instruction::AShr:
1791 case Instruction::And:
1792 case Instruction::Or:
1793 case Instruction::Xor:
1794 case Instruction::ICmp:
1795 case Instruction::FCmp:
1796 case Instruction::Trunc:
1797 case Instruction::ZExt:
1798 case Instruction::SExt:
1799 case Instruction::FPToUI:
1800 case Instruction::FPToSI:
1801 case Instruction::UIToFP:
1802 case Instruction::SIToFP:
1803 case Instruction::FPTrunc:
1804 case Instruction::FPExt:
1805 case Instruction::GetElementPtr: {
1806 // Bail out if we would create longer vector ops. We could allow creating
1807 // longer vector ops, but that may result in more expensive codegen.
1808 Type *ITy = I->getType();
1809 if (ITy->isVectorTy() &&
1810 Mask.size() > cast<FixedVectorType>(ITy)->getNumElements())
1811 return false;
1812 for (Value *Operand : I->operands()) {
1813 if (!canEvaluateShuffled(Operand, Mask, Depth - 1))
1814 return false;
1815 }
1816 return true;
1817 }
1818 case Instruction::InsertElement: {
1819 ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(2));
1820 if (!CI) return false;
1821 int ElementNumber = CI->getLimitedValue();
1823 // Verify that 'CI' does not occur twice in Mask. A single 'insertelement'
1824 // can't put an element into multiple indices.
1825 bool SeenOnce = false;
1826 for (int I : Mask) {
1827 if (I == ElementNumber) {
1828 if (SeenOnce)
1829 return false;
1830 SeenOnce = true;
1831 }
1832 }
1833 return canEvaluateShuffled(I->getOperand(0), Mask, Depth - 1);
1834 }
1835 }
1836 return false;
1837 }
1839 /// Rebuild a new instruction just like 'I' but with the new operands given.
1840 /// In the event of a type mismatch, the new operands' types are taken to be correct.
1841 static Value *buildNew(Instruction *I, ArrayRef<Value*> NewOps,
1842 IRBuilderBase &Builder) {
1843 Builder.SetInsertPoint(I);
1844 switch (I->getOpcode()) {
1845 case Instruction::Add:
1846 case Instruction::FAdd:
1847 case Instruction::Sub:
1848 case Instruction::FSub:
1849 case Instruction::Mul:
1850 case Instruction::FMul:
1851 case Instruction::UDiv:
1852 case Instruction::SDiv:
1853 case Instruction::FDiv:
1854 case Instruction::URem:
1855 case Instruction::SRem:
1856 case Instruction::FRem:
1857 case Instruction::Shl:
1858 case Instruction::LShr:
1859 case Instruction::AShr:
1860 case Instruction::And:
1861 case Instruction::Or:
1862 case Instruction::Xor: {
1863 BinaryOperator *BO = cast<BinaryOperator>(I);
1864 assert(NewOps.size() == 2 && "binary operator with #ops != 2");
1865 Value *New = Builder.CreateBinOp(BO->getOpcode(),
1866 NewOps[0], NewOps[1]);
1867 if (auto *NewI = dyn_cast<Instruction>(New)) {
1868 if (isa<OverflowingBinaryOperator>(BO)) {
1869 NewI->setHasNoUnsignedWrap(BO->hasNoUnsignedWrap());
1870 NewI->setHasNoSignedWrap(BO->hasNoSignedWrap());
1871 }
1872 if (isa<PossiblyExactOperator>(BO)) {
1873 NewI->setIsExact(BO->isExact());
1874 }
1875 if (isa<FPMathOperator>(BO))
1876 NewI->copyFastMathFlags(I);
1877 }
1878 return New;
1879 }
1880 case Instruction::ICmp:
1881 assert(NewOps.size() == 2 && "icmp with #ops != 2");
1882 return Builder.CreateICmp(cast<ICmpInst>(I)->getPredicate(), NewOps[0],
1883 NewOps[1]);
1884 case Instruction::FCmp:
1885 assert(NewOps.size() == 2 && "fcmp with #ops != 2");
1886 return Builder.CreateFCmp(cast<FCmpInst>(I)->getPredicate(), NewOps[0],
1887 NewOps[1]);
1888 case Instruction::Trunc:
1889 case Instruction::ZExt:
1890 case Instruction::SExt:
1891 case Instruction::FPToUI:
1892 case Instruction::FPToSI:
1893 case Instruction::UIToFP:
1894 case Instruction::SIToFP:
1895 case Instruction::FPTrunc:
1896 case Instruction::FPExt: {
1897 // It's possible that the mask has a different number of elements from
1898 // the original cast. We recompute the destination type to match the mask.
1899 Type *DestTy = VectorType::get(
1900 I->getType()->getScalarType(),
1901 cast<VectorType>(NewOps[0]->getType())->getElementCount());
1902 assert(NewOps.size() == 1 && "cast with #ops != 1");
1903 return Builder.CreateCast(cast<CastInst>(I)->getOpcode(), NewOps[0],
1904 DestTy);
1905 }
1906 case Instruction::GetElementPtr: {
1907 Value *Ptr = NewOps[0];
1908 ArrayRef<Value*> Idx = NewOps.slice(1);
1909 return Builder.CreateGEP(cast<GEPOperator>(I)->getSourceElementType(),
1910 Ptr, Idx, "",
1911 cast<GEPOperator>(I)->isInBounds());
1912 }
1913 }
1914 llvm_unreachable("failed to rebuild vector instructions");
1915 }
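/// Rewrite the instruction tree rooted at V so that its vector elements appear
/// in the order given by Mask; legality must be established beforehand with
/// canEvaluateShuffled.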
1917 static Value *evaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask,
1918 IRBuilderBase &Builder) {
1919 // Mask.size() does not need to be equal to the number of vector elements.
1921 assert(V->getType()->isVectorTy() && "can't reorder non-vector elements");
1922 Type *EltTy = V->getType()->getScalarType();
1924 if (isa<PoisonValue>(V))
1925 return PoisonValue::get(FixedVectorType::get(EltTy, Mask.size()));
1927 if (match(V, m_Undef()))
1928 return UndefValue::get(FixedVectorType::get(EltTy, Mask.size()));
1930 if (isa<ConstantAggregateZero>(V))
1931 return ConstantAggregateZero::get(FixedVectorType::get(EltTy, Mask.size()));
1933 if (Constant *C = dyn_cast<Constant>(V))
1934 return ConstantExpr::getShuffleVector(C, PoisonValue::get(C->getType()),
1935 Mask);
1937 Instruction *I = cast<Instruction>(V);
1938 switch (I->getOpcode()) {
1939 case Instruction::Add:
1940 case Instruction::FAdd:
1941 case Instruction::Sub:
1942 case Instruction::FSub:
1943 case Instruction::Mul:
1944 case Instruction::FMul:
1945 case Instruction::UDiv:
1946 case Instruction::SDiv:
1947 case Instruction::FDiv:
1948 case Instruction::URem:
1949 case Instruction::SRem:
1950 case Instruction::FRem:
1951 case Instruction::Shl:
1952 case Instruction::LShr:
1953 case Instruction::AShr:
1954 case Instruction::And:
1955 case Instruction::Or:
1956 case Instruction::Xor:
1957 case Instruction::ICmp:
1958 case Instruction::FCmp:
1959 case Instruction::Trunc:
1960 case Instruction::ZExt:
1961 case Instruction::SExt:
1962 case Instruction::FPToUI:
1963 case Instruction::FPToSI:
1964 case Instruction::UIToFP:
1965 case Instruction::SIToFP:
1966 case Instruction::FPTrunc:
1967 case Instruction::FPExt:
1968 case Instruction::Select:
1969 case Instruction::GetElementPtr: {
1970 SmallVector<Value*, 8> NewOps;
1971 bool NeedsRebuild =
1972 (Mask.size() !=
1973 cast<FixedVectorType>(I->getType())->getNumElements());
1974 for (int i = 0, e = I->getNumOperands(); i != e; ++i) {
1975 Value *V;
1976 // Recursively call evaluateInDifferentElementOrder on vector arguments
1977 // as well. E.g. GetElementPtr may have scalar operands even if the
1978 // return value is a vector, so we need to examine the operand type.
1979 if (I->getOperand(i)->getType()->isVectorTy())
1980 V = evaluateInDifferentElementOrder(I->getOperand(i), Mask, Builder);
1981 else
1982 V = I->getOperand(i);
1983 NewOps.push_back(V);
1984 NeedsRebuild |= (V != I->getOperand(i));
1985 }
1986 if (NeedsRebuild)
1987 return buildNew(I, NewOps, Builder);
1988 return I;
1989 }
1990 case Instruction::InsertElement: {
1991 int Element = cast<ConstantInt>(I->getOperand(2))->getLimitedValue();
1993 // The insertelement was inserting at Element. Figure out which element
1994 // that becomes after shuffling. The answer is guaranteed to be unique
1995 // by canEvaluateShuffled.
1996 bool Found = false;
1997 int Index = 0;
1998 for (int e = Mask.size(); Index != e; ++Index) {
1999 if (Mask[Index] == Element) {
2000 Found = true;
2001 break;
2002 }
2003 }
2005 // If element is not in Mask, no need to handle the operand 1 (element to
2006 // be inserted). Just evaluate values in operand 0 according to Mask.
2007 if (!Found)
2008 return evaluateInDifferentElementOrder(I->getOperand(0), Mask, Builder);
2010 Value *V = evaluateInDifferentElementOrder(I->getOperand(0), Mask,
2011 Builder);
2012 Builder.SetInsertPoint(I);
2013 return Builder.CreateInsertElement(V, I->getOperand(1), Index);
2014 }
2015 }
2016 llvm_unreachable("failed to reorder elements of vector instruction!");
2017 }
2019 // Returns true if the shuffle is extracting a contiguous range of values from
2020 // LHS, for example:
2021 // +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
2022 // Input: |AA|BB|CC|DD|EE|FF|GG|HH|II|JJ|KK|LL|MM|NN|OO|PP|
2023 // Shuffles to: |EE|FF|GG|HH|
2024 // +--+--+--+--+
2025 static bool isShuffleExtractingFromLHS(ShuffleVectorInst &SVI,
2026 ArrayRef<int> Mask) {
2027 unsigned LHSElems =
2028 cast<FixedVectorType>(SVI.getOperand(0)->getType())->getNumElements();
2029 unsigned MaskElems = Mask.size();
2030 unsigned BegIdx = Mask.front();
2031 unsigned EndIdx = Mask.back();
2032 if (BegIdx > EndIdx || EndIdx >= LHSElems || EndIdx - BegIdx != MaskElems - 1)
2033 return false;
2034 for (unsigned I = 0; I != MaskElems; ++I)
2035 if (static_cast<unsigned>(Mask[I]) != BegIdx + I)
2036 return false;
2037 return true;
2038 }
2040 /// These are the ingredients in an alternate form binary operator as described
2041 /// below.
2042 struct BinopElts {
2043 BinaryOperator::BinaryOps Opcode;
2044 Value *Op0;
2045 Value *Op1;
2046 BinopElts(BinaryOperator::BinaryOps Opc = (BinaryOperator::BinaryOps)0,
2047 Value *V0 = nullptr, Value *V1 = nullptr) :
2048 Opcode(Opc), Op0(V0), Op1(V1) {}
2049 operator bool() const { return Opcode != 0; }
2050 };
2052 /// Binops may be transformed into binops with different opcodes and operands.
2053 /// Reverse the usual canonicalization to enable folds with the non-canonical
2054 /// form of the binop. If a transform is possible, return the elements of the
2055 /// new binop. If not, return invalid elements.
2056 static BinopElts getAlternateBinop(BinaryOperator *BO, const DataLayout &DL) {
2057 Value *BO0 = BO->getOperand(0), *BO1 = BO->getOperand(1);
2058 Type *Ty = BO->getType();
2059 switch (BO->getOpcode()) {
2060 case Instruction::Shl: {
2061 // shl X, C --> mul X, (1 << C)
2062 Constant *C;
2063 if (match(BO1, m_Constant(C))) {
2064 Constant *ShlOne = ConstantExpr::getShl(ConstantInt::get(Ty, 1), C);
2065 return {Instruction::Mul, BO0, ShlOne};
2066 }
2067 break;
2068 }
2069 case Instruction::Or: {
2070 // or X, C --> add X, C (when X and C have no common bits set)
2071 const APInt *C;
2072 if (match(BO1, m_APInt(C)) && MaskedValueIsZero(BO0, *C, DL))
2073 return {Instruction::Add, BO0, BO1};
2074 break;
2075 }
2076 case Instruction::Sub:
2077 // sub 0, X --> mul X, -1
2078 if (match(BO0, m_ZeroInt()))
2079 return {Instruction::Mul, BO1, ConstantInt::getAllOnesValue(Ty)};
2080 break;
2081 default:
2082 break;
2083 }
2084 return {};
2085 }
2087 /// A select shuffle of a select shuffle with a shared operand can be reduced
2088 /// to a single select shuffle. This is an obvious improvement in IR, and the
2089 /// backend is expected to lower select shuffles efficiently.
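/// Example (illustrative masks, not from the original comments):
///   shuf X, (shuf X, Y, <0, 5, 2, 7>), <0, 1, 6, 7> --> shuf X, Y, <0, 1, 2, 7>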
2090 static Instruction *foldSelectShuffleOfSelectShuffle(ShuffleVectorInst &Shuf) {
2091 assert(Shuf.isSelect() && "Must have select-equivalent shuffle");
2093 Value *Op0 = Shuf.getOperand(0), *Op1 = Shuf.getOperand(1);
2094 SmallVector<int, 16> Mask;
2095 Shuf.getShuffleMask(Mask);
2096 unsigned NumElts = Mask.size();
2098 // Canonicalize a select shuffle with common operand as Op1.
2099 auto *ShufOp = dyn_cast<ShuffleVectorInst>(Op0);
2100 if (ShufOp && ShufOp->isSelect() &&
2101 (ShufOp->getOperand(0) == Op1 || ShufOp->getOperand(1) == Op1)) {
2102 std::swap(Op0, Op1);
2103 ShuffleVectorInst::commuteShuffleMask(Mask, NumElts);
2104 }
2106 ShufOp = dyn_cast<ShuffleVectorInst>(Op1);
2107 if (!ShufOp || !ShufOp->isSelect() ||
2108 (ShufOp->getOperand(0) != Op0 && ShufOp->getOperand(1) != Op0))
2109 return nullptr;
2111 Value *X = ShufOp->getOperand(0), *Y = ShufOp->getOperand(1);
2112 SmallVector<int, 16> Mask1;
2113 ShufOp->getShuffleMask(Mask1);
2114 assert(Mask1.size() == NumElts && "Vector size changed with select shuffle");
2116 // Canonicalize common operand (Op0) as X (first operand of first shuffle).
2117 if (Y == Op0) {
2118 std::swap(X, Y);
2119 ShuffleVectorInst::commuteShuffleMask(Mask1, NumElts);
2120 }
2122 // If the mask chooses from X (operand 0), it stays the same.
2123 // If the mask chooses from the earlier shuffle, the other mask value is
2124 // transferred to the combined select shuffle:
2125 // shuf X, (shuf X, Y, M1), M --> shuf X, Y, M'
2126 SmallVector<int, 16> NewMask(NumElts);
2127 for (unsigned i = 0; i != NumElts; ++i)
2128 NewMask[i] = Mask[i] < (signed)NumElts ? Mask[i] : Mask1[i];
2130 // A select mask with undef elements might look like an identity mask.
2131 assert((ShuffleVectorInst::isSelectMask(NewMask, NumElts) ||
2132 ShuffleVectorInst::isIdentityMask(NewMask, NumElts)) &&
2133 "Unexpected shuffle mask");
2134 return new ShuffleVectorInst(X, Y, NewMask);
2135 }
2137 static Instruction *foldSelectShuffleWith1Binop(ShuffleVectorInst &Shuf) {
2138 assert(Shuf.isSelect() && "Must have select-equivalent shuffle");
2140 // Are we shuffling together some value and that same value after it has been
2141 // modified by a binop with a constant?
2142 Value *Op0 = Shuf.getOperand(0), *Op1 = Shuf.getOperand(1);
2143 Constant *C;
2144 bool Op0IsBinop;
2145 if (match(Op0, m_BinOp(m_Specific(Op1), m_Constant(C))))
2146 Op0IsBinop = true;
2147 else if (match(Op1, m_BinOp(m_Specific(Op0), m_Constant(C))))
2148 Op0IsBinop = false;
2149 else
2150 return nullptr;
2152 // The identity constant for a binop leaves a variable operand unchanged. For
2153 // a vector, this is a splat of something like 0, -1, or 1.
2154 // If there's no identity constant for this binop, we're done.
2155 auto *BO = cast<BinaryOperator>(Op0IsBinop ? Op0 : Op1);
2156 BinaryOperator::BinaryOps BOpcode = BO->getOpcode();
2157 Constant *IdC = ConstantExpr::getBinOpIdentity(BOpcode, Shuf.getType(), true);
2158 if (!IdC)
2159 return nullptr;
2161 // Shuffle identity constants into the lanes that return the original value.
2162 // Example: shuf (mul X, {-1,-2,-3,-4}), X, {0,5,6,3} --> mul X, {-1,1,1,-4}
2163 // Example: shuf X, (add X, {-1,-2,-3,-4}), {0,1,6,7} --> add X, {0,0,-3,-4}
2164 // The existing binop constant vector remains in the same operand position.
2165 ArrayRef<int> Mask = Shuf.getShuffleMask();
2166 Constant *NewC = Op0IsBinop ? ConstantExpr::getShuffleVector(C, IdC, Mask) :
2167 ConstantExpr::getShuffleVector(IdC, C, Mask);
2169 bool MightCreatePoisonOrUB =
2170 is_contained(Mask, PoisonMaskElem) &&
2171 (Instruction::isIntDivRem(BOpcode) || Instruction::isShift(BOpcode));
2172 if (MightCreatePoisonOrUB)
2173 NewC = InstCombiner::getSafeVectorConstantForBinop(BOpcode, NewC, true);
2175 // shuf (bop X, C), X, M --> bop X, C'
2176 // shuf X, (bop X, C), M --> bop X, C'
2177 Value *X = Op0IsBinop ? Op1 : Op0;
2178 Instruction *NewBO = BinaryOperator::Create(BOpcode, X, NewC);
2179 NewBO->copyIRFlags(BO);
2181 // An undef shuffle mask element may propagate as an undef constant element in
2182 // the new binop. That would produce poison where the original code might not.
2183 // If we already made a safe constant, then there's no danger.
2184 if (is_contained(Mask, PoisonMaskElem) && !MightCreatePoisonOrUB)
2185 NewBO->dropPoisonGeneratingFlags();
2186 return NewBO;
2187 }
2189 /// If we have an insert of a scalar to a non-zero element of an undefined
2190 /// vector and then shuffle that value, that's the same as inserting to the zero
2191 /// element and shuffling. Splatting from the zero element is recognized as the
2192 /// canonical form of splat.
2193 static Instruction *canonicalizeInsertSplat(ShuffleVectorInst &Shuf,
2194 InstCombiner::BuilderTy &Builder) {
2195 Value *Op0 = Shuf.getOperand(0), *Op1 = Shuf.getOperand(1);
2196 ArrayRef<int> Mask = Shuf.getShuffleMask();
2197 Value *X;
2198 uint64_t IndexC;
2200 // Match a shuffle that is a splat to a non-zero element.
2201 if (!match(Op0, m_OneUse(m_InsertElt(m_Undef(), m_Value(X),
2202 m_ConstantInt(IndexC)))) ||
2203 !match(Op1, m_Undef()) || match(Mask, m_ZeroMask()) || IndexC == 0)
2204 return nullptr;
2206 // Insert into element 0 of a poison vector.
2207 PoisonValue *PoisonVec = PoisonValue::get(Shuf.getType());
2208 Value *NewIns = Builder.CreateInsertElement(PoisonVec, X, (uint64_t)0);
2210 // Splat from element 0. Any mask element that is undefined remains undefined.
2211 // For example:
2212 // shuf (inselt undef, X, 2), _, <2,2,undef>
2213 // --> shuf (inselt undef, X, 0), poison, <0,0,undef>
2214 unsigned NumMaskElts =
2215 cast<FixedVectorType>(Shuf.getType())->getNumElements();
2216 SmallVector<int, 16> NewMask(NumMaskElts, 0);
2217 for (unsigned i = 0; i != NumMaskElts; ++i)
2218 if (Mask[i] == PoisonMaskElem)
2219 NewMask[i] = Mask[i];
2221 return new ShuffleVectorInst(NewIns, NewMask);
2222 }
2224 /// Try to fold shuffles that are the equivalent of a vector select.
2225 Instruction *InstCombinerImpl::foldSelectShuffle(ShuffleVectorInst &Shuf) {
2226 if (!Shuf.isSelect())
2227 return nullptr;
2229 // Canonicalize to choose from operand 0 first unless operand 1 is undefined.
2230 // Commuting undef to operand 0 conflicts with another canonicalization.
2231 unsigned NumElts = cast<FixedVectorType>(Shuf.getType())->getNumElements();
2232 if (!match(Shuf.getOperand(1), m_Undef()) &&
2233 Shuf.getMaskValue(0) >= (int)NumElts) {
2234 // TODO: Can we assert that both operands of a shuffle-select are not undef
2235 // (otherwise, it would have been folded by instsimplify)?
2236 Shuf.commute();
2237 return &Shuf;
2238 }
2240 if (Instruction *I = foldSelectShuffleOfSelectShuffle(Shuf))
2241 return I;
2243 if (Instruction *I = foldSelectShuffleWith1Binop(Shuf))
2244 return I;
2246 BinaryOperator *B0, *B1;
2247 if (!match(Shuf.getOperand(0), m_BinOp(B0)) ||
2248 !match(Shuf.getOperand(1), m_BinOp(B1)))
2249 return nullptr;
2251 // If one operand is "0 - X", allow that to be viewed as "X * -1"
2252 // (ConstantsAreOp1) by getAlternateBinop below. If the neg is not paired
2253 // with a multiply, we will exit because C0/C1 will not be set.
2254 Value *X, *Y;
2255 Constant *C0 = nullptr, *C1 = nullptr;
2256 bool ConstantsAreOp1;
2257 if (match(B0, m_BinOp(m_Constant(C0), m_Value(X))) &&
2258 match(B1, m_BinOp(m_Constant(C1), m_Value(Y))))
2259 ConstantsAreOp1 = false;
2260 else if (match(B0, m_CombineOr(m_BinOp(m_Value(X), m_Constant(C0)),
2261 m_Neg(m_Value(X)))) &&
2262 match(B1, m_CombineOr(m_BinOp(m_Value(Y), m_Constant(C1)),
2263 m_Neg(m_Value(Y)))))
2264 ConstantsAreOp1 = true;
2265 else
2266 return nullptr;
2268 // We need matching binops to fold the lanes together.
2269 BinaryOperator::BinaryOps Opc0 = B0->getOpcode();
2270 BinaryOperator::BinaryOps Opc1 = B1->getOpcode();
2271 bool DropNSW = false;
2272 if (ConstantsAreOp1 && Opc0 != Opc1) {
2273 // TODO: We drop "nsw" if shift is converted into multiply because it may
2274 // not be correct when the shift amount is BitWidth - 1. We could examine
2275 // each vector element to determine if it is safe to keep that flag.
2276 if (Opc0 == Instruction::Shl || Opc1 == Instruction::Shl)
2277 DropNSW = true;
2278 if (BinopElts AltB0 = getAlternateBinop(B0, DL)) {
2279 assert(isa<Constant>(AltB0.Op1) && "Expecting constant with alt binop");
2280 Opc0 = AltB0.Opcode;
2281 C0 = cast<Constant>(AltB0.Op1);
2282 } else if (BinopElts AltB1 = getAlternateBinop(B1, DL)) {
2283 assert(isa<Constant>(AltB1.Op1) && "Expecting constant with alt binop");
2284 Opc1 = AltB1.Opcode;
2285 C1 = cast<Constant>(AltB1.Op1);
2286 }
2287 }
2289 if (Opc0 != Opc1 || !C0 || !C1)
2290 return nullptr;
2292 // The opcodes must be the same. Use a new name to make that clear.
2293 BinaryOperator::BinaryOps BOpc = Opc0;
2295 // Select the constant elements needed for the single binop.
2296 ArrayRef<int> Mask = Shuf.getShuffleMask();
2297 Constant *NewC = ConstantExpr::getShuffleVector(C0, C1, Mask);
2299 // We are moving a binop after a shuffle. When a shuffle has an undefined
2300 // mask element, the result is undefined, but it is not poison or undefined
2301 // behavior. That is not necessarily true for div/rem/shift.
2302 bool MightCreatePoisonOrUB =
2303 is_contained(Mask, PoisonMaskElem) &&
2304 (Instruction::isIntDivRem(BOpc) || Instruction::isShift(BOpc));
2305 if (MightCreatePoisonOrUB)
2306 NewC = InstCombiner::getSafeVectorConstantForBinop(BOpc, NewC,
2307 ConstantsAreOp1);
2309 Value *V;
2310 if (X == Y) {
2311 // Remove a binop and the shuffle by rearranging the constant:
2312 // shuffle (op V, C0), (op V, C1), M --> op V, C'
2313 // shuffle (op C0, V), (op C1, V), M --> op C', V
2314 V = X;
2315 } else {
2316 // If there are 2 different variable operands, we must create a new shuffle
2317 // (select) first, so check uses to ensure that we don't end up with more
2318 // instructions than we started with.
2319 if (!B0->hasOneUse() && !B1->hasOneUse())
2320 return nullptr;
2322 // If we use the original shuffle mask and op1 is *variable*, we would be
2323 // putting an undef into operand 1 of div/rem/shift. This is either UB or
2324 // poison. We do not have to guard against UB when *constants* are op1
2325 // because safe constants guarantee that we do not overflow sdiv/srem (and
2326 // there's no danger for other opcodes).
2327 // TODO: To allow this case, create a new shuffle mask with no undefs.
2328 if (MightCreatePoisonOrUB && !ConstantsAreOp1)
2329 return nullptr;
2331 // Note: In general, we do not create new shuffles in InstCombine because we
2332 // do not know if a target can lower an arbitrary shuffle optimally. In this
2333 // case, the shuffle uses the existing mask, so there is no additional risk.
2335 // Select the variable vectors first, then perform the binop:
2336 // shuffle (op X, C0), (op Y, C1), M --> op (shuffle X, Y, M), C'
2337 // shuffle (op C0, X), (op C1, Y), M --> op C', (shuffle X, Y, M)
2338 V = Builder.CreateShuffleVector(X, Y, Mask);
2339 }
2341 Value *NewBO = ConstantsAreOp1 ? Builder.CreateBinOp(BOpc, V, NewC) :
2342 Builder.CreateBinOp(BOpc, NewC, V);
2344 // Flags are intersected from the 2 source binops. But there are 2 exceptions:
2345 // 1. If we changed an opcode, poison conditions might have changed.
2346 // 2. If the shuffle had undef mask elements, the new binop might have undefs
2347 // where the original code did not. But if we already made a safe constant,
2348 // then there's no danger.
2349 if (auto *NewI = dyn_cast<Instruction>(NewBO)) {
2350 NewI->copyIRFlags(B0);
2351 NewI->andIRFlags(B1);
2352 if (DropNSW)
2353 NewI->setHasNoSignedWrap(false);
2354 if (is_contained(Mask, PoisonMaskElem) && !MightCreatePoisonOrUB)
2355 NewI->dropPoisonGeneratingFlags();
2356 }
2357 return replaceInstUsesWith(Shuf, NewBO);
2358 }
2360 /// Convert a narrowing shuffle of a bitcasted vector into a vector truncate.
2361 /// Example (little endian):
2362 /// shuf (bitcast <4 x i16> X to <8 x i8>), <0, 2, 4, 6> --> trunc X to <4 x i8>
2363 static Instruction *foldTruncShuffle(ShuffleVectorInst &Shuf,
2364 bool IsBigEndian) {
2365 // This must be a bitcasted shuffle of 1 vector integer operand.
2366 Type *DestType = Shuf.getType();
2367 Value *X;
2368 if (!match(Shuf.getOperand(0), m_BitCast(m_Value(X))) ||
2369 !match(Shuf.getOperand(1), m_Undef()) || !DestType->isIntOrIntVectorTy())
2370 return nullptr;
2372 // The source type must have the same number of elements as the shuffle,
2373 // and the source element type must be larger than the shuffle element type.
2374 Type *SrcType = X->getType();
2375 if (!SrcType->isVectorTy() || !SrcType->isIntOrIntVectorTy() ||
2376 cast<FixedVectorType>(SrcType)->getNumElements() !=
2377 cast<FixedVectorType>(DestType)->getNumElements() ||
2378 SrcType->getScalarSizeInBits() % DestType->getScalarSizeInBits() != 0)
2379 return nullptr;
2381 assert(Shuf.changesLength() && !Shuf.increasesLength() &&
2382 "Expected a shuffle that decreases length");
2384 // Last, check that the mask chooses the correct low bits for each narrow
2385 // element in the result.
2386 uint64_t TruncRatio =
2387 SrcType->getScalarSizeInBits() / DestType->getScalarSizeInBits();
2388 ArrayRef<int> Mask = Shuf.getShuffleMask();
2389 for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
2390 if (Mask[i] == PoisonMaskElem)
2391 continue;
2392 uint64_t LSBIndex = IsBigEndian ? (i + 1) * TruncRatio - 1 : i * TruncRatio;
2393 assert(LSBIndex <= INT32_MAX && "Overflowed 32-bits");
2394 if (Mask[i] != (int)LSBIndex)
2395 return nullptr;
2396 }
2398 return new TruncInst(X, DestType);
2399 }
2401 /// Match a shuffle-select-shuffle pattern where the shuffles are widening and
2402 /// narrowing (concatenating with undef and extracting back to the original
2403 /// length). This allows replacing the wide select with a narrow select.
2404 static Instruction *narrowVectorSelect(ShuffleVectorInst &Shuf,
2405 InstCombiner::BuilderTy &Builder) {
2406 // This must be a narrowing identity shuffle. It extracts the 1st N elements
2407 // of the 1st vector operand of a shuffle.
2408 if (!match(Shuf.getOperand(1), m_Undef()) || !Shuf.isIdentityWithExtract())
2409 return nullptr;
2411 // The vector being shuffled must be a vector select that we can eliminate.
2412 // TODO: The one-use requirement could be eased if X and/or Y are constants.
2413 Value *Cond, *X, *Y;
2414 if (!match(Shuf.getOperand(0),
2415 m_OneUse(m_Select(m_Value(Cond), m_Value(X), m_Value(Y)))))
2416 return nullptr;
2418 // We need a narrow condition value. It must be extended with undef elements
2419 // and have the same number of elements as this shuffle.
2420 unsigned NarrowNumElts =
2421 cast<FixedVectorType>(Shuf.getType())->getNumElements();
2422 Value *NarrowCond;
2423 if (!match(Cond, m_OneUse(m_Shuffle(m_Value(NarrowCond), m_Undef()))) ||
2424 cast<FixedVectorType>(NarrowCond->getType())->getNumElements() !=
2425 NarrowNumElts ||
2426 !cast<ShuffleVectorInst>(Cond)->isIdentityWithPadding())
2427 return nullptr;
2429 // shuf (sel (shuf NarrowCond, undef, WideMask), X, Y), undef, NarrowMask) -->
2430 // sel NarrowCond, (shuf X, undef, NarrowMask), (shuf Y, undef, NarrowMask)
2431 Value *NarrowX = Builder.CreateShuffleVector(X, Shuf.getShuffleMask());
2432 Value *NarrowY = Builder.CreateShuffleVector(Y, Shuf.getShuffleMask());
2433 return SelectInst::Create(NarrowCond, NarrowX, NarrowY);
2434 }
2436 /// Canonicalize FP negate/abs after shuffle.
2437 static Instruction *foldShuffleOfUnaryOps(ShuffleVectorInst &Shuf,
2438 InstCombiner::BuilderTy &Builder) {
2439 auto *S0 = dyn_cast<Instruction>(Shuf.getOperand(0));
2440 Value *X;
2441 if (!S0 || !match(S0, m_CombineOr(m_FNeg(m_Value(X)), m_FAbs(m_Value(X)))))
2442 return nullptr;
2444 bool IsFNeg = S0->getOpcode() == Instruction::FNeg;
2446 // Match 1-input (unary) shuffle.
2447 // shuffle (fneg/fabs X), Mask --> fneg/fabs (shuffle X, Mask)
2448 if (S0->hasOneUse() && match(Shuf.getOperand(1), m_Undef())) {
2449 Value *NewShuf = Builder.CreateShuffleVector(X, Shuf.getShuffleMask());
2450 if (IsFNeg)
2451 return UnaryOperator::CreateFNegFMF(NewShuf, S0);
2453 Function *FAbs = Intrinsic::getDeclaration(Shuf.getModule(),
2454 Intrinsic::fabs, Shuf.getType());
2455 CallInst *NewF = CallInst::Create(FAbs, {NewShuf});
2456 NewF->setFastMathFlags(S0->getFastMathFlags());
2457 return NewF;
2458 }
2460 // Match 2-input (binary) shuffle.
2461 auto *S1 = dyn_cast<Instruction>(Shuf.getOperand(1));
2462 Value *Y;
2463 if (!S1 || !match(S1, m_CombineOr(m_FNeg(m_Value(Y)), m_FAbs(m_Value(Y)))) ||
2464 S0->getOpcode() != S1->getOpcode() ||
2465 (!S0->hasOneUse() && !S1->hasOneUse()))
2466 return nullptr;
2468 // shuf (fneg/fabs X), (fneg/fabs Y), Mask --> fneg/fabs (shuf X, Y, Mask)
2469 Value *NewShuf = Builder.CreateShuffleVector(X, Y, Shuf.getShuffleMask());
2470 Instruction *NewF;
2471 if (IsFNeg) {
2472 NewF = UnaryOperator::CreateFNeg(NewShuf);
2473 } else {
2474 Function *FAbs = Intrinsic::getDeclaration(Shuf.getModule(),
2475 Intrinsic::fabs, Shuf.getType());
2476 NewF = CallInst::Create(FAbs, {NewShuf});
2477 }
2478 NewF->copyIRFlags(S0);
2479 NewF->andIRFlags(S1);
2480 return NewF;
2481 }
2483 /// Canonicalize casts after shuffle.
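/// Example (illustrative, assuming one of the casts has a single use):
///   shuf (sitofp <4 x i32> %x to <4 x float>), (sitofp <4 x i32> %y), Mask
///   --> sitofp (shuf %x, %y, Mask) to <4 x float>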
2484 static Instruction *foldCastShuffle(ShuffleVectorInst &Shuf,
2485 InstCombiner::BuilderTy &Builder) {
2486 // Do we have 2 matching cast operands?
2487 auto *Cast0 = dyn_cast<CastInst>(Shuf.getOperand(0));
2488 auto *Cast1 = dyn_cast<CastInst>(Shuf.getOperand(1));
2489 if (!Cast0 || !Cast1 || Cast0->getOpcode() != Cast1->getOpcode() ||
2490 Cast0->getSrcTy() != Cast1->getSrcTy())
2491 return nullptr;
2493 // TODO: Allow other opcodes? That would require easing the type restrictions
2494 // below.
2495 CastInst::CastOps CastOpcode = Cast0->getOpcode();
2496 switch (CastOpcode) {
2497 case Instruction::FPToSI:
2498 case Instruction::FPToUI:
2499 case Instruction::SIToFP:
2500 case Instruction::UIToFP:
2501 break;
2502 default:
2503 return nullptr;
2504 }
2506 VectorType *ShufTy = Shuf.getType();
2507 VectorType *ShufOpTy = cast<VectorType>(Shuf.getOperand(0)->getType());
2508 VectorType *CastSrcTy = cast<VectorType>(Cast0->getSrcTy());
2510 // TODO: Allow length-increasing shuffles?
2511 if (ShufTy->getElementCount().getKnownMinValue() >
2512 ShufOpTy->getElementCount().getKnownMinValue())
2513 return nullptr;
2515 // TODO: Allow element-size-decreasing casts (ex: fptosi float to i8)?
2516 assert(isa<FixedVectorType>(CastSrcTy) && isa<FixedVectorType>(ShufOpTy) &&
2517 "Expected fixed vector operands for casts and binary shuffle");
2518 if (CastSrcTy->getPrimitiveSizeInBits() > ShufOpTy->getPrimitiveSizeInBits())
2519 return nullptr;
2521 // At least one of the operands must have only one use (the shuffle).
2522 if (!Cast0->hasOneUse() && !Cast1->hasOneUse())
2523 return nullptr;
2525 // shuffle (cast X), (cast Y), Mask --> cast (shuffle X, Y, Mask)
2526 Value *X = Cast0->getOperand(0);
2527 Value *Y = Cast1->getOperand(0);
2528 Value *NewShuf = Builder.CreateShuffleVector(X, Y, Shuf.getShuffleMask());
2529 return CastInst::Create(CastOpcode, NewShuf, ShufTy);
2530 }
2532 /// Try to fold an extract subvector operation.
2533 static Instruction *foldIdentityExtractShuffle(ShuffleVectorInst &Shuf) {
2534 Value *Op0 = Shuf.getOperand(0), *Op1 = Shuf.getOperand(1);
2535 if (!Shuf.isIdentityWithExtract() || !match(Op1, m_Undef()))
2536 return nullptr;
2538 // Check if we are extracting all bits of an inserted scalar:
2539 // extract-subvec (bitcast (inselt ?, X, 0) --> bitcast X to subvec type
2540 Value *X;
2541 if (match(Op0, m_BitCast(m_InsertElt(m_Value(), m_Value(X), m_Zero()))) &&
2542 X->getType()->getPrimitiveSizeInBits() ==
2543 Shuf.getType()->getPrimitiveSizeInBits())
2544 return new BitCastInst(X, Shuf.getType());
2546 // Try to combine 2 shuffles into 1 shuffle by concatenating a shuffle mask.
2547 Value *Y;
2548 ArrayRef<int> Mask;
2549 if (!match(Op0, m_Shuffle(m_Value(X), m_Value(Y), m_Mask(Mask))))
2550 return nullptr;
2552 // Be conservative with shuffle transforms. If we can't kill the 1st shuffle,
2553 // then combining may result in worse codegen.
2554 if (!Op0->hasOneUse())
2555 return nullptr;
2557 // We are extracting a subvector from a shuffle. Remove excess elements from
2558 // the 1st shuffle mask to eliminate the extract.
2560 // This transform is conservatively limited to identity extracts because we do
2561 // not allow arbitrary shuffle mask creation as a target-independent transform
2562 // (because we can't guarantee that will lower efficiently).
2564 // If the extracting shuffle has an undef mask element, it transfers to the
2565 // new shuffle mask. Otherwise, copy the original mask element. Example:
2566 // shuf (shuf X, Y, <C0, C1, C2, undef, C4>), undef, <0, undef, 2, 3> -->
2567 // shuf X, Y, <C0, undef, C2, undef>
2568 unsigned NumElts = cast<FixedVectorType>(Shuf.getType())->getNumElements();
2569 SmallVector<int, 16> NewMask(NumElts);
2570 assert(NumElts < Mask.size() &&
2571 "Identity with extract must have less elements than its inputs");
2573 for (unsigned i = 0; i != NumElts; ++i) {
2574 int ExtractMaskElt = Shuf.getMaskValue(i);
2575 int MaskElt = Mask[i];
2576 NewMask[i] = ExtractMaskElt == PoisonMaskElem ? ExtractMaskElt : MaskElt;
2577 }
2578 return new ShuffleVectorInst(X, Y, NewMask);
2579 }
2581 /// Try to replace a shuffle with an insertelement or try to replace a shuffle
2582 /// operand with the operand of an insertelement.
2583 static Instruction *foldShuffleWithInsert(ShuffleVectorInst &Shuf,
2584 InstCombinerImpl &IC) {
2585 Value *V0 = Shuf.getOperand(0), *V1 = Shuf.getOperand(1);
2586 SmallVector<int, 16> Mask;
2587 Shuf.getShuffleMask(Mask);
2589 int NumElts = Mask.size();
2590 int InpNumElts = cast<FixedVectorType>(V0->getType())->getNumElements();
2592 // This is a specialization of a fold in SimplifyDemandedVectorElts. We may
2593 // not be able to handle it there if the insertelement has >1 use.
2594 // If the shuffle has an insertelement operand but does not choose the
2595 // inserted scalar element from that value, then we can replace that shuffle
2596 // operand with the source vector of the insertelement.
2597 Value *X;
2598 uint64_t IdxC;
2599 if (match(V0, m_InsertElt(m_Value(X), m_Value(), m_ConstantInt(IdxC)))) {
2600 // shuf (inselt X, ?, IdxC), ?, Mask --> shuf X, ?, Mask
2601 if (!is_contained(Mask, (int)IdxC))
2602 return IC.replaceOperand(Shuf, 0, X);
2603 }
2604 if (match(V1, m_InsertElt(m_Value(X), m_Value(), m_ConstantInt(IdxC)))) {
2605 // Offset the index constant by the vector width because we are checking for
2606 // accesses to the 2nd vector input of the shuffle.
2607 IdxC += InpNumElts;
2608 // shuf ?, (inselt X, ?, IdxC), Mask --> shuf ?, X, Mask
2609 if (!is_contained(Mask, (int)IdxC))
2610 return IC.replaceOperand(Shuf, 1, X);
2611 }
2612 // For the rest of the transform, the shuffle must not change vector sizes.
2613 // TODO: This restriction could be removed if the insert has only one use
2614 // (because the transform would require a new length-changing shuffle).
2615 if (NumElts != InpNumElts)
2616 return nullptr;
2618 // shuffle (insert ?, Scalar, IndexC), V1, Mask --> insert V1, Scalar, IndexC'
2619 auto isShufflingScalarIntoOp1 = [&](Value *&Scalar, ConstantInt *&IndexC) {
2620 // We need an insertelement with a constant index.
2621 if (!match(V0, m_InsertElt(m_Value(), m_Value(Scalar),
2622 m_ConstantInt(IndexC))))
2623 return false;
2625 // Test the shuffle mask to see if it splices the inserted scalar into the
2626 // operand 1 vector of the shuffle.
2627 int NewInsIndex = -1;
2628 for (int i = 0; i != NumElts; ++i) {
2629 // Ignore undef mask elements.
2630 if (Mask[i] == -1)
2631 continue;
2633 // The shuffle takes elements of operand 1 without lane changes.
2634 if (Mask[i] == NumElts + i)
2635 continue;
2637 // The shuffle must choose the inserted scalar exactly once.
2638 if (NewInsIndex != -1 || Mask[i] != IndexC->getSExtValue())
2639 return false;
2641 // The shuffle is placing the inserted scalar into element i.
2642 NewInsIndex = i;
2643 }
2645 assert(NewInsIndex != -1 && "Did not fold shuffle with unused operand?");
2647 // Index is updated to the potentially translated insertion lane.
2648 IndexC = ConstantInt::get(IndexC->getIntegerType(), NewInsIndex);
2649 return true;
2650 };
2652 // If the shuffle is unnecessary, insert the scalar operand directly into
2653 // operand 1 of the shuffle. Example:
2654 // shuffle (insert ?, S, 1), V1, <1, 5, 6, 7> --> insert V1, S, 0
2655 Value *Scalar;
2656 ConstantInt *IndexC;
2657 if (isShufflingScalarIntoOp1(Scalar, IndexC))
2658 return InsertElementInst::Create(V1, Scalar, IndexC);
2660 // Try again after commuting shuffle. Example:
2661 // shuffle V0, (insert ?, S, 0), <0, 1, 2, 4> -->
2662 // shuffle (insert ?, S, 0), V0, <4, 5, 6, 0> --> insert V0, S, 3
2663 std::swap(V0, V1);
2664 ShuffleVectorInst::commuteShuffleMask(Mask, NumElts);
2665 if (isShufflingScalarIntoOp1(Scalar, IndexC))
2666 return InsertElementInst::Create(V1, Scalar, IndexC);
2668 return nullptr;
2669 }
2671 static Instruction *foldIdentityPaddedShuffles(ShuffleVectorInst &Shuf) {
2672 // Match the operands as identity with padding (also known as concatenation
2673 // with undef) shuffles of the same source type. The backend is expected to
2674 // recreate these concatenations from a shuffle of narrow operands.
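// Example (illustrative, <2 x i32> sources widened to <4 x i32>; masks are
// not from the original comments):
//   shuf (shuf X, undef, <0, 1, undef, undef>),
//        (shuf Y, undef, <0, 1, undef, undef>), <0, 5>
//   --> shuf X, Y, <0, 3>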
2675 auto *Shuffle0 = dyn_cast<ShuffleVectorInst>(Shuf.getOperand(0));
2676 auto *Shuffle1 = dyn_cast<ShuffleVectorInst>(Shuf.getOperand(1));
2677 if (!Shuffle0 || !Shuffle0->isIdentityWithPadding() ||
2678 !Shuffle1 || !Shuffle1->isIdentityWithPadding())
2679 return nullptr;
2681 // We limit this transform to power-of-2 types because we expect that the
2682 // backend can convert the simplified IR patterns to identical nodes as the
2683 // original IR.
2684 // TODO: If we can verify the same behavior for arbitrary types, the
2685 // power-of-2 checks can be removed.
2686 Value *X = Shuffle0->getOperand(0);
2687 Value *Y = Shuffle1->getOperand(0);
2688 if (X->getType() != Y->getType() ||
2689 !isPowerOf2_32(cast<FixedVectorType>(Shuf.getType())->getNumElements()) ||
2690 !isPowerOf2_32(
2691 cast<FixedVectorType>(Shuffle0->getType())->getNumElements()) ||
2692 !isPowerOf2_32(cast<FixedVectorType>(X->getType())->getNumElements()) ||
2693 match(X, m_Undef()) || match(Y, m_Undef()))
2694 return nullptr;
2695 assert(match(Shuffle0->getOperand(1), m_Undef()) &&
2696 match(Shuffle1->getOperand(1), m_Undef()) &&
2697 "Unexpected operand for identity shuffle");
2699 // This is a shuffle of 2 widening shuffles. We can shuffle the narrow source
2700 // operands directly by adjusting the shuffle mask to account for the narrower
2701 // types:
2702 // shuf (widen X), (widen Y), Mask --> shuf X, Y, Mask'
2703 int NarrowElts = cast<FixedVectorType>(X->getType())->getNumElements();
2704 int WideElts = cast<FixedVectorType>(Shuffle0->getType())->getNumElements();
2705 assert(WideElts > NarrowElts && "Unexpected types for identity with padding");
2707 ArrayRef<int> Mask = Shuf.getShuffleMask();
2708 SmallVector<int, 16> NewMask(Mask.size(), -1);
2709 for (int i = 0, e = Mask.size(); i != e; ++i) {
2710 if (Mask[i] == -1)
2711 continue;
2713 // If this shuffle is choosing an undef element from 1 of the sources, that
2714 // element is undef.
2715 if (Mask[i] < WideElts) {
2716 if (Shuffle0->getMaskValue(Mask[i]) == -1)
2717 continue;
2718 } else {
2719 if (Shuffle1->getMaskValue(Mask[i] - WideElts) == -1)
2720 continue;
2721 }
2723 // If this shuffle is choosing from the 1st narrow op, the mask element is
2724 // the same. If this shuffle is choosing from the 2nd narrow op, the mask
2725 // element is offset down to adjust for the narrow vector widths.
2726 if (Mask[i] < WideElts) {
2727 assert(Mask[i] < NarrowElts && "Unexpected shuffle mask");
2728 NewMask[i] = Mask[i];
2729 } else {
2730 assert(Mask[i] < (WideElts + NarrowElts) && "Unexpected shuffle mask");
2731 NewMask[i] = Mask[i] - (WideElts - NarrowElts);
2732 }
2733 }
2734 return new ShuffleVectorInst(X, Y, NewMask);
2735 }
2737 // Splatting the first element of the result of a BinOp, where any of the
2738 // BinOp's operands is the result of a first-element splat, can be simplified
2739 // to splatting the first element of the BinOp's result.
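// Example (illustrative IR, not from the original comments):
//   %s = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> zeroinitializer
//   %b = add <4 x i32> %s, %y
//   %r = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
// -->
//   %b.new = add <4 x i32> %x, %y
//   %r = shufflevector <4 x i32> %b.new, <4 x i32> poison, <4 x i32> zeroinitializer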
2740 Instruction *InstCombinerImpl::simplifyBinOpSplats(ShuffleVectorInst &SVI) {
2741 if (!match(SVI.getOperand(1), m_Undef()) ||
2742 !match(SVI.getShuffleMask(), m_ZeroMask()) ||
2743 !SVI.getOperand(0)->hasOneUse())
2744 return nullptr;
2746 Value *Op0 = SVI.getOperand(0);
2747 Value *X, *Y;
2748 if (!match(Op0, m_BinOp(m_Shuffle(m_Value(X), m_Undef(), m_ZeroMask()),
2749 m_Value(Y))) &&
2750 !match(Op0, m_BinOp(m_Value(X),
2751 m_Shuffle(m_Value(Y), m_Undef(), m_ZeroMask()))))
2752 return nullptr;
2753 if (X->getType() != Y->getType())
2754 return nullptr;
2756 auto *BinOp = cast<BinaryOperator>(Op0);
2757 if (!isSafeToSpeculativelyExecute(BinOp))
2758 return nullptr;
2760 Value *NewBO = Builder.CreateBinOp(BinOp->getOpcode(), X, Y);
2761 if (auto NewBOI = dyn_cast<Instruction>(NewBO))
2762 NewBOI->copyIRFlags(BinOp);
2764 return new ShuffleVectorInst(NewBO, SVI.getShuffleMask());
2765 }
Instruction *InstCombinerImpl::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
  Value *LHS = SVI.getOperand(0);
  Value *RHS = SVI.getOperand(1);
  SimplifyQuery ShufQuery = SQ.getWithInstruction(&SVI);
  if (auto *V = simplifyShuffleVectorInst(LHS, RHS, SVI.getShuffleMask(),
                                          SVI.getType(), ShufQuery))
    return replaceInstUsesWith(SVI, V);

  if (Instruction *I = simplifyBinOpSplats(SVI))
    return I;

  // Canonicalize splat shuffle to use poison RHS. Handle this explicitly in
  // order to support scalable vectors.
  if (match(SVI.getShuffleMask(), m_ZeroMask()) && !isa<PoisonValue>(RHS))
    return replaceOperand(SVI, 1, PoisonValue::get(RHS->getType()));

  if (isa<ScalableVectorType>(LHS->getType()))
    return nullptr;

  unsigned VWidth = cast<FixedVectorType>(SVI.getType())->getNumElements();
  unsigned LHSWidth = cast<FixedVectorType>(LHS->getType())->getNumElements();

  // shuffle (bitcast X), (bitcast Y), Mask --> bitcast (shuffle X, Y, Mask)
  //
  // If X and Y are of the same (vector) type, and the element size is not
  // changed by the bitcasts, we can distribute the bitcasts through the
  // shuffle, hopefully reducing the number of instructions. We make sure that
  // at least one bitcast only has one use, so we don't *increase* the number
  // of instructions here.
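  //
  // A minimal example (illustrative): with %x and %y of type <4 x i32>,
  //   shuf (bitcast %x to <4 x float>), (bitcast %y to <4 x float>), Mask
  //   --> bitcast (shuf %x, %y, Mask) to <4 x float>
  // The scalar size (32 bits) is unchanged, so the mask selects the same
  // lanes either way.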
  Value *X, *Y;
  if (match(LHS, m_BitCast(m_Value(X))) && match(RHS, m_BitCast(m_Value(Y))) &&
      X->getType()->isVectorTy() && X->getType() == Y->getType() &&
      X->getType()->getScalarSizeInBits() ==
          SVI.getType()->getScalarSizeInBits() &&
      (LHS->hasOneUse() || RHS->hasOneUse())) {
    Value *V = Builder.CreateShuffleVector(X, Y, SVI.getShuffleMask(),
                                           SVI.getName() + ".uncasted");
    return new BitCastInst(V, SVI.getType());
  }

  ArrayRef<int> Mask = SVI.getShuffleMask();

  // Peek through a bitcasted shuffle operand by scaling the mask. If the
  // simulated shuffle can simplify, then this shuffle is unnecessary:
  // shuf (bitcast X), undef, Mask --> bitcast X'
  // TODO: This could be extended to allow length-changing shuffles.
  // The transform might also be obsoleted if we allowed canonicalization
  // of bitcasted shuffles.
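  //
  // A minimal example (illustrative): X of type <4 x i16>, shuffle type
  // <2 x i32>:
  //   shuf (bitcast <4 x i16> %x to <2 x i32>), undef, <0,1>
  // The <2 x i32> mask <0,1> is narrowed by a factor of 2 to the <4 x i16>
  // mask <0,1,2,3>, an identity of %x, so the whole sequence simplifies to
  // bitcast <4 x i16> %x to <2 x i32>.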
  if (match(LHS, m_BitCast(m_Value(X))) && match(RHS, m_Undef()) &&
      X->getType()->isVectorTy() && VWidth == LHSWidth) {
    // Try to create a scaled mask constant.
    auto *XType = cast<FixedVectorType>(X->getType());
    unsigned XNumElts = XType->getNumElements();
    SmallVector<int, 16> ScaledMask;
    if (XNumElts >= VWidth) {
      assert(XNumElts % VWidth == 0 && "Unexpected vector bitcast");
      narrowShuffleMaskElts(XNumElts / VWidth, Mask, ScaledMask);
    } else {
      assert(VWidth % XNumElts == 0 && "Unexpected vector bitcast");
      if (!widenShuffleMaskElts(VWidth / XNumElts, Mask, ScaledMask))
        ScaledMask.clear();
    }
    if (!ScaledMask.empty()) {
      // If the shuffled source vector simplifies, cast that value to this
      // shuffle's type.
      if (auto *V = simplifyShuffleVectorInst(X, UndefValue::get(XType),
                                              ScaledMask, XType, ShufQuery))
        return BitCastInst::Create(Instruction::BitCast, V, SVI.getType());
    }
  }

  // shuffle x, x, mask --> shuffle x, undef, mask'
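  // For example (illustrative), with a 4-element %x:
  //   shuf %x, %x, <0,5,2,7> --> shuf %x, poison, <0,1,2,3>
  // because index 5 selects the same lane of %x as index 1, and 7 the same
  // lane as 3.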
  if (LHS == RHS) {
    assert(!match(RHS, m_Undef()) &&
           "Shuffle with 2 undef ops not simplified?");
    return new ShuffleVectorInst(LHS, createUnaryMask(Mask, LHSWidth));
  }

  // shuffle undef, x, mask --> shuffle x, undef, mask'
  if (match(LHS, m_Undef())) {
    SVI.commute();
    return &SVI;
  }

  if (Instruction *I = canonicalizeInsertSplat(SVI, Builder))
    return I;

  if (Instruction *I = foldSelectShuffle(SVI))
    return I;

  if (Instruction *I = foldTruncShuffle(SVI, DL.isBigEndian()))
    return I;

  if (Instruction *I = narrowVectorSelect(SVI, Builder))
    return I;

  if (Instruction *I = foldShuffleOfUnaryOps(SVI, Builder))
    return I;

  if (Instruction *I = foldCastShuffle(SVI, Builder))
    return I;

  APInt PoisonElts(VWidth, 0);
  APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
  if (Value *V = SimplifyDemandedVectorElts(&SVI, AllOnesEltMask, PoisonElts)) {
    if (V != &SVI)
      return replaceInstUsesWith(SVI, V);
    return &SVI;
  }

  if (Instruction *I = foldIdentityExtractShuffle(SVI))
    return I;

  // These transforms have the potential to lose undef knowledge, so they are
  // intentionally placed after SimplifyDemandedVectorElts().
  if (Instruction *I = foldShuffleWithInsert(SVI, *this))
    return I;
  if (Instruction *I = foldIdentityPaddedShuffles(SVI))
    return I;
  if (match(RHS, m_Undef()) && canEvaluateShuffled(LHS, Mask)) {
    Value *V = evaluateInDifferentElementOrder(LHS, Mask, Builder);
    return replaceInstUsesWith(SVI, V);
  }

  // SROA generates shuffle+bitcast when the extracted sub-vector is bitcast
  // to a non-vector type. We can instead bitcast the original vector followed
  // by an extract of the desired element:
  //
  //   %sroa = shufflevector <16 x i8> %in, <16 x i8> undef,
  //                         <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  //   %1 = bitcast <4 x i8> %sroa to i32
  // Becomes:
  //   %bc = bitcast <16 x i8> %in to <4 x i32>
  //   %ext = extractelement <4 x i32> %bc, i32 0
  //
  // If the shuffle is extracting a contiguous range of values from the input
  // vector, then each use that is a bitcast of the extracted size can be
  // replaced. This will work if the vector types are compatible and the begin
  // index is aligned to a value in the casted vector type. If the begin index
  // isn't aligned, then we can shuffle the original vector (keeping the same
  // vector type) before extracting.
  //
  // This code will bail out if the target type is fundamentally incompatible
  // with vectors of the source type.
  //
  // Example of <16 x i8>, target type i32:
  // Index range [4,8):     v-----------v Will work.
  //            +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
  // <16 x i8>: |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
  // <4 x i32>: |           |           |           |           |
  //            +-----------+-----------+-----------+-----------+
  // Index range [6,10):          ^-----------^ Needs an extra shuffle.
  // Target type i40:       ^--------------^ Won't work, bail.
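  //
  // For the unaligned [6,10) case above (illustrative), the input is first
  // shuffled (keeping the <16 x i8> type) so the extracted bytes land at
  // index 0:
  //   %e = shufflevector <16 x i8> %in, <16 x i8> poison,
  //        <16 x i32> <i32 6, i32 7, i32 8, i32 9, i32 poison, ...>
  // after which the aligned path applies: bitcast %e to <4 x i32> and extract
  // element 0.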
  bool MadeChange = false;
  if (isShuffleExtractingFromLHS(SVI, Mask)) {
    Value *V = LHS;
    unsigned MaskElems = Mask.size();
    auto *SrcTy = cast<FixedVectorType>(V->getType());
    unsigned VecBitWidth = SrcTy->getPrimitiveSizeInBits().getFixedValue();
    unsigned SrcElemBitWidth = DL.getTypeSizeInBits(SrcTy->getElementType());
    assert(SrcElemBitWidth && "vector elements must have a bitwidth");
    unsigned SrcNumElems = SrcTy->getNumElements();
    SmallVector<BitCastInst *, 8> BCs;
    DenseMap<Type *, Value *> NewBCs;
    for (User *U : SVI.users())
      if (BitCastInst *BC = dyn_cast<BitCastInst>(U))
        if (!BC->use_empty())
          // Only visit bitcasts that weren't previously handled.
          BCs.push_back(BC);
    for (BitCastInst *BC : BCs) {
      unsigned BegIdx = Mask.front();
      Type *TgtTy = BC->getDestTy();
      unsigned TgtElemBitWidth = DL.getTypeSizeInBits(TgtTy);
      if (!TgtElemBitWidth)
        continue;
      unsigned TgtNumElems = VecBitWidth / TgtElemBitWidth;
      bool VecBitWidthsEqual = VecBitWidth == TgtNumElems * TgtElemBitWidth;
      bool BegIsAligned = 0 == ((SrcElemBitWidth * BegIdx) % TgtElemBitWidth);
      if (!VecBitWidthsEqual)
        continue;
      if (!VectorType::isValidElementType(TgtTy))
        continue;
      auto *CastSrcTy = FixedVectorType::get(TgtTy, TgtNumElems);
      if (!BegIsAligned) {
        // Shuffle the input so [0,MaskElems) contains the output, and
        // [MaskElems,SrcNumElems) is poison.
        SmallVector<int, 16> ShuffleMask(SrcNumElems, -1);
        for (unsigned I = 0, E = MaskElems, Idx = BegIdx; I != E; ++Idx, ++I)
          ShuffleMask[I] = Idx;
        V = Builder.CreateShuffleVector(V, ShuffleMask,
                                        SVI.getName() + ".extract");
        BegIdx = 0;
      }
      unsigned SrcElemsPerTgtElem = TgtElemBitWidth / SrcElemBitWidth;
      assert(SrcElemsPerTgtElem);
      BegIdx /= SrcElemsPerTgtElem;
      bool BCAlreadyExists = NewBCs.contains(CastSrcTy);
      auto *NewBC =
          BCAlreadyExists
              ? NewBCs[CastSrcTy]
              : Builder.CreateBitCast(V, CastSrcTy, SVI.getName() + ".bc");
      if (!BCAlreadyExists)
        NewBCs[CastSrcTy] = NewBC;
      auto *Ext = Builder.CreateExtractElement(NewBC, BegIdx,
                                               SVI.getName() + ".extract");
      // The shufflevector isn't being replaced: the bitcast that used it
      // is. InstCombine will visit the newly-created instructions.
      replaceInstUsesWith(*BC, Ext);
      MadeChange = true;
    }
  }
  // If the LHS is a shufflevector itself, see if we can combine it with this
  // one without producing an unusual shuffle.
  // Cases that might be simplified:
  // 1.
  //   x1 = shuffle(v1, v2, mask1)
  //   x  = shuffle(x1, undef, mask)
  //   ==>
  //   x  = shuffle(v1, undef, newMask)
  //   newMask[i] = (mask[i] < x1.size()) ? mask1[mask[i]] : -1
  // 2.
  //   x1 = shuffle(v1, undef, mask1)
  //   x  = shuffle(x1, x2, mask)
  //   where v1.size() == mask1.size()
  //   ==>
  //   x  = shuffle(v1, x2, newMask)
  //   newMask[i] = (mask[i] < x1.size()) ? mask1[mask[i]] : mask[i]
  // 3.
  //   x2 = shuffle(v2, undef, mask2)
  //   x  = shuffle(x1, x2, mask)
  //   where v2.size() == mask2.size()
  //   ==>
  //   x  = shuffle(x1, v2, newMask)
  //   newMask[i] = (mask[i] < x1.size())
  //                ? mask[i] : mask2[mask[i]-x1.size()]+x1.size()
  // 4.
  //   x1 = shuffle(v1, undef, mask1)
  //   x2 = shuffle(v2, undef, mask2)
  //   x  = shuffle(x1, x2, mask)
  //   where v1.size() == v2.size()
  //   ==>
  //   x  = shuffle(v1, v2, newMask)
  //   newMask[i] = (mask[i] < x1.size())
  //                ? mask1[mask[i]] : mask2[mask[i]-x1.size()]+v1.size()
  //
  // Here we are really conservative: we are absolutely afraid of producing a
  // shuffle mask not in the input program, because the code gen may not be
  // smart enough to turn a merged shuffle into two specific shuffles: it may
  // produce worse code. As such, we only merge two shuffles if the result is
  // either a splat or one of the input shuffle masks. In this case, merging
  // the shuffles just removes one instruction, which we know is safe. This is
  // good for things like turning: (splat(splat)) -> splat, or
  // merge(V[0..n], V[n+1..2n]) -> V[0..2n]
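  //
  // For example (illustrative), case 1 collapsing a splat of a splat:
  //   %x1 = shufflevector <4 x i32> %v1, <4 x i32> poison,
  //                       <4 x i32> <i32 2, i32 2, i32 2, i32 2>
  //   %x  = shufflevector <4 x i32> %x1, <4 x i32> poison,
  //                       <4 x i32> zeroinitializer
  // -->
  //   %x = shufflevector <4 x i32> %v1, <4 x i32> poison,
  //                      <4 x i32> <i32 2, i32 2, i32 2, i32 2>
  // Here newMask[i] = mask1[mask[i]] = mask1[0] = 2 for all i; the result is
  // a splat mask, so the merge is performed.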
  ShuffleVectorInst *LHSShuffle = dyn_cast<ShuffleVectorInst>(LHS);
  ShuffleVectorInst *RHSShuffle = dyn_cast<ShuffleVectorInst>(RHS);
  if (LHSShuffle)
    if (!match(LHSShuffle->getOperand(1), m_Poison()) &&
        !match(RHS, m_Poison()))
      LHSShuffle = nullptr;
  if (RHSShuffle)
    if (!match(RHSShuffle->getOperand(1), m_Poison()))
      RHSShuffle = nullptr;
  if (!LHSShuffle && !RHSShuffle)
    return MadeChange ? &SVI : nullptr;
  Value *LHSOp0 = nullptr;
  Value *LHSOp1 = nullptr;
  Value *RHSOp0 = nullptr;
  unsigned LHSOp0Width = 0;
  unsigned RHSOp0Width = 0;
  if (LHSShuffle) {
    LHSOp0 = LHSShuffle->getOperand(0);
    LHSOp1 = LHSShuffle->getOperand(1);
    LHSOp0Width = cast<FixedVectorType>(LHSOp0->getType())->getNumElements();
  }
  if (RHSShuffle) {
    RHSOp0 = RHSShuffle->getOperand(0);
    RHSOp0Width = cast<FixedVectorType>(RHSOp0->getType())->getNumElements();
  }
  Value *newLHS = LHS;
  Value *newRHS = RHS;
  if (LHSShuffle) {
    // case 1
    if (match(RHS, m_Poison())) {
      newLHS = LHSOp0;
      newRHS = LHSOp1;
    }
    // case 2 or 4
    else if (LHSOp0Width == LHSWidth) {
      newLHS = LHSOp0;
    }
  }
  // case 3 or 4
  if (RHSShuffle && RHSOp0Width == LHSWidth) {
    newRHS = RHSOp0;
  }
  // case 4
  if (LHSOp0 == RHSOp0) {
    newLHS = LHSOp0;
    newRHS = nullptr;
  }
  if (newLHS == LHS && newRHS == RHS)
    return MadeChange ? &SVI : nullptr;
  ArrayRef<int> LHSMask;
  ArrayRef<int> RHSMask;
  if (newLHS != LHS)
    LHSMask = LHSShuffle->getShuffleMask();
  if (RHSShuffle && newRHS != RHS)
    RHSMask = RHSShuffle->getShuffleMask();

  unsigned newLHSWidth = (newLHS != LHS) ? LHSOp0Width : LHSWidth;
  SmallVector<int, 16> newMask;
  bool isSplat = true;
  int SplatElt = -1;
  // Create a new mask for the new ShuffleVectorInst so that the new
  // ShuffleVectorInst is equivalent to the original one.
  for (unsigned i = 0; i < VWidth; ++i) {
    int eltMask;
    if (Mask[i] < 0) {
      // This element is a poison value.
      eltMask = -1;
    } else if (Mask[i] < (int)LHSWidth) {
      // This element is from the left hand side vector operand.
      //
      // If LHS is going to be replaced (case 1, 2, or 4), calculate the
      // new mask value for the element.
      if (newLHS != LHS) {
        eltMask = LHSMask[Mask[i]];
        // If the value selected is a poison value, explicitly specify it
        // with a -1 mask value.
        if (eltMask >= (int)LHSOp0Width && isa<PoisonValue>(LHSOp1))
          eltMask = -1;
      } else
        eltMask = Mask[i];
    } else {
      // This element is from the right hand side vector operand.
      //
      // If the value selected is a poison value, explicitly specify it
      // with a -1 mask value. (case 1)
      if (match(RHS, m_Poison()))
        eltMask = -1;
      // If RHS is going to be replaced (case 3 or 4), calculate the
      // new mask value for the element.
      else if (newRHS != RHS) {
        eltMask = RHSMask[Mask[i] - LHSWidth];
        // If the value selected is a poison value, explicitly specify it
        // with a -1 mask value.
        if (eltMask >= (int)RHSOp0Width) {
          assert(match(RHSShuffle->getOperand(1), m_Poison()) &&
                 "should have been checked above");
          eltMask = -1;
        }
      } else
        eltMask = Mask[i] - LHSWidth;
    }

    // If LHS's width is changed, shift the mask value accordingly.
    // If newRHS == nullptr, i.e. LHSOp0 == RHSOp0, we want to remap any
    // references from RHSOp0 to LHSOp0, so we don't need to shift the mask.
    // If newRHS == newLHS, we want to remap any references from newRHS to
    // newLHS so that we can properly identify splats that may occur due to
    // obfuscation across the two vectors.
    if (eltMask >= 0 && newRHS != nullptr && newLHS != newRHS)
      eltMask += newLHSWidth;

    // Check if this could still be a splat.
    if (eltMask >= 0) {
      if (SplatElt >= 0 && SplatElt != eltMask)
        isSplat = false;
      SplatElt = eltMask;
    }

    newMask.push_back(eltMask);
  }
  // If the result mask is equal to one of the original shuffle masks,
  // or is a splat, do the replacement.
  if (isSplat || newMask == LHSMask || newMask == RHSMask || newMask == Mask) {
    if (!newRHS)
      newRHS = PoisonValue::get(newLHS->getType());
    return new ShuffleVectorInst(newLHS, newRHS, newMask);
  }

  return MadeChange ? &SVI : nullptr;
}