1 //===- InstCombineCalls.cpp -----------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitCall, visitInvoke, and visitCallBr functions.
11 //===----------------------------------------------------------------------===//
13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/APFloat.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/APSInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/ADT/Twine.h"
24 #include "llvm/Analysis/AssumptionCache.h"
25 #include "llvm/Analysis/InstructionSimplify.h"
26 #include "llvm/Analysis/Loads.h"
27 #include "llvm/Analysis/MemoryBuiltins.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/Analysis/VectorUtils.h"
30 #include "llvm/IR/Attributes.h"
31 #include "llvm/IR/BasicBlock.h"
32 #include "llvm/IR/Constant.h"
33 #include "llvm/IR/Constants.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/DerivedTypes.h"
36 #include "llvm/IR/Function.h"
37 #include "llvm/IR/GlobalVariable.h"
38 #include "llvm/IR/InstrTypes.h"
39 #include "llvm/IR/Instruction.h"
40 #include "llvm/IR/Instructions.h"
41 #include "llvm/IR/IntrinsicInst.h"
42 #include "llvm/IR/Intrinsics.h"
43 #include "llvm/IR/LLVMContext.h"
44 #include "llvm/IR/Metadata.h"
45 #include "llvm/IR/PatternMatch.h"
46 #include "llvm/IR/Statepoint.h"
47 #include "llvm/IR/Type.h"
48 #include "llvm/IR/User.h"
49 #include "llvm/IR/Value.h"
50 #include "llvm/IR/ValueHandle.h"
51 #include "llvm/Support/AtomicOrdering.h"
52 #include "llvm/Support/Casting.h"
53 #include "llvm/Support/CommandLine.h"
54 #include "llvm/Support/Compiler.h"
55 #include "llvm/Support/Debug.h"
56 #include "llvm/Support/ErrorHandling.h"
57 #include "llvm/Support/KnownBits.h"
58 #include "llvm/Support/MathExtras.h"
59 #include "llvm/Support/raw_ostream.h"
60 #include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
61 #include "llvm/Transforms/Utils/Local.h"
62 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
63 #include <algorithm>
64 #include <cassert>
65 #include <cstdint>
66 #include <cstring>
67 #include <utility>
68 #include <vector>
70 using namespace llvm;
71 using namespace PatternMatch;
73 #define DEBUG_TYPE "instcombine"
75 STATISTIC(NumSimplified, "Number of library calls simplified");
77 static cl::opt<unsigned> GuardWideningWindow(
78 "instcombine-guard-widening-window",
79 cl::init(3),
80 cl::desc("How wide an instruction window to bypass looking for "
81 "another guard"));
83 /// Return the specified type promoted as it would be to pass through a va_arg
84 /// area.
85 static Type *getPromotedType(Type *Ty) {
86 if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
87 if (ITy->getBitWidth() < 32)
88 return Type::getInt32Ty(Ty->getContext());
90 return Ty;
93 /// Return a constant boolean vector that has true elements in all positions
94 /// where the input constant data vector has an element with the sign bit set.
95 static Constant *getNegativeIsTrueBoolVec(ConstantDataVector *V) {
96 SmallVector<Constant *, 32> BoolVec;
97 IntegerType *BoolTy = Type::getInt1Ty(V->getContext());
98 for (unsigned I = 0, E = V->getNumElements(); I != E; ++I) {
99 Constant *Elt = V->getElementAsConstant(I);
100 assert((isa<ConstantInt>(Elt) || isa<ConstantFP>(Elt)) &&
101 "Unexpected constant data vector element type");
102 bool Sign = V->getElementType()->isIntegerTy()
103 ? cast<ConstantInt>(Elt)->isNegative()
104 : cast<ConstantFP>(Elt)->isNegative();
105 BoolVec.push_back(ConstantInt::get(BoolTy, Sign));
107 return ConstantVector::get(BoolVec);
110 Instruction *InstCombiner::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
111 unsigned DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
112 unsigned CopyDstAlign = MI->getDestAlignment();
113 if (CopyDstAlign < DstAlign){
114 MI->setDestAlignment(DstAlign);
115 return MI;
118 unsigned SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
119 unsigned CopySrcAlign = MI->getSourceAlignment();
120 if (CopySrcAlign < SrcAlign) {
121 MI->setSourceAlignment(SrcAlign);
122 return MI;
125 // If we have a store to a location which is known constant, we can conclude
126 // that the store must be storing the constant value (else the memory
127 // wouldn't be constant), and this must be a noop.
128 if (AA->pointsToConstantMemory(MI->getDest())) {
129 // Set the size of the copy to 0, it will be deleted on the next iteration.
130 MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
131 return MI;
134 // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
135 // load/store.
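// Illustrative IR (a sketch, not taken from this file's tests): a 4-byte copy
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 4, i1 false)
// becomes roughly
//   %sc = bitcast i8* %s to i32*
//   %dc = bitcast i8* %d to i32*
//   %v  = load i32, i32* %sc
//   store i32 %v, i32* %dc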
136 ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
137 if (!MemOpLength) return nullptr;
139 // Source and destination pointer types are always "i8*" for the intrinsic. See
140 // if the size is something we can handle with a single primitive load/store.
141 // A single load+store correctly handles overlapping memory in the memmove
142 // case.
143 uint64_t Size = MemOpLength->getLimitedValue();
144 assert(Size && "0-sized memory transferring should be removed already.");
146 if (Size > 8 || (Size&(Size-1)))
147 return nullptr; // If not 1/2/4/8 bytes, exit.
149 // If it is an atomic transfer and the alignment is less than the size, we
150 // would introduce an unaligned memory access, which CodeGen would later
151 // expand into a libcall. That is not an evident performance win, so bail
152 // out for now.
153 if (isa<AtomicMemTransferInst>(MI))
154 if (CopyDstAlign < Size || CopySrcAlign < Size)
155 return nullptr;
157 // Use an integer load+store unless we can find something better.
158 unsigned SrcAddrSp =
159 cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
160 unsigned DstAddrSp =
161 cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();
163 IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
164 Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
165 Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
167 // If the memcpy has metadata describing the members, see if we can get the
168 // TBAA tag describing our copy.
169 MDNode *CopyMD = nullptr;
170 if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
171 CopyMD = M;
172 } else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
173 if (M->getNumOperands() == 3 && M->getOperand(0) &&
174 mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
175 mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
176 M->getOperand(1) &&
177 mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
178 mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
179 Size &&
180 M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
181 CopyMD = cast<MDNode>(M->getOperand(2));
184 Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
185 Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
186 LoadInst *L = Builder.CreateLoad(IntType, Src);
187 // Alignment from the mem intrinsic will be better, so use it.
188 L->setAlignment(CopySrcAlign);
189 if (CopyMD)
190 L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
191 MDNode *LoopMemParallelMD =
192 MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
193 if (LoopMemParallelMD)
194 L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
195 MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
196 if (AccessGroupMD)
197 L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
199 StoreInst *S = Builder.CreateStore(L, Dest);
200 // Alignment from the mem intrinsic will be better, so use it.
201 S->setAlignment(CopyDstAlign);
202 if (CopyMD)
203 S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
204 if (LoopMemParallelMD)
205 S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
206 if (AccessGroupMD)
207 S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
209 if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
210 // non-atomics can be volatile
211 L->setVolatile(MT->isVolatile());
212 S->setVolatile(MT->isVolatile());
214 if (isa<AtomicMemTransferInst>(MI)) {
215 // atomics have to be unordered
216 L->setOrdering(AtomicOrdering::Unordered);
217 S->setOrdering(AtomicOrdering::Unordered);
220 // Set the size of the copy to 0, it will be deleted on the next iteration.
221 MI->setLength(Constant::getNullValue(MemOpLength->getType()));
222 return MI;
225 Instruction *InstCombiner::SimplifyAnyMemSet(AnyMemSetInst *MI) {
226 unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
227 if (MI->getDestAlignment() < Alignment) {
228 MI->setDestAlignment(Alignment);
229 return MI;
232 // If we have a store to a location which is known constant, we can conclude
233 // that the store must be storing the constant value (else the memory
234 // wouldn't be constant), and this must be a noop.
235 if (AA->pointsToConstantMemory(MI->getDest())) {
236 // Set the size of the copy to 0, it will be deleted on the next iteration.
237 MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
238 return MI;
241 // Extract the length and alignment and fill if they are constant.
242 ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
243 ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
244 if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
245 return nullptr;
246 uint64_t Len = LenC->getLimitedValue();
247 Alignment = MI->getDestAlignment();
248 assert(Len && "0-sized memory setting should be removed already.");
250 // For the memset intrinsic, alignment 0 means alignment 1, but that is not true for a store.
251 if (Alignment == 0)
252 Alignment = 1;
254 // If it is an atomic memset and the alignment is less than the size, we
255 // would introduce an unaligned memory access, which CodeGen would later
256 // expand into a libcall. That is not an evident performance win, so bail
257 // out for now.
258 if (isa<AtomicMemSetInst>(MI))
259 if (Alignment < Len)
260 return nullptr;
262 // memset(s,c,n) -> store s, c (for n=1,2,4,8)
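// Illustrative example (a sketch, not from the source): an 8-byte memset of
// the byte value 0xAB is replaced by a single i64 store of the splatted fill
// pattern 0xABABABABABABABAB to the bitcast destination pointer, which is why
// the fill value below is multiplied by 0x0101010101010101.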
263 if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
264 Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8.
266 Value *Dest = MI->getDest();
267 unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
268 Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
269 Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);
271 // Extract the fill value and store.
272 uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
273 StoreInst *S = Builder.CreateStore(ConstantInt::get(ITy, Fill), Dest,
274 MI->isVolatile());
275 S->setAlignment(Alignment);
276 if (isa<AtomicMemSetInst>(MI))
277 S->setOrdering(AtomicOrdering::Unordered);
279 // Set the size of the copy to 0, it will be deleted on the next iteration.
280 MI->setLength(Constant::getNullValue(LenC->getType()));
281 return MI;
284 return nullptr;
287 static Value *simplifyX86immShift(const IntrinsicInst &II,
288 InstCombiner::BuilderTy &Builder) {
289 bool LogicalShift = false;
290 bool ShiftLeft = false;
292 switch (II.getIntrinsicID()) {
293 default: llvm_unreachable("Unexpected intrinsic!");
294 case Intrinsic::x86_sse2_psra_d:
295 case Intrinsic::x86_sse2_psra_w:
296 case Intrinsic::x86_sse2_psrai_d:
297 case Intrinsic::x86_sse2_psrai_w:
298 case Intrinsic::x86_avx2_psra_d:
299 case Intrinsic::x86_avx2_psra_w:
300 case Intrinsic::x86_avx2_psrai_d:
301 case Intrinsic::x86_avx2_psrai_w:
302 case Intrinsic::x86_avx512_psra_q_128:
303 case Intrinsic::x86_avx512_psrai_q_128:
304 case Intrinsic::x86_avx512_psra_q_256:
305 case Intrinsic::x86_avx512_psrai_q_256:
306 case Intrinsic::x86_avx512_psra_d_512:
307 case Intrinsic::x86_avx512_psra_q_512:
308 case Intrinsic::x86_avx512_psra_w_512:
309 case Intrinsic::x86_avx512_psrai_d_512:
310 case Intrinsic::x86_avx512_psrai_q_512:
311 case Intrinsic::x86_avx512_psrai_w_512:
312 LogicalShift = false; ShiftLeft = false;
313 break;
314 case Intrinsic::x86_sse2_psrl_d:
315 case Intrinsic::x86_sse2_psrl_q:
316 case Intrinsic::x86_sse2_psrl_w:
317 case Intrinsic::x86_sse2_psrli_d:
318 case Intrinsic::x86_sse2_psrli_q:
319 case Intrinsic::x86_sse2_psrli_w:
320 case Intrinsic::x86_avx2_psrl_d:
321 case Intrinsic::x86_avx2_psrl_q:
322 case Intrinsic::x86_avx2_psrl_w:
323 case Intrinsic::x86_avx2_psrli_d:
324 case Intrinsic::x86_avx2_psrli_q:
325 case Intrinsic::x86_avx2_psrli_w:
326 case Intrinsic::x86_avx512_psrl_d_512:
327 case Intrinsic::x86_avx512_psrl_q_512:
328 case Intrinsic::x86_avx512_psrl_w_512:
329 case Intrinsic::x86_avx512_psrli_d_512:
330 case Intrinsic::x86_avx512_psrli_q_512:
331 case Intrinsic::x86_avx512_psrli_w_512:
332 LogicalShift = true; ShiftLeft = false;
333 break;
334 case Intrinsic::x86_sse2_psll_d:
335 case Intrinsic::x86_sse2_psll_q:
336 case Intrinsic::x86_sse2_psll_w:
337 case Intrinsic::x86_sse2_pslli_d:
338 case Intrinsic::x86_sse2_pslli_q:
339 case Intrinsic::x86_sse2_pslli_w:
340 case Intrinsic::x86_avx2_psll_d:
341 case Intrinsic::x86_avx2_psll_q:
342 case Intrinsic::x86_avx2_psll_w:
343 case Intrinsic::x86_avx2_pslli_d:
344 case Intrinsic::x86_avx2_pslli_q:
345 case Intrinsic::x86_avx2_pslli_w:
346 case Intrinsic::x86_avx512_psll_d_512:
347 case Intrinsic::x86_avx512_psll_q_512:
348 case Intrinsic::x86_avx512_psll_w_512:
349 case Intrinsic::x86_avx512_pslli_d_512:
350 case Intrinsic::x86_avx512_pslli_q_512:
351 case Intrinsic::x86_avx512_pslli_w_512:
352 LogicalShift = true; ShiftLeft = true;
353 break;
355 assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");
357 // Simplify if count is constant.
358 auto Arg1 = II.getArgOperand(1);
359 auto CAZ = dyn_cast<ConstantAggregateZero>(Arg1);
360 auto CDV = dyn_cast<ConstantDataVector>(Arg1);
361 auto CInt = dyn_cast<ConstantInt>(Arg1);
362 if (!CAZ && !CDV && !CInt)
363 return nullptr;
365 APInt Count(64, 0);
366 if (CDV) {
367 // SSE2/AVX2 uses only the first 64 bits of the 128-bit vector
368 // operand to compute the shift amount.
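// For example (illustrative): for psrl.w with a <8 x i16> shift-amount operand
// of <1, 0, 0, 0, 0, 0, 0, 0>, the low four elements concatenate to a 64-bit
// shift count of 1; the upper 64 bits of the operand are ignored.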
369 auto VT = cast<VectorType>(CDV->getType());
370 unsigned BitWidth = VT->getElementType()->getPrimitiveSizeInBits();
371 assert((64 % BitWidth) == 0 && "Unexpected packed shift size");
372 unsigned NumSubElts = 64 / BitWidth;
374 // Concatenate the sub-elements to create the 64-bit value.
375 for (unsigned i = 0; i != NumSubElts; ++i) {
376 unsigned SubEltIdx = (NumSubElts - 1) - i;
377 auto SubElt = cast<ConstantInt>(CDV->getElementAsConstant(SubEltIdx));
378 Count <<= BitWidth;
379 Count |= SubElt->getValue().zextOrTrunc(64);
382 else if (CInt)
383 Count = CInt->getValue();
385 auto Vec = II.getArgOperand(0);
386 auto VT = cast<VectorType>(Vec->getType());
387 auto SVT = VT->getElementType();
388 unsigned VWidth = VT->getNumElements();
389 unsigned BitWidth = SVT->getPrimitiveSizeInBits();
391 // If shift-by-zero then just return the original value.
392 if (Count.isNullValue())
393 return Vec;
395 // Handle cases when Shift >= BitWidth.
396 if (Count.uge(BitWidth)) {
397 // If LogicalShift - just return zero.
398 if (LogicalShift)
399 return ConstantAggregateZero::get(VT);
401 // If ArithmeticShift - clamp Shift to (BitWidth - 1).
402 Count = APInt(64, BitWidth - 1);
405 // Get a constant vector of the same type as the first operand.
406 auto ShiftAmt = ConstantInt::get(SVT, Count.zextOrTrunc(BitWidth));
407 auto ShiftVec = Builder.CreateVectorSplat(VWidth, ShiftAmt);
409 if (ShiftLeft)
410 return Builder.CreateShl(Vec, ShiftVec);
412 if (LogicalShift)
413 return Builder.CreateLShr(Vec, ShiftVec);
415 return Builder.CreateAShr(Vec, ShiftVec);
418 // Attempt to simplify AVX2 per-element shift intrinsics to a generic IR shift.
419 // Unlike the generic IR shifts, the intrinsics have defined behaviour for out
420 // of range shift amounts (logical - set to zero, arithmetic - splat sign bit).
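// For example (an illustrative sketch): with an in-range constant shift vector,
//   %r = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %x,
//                                              <4 x i32> <i32 1, i32 2, i32 3, i32 4>)
// can be rewritten as the generic IR shift
//   %r = lshr <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
// because every per-element shift amount is within range.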
421 static Value *simplifyX86varShift(const IntrinsicInst &II,
422 InstCombiner::BuilderTy &Builder) {
423 bool LogicalShift = false;
424 bool ShiftLeft = false;
426 switch (II.getIntrinsicID()) {
427 default: llvm_unreachable("Unexpected intrinsic!");
428 case Intrinsic::x86_avx2_psrav_d:
429 case Intrinsic::x86_avx2_psrav_d_256:
430 case Intrinsic::x86_avx512_psrav_q_128:
431 case Intrinsic::x86_avx512_psrav_q_256:
432 case Intrinsic::x86_avx512_psrav_d_512:
433 case Intrinsic::x86_avx512_psrav_q_512:
434 case Intrinsic::x86_avx512_psrav_w_128:
435 case Intrinsic::x86_avx512_psrav_w_256:
436 case Intrinsic::x86_avx512_psrav_w_512:
437 LogicalShift = false;
438 ShiftLeft = false;
439 break;
440 case Intrinsic::x86_avx2_psrlv_d:
441 case Intrinsic::x86_avx2_psrlv_d_256:
442 case Intrinsic::x86_avx2_psrlv_q:
443 case Intrinsic::x86_avx2_psrlv_q_256:
444 case Intrinsic::x86_avx512_psrlv_d_512:
445 case Intrinsic::x86_avx512_psrlv_q_512:
446 case Intrinsic::x86_avx512_psrlv_w_128:
447 case Intrinsic::x86_avx512_psrlv_w_256:
448 case Intrinsic::x86_avx512_psrlv_w_512:
449 LogicalShift = true;
450 ShiftLeft = false;
451 break;
452 case Intrinsic::x86_avx2_psllv_d:
453 case Intrinsic::x86_avx2_psllv_d_256:
454 case Intrinsic::x86_avx2_psllv_q:
455 case Intrinsic::x86_avx2_psllv_q_256:
456 case Intrinsic::x86_avx512_psllv_d_512:
457 case Intrinsic::x86_avx512_psllv_q_512:
458 case Intrinsic::x86_avx512_psllv_w_128:
459 case Intrinsic::x86_avx512_psllv_w_256:
460 case Intrinsic::x86_avx512_psllv_w_512:
461 LogicalShift = true;
462 ShiftLeft = true;
463 break;
465 assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");
467 // Simplify if all shift amounts are constant/undef.
468 auto *CShift = dyn_cast<Constant>(II.getArgOperand(1));
469 if (!CShift)
470 return nullptr;
472 auto Vec = II.getArgOperand(0);
473 auto VT = cast<VectorType>(II.getType());
474 auto SVT = VT->getVectorElementType();
475 int NumElts = VT->getNumElements();
476 int BitWidth = SVT->getIntegerBitWidth();
478 // Collect each element's shift amount.
479 // We also collect special cases: UNDEF = -1, OUT-OF-RANGE = BitWidth.
480 bool AnyOutOfRange = false;
481 SmallVector<int, 8> ShiftAmts;
482 for (int I = 0; I < NumElts; ++I) {
483 auto *CElt = CShift->getAggregateElement(I);
484 if (CElt && isa<UndefValue>(CElt)) {
485 ShiftAmts.push_back(-1);
486 continue;
489 auto *COp = dyn_cast_or_null<ConstantInt>(CElt);
490 if (!COp)
491 return nullptr;
493 // Handle out of range shifts.
494 // If LogicalShift - set to BitWidth (special case).
495 // If ArithmeticShift - set to (BitWidth - 1) (sign splat).
496 APInt ShiftVal = COp->getValue();
497 if (ShiftVal.uge(BitWidth)) {
498 AnyOutOfRange = LogicalShift;
499 ShiftAmts.push_back(LogicalShift ? BitWidth : BitWidth - 1);
500 continue;
503 ShiftAmts.push_back((int)ShiftVal.getZExtValue());
506 // If all elements are out of range or UNDEF, return a vector of zeros/undefs.
507 // ArithmeticShift should only hit this if they are all UNDEF.
508 auto OutOfRange = [&](int Idx) { return (Idx < 0) || (BitWidth <= Idx); };
509 if (llvm::all_of(ShiftAmts, OutOfRange)) {
510 SmallVector<Constant *, 8> ConstantVec;
511 for (int Idx : ShiftAmts) {
512 if (Idx < 0) {
513 ConstantVec.push_back(UndefValue::get(SVT));
514 } else {
515 assert(LogicalShift && "Logical shift expected");
516 ConstantVec.push_back(ConstantInt::getNullValue(SVT));
519 return ConstantVector::get(ConstantVec);
522 // We can't handle only some out of range values with generic logical shifts.
523 if (AnyOutOfRange)
524 return nullptr;
526 // Build the shift amount constant vector.
527 SmallVector<Constant *, 8> ShiftVecAmts;
528 for (int Idx : ShiftAmts) {
529 if (Idx < 0)
530 ShiftVecAmts.push_back(UndefValue::get(SVT));
531 else
532 ShiftVecAmts.push_back(ConstantInt::get(SVT, Idx));
534 auto ShiftVec = ConstantVector::get(ShiftVecAmts);
536 if (ShiftLeft)
537 return Builder.CreateShl(Vec, ShiftVec);
539 if (LogicalShift)
540 return Builder.CreateLShr(Vec, ShiftVec);
542 return Builder.CreateAShr(Vec, ShiftVec);
545 static Value *simplifyX86pack(IntrinsicInst &II,
546 InstCombiner::BuilderTy &Builder, bool IsSigned) {
547 Value *Arg0 = II.getArgOperand(0);
548 Value *Arg1 = II.getArgOperand(1);
549 Type *ResTy = II.getType();
551 // Fast path for the all-undef case.
552 if (isa<UndefValue>(Arg0) && isa<UndefValue>(Arg1))
553 return UndefValue::get(ResTy);
555 Type *ArgTy = Arg0->getType();
556 unsigned NumLanes = ResTy->getPrimitiveSizeInBits() / 128;
557 unsigned NumSrcElts = ArgTy->getVectorNumElements();
558 assert(ResTy->getVectorNumElements() == (2 * NumSrcElts) &&
559 "Unexpected packing types");
561 unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
562 unsigned DstScalarSizeInBits = ResTy->getScalarSizeInBits();
563 unsigned SrcScalarSizeInBits = ArgTy->getScalarSizeInBits();
564 assert(SrcScalarSizeInBits == (2 * DstScalarSizeInBits) &&
565 "Unexpected packing types");
567 // Constant folding.
568 if (!isa<Constant>(Arg0) || !isa<Constant>(Arg1))
569 return nullptr;
571 // Clamp Values - signed/unsigned both use signed clamp values, but they
572 // differ on the min/max values.
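// For instance (illustrative values): packsswb clamps each i16 source element
// to [-128, 127] before truncating to i8, while packuswb clamps to [0, 255];
// both comparisons below are signed, only the bounds differ.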
573 APInt MinValue, MaxValue;
574 if (IsSigned) {
575 // PACKSS: Truncate signed value with signed saturation.
576 // Source values less than dst minint are saturated to minint.
577 // Source values greater than dst maxint are saturated to maxint.
578 MinValue =
579 APInt::getSignedMinValue(DstScalarSizeInBits).sext(SrcScalarSizeInBits);
580 MaxValue =
581 APInt::getSignedMaxValue(DstScalarSizeInBits).sext(SrcScalarSizeInBits);
582 } else {
583 // PACKUS: Truncate signed value with unsigned saturation.
584 // Source values less than zero are saturated to zero.
585 // Source values greater than dst maxuint are saturated to maxuint.
586 MinValue = APInt::getNullValue(SrcScalarSizeInBits);
587 MaxValue = APInt::getLowBitsSet(SrcScalarSizeInBits, DstScalarSizeInBits);
590 auto *MinC = Constant::getIntegerValue(ArgTy, MinValue);
591 auto *MaxC = Constant::getIntegerValue(ArgTy, MaxValue);
592 Arg0 = Builder.CreateSelect(Builder.CreateICmpSLT(Arg0, MinC), MinC, Arg0);
593 Arg1 = Builder.CreateSelect(Builder.CreateICmpSLT(Arg1, MinC), MinC, Arg1);
594 Arg0 = Builder.CreateSelect(Builder.CreateICmpSGT(Arg0, MaxC), MaxC, Arg0);
595 Arg1 = Builder.CreateSelect(Builder.CreateICmpSGT(Arg1, MaxC), MaxC, Arg1);
597 // Shuffle clamped args together at the lane level.
598 SmallVector<unsigned, 32> PackMask;
599 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
600 for (unsigned Elt = 0; Elt != NumSrcEltsPerLane; ++Elt)
601 PackMask.push_back(Elt + (Lane * NumSrcEltsPerLane));
602 for (unsigned Elt = 0; Elt != NumSrcEltsPerLane; ++Elt)
603 PackMask.push_back(Elt + (Lane * NumSrcEltsPerLane) + NumSrcElts);
605 auto *Shuffle = Builder.CreateShuffleVector(Arg0, Arg1, PackMask);
607 // Truncate to dst size.
608 return Builder.CreateTrunc(Shuffle, ResTy);
611 static Value *simplifyX86movmsk(const IntrinsicInst &II,
612 InstCombiner::BuilderTy &Builder) {
613 Value *Arg = II.getArgOperand(0);
614 Type *ResTy = II.getType();
615 Type *ArgTy = Arg->getType();
617 // movmsk(undef) -> zero as we must ensure the upper bits are zero.
618 if (isa<UndefValue>(Arg))
619 return Constant::getNullValue(ResTy);
621 // We can't easily peek through x86_mmx types.
622 if (!ArgTy->isVectorTy())
623 return nullptr;
625 // Expand MOVMSK to compare/bitcast/zext:
626 // e.g. PMOVMSKB(v16i8 x):
627 // %cmp = icmp slt <16 x i8> %x, zeroinitializer
628 // %int = bitcast <16 x i1> %cmp to i16
629 // %res = zext i16 %int to i32
630 unsigned NumElts = ArgTy->getVectorNumElements();
631 Type *IntegerVecTy = VectorType::getInteger(cast<VectorType>(ArgTy));
632 Type *IntegerTy = Builder.getIntNTy(NumElts);
634 Value *Res = Builder.CreateBitCast(Arg, IntegerVecTy);
635 Res = Builder.CreateICmpSLT(Res, Constant::getNullValue(IntegerVecTy));
636 Res = Builder.CreateBitCast(Res, IntegerTy);
637 Res = Builder.CreateZExtOrTrunc(Res, ResTy);
638 return Res;
641 static Value *simplifyX86addcarry(const IntrinsicInst &II,
642 InstCombiner::BuilderTy &Builder) {
643 Value *CarryIn = II.getArgOperand(0);
644 Value *Op1 = II.getArgOperand(1);
645 Value *Op2 = II.getArgOperand(2);
646 Type *RetTy = II.getType();
647 Type *OpTy = Op1->getType();
648 assert(RetTy->getStructElementType(0)->isIntegerTy(8) &&
649 RetTy->getStructElementType(1) == OpTy && OpTy == Op2->getType() &&
650 "Unexpected types for x86 addcarry");
652 // If carry-in is zero, this is just an unsigned add with overflow.
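// Illustrative IR (a sketch, not from the source):
//   %r = call { i8, i64 } @llvm.x86.addcarry.64(i8 0, i64 %a, i64 %b)
// becomes
//   %u   = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
//   %sum = extractvalue { i64, i1 } %u, 0
//   %ovb = extractvalue { i64, i1 } %u, 1
//   %ov  = zext i1 %ovb to i8
// with the pieces re-packed into the { i8, i64 } result below.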
653 if (match(CarryIn, m_ZeroInt())) {
654 Value *UAdd = Builder.CreateIntrinsic(Intrinsic::uadd_with_overflow, OpTy,
655 { Op1, Op2 });
656 // The types have to be adjusted to match the x86 call types.
657 Value *UAddResult = Builder.CreateExtractValue(UAdd, 0);
658 Value *UAddOV = Builder.CreateZExt(Builder.CreateExtractValue(UAdd, 1),
659 Builder.getInt8Ty());
660 Value *Res = UndefValue::get(RetTy);
661 Res = Builder.CreateInsertValue(Res, UAddOV, 0);
662 return Builder.CreateInsertValue(Res, UAddResult, 1);
665 return nullptr;
668 static Value *simplifyX86insertps(const IntrinsicInst &II,
669 InstCombiner::BuilderTy &Builder) {
670 auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2));
671 if (!CInt)
672 return nullptr;
674 VectorType *VecTy = cast<VectorType>(II.getType());
675 assert(VecTy->getNumElements() == 4 && "insertps with wrong vector type");
677 // The immediate permute control byte looks like this:
678 // [3:0] - zero mask for each 32-bit lane
679 // [5:4] - select one 32-bit destination lane
680 // [7:6] - select one 32-bit source lane
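// For example (illustrative): an immediate of 0x4C (source lane 1, destination
// lane 0, zero mask 0b1100) copies element 1 of the source into element 0 of
// the destination and zeroes elements 2 and 3.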
682 uint8_t Imm = CInt->getZExtValue();
683 uint8_t ZMask = Imm & 0xf;
684 uint8_t DestLane = (Imm >> 4) & 0x3;
685 uint8_t SourceLane = (Imm >> 6) & 0x3;
687 ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);
689 // If all zero mask bits are set, this was just a weird way to
690 // generate a zero vector.
691 if (ZMask == 0xf)
692 return ZeroVector;
694 // Initialize by passing all of the first source bits through.
695 uint32_t ShuffleMask[4] = { 0, 1, 2, 3 };
697 // We may replace the second operand with the zero vector.
698 Value *V1 = II.getArgOperand(1);
700 if (ZMask) {
701 // If the zero mask is being used with a single input or the zero mask
702 // overrides the destination lane, this is a shuffle with the zero vector.
703 if ((II.getArgOperand(0) == II.getArgOperand(1)) ||
704 (ZMask & (1 << DestLane))) {
705 V1 = ZeroVector;
706 // We may still move 32-bits of the first source vector from one lane
707 // to another.
708 ShuffleMask[DestLane] = SourceLane;
709 // The zero mask may override the previous insert operation.
710 for (unsigned i = 0; i < 4; ++i)
711 if ((ZMask >> i) & 0x1)
712 ShuffleMask[i] = i + 4;
713 } else {
714 // TODO: Model this case as 2 shuffles or a 'logical and' plus shuffle?
715 return nullptr;
717 } else {
718 // Replace the selected destination lane with the selected source lane.
719 ShuffleMask[DestLane] = SourceLane + 4;
722 return Builder.CreateShuffleVector(II.getArgOperand(0), V1, ShuffleMask);
725 /// Attempt to simplify SSE4A EXTRQ/EXTRQI instructions using constant folding
726 /// or conversion to a shuffle vector.
727 static Value *simplifyX86extrq(IntrinsicInst &II, Value *Op0,
728 ConstantInt *CILength, ConstantInt *CIIndex,
729 InstCombiner::BuilderTy &Builder) {
730 auto LowConstantHighUndef = [&](uint64_t Val) {
731 Type *IntTy64 = Type::getInt64Ty(II.getContext());
732 Constant *Args[] = {ConstantInt::get(IntTy64, Val),
733 UndefValue::get(IntTy64)};
734 return ConstantVector::get(Args);
737 // See if we're dealing with constant values.
738 Constant *C0 = dyn_cast<Constant>(Op0);
739 ConstantInt *CI0 =
740 C0 ? dyn_cast_or_null<ConstantInt>(C0->getAggregateElement((unsigned)0))
741 : nullptr;
743 // Attempt to constant fold.
744 if (CILength && CIIndex) {
745 // From AMD documentation: "The bit index and field length are each six
746 // bits in length; other bits of the field are ignored."
747 APInt APIndex = CIIndex->getValue().zextOrTrunc(6);
748 APInt APLength = CILength->getValue().zextOrTrunc(6);
750 unsigned Index = APIndex.getZExtValue();
752 // From AMD documentation: "a value of zero in the field length is
753 // defined as length of 64".
754 unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();
756 // From AMD documentation: "If the sum of the bit index + length field
757 // is greater than 64, the results are undefined".
758 unsigned End = Index + Length;
760 // Note that both field index and field length are 8-bit quantities.
761 // Since variables 'Index' and 'Length' are unsigned values
762 // obtained from zero-extending field index and field length
763 // respectively, their sum should never wrap around.
764 if (End > 64)
765 return UndefValue::get(II.getType());
767 // If we are inserting whole bytes, we can convert this to a shuffle.
768 // Lowering can recognize EXTRQI shuffle masks.
769 if ((Length % 8) == 0 && (Index % 8) == 0) {
770 // Convert bit indices to byte indices.
771 Length /= 8;
772 Index /= 8;
774 Type *IntTy8 = Type::getInt8Ty(II.getContext());
775 Type *IntTy32 = Type::getInt32Ty(II.getContext());
776 VectorType *ShufTy = VectorType::get(IntTy8, 16);
778 SmallVector<Constant *, 16> ShuffleMask;
779 for (int i = 0; i != (int)Length; ++i)
780 ShuffleMask.push_back(
781 Constant::getIntegerValue(IntTy32, APInt(32, i + Index)));
782 for (int i = Length; i != 8; ++i)
783 ShuffleMask.push_back(
784 Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
785 for (int i = 8; i != 16; ++i)
786 ShuffleMask.push_back(UndefValue::get(IntTy32));
788 Value *SV = Builder.CreateShuffleVector(
789 Builder.CreateBitCast(Op0, ShufTy),
790 ConstantAggregateZero::get(ShufTy), ConstantVector::get(ShuffleMask));
791 return Builder.CreateBitCast(SV, II.getType());
794 // Constant Fold - shift Index'th bit to lowest position and mask off
795 // Length bits.
796 if (CI0) {
797 APInt Elt = CI0->getValue();
798 Elt.lshrInPlace(Index);
799 Elt = Elt.zextOrTrunc(Length);
800 return LowConstantHighUndef(Elt.getZExtValue());
803 // If we were an EXTRQ call, we'll save registers if we convert to EXTRQI.
804 if (II.getIntrinsicID() == Intrinsic::x86_sse4a_extrq) {
805 Value *Args[] = {Op0, CILength, CIIndex};
806 Module *M = II.getModule();
807 Function *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_extrqi);
808 return Builder.CreateCall(F, Args);
812 // Constant Fold - extraction from zero is always {zero, undef}.
813 if (CI0 && CI0->isZero())
814 return LowConstantHighUndef(0);
816 return nullptr;
819 /// Attempt to simplify SSE4A INSERTQ/INSERTQI instructions using constant
820 /// folding or conversion to a shuffle vector.
821 static Value *simplifyX86insertq(IntrinsicInst &II, Value *Op0, Value *Op1,
822 APInt APLength, APInt APIndex,
823 InstCombiner::BuilderTy &Builder) {
824 // From AMD documentation: "The bit index and field length are each six bits
825 // in length; other bits of the field are ignored."
826 APIndex = APIndex.zextOrTrunc(6);
827 APLength = APLength.zextOrTrunc(6);
829 // Attempt to constant fold.
830 unsigned Index = APIndex.getZExtValue();
832 // From AMD documentation: "a value of zero in the field length is
833 // defined as length of 64".
834 unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();
836 // From AMD documentation: "If the sum of the bit index + length field
837 // is greater than 64, the results are undefined".
838 unsigned End = Index + Length;
840 // Note that both field index and field length are 8-bit quantities.
841 // Since variables 'Index' and 'Length' are unsigned values
842 // obtained from zero-extending field index and field length
843 // respectively, their sum should never wrap around.
844 if (End > 64)
845 return UndefValue::get(II.getType());
847 // If we are inserting whole bytes, we can convert this to a shuffle.
848 // Lowering can recognize INSERTQI shuffle masks.
849 if ((Length % 8) == 0 && (Index % 8) == 0) {
850 // Convert bit indices to byte indices.
851 Length /= 8;
852 Index /= 8;
854 Type *IntTy8 = Type::getInt8Ty(II.getContext());
855 Type *IntTy32 = Type::getInt32Ty(II.getContext());
856 VectorType *ShufTy = VectorType::get(IntTy8, 16);
858 SmallVector<Constant *, 16> ShuffleMask;
859 for (int i = 0; i != (int)Index; ++i)
860 ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
861 for (int i = 0; i != (int)Length; ++i)
862 ShuffleMask.push_back(
863 Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
864 for (int i = Index + Length; i != 8; ++i)
865 ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
866 for (int i = 8; i != 16; ++i)
867 ShuffleMask.push_back(UndefValue::get(IntTy32));
869 Value *SV = Builder.CreateShuffleVector(Builder.CreateBitCast(Op0, ShufTy),
870 Builder.CreateBitCast(Op1, ShufTy),
871 ConstantVector::get(ShuffleMask));
872 return Builder.CreateBitCast(SV, II.getType());
875 // See if we're dealing with constant values.
876 Constant *C0 = dyn_cast<Constant>(Op0);
877 Constant *C1 = dyn_cast<Constant>(Op1);
878 ConstantInt *CI00 =
879 C0 ? dyn_cast_or_null<ConstantInt>(C0->getAggregateElement((unsigned)0))
880 : nullptr;
881 ConstantInt *CI10 =
882 C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)0))
883 : nullptr;
885 // Constant Fold - insert bottom Length bits starting at the Index'th bit.
886 if (CI00 && CI10) {
887 APInt V00 = CI00->getValue();
888 APInt V10 = CI10->getValue();
889 APInt Mask = APInt::getLowBitsSet(64, Length).shl(Index);
890 V00 = V00 & ~Mask;
891 V10 = V10.zextOrTrunc(Length).zextOrTrunc(64).shl(Index);
892 APInt Val = V00 | V10;
893 Type *IntTy64 = Type::getInt64Ty(II.getContext());
894 Constant *Args[] = {ConstantInt::get(IntTy64, Val.getZExtValue()),
895 UndefValue::get(IntTy64)};
896 return ConstantVector::get(Args);
899 // If we were an INSERTQ call, we'll save demanded elements if we convert to
900 // INSERTQI.
901 if (II.getIntrinsicID() == Intrinsic::x86_sse4a_insertq) {
902 Type *IntTy8 = Type::getInt8Ty(II.getContext());
903 Constant *CILength = ConstantInt::get(IntTy8, Length, false);
904 Constant *CIIndex = ConstantInt::get(IntTy8, Index, false);
906 Value *Args[] = {Op0, Op1, CILength, CIIndex};
907 Module *M = II.getModule();
908 Function *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
909 return Builder.CreateCall(F, Args);
912 return nullptr;
915 /// Attempt to convert pshufb* to shufflevector if the mask is constant.
916 static Value *simplifyX86pshufb(const IntrinsicInst &II,
917 InstCombiner::BuilderTy &Builder) {
918 Constant *V = dyn_cast<Constant>(II.getArgOperand(1));
919 if (!V)
920 return nullptr;
922 auto *VecTy = cast<VectorType>(II.getType());
923 auto *MaskEltTy = Type::getInt32Ty(II.getContext());
924 unsigned NumElts = VecTy->getNumElements();
925 assert((NumElts == 16 || NumElts == 32 || NumElts == 64) &&
926 "Unexpected number of elements in shuffle mask!");
928 // Construct a shuffle mask from constant integers or UNDEFs.
929 Constant *Indexes[64] = {nullptr};
931 // Each byte in the shuffle control mask forms an index to permute the
932 // corresponding byte in the destination operand.
933 for (unsigned I = 0; I < NumElts; ++I) {
934 Constant *COp = V->getAggregateElement(I);
935 if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
936 return nullptr;
938 if (isa<UndefValue>(COp)) {
939 Indexes[I] = UndefValue::get(MaskEltTy);
940 continue;
943 int8_t Index = cast<ConstantInt>(COp)->getValue().getZExtValue();
945 // If the most significant bit (bit[7]) of each byte of the shuffle
946 // control mask is set, then zero is written in the result byte.
947 // The zero vector is in the right-hand side of the resulting
948 // shufflevector.
950 // The value of each index for the high 128-bit lane is the least
951 // significant 4 bits of the respective shuffle control byte.
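// Illustrative example: for a v16i8 pshufb whose control bytes are all 0 in
// the low half and all 0x80 in the high half, the resulting shufflevector
// mask is <0,...,0, 16,...,16>: the low eight result bytes splat byte 0 of
// the source and the high eight bytes are taken from the all-zero second
// operand, i.e. they are zeroed.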
952 Index = ((Index < 0) ? NumElts : Index & 0x0F) + (I & 0xF0);
953 Indexes[I] = ConstantInt::get(MaskEltTy, Index);
956 auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
957 auto V1 = II.getArgOperand(0);
958 auto V2 = Constant::getNullValue(VecTy);
959 return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
962 /// Attempt to convert vpermilvar* to shufflevector if the mask is constant.
963 static Value *simplifyX86vpermilvar(const IntrinsicInst &II,
964 InstCombiner::BuilderTy &Builder) {
965 Constant *V = dyn_cast<Constant>(II.getArgOperand(1));
966 if (!V)
967 return nullptr;
969 auto *VecTy = cast<VectorType>(II.getType());
970 auto *MaskEltTy = Type::getInt32Ty(II.getContext());
971 unsigned NumElts = VecTy->getVectorNumElements();
972 bool IsPD = VecTy->getScalarType()->isDoubleTy();
973 unsigned NumLaneElts = IsPD ? 2 : 4;
974 assert(NumElts == 16 || NumElts == 8 || NumElts == 4 || NumElts == 2);
976 // Construct a shuffle mask from constant integers or UNDEFs.
977 Constant *Indexes[16] = {nullptr};
979 // The intrinsics only read one or two bits; clear the rest.
980 for (unsigned I = 0; I < NumElts; ++I) {
981 Constant *COp = V->getAggregateElement(I);
982 if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
983 return nullptr;
985 if (isa<UndefValue>(COp)) {
986 Indexes[I] = UndefValue::get(MaskEltTy);
987 continue;
990 APInt Index = cast<ConstantInt>(COp)->getValue();
991 Index = Index.zextOrTrunc(32).getLoBits(2);
993 // The PD variants use bit 1 to select the per-lane element index, so
994 // shift down to convert to a generic shuffle mask index.
995 if (IsPD)
996 Index.lshrInPlace(1);
998 // The _256 variants are a bit trickier since the mask bits always index
999 // into the corresponding 128-bit half. In order to convert to a generic
1000 // shuffle, we have to make that explicit.
1001 Index += APInt(32, (I / NumLaneElts) * NumLaneElts);
1003 Indexes[I] = ConstantInt::get(MaskEltTy, Index);
1006 auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
1007 auto V1 = II.getArgOperand(0);
1008 auto V2 = UndefValue::get(V1->getType());
1009 return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
1012 /// Attempt to convert vpermd/vpermps to shufflevector if the mask is constant.
1013 static Value *simplifyX86vpermv(const IntrinsicInst &II,
1014 InstCombiner::BuilderTy &Builder) {
1015 auto *V = dyn_cast<Constant>(II.getArgOperand(1));
1016 if (!V)
1017 return nullptr;
1019 auto *VecTy = cast<VectorType>(II.getType());
1020 auto *MaskEltTy = Type::getInt32Ty(II.getContext());
1021 unsigned Size = VecTy->getNumElements();
1022 assert((Size == 4 || Size == 8 || Size == 16 || Size == 32 || Size == 64) &&
1023 "Unexpected shuffle mask size");
1025 // Construct a shuffle mask from constant integers or UNDEFs.
1026 Constant *Indexes[64] = {nullptr};
1028 for (unsigned I = 0; I < Size; ++I) {
1029 Constant *COp = V->getAggregateElement(I);
1030 if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
1031 return nullptr;
1033 if (isa<UndefValue>(COp)) {
1034 Indexes[I] = UndefValue::get(MaskEltTy);
1035 continue;
1038 uint32_t Index = cast<ConstantInt>(COp)->getZExtValue();
1039 Index &= Size - 1;
1040 Indexes[I] = ConstantInt::get(MaskEltTy, Index);
1043 auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, Size));
1044 auto V1 = II.getArgOperand(0);
1045 auto V2 = UndefValue::get(VecTy);
1046 return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
1049 // TODO, Obvious Missing Transforms:
1050 // * Narrow width by halves excluding zero/undef lanes
1051 Value *InstCombiner::simplifyMaskedLoad(IntrinsicInst &II) {
1052 Value *LoadPtr = II.getArgOperand(0);
1053 unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();
1055 // If the mask is all ones or undefs, this is a plain vector load of the 1st
1056 // argument.
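// Illustrative IR (a sketch):
//   %v = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %p, i32 4,
//            <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
// simplifies to
//   %v = load <4 x i32>, <4 x i32>* %p, align 4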
1057 if (maskIsAllOneOrUndef(II.getArgOperand(2)))
1058 return Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
1059 "unmaskedload");
1061 // If we can unconditionally load from this address, replace with a
1062 // load/select idiom. TODO: use DT for context sensitive query
1063 if (isDereferenceableAndAlignedPointer(LoadPtr, II.getType(), Alignment,
1064 II.getModule()->getDataLayout(),
1065 &II, nullptr)) {
1066 Value *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
1067 "unmaskedload");
1068 return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
1071 return nullptr;
1074 // TODO, Obvious Missing Transforms:
1075 // * Single constant active lane -> store
1076 // * Narrow width by halves excluding zero/undef lanes
1077 Instruction *InstCombiner::simplifyMaskedStore(IntrinsicInst &II) {
1078 auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
1079 if (!ConstMask)
1080 return nullptr;
1082 // If the mask is all zeros, this instruction does nothing.
1083 if (ConstMask->isNullValue())
1084 return eraseInstFromFunction(II);
1086 // If the mask is all ones, this is a plain vector store of the 1st argument.
1087 if (ConstMask->isAllOnesValue()) {
1088 Value *StorePtr = II.getArgOperand(1);
1089 unsigned Alignment = cast<ConstantInt>(II.getArgOperand(2))->getZExtValue();
1090 return new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
1093 // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
1094 APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
1095 APInt UndefElts(DemandedElts.getBitWidth(), 0);
1096 if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0),
1097 DemandedElts, UndefElts)) {
1098 II.setOperand(0, V);
1099 return &II;
1102 return nullptr;
1105 // TODO, Obvious Missing Transforms:
1106 // * Single constant active lane load -> load
1107 // * Dereferenceable address & few lanes -> scalarize speculative load/selects
1108 // * Adjacent vector addresses -> masked.load
1109 // * Narrow width by halves excluding zero/undef lanes
1110 // * Vector splat address w/known mask -> scalar load
1111 // * Vector incrementing address -> vector masked load
1112 Instruction *InstCombiner::simplifyMaskedGather(IntrinsicInst &II) {
1113 return nullptr;
1116 // TODO, Obvious Missing Transforms:
1117 // * Single constant active lane -> store
1118 // * Adjacent vector addresses -> masked.store
1119 // * Narrow store width by halves excluding zero/undef lanes
1120 // * Vector splat address w/known mask -> scalar store
1121 // * Vector incrementing address -> vector masked store
1122 Instruction *InstCombiner::simplifyMaskedScatter(IntrinsicInst &II) {
1123 auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
1124 if (!ConstMask)
1125 return nullptr;
1127 // If the mask is all zeros, a scatter does nothing.
1128 if (ConstMask->isNullValue())
1129 return eraseInstFromFunction(II);
1131 // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
1132 APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
1133 APInt UndefElts(DemandedElts.getBitWidth(), 0);
1134 if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0),
1135 DemandedElts, UndefElts)) {
1136 II.setOperand(0, V);
1137 return &II;
1139 if (Value *V = SimplifyDemandedVectorElts(II.getOperand(1),
1140 DemandedElts, UndefElts)) {
1141 II.setOperand(1, V);
1142 return &II;
1145 return nullptr;
1148 /// This function transforms launder.invariant.group and strip.invariant.group
1149 /// like:
1150 /// launder(launder(%x)) -> launder(%x) (the result is not the argument)
1151 /// launder(strip(%x)) -> launder(%x)
1152 /// strip(strip(%x)) -> strip(%x) (the result is not the argument)
1153 /// strip(launder(%x)) -> strip(%x)
1154 /// This is legal because it preserves the most recent information about
1155 /// the presence or absence of invariant.group.
1156 static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
1157 InstCombiner &IC) {
1158 auto *Arg = II.getArgOperand(0);
1159 auto *StrippedArg = Arg->stripPointerCasts();
1160 auto *StrippedInvariantGroupsArg = Arg->stripPointerCastsAndInvariantGroups();
1161 if (StrippedArg == StrippedInvariantGroupsArg)
1162 return nullptr; // No launders/strips to remove.
1164 Value *Result = nullptr;
1166 if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
1167 Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
1168 else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
1169 Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
1170 else
1171 llvm_unreachable(
1172 "simplifyInvariantGroupIntrinsic only handles launder and strip");
1173 if (Result->getType()->getPointerAddressSpace() !=
1174 II.getType()->getPointerAddressSpace())
1175 Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());
1176 if (Result->getType() != II.getType())
1177 Result = IC.Builder.CreateBitCast(Result, II.getType());
1179 return cast<Instruction>(Result);
1182 static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombiner &IC) {
1183 assert((II.getIntrinsicID() == Intrinsic::cttz ||
1184 II.getIntrinsicID() == Intrinsic::ctlz) &&
1185 "Expected cttz or ctlz intrinsic");
1186 bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
1187 Value *Op0 = II.getArgOperand(0);
1188 Value *X;
1189 // ctlz(bitreverse(x)) -> cttz(x)
1190 // cttz(bitreverse(x)) -> ctlz(x)
1191 if (match(Op0, m_BitReverse(m_Value(X)))) {
1192 Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;
1193 Function *F = Intrinsic::getDeclaration(II.getModule(), ID, II.getType());
1194 return CallInst::Create(F, {X, II.getArgOperand(1)});
1197 if (IsTZ) {
1198 // cttz(-x) -> cttz(x)
1199 if (match(Op0, m_Neg(m_Value(X)))) {
1200 II.setOperand(0, X);
1201 return &II;
1204 // cttz(abs(x)) -> cttz(x)
1205 // cttz(nabs(x)) -> cttz(x)
1206 Value *Y;
1207 SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
1208 if (SPF == SPF_ABS || SPF == SPF_NABS) {
1209 II.setOperand(0, X);
1210 return &II;
1214 KnownBits Known = IC.computeKnownBits(Op0, 0, &II);
1216 // Create a mask for bits above (ctlz) or below (cttz) the first known one.
1217 unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
1218 : Known.countMaxLeadingZeros();
1219 unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
1220 : Known.countMinLeadingZeros();
1222 // If all bits above (ctlz) or below (cttz) the first known one are known
1223 // zero, this value is constant.
1224 // FIXME: This should be in InstSimplify because we're replacing an
1225 // instruction with a constant.
1226 if (PossibleZeros == DefiniteZeros) {
1227 auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
1228 return IC.replaceInstUsesWith(II, C);
1231 // If the input to cttz/ctlz is known to be non-zero,
1232 // then change the 'ZeroIsUndef' parameter to 'true'
1233 // because we know the zero behavior can't affect the result.
1234 if (!Known.One.isNullValue() ||
1235 isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
1236 &IC.getDominatorTree())) {
1237 if (!match(II.getArgOperand(1), m_One())) {
1238 II.setOperand(1, IC.Builder.getTrue());
1239 return &II;
1243 // Add range metadata since known bits can't completely reflect what we know.
1244 // TODO: Handle splat vectors.
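// Illustrative example (a sketch): for a cttz of an i32 operand whose low
// three bits are known zero and whose bit 15 is known one, the attached
// metadata would be !range !{i32 3, i32 16}, i.e. the result lies in [3, 16).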
1245 auto *IT = dyn_cast<IntegerType>(Op0->getType());
1246 if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
1247 Metadata *LowAndHigh[] = {
1248 ConstantAsMetadata::get(ConstantInt::get(IT, DefiniteZeros)),
1249 ConstantAsMetadata::get(ConstantInt::get(IT, PossibleZeros + 1))};
1250 II.setMetadata(LLVMContext::MD_range,
1251 MDNode::get(II.getContext(), LowAndHigh));
1252 return &II;
1255 return nullptr;
1258 static Instruction *foldCtpop(IntrinsicInst &II, InstCombiner &IC) {
1259 assert(II.getIntrinsicID() == Intrinsic::ctpop &&
1260 "Expected ctpop intrinsic");
1261 Value *Op0 = II.getArgOperand(0);
1262 Value *X;
1263 // ctpop(bitreverse(x)) -> ctpop(x)
1264 // ctpop(bswap(x)) -> ctpop(x)
1265 if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X)))) {
1266 II.setOperand(0, X);
1267 return &II;
1270 // FIXME: Try to simplify vectors of integers.
1271 auto *IT = dyn_cast<IntegerType>(Op0->getType());
1272 if (!IT)
1273 return nullptr;
1275 unsigned BitWidth = IT->getBitWidth();
1276 KnownBits Known(BitWidth);
1277 IC.computeKnownBits(Op0, Known, 0, &II);
1279 unsigned MinCount = Known.countMinPopulation();
1280 unsigned MaxCount = Known.countMaxPopulation();
1282 // Add range metadata since known bits can't completely reflect what we know.
1283 if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
1284 Metadata *LowAndHigh[] = {
1285 ConstantAsMetadata::get(ConstantInt::get(IT, MinCount)),
1286 ConstantAsMetadata::get(ConstantInt::get(IT, MaxCount + 1))};
1287 II.setMetadata(LLVMContext::MD_range,
1288 MDNode::get(II.getContext(), LowAndHigh));
1289 return &II;
1292 return nullptr;
1295 // TODO: If the x86 backend knew how to convert a bool vector mask back to an
1296 // XMM register mask efficiently, we could transform all x86 masked intrinsics
1297 // to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
1298 static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
1299 Value *Ptr = II.getOperand(0);
1300 Value *Mask = II.getOperand(1);
1301 Constant *ZeroVec = Constant::getNullValue(II.getType());
1303 // Special case a zero mask since that's not a ConstantDataVector.
1304 // This masked load instruction creates a zero vector.
1305 if (isa<ConstantAggregateZero>(Mask))
1306 return IC.replaceInstUsesWith(II, ZeroVec);
1308 auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
1309 if (!ConstMask)
1310 return nullptr;
1312 // The mask is constant. Convert this x86 intrinsic to the LLVM intrinsic
1313 // to allow target-independent optimizations.
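// Illustrative IR (a sketch, assuming a v4f32 maskload):
//   %v = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %p, <4 x i32> %m)
// with a constant %m becomes
//   %v = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %pc,
//            i32 1, <4 x i1> %boolmask, <4 x float> zeroinitializer)
// where %pc and %boolmask name the casted pointer and converted mask built below.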
1315 // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
1316 // the LLVM intrinsic definition for the pointer argument.
1317 unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
1318 PointerType *VecPtrTy = PointerType::get(II.getType(), AddrSpace);
1319 Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");
1321 // Second, convert the x86 XMM integer vector mask to a vector of bools based
1322 // on each element's most significant bit (the sign bit).
1323 Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);
1325 // The pass-through vector for an x86 masked load is a zero vector.
1326 CallInst *NewMaskedLoad =
1327 IC.Builder.CreateMaskedLoad(PtrCast, 1, BoolMask, ZeroVec);
1328 return IC.replaceInstUsesWith(II, NewMaskedLoad);
1331 // TODO: If the x86 backend knew how to convert a bool vector mask back to an
1332 // XMM register mask efficiently, we could transform all x86 masked intrinsics
1333 // to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
1334 static bool simplifyX86MaskedStore(IntrinsicInst &II, InstCombiner &IC) {
1335 Value *Ptr = II.getOperand(0);
1336 Value *Mask = II.getOperand(1);
1337 Value *Vec = II.getOperand(2);
1339 // Special case a zero mask since that's not a ConstantDataVector:
1340 // this masked store instruction does nothing.
1341 if (isa<ConstantAggregateZero>(Mask)) {
1342 IC.eraseInstFromFunction(II);
1343 return true;
1346 // The SSE2 version is too weird (e.g., unaligned but non-temporal) to do
1347 // anything else at this level.
1348 if (II.getIntrinsicID() == Intrinsic::x86_sse2_maskmov_dqu)
1349 return false;
1351 auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
1352 if (!ConstMask)
1353 return false;
1355 // The mask is constant. Convert this x86 intrinsic to the LLVM intrinsic
1356 // to allow target-independent optimizations.
1358 // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
1359 // the LLVM intrinsic definition for the pointer argument.
1360 unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
1361 PointerType *VecPtrTy = PointerType::get(Vec->getType(), AddrSpace);
1362 Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");
1364 // Second, convert the x86 XMM integer vector mask to a vector of bools based
1365 // on each element's most significant bit (the sign bit).
1366 Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);
1368 IC.Builder.CreateMaskedStore(Vec, PtrCast, 1, BoolMask);
1370 // 'Replace uses' doesn't work for stores. Erase the original masked store.
1371 IC.eraseInstFromFunction(II);
1372 return true;
1375 // Constant fold llvm.amdgcn.fmed3 intrinsics for standard inputs.
1377 // A single NaN input is folded to minnum, so we rely on that folding for
1378 // handling NaNs.
1379 static APFloat fmed3AMDGCN(const APFloat &Src0, const APFloat &Src1,
1380 const APFloat &Src2) {
1381 APFloat Max3 = maxnum(maxnum(Src0, Src1), Src2);
1383 APFloat::cmpResult Cmp0 = Max3.compare(Src0);
1384 assert(Cmp0 != APFloat::cmpUnordered && "nans handled separately");
1385 if (Cmp0 == APFloat::cmpEqual)
1386 return maxnum(Src1, Src2);
1388 APFloat::cmpResult Cmp1 = Max3.compare(Src1);
1389 assert(Cmp1 != APFloat::cmpUnordered && "nans handled separately");
1390 if (Cmp1 == APFloat::cmpEqual)
1391 return maxnum(Src0, Src2);
1393 return maxnum(Src0, Src1);
1396 /// Convert a table lookup to shufflevector if the mask is constant.
1397 /// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
1398 /// which case we could lower the shufflevector with rev64 instructions
1399 /// as it's actually a byte reverse.
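// For instance (an illustrative sketch): with that constant mask,
//   %r = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> %t, <8 x i8>
//            <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>)
// becomes a shufflevector reversing the low eight bytes of %t, which the
// backend can lower to a byte-reverse instruction.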
1400 static Value *simplifyNeonTbl1(const IntrinsicInst &II,
1401 InstCombiner::BuilderTy &Builder) {
1402 // Bail out if the mask is not a constant.
1403 auto *C = dyn_cast<Constant>(II.getArgOperand(1));
1404 if (!C)
1405 return nullptr;
1407 auto *VecTy = cast<VectorType>(II.getType());
1408 unsigned NumElts = VecTy->getNumElements();
1410 // Only perform this transformation for <8 x i8> vector types.
1411 if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
1412 return nullptr;
1414 uint32_t Indexes[8];
1416 for (unsigned I = 0; I < NumElts; ++I) {
1417 Constant *COp = C->getAggregateElement(I);
1419 if (!COp || !isa<ConstantInt>(COp))
1420 return nullptr;
1422 Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();
1424 // Make sure the mask indices are in range.
1425 if (Indexes[I] >= NumElts)
1426 return nullptr;
1429 auto *ShuffleMask = ConstantDataVector::get(II.getContext(),
1430 makeArrayRef(Indexes));
1431 auto *V1 = II.getArgOperand(0);
1432 auto *V2 = Constant::getNullValue(V1->getType());
1433 return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
1436 /// Convert a vector load intrinsic into a simple llvm load instruction.
1437 /// This is beneficial when the underlying object being addressed comes
1438 /// from a constant, since we get constant-folding for free.
1439 static Value *simplifyNeonVld1(const IntrinsicInst &II,
1440 unsigned MemAlign,
1441 InstCombiner::BuilderTy &Builder) {
1442 auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));
1444 if (!IntrAlign)
1445 return nullptr;
1447 unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign ?
1448 MemAlign : IntrAlign->getLimitedValue();
1450 if (!isPowerOf2_32(Alignment))
1451 return nullptr;
1453 auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
1454 PointerType::get(II.getType(), 0));
1455 return Builder.CreateAlignedLoad(II.getType(), BCastInst, Alignment);
1458 // Returns true iff the 2 intrinsics have the same operands, limiting the
1459 // comparison to the first NumOperands.
1460 static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
1461 unsigned NumOperands) {
1462 assert(I.getNumArgOperands() >= NumOperands && "Not enough operands");
1463 assert(E.getNumArgOperands() >= NumOperands && "Not enough operands");
1464 for (unsigned i = 0; i < NumOperands; i++)
1465 if (I.getArgOperand(i) != E.getArgOperand(i))
1466 return false;
1467 return true;
1470 // Remove trivially empty start/end intrinsic ranges, i.e. a start
1471 // immediately followed by an end (ignoring debuginfo or other
1472 // start/end intrinsics in between). As this handles only the most trivial
1473 // cases, tracking the nesting level is not needed:
1475 // call @llvm.foo.start(i1 0) ; &I
1476 // call @llvm.foo.start(i1 0)
1477 // call @llvm.foo.end(i1 0) ; This one will not be skipped: it will be removed
1478 // call @llvm.foo.end(i1 0)
1479 static bool removeTriviallyEmptyRange(IntrinsicInst &I, unsigned StartID,
1480 unsigned EndID, InstCombiner &IC) {
1481 assert(I.getIntrinsicID() == StartID &&
1482 "Start intrinsic does not have expected ID");
1483 BasicBlock::iterator BI(I), BE(I.getParent()->end());
1484 for (++BI; BI != BE; ++BI) {
1485 if (auto *E = dyn_cast<IntrinsicInst>(BI)) {
1486 if (isa<DbgInfoIntrinsic>(E) || E->getIntrinsicID() == StartID)
1487 continue;
1488 if (E->getIntrinsicID() == EndID &&
1489 haveSameOperands(I, *E, E->getNumArgOperands())) {
1490 IC.eraseInstFromFunction(*E);
1491 IC.eraseInstFromFunction(I);
1492 return true;
1495 break;
1498 return false;
1501 // Convert NVVM intrinsics to target-generic LLVM code where possible.
1502 static Instruction *SimplifyNVVMIntrinsic(IntrinsicInst *II, InstCombiner &IC) {
1503 // Each NVVM intrinsic we can simplify can be replaced with one of:
1505 // * an LLVM intrinsic,
1506 // * an LLVM cast operation,
1507 // * an LLVM binary operation, or
1508 // * ad-hoc LLVM IR for the particular operation.
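// For example (taken from the switch below), nvvm_ceil_d maps to the generic
// llvm.ceil intrinsic, nvvm_d2i_rz to an fptosi cast, nvvm_add_rn_d to fadd,
// and nvvm_rcp_rn_d is expanded ad-hoc as the IR expression 1.0 / x.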
1510 // Some transformations are only valid when the module's
1511 // flush-denormals-to-zero (ftz) setting is true/false, whereas other
1512 // transformations are valid regardless of the module's ftz setting.
1513 enum FtzRequirementTy {
1514 FTZ_Any, // Any ftz setting is ok.
1515 FTZ_MustBeOn, // Transformation is valid only if ftz is on.
1516 FTZ_MustBeOff, // Transformation is valid only if ftz is off.
1518 // Classes of NVVM intrinsics that can't be replaced one-to-one with a
1519 // target-generic intrinsic, cast op, or binary op but that we can nonetheless
1520 // simplify.
1521 enum SpecialCase {
1522 SPC_Reciprocal,
1525 // SimplifyAction is a poor-man's variant (plus an additional flag) that
1526 // represents how to replace an NVVM intrinsic with target-generic LLVM IR.
1527 struct SimplifyAction {
1528 // Invariant: At most one of these Optionals has a value.
1529 Optional<Intrinsic::ID> IID;
1530 Optional<Instruction::CastOps> CastOp;
1531 Optional<Instruction::BinaryOps> BinaryOp;
1532 Optional<SpecialCase> Special;
1534 FtzRequirementTy FtzRequirement = FTZ_Any;
1536 SimplifyAction() = default;
1538 SimplifyAction(Intrinsic::ID IID, FtzRequirementTy FtzReq)
1539 : IID(IID), FtzRequirement(FtzReq) {}
1541 // Cast operations don't have anything to do with FTZ, so we skip that
1542 // argument.
1543 SimplifyAction(Instruction::CastOps CastOp) : CastOp(CastOp) {}
1545 SimplifyAction(Instruction::BinaryOps BinaryOp, FtzRequirementTy FtzReq)
1546 : BinaryOp(BinaryOp), FtzRequirement(FtzReq) {}
1548 SimplifyAction(SpecialCase Special, FtzRequirementTy FtzReq)
1549 : Special(Special), FtzRequirement(FtzReq) {}
1552 // Try to generate a SimplifyAction describing how to replace our
1553 // IntrinsicInstr with target-generic LLVM IR.
1554 const SimplifyAction Action = [II]() -> SimplifyAction {
1555 switch (II->getIntrinsicID()) {
1556 // NVVM intrinsics that map directly to LLVM intrinsics.
1557 case Intrinsic::nvvm_ceil_d:
1558 return {Intrinsic::ceil, FTZ_Any};
1559 case Intrinsic::nvvm_ceil_f:
1560 return {Intrinsic::ceil, FTZ_MustBeOff};
1561 case Intrinsic::nvvm_ceil_ftz_f:
1562 return {Intrinsic::ceil, FTZ_MustBeOn};
1563 case Intrinsic::nvvm_fabs_d:
1564 return {Intrinsic::fabs, FTZ_Any};
1565 case Intrinsic::nvvm_fabs_f:
1566 return {Intrinsic::fabs, FTZ_MustBeOff};
1567 case Intrinsic::nvvm_fabs_ftz_f:
1568 return {Intrinsic::fabs, FTZ_MustBeOn};
1569 case Intrinsic::nvvm_floor_d:
1570 return {Intrinsic::floor, FTZ_Any};
1571 case Intrinsic::nvvm_floor_f:
1572 return {Intrinsic::floor, FTZ_MustBeOff};
1573 case Intrinsic::nvvm_floor_ftz_f:
1574 return {Intrinsic::floor, FTZ_MustBeOn};
1575 case Intrinsic::nvvm_fma_rn_d:
1576 return {Intrinsic::fma, FTZ_Any};
1577 case Intrinsic::nvvm_fma_rn_f:
1578 return {Intrinsic::fma, FTZ_MustBeOff};
1579 case Intrinsic::nvvm_fma_rn_ftz_f:
1580 return {Intrinsic::fma, FTZ_MustBeOn};
1581 case Intrinsic::nvvm_fmax_d:
1582 return {Intrinsic::maxnum, FTZ_Any};
1583 case Intrinsic::nvvm_fmax_f:
1584 return {Intrinsic::maxnum, FTZ_MustBeOff};
1585 case Intrinsic::nvvm_fmax_ftz_f:
1586 return {Intrinsic::maxnum, FTZ_MustBeOn};
1587 case Intrinsic::nvvm_fmin_d:
1588 return {Intrinsic::minnum, FTZ_Any};
1589 case Intrinsic::nvvm_fmin_f:
1590 return {Intrinsic::minnum, FTZ_MustBeOff};
1591 case Intrinsic::nvvm_fmin_ftz_f:
1592 return {Intrinsic::minnum, FTZ_MustBeOn};
1593 case Intrinsic::nvvm_round_d:
1594 return {Intrinsic::round, FTZ_Any};
1595 case Intrinsic::nvvm_round_f:
1596 return {Intrinsic::round, FTZ_MustBeOff};
1597 case Intrinsic::nvvm_round_ftz_f:
1598 return {Intrinsic::round, FTZ_MustBeOn};
1599 case Intrinsic::nvvm_sqrt_rn_d:
1600 return {Intrinsic::sqrt, FTZ_Any};
1601 case Intrinsic::nvvm_sqrt_f:
1602 // nvvm_sqrt_f is a special case. For most intrinsics, foo_ftz_f is the
1603 // ftz version, and foo_f is the non-ftz version. But nvvm_sqrt_f adopts
1604 // the ftz-ness of the surrounding code. sqrt_rn_f and sqrt_rn_ftz_f are
1605 // the versions with explicit ftz-ness.
1606 return {Intrinsic::sqrt, FTZ_Any};
1607 case Intrinsic::nvvm_sqrt_rn_f:
1608 return {Intrinsic::sqrt, FTZ_MustBeOff};
1609 case Intrinsic::nvvm_sqrt_rn_ftz_f:
1610 return {Intrinsic::sqrt, FTZ_MustBeOn};
1611 case Intrinsic::nvvm_trunc_d:
1612 return {Intrinsic::trunc, FTZ_Any};
1613 case Intrinsic::nvvm_trunc_f:
1614 return {Intrinsic::trunc, FTZ_MustBeOff};
1615 case Intrinsic::nvvm_trunc_ftz_f:
1616 return {Intrinsic::trunc, FTZ_MustBeOn};
1618 // NVVM intrinsics that map to LLVM cast operations.
1620 // Note that llvm's target-generic conversion operators correspond to the rz
1621 // (round to zero) versions of the nvvm conversion intrinsics, even though
1622 // almost everything else here uses the rn (round to nearest even) nvvm ops.
1623 case Intrinsic::nvvm_d2i_rz:
1624 case Intrinsic::nvvm_f2i_rz:
1625 case Intrinsic::nvvm_d2ll_rz:
1626 case Intrinsic::nvvm_f2ll_rz:
1627 return {Instruction::FPToSI};
1628 case Intrinsic::nvvm_d2ui_rz:
1629 case Intrinsic::nvvm_f2ui_rz:
1630 case Intrinsic::nvvm_d2ull_rz:
1631 case Intrinsic::nvvm_f2ull_rz:
1632 return {Instruction::FPToUI};
1633 case Intrinsic::nvvm_i2d_rz:
1634 case Intrinsic::nvvm_i2f_rz:
1635 case Intrinsic::nvvm_ll2d_rz:
1636 case Intrinsic::nvvm_ll2f_rz:
1637 return {Instruction::SIToFP};
1638 case Intrinsic::nvvm_ui2d_rz:
1639 case Intrinsic::nvvm_ui2f_rz:
1640 case Intrinsic::nvvm_ull2d_rz:
1641 case Intrinsic::nvvm_ull2f_rz:
1642 return {Instruction::UIToFP};
1644 // NVVM intrinsics that map to LLVM binary ops.
1645 case Intrinsic::nvvm_add_rn_d:
1646 return {Instruction::FAdd, FTZ_Any};
1647 case Intrinsic::nvvm_add_rn_f:
1648 return {Instruction::FAdd, FTZ_MustBeOff};
1649 case Intrinsic::nvvm_add_rn_ftz_f:
1650 return {Instruction::FAdd, FTZ_MustBeOn};
1651 case Intrinsic::nvvm_mul_rn_d:
1652 return {Instruction::FMul, FTZ_Any};
1653 case Intrinsic::nvvm_mul_rn_f:
1654 return {Instruction::FMul, FTZ_MustBeOff};
1655 case Intrinsic::nvvm_mul_rn_ftz_f:
1656 return {Instruction::FMul, FTZ_MustBeOn};
1657 case Intrinsic::nvvm_div_rn_d:
1658 return {Instruction::FDiv, FTZ_Any};
1659 case Intrinsic::nvvm_div_rn_f:
1660 return {Instruction::FDiv, FTZ_MustBeOff};
1661 case Intrinsic::nvvm_div_rn_ftz_f:
1662 return {Instruction::FDiv, FTZ_MustBeOn};
1664 // The remainder of cases are NVVM intrinsics that map to LLVM idioms, but
1665 // need special handling.
1667 // We seem to be missing intrinsics for rcp.approx.{ftz.}f32, which is just
1668 // as well.
1669 case Intrinsic::nvvm_rcp_rn_d:
1670 return {SPC_Reciprocal, FTZ_Any};
1671 case Intrinsic::nvvm_rcp_rn_f:
1672 return {SPC_Reciprocal, FTZ_MustBeOff};
1673 case Intrinsic::nvvm_rcp_rn_ftz_f:
1674 return {SPC_Reciprocal, FTZ_MustBeOn};
1676 // We do not currently simplify intrinsics that give an approximate answer.
1677 // These include:
1679 // - nvvm_cos_approx_{f,ftz_f}
1680 // - nvvm_ex2_approx_{d,f,ftz_f}
1681 // - nvvm_lg2_approx_{d,f,ftz_f}
1682 // - nvvm_sin_approx_{f,ftz_f}
1683 // - nvvm_sqrt_approx_{f,ftz_f}
1684 // - nvvm_rsqrt_approx_{d,f,ftz_f}
1685 // - nvvm_div_approx_{ftz_d,ftz_f,f}
1686 // - nvvm_rcp_approx_ftz_d
1688 // Ideally we'd encode them as e.g. "fast call @llvm.cos", where "fast"
1689 // means that fastmath is enabled in the intrinsic. Unfortunately only
1690 // binary operators (currently) have a fastmath bit in SelectionDAG, so this
1691 // information gets lost and we can't select on it.
1693 // TODO: div and rcp are lowered to a binary op, so in theory we could lower
1694 // them to "fast fdiv".
1696 default:
1697 return {};
1699 }();
1701 // If Action.FtzRequirement is not satisfied by the module's ftz state, we
1702 // can bail out now. (Notice that in the case that IID is not an NVVM
1703 // intrinsic, we don't have to look up any module metadata, as
1704 // FtzRequirementTy will be FTZ_Any.)
1705 if (Action.FtzRequirement != FTZ_Any) {
1706 bool FtzEnabled =
1707 II->getFunction()->getFnAttribute("nvptx-f32ftz").getValueAsString() ==
1708 "true";
1710 if (FtzEnabled != (Action.FtzRequirement == FTZ_MustBeOn))
1711 return nullptr;
1714 // Simplify to target-generic intrinsic.
1715 if (Action.IID) {
1716 SmallVector<Value *, 4> Args(II->arg_operands());
1717 // All the target-generic intrinsics currently of interest to us have one
1718 // type argument, equal to that of the nvvm intrinsic's argument.
1719 Type *Tys[] = {II->getArgOperand(0)->getType()};
1720 return CallInst::Create(
1721 Intrinsic::getDeclaration(II->getModule(), *Action.IID, Tys), Args);
1724 // Simplify to target-generic binary op.
1725 if (Action.BinaryOp)
1726 return BinaryOperator::Create(*Action.BinaryOp, II->getArgOperand(0),
1727 II->getArgOperand(1), II->getName());
1729 // Simplify to target-generic cast op.
1730 if (Action.CastOp)
1731 return CastInst::Create(*Action.CastOp, II->getArgOperand(0), II->getType(),
1732 II->getName());
1734 // All that's left are the special cases.
1735 if (!Action.Special)
1736 return nullptr;
1738 switch (*Action.Special) {
1739 case SPC_Reciprocal:
1740 // Simplify reciprocal.
1741 return BinaryOperator::Create(
1742 Instruction::FDiv, ConstantFP::get(II->getArgOperand(0)->getType(), 1),
1743 II->getArgOperand(0), II->getName());
1745 llvm_unreachable("All SpecialCase enumerators should be handled in switch.");
1748 Instruction *InstCombiner::visitVAStartInst(VAStartInst &I) {
1749 removeTriviallyEmptyRange(I, Intrinsic::vastart, Intrinsic::vaend, *this);
1750 return nullptr;
1753 Instruction *InstCombiner::visitVACopyInst(VACopyInst &I) {
1754 removeTriviallyEmptyRange(I, Intrinsic::vacopy, Intrinsic::vaend, *this);
1755 return nullptr;
1758 static Instruction *canonicalizeConstantArg0ToArg1(CallInst &Call) {
1759 assert(Call.getNumArgOperands() > 1 && "Need at least 2 args to swap");
1760 Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
1761 if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
1762 Call.setArgOperand(0, Arg1);
1763 Call.setArgOperand(1, Arg0);
1764 return &Call;
1766 return nullptr;
1769 Instruction *InstCombiner::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
1770 WithOverflowInst *WO = cast<WithOverflowInst>(II);
1771 Value *OperationResult = nullptr;
1772 Constant *OverflowResult = nullptr;
1773 if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(),
1774 WO->getRHS(), *WO, OperationResult, OverflowResult))
1775 return CreateOverflowTuple(WO, OperationResult, OverflowResult);
1776 return nullptr;
1779 /// CallInst simplification. This mostly only handles folding of intrinsic
1780 /// instructions. For normal calls, it allows visitCallBase to do the heavy
1781 /// lifting.
1782 Instruction *InstCombiner::visitCallInst(CallInst &CI) {
1783 if (Value *V = SimplifyCall(&CI, SQ.getWithInstruction(&CI)))
1784 return replaceInstUsesWith(CI, V);
1786 if (isFreeCall(&CI, &TLI))
1787 return visitFree(CI);
1789 // If the caller function is nounwind, mark the call as nounwind, even if the
1790 // callee isn't.
1791 if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
1792 CI.setDoesNotThrow();
1793 return &CI;
1796 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
1797 if (!II) return visitCallBase(CI);
1799 // Intrinsics cannot occur in an invoke or a callbr, so handle them here
1800 // instead of in visitCallBase.
1801 if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
1802 bool Changed = false;
1804 // memmove/cpy/set of zero bytes is a noop.
1805 if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
1806 if (NumBytes->isNullValue())
1807 return eraseInstFromFunction(CI);
1809 if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
1810 if (CI->getZExtValue() == 1) {
1811 // Replace the instruction with just byte operations. We would
1812 // transform other cases to loads/stores, but we don't know if
1813 // alignment is sufficient.
1817 // No other transformations apply to volatile transfers.
1818 if (auto *M = dyn_cast<MemIntrinsic>(MI))
1819 if (M->isVolatile())
1820 return nullptr;
1822 // If we have a memmove and the source operand is a constant global,
1823 // then the source and dest pointers can't alias, so we can change this
1824 // into a call to memcpy.
1825 if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
1826 if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
1827 if (GVSrc->isConstant()) {
1828 Module *M = CI.getModule();
1829 Intrinsic::ID MemCpyID =
1830 isa<AtomicMemMoveInst>(MMI)
1831 ? Intrinsic::memcpy_element_unordered_atomic
1832 : Intrinsic::memcpy;
1833 Type *Tys[3] = { CI.getArgOperand(0)->getType(),
1834 CI.getArgOperand(1)->getType(),
1835 CI.getArgOperand(2)->getType() };
1836 CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
1837 Changed = true;
1841 if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1842 // memmove(x,x,size) -> noop.
1843 if (MTI->getSource() == MTI->getDest())
1844 return eraseInstFromFunction(CI);
1847 // If we can determine a pointer alignment that is bigger than currently
1848 // set, update the alignment.
1849 if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1850 if (Instruction *I = SimplifyAnyMemTransfer(MTI))
1851 return I;
1852 } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
1853 if (Instruction *I = SimplifyAnyMemSet(MSI))
1854 return I;
1857 if (Changed) return II;
1860 // For vector result intrinsics, use the generic demanded vector support.
1861 if (II->getType()->isVectorTy()) {
1862 auto VWidth = II->getType()->getVectorNumElements();
1863 APInt UndefElts(VWidth, 0);
1864 APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
1865 if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
1866 if (V != II)
1867 return replaceInstUsesWith(*II, V);
1868 return II;
1872 if (Instruction *I = SimplifyNVVMIntrinsic(II, *this))
1873 return I;
1875 auto SimplifyDemandedVectorEltsLow = [this](Value *Op, unsigned Width,
1876 unsigned DemandedWidth) {
1877 APInt UndefElts(Width, 0);
1878 APInt DemandedElts = APInt::getLowBitsSet(Width, DemandedWidth);
1879 return SimplifyDemandedVectorElts(Op, DemandedElts, UndefElts);
1882 Intrinsic::ID IID = II->getIntrinsicID();
1883 switch (IID) {
1884 default: break;
1885 case Intrinsic::objectsize:
1886 if (Value *V = lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/false))
1887 return replaceInstUsesWith(CI, V);
1888 return nullptr;
1889 case Intrinsic::bswap: {
1890 Value *IIOperand = II->getArgOperand(0);
1891 Value *X = nullptr;
1893 // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
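// e.g. for an i32 %x truncated to i16, c is 16, so the nested byte swaps
// reduce to trunc(lshr(%x, 16)), i.e. the high half of %x.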
1894 if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
1895 unsigned C = X->getType()->getPrimitiveSizeInBits() -
1896 IIOperand->getType()->getPrimitiveSizeInBits();
1897 Value *CV = ConstantInt::get(X->getType(), C);
1898 Value *V = Builder.CreateLShr(X, CV);
1899 return new TruncInst(V, IIOperand->getType());
1901 break;
1903 case Intrinsic::masked_load:
1904 if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
1905 return replaceInstUsesWith(CI, SimplifiedMaskedOp);
1906 break;
1907 case Intrinsic::masked_store:
1908 return simplifyMaskedStore(*II);
1909 case Intrinsic::masked_gather:
1910 return simplifyMaskedGather(*II);
1911 case Intrinsic::masked_scatter:
1912 return simplifyMaskedScatter(*II);
1913 case Intrinsic::launder_invariant_group:
1914 case Intrinsic::strip_invariant_group:
1915 if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this))
1916 return replaceInstUsesWith(*II, SkippedBarrier);
1917 break;
1918 case Intrinsic::powi:
1919 if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
1920 // 0 and 1 are handled in instsimplify
1922 // powi(x, -1) -> 1/x
1923 if (Power->isMinusOne())
1924 return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
1925 II->getArgOperand(0));
1926 // powi(x, 2) -> x*x
1927 if (Power->equalsInt(2))
1928 return BinaryOperator::CreateFMul(II->getArgOperand(0),
1929 II->getArgOperand(0));
1931 break;
1933 case Intrinsic::cttz:
1934 case Intrinsic::ctlz:
1935 if (auto *I = foldCttzCtlz(*II, *this))
1936 return I;
1937 break;
1939 case Intrinsic::ctpop:
1940 if (auto *I = foldCtpop(*II, *this))
1941 return I;
1942 break;
1944 case Intrinsic::fshl:
1945 case Intrinsic::fshr: {
1946 Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
1947 Type *Ty = II->getType();
1948 unsigned BitWidth = Ty->getScalarSizeInBits();
1949 Constant *ShAmtC;
1950 if (match(II->getArgOperand(2), m_Constant(ShAmtC)) &&
1951 !isa<ConstantExpr>(ShAmtC) && !ShAmtC->containsConstantExpression()) {
1952 // Canonicalize a constant shift amount operand to its value modulo the bit-width.
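// e.g. fshl i32 %x, %y, 37 is rewritten as fshl i32 %x, %y, 5 (37 urem 32).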
1953 Constant *WidthC = ConstantInt::get(Ty, BitWidth);
1954 Constant *ModuloC = ConstantExpr::getURem(ShAmtC, WidthC);
1955 if (ModuloC != ShAmtC) {
1956 II->setArgOperand(2, ModuloC);
1957 return II;
1959 assert(ConstantExpr::getICmp(ICmpInst::ICMP_UGT, WidthC, ShAmtC) ==
1960 ConstantInt::getTrue(CmpInst::makeCmpResultType(Ty)) &&
1961 "Shift amount expected to be modulo bitwidth");
1963 // Canonicalize funnel shift right by constant to funnel shift left. This
1964 // is not entirely arbitrary. For historical reasons, the backend may
1965 // recognize rotate left patterns but miss rotate right patterns.
1966 if (IID == Intrinsic::fshr) {
1967 // fshr X, Y, C --> fshl X, Y, (BitWidth - C)
1968 Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC);
1969 Module *Mod = II->getModule();
1970 Function *Fshl = Intrinsic::getDeclaration(Mod, Intrinsic::fshl, Ty);
1971 return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC });
1973 assert(IID == Intrinsic::fshl &&
1974 "All funnel shifts by simple constants should go left");
1976 // fshl(X, 0, C) --> shl X, C
1977 // fshl(X, undef, C) --> shl X, C
1978 if (match(Op1, m_ZeroInt()) || match(Op1, m_Undef()))
1979 return BinaryOperator::CreateShl(Op0, ShAmtC);
1981 // fshl(0, X, C) --> lshr X, (BW-C)
1982 // fshl(undef, X, C) --> lshr X, (BW-C)
1983 if (match(Op0, m_ZeroInt()) || match(Op0, m_Undef()))
1984 return BinaryOperator::CreateLShr(Op1,
1985 ConstantExpr::getSub(WidthC, ShAmtC));
1987 // fshl i16 X, X, 8 --> bswap i16 X (reduce to more-specific form)
1988 if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) {
1989 Module *Mod = II->getModule();
1990 Function *Bswap = Intrinsic::getDeclaration(Mod, Intrinsic::bswap, Ty);
1991 return CallInst::Create(Bswap, { Op0 });
1995 // Left or right might be masked.
1996 if (SimplifyDemandedInstructionBits(*II))
1997 return &CI;
1999 // The shift amount (operand 2) of a funnel shift is modulo the bitwidth,
2000 // so only the low bits of the shift amount are demanded if the bitwidth is
2001 // a power-of-2.
2002 if (!isPowerOf2_32(BitWidth))
2003 break;
2004 APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth));
2005 KnownBits Op2Known(BitWidth);
2006 if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known))
2007 return &CI;
2008 break;
2010 case Intrinsic::uadd_with_overflow:
2011 case Intrinsic::sadd_with_overflow: {
2012 if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2013 return I;
2014 if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2015 return I;
2017 // Given 2 constant operands whose sum does not overflow:
2018 // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1
2019 // saddo (X +nsw C0), C1 -> saddo X, C0 + C1
2020 Value *X;
2021 const APInt *C0, *C1;
2022 Value *Arg0 = II->getArgOperand(0);
2023 Value *Arg1 = II->getArgOperand(1);
2024 bool IsSigned = IID == Intrinsic::sadd_with_overflow;
2025 bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0)))
2026 : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0)));
2027 if (HasNWAdd && match(Arg1, m_APInt(C1))) {
2028 bool Overflow;
2029 APInt NewC =
2030 IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
2031 if (!Overflow)
2032 return replaceInstUsesWith(
2033 *II, Builder.CreateBinaryIntrinsic(
2034 IID, X, ConstantInt::get(Arg1->getType(), NewC)));
2036 break;
2039 case Intrinsic::umul_with_overflow:
2040 case Intrinsic::smul_with_overflow:
2041 if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2042 return I;
2043 LLVM_FALLTHROUGH;
2045 case Intrinsic::usub_with_overflow:
2046 if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2047 return I;
2048 break;
2050 case Intrinsic::ssub_with_overflow: {
2051 if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2052 return I;
2054 Constant *C;
2055 Value *Arg0 = II->getArgOperand(0);
2056 Value *Arg1 = II->getArgOperand(1);
2057 // Given a constant C that is not the minimum signed value
2058 // for an integer of a given bit width:
2060 // ssubo X, C -> saddo X, -C
2061 if (match(Arg1, m_Constant(C)) && C->isNotMinSignedValue()) {
2062 Value *NegVal = ConstantExpr::getNeg(C);
2063 // Build a saddo call that is equivalent to the discovered
2064 // ssubo call.
2065 return replaceInstUsesWith(
2066 *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow,
2067 Arg0, NegVal));
2070 break;
2073 case Intrinsic::uadd_sat:
2074 case Intrinsic::sadd_sat:
2075 if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2076 return I;
2077 LLVM_FALLTHROUGH;
2078 case Intrinsic::usub_sat:
2079 case Intrinsic::ssub_sat: {
2080 SaturatingInst *SI = cast<SaturatingInst>(II);
2081 Type *Ty = SI->getType();
2082 Value *Arg0 = SI->getLHS();
2083 Value *Arg1 = SI->getRHS();
2085 // Make use of known overflow information.
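// For instance, if the add/sub provably never overflows, the saturating
// intrinsic degrades to a plain nuw/nsw binary op; if it always overflows in
// one direction, it folds to the corresponding saturation constant.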
2086 OverflowResult OR = computeOverflow(SI->getBinaryOp(), SI->isSigned(),
2087 Arg0, Arg1, SI);
2088 switch (OR) {
2089 case OverflowResult::MayOverflow:
2090 break;
2091 case OverflowResult::NeverOverflows:
2092 if (SI->isSigned())
2093 return BinaryOperator::CreateNSW(SI->getBinaryOp(), Arg0, Arg1);
2094 else
2095 return BinaryOperator::CreateNUW(SI->getBinaryOp(), Arg0, Arg1);
2096 case OverflowResult::AlwaysOverflowsLow: {
2097 unsigned BitWidth = Ty->getScalarSizeInBits();
2098 APInt Min = APSInt::getMinValue(BitWidth, !SI->isSigned());
2099 return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Min));
2101 case OverflowResult::AlwaysOverflowsHigh: {
2102 unsigned BitWidth = Ty->getScalarSizeInBits();
2103 APInt Max = APSInt::getMaxValue(BitWidth, !SI->isSigned());
2104 return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Max));
2108 // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN
2109 Constant *C;
2110 if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) &&
2111 C->isNotMinSignedValue()) {
2112 Value *NegVal = ConstantExpr::getNeg(C);
2113 return replaceInstUsesWith(
2114 *II, Builder.CreateBinaryIntrinsic(
2115 Intrinsic::sadd_sat, Arg0, NegVal));
2118 // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2))
2119 // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2))
2120 // if Val and Val2 have the same sign
2121 if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {
2122 Value *X;
2123 const APInt *Val, *Val2;
2124 APInt NewVal;
2125 bool IsUnsigned =
2126 IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
2127 if (Other->getIntrinsicID() == IID &&
2128 match(Arg1, m_APInt(Val)) &&
2129 match(Other->getArgOperand(0), m_Value(X)) &&
2130 match(Other->getArgOperand(1), m_APInt(Val2))) {
2131 if (IsUnsigned)
2132 NewVal = Val->uadd_sat(*Val2);
2133 else if (Val->isNonNegative() == Val2->isNonNegative()) {
2134 bool Overflow;
2135 NewVal = Val->sadd_ov(*Val2, Overflow);
2136 if (Overflow) {
2137 // Both adds together may add more than SignedMaxValue
2138 // without saturating the final result.
2139 break;
2141 } else {
2142 // Cannot fold saturated addition with different signs.
2143 break;
2146 return replaceInstUsesWith(
2147 *II, Builder.CreateBinaryIntrinsic(
2148 IID, X, ConstantInt::get(II->getType(), NewVal)));
2151 break;
2154 case Intrinsic::minnum:
2155 case Intrinsic::maxnum:
2156 case Intrinsic::minimum:
2157 case Intrinsic::maximum: {
2158 if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2159 return I;
2160 Value *Arg0 = II->getArgOperand(0);
2161 Value *Arg1 = II->getArgOperand(1);
2162 Value *X, *Y;
2163 if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) &&
2164 (Arg0->hasOneUse() || Arg1->hasOneUse())) {
2165 // If both operands are negated, invert the call and negate the result:
2166 // min(-X, -Y) --> -(max(X, Y))
2167 // max(-X, -Y) --> -(min(X, Y))
2168 Intrinsic::ID NewIID;
2169 switch (IID) {
2170 case Intrinsic::maxnum:
2171 NewIID = Intrinsic::minnum;
2172 break;
2173 case Intrinsic::minnum:
2174 NewIID = Intrinsic::maxnum;
2175 break;
2176 case Intrinsic::maximum:
2177 NewIID = Intrinsic::minimum;
2178 break;
2179 case Intrinsic::minimum:
2180 NewIID = Intrinsic::maximum;
2181 break;
2182 default:
2183 llvm_unreachable("unexpected intrinsic ID");
2185 Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II);
2186 Instruction *FNeg = BinaryOperator::CreateFNeg(NewCall);
2187 FNeg->copyIRFlags(II);
2188 return FNeg;
2191 // m(m(X, C2), C1) -> m(X, C)
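// e.g. maxnum(maxnum(%x, 2.0), 1.0) --> maxnum(%x, 2.0), since the two
// constants fold to maxnum(2.0, 1.0) == 2.0 (illustrative values).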
2192 const APFloat *C1, *C2;
2193 if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
2194 if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) &&
2195 ((match(M->getArgOperand(0), m_Value(X)) &&
2196 match(M->getArgOperand(1), m_APFloat(C2))) ||
2197 (match(M->getArgOperand(1), m_Value(X)) &&
2198 match(M->getArgOperand(0), m_APFloat(C2))))) {
2199 APFloat Res(0.0);
2200 switch (IID) {
2201 case Intrinsic::maxnum:
2202 Res = maxnum(*C1, *C2);
2203 break;
2204 case Intrinsic::minnum:
2205 Res = minnum(*C1, *C2);
2206 break;
2207 case Intrinsic::maximum:
2208 Res = maximum(*C1, *C2);
2209 break;
2210 case Intrinsic::minimum:
2211 Res = minimum(*C1, *C2);
2212 break;
2213 default:
2214 llvm_unreachable("unexpected intrinsic ID");
2216 Instruction *NewCall = Builder.CreateBinaryIntrinsic(
2217 IID, X, ConstantFP::get(Arg0->getType(), Res));
2218 NewCall->copyIRFlags(II);
2219 return replaceInstUsesWith(*II, NewCall);
2223 break;
2225 case Intrinsic::fmuladd: {
2226 // Canonicalize fast fmuladd to the separate fmul + fadd.
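// e.g. a fast-math fmuladd(%a, %b, %c) becomes fadd(fmul(%a, %b), %c) with the
// same fast-math flags, so the separate operations can participate in further
// folds.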
2227 if (II->isFast()) {
2228 BuilderTy::FastMathFlagGuard Guard(Builder);
2229 Builder.setFastMathFlags(II->getFastMathFlags());
2230 Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
2231 II->getArgOperand(1));
2232 Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2));
2233 Add->takeName(II);
2234 return replaceInstUsesWith(*II, Add);
2237 LLVM_FALLTHROUGH;
2239 case Intrinsic::fma: {
2240 if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2241 return I;
2243 // fma fneg(x), fneg(y), z -> fma x, y, z
2244 Value *Src0 = II->getArgOperand(0);
2245 Value *Src1 = II->getArgOperand(1);
2246 Value *X, *Y;
2247 if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) {
2248 II->setArgOperand(0, X);
2249 II->setArgOperand(1, Y);
2250 return II;
2253 // fma fabs(x), fabs(x), z -> fma x, x, z
2254 if (match(Src0, m_FAbs(m_Value(X))) &&
2255 match(Src1, m_FAbs(m_Specific(X)))) {
2256 II->setArgOperand(0, X);
2257 II->setArgOperand(1, X);
2258 return II;
2261 // fma x, 1, z -> fadd x, z
2262 if (match(Src1, m_FPOne())) {
2263 auto *FAdd = BinaryOperator::CreateFAdd(Src0, II->getArgOperand(2));
2264 FAdd->copyFastMathFlags(II);
2265 return FAdd;
2268 break;
2270 case Intrinsic::fabs: {
2271 Value *Cond;
2272 Constant *LHS, *RHS;
2273 if (match(II->getArgOperand(0),
2274 m_Select(m_Value(Cond), m_Constant(LHS), m_Constant(RHS)))) {
2275 CallInst *Call0 = Builder.CreateCall(II->getCalledFunction(), {LHS});
2276 CallInst *Call1 = Builder.CreateCall(II->getCalledFunction(), {RHS});
2277 return SelectInst::Create(Cond, Call0, Call1);
2280 LLVM_FALLTHROUGH;
2282 case Intrinsic::ceil:
2283 case Intrinsic::floor:
2284 case Intrinsic::round:
2285 case Intrinsic::nearbyint:
2286 case Intrinsic::rint:
2287 case Intrinsic::trunc: {
2288 Value *ExtSrc;
2289 if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) {
2290 // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x)
2291 Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II);
2292 return new FPExtInst(NarrowII, II->getType());
2294 break;
2296 case Intrinsic::cos:
2297 case Intrinsic::amdgcn_cos: {
2298 Value *X;
2299 Value *Src = II->getArgOperand(0);
2300 if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X)))) {
2301 // cos(-x) -> cos(x)
2302 // cos(fabs(x)) -> cos(x)
2303 II->setArgOperand(0, X);
2304 return II;
2306 break;
2308 case Intrinsic::sin: {
2309 Value *X;
2310 if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) {
2311 // sin(-x) --> -sin(x)
2312 Value *NewSin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, II);
2313 Instruction *FNeg = BinaryOperator::CreateFNeg(NewSin);
2314 FNeg->copyFastMathFlags(II);
2315 return FNeg;
2317 break;
2319 case Intrinsic::ppc_altivec_lvx:
2320 case Intrinsic::ppc_altivec_lvxl:
2321 // Turn PPC lvx -> load if the pointer is known aligned.
2322 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
2323 &DT) >= 16) {
2324 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2325 PointerType::getUnqual(II->getType()));
2326 return new LoadInst(II->getType(), Ptr);
2328 break;
2329 case Intrinsic::ppc_vsx_lxvw4x:
2330 case Intrinsic::ppc_vsx_lxvd2x: {
2331 // Turn PPC VSX loads into normal loads.
2332 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2333 PointerType::getUnqual(II->getType()));
2334 return new LoadInst(II->getType(), Ptr, Twine(""), false, 1);
2336 case Intrinsic::ppc_altivec_stvx:
2337 case Intrinsic::ppc_altivec_stvxl:
2338 // Turn stvx -> store if the pointer is known aligned.
2339 if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
2340 &DT) >= 16) {
2341 Type *OpPtrTy =
2342 PointerType::getUnqual(II->getArgOperand(0)->getType());
2343 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2344 return new StoreInst(II->getArgOperand(0), Ptr);
2346 break;
2347 case Intrinsic::ppc_vsx_stxvw4x:
2348 case Intrinsic::ppc_vsx_stxvd2x: {
2349 // Turn PPC VSX stores into normal stores.
2350 Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType());
2351 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2352 return new StoreInst(II->getArgOperand(0), Ptr, false, 1);
2354 case Intrinsic::ppc_qpx_qvlfs:
2355 // Turn PPC QPX qvlfs -> load if the pointer is known aligned.
2356 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
2357 &DT) >= 16) {
2358 Type *VTy = VectorType::get(Builder.getFloatTy(),
2359 II->getType()->getVectorNumElements());
2360 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2361 PointerType::getUnqual(VTy));
2362 Value *Load = Builder.CreateLoad(VTy, Ptr);
2363 return new FPExtInst(Load, II->getType());
2365 break;
2366 case Intrinsic::ppc_qpx_qvlfd:
2367 // Turn PPC QPX qvlfd -> load if the pointer is known aligned.
2368 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, &AC,
2369 &DT) >= 32) {
2370 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2371 PointerType::getUnqual(II->getType()));
2372 return new LoadInst(II->getType(), Ptr);
2374 break;
2375 case Intrinsic::ppc_qpx_qvstfs:
2376 // Turn PPC QPX qvstfs -> store if the pointer is known aligned.
2377 if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
2378 &DT) >= 16) {
2379 Type *VTy = VectorType::get(Builder.getFloatTy(),
2380 II->getArgOperand(0)->getType()->getVectorNumElements());
2381 Value *TOp = Builder.CreateFPTrunc(II->getArgOperand(0), VTy);
2382 Type *OpPtrTy = PointerType::getUnqual(VTy);
2383 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2384 return new StoreInst(TOp, Ptr);
2386 break;
2387 case Intrinsic::ppc_qpx_qvstfd:
2388 // Turn PPC QPX qvstfd -> store if the pointer is known aligned.
2389 if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, &AC,
2390 &DT) >= 32) {
2391 Type *OpPtrTy =
2392 PointerType::getUnqual(II->getArgOperand(0)->getType());
2393 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2394 return new StoreInst(II->getArgOperand(0), Ptr);
2396 break;
2398 case Intrinsic::x86_bmi_bextr_32:
2399 case Intrinsic::x86_bmi_bextr_64:
2400 case Intrinsic::x86_tbm_bextri_u32:
2401 case Intrinsic::x86_tbm_bextri_u64:
2402 // If the RHS is a constant we can try some simplifications.
2403 if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2404 uint64_t Shift = C->getZExtValue();
2405 uint64_t Length = (Shift >> 8) & 0xff;
2406 Shift &= 0xff;
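// The control word packs the fields as (length << 8) | start; e.g. a constant
// of 0x0804 requests Length = 8 bits beginning at bit Shift = 4 (illustrative
// value).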
2407 unsigned BitWidth = II->getType()->getIntegerBitWidth();
2408 // If the length is 0 or the shift is out of range, replace with zero.
2409 if (Length == 0 || Shift >= BitWidth)
2410 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), 0));
2411 // If the LHS is also a constant, we can completely constant fold this.
2412 if (auto *InC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
2413 uint64_t Result = InC->getZExtValue() >> Shift;
2414 if (Length > BitWidth)
2415 Length = BitWidth;
2416 Result &= maskTrailingOnes<uint64_t>(Length);
2417 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Result));
2419 // TODO should we turn this into 'and' if shift is 0? Or 'shl' if we
2420 // are only masking bits that a shift already cleared?
2422 break;
2424 case Intrinsic::x86_bmi_bzhi_32:
2425 case Intrinsic::x86_bmi_bzhi_64:
2426 // If the RHS is a constant we can try some simplifications.
2427 if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2428 uint64_t Index = C->getZExtValue() & 0xff;
2429 unsigned BitWidth = II->getType()->getIntegerBitWidth();
2430 if (Index >= BitWidth)
2431 return replaceInstUsesWith(CI, II->getArgOperand(0));
2432 if (Index == 0)
2433 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), 0));
2434 // If the LHS is also a constant, we can completely constant fold this.
2435 if (auto *InC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
2436 uint64_t Result = InC->getZExtValue();
2437 Result &= maskTrailingOnes<uint64_t>(Index);
2438 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Result));
2440 // TODO should we convert this to an AND if the RHS is constant?
2442 break;
2444 case Intrinsic::x86_vcvtph2ps_128:
2445 case Intrinsic::x86_vcvtph2ps_256: {
2446 auto Arg = II->getArgOperand(0);
2447 auto ArgType = cast<VectorType>(Arg->getType());
2448 auto RetType = cast<VectorType>(II->getType());
2449 unsigned ArgWidth = ArgType->getNumElements();
2450 unsigned RetWidth = RetType->getNumElements();
2451 assert(RetWidth <= ArgWidth && "Unexpected input/return vector widths");
2452 assert(ArgType->isIntOrIntVectorTy() &&
2453 ArgType->getScalarSizeInBits() == 16 &&
2454 "CVTPH2PS input type should be 16-bit integer vector");
2455 assert(RetType->getScalarType()->isFloatTy() &&
2456 "CVTPH2PS output type should be 32-bit float vector");
2458 // Constant folding: convert to a generic half-to-single (fpext) conversion.
2459 if (isa<ConstantAggregateZero>(Arg))
2460 return replaceInstUsesWith(*II, ConstantAggregateZero::get(RetType));
2462 if (isa<ConstantDataVector>(Arg)) {
2463 auto VectorHalfAsShorts = Arg;
2464 if (RetWidth < ArgWidth) {
2465 SmallVector<uint32_t, 8> SubVecMask;
2466 for (unsigned i = 0; i != RetWidth; ++i)
2467 SubVecMask.push_back((int)i);
2468 VectorHalfAsShorts = Builder.CreateShuffleVector(
2469 Arg, UndefValue::get(ArgType), SubVecMask);
2472 auto VectorHalfType =
2473 VectorType::get(Type::getHalfTy(II->getContext()), RetWidth);
2474 auto VectorHalfs =
2475 Builder.CreateBitCast(VectorHalfAsShorts, VectorHalfType);
2476 auto VectorFloats = Builder.CreateFPExt(VectorHalfs, RetType);
2477 return replaceInstUsesWith(*II, VectorFloats);
2480 // We only use the lowest lanes of the argument.
2481 if (Value *V = SimplifyDemandedVectorEltsLow(Arg, ArgWidth, RetWidth)) {
2482 II->setArgOperand(0, V);
2483 return II;
2485 break;
2488 case Intrinsic::x86_sse_cvtss2si:
2489 case Intrinsic::x86_sse_cvtss2si64:
2490 case Intrinsic::x86_sse_cvttss2si:
2491 case Intrinsic::x86_sse_cvttss2si64:
2492 case Intrinsic::x86_sse2_cvtsd2si:
2493 case Intrinsic::x86_sse2_cvtsd2si64:
2494 case Intrinsic::x86_sse2_cvttsd2si:
2495 case Intrinsic::x86_sse2_cvttsd2si64:
2496 case Intrinsic::x86_avx512_vcvtss2si32:
2497 case Intrinsic::x86_avx512_vcvtss2si64:
2498 case Intrinsic::x86_avx512_vcvtss2usi32:
2499 case Intrinsic::x86_avx512_vcvtss2usi64:
2500 case Intrinsic::x86_avx512_vcvtsd2si32:
2501 case Intrinsic::x86_avx512_vcvtsd2si64:
2502 case Intrinsic::x86_avx512_vcvtsd2usi32:
2503 case Intrinsic::x86_avx512_vcvtsd2usi64:
2504 case Intrinsic::x86_avx512_cvttss2si:
2505 case Intrinsic::x86_avx512_cvttss2si64:
2506 case Intrinsic::x86_avx512_cvttss2usi:
2507 case Intrinsic::x86_avx512_cvttss2usi64:
2508 case Intrinsic::x86_avx512_cvttsd2si:
2509 case Intrinsic::x86_avx512_cvttsd2si64:
2510 case Intrinsic::x86_avx512_cvttsd2usi:
2511 case Intrinsic::x86_avx512_cvttsd2usi64: {
2512 // These intrinsics only demand the 0th element of their input vectors. If
2513 // we can simplify the input based on that, do so now.
2514 Value *Arg = II->getArgOperand(0);
2515 unsigned VWidth = Arg->getType()->getVectorNumElements();
2516 if (Value *V = SimplifyDemandedVectorEltsLow(Arg, VWidth, 1)) {
2517 II->setArgOperand(0, V);
2518 return II;
2520 break;
2523 case Intrinsic::x86_mmx_pmovmskb:
2524 case Intrinsic::x86_sse_movmsk_ps:
2525 case Intrinsic::x86_sse2_movmsk_pd:
2526 case Intrinsic::x86_sse2_pmovmskb_128:
2527 case Intrinsic::x86_avx_movmsk_pd_256:
2528 case Intrinsic::x86_avx_movmsk_ps_256:
2529 case Intrinsic::x86_avx2_pmovmskb:
2530 if (Value *V = simplifyX86movmsk(*II, Builder))
2531 return replaceInstUsesWith(*II, V);
2532 break;
2534 case Intrinsic::x86_sse_comieq_ss:
2535 case Intrinsic::x86_sse_comige_ss:
2536 case Intrinsic::x86_sse_comigt_ss:
2537 case Intrinsic::x86_sse_comile_ss:
2538 case Intrinsic::x86_sse_comilt_ss:
2539 case Intrinsic::x86_sse_comineq_ss:
2540 case Intrinsic::x86_sse_ucomieq_ss:
2541 case Intrinsic::x86_sse_ucomige_ss:
2542 case Intrinsic::x86_sse_ucomigt_ss:
2543 case Intrinsic::x86_sse_ucomile_ss:
2544 case Intrinsic::x86_sse_ucomilt_ss:
2545 case Intrinsic::x86_sse_ucomineq_ss:
2546 case Intrinsic::x86_sse2_comieq_sd:
2547 case Intrinsic::x86_sse2_comige_sd:
2548 case Intrinsic::x86_sse2_comigt_sd:
2549 case Intrinsic::x86_sse2_comile_sd:
2550 case Intrinsic::x86_sse2_comilt_sd:
2551 case Intrinsic::x86_sse2_comineq_sd:
2552 case Intrinsic::x86_sse2_ucomieq_sd:
2553 case Intrinsic::x86_sse2_ucomige_sd:
2554 case Intrinsic::x86_sse2_ucomigt_sd:
2555 case Intrinsic::x86_sse2_ucomile_sd:
2556 case Intrinsic::x86_sse2_ucomilt_sd:
2557 case Intrinsic::x86_sse2_ucomineq_sd:
2558 case Intrinsic::x86_avx512_vcomi_ss:
2559 case Intrinsic::x86_avx512_vcomi_sd:
2560 case Intrinsic::x86_avx512_mask_cmp_ss:
2561 case Intrinsic::x86_avx512_mask_cmp_sd: {
2562 // These intrinsics only demand the 0th element of their input vectors. If
2563 // we can simplify the input based on that, do so now.
2564 bool MadeChange = false;
2565 Value *Arg0 = II->getArgOperand(0);
2566 Value *Arg1 = II->getArgOperand(1);
2567 unsigned VWidth = Arg0->getType()->getVectorNumElements();
2568 if (Value *V = SimplifyDemandedVectorEltsLow(Arg0, VWidth, 1)) {
2569 II->setArgOperand(0, V);
2570 MadeChange = true;
2572 if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, 1)) {
2573 II->setArgOperand(1, V);
2574 MadeChange = true;
2576 if (MadeChange)
2577 return II;
2578 break;
2580 case Intrinsic::x86_avx512_cmp_pd_128:
2581 case Intrinsic::x86_avx512_cmp_pd_256:
2582 case Intrinsic::x86_avx512_cmp_pd_512:
2583 case Intrinsic::x86_avx512_cmp_ps_128:
2584 case Intrinsic::x86_avx512_cmp_ps_256:
2585 case Intrinsic::x86_avx512_cmp_ps_512: {
2586 // Folding cmp(sub(a,b),0) -> cmp(a,b) and cmp(0,sub(a,b)) -> cmp(b,a)
2587 Value *Arg0 = II->getArgOperand(0);
2588 Value *Arg1 = II->getArgOperand(1);
2589 bool Arg0IsZero = match(Arg0, m_PosZeroFP());
2590 if (Arg0IsZero)
2591 std::swap(Arg0, Arg1);
2592 Value *A, *B;
2593 // This fold requires only NINF (no +/- inf), since inf minus
2594 // inf is NaN.
2595 // NSZ (no signed zeros) is not needed because zeros of any sign are
2596 // equal for both compares.
2597 // NNAN is not needed because nans compare the same for both compares.
2598 // The compare intrinsic uses the above assumptions and therefore
2599 // doesn't require additional flags.
2600 if ((match(Arg0, m_OneUse(m_FSub(m_Value(A), m_Value(B)))) &&
2601 match(Arg1, m_PosZeroFP()) && isa<Instruction>(Arg0) &&
2602 cast<Instruction>(Arg0)->getFastMathFlags().noInfs())) {
2603 if (Arg0IsZero)
2604 std::swap(A, B);
2605 II->setArgOperand(0, A);
2606 II->setArgOperand(1, B);
2607 return II;
2609 break;
2612 case Intrinsic::x86_avx512_add_ps_512:
2613 case Intrinsic::x86_avx512_div_ps_512:
2614 case Intrinsic::x86_avx512_mul_ps_512:
2615 case Intrinsic::x86_avx512_sub_ps_512:
2616 case Intrinsic::x86_avx512_add_pd_512:
2617 case Intrinsic::x86_avx512_div_pd_512:
2618 case Intrinsic::x86_avx512_mul_pd_512:
2619 case Intrinsic::x86_avx512_sub_pd_512:
2620 // If the rounding mode is CUR_DIRECTION(4) we can turn these into regular
2621 // IR operations.
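// e.g. a 512-bit add intrinsic called with rounding mode 4 is rewritten to a
// plain fadd on its two vector operands (see the switch below).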
2622 if (auto *R = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
2623 if (R->getValue() == 4) {
2624 Value *Arg0 = II->getArgOperand(0);
2625 Value *Arg1 = II->getArgOperand(1);
2627 Value *V;
2628 switch (IID) {
2629 default: llvm_unreachable("Case stmts out of sync!");
2630 case Intrinsic::x86_avx512_add_ps_512:
2631 case Intrinsic::x86_avx512_add_pd_512:
2632 V = Builder.CreateFAdd(Arg0, Arg1);
2633 break;
2634 case Intrinsic::x86_avx512_sub_ps_512:
2635 case Intrinsic::x86_avx512_sub_pd_512:
2636 V = Builder.CreateFSub(Arg0, Arg1);
2637 break;
2638 case Intrinsic::x86_avx512_mul_ps_512:
2639 case Intrinsic::x86_avx512_mul_pd_512:
2640 V = Builder.CreateFMul(Arg0, Arg1);
2641 break;
2642 case Intrinsic::x86_avx512_div_ps_512:
2643 case Intrinsic::x86_avx512_div_pd_512:
2644 V = Builder.CreateFDiv(Arg0, Arg1);
2645 break;
2648 return replaceInstUsesWith(*II, V);
2651 break;
2653 case Intrinsic::x86_avx512_mask_add_ss_round:
2654 case Intrinsic::x86_avx512_mask_div_ss_round:
2655 case Intrinsic::x86_avx512_mask_mul_ss_round:
2656 case Intrinsic::x86_avx512_mask_sub_ss_round:
2657 case Intrinsic::x86_avx512_mask_add_sd_round:
2658 case Intrinsic::x86_avx512_mask_div_sd_round:
2659 case Intrinsic::x86_avx512_mask_mul_sd_round:
2660 case Intrinsic::x86_avx512_mask_sub_sd_round:
2661 // If the rounding mode is CUR_DIRECTION(4) we can turn these into regular
2662 // IR operations.
2663 if (auto *R = dyn_cast<ConstantInt>(II->getArgOperand(4))) {
2664 if (R->getValue() == 4) {
2665 // Extract the element as scalars.
2666 Value *Arg0 = II->getArgOperand(0);
2667 Value *Arg1 = II->getArgOperand(1);
2668 Value *LHS = Builder.CreateExtractElement(Arg0, (uint64_t)0);
2669 Value *RHS = Builder.CreateExtractElement(Arg1, (uint64_t)0);
2671 Value *V;
2672 switch (IID) {
2673 default: llvm_unreachable("Case stmts out of sync!");
2674 case Intrinsic::x86_avx512_mask_add_ss_round:
2675 case Intrinsic::x86_avx512_mask_add_sd_round:
2676 V = Builder.CreateFAdd(LHS, RHS);
2677 break;
2678 case Intrinsic::x86_avx512_mask_sub_ss_round:
2679 case Intrinsic::x86_avx512_mask_sub_sd_round:
2680 V = Builder.CreateFSub(LHS, RHS);
2681 break;
2682 case Intrinsic::x86_avx512_mask_mul_ss_round:
2683 case Intrinsic::x86_avx512_mask_mul_sd_round:
2684 V = Builder.CreateFMul(LHS, RHS);
2685 break;
2686 case Intrinsic::x86_avx512_mask_div_ss_round:
2687 case Intrinsic::x86_avx512_mask_div_sd_round:
2688 V = Builder.CreateFDiv(LHS, RHS);
2689 break;
2692 // Handle the masking aspect of the intrinsic.
2693 Value *Mask = II->getArgOperand(3);
2694 auto *C = dyn_cast<ConstantInt>(Mask);
2695 // We don't need a select if we know the mask bit is a 1.
2696 if (!C || !C->getValue()[0]) {
2697 // Cast the mask to an i1 vector and then extract the lowest element.
2698 auto *MaskTy = VectorType::get(Builder.getInt1Ty(),
2699 cast<IntegerType>(Mask->getType())->getBitWidth());
2700 Mask = Builder.CreateBitCast(Mask, MaskTy);
2701 Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
2702 // Extract the lowest element from the passthru operand.
2703 Value *Passthru = Builder.CreateExtractElement(II->getArgOperand(2),
2704 (uint64_t)0);
2705 V = Builder.CreateSelect(Mask, V, Passthru);
2708 // Insert the result back into the original argument 0.
2709 V = Builder.CreateInsertElement(Arg0, V, (uint64_t)0);
2711 return replaceInstUsesWith(*II, V);
2714 break;
2716 // Constant fold ashr( <A x Bi>, Ci ).
2717 // Constant fold lshr( <A x Bi>, Ci ).
2718 // Constant fold shl( <A x Bi>, Ci ).
2719 case Intrinsic::x86_sse2_psrai_d:
2720 case Intrinsic::x86_sse2_psrai_w:
2721 case Intrinsic::x86_avx2_psrai_d:
2722 case Intrinsic::x86_avx2_psrai_w:
2723 case Intrinsic::x86_avx512_psrai_q_128:
2724 case Intrinsic::x86_avx512_psrai_q_256:
2725 case Intrinsic::x86_avx512_psrai_d_512:
2726 case Intrinsic::x86_avx512_psrai_q_512:
2727 case Intrinsic::x86_avx512_psrai_w_512:
2728 case Intrinsic::x86_sse2_psrli_d:
2729 case Intrinsic::x86_sse2_psrli_q:
2730 case Intrinsic::x86_sse2_psrli_w:
2731 case Intrinsic::x86_avx2_psrli_d:
2732 case Intrinsic::x86_avx2_psrli_q:
2733 case Intrinsic::x86_avx2_psrli_w:
2734 case Intrinsic::x86_avx512_psrli_d_512:
2735 case Intrinsic::x86_avx512_psrli_q_512:
2736 case Intrinsic::x86_avx512_psrli_w_512:
2737 case Intrinsic::x86_sse2_pslli_d:
2738 case Intrinsic::x86_sse2_pslli_q:
2739 case Intrinsic::x86_sse2_pslli_w:
2740 case Intrinsic::x86_avx2_pslli_d:
2741 case Intrinsic::x86_avx2_pslli_q:
2742 case Intrinsic::x86_avx2_pslli_w:
2743 case Intrinsic::x86_avx512_pslli_d_512:
2744 case Intrinsic::x86_avx512_pslli_q_512:
2745 case Intrinsic::x86_avx512_pslli_w_512:
2746 if (Value *V = simplifyX86immShift(*II, Builder))
2747 return replaceInstUsesWith(*II, V);
2748 break;
2750 case Intrinsic::x86_sse2_psra_d:
2751 case Intrinsic::x86_sse2_psra_w:
2752 case Intrinsic::x86_avx2_psra_d:
2753 case Intrinsic::x86_avx2_psra_w:
2754 case Intrinsic::x86_avx512_psra_q_128:
2755 case Intrinsic::x86_avx512_psra_q_256:
2756 case Intrinsic::x86_avx512_psra_d_512:
2757 case Intrinsic::x86_avx512_psra_q_512:
2758 case Intrinsic::x86_avx512_psra_w_512:
2759 case Intrinsic::x86_sse2_psrl_d:
2760 case Intrinsic::x86_sse2_psrl_q:
2761 case Intrinsic::x86_sse2_psrl_w:
2762 case Intrinsic::x86_avx2_psrl_d:
2763 case Intrinsic::x86_avx2_psrl_q:
2764 case Intrinsic::x86_avx2_psrl_w:
2765 case Intrinsic::x86_avx512_psrl_d_512:
2766 case Intrinsic::x86_avx512_psrl_q_512:
2767 case Intrinsic::x86_avx512_psrl_w_512:
2768 case Intrinsic::x86_sse2_psll_d:
2769 case Intrinsic::x86_sse2_psll_q:
2770 case Intrinsic::x86_sse2_psll_w:
2771 case Intrinsic::x86_avx2_psll_d:
2772 case Intrinsic::x86_avx2_psll_q:
2773 case Intrinsic::x86_avx2_psll_w:
2774 case Intrinsic::x86_avx512_psll_d_512:
2775 case Intrinsic::x86_avx512_psll_q_512:
2776 case Intrinsic::x86_avx512_psll_w_512: {
2777 if (Value *V = simplifyX86immShift(*II, Builder))
2778 return replaceInstUsesWith(*II, V);
2780 // SSE2/AVX2 uses only the first 64-bits of the 128-bit vector
2781 // operand to compute the shift amount.
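// Hence only the low half of that vector's elements are demanded below; the
// upper elements may be simplified to undef without changing the shift.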
2782 Value *Arg1 = II->getArgOperand(1);
2783 assert(Arg1->getType()->getPrimitiveSizeInBits() == 128 &&
2784 "Unexpected packed shift size");
2785 unsigned VWidth = Arg1->getType()->getVectorNumElements();
2787 if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, VWidth / 2)) {
2788 II->setArgOperand(1, V);
2789 return II;
2791 break;
2794 case Intrinsic::x86_avx2_psllv_d:
2795 case Intrinsic::x86_avx2_psllv_d_256:
2796 case Intrinsic::x86_avx2_psllv_q:
2797 case Intrinsic::x86_avx2_psllv_q_256:
2798 case Intrinsic::x86_avx512_psllv_d_512:
2799 case Intrinsic::x86_avx512_psllv_q_512:
2800 case Intrinsic::x86_avx512_psllv_w_128:
2801 case Intrinsic::x86_avx512_psllv_w_256:
2802 case Intrinsic::x86_avx512_psllv_w_512:
2803 case Intrinsic::x86_avx2_psrav_d:
2804 case Intrinsic::x86_avx2_psrav_d_256:
2805 case Intrinsic::x86_avx512_psrav_q_128:
2806 case Intrinsic::x86_avx512_psrav_q_256:
2807 case Intrinsic::x86_avx512_psrav_d_512:
2808 case Intrinsic::x86_avx512_psrav_q_512:
2809 case Intrinsic::x86_avx512_psrav_w_128:
2810 case Intrinsic::x86_avx512_psrav_w_256:
2811 case Intrinsic::x86_avx512_psrav_w_512:
2812 case Intrinsic::x86_avx2_psrlv_d:
2813 case Intrinsic::x86_avx2_psrlv_d_256:
2814 case Intrinsic::x86_avx2_psrlv_q:
2815 case Intrinsic::x86_avx2_psrlv_q_256:
2816 case Intrinsic::x86_avx512_psrlv_d_512:
2817 case Intrinsic::x86_avx512_psrlv_q_512:
2818 case Intrinsic::x86_avx512_psrlv_w_128:
2819 case Intrinsic::x86_avx512_psrlv_w_256:
2820 case Intrinsic::x86_avx512_psrlv_w_512:
2821 if (Value *V = simplifyX86varShift(*II, Builder))
2822 return replaceInstUsesWith(*II, V);
2823 break;
2825 case Intrinsic::x86_sse2_packssdw_128:
2826 case Intrinsic::x86_sse2_packsswb_128:
2827 case Intrinsic::x86_avx2_packssdw:
2828 case Intrinsic::x86_avx2_packsswb:
2829 case Intrinsic::x86_avx512_packssdw_512:
2830 case Intrinsic::x86_avx512_packsswb_512:
2831 if (Value *V = simplifyX86pack(*II, Builder, true))
2832 return replaceInstUsesWith(*II, V);
2833 break;
2835 case Intrinsic::x86_sse2_packuswb_128:
2836 case Intrinsic::x86_sse41_packusdw:
2837 case Intrinsic::x86_avx2_packusdw:
2838 case Intrinsic::x86_avx2_packuswb:
2839 case Intrinsic::x86_avx512_packusdw_512:
2840 case Intrinsic::x86_avx512_packuswb_512:
2841 if (Value *V = simplifyX86pack(*II, Builder, false))
2842 return replaceInstUsesWith(*II, V);
2843 break;
2845 case Intrinsic::x86_pclmulqdq:
2846 case Intrinsic::x86_pclmulqdq_256:
2847 case Intrinsic::x86_pclmulqdq_512: {
2848 if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
2849 unsigned Imm = C->getZExtValue();
2851 bool MadeChange = false;
2852 Value *Arg0 = II->getArgOperand(0);
2853 Value *Arg1 = II->getArgOperand(1);
2854 unsigned VWidth = Arg0->getType()->getVectorNumElements();
2856 APInt UndefElts1(VWidth, 0);
2857 APInt DemandedElts1 = APInt::getSplat(VWidth,
2858 APInt(2, (Imm & 0x01) ? 2 : 1));
2859 if (Value *V = SimplifyDemandedVectorElts(Arg0, DemandedElts1,
2860 UndefElts1)) {
2861 II->setArgOperand(0, V);
2862 MadeChange = true;
2865 APInt UndefElts2(VWidth, 0);
2866 APInt DemandedElts2 = APInt::getSplat(VWidth,
2867 APInt(2, (Imm & 0x10) ? 2 : 1));
2868 if (Value *V = SimplifyDemandedVectorElts(Arg1, DemandedElts2,
2869 UndefElts2)) {
2870 II->setArgOperand(1, V);
2871 MadeChange = true;
2874 // If the demanded elements of either input are undef, the result is zero.
2875 if (DemandedElts1.isSubsetOf(UndefElts1) ||
2876 DemandedElts2.isSubsetOf(UndefElts2))
2877 return replaceInstUsesWith(*II,
2878 ConstantAggregateZero::get(II->getType()));
2880 if (MadeChange)
2881 return II;
2883 break;
2886 case Intrinsic::x86_sse41_insertps:
2887 if (Value *V = simplifyX86insertps(*II, Builder))
2888 return replaceInstUsesWith(*II, V);
2889 break;
2891 case Intrinsic::x86_sse4a_extrq: {
2892 Value *Op0 = II->getArgOperand(0);
2893 Value *Op1 = II->getArgOperand(1);
2894 unsigned VWidth0 = Op0->getType()->getVectorNumElements();
2895 unsigned VWidth1 = Op1->getType()->getVectorNumElements();
2896 assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
2897 Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
2898 VWidth1 == 16 && "Unexpected operand sizes");
2900 // See if we're dealing with constant values.
2901 Constant *C1 = dyn_cast<Constant>(Op1);
2902 ConstantInt *CILength =
2903 C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)0))
2904 : nullptr;
2905 ConstantInt *CIIndex =
2906 C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)1))
2907 : nullptr;
2909 // Attempt to simplify to a constant, shuffle vector or EXTRQI call.
2910 if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
2911 return replaceInstUsesWith(*II, V);
2913 // EXTRQ only uses the lowest 64-bits of the first 128-bit vector
2914 // operand and the lowest 16-bits of the second.
2915 bool MadeChange = false;
2916 if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
2917 II->setArgOperand(0, V);
2918 MadeChange = true;
2920 if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 2)) {
2921 II->setArgOperand(1, V);
2922 MadeChange = true;
2924 if (MadeChange)
2925 return II;
2926 break;
2929 case Intrinsic::x86_sse4a_extrqi: {
2930 // EXTRQI: Extract Length bits starting from Index. Zero pad the remaining
2931 // bits of the lower 64-bits. The upper 64-bits are undefined.
2932 Value *Op0 = II->getArgOperand(0);
2933 unsigned VWidth = Op0->getType()->getVectorNumElements();
2934 assert(Op0->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
2935 "Unexpected operand size");
2937 // See if we're dealing with constant values.
2938 ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(1));
2939 ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(2));
2941 // Attempt to simplify to a constant or shuffle vector.
2942 if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
2943 return replaceInstUsesWith(*II, V);
2945 // EXTRQI only uses the lowest 64-bits of the first 128-bit vector
2946 // operand.
2947 if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
2948 II->setArgOperand(0, V);
2949 return II;
2951 break;
2954 case Intrinsic::x86_sse4a_insertq: {
2955 Value *Op0 = II->getArgOperand(0);
2956 Value *Op1 = II->getArgOperand(1);
2957 unsigned VWidth = Op0->getType()->getVectorNumElements();
2958 assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
2959 Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
2960 Op1->getType()->getVectorNumElements() == 2 &&
2961 "Unexpected operand size");
2963 // See if we're dealing with constant values.
2964 Constant *C1 = dyn_cast<Constant>(Op1);
2965 ConstantInt *CI11 =
2966 C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)1))
2967 : nullptr;
2969 // Attempt to simplify to a constant, shuffle vector or INSERTQI call.
2970 if (CI11) {
2971 const APInt &V11 = CI11->getValue();
2972 APInt Len = V11.zextOrTrunc(6);
2973 APInt Idx = V11.lshr(8).zextOrTrunc(6);
2974 if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
2975 return replaceInstUsesWith(*II, V);
2978 // INSERTQ only uses the lowest 64-bits of the first 128-bit vector
2979 // operand.
2980 if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
2981 II->setArgOperand(0, V);
2982 return II;
2984 break;
2987 case Intrinsic::x86_sse4a_insertqi: {
2988 // INSERTQI: Extract lowest Length bits from lower half of second source and
2989 // insert over first source starting at Index bit. The upper 64-bits are
2990 // undefined.
2991 Value *Op0 = II->getArgOperand(0);
2992 Value *Op1 = II->getArgOperand(1);
2993 unsigned VWidth0 = Op0->getType()->getVectorNumElements();
2994 unsigned VWidth1 = Op1->getType()->getVectorNumElements();
2995 assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
2996 Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
2997 VWidth1 == 2 && "Unexpected operand sizes");
2999 // See if we're dealing with constant values.
3000 ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(2));
3001 ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(3));
3003 // Attempt to simplify to a constant or shuffle vector.
3004 if (CILength && CIIndex) {
3005 APInt Len = CILength->getValue().zextOrTrunc(6);
3006 APInt Idx = CIIndex->getValue().zextOrTrunc(6);
3007 if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
3008 return replaceInstUsesWith(*II, V);
3011 // INSERTQI only uses the lowest 64-bits of the first two 128-bit vector
3012 // operands.
3013 bool MadeChange = false;
3014 if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
3015 II->setArgOperand(0, V);
3016 MadeChange = true;
3018 if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 1)) {
3019 II->setArgOperand(1, V);
3020 MadeChange = true;
3022 if (MadeChange)
3023 return II;
3024 break;
3027 case Intrinsic::x86_sse41_pblendvb:
3028 case Intrinsic::x86_sse41_blendvps:
3029 case Intrinsic::x86_sse41_blendvpd:
3030 case Intrinsic::x86_avx_blendv_ps_256:
3031 case Intrinsic::x86_avx_blendv_pd_256:
3032 case Intrinsic::x86_avx2_pblendvb: {
3033 // fold (blend A, A, Mask) -> A
3034 Value *Op0 = II->getArgOperand(0);
3035 Value *Op1 = II->getArgOperand(1);
3036 Value *Mask = II->getArgOperand(2);
3037 if (Op0 == Op1)
3038 return replaceInstUsesWith(CI, Op0);
3040 // Zero Mask - select 1st argument.
3041 if (isa<ConstantAggregateZero>(Mask))
3042 return replaceInstUsesWith(CI, Op0);
3044 // Constant Mask - select 1st/2nd argument lane based on top bit of mask.
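// e.g. a pblendvb whose constant mask has sign bits <1,0,1,0,...> becomes
// 'select <16 x i1> <1,0,1,0,...>, %op1, %op0'.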
3045 if (auto *ConstantMask = dyn_cast<ConstantDataVector>(Mask)) {
3046 Constant *NewSelector = getNegativeIsTrueBoolVec(ConstantMask);
3047 return SelectInst::Create(NewSelector, Op1, Op0, "blendv");
3050 // Convert to a vector select if we can bypass casts and find a boolean
3051 // vector condition value.
3052 Value *BoolVec;
3053 Mask = peekThroughBitcast(Mask);
3054 if (match(Mask, m_SExt(m_Value(BoolVec))) &&
3055 BoolVec->getType()->isVectorTy() &&
3056 BoolVec->getType()->getScalarSizeInBits() == 1) {
3057 assert(Mask->getType()->getPrimitiveSizeInBits() ==
3058 II->getType()->getPrimitiveSizeInBits() &&
3059 "Not expecting mask and operands with different sizes");
3061 unsigned NumMaskElts = Mask->getType()->getVectorNumElements();
3062 unsigned NumOperandElts = II->getType()->getVectorNumElements();
3063 if (NumMaskElts == NumOperandElts)
3064 return SelectInst::Create(BoolVec, Op1, Op0);
3066 // If the mask has fewer elements than the operands, each mask bit maps to
3067 // multiple elements of the operands. Bitcast back and forth.
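// e.g. a <2 x i1> condition sign-extended to <2 x i64> and bitcast to the
// <16 x i8> mask of a pblendvb selects 8-byte halves, so select on <2 x i64>
// views of the operands and bitcast the result back.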
3068 if (NumMaskElts < NumOperandElts) {
3069 Value *CastOp0 = Builder.CreateBitCast(Op0, Mask->getType());
3070 Value *CastOp1 = Builder.CreateBitCast(Op1, Mask->getType());
3071 Value *Sel = Builder.CreateSelect(BoolVec, CastOp1, CastOp0);
3072 return new BitCastInst(Sel, II->getType());
3076 break;
3079 case Intrinsic::x86_ssse3_pshuf_b_128:
3080 case Intrinsic::x86_avx2_pshuf_b:
3081 case Intrinsic::x86_avx512_pshuf_b_512:
3082 if (Value *V = simplifyX86pshufb(*II, Builder))
3083 return replaceInstUsesWith(*II, V);
3084 break;
3086 case Intrinsic::x86_avx_vpermilvar_ps:
3087 case Intrinsic::x86_avx_vpermilvar_ps_256:
3088 case Intrinsic::x86_avx512_vpermilvar_ps_512:
3089 case Intrinsic::x86_avx_vpermilvar_pd:
3090 case Intrinsic::x86_avx_vpermilvar_pd_256:
3091 case Intrinsic::x86_avx512_vpermilvar_pd_512:
3092 if (Value *V = simplifyX86vpermilvar(*II, Builder))
3093 return replaceInstUsesWith(*II, V);
3094 break;
3096 case Intrinsic::x86_avx2_permd:
3097 case Intrinsic::x86_avx2_permps:
3098 case Intrinsic::x86_avx512_permvar_df_256:
3099 case Intrinsic::x86_avx512_permvar_df_512:
3100 case Intrinsic::x86_avx512_permvar_di_256:
3101 case Intrinsic::x86_avx512_permvar_di_512:
3102 case Intrinsic::x86_avx512_permvar_hi_128:
3103 case Intrinsic::x86_avx512_permvar_hi_256:
3104 case Intrinsic::x86_avx512_permvar_hi_512:
3105 case Intrinsic::x86_avx512_permvar_qi_128:
3106 case Intrinsic::x86_avx512_permvar_qi_256:
3107 case Intrinsic::x86_avx512_permvar_qi_512:
3108 case Intrinsic::x86_avx512_permvar_sf_512:
3109 case Intrinsic::x86_avx512_permvar_si_512:
3110 if (Value *V = simplifyX86vpermv(*II, Builder))
3111 return replaceInstUsesWith(*II, V);
3112 break;
3114 case Intrinsic::x86_avx_maskload_ps:
3115 case Intrinsic::x86_avx_maskload_pd:
3116 case Intrinsic::x86_avx_maskload_ps_256:
3117 case Intrinsic::x86_avx_maskload_pd_256:
3118 case Intrinsic::x86_avx2_maskload_d:
3119 case Intrinsic::x86_avx2_maskload_q:
3120 case Intrinsic::x86_avx2_maskload_d_256:
3121 case Intrinsic::x86_avx2_maskload_q_256:
3122 if (Instruction *I = simplifyX86MaskedLoad(*II, *this))
3123 return I;
3124 break;
3126 case Intrinsic::x86_sse2_maskmov_dqu:
3127 case Intrinsic::x86_avx_maskstore_ps:
3128 case Intrinsic::x86_avx_maskstore_pd:
3129 case Intrinsic::x86_avx_maskstore_ps_256:
3130 case Intrinsic::x86_avx_maskstore_pd_256:
3131 case Intrinsic::x86_avx2_maskstore_d:
3132 case Intrinsic::x86_avx2_maskstore_q:
3133 case Intrinsic::x86_avx2_maskstore_d_256:
3134 case Intrinsic::x86_avx2_maskstore_q_256:
3135 if (simplifyX86MaskedStore(*II, *this))
3136 return nullptr;
3137 break;
3139 case Intrinsic::x86_addcarry_32:
3140 case Intrinsic::x86_addcarry_64:
3141 if (Value *V = simplifyX86addcarry(*II, Builder))
3142 return replaceInstUsesWith(*II, V);
3143 break;
3145 case Intrinsic::ppc_altivec_vperm:
3146 // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
3147 // Note that ppc_altivec_vperm has a big-endian bias, so when creating
3148 // a vectorshuffle for little endian, we must undo the transformation
3149 // performed on vec_perm in altivec.h. That is, we must complement
3150 // the permutation mask with respect to 31 and reverse the order of
3151 // V1 and V2.
3152 if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
3153 assert(Mask->getType()->getVectorNumElements() == 16 &&
3154 "Bad type for intrinsic!");
3156 // Check that all of the elements are integer constants or undefs.
3157 bool AllEltsOk = true;
3158 for (unsigned i = 0; i != 16; ++i) {
3159 Constant *Elt = Mask->getAggregateElement(i);
3160 if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
3161 AllEltsOk = false;
3162 break;
3166 if (AllEltsOk) {
3167 // Cast the input vectors to byte vectors.
3168 Value *Op0 = Builder.CreateBitCast(II->getArgOperand(0),
3169 Mask->getType());
3170 Value *Op1 = Builder.CreateBitCast(II->getArgOperand(1),
3171 Mask->getType());
3172 Value *Result = UndefValue::get(Op0->getType());
3174 // Only extract each element once.
3175 Value *ExtractedElts[32];
3176 memset(ExtractedElts, 0, sizeof(ExtractedElts));
3178 for (unsigned i = 0; i != 16; ++i) {
3179 if (isa<UndefValue>(Mask->getAggregateElement(i)))
3180 continue;
3181 unsigned Idx =
3182 cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
3183 Idx &= 31; // Match the hardware behavior.
3184 if (DL.isLittleEndian())
3185 Idx = 31 - Idx;
3187 if (!ExtractedElts[Idx]) {
3188 Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
3189 Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
3190 ExtractedElts[Idx] =
3191 Builder.CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
3192 Builder.getInt32(Idx&15));
3195 // Insert this value into the result vector.
3196 Result = Builder.CreateInsertElement(Result, ExtractedElts[Idx],
3197 Builder.getInt32(i));
3199 return CastInst::Create(Instruction::BitCast, Result, CI.getType());
3202 break;
3204 case Intrinsic::arm_neon_vld1: {
3205 unsigned MemAlign = getKnownAlignment(II->getArgOperand(0),
3206 DL, II, &AC, &DT);
3207 if (Value *V = simplifyNeonVld1(*II, MemAlign, Builder))
3208 return replaceInstUsesWith(*II, V);
3209 break;
3212 case Intrinsic::arm_neon_vld2:
3213 case Intrinsic::arm_neon_vld3:
3214 case Intrinsic::arm_neon_vld4:
3215 case Intrinsic::arm_neon_vld2lane:
3216 case Intrinsic::arm_neon_vld3lane:
3217 case Intrinsic::arm_neon_vld4lane:
3218 case Intrinsic::arm_neon_vst1:
3219 case Intrinsic::arm_neon_vst2:
3220 case Intrinsic::arm_neon_vst3:
3221 case Intrinsic::arm_neon_vst4:
3222 case Intrinsic::arm_neon_vst2lane:
3223 case Intrinsic::arm_neon_vst3lane:
3224 case Intrinsic::arm_neon_vst4lane: {
3225 unsigned MemAlign =
3226 getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
3227 unsigned AlignArg = II->getNumArgOperands() - 1;
3228 ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
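// If the intrinsic's alignment operand understates what can be proven about
// the pointer, raise it to the known alignment.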
3229 if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
3230 II->setArgOperand(AlignArg,
3231 ConstantInt::get(Type::getInt32Ty(II->getContext()),
3232 MemAlign, false));
3233 return II;
3235 break;
3238 case Intrinsic::arm_neon_vtbl1:
3239 case Intrinsic::aarch64_neon_tbl1:
3240 if (Value *V = simplifyNeonTbl1(*II, Builder))
3241 return replaceInstUsesWith(*II, V);
3242 break;
3244 case Intrinsic::arm_neon_vmulls:
3245 case Intrinsic::arm_neon_vmullu:
3246 case Intrinsic::aarch64_neon_smull:
3247 case Intrinsic::aarch64_neon_umull: {
3248 Value *Arg0 = II->getArgOperand(0);
3249 Value *Arg1 = II->getArgOperand(1);
3251 // Handle mul by zero first:
3252 if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
3253 return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
3256 // Check for constant LHS & RHS - in this case we just simplify.
3257 bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
3258 IID == Intrinsic::aarch64_neon_umull);
3259 VectorType *NewVT = cast<VectorType>(II->getType());
3260 if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
3261 if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
3262 CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
3263 CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);
3265 return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
3268 // Couldn't simplify - canonicalize constant to the RHS.
3269 std::swap(Arg0, Arg1);
3272 // Handle mul by one:
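// vmull(x, splat(1)) just widens x, so a sext (signed variants) or zext
// (unsigned variants) of the narrow operand suffices.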
3273 if (Constant *CV1 = dyn_cast<Constant>(Arg1))
3274 if (ConstantInt *Splat =
3275 dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
3276 if (Splat->isOne())
3277 return CastInst::CreateIntegerCast(Arg0, II->getType(),
3278 /*isSigned=*/!Zext);
3280 break;
3282 case Intrinsic::arm_neon_aesd:
3283 case Intrinsic::arm_neon_aese:
3284 case Intrinsic::aarch64_crypto_aesd:
3285 case Intrinsic::aarch64_crypto_aese: {
3286 Value *DataArg = II->getArgOperand(0);
3287 Value *KeyArg = II->getArgOperand(1);
3289 // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR
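// AESE/AESD XOR their two inputs (the AddRoundKey step) before substitution,
// so aese(data ^ key, 0) is equivalent to aese(data, key).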
3290 Value *Data, *Key;
3291 if (match(KeyArg, m_ZeroInt()) &&
3292 match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) {
3293 II->setArgOperand(0, Data);
3294 II->setArgOperand(1, Key);
3295 return II;
3297 break;
3299 case Intrinsic::amdgcn_rcp: {
3300 Value *Src = II->getArgOperand(0);
3302 // TODO: Move to ConstantFolding/InstSimplify?
3303 if (isa<UndefValue>(Src))
3304 return replaceInstUsesWith(CI, Src);
3306 if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
3307 const APFloat &ArgVal = C->getValueAPF();
3308 APFloat Val(ArgVal.getSemantics(), 1.0);
3309 APFloat::opStatus Status = Val.divide(ArgVal,
3310 APFloat::rmNearestTiesToEven);
3311 // Only do this if it was exact and therefore not dependent on the
3312 // rounding mode.
3313 if (Status == APFloat::opOK)
3314 return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
3317 break;
3319 case Intrinsic::amdgcn_rsq: {
3320 Value *Src = II->getArgOperand(0);
3322 // TODO: Move to ConstantFolding/InstSimplify?
3323 if (isa<UndefValue>(Src))
3324 return replaceInstUsesWith(CI, Src);
3325 break;
3327 case Intrinsic::amdgcn_frexp_mant:
3328 case Intrinsic::amdgcn_frexp_exp: {
3329 Value *Src = II->getArgOperand(0);
3330 if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
3331 int Exp;
3332 APFloat Significand = frexp(C->getValueAPF(), Exp,
3333 APFloat::rmNearestTiesToEven);
3335 if (IID == Intrinsic::amdgcn_frexp_mant) {
3336 return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(),
3337 Significand));
3340 // Match instruction special case behavior.
3341 if (Exp == APFloat::IEK_NaN || Exp == APFloat::IEK_Inf)
3342 Exp = 0;
3344 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Exp));
3347 if (isa<UndefValue>(Src))
3348 return replaceInstUsesWith(CI, UndefValue::get(II->getType()));
3350 break;
3352 case Intrinsic::amdgcn_class: {
3353 enum {
3354 S_NAN = 1 << 0, // Signaling NaN
3355 Q_NAN = 1 << 1, // Quiet NaN
3356 N_INFINITY = 1 << 2, // Negative infinity
3357 N_NORMAL = 1 << 3, // Negative normal
3358 N_SUBNORMAL = 1 << 4, // Negative subnormal
3359 N_ZERO = 1 << 5, // Negative zero
3360 P_ZERO = 1 << 6, // Positive zero
3361 P_SUBNORMAL = 1 << 7, // Positive subnormal
3362 P_NORMAL = 1 << 8, // Positive normal
3363 P_INFINITY = 1 << 9 // Positive infinity
3366 const uint32_t FullMask = S_NAN | Q_NAN | N_INFINITY | N_NORMAL |
3367 N_SUBNORMAL | N_ZERO | P_ZERO | P_SUBNORMAL | P_NORMAL | P_INFINITY;
3369 Value *Src0 = II->getArgOperand(0);
3370 Value *Src1 = II->getArgOperand(1);
3371 const ConstantInt *CMask = dyn_cast<ConstantInt>(Src1);
3372 if (!CMask) {
3373 if (isa<UndefValue>(Src0))
3374 return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3376 if (isa<UndefValue>(Src1))
3377 return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), false));
3378 break;
3381 uint32_t Mask = CMask->getZExtValue();
3383 // If all tests are made, it doesn't matter what the value is.
3384 if ((Mask & FullMask) == FullMask)
3385 return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), true));
3387 if ((Mask & FullMask) == 0)
3388 return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), false));
3390 if (Mask == (S_NAN | Q_NAN)) {
3391 // Equivalent of isnan. Replace with standard fcmp.
3392 Value *FCmp = Builder.CreateFCmpUNO(Src0, Src0);
3393 FCmp->takeName(II);
3394 return replaceInstUsesWith(*II, FCmp);
3397 if (Mask == (N_ZERO | P_ZERO)) {
3398 // Equivalent of == 0.
3399 Value *FCmp = Builder.CreateFCmpOEQ(
3400 Src0, ConstantFP::get(Src0->getType(), 0.0));
3402 FCmp->takeName(II);
3403 return replaceInstUsesWith(*II, FCmp);
3406 // fp_class (nnan x), qnan|snan|other -> fp_class (nnan x), other
3407 if (((Mask & S_NAN) || (Mask & Q_NAN)) && isKnownNeverNaN(Src0, &TLI)) {
3408 II->setArgOperand(1, ConstantInt::get(Src1->getType(),
3409 Mask & ~(S_NAN | Q_NAN)));
3410 return II;
3413 const ConstantFP *CVal = dyn_cast<ConstantFP>(Src0);
3414 if (!CVal) {
3415 if (isa<UndefValue>(Src0))
3416 return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3418 // Clamp mask to used bits
3419 if ((Mask & FullMask) != Mask) {
3420 CallInst *NewCall = Builder.CreateCall(II->getCalledFunction(),
3421 { Src0, ConstantInt::get(Src1->getType(), Mask & FullMask) }
3424 NewCall->takeName(II);
3425 return replaceInstUsesWith(*II, NewCall);
3428 break;
3431 const APFloat &Val = CVal->getValueAPF();
3433 bool Result =
3434 ((Mask & S_NAN) && Val.isNaN() && Val.isSignaling()) ||
3435 ((Mask & Q_NAN) && Val.isNaN() && !Val.isSignaling()) ||
3436 ((Mask & N_INFINITY) && Val.isInfinity() && Val.isNegative()) ||
3437 ((Mask & N_NORMAL) && Val.isNormal() && Val.isNegative()) ||
3438 ((Mask & N_SUBNORMAL) && Val.isDenormal() && Val.isNegative()) ||
3439 ((Mask & N_ZERO) && Val.isZero() && Val.isNegative()) ||
3440 ((Mask & P_ZERO) && Val.isZero() && !Val.isNegative()) ||
3441 ((Mask & P_SUBNORMAL) && Val.isDenormal() && !Val.isNegative()) ||
3442 ((Mask & P_NORMAL) && Val.isNormal() && !Val.isNegative()) ||
3443 ((Mask & P_INFINITY) && Val.isInfinity() && !Val.isNegative());
3445 return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), Result));
3447 case Intrinsic::amdgcn_cvt_pkrtz: {
3448 Value *Src0 = II->getArgOperand(0);
3449 Value *Src1 = II->getArgOperand(1);
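// cvt_pkrtz packs two floats into a <2 x half> using round-toward-zero, so
// constant inputs can be converted at compile time.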
3450 if (const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
3451 if (const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
3452 const fltSemantics &HalfSem
3453 = II->getType()->getScalarType()->getFltSemantics();
3454 bool LosesInfo;
3455 APFloat Val0 = C0->getValueAPF();
3456 APFloat Val1 = C1->getValueAPF();
3457 Val0.convert(HalfSem, APFloat::rmTowardZero, &LosesInfo);
3458 Val1.convert(HalfSem, APFloat::rmTowardZero, &LosesInfo);
3460 Constant *Folded = ConstantVector::get({
3461 ConstantFP::get(II->getContext(), Val0),
3462 ConstantFP::get(II->getContext(), Val1) });
3463 return replaceInstUsesWith(*II, Folded);
3467 if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1))
3468 return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3470 break;
3472 case Intrinsic::amdgcn_cvt_pknorm_i16:
3473 case Intrinsic::amdgcn_cvt_pknorm_u16:
3474 case Intrinsic::amdgcn_cvt_pk_i16:
3475 case Intrinsic::amdgcn_cvt_pk_u16: {
3476 Value *Src0 = II->getArgOperand(0);
3477 Value *Src1 = II->getArgOperand(1);
3479 if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1))
3480 return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3482 break;
3484 case Intrinsic::amdgcn_ubfe:
3485 case Intrinsic::amdgcn_sbfe: {
3486 // Decompose simple cases into standard shifts.
3487 Value *Src = II->getArgOperand(0);
3488 if (isa<UndefValue>(Src))
3489 return replaceInstUsesWith(*II, Src);
3491 unsigned Width;
3492 Type *Ty = II->getType();
3493 unsigned IntSize = Ty->getIntegerBitWidth();
3495 ConstantInt *CWidth = dyn_cast<ConstantInt>(II->getArgOperand(2));
3496 if (CWidth) {
3497 Width = CWidth->getZExtValue();
3498 if ((Width & (IntSize - 1)) == 0)
3499 return replaceInstUsesWith(*II, ConstantInt::getNullValue(Ty));
3501 if (Width >= IntSize) {
3502 // Hardware ignores high bits, so remove those.
3503 II->setArgOperand(2, ConstantInt::get(CWidth->getType(),
3504 Width & (IntSize - 1)));
3505 return II;
3509 unsigned Offset;
3510 ConstantInt *COffset = dyn_cast<ConstantInt>(II->getArgOperand(1));
3511 if (COffset) {
3512 Offset = COffset->getZExtValue();
3513 if (Offset >= IntSize) {
3514 II->setArgOperand(1, ConstantInt::get(COffset->getType(),
3515 Offset & (IntSize - 1)));
3516 return II;
3520 bool Signed = IID == Intrinsic::amdgcn_sbfe;
3522 if (!CWidth || !COffset)
3523 break;
3525 // The case of Width == 0 is handled above, which makes this transformation
3526 // safe.  If Width == 0, then the ashr and lshr instructions become poison
3527 // values since the shift amount would be equal to the bit size.
3528 assert(Width != 0);
3530 // TODO: This allows folding to undef when the hardware has specific
3531 // behavior?
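// bfe(src, offset, width) extracts 'width' bits starting at 'offset': shift
// the field to the top with shl, then shift it back down with ashr (sbfe) or
// lshr (ubfe).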
3532 if (Offset + Width < IntSize) {
3533 Value *Shl = Builder.CreateShl(Src, IntSize - Offset - Width);
3534 Value *RightShift = Signed ? Builder.CreateAShr(Shl, IntSize - Width)
3535 : Builder.CreateLShr(Shl, IntSize - Width);
3536 RightShift->takeName(II);
3537 return replaceInstUsesWith(*II, RightShift);
3540 Value *RightShift = Signed ? Builder.CreateAShr(Src, Offset)
3541 : Builder.CreateLShr(Src, Offset);
3543 RightShift->takeName(II);
3544 return replaceInstUsesWith(*II, RightShift);
3546 case Intrinsic::amdgcn_exp:
3547 case Intrinsic::amdgcn_exp_compr: {
3548 ConstantInt *En = cast<ConstantInt>(II->getArgOperand(1));
3549 unsigned EnBits = En->getZExtValue();
3550 if (EnBits == 0xf)
3551 break; // All inputs enabled.
3553 bool IsCompr = IID == Intrinsic::amdgcn_exp_compr;
3554 bool Changed = false;
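// Sources whose enable bits are clear are never read by the export, so they
// can be replaced with undef.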
3555 for (int I = 0; I < (IsCompr ? 2 : 4); ++I) {
3556 if ((!IsCompr && (EnBits & (1 << I)) == 0) ||
3557 (IsCompr && ((EnBits & (0x3 << (2 * I))) == 0))) {
3558 Value *Src = II->getArgOperand(I + 2);
3559 if (!isa<UndefValue>(Src)) {
3560 II->setArgOperand(I + 2, UndefValue::get(Src->getType()));
3561 Changed = true;
3566 if (Changed)
3567 return II;
3569 break;
3571 case Intrinsic::amdgcn_fmed3: {
3572 // Note this does not preserve proper sNaN behavior if IEEE-mode is enabled
3573 // for the shader.
3575 Value *Src0 = II->getArgOperand(0);
3576 Value *Src1 = II->getArgOperand(1);
3577 Value *Src2 = II->getArgOperand(2);
3579 // Checking for NaN before canonicalization provides better fidelity when
3580 // mapping other operations onto fmed3 since the order of operands is
3581 // unchanged.
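// If any operand is a NaN or undef, the three-way median collapses to a
// two-input min or max of the remaining operands.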
3582 CallInst *NewCall = nullptr;
3583 if (match(Src0, m_NaN()) || isa<UndefValue>(Src0)) {
3584 NewCall = Builder.CreateMinNum(Src1, Src2);
3585 } else if (match(Src1, m_NaN()) || isa<UndefValue>(Src1)) {
3586 NewCall = Builder.CreateMinNum(Src0, Src2);
3587 } else if (match(Src2, m_NaN()) || isa<UndefValue>(Src2)) {
3588 NewCall = Builder.CreateMaxNum(Src0, Src1);
3591 if (NewCall) {
3592 NewCall->copyFastMathFlags(II);
3593 NewCall->takeName(II);
3594 return replaceInstUsesWith(*II, NewCall);
3597 bool Swap = false;
3598 // Canonicalize constants to RHS operands.
3600 // fmed3(c0, x, c1) -> fmed3(x, c0, c1)
3601 if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
3602 std::swap(Src0, Src1);
3603 Swap = true;
3606 if (isa<Constant>(Src1) && !isa<Constant>(Src2)) {
3607 std::swap(Src1, Src2);
3608 Swap = true;
3611 if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
3612 std::swap(Src0, Src1);
3613 Swap = true;
3616 if (Swap) {
3617 II->setArgOperand(0, Src0);
3618 II->setArgOperand(1, Src1);
3619 II->setArgOperand(2, Src2);
3620 return II;
3623 if (const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
3624 if (const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
3625 if (const ConstantFP *C2 = dyn_cast<ConstantFP>(Src2)) {
3626 APFloat Result = fmed3AMDGCN(C0->getValueAPF(), C1->getValueAPF(),
3627 C2->getValueAPF());
3628 return replaceInstUsesWith(*II,
3629 ConstantFP::get(Builder.getContext(), Result));
3634 break;
3636 case Intrinsic::amdgcn_icmp:
3637 case Intrinsic::amdgcn_fcmp: {
3638 const ConstantInt *CC = cast<ConstantInt>(II->getArgOperand(2));
3639 // Guard against invalid arguments.
3640 int64_t CCVal = CC->getZExtValue();
3641 bool IsInteger = IID == Intrinsic::amdgcn_icmp;
3642 if ((IsInteger && (CCVal < CmpInst::FIRST_ICMP_PREDICATE ||
3643 CCVal > CmpInst::LAST_ICMP_PREDICATE)) ||
3644 (!IsInteger && (CCVal < CmpInst::FIRST_FCMP_PREDICATE ||
3645 CCVal > CmpInst::LAST_FCMP_PREDICATE)))
3646 break;
3648 Value *Src0 = II->getArgOperand(0);
3649 Value *Src1 = II->getArgOperand(1);
3651 if (auto *CSrc0 = dyn_cast<Constant>(Src0)) {
3652 if (auto *CSrc1 = dyn_cast<Constant>(Src1)) {
3653 Constant *CCmp = ConstantExpr::getCompare(CCVal, CSrc0, CSrc1);
3654 if (CCmp->isNullValue()) {
3655 return replaceInstUsesWith(
3656 *II, ConstantExpr::getSExt(CCmp, II->getType()));
3659 // The result of V_ICMP/V_FCMP assembly instructions (which this
3660 // intrinsic exposes) is one bit per thread, masked with the EXEC
3661 // register (which contains the bitmask of live threads). So a
3662 // comparison that always returns true is the same as a read of the
3663 // EXEC register.
3664 Function *NewF = Intrinsic::getDeclaration(
3665 II->getModule(), Intrinsic::read_register, II->getType());
3666 Metadata *MDArgs[] = {MDString::get(II->getContext(), "exec")};
3667 MDNode *MD = MDNode::get(II->getContext(), MDArgs);
3668 Value *Args[] = {MetadataAsValue::get(II->getContext(), MD)};
3669 CallInst *NewCall = Builder.CreateCall(NewF, Args);
3670 NewCall->addAttribute(AttributeList::FunctionIndex,
3671 Attribute::Convergent);
3672 NewCall->takeName(II);
3673 return replaceInstUsesWith(*II, NewCall);
3676 // Canonicalize constants to RHS.
3677 CmpInst::Predicate SwapPred
3678 = CmpInst::getSwappedPredicate(static_cast<CmpInst::Predicate>(CCVal));
3679 II->setArgOperand(0, Src1);
3680 II->setArgOperand(1, Src0);
3681 II->setArgOperand(2, ConstantInt::get(CC->getType(),
3682 static_cast<int>(SwapPred)));
3683 return II;
3686 if (CCVal != CmpInst::ICMP_EQ && CCVal != CmpInst::ICMP_NE)
3687 break;
3689 // Canonicalize compare eq with true value to compare != 0
3690 // llvm.amdgcn.icmp(zext (i1 x), 1, eq)
3691 // -> llvm.amdgcn.icmp(zext (i1 x), 0, ne)
3692 // llvm.amdgcn.icmp(sext (i1 x), -1, eq)
3693 // -> llvm.amdgcn.icmp(sext (i1 x), 0, ne)
3694 Value *ExtSrc;
3695 if (CCVal == CmpInst::ICMP_EQ &&
3696 ((match(Src1, m_One()) && match(Src0, m_ZExt(m_Value(ExtSrc)))) ||
3697 (match(Src1, m_AllOnes()) && match(Src0, m_SExt(m_Value(ExtSrc))))) &&
3698 ExtSrc->getType()->isIntegerTy(1)) {
3699 II->setArgOperand(1, ConstantInt::getNullValue(Src1->getType()));
3700 II->setArgOperand(2, ConstantInt::get(CC->getType(), CmpInst::ICMP_NE));
3701 return II;
3704 CmpInst::Predicate SrcPred;
3705 Value *SrcLHS;
3706 Value *SrcRHS;
3708 // Fold compare eq/ne with 0 from a compare result as the predicate to the
3709 // intrinsic. The typical use is a wave vote function in the library, which
3710 // will be fed from a user code condition compared with 0. Fold in the
3711 // redundant compare.
3713 // llvm.amdgcn.icmp([sz]ext ([if]cmp pred a, b), 0, ne)
3714 // -> llvm.amdgcn.[if]cmp(a, b, pred)
3716 // llvm.amdgcn.icmp([sz]ext ([if]cmp pred a, b), 0, eq)
3717 // -> llvm.amdgcn.[if]cmp(a, b, inv pred)
3718 if (match(Src1, m_Zero()) &&
3719 match(Src0,
3720 m_ZExtOrSExt(m_Cmp(SrcPred, m_Value(SrcLHS), m_Value(SrcRHS))))) {
3721 if (CCVal == CmpInst::ICMP_EQ)
3722 SrcPred = CmpInst::getInversePredicate(SrcPred);
3724 Intrinsic::ID NewIID = CmpInst::isFPPredicate(SrcPred) ?
3725 Intrinsic::amdgcn_fcmp : Intrinsic::amdgcn_icmp;
3727 Type *Ty = SrcLHS->getType();
3728 if (auto *CmpType = dyn_cast<IntegerType>(Ty)) {
3729 // Promote to next legal integer type.
3730 unsigned Width = CmpType->getBitWidth();
3731 unsigned NewWidth = Width;
3733 // Don't do anything for i1 comparisons.
3734 if (Width == 1)
3735 break;
3737 if (Width <= 16)
3738 NewWidth = 16;
3739 else if (Width <= 32)
3740 NewWidth = 32;
3741 else if (Width <= 64)
3742 NewWidth = 64;
3743 else if (Width > 64)
3744 break; // Can't handle this.
3746 if (Width != NewWidth) {
3747 IntegerType *CmpTy = Builder.getIntNTy(NewWidth);
3748 if (CmpInst::isSigned(SrcPred)) {
3749 SrcLHS = Builder.CreateSExt(SrcLHS, CmpTy);
3750 SrcRHS = Builder.CreateSExt(SrcRHS, CmpTy);
3751 } else {
3752 SrcLHS = Builder.CreateZExt(SrcLHS, CmpTy);
3753 SrcRHS = Builder.CreateZExt(SrcRHS, CmpTy);
3756 } else if (!Ty->isFloatTy() && !Ty->isDoubleTy() && !Ty->isHalfTy())
3757 break;
3759 Function *NewF =
3760 Intrinsic::getDeclaration(II->getModule(), NewIID,
3761 { II->getType(),
3762 SrcLHS->getType() });
3763 Value *Args[] = { SrcLHS, SrcRHS,
3764 ConstantInt::get(CC->getType(), SrcPred) };
3765 CallInst *NewCall = Builder.CreateCall(NewF, Args);
3766 NewCall->takeName(II);
3767 return replaceInstUsesWith(*II, NewCall);
3770 break;
3772 case Intrinsic::amdgcn_wqm_vote: {
3773 // wqm_vote is identity when the argument is constant.
3774 if (!isa<Constant>(II->getArgOperand(0)))
3775 break;
3777 return replaceInstUsesWith(*II, II->getArgOperand(0));
3779 case Intrinsic::amdgcn_kill: {
3780 const ConstantInt *C = dyn_cast<ConstantInt>(II->getArgOperand(0));
3781 if (!C || !C->getZExtValue())
3782 break;
3784 // amdgcn.kill(i1 1) is a no-op
3785 return eraseInstFromFunction(CI);
3787 case Intrinsic::amdgcn_update_dpp: {
3788 Value *Old = II->getArgOperand(0);
3790 auto BC = cast<ConstantInt>(II->getArgOperand(5));
3791 auto RM = cast<ConstantInt>(II->getArgOperand(3));
3792 auto BM = cast<ConstantInt>(II->getArgOperand(4));
3793 if (BC->isZeroValue() ||
3794 RM->getZExtValue() != 0xF ||
3795 BM->getZExtValue() != 0xF ||
3796 isa<UndefValue>(Old))
3797 break;
3799 // If bound_ctrl = 1 and row mask = bank mask = 0xf, we can omit the old value.
3800 II->setOperand(0, UndefValue::get(Old->getType()));
3801 return II;
3803 case Intrinsic::amdgcn_readfirstlane:
3804 case Intrinsic::amdgcn_readlane: {
3805 // A constant value is trivially uniform.
3806 if (Constant *C = dyn_cast<Constant>(II->getArgOperand(0)))
3807 return replaceInstUsesWith(*II, C);
3809 // The remaining folds may not be safe if the exec mask could differ between
3810 // the def and this use.
3811 Value *Src = II->getArgOperand(0);
3812 Instruction *SrcInst = dyn_cast<Instruction>(Src);
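// Staying within a single basic block is a conservative way to keep the exec
// mask consistent between the def and this use.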
3813 if (SrcInst && SrcInst->getParent() != II->getParent())
3814 break;
3816 // readfirstlane (readfirstlane x) -> readfirstlane x
3817 // readlane (readfirstlane x), y -> readfirstlane x
3818 if (match(Src, m_Intrinsic<Intrinsic::amdgcn_readfirstlane>()))
3819 return replaceInstUsesWith(*II, Src);
3821 if (IID == Intrinsic::amdgcn_readfirstlane) {
3822 // readfirstlane (readlane x, y) -> readlane x, y
3823 if (match(Src, m_Intrinsic<Intrinsic::amdgcn_readlane>()))
3824 return replaceInstUsesWith(*II, Src);
3825 } else {
3826 // readlane (readlane x, y), y -> readlane x, y
3827 if (match(Src, m_Intrinsic<Intrinsic::amdgcn_readlane>(
3828 m_Value(), m_Specific(II->getArgOperand(1)))))
3829 return replaceInstUsesWith(*II, Src);
3832 break;
3834 case Intrinsic::stackrestore: {
3835 // If the save is right next to the restore, remove the restore. This can
3836 // happen when variable allocas are DCE'd.
3837 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
3838 if (SS->getIntrinsicID() == Intrinsic::stacksave) {
3839 // Skip over debug info.
3840 if (SS->getNextNonDebugInstruction() == II) {
3841 return eraseInstFromFunction(CI);
3846 // Scan down this block to see if there is another stack restore in the
3847 // same block without an intervening call/alloca.
3848 BasicBlock::iterator BI(II);
3849 Instruction *TI = II->getParent()->getTerminator();
3850 bool CannotRemove = false;
3851 for (++BI; &*BI != TI; ++BI) {
3852 if (isa<AllocaInst>(BI)) {
3853 CannotRemove = true;
3854 break;
3856 if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
3857 if (auto *II2 = dyn_cast<IntrinsicInst>(BCI)) {
3858 // If there is a stackrestore below this one, remove this one.
3859 if (II2->getIntrinsicID() == Intrinsic::stackrestore)
3860 return eraseInstFromFunction(CI);
3862 // Bail if we cross over an intrinsic with side effects, such as
3863 // llvm.stacksave, llvm.read_register, or llvm.setjmp.
3864 if (II2->mayHaveSideEffects()) {
3865 CannotRemove = true;
3866 break;
3868 } else {
3869 // If we found a non-intrinsic call, we can't remove the stack
3870 // restore.
3871 CannotRemove = true;
3872 break;
3877 // If the stack restore is in a return, resume, or unwind block and if there
3878 // are no allocas or calls between the restore and the return, nuke the
3879 // restore.
3880 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
3881 return eraseInstFromFunction(CI);
3882 break;
3884 case Intrinsic::lifetime_start:
3885 // Asan needs to poison memory to detect invalid access which is possible
3886 // even for empty lifetime range.
3887 if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3888 II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
3889 II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3890 break;
3892 if (removeTriviallyEmptyRange(*II, Intrinsic::lifetime_start,
3893 Intrinsic::lifetime_end, *this))
3894 return nullptr;
3895 break;
3896 case Intrinsic::assume: {
3897 Value *IIOperand = II->getArgOperand(0);
3898 // Remove an assume if it is followed by an identical assume.
3899 // TODO: Do we need this? Unless there are conflicting assumptions, the
3900 // computeKnownBits(IIOperand) below here eliminates redundant assumes.
3901 Instruction *Next = II->getNextNonDebugInstruction();
3902 if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
3903 return eraseInstFromFunction(CI);
3905 // Canonicalize assume(a && b) -> assume(a); assume(b);
3906 // Note: New assumption intrinsics created here are registered by
3907 // the InstCombineIRInserter object.
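// Splitting the conjunction lets each condition be matched independently by
// later known-bits queries.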
3908 FunctionType *AssumeIntrinsicTy = II->getFunctionType();
3909 Value *AssumeIntrinsic = II->getCalledValue();
3910 Value *A, *B;
3911 if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
3912 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, II->getName());
3913 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());
3914 return eraseInstFromFunction(*II);
3916 // assume(!(a || b)) -> assume(!a); assume(!b);
3917 if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
3918 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
3919 Builder.CreateNot(A), II->getName());
3920 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
3921 Builder.CreateNot(B), II->getName());
3922 return eraseInstFromFunction(*II);
3925 // assume( (load addr) != null ) -> add 'nonnull' metadata to load
3926 // (if assume is valid at the load)
3927 CmpInst::Predicate Pred;
3928 Instruction *LHS;
3929 if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) &&
3930 Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load &&
3931 LHS->getType()->isPointerTy() &&
3932 isValidAssumeForContext(II, LHS, &DT)) {
3933 MDNode *MD = MDNode::get(II->getContext(), None);
3934 LHS->setMetadata(LLVMContext::MD_nonnull, MD);
3935 return eraseInstFromFunction(*II);
3937 // TODO: apply nonnull return attributes to calls and invokes
3938 // TODO: apply range metadata for range check patterns?
3941 // If there is a dominating assume with the same condition as this one,
3942 // then this one is redundant, and should be removed.
3943 KnownBits Known(1);
3944 computeKnownBits(IIOperand, Known, 0, II);
3945 if (Known.isAllOnes())
3946 return eraseInstFromFunction(*II);
3948 // Update the cache of affected values for this assumption (we might be
3949 // here because we just simplified the condition).
3950 AC.updateAffectedValues(II);
3951 break;
3953 case Intrinsic::experimental_gc_relocate: {
3954 // Translate facts known about a pointer before relocating into
3955 // facts about the relocate value, while being careful to
3956 // preserve relocation semantics.
3957 Value *DerivedPtr = cast<GCRelocateInst>(II)->getDerivedPtr();
3959 // Remove the relocation if it is unused; note that this check is required
3960 // to prevent the cases below from looping forever.
3961 if (II->use_empty())
3962 return eraseInstFromFunction(*II);
3964 // Undef is undef, even after relocation.
3965 // TODO: provide a hook for this in GCStrategy. This is clearly legal for
3966 // most practical collectors, but there was discussion in the review thread
3967 // about whether it was legal for all possible collectors.
3968 if (isa<UndefValue>(DerivedPtr))
3969 // Use undef of gc_relocate's type to replace it.
3970 return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3972 if (auto *PT = dyn_cast<PointerType>(II->getType())) {
3973 // The relocation of null will be null for most any collector.
3974 // TODO: provide a hook for this in GCStrategy. There might be some
3975 // weird collector this property does not hold for.
3976 if (isa<ConstantPointerNull>(DerivedPtr))
3977 // Use null-pointer of gc_relocate's type to replace it.
3978 return replaceInstUsesWith(*II, ConstantPointerNull::get(PT));
3980 // isKnownNonNull -> nonnull attribute
3981 if (!II->hasRetAttr(Attribute::NonNull) &&
3982 isKnownNonZero(DerivedPtr, DL, 0, &AC, II, &DT)) {
3983 II->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
3984 return II;
3988 // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
3989 // Canonicalize on the type from the uses to the defs
3991 // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
3992 break;
3995 case Intrinsic::experimental_guard: {
3996 // Is this guard followed by another guard? We scan forward over a small
3997 // fixed window of instructions to handle common cases with conditions
3998 // computed between guards.
3999 Instruction *NextInst = II->getNextNode();
4000 for (unsigned i = 0; i < GuardWideningWindow; i++) {
4001 // Note: Using context-free form to avoid compile time blow up
4002 if (!isSafeToSpeculativelyExecute(NextInst))
4003 break;
4004 NextInst = NextInst->getNextNode();
4006 Value *NextCond = nullptr;
4007 if (match(NextInst,
4008 m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
4009 Value *CurrCond = II->getArgOperand(0);
4011 // Remove a guard that is immediately preceded by an identical guard.
4012 if (CurrCond == NextCond)
4013 return eraseInstFromFunction(*NextInst);
4015 // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
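// The instructions between the two guards compute the second condition, so
// hoist them above the first guard before merging the conditions.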
4016 Instruction* MoveI = II->getNextNode();
4017 while (MoveI != NextInst) {
4018 auto *Temp = MoveI;
4019 MoveI = MoveI->getNextNode();
4020 Temp->moveBefore(II);
4022 II->setArgOperand(0, Builder.CreateAnd(CurrCond, NextCond));
4023 return eraseInstFromFunction(*NextInst);
4025 break;
4028 return visitCallBase(*II);
4031 // Fence instruction simplification
4032 Instruction *InstCombiner::visitFenceInst(FenceInst &FI) {
4033 // Remove identical consecutive fences.
4034 Instruction *Next = FI.getNextNonDebugInstruction();
4035 if (auto *NFI = dyn_cast<FenceInst>(Next))
4036 if (FI.isIdenticalTo(NFI))
4037 return eraseInstFromFunction(FI);
4038 return nullptr;
4041 // InvokeInst simplification
4042 Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
4043 return visitCallBase(II);
4046 // CallBrInst simplification
4047 Instruction *InstCombiner::visitCallBrInst(CallBrInst &CBI) {
4048 return visitCallBase(CBI);
4051 /// If this cast does not affect the value passed through the varargs area, we
4052 /// can eliminate the use of the cast.
4053 static bool isSafeToEliminateVarargsCast(const CallBase &Call,
4054 const DataLayout &DL,
4055 const CastInst *const CI,
4056 const int ix) {
4057 if (!CI->isLosslessCast())
4058 return false;
4060 // If this is a GC intrinsic, avoid munging types. We need types for
4061 // statepoint reconstruction in SelectionDAG.
4062 // TODO: This is probably something which should be expanded to all
4063 // intrinsics since the entire point of intrinsics is that
4064 // they are understandable by the optimizer.
4065 if (isStatepoint(&Call) || isGCRelocate(&Call) || isGCResult(&Call))
4066 return false;
4068 // The size of ByVal or InAlloca arguments is derived from the type, so we
4069 // can't change to a type with a different size. If the size were
4070 // passed explicitly we could avoid this check.
4071 if (!Call.isByValOrInAllocaArgument(ix))
4072 return true;
4074 Type* SrcTy =
4075 cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
4076 Type *DstTy = Call.isByValArgument(ix)
4077 ? Call.getParamByValType(ix)
4078 : cast<PointerType>(CI->getType())->getElementType();
4079 if (!SrcTy->isSized() || !DstTy->isSized())
4080 return false;
4081 if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
4082 return false;
4083 return true;
4086 Instruction *InstCombiner::tryOptimizeCall(CallInst *CI) {
4087 if (!CI->getCalledFunction()) return nullptr;
4089 auto InstCombineRAUW = [this](Instruction *From, Value *With) {
4090 replaceInstUsesWith(*From, With);
4092 auto InstCombineErase = [this](Instruction *I) {
4093 eraseInstFromFunction(*I);
4095 LibCallSimplifier Simplifier(DL, &TLI, ORE, BFI, PSI, InstCombineRAUW,
4096 InstCombineErase);
4097 if (Value *With = Simplifier.optimizeCall(CI)) {
4098 ++NumSimplified;
4099 return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
4102 return nullptr;
4105 static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
4106 // Strip off at most one level of pointer casts, looking for an alloca. This
4107 // is good enough in practice and simpler than handling any number of casts.
4108 Value *Underlying = TrampMem->stripPointerCasts();
4109 if (Underlying != TrampMem &&
4110 (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
4111 return nullptr;
4112 if (!isa<AllocaInst>(Underlying))
4113 return nullptr;
4115 IntrinsicInst *InitTrampoline = nullptr;
4116 for (User *U : TrampMem->users()) {
4117 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4118 if (!II)
4119 return nullptr;
4120 if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
4121 if (InitTrampoline)
4122 // More than one init_trampoline writes to this value. Give up.
4123 return nullptr;
4124 InitTrampoline = II;
4125 continue;
4127 if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
4128 // Allow any number of calls to adjust.trampoline.
4129 continue;
4130 return nullptr;
4133 // No call to init.trampoline found.
4134 if (!InitTrampoline)
4135 return nullptr;
4137 // Check that the alloca is being used in the expected way.
4138 if (InitTrampoline->getOperand(0) != TrampMem)
4139 return nullptr;
4141 return InitTrampoline;
4144 static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
4145 Value *TrampMem) {
4146 // Visit all the previous instructions in the basic block, and try to find an
4147 // init.trampoline which has a direct path to the adjust.trampoline.
4148 for (BasicBlock::iterator I = AdjustTramp->getIterator(),
4149 E = AdjustTramp->getParent()->begin();
4150 I != E;) {
4151 Instruction *Inst = &*--I;
4152 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
4153 if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
4154 II->getOperand(0) == TrampMem)
4155 return II;
4156 if (Inst->mayWriteToMemory())
4157 return nullptr;
4159 return nullptr;
4162 // Given a call to llvm.adjust.trampoline, find and return the corresponding
4163 // call to llvm.init.trampoline if the call to the trampoline can be optimized
4164 // to a direct call to a function. Otherwise return NULL.
4165 static IntrinsicInst *findInitTrampoline(Value *Callee) {
4166 Callee = Callee->stripPointerCasts();
4167 IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
4168 if (!AdjustTramp ||
4169 AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
4170 return nullptr;
4172 Value *TrampMem = AdjustTramp->getOperand(0);
4174 if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
4175 return IT;
4176 if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
4177 return IT;
4178 return nullptr;
4181 static void annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI) {
4182 ConstantInt *Op0C = dyn_cast<ConstantInt>(Call.getOperand(0));
4183 ConstantInt *Op1C = (Call.getNumArgOperands() == 1)
4184 ? nullptr
4185 : dyn_cast<ConstantInt>(Call.getOperand(1));
4186 // Bail out if the allocation size is zero.
4187 if ((Op0C && Op0C->isNullValue()) || (Op1C && Op1C->isNullValue()))
4188 return;
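// Attach a dereferenceable return attribute sized by the constant allocation
// amount; allocators that may return null get the _or_null form.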
4190 if (isMallocLikeFn(&Call, TLI) && Op0C) {
4191 Call.addAttribute(AttributeList::ReturnIndex,
4192 Attribute::getWithDereferenceableOrNullBytes(
4193 Call.getContext(), Op0C->getZExtValue()));
4194 } else if (isOpNewLikeFn(&Call, TLI) && Op0C) {
4195 Call.addAttribute(AttributeList::ReturnIndex,
4196 Attribute::getWithDereferenceableBytes(
4197 Call.getContext(), Op0C->getZExtValue()));
4198 } else if (isReallocLikeFn(&Call, TLI) && Op1C) {
4199 Call.addAttribute(AttributeList::ReturnIndex,
4200 Attribute::getWithDereferenceableOrNullBytes(
4201 Call.getContext(), Op1C->getZExtValue()));
4202 } else if (isCallocLikeFn(&Call, TLI) && Op0C && Op1C) {
4203 bool Overflow;
4204 const APInt &N = Op0C->getValue();
4205 APInt Size = N.umul_ov(Op1C->getValue(), Overflow);
4206 if (!Overflow)
4207 Call.addAttribute(AttributeList::ReturnIndex,
4208 Attribute::getWithDereferenceableOrNullBytes(
4209 Call.getContext(), Size.getZExtValue()));
4213 /// Improvements for call, callbr and invoke instructions.
4214 Instruction *InstCombiner::visitCallBase(CallBase &Call) {
4215 if (isAllocationFn(&Call, &TLI))
4216 annotateAnyAllocSite(Call, &TLI);
4218 if (isAllocLikeFn(&Call, &TLI))
4219 return visitAllocSite(Call);
4221 bool Changed = false;
4223 // Mark any parameters that are known to be non-null with the nonnull
4224 // attribute. This is helpful for inlining calls to functions with null
4225 // checks on their arguments.
4226 SmallVector<unsigned, 4> ArgNos;
4227 unsigned ArgNo = 0;
4229 for (Value *V : Call.args()) {
4230 if (V->getType()->isPointerTy() &&
4231 !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
4232 isKnownNonZero(V, DL, 0, &AC, &Call, &DT))
4233 ArgNos.push_back(ArgNo);
4234 ArgNo++;
4237 assert(ArgNo == Call.arg_size() && "sanity check");
4239 if (!ArgNos.empty()) {
4240 AttributeList AS = Call.getAttributes();
4241 LLVMContext &Ctx = Call.getContext();
4242 AS = AS.addParamAttribute(Ctx, ArgNos,
4243 Attribute::get(Ctx, Attribute::NonNull));
4244 Call.setAttributes(AS);
4245 Changed = true;
4248 // If the callee is a pointer to a function, attempt to move any casts to the
4249 // arguments of the call/callbr/invoke.
4250 Value *Callee = Call.getCalledValue();
4251 if (!isa<Function>(Callee) && transformConstExprCastCall(Call))
4252 return nullptr;
4254 if (Function *CalleeF = dyn_cast<Function>(Callee)) {
4255 // Remove the convergent attr on calls when the callee is not convergent.
4256 if (Call.isConvergent() && !CalleeF->isConvergent() &&
4257 !CalleeF->isIntrinsic()) {
4258 LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
4259 << "\n");
4260 Call.setNotConvergent();
4261 return &Call;
4264 // If the call and callee calling conventions don't match, this call must
4265 // be unreachable, as the call is undefined.
4266 if (CalleeF->getCallingConv() != Call.getCallingConv() &&
4267 // Only do this for calls to a function with a body. A prototype may
4268 // not actually end up matching the implementation's calling conv for a
4269 // variety of reasons (e.g. it may be written in assembly).
4270 !CalleeF->isDeclaration()) {
4271 Instruction *OldCall = &Call;
4272 CreateNonTerminatorUnreachable(OldCall);
4273 // If OldCall does not return void, then replaceAllUsesWith undef.
4274 // This allows ValueHandlers and custom metadata to adjust themselves.
4275 if (!OldCall->getType()->isVoidTy())
4276 replaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
4277 if (isa<CallInst>(OldCall))
4278 return eraseInstFromFunction(*OldCall);
4280 // We cannot remove an invoke or a callbr because it would change the
4281 // CFG; just change the callee to a null pointer.
4282 cast<CallBase>(OldCall)->setCalledFunction(
4283 CalleeF->getFunctionType(),
4284 Constant::getNullValue(CalleeF->getType()));
4285 return nullptr;
4289 if ((isa<ConstantPointerNull>(Callee) &&
4290 !NullPointerIsDefined(Call.getFunction())) ||
4291 isa<UndefValue>(Callee)) {
4292 // If Call does not return void, then replaceAllUsesWith undef.
4293 // This allows ValueHandlers and custom metadata to adjust themselves.
4294 if (!Call.getType()->isVoidTy())
4295 replaceInstUsesWith(Call, UndefValue::get(Call.getType()));
4297 if (Call.isTerminator()) {
4298 // Can't remove an invoke or callbr because we cannot change the CFG.
4299 return nullptr;
4302 // This instruction is not reachable, just remove it.
4303 CreateNonTerminatorUnreachable(&Call);
4304 return eraseInstFromFunction(Call);
4307 if (IntrinsicInst *II = findInitTrampoline(Callee))
4308 return transformCallThroughTrampoline(Call, *II);
4310 PointerType *PTy = cast<PointerType>(Callee->getType());
4311 FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
4312 if (FTy->isVarArg()) {
4313 int ix = FTy->getNumParams();
4314 // See if we can optimize any arguments passed through the varargs area of
4315 // the call.
4316 for (auto I = Call.arg_begin() + FTy->getNumParams(), E = Call.arg_end();
4317 I != E; ++I, ++ix) {
4318 CastInst *CI = dyn_cast<CastInst>(*I);
4319 if (CI && isSafeToEliminateVarargsCast(Call, DL, CI, ix)) {
4320 *I = CI->getOperand(0);
4322 // Update the byval type to match the argument type.
4323 if (Call.isByValArgument(ix)) {
4324 Call.removeParamAttr(ix, Attribute::ByVal);
4325 Call.addParamAttr(
4326 ix, Attribute::getWithByValType(
4327 Call.getContext(),
4328 CI->getOperand(0)->getType()->getPointerElementType()));
4330 Changed = true;
4335 if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
4336 // Inline asm calls cannot throw - mark them 'nounwind'.
4337 Call.setDoesNotThrow();
4338 Changed = true;
4341 // Try to optimize the call if possible, we require DataLayout for most of
4342 // this. None of these calls are seen as possibly dead so go ahead and
4343 // delete the instruction now.
4344 if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
4345 Instruction *I = tryOptimizeCall(CI);
4346 // If we changed something, return the result; otherwise fall through
4347 // to the final Changed check.
4348 if (I) return eraseInstFromFunction(*I);
4351 return Changed ? &Call : nullptr;
4354 /// If the callee is a constexpr cast of a function, attempt to move the cast to
4355 /// the arguments of the call/callbr/invoke.
4356 bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
4357 auto *Callee = dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts());
4358 if (!Callee)
4359 return false;
4361 // If this is a call to a thunk function, don't remove the cast. Thunks are
4362 // used to transparently forward all incoming parameters and outgoing return
4363 // values, so it's important to leave the cast in place.
4364 if (Callee->hasFnAttribute("thunk"))
4365 return false;
4367 // If this is a musttail call, the callee's prototype must match the caller's
4368 // prototype with the exception of pointee types. The code below doesn't
4369 // implement that, so we can't do this transform.
4370 // TODO: Do the transform if it only requires adding pointer casts.
4371 if (Call.isMustTailCall())
4372 return false;
4374 Instruction *Caller = &Call;
4375 const AttributeList &CallerPAL = Call.getAttributes();
4377 // Okay, this is a cast from a function to a different type. Unless doing so
4378 // would cause a type conversion of one of our arguments, change this call to
4379 // be a direct call with arguments cast to the appropriate types.
4380 FunctionType *FT = Callee->getFunctionType();
4381 Type *OldRetTy = Caller->getType();
4382 Type *NewRetTy = FT->getReturnType();
4384 // Check to see if we are changing the return type...
4385 if (OldRetTy != NewRetTy) {
4387 if (NewRetTy->isStructTy())
4388 return false; // TODO: Handle multiple return values.
4390 if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
4391 if (Callee->isDeclaration())
4392 return false; // Cannot transform this return value.
4394 if (!Caller->use_empty() &&
4395 // void -> non-void is handled specially
4396 !NewRetTy->isVoidTy())
4397 return false; // Cannot transform this return value.
4400 if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
4401 AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
4402 if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
4403 return false; // Attribute not compatible with transformed value.
4406 // If the callbase is an invoke/callbr instruction, and the return value is
4407 // used by a PHI node in a successor, we cannot change the return type of
4408 // the call because there is no place to put the cast instruction (without
4409 // breaking the critical edge). Bail out in this case.
4410 if (!Caller->use_empty()) {
4411 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
4412 for (User *U : II->users())
4413 if (PHINode *PN = dyn_cast<PHINode>(U))
4414 if (PN->getParent() == II->getNormalDest() ||
4415 PN->getParent() == II->getUnwindDest())
4416 return false;
4417 // FIXME: Be conservative for callbr to avoid a quadratic search.
4418 if (isa<CallBrInst>(Caller))
4419 return false;
4423 unsigned NumActualArgs = Call.arg_size();
4424 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
4426 // Prevent us turning:
4427 // declare void @takes_i32_inalloca(i32* inalloca)
4428 // call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
4430 // into:
4431 // call void @takes_i32_inalloca(i32* null)
4433 // Similarly, avoid folding away bitcasts of byval calls.
4434 if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
4435 Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
4436 return false;
4438 auto AI = Call.arg_begin();
4439 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
4440 Type *ParamTy = FT->getParamType(i);
4441 Type *ActTy = (*AI)->getType();
4443 if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
4444 return false; // Cannot transform this parameter value.
4446 if (AttrBuilder(CallerPAL.getParamAttributes(i))
4447 .overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
4448 return false; // Attribute not compatible with transformed value.
4450 if (Call.isInAllocaArgument(i))
4451 return false; // Cannot transform to and from inalloca.
4453 // If the parameter is passed as a byval argument, then we have to have a
4454 // sized type and the sized type has to have the same size as the old type.
4455 if (ParamTy != ActTy && CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
4456 PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
4457 if (!ParamPTy || !ParamPTy->getElementType()->isSized())
4458 return false;
4460 Type *CurElTy = Call.getParamByValType(i);
4461 if (DL.getTypeAllocSize(CurElTy) !=
4462 DL.getTypeAllocSize(ParamPTy->getElementType()))
4463 return false;
4467 if (Callee->isDeclaration()) {
4468 // Do not delete arguments unless we have a function body.
4469 if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
4470 return false;
4472 // If the callee is just a declaration, don't change the varargsness of the
4473 // call. We don't want to introduce a varargs call where one doesn't
4474 // already exist.
4475 PointerType *APTy = cast<PointerType>(Call.getCalledValue()->getType());
4476 if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
4477 return false;
4479 // If both the callee and the cast type are varargs, we still have to make
4480 // sure the number of fixed parameters is the same or we have the same
4481 // ABI issues as if we introduce a varargs call.
4482 if (FT->isVarArg() &&
4483 cast<FunctionType>(APTy->getElementType())->isVarArg() &&
4484 FT->getNumParams() !=
4485 cast<FunctionType>(APTy->getElementType())->getNumParams())
4486 return false;
4489 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
4490 !CallerPAL.isEmpty()) {
4491 // In this case we have more arguments than the new function type, but we
4492 // won't be dropping them. Check that these extra arguments have attributes
4493 // that are compatible with being a vararg call argument.
4494 unsigned SRetIdx;
4495 if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
4496 SRetIdx > FT->getNumParams())
4497 return false;
4500 // Okay, we decided that this is a safe thing to do: go ahead and start
4501 // inserting cast instructions as necessary.
4502 SmallVector<Value *, 8> Args;
4503 SmallVector<AttributeSet, 8> ArgAttrs;
4504 Args.reserve(NumActualArgs);
4505 ArgAttrs.reserve(NumActualArgs);
4507 // Get any return attributes.
4508 AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
4510 // If the return value is not being used, the type may not be compatible
4511 // with the existing attributes. Wipe out any problematic attributes.
4512 RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));
4514 LLVMContext &Ctx = Call.getContext();
4515 AI = Call.arg_begin();
4516 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
4517 Type *ParamTy = FT->getParamType(i);
4519 Value *NewArg = *AI;
4520 if ((*AI)->getType() != ParamTy)
4521 NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
4522 Args.push_back(NewArg);
4524 // Add any parameter attributes.
4525 if (CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
4526 AttrBuilder AB(CallerPAL.getParamAttributes(i));
4527 AB.addByValAttr(NewArg->getType()->getPointerElementType());
4528 ArgAttrs.push_back(AttributeSet::get(Ctx, AB));
4529 } else
4530 ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
4533 // If the function takes more arguments than the call was taking, add them
4534 // now.
4535 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
4536 Args.push_back(Constant::getNullValue(FT->getParamType(i)));
4537 ArgAttrs.push_back(AttributeSet());
4540 // If we are removing arguments to the function, emit an obnoxious warning.
4541 if (FT->getNumParams() < NumActualArgs) {
4542 // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
4543 if (FT->isVarArg()) {
4544 // Add all of the arguments in their promoted form to the arg list.
4545 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
4546 Type *PTy = getPromotedType((*AI)->getType());
4547 Value *NewArg = *AI;
4548 if (PTy != (*AI)->getType()) {
4549 // Must promote to pass through va_arg area!
4550 Instruction::CastOps opcode =
4551 CastInst::getCastOpcode(*AI, false, PTy, false);
4552 NewArg = Builder.CreateCast(opcode, *AI, PTy);
4554 Args.push_back(NewArg);
4556 // Add any parameter attributes.
4557 ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
4562 AttributeSet FnAttrs = CallerPAL.getFnAttributes();
4564 if (NewRetTy->isVoidTy())
4565 Caller->setName(""); // Void type should not have a name.
4567 assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
4568 "missing argument attributes");
4569 AttributeList NewCallerPAL = AttributeList::get(
4570 Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);
4572 SmallVector<OperandBundleDef, 1> OpBundles;
4573 Call.getOperandBundlesAsDefs(OpBundles);
4575 CallBase *NewCall;
4576 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
4577 NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
4578 II->getUnwindDest(), Args, OpBundles);
4579 } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
4580 NewCall = Builder.CreateCallBr(Callee, CBI->getDefaultDest(),
4581 CBI->getIndirectDests(), Args, OpBundles);
4582 } else {
4583 NewCall = Builder.CreateCall(Callee, Args, OpBundles);
4584 cast<CallInst>(NewCall)->setTailCallKind(
4585 cast<CallInst>(Caller)->getTailCallKind());
4587 NewCall->takeName(Caller);
4588 NewCall->setCallingConv(Call.getCallingConv());
4589 NewCall->setAttributes(NewCallerPAL);
4591 // Preserve the weight metadata for the new call instruction. The metadata
4592 // is used by SamplePGO to check callsite's hotness.
4593 uint64_t W;
4594 if (Caller->extractProfTotalWeight(W))
4595 NewCall->setProfWeight(W);

  // Insert a cast of the return type as necessary.
  Instruction *NC = NewCall;
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());

      // If this is an invoke/callbr instruction, we should insert it after the
      // first non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
        BasicBlock::iterator I = CBI->getDefaultDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call, just insert cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    replaceInstUsesWith(*Caller, NV);
  else if (Caller->hasValueHandle()) {
    if (OldRetTy == NV->getType())
      ValueHandleBase::ValueIsRAUWd(Caller, NV);
    else
      // We cannot call ValueIsRAUWd with a different type, and the
      // actual tracked value will disappear.
      ValueHandleBase::ValueIsDeleted(Caller);
  }

  eraseInstFromFunction(*Caller);
  return true;
}

/// Turn a call to a function created by init_trampoline / adjust_trampoline
/// intrinsic pair into a direct call to the underlying function.
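///
/// Illustrative sketch (added commentary; the names are made up and casts are
/// omitted for brevity): given
///   call void @llvm.init.trampoline(i8* %tramp, i8* @impl, i8* %chain)
///   %p = call i8* @llvm.adjust.trampoline(i8* %tramp)
///   %r = call i32 %p(i32 %x)
/// where @impl marks its leading chain parameter 'nest', the indirect call is
/// rewritten into a direct call:
///   %r = call i32 @impl(i8* nest %chain, i32 %x)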
Instruction *
InstCombiner::transformCallThroughTrampoline(CallBase &Call,
                                             IntrinsicInst &Tramp) {
  Value *Callee = Call.getCalledValue();
  Type *CalleeTy = Callee->getType();
  FunctionType *FTy = Call.getFunctionType();
  AttributeList Attrs = Call.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return nullptr;

  Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
  FunctionType *NestFTy = NestF->getFunctionType();

  AttributeList NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestArgNo = 0;
    Type *NestTy = nullptr;
    AttributeSet NestAttr;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
                                      E = NestFTy->param_end();
         I != E; ++NestArgNo, ++I) {
      AttributeSet AS = NestAttrs.getParamAttributes(NestArgNo);
      if (AS.hasAttribute(Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = AS;
        break;
      }
    }

    if (NestTy) {
      std::vector<Value*> NewArgs;
      std::vector<AttributeSet> NewArgAttrs;
      NewArgs.reserve(Call.arg_size() + 1);
      NewArgAttrs.reserve(Call.arg_size());

      // Insert the nest argument into the call argument list, which may
      // mean appending it. Likewise for attributes.
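      // Illustrative note (added commentary): if the 'nest' parameter is the
      // first parameter of NestF, the chain value is simply prepended to the
      // argument list; if it sits past the call's last argument, it ends up
      // appended instead.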
      {
        unsigned ArgNo = 0;
        auto I = Call.arg_begin(), E = Call.arg_end();
        do {
          if (ArgNo == NestArgNo) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp.getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewArgAttrs.push_back(NestAttr);
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          NewArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));

          ++ArgNo;
          ++I;
        } while (true);
      }

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.
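      // Illustrative note (added commentary): if FTy is 'i32 (i32, float)'
      // and the chain goes in at index 0 with an i8* chain type, the
      // synthesized type below becomes 'i32 (i8*, i32, float)'.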

      std::vector<Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned ArgNo = 0;
        FunctionType::param_iterator I = FTy->param_begin(),
                                     E = FTy->param_end();

        do {
          if (ArgNo == NestArgNo)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++ArgNo;
          ++I;
        } while (true);
      }

      // Replace the trampoline call with a direct call. Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
          NestF->getType() == PointerType::getUnqual(NewFTy)
              ? NestF
              : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      AttributeList NewPAL =
          AttributeList::get(FTy->getContext(), Attrs.getFnAttributes(),
                             Attrs.getRetAttributes(), NewArgAttrs);

      SmallVector<OperandBundleDef, 1> OpBundles;
      Call.getOperandBundlesAsDefs(OpBundles);

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
        NewCaller = InvokeInst::Create(NewFTy, NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs, OpBundles);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
        NewCaller =
            CallBrInst::Create(NewFTy, NewCallee, CBI->getDefaultDest(),
                               CBI->getIndirectDests(), NewArgs, OpBundles);
        cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
        cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewFTy, NewCallee, NewArgs, OpBundles);
        cast<CallInst>(NewCaller)->setTailCallKind(
            cast<CallInst>(Call).getTailCallKind());
        cast<CallInst>(NewCaller)->setCallingConv(
            cast<CallInst>(Call).getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      NewCaller->setDebugLoc(Call.getDebugLoc());

      return NewCaller;
    }
  }

  // Replace the trampoline call with a direct call. Since there is no 'nest'
  // parameter, there is no need to adjust the argument list. Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee = ConstantExpr::getBitCast(NestF, CalleeTy);
  Call.setCalledFunction(FTy, NewCallee);
  return &Call;