//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
///
/// About the cost model numbers used below it's necessary to say the
/// following: the numbers correspond to some "generic" X86 CPU rather than a
/// concrete CPU model. Usually the numbers correspond to the CPU where the
/// feature first appeared. For example, if we do Subtarget.hasSSE42() in
/// the lookups below the cost is based on Nehalem as that was the first CPU
/// to support that feature level and thus most likely has the worst case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   AVX     - Sandy Bridge
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency):
///                   divss     sqrtss     rsqrtss
///   Piledriver      9-24      13-15      5
///   Pentium II,III  18        30         2
///   Nehalem         7-14      7-18       3
///   Haswell         10-13     11         5
///
/// TODO: Develop and implement the target dependent cost model and
/// specialize cost numbers for different Cost Model Targets such as throughput,
/// code size, latency and uop count.
//===----------------------------------------------------------------------===//
#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//
TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}
llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
    TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    return 32 * 1024;  //  32 KByte
  case TargetTransformInfo::CacheLevel::L2D:
    return 256 * 1024; // 256 KByte
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}
llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
    TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    LLVM_FALLTHROUGH;
  case TargetTransformInfo::CacheLevel::L2D:
    return 8;
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}
unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  bool Vector = (ClassID == 1);
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}
TypeSize
X86TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  unsigned PreferVectorWidth = ST->getPreferVectorWidth();
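  // Note: PreferVectorWidth (set e.g. via -mprefer-vector-width=) can cap the
  // reported fixed-vector width below what the subtarget's ISA would allow.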
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(ST->is64Bit() ? 64 : 32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    if (ST->hasAVX512() && PreferVectorWidth >= 512)
      return TypeSize::getFixed(512);
    if (ST->hasAVX() && PreferVectorWidth >= 256)
      return TypeSize::getFixed(256);
    if (ST->hasSSE1() && PreferVectorWidth >= 128)
      return TypeSize::getFixed(128);
    return TypeSize::getFixed(0);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }

  llvm_unreachable("Unsupported register kind");
}
unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
  return getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
      .getFixedSize();
}
unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller handle the loop, which saves the overflow
  // check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}
InstructionCost X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);
  // vXi8 multiplications are always promoted to vXi16.
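  // i.e. the reported cost is zext(vXi8 -> vXi16) + the vXi16 multiply +
  // trunc(vXi16 -> vXi8), computed from the corresponding TTI queries below.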
  if (Opcode == Instruction::Mul && Ty->isVectorTy() &&
      Ty->getScalarSizeInBits() == 8) {
    Type *WideVecTy =
        VectorType::getExtendedElementVectorType(cast<VectorType>(Ty));
    return getCastInstrCost(Instruction::ZExt, WideVecTy, Ty,
                            TargetTransformInfo::CastContextHint::None,
                            CostKind) +
           getCastInstrCost(Instruction::Trunc, Ty, WideVecTy,
                            TargetTransformInfo::CastContextHint::None,
                            CostKind) +
           getArithmeticInstrCost(Opcode, WideVecTy, CostKind, Op1Info, Op2Info,
                                  Opd1PropInfo, Opd2PropInfo);
  }
  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");
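  // Most of the lookups below return LT.first * Entry->Cost: LT.first is the
  // number of legal registers the (possibly illegal) IR type splits into and
  // Entry->Cost is the per-register cost from the matching table. For example,
  // a v16i32 SDIV by constant on an SSE2-only target legalizes to four v4i32
  // operations, so it is reported as 4x the v4i32 table entry.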
  static const CostTblEntry GLMCostTable[] = {
    { ISD::FDIV,  MVT::f32,   18 }, // divss
    { ISD::FDIV,  MVT::v4f32, 35 }, // divps
    { ISD::FDIV,  MVT::f64,   33 }, // divsd
    { ISD::FDIV,  MVT::v2f64, 65 }, // divpd
  };

  if (ST->useGLMDivSqrtCosts())
    if (const auto *Entry = CostTableLookup(GLMCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry SLMCostTable[] = {
    { ISD::MUL,   MVT::v4i32, 11 }, // pmulld
    { ISD::MUL,   MVT::v8i16,  2 }, // pmullw
    { ISD::FMUL,  MVT::f64,    2 }, // mulsd
    { ISD::FMUL,  MVT::v2f64,  4 }, // mulpd
    { ISD::FMUL,  MVT::v4f32,  2 }, // mulps
    { ISD::FDIV,  MVT::f32,   17 }, // divss
    { ISD::FDIV,  MVT::v4f32, 39 }, // divps
    { ISD::FDIV,  MVT::f64,   32 }, // divsd
    { ISD::FDIV,  MVT::v2f64, 69 }, // divpd
    { ISD::FADD,  MVT::v2f64,  2 }, // addpd
    { ISD::FSUB,  MVT::v2f64,  2 }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long:
    // multiplies(3), shifts(3) and adds(2)
    // slm muldq version throughput is 2 and addq throughput 4
    // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
    // 3X4 (addq throughput) = 17
    { ISD::MUL,   MVT::v2i64, 17 },
    // slm addq\subq throughput is 4
    { ISD::ADD,   MVT::v2i64,  4 },
    { ISD::SUB,   MVT::v2i64,  4 },
  };
  if (ST->useSLMArithCosts()) {
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool SignedMode = Op1Signed || Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!SignedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!SignedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }

    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
  if ((ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV ||
       ISD == ISD::UREM) &&
      (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    if (ISD == ISD::SDIV || ISD == ISD::SREM) {
      // On X86, vector signed division by a constant power-of-two is
      // normally expanded to the sequence SRA + SRL + ADD + SRA.
      // The OperandValue properties may not be the same as that of the previous
      // operation; conservatively assume OP_None.
      InstructionCost Cost =
          2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Op1Info,
                                     Op2Info, TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
                                     Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Op1Info,
                                     Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);

      if (ISD == ISD::SREM) {
        // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
        Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info,
                                       Op2Info);
        Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info,
                                       Op2Info);
      }

      return Cost;
    }

    // Vector unsigned division/remainder will be simplified to shifts/masks.
    if (ISD == ISD::UDIV)
      return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    // UREM
    return getArithmeticInstrCost(Instruction::And, Ty, CostKind,
                                  Op1Info, Op2Info,
                                  TargetTransformInfo::OP_None,
                                  TargetTransformInfo::OP_None);
  }
  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL,  MVT::v64i8,  2 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,  2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,  4 }, // psrlw, pand, pxor, psubb.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v2i64,   1 },
    { ISD::SRA,  MVT::v4i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },

    { ISD::SHL,  MVT::v64i8,   4 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,   4 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,   8 }, // psrlw, pand, pxor, psubb.

    { ISD::SDIV, MVT::v16i32,  6 }, // pmuludq sequence
    { ISD::SREM, MVT::v16i32,  8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32,  5 }, // pmuludq sequence
    { ISD::UREM, MVT::v16i32,  7 }, // pmuludq+mul+sub sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v32i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v32i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v32i8,   4 }, // psrlw, pand, pxor, psubb.

    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v8i32,   6 }, // pmuludq sequence
    { ISD::SREM, MVT::v8i32,   8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,   5 }, // pmuludq sequence
    { ISD::UREM, MVT::v8i32,   7 }, // pmuludq+mul+sub sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v16i8,     2 }, // psllw + pand.
    { ISD::SRL,  MVT::v16i8,     2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v16i8,     4 }, // psrlw, pand, pxor, psubb.

    { ISD::SHL,  MVT::v32i8,   4+2 }, // 2*(psllw + pand) + split.
    { ISD::SRL,  MVT::v32i8,   4+2 }, // 2*(psrlw + pand) + split.
    { ISD::SRA,  MVT::v32i8,   8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.

    { ISD::SDIV, MVT::v8i32,  12+2 }, // 2*pmuludq sequence + split.
    { ISD::SREM, MVT::v8i32,  16+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::SDIV, MVT::v4i32,     6 }, // pmuludq sequence
    { ISD::SREM, MVT::v4i32,     8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  10+2 }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32,  14+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::UDIV, MVT::v4i32,     5 }, // pmuludq sequence
    { ISD::UREM, MVT::v4i32,     7 }, // pmuludq+mul+sub sequence
  };

  // XOP has faster vXi8 shifts.
  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2() && !ST->hasXOP()) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry AVX512BWConstCostTable[] = {
    { ISD::SDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v32i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v32i16,  8 }, // vpmulhuw+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasBWI()) {
    if (const auto *Entry =
            CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry AVX512ConstCostTable[] = {
    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
    { ISD::SDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v32i16, 12 }, // 2*vpmulhw sequence
    { ISD::SREM, MVT::v32i16, 16 }, // 2*vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16, 12 }, // 2*vpmulhuw sequence
    { ISD::UREM, MVT::v32i16, 16 }, // 2*vpmulhuw+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX512()) {
    if (const auto *Entry =
            CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry AVX2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v16i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v16i16,  8 }, // vpmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v8i32,  19 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v8i32,  19 }, // vpmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry SSE2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::SREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::UREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
    { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v8i16,     6 }, // pmulhw sequence
    { ISD::SREM, MVT::v8i16,     8 }, // pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
    { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v8i16,     6 }, // pmulhuw sequence
    { ISD::UREM, MVT::v8i16,     8 }, // pmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  38+2 }, // 2*pmuludq sequence + split.
    { ISD::SREM, MVT::v8i32,  48+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::SDIV, MVT::v4i32,    19 }, // pmuludq sequence
    { ISD::SREM, MVT::v4i32,    24 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  30+2 }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32,  40+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::UDIV, MVT::v4i32,    15 }, // pmuludq sequence
    { ISD::UREM, MVT::v4i32,    20 }, // pmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 32;
    if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 38;
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;
    if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 20;

    if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry AVX512BWShiftCostTable[] = {
    { ISD::SHL,   MVT::v16i8,   4 }, // extend/vpsllvw/pack sequence.
    { ISD::SRL,   MVT::v16i8,   4 }, // extend/vpsrlvw/pack sequence.
    { ISD::SRA,   MVT::v16i8,   4 }, // extend/vpsravw/pack sequence.
    { ISD::SHL,   MVT::v32i8,   4 }, // extend/vpsllvw/pack sequence.
    { ISD::SRL,   MVT::v32i8,   4 }, // extend/vpsrlvw/pack sequence.
    { ISD::SRA,   MVT::v32i8,   6 }, // extend/vpsravw/pack sequence.
    { ISD::SHL,   MVT::v64i8,   6 }, // extend/vpsllvw/pack sequence.
    { ISD::SRL,   MVT::v64i8,   7 }, // extend/vpsrlvw/pack sequence.
    { ISD::SRA,   MVT::v64i8,  15 }, // extend/vpsravw/pack sequence.

    { ISD::SHL,   MVT::v8i16,   1 }, // vpsllvw
    { ISD::SRL,   MVT::v8i16,   1 }, // vpsrlvw
    { ISD::SRA,   MVT::v8i16,   1 }, // vpsravw
    { ISD::SHL,   MVT::v16i16,  1 }, // vpsllvw
    { ISD::SRL,   MVT::v16i16,  1 }, // vpsrlvw
    { ISD::SRA,   MVT::v16i16,  1 }, // vpsravw
    { ISD::SHL,   MVT::v32i16,  1 }, // vpsllvw
    { ISD::SRL,   MVT::v32i16,  1 }, // vpsrlvw
    { ISD::SRA,   MVT::v32i16,  1 }, // vpsravw
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i16, 1 }, // psllw.
    { ISD::SRL,  MVT::v16i16, 1 }, // psrlw.
    { ISD::SRA,  MVT::v16i16, 1 }, // psraw.
    { ISD::SHL,  MVT::v32i16, 2 }, // 2*psllw.
    { ISD::SRL,  MVT::v32i16, 2 }, // 2*psrlw.
    { ISD::SRA,  MVT::v32i16, 2 }, // 2*psraw.

    { ISD::SHL,  MVT::v8i32,  1 }, // pslld
    { ISD::SRL,  MVT::v8i32,  1 }, // psrld
    { ISD::SRA,  MVT::v8i32,  1 }, // psrad
    { ISD::SHL,  MVT::v4i64,  1 }, // psllq
    { ISD::SRL,  MVT::v4i64,  1 }, // psrlq
  };

  if (ST->hasAVX2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v8i16, 1 }, // psllw.
    { ISD::SHL,  MVT::v4i32, 1 }, // pslld
    { ISD::SHL,  MVT::v2i64, 1 }, // psllq.

    { ISD::SRL,  MVT::v8i16, 1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32, 1 }, // psrld.
    { ISD::SRL,  MVT::v2i64, 1 }, // psrlq.

    { ISD::SRA,  MVT::v8i16, 1 }, // psraw.
    { ISD::SRA,  MVT::v4i32, 1 }, // psrad.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry AVX512DQCostTable[] = {
    { ISD::MUL,  MVT::v2i64, 2 }, // pmullq
    { ISD::MUL,  MVT::v4i64, 2 }, // pmullq
    { ISD::MUL,  MVT::v8i64, 2 }  // pmullq
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI())
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX512BWCostTable[] = {
    { ISD::SHL,   MVT::v64i8,  11 }, // vpblendvb sequence.
    { ISD::SRL,   MVT::v64i8,  11 }, // vpblendvb sequence.
    { ISD::SRA,   MVT::v64i8,  24 }, // vpblendvb sequence.
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL,     MVT::v4i32,   1 },
    { ISD::SRL,     MVT::v4i32,   1 },
    { ISD::SRA,     MVT::v4i32,   1 },
    { ISD::SHL,     MVT::v8i32,   1 },
    { ISD::SRL,     MVT::v8i32,   1 },
    { ISD::SRA,     MVT::v8i32,   1 },
    { ISD::SHL,     MVT::v16i32,  1 },
    { ISD::SRL,     MVT::v16i32,  1 },
    { ISD::SRA,     MVT::v16i32,  1 },

    { ISD::SHL,     MVT::v2i64,   1 },
    { ISD::SRL,     MVT::v2i64,   1 },
    { ISD::SHL,     MVT::v4i64,   1 },
    { ISD::SRL,     MVT::v4i64,   1 },
    { ISD::SHL,     MVT::v8i64,   1 },
    { ISD::SRL,     MVT::v8i64,   1 },

    { ISD::SRA,     MVT::v2i64,   1 },
    { ISD::SRA,     MVT::v4i64,   1 },
    { ISD::SRA,     MVT::v8i64,   1 },

    { ISD::MUL,     MVT::v16i32,  1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v8i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v4i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v8i64,   6 }, // 3*pmuludq/3*shift/2*add

    { ISD::FNEG,    MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FADD,    MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB,    MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::f64,     4 }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::v2f64,   4 }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::v4f64,   8 }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::v8f64,  16 }, // Skylake from http://www.agner.org/

    { ISD::FNEG,    MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FADD,    MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB,    MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::f32,     3 }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::v4f32,   3 }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::v8f32,   5 }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::v16f32, 10 }, // Skylake from http://www.agner.org/
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX2ShiftCostTable[] = {
    // Shifts on vXi64/vXi32 on AVX2 are legal even though we declare them
    // custom so we can detect the cases where the shift amount is a scalar.
    { ISD::SHL,     MVT::v4i32,    2 }, // vpsllvd (Haswell from agner.org)
    { ISD::SRL,     MVT::v4i32,    2 }, // vpsrlvd (Haswell from agner.org)
    { ISD::SRA,     MVT::v4i32,    2 }, // vpsravd (Haswell from agner.org)
    { ISD::SHL,     MVT::v8i32,    2 }, // vpsllvd (Haswell from agner.org)
    { ISD::SRL,     MVT::v8i32,    2 }, // vpsrlvd (Haswell from agner.org)
    { ISD::SRA,     MVT::v8i32,    2 }, // vpsravd (Haswell from agner.org)
    { ISD::SHL,     MVT::v2i64,    1 }, // vpsllvq (Haswell from agner.org)
    { ISD::SRL,     MVT::v2i64,    1 }, // vpsrlvq (Haswell from agner.org)
    { ISD::SHL,     MVT::v4i64,    1 }, // vpsllvq (Haswell from agner.org)
    { ISD::SRL,     MVT::v4i64,    1 }, // vpsrlvq (Haswell from agner.org)
  };

  if (ST->hasAVX512()) {
    if (ISD == ISD::SHL && LT.second == MVT::v32i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX512, a packed v32i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);
  }
  // Look for AVX2 lowering tricks (XOP is always better at v4i32 shifts).
  if (ST->hasAVX2() && !(ST->hasXOP() && LT.second == MVT::v4i32)) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry XOPShiftCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL,     MVT::v16i8,    1 },
    { ISD::SRL,     MVT::v16i8,    2 },
    { ISD::SRA,     MVT::v16i8,    2 },
    { ISD::SHL,     MVT::v8i16,    1 },
    { ISD::SRL,     MVT::v8i16,    2 },
    { ISD::SRA,     MVT::v8i16,    2 },
    { ISD::SHL,     MVT::v4i32,    1 },
    { ISD::SRL,     MVT::v4i32,    2 },
    { ISD::SRA,     MVT::v4i32,    2 },
    { ISD::SHL,     MVT::v2i64,    1 },
    { ISD::SRL,     MVT::v2i64,    2 },
    { ISD::SRA,     MVT::v2i64,    2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL,     MVT::v32i8,  2+2 },
    { ISD::SRL,     MVT::v32i8,  4+2 },
    { ISD::SRA,     MVT::v32i8,  4+2 },
    { ISD::SHL,     MVT::v16i16, 2+2 },
    { ISD::SRL,     MVT::v16i16, 4+2 },
    { ISD::SRA,     MVT::v16i16, 4+2 },
    { ISD::SHL,     MVT::v8i32,  2+2 },
    { ISD::SRL,     MVT::v8i32,  4+2 },
    { ISD::SRA,     MVT::v8i32,  4+2 },
    { ISD::SHL,     MVT::v4i64,  2+2 },
    { ISD::SRL,     MVT::v4i64,  4+2 },
    { ISD::SRA,     MVT::v4i64,  4+2 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    // If the right shift is constant then we'll fold the negation so
    // it's as cheap as a left shift.
    int ShiftISD = ISD;
    if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      ShiftISD = ISD::SHL;
    if (const auto *Entry =
            CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry SSE2UniformShiftCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i16, 2+2 }, // 2*psllw + split.
    { ISD::SHL,  MVT::v8i32,  2+2 }, // 2*pslld + split.
    { ISD::SHL,  MVT::v4i64,  2+2 }, // 2*psllq + split.

    { ISD::SRL,  MVT::v16i16, 2+2 }, // 2*psrlw + split.
    { ISD::SRL,  MVT::v8i32,  2+2 }, // 2*psrld + split.
    { ISD::SRL,  MVT::v4i64,  2+2 }, // 2*psrlq + split.

    { ISD::SRA,  MVT::v16i16, 2+2 }, // 2*psraw + split.
    { ISD::SRA,  MVT::v8i32,  2+2 }, // 2*psrad + split.
    { ISD::SRA,  MVT::v2i64,    4 }, // 2*psrad + shuffle.
    { ISD::SRA,  MVT::v4i64,  8+2 }, // 2*(2*psrad + shuffle) + split.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {

    // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
    if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
      return LT.first * 4; // 2*psrad + shuffle.

    if (const auto *Entry =
            CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // Vector shift left by non uniform constant can be lowered
    // into vector multiply.
    if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
        ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
      ISD = ISD::MUL;
  }
  static const CostTblEntry AVX2CostTable[] = {
    { ISD::SHL,  MVT::v16i8,   6 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v32i8,   6 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v64i8,  12 }, // 2*vpblendvb sequence.
    { ISD::SHL,  MVT::v8i16,   5 }, // extend/vpsrlvd/pack sequence.
    { ISD::SHL,  MVT::v16i16,  7 }, // extend/vpsrlvd/pack sequence.
    { ISD::SHL,  MVT::v32i16, 14 }, // 2*extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v16i8,   6 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v32i8,   6 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v64i8,  12 }, // 2*vpblendvb sequence.
    { ISD::SRL,  MVT::v8i16,   5 }, // extend/vpsrlvd/pack sequence.
    { ISD::SRL,  MVT::v16i16,  7 }, // extend/vpsrlvd/pack sequence.
    { ISD::SRL,  MVT::v32i16, 14 }, // 2*extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v16i8,  17 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v32i8,  17 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v64i8,  34 }, // 2*vpblendvb sequence.
    { ISD::SRA,  MVT::v8i16,   5 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v16i16,  7 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v32i16, 14 }, // 2*extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,   2 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,   2 }, // srl/xor/sub sequence.

    { ISD::SUB,  MVT::v32i8,   1 }, // psubb
    { ISD::ADD,  MVT::v32i8,   1 }, // paddb
    { ISD::SUB,  MVT::v16i16,  1 }, // psubw
    { ISD::ADD,  MVT::v16i16,  1 }, // paddw
    { ISD::SUB,  MVT::v8i32,   1 }, // psubd
    { ISD::ADD,  MVT::v8i32,   1 }, // paddd
    { ISD::SUB,  MVT::v4i64,   1 }, // psubq
    { ISD::ADD,  MVT::v4i64,   1 }, // paddq

    { ISD::MUL,  MVT::v16i16,  1 }, // pmullw
    { ISD::MUL,  MVT::v8i32,   2 }, // pmulld (Haswell from agner.org)
    { ISD::MUL,  MVT::v4i64,   6 }, // 3*pmuludq/3*shift/2*add

    { ISD::FNEG, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FNEG, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FADD, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FADD, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::f64,     1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/

    { ISD::FDIV, MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,     MVT::v16i16,  4 },
    { ISD::MUL,     MVT::v8i32,   5 }, // BTVER2 from http://www.agner.org/
    { ISD::MUL,     MVT::v4i64,  12 },

    { ISD::SUB,     MVT::v32i8,   4 },
    { ISD::ADD,     MVT::v32i8,   4 },
    { ISD::SUB,     MVT::v16i16,  4 },
    { ISD::ADD,     MVT::v16i16,  4 },
    { ISD::SUB,     MVT::v8i32,   4 },
    { ISD::ADD,     MVT::v8i32,   4 },
    { ISD::SUB,     MVT::v4i64,   4 },
    { ISD::ADD,     MVT::v4i64,   4 },

    { ISD::SHL,     MVT::v32i8,  22 }, // pblendvb sequence + split.
    { ISD::SHL,     MVT::v8i16,   6 }, // pblendvb sequence.
    { ISD::SHL,     MVT::v16i16, 13 }, // pblendvb sequence + split.
    { ISD::SHL,     MVT::v4i32,   3 }, // pslld/paddd/cvttps2dq/pmulld
    { ISD::SHL,     MVT::v8i32,   9 }, // pslld/paddd/cvttps2dq/pmulld + split
    { ISD::SHL,     MVT::v2i64,   2 }, // Shift each lane + blend.
    { ISD::SHL,     MVT::v4i64,   6 }, // Shift each lane + blend + split.

    { ISD::SRL,     MVT::v32i8,  23 }, // pblendvb sequence + split.
    { ISD::SRL,     MVT::v16i16, 28 }, // pblendvb sequence + split.
    { ISD::SRL,     MVT::v4i32,   6 }, // Shift each lane + blend.
    { ISD::SRL,     MVT::v8i32,  14 }, // Shift each lane + blend + split.
    { ISD::SRL,     MVT::v2i64,   2 }, // Shift each lane + blend.
    { ISD::SRL,     MVT::v4i64,   6 }, // Shift each lane + blend + split.

    { ISD::SRA,     MVT::v32i8,  44 }, // pblendvb sequence + split.
    { ISD::SRA,     MVT::v16i16, 28 }, // pblendvb sequence + split.
    { ISD::SRA,     MVT::v4i32,   6 }, // Shift each lane + blend.
    { ISD::SRA,     MVT::v8i32,  14 }, // Shift each lane + blend + split.
    { ISD::SRA,     MVT::v2i64,   5 }, // Shift each lane + blend.
    { ISD::SRA,     MVT::v4i64,  12 }, // Shift each lane + blend + split.

    { ISD::FNEG,    MVT::v4f64,   2 }, // BTVER2 from http://www.agner.org/
    { ISD::FNEG,    MVT::v8f32,   2 }, // BTVER2 from http://www.agner.org/

    { ISD::FMUL,    MVT::f64,     2 }, // BTVER2 from http://www.agner.org/
    { ISD::FMUL,    MVT::v2f64,   2 }, // BTVER2 from http://www.agner.org/
    { ISD::FMUL,    MVT::v4f64,   4 }, // BTVER2 from http://www.agner.org/

    { ISD::FDIV,    MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::f64,    22 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v2f64,  22 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v4f64,  44 }, // SNB from http://www.agner.org/
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry SSE42CostTable[] = {
    { ISD::FADD, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/

    { ISD::FSUB, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/

    { ISD::FMUL, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/

    { ISD::FDIV, MVT::f32,    14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    22 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  22 }, // Nehalem from http://www.agner.org/

    { ISD::MUL,  MVT::v2i64,   6 }  // 3*pmuludq/3*shift/2*add
  };

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry SSE41CostTable[] = {
    { ISD::SHL,  MVT::v16i8,  10 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v8i16,  11 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v4i32,   4 }, // pslld/paddd/cvttps2dq/pmulld

    { ISD::SRL,  MVT::v16i8,  11 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v8i16,  13 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v4i32,  16 }, // Shift each lane + blend.

    { ISD::SRA,  MVT::v16i8,  21 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v8i16,  13 }, // pblendvb sequence.

    { ISD::MUL,  MVT::v4i32,   2 }  // pmulld (Nehalem from agner.org)
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8,  13 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,  25 }, // cmpgtw sequence.
    { ISD::SHL,  MVT::v4i32,  16 }, // pslld/paddd/cvttps2dq/pmuludq.
    { ISD::SHL,  MVT::v2i64,   4 }, // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8,  14 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,  16 }, // cmpgtw sequence.
    { ISD::SRL,  MVT::v4i32,  12 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,   4 }, // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8,  27 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,  16 }, // cmpgtw sequence.
    { ISD::SRA,  MVT::v4i32,  12 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,   8 }, // srl/xor/sub splat+shuffle sequence.

    { ISD::MUL,  MVT::v8i16,   1 }, // pmullw
    { ISD::MUL,  MVT::v4i32,   6 }, // 3*pmuludq/4*shuffle
    { ISD::MUL,  MVT::v2i64,   8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,    23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  69 }, // Pentium IV from http://www.agner.org/

    { ISD::FNEG, MVT::f32,     1 }, // Pentium IV from http://www.agner.org/
    { ISD::FNEG, MVT::f64,     1 }, // Pentium IV from http://www.agner.org/
    { ISD::FNEG, MVT::v4f32,   1 }, // Pentium IV from http://www.agner.org/
    { ISD::FNEG, MVT::v2f64,   1 }, // Pentium IV from http://www.agner.org/

    { ISD::FADD, MVT::f32,     2 }, // Pentium IV from http://www.agner.org/
    { ISD::FADD, MVT::f64,     2 }, // Pentium IV from http://www.agner.org/

    { ISD::FSUB, MVT::f32,     2 }, // Pentium IV from http://www.agner.org/
    { ISD::FSUB, MVT::f64,     2 }, // Pentium IV from http://www.agner.org/
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32,    17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  34 }, // Pentium III from http://www.agner.org/

    { ISD::FNEG, MVT::f32,     2 }, // Pentium III from http://www.agner.org/
    { ISD::FNEG, MVT::v4f32,   2 }, // Pentium III from http://www.agner.org/

    { ISD::FADD, MVT::f32,     1 }, // Pentium III from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,   2 }, // Pentium III from http://www.agner.org/

    { ISD::FSUB, MVT::f32,     1 }, // Pentium III from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,   2 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry X64CostTbl[] = { // 64-bit targets
    { ISD::ADD,  MVT::i64,  1 }, // Core (Merom) from http://www.agner.org/
    { ISD::SUB,  MVT::i64,  1 }, // Core (Merom) from http://www.agner.org/
  };

  if (ST->is64Bit())
    if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
    { ISD::ADD,  MVT::i8,  1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD,  MVT::i16, 1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD,  MVT::i32, 1 }, // Pentium III from http://www.agner.org/

    { ISD::SUB,  MVT::i8,  1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB,  MVT::i16, 1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB,  MVT::i32, 1 }, // Pentium III from http://www.agner.org/
  };

  if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, LT.second))
    return LT.first * Entry->Cost;
  // It is not a good idea to vectorize division. We have to scalarize it and
  // in the process we will often end up having to spill regular
  // registers. The overhead of division is going to dominate most kernels
  // anyways so try hard to prevent vectorization of division - it is
  // generally a bad idea. Assume somewhat arbitrarily that we have to be able
  // to hide "20 cycles" for each lane.
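  // For example, a v4i32 sdiv by a non-constant on an SSE2 target (LT.first
  // == 1, 4 lanes) is reported as 20 * 1 * 4 * the scalar sdiv cost.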
  if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
                               ISD == ISD::UDIV || ISD == ISD::UREM)) {
    InstructionCost ScalarCost = getArithmeticInstrCost(
        Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info,
        TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
  }

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info);
}
InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                           VectorType *BaseTp,
                                           ArrayRef<int> Mask, int Index,
                                           VectorType *SubTp) {
  // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, BaseTp);

  Kind = improveShuffleKindFromMask(Kind, Mask);
  // Treat Transpose as 2-op shuffles - there's no difference in lowering.
  if (Kind == TTI::SK_Transpose)
    Kind = TTI::SK_PermuteTwoSrc;

  // For Broadcasts we are splatting the first element from the first input
  // register, so only need to reference that input and all the output
  // registers are the same.
  if (Kind == TTI::SK_Broadcast)
    LT.first = 1;
  // Subvector extractions are free if they start at the beginning of a
  // vector and cheap if the subvectors are aligned.
  if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
    int NumElts = LT.second.getVectorNumElements();
    if ((Index % NumElts) == 0)
      return 0;
    std::pair<InstructionCost, MVT> SubLT =
        TLI->getTypeLegalizationCost(DL, SubTp);
    if (SubLT.second.isVector()) {
      int NumSubElts = SubLT.second.getVectorNumElements();
      if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
        return SubLT.first;
      // Handle some cases for widening legalization. For now we only handle
      // cases where the original subvector was naturally aligned and evenly
      // fit in its legalized subvector type.
      // FIXME: Remove some of the alignment restrictions.
      // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
      // vectors.
      int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements();
      if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
          (NumSubElts % OrigSubElts) == 0 &&
          LT.second.getVectorElementType() ==
              SubLT.second.getVectorElementType() &&
          LT.second.getVectorElementType().getSizeInBits() ==
              BaseTp->getElementType()->getPrimitiveSizeInBits()) {
        assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
               "Unexpected number of elements!");
        auto *VecTy = FixedVectorType::get(BaseTp->getElementType(),
                                           LT.second.getVectorNumElements());
        auto *SubTy = FixedVectorType::get(BaseTp->getElementType(),
                                           SubLT.second.getVectorNumElements());
        int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
        InstructionCost ExtractCost = getShuffleCost(
            TTI::SK_ExtractSubvector, VecTy, None, ExtractIndex, SubTy);

        // If the original size is 32-bits or more, we can use pshufd. Otherwise
        // if we have SSSE3 we can use pshufb.
        if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
          return ExtractCost + 1; // pshufd or pshufb

        assert(SubTp->getPrimitiveSizeInBits() == 16 &&
               "Unexpected vector size");

        return ExtractCost + 2; // worst case pshufhw + pshufd
      }
    }
  }
  // Subvector insertions are cheap if the subvectors are aligned.
  // Note that in general, the insertion starting at the beginning of a vector
  // isn't free, because we need to preserve the rest of the wide vector.
  if (Kind == TTI::SK_InsertSubvector && LT.second.isVector()) {
    int NumElts = LT.second.getVectorNumElements();
    std::pair<InstructionCost, MVT> SubLT =
        TLI->getTypeLegalizationCost(DL, SubTp);
    if (SubLT.second.isVector()) {
      int NumSubElts = SubLT.second.getVectorNumElements();
      if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
        return SubLT.first;
    }

    // If the insertion isn't aligned, treat it like a 2-op shuffle.
    Kind = TTI::SK_PermuteTwoSrc;
  }
  // Handle some common (illegal) sub-vector types as they are often very cheap
  // to shuffle even on targets without PSHUFB.
  EVT VT = TLI->getValueType(DL, BaseTp);
  if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
      !ST->hasSSSE3()) {
    static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
      {TTI::SK_Broadcast,        MVT::v4i16, 1}, // pshuflw
      {TTI::SK_Broadcast,        MVT::v2i16, 1}, // pshuflw
      {TTI::SK_Broadcast,        MVT::v8i8,  2}, // punpck/pshuflw
      {TTI::SK_Broadcast,        MVT::v4i8,  2}, // punpck/pshuflw
      {TTI::SK_Broadcast,        MVT::v2i8,  1}, // punpck

      {TTI::SK_Reverse,          MVT::v4i16, 1}, // pshuflw
      {TTI::SK_Reverse,          MVT::v2i16, 1}, // pshuflw
      {TTI::SK_Reverse,          MVT::v4i8,  3}, // punpck/pshuflw/packus
      {TTI::SK_Reverse,          MVT::v2i8,  1}, // punpck

      {TTI::SK_PermuteTwoSrc,    MVT::v4i16, 2}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v2i16, 2}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v8i8,  7}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v4i8,  4}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v2i8,  2}, // punpck

      {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v8i8,  5}, // punpck/pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v4i8,  3}, // punpck/pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v2i8,  1}, // punpck
    };

    if (ST->hasSSE2())
      if (const auto *Entry =
              CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
        return Entry->Cost;
  }
  // We are going to permute multiple sources and the result will be in multiple
  // destinations. Providing an accurate cost only for splits where the element
  // type remains the same.
  if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
    MVT LegalVT = LT.second;
    if (LegalVT.isVector() &&
        LegalVT.getVectorElementType().getSizeInBits() ==
            BaseTp->getElementType()->getPrimitiveSizeInBits() &&
        LegalVT.getVectorNumElements() <
            cast<FixedVectorType>(BaseTp)->getNumElements()) {

      unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
      unsigned LegalVTSize = LegalVT.getStoreSize();
      // Number of source vectors after legalization:
      unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
      // Number of destination vectors after legalization:
      InstructionCost NumOfDests = LT.first;

      auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(),
                                              LegalVT.getVectorNumElements());

      InstructionCost NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
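      // e.g. a v16i32 single-source permute on an AVX1 target legalizes to
      // 2 sources and 2 destinations, so it is costed as (2 - 1) * 2 = 2
      // two-source shuffles of the legalized v8i32 type.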
      return NumOfShuffles * getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy,
                                            None, 0, nullptr);
    }

    return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
  }
  // For 2-input shuffles, we must account for splitting the 2 inputs into many.
  if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
    // We assume that source and destination have the same vector type.
    InstructionCost NumOfDests = LT.first;
    InstructionCost NumOfShufflesPerDest = LT.first * 2 - 1;
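    // e.g. if the type splits into 2 legal registers, each destination needs
    // 2 * 2 - 1 = 3 two-source shuffles, i.e. 6 in total.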
    LT.first = NumOfDests * NumOfShufflesPerDest;
  }
  static const CostTblEntry AVX512FP16ShuffleTbl[] = {
    {TTI::SK_Broadcast,        MVT::v32f16, 1}, // vpbroadcastw
    {TTI::SK_Broadcast,        MVT::v16f16, 1}, // vpbroadcastw
    {TTI::SK_Broadcast,        MVT::v8f16,  1}, // vpbroadcastw

    {TTI::SK_Reverse,          MVT::v32f16, 2}, // vpermw
    {TTI::SK_Reverse,          MVT::v16f16, 2}, // vpermw
    {TTI::SK_Reverse,          MVT::v8f16,  1}, // vpshufb

    {TTI::SK_PermuteSingleSrc, MVT::v32f16, 2}, // vpermw
    {TTI::SK_PermuteSingleSrc, MVT::v16f16, 2}, // vpermw
    {TTI::SK_PermuteSingleSrc, MVT::v8f16,  1}, // vpshufb

    {TTI::SK_PermuteTwoSrc,    MVT::v32f16, 2}, // vpermt2w
    {TTI::SK_PermuteTwoSrc,    MVT::v16f16, 2}, // vpermt2w
    {TTI::SK_PermuteTwoSrc,    MVT::v8f16,  2}  // vpermt2w
  };

  if (!ST->useSoftFloat() && ST->hasFP16())
    if (const auto *Entry =
            CostTableLookup(AVX512FP16ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX512VBMIShuffleTbl[] = {
    {TTI::SK_Reverse,          MVT::v64i8, 1}, // vpermb
    {TTI::SK_Reverse,          MVT::v32i8, 1}, // vpermb

    {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
    {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb

    {TTI::SK_PermuteTwoSrc,    MVT::v64i8, 2}, // vpermt2b
    {TTI::SK_PermuteTwoSrc,    MVT::v32i8, 2}, // vpermt2b
    {TTI::SK_PermuteTwoSrc,    MVT::v16i8, 2}  // vpermt2b
  };

  if (ST->hasVBMI())
    if (const auto *Entry =
            CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX512BWShuffleTbl[] = {
    {TTI::SK_Broadcast,        MVT::v32i16, 1}, // vpbroadcastw
    {TTI::SK_Broadcast,        MVT::v64i8,  1}, // vpbroadcastb

    {TTI::SK_Reverse,          MVT::v32i16, 2}, // vpermw
    {TTI::SK_Reverse,          MVT::v16i16, 2}, // vpermw
    {TTI::SK_Reverse,          MVT::v64i8,  2}, // pshufb + vshufi64x2

    {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
    {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
    {TTI::SK_PermuteSingleSrc, MVT::v64i8,  8}, // extend to v32i16

    {TTI::SK_PermuteTwoSrc,    MVT::v32i16, 2}, // vpermt2w
    {TTI::SK_PermuteTwoSrc,    MVT::v16i16, 2}, // vpermt2w
    {TTI::SK_PermuteTwoSrc,    MVT::v8i16,  2}, // vpermt2w
    {TTI::SK_PermuteTwoSrc,    MVT::v64i8, 19}, // 6 * v32i8 + 1

    {TTI::SK_Select,           MVT::v32i16, 1}, // vblendmw
    {TTI::SK_Select,           MVT::v64i8,  1}, // vblendmb
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX512ShuffleTbl[] = {
    {TTI::SK_Broadcast,        MVT::v8f64,  1}, // vbroadcastpd
    {TTI::SK_Broadcast,        MVT::v16f32, 1}, // vbroadcastps
    {TTI::SK_Broadcast,        MVT::v8i64,  1}, // vpbroadcastq
    {TTI::SK_Broadcast,        MVT::v16i32, 1}, // vpbroadcastd
    {TTI::SK_Broadcast,        MVT::v32i16, 1}, // vpbroadcastw
    {TTI::SK_Broadcast,        MVT::v64i8,  1}, // vpbroadcastb

    {TTI::SK_Reverse,          MVT::v8f64,  1}, // vpermpd
    {TTI::SK_Reverse,          MVT::v16f32, 1}, // vpermps
    {TTI::SK_Reverse,          MVT::v8i64,  1}, // vpermq
    {TTI::SK_Reverse,          MVT::v16i32, 1}, // vpermd
    {TTI::SK_Reverse,          MVT::v32i16, 7}, // per mca
    {TTI::SK_Reverse,          MVT::v64i8,  7}, // per mca

    {TTI::SK_PermuteSingleSrc, MVT::v8f64,  1}, // vpermpd
    {TTI::SK_PermuteSingleSrc, MVT::v4f64,  1}, // vpermpd
    {TTI::SK_PermuteSingleSrc, MVT::v2f64,  1}, // vpermpd
    {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps
    {TTI::SK_PermuteSingleSrc, MVT::v8f32,  1}, // vpermps
    {TTI::SK_PermuteSingleSrc, MVT::v4f32,  1}, // vpermps
    {TTI::SK_PermuteSingleSrc, MVT::v8i64,  1}, // vpermq
    {TTI::SK_PermuteSingleSrc, MVT::v4i64,  1}, // vpermq
    {TTI::SK_PermuteSingleSrc, MVT::v2i64,  1}, // vpermq
    {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd
    {TTI::SK_PermuteSingleSrc, MVT::v8i32,  1}, // vpermd
    {TTI::SK_PermuteSingleSrc, MVT::v4i32,  1}, // vpermd
    {TTI::SK_PermuteSingleSrc, MVT::v16i8,  1}, // pshufb

    {TTI::SK_PermuteTwoSrc,    MVT::v8f64,  1}, // vpermt2pd
    {TTI::SK_PermuteTwoSrc,    MVT::v16f32, 1}, // vpermt2ps
    {TTI::SK_PermuteTwoSrc,    MVT::v8i64,  1}, // vpermt2q
    {TTI::SK_PermuteTwoSrc,    MVT::v16i32, 1}, // vpermt2d
    {TTI::SK_PermuteTwoSrc,    MVT::v4f64,  1}, // vpermt2pd
    {TTI::SK_PermuteTwoSrc,    MVT::v8f32,  1}, // vpermt2ps
    {TTI::SK_PermuteTwoSrc,    MVT::v4i64,  1}, // vpermt2q
    {TTI::SK_PermuteTwoSrc,    MVT::v8i32,  1}, // vpermt2d
    {TTI::SK_PermuteTwoSrc,    MVT::v2f64,  1}, // vpermt2pd
    {TTI::SK_PermuteTwoSrc,    MVT::v4f32,  1}, // vpermt2ps
    {TTI::SK_PermuteTwoSrc,    MVT::v2i64,  1}, // vpermt2q
    {TTI::SK_PermuteTwoSrc,    MVT::v4i32,  1}, // vpermt2d

    // FIXME: This just applies the type legalization cost rules above
    // assuming these completely split.
    {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14},
    {TTI::SK_PermuteSingleSrc, MVT::v64i8,  14},
    {TTI::SK_PermuteTwoSrc,    MVT::v32i16, 42},
    {TTI::SK_PermuteTwoSrc,    MVT::v64i8,  42},

    {TTI::SK_Select,           MVT::v32i16, 1}, // vpternlogq
    {TTI::SK_Select,           MVT::v64i8,  1}, // vpternlogq
    {TTI::SK_Select,           MVT::v8f64,  1}, // vblendmpd
    {TTI::SK_Select,           MVT::v16f32, 1}, // vblendmps
    {TTI::SK_Select,           MVT::v8i64,  1}, // vblendmq
    {TTI::SK_Select,           MVT::v16i32, 1}, // vblendmd
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX2ShuffleTbl[] = {
    {TTI::SK_Broadcast,        MVT::v4f64,  1}, // vbroadcastpd
    {TTI::SK_Broadcast,        MVT::v8f32,  1}, // vbroadcastps
    {TTI::SK_Broadcast,        MVT::v4i64,  1}, // vpbroadcastq
    {TTI::SK_Broadcast,        MVT::v8i32,  1}, // vpbroadcastd
    {TTI::SK_Broadcast,        MVT::v16i16, 1}, // vpbroadcastw
    {TTI::SK_Broadcast,        MVT::v32i8,  1}, // vpbroadcastb

    {TTI::SK_Reverse,          MVT::v4f64,  1}, // vpermpd
    {TTI::SK_Reverse,          MVT::v8f32,  1}, // vpermps
    {TTI::SK_Reverse,          MVT::v4i64,  1}, // vpermq
    {TTI::SK_Reverse,          MVT::v8i32,  1}, // vpermd
    {TTI::SK_Reverse,          MVT::v16i16, 2}, // vperm2i128 + pshufb
    {TTI::SK_Reverse,          MVT::v32i8,  2}, // vperm2i128 + pshufb

    {TTI::SK_Select,           MVT::v16i16, 1}, // vpblendvb
    {TTI::SK_Select,           MVT::v32i8,  1}, // vpblendvb

    {TTI::SK_PermuteSingleSrc, MVT::v4f64,  1}, // vpermpd
    {TTI::SK_PermuteSingleSrc, MVT::v8f32,  1}, // vpermps
    {TTI::SK_PermuteSingleSrc, MVT::v4i64,  1}, // vpermq
    {TTI::SK_PermuteSingleSrc, MVT::v8i32,  1}, // vpermd
    {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
    {TTI::SK_PermuteSingleSrc, MVT::v32i8,  4}, // vperm2i128 + 2*vpshufb

    {TTI::SK_PermuteTwoSrc,    MVT::v4f64,  3}, // 2*vpermpd + vblendpd
    {TTI::SK_PermuteTwoSrc,    MVT::v8f32,  3}, // 2*vpermps + vblendps
    {TTI::SK_PermuteTwoSrc,    MVT::v4i64,  3}, // 2*vpermq + vpblendd
    {TTI::SK_PermuteTwoSrc,    MVT::v8i32,  3}, // 2*vpermd + vpblendd
    {TTI::SK_PermuteTwoSrc,    MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
    {TTI::SK_PermuteTwoSrc,    MVT::v32i8,  7}, // 2*vperm2i128 + 4*vpshufb
  };

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry XOPShuffleTbl[] = {
    {TTI::SK_PermuteSingleSrc, MVT::v4f64,  2}, // vperm2f128 + vpermil2pd
    {TTI::SK_PermuteSingleSrc, MVT::v8f32,  2}, // vperm2f128 + vpermil2ps
    {TTI::SK_PermuteSingleSrc, MVT::v4i64,  2}, // vperm2f128 + vpermil2pd
    {TTI::SK_PermuteSingleSrc, MVT::v8i32,  2}, // vperm2f128 + vpermil2ps
    {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
    {TTI::SK_PermuteSingleSrc, MVT::v32i8,  4}, // vextractf128 + 2*vpperm

    {TTI::SK_PermuteTwoSrc,    MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm
    {TTI::SK_PermuteTwoSrc,    MVT::v8i16,  1}, // vpperm
    {TTI::SK_PermuteTwoSrc,    MVT::v32i8,  9}, // 2*vextractf128 + 6*vpperm
    {TTI::SK_PermuteTwoSrc,    MVT::v16i8,  1}, // vpperm
  };

  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX1ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v4f64, 2},  // vperm2f128 + vpermilpd
      {TTI::SK_Broadcast, MVT::v8f32, 2},  // vperm2f128 + vpermilps
      {TTI::SK_Broadcast, MVT::v4i64, 2},  // vperm2f128 + vpermilpd
      {TTI::SK_Broadcast, MVT::v8i32, 2},  // vperm2f128 + vpermilps
      {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128
      {TTI::SK_Broadcast, MVT::v32i8, 2},  // vpshufb + vinsertf128

      {TTI::SK_Reverse, MVT::v4f64, 2},  // vperm2f128 + vpermilpd
      {TTI::SK_Reverse, MVT::v8f32, 2},  // vperm2f128 + vpermilps
      {TTI::SK_Reverse, MVT::v4i64, 2},  // vperm2f128 + vpermilpd
      {TTI::SK_Reverse, MVT::v8i32, 2},  // vperm2f128 + vpermilps
      {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb
      {TTI::SK_Reverse, MVT::v32i8, 4},  // vextractf128 + 2*pshufb

      {TTI::SK_Select, MVT::v4i64, 1},  // vblendpd
      {TTI::SK_Select, MVT::v4f64, 1},  // vblendpd
      {TTI::SK_Select, MVT::v8i32, 1},  // vblendps
      {TTI::SK_Select, MVT::v8f32, 1},  // vblendps
      {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor
      {TTI::SK_Select, MVT::v32i8, 3},  // vpand + vpandn + vpor

      {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2},  // vperm2f128 + vshufpd
      {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2},  // vperm2f128 + vshufpd
      {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4},  // 2*vperm2f128 + 2*vshufps
      {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4},  // 2*vperm2f128 + 2*vshufps
      {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb
                                                  // + 2*por + vinsertf128
      {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8},  // vextractf128 + 4*pshufb
                                                  // + 2*por + vinsertf128

      {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3},   // 2*vperm2f128 + vshufpd
      {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3},   // 2*vperm2f128 + vshufpd
      {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4},   // 2*vperm2f128 + 2*vshufps
      {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4},   // 2*vperm2f128 + 2*vshufps
      {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb
                                                // + 4*por + vinsertf128
      {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15},  // 2*vextractf128 + 8*pshufb
                                                // + 4*por + vinsertf128
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry SSE41ShuffleTbl[] = {
      {TTI::SK_Select, MVT::v2i64, 1}, // pblendw
      {TTI::SK_Select, MVT::v2f64, 1}, // movsd
      {TTI::SK_Select, MVT::v4i32, 1}, // pblendw
      {TTI::SK_Select, MVT::v4f32, 1}, // blendps
      {TTI::SK_Select, MVT::v8i16, 1}, // pblendw
      {TTI::SK_Select, MVT::v16i8, 1}  // pblendvb
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry SSSE3ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb
      {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb

      {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb
      {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb

      {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por
      {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por

      {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
      {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb

      {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por
      {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por
  };

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry SSE2ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd
      {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd
      {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd
      {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd
      {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd

      {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd
      {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd
      {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd
      {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
      {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
                                        // + 2*pshufd + 2*unpck + packus

      {TTI::SK_Select, MVT::v2i64, 1}, // movsd
      {TTI::SK_Select, MVT::v2f64, 1}, // movsd
      {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps
      {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por
      {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por

      {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1},  // shufpd
      {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1},  // pshufd
      {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1},  // pshufd
      {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5},  // 2*pshuflw + 2*pshufhw
      {TTI::SK_PermuteSingleSrc, MVT::v16i8, 10}, // 2*pshuflw + 2*pshufhw
                                                  // + 2*pshufd + 2*unpck + 2*packus

      {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1},  // shufpd
      {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1},  // shufpd
      {TTI::SK_PermuteTwoSrc, MVT::v4i32, 2},  // 2*{unpck,movsd,pshufd}
      {TTI::SK_PermuteTwoSrc, MVT::v8i16, 8},  // blend+permute
      {TTI::SK_PermuteTwoSrc, MVT::v16i8, 13}, // blend+permute
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry SSE1ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v4f32, 1},        // shufps
      {TTI::SK_Reverse, MVT::v4f32, 1},          // shufps
      {TTI::SK_Select, MVT::v4f32, 2},           // 2*shufps
      {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1}, // shufps
      {TTI::SK_PermuteTwoSrc, MVT::v4f32, 2},    // 2*shufps
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
}
InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
                                             Type *Src,
                                             TTI::CastContextHint CCH,
                                             TTI::TargetCostKind CostKind,
                                             const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // TODO: Allow non-throughput costs that aren't binary.
  auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  };

  // The cost tables include both specific, custom (non-legal) src/dst type
  // conversions and generic, legalized types. We test for customs first, before
  // falling back to legalization.
  // FIXME: Need a better design of the cost table to handle non-simple types of
  // potential massive combinations (elem_num x src_type x dst_type).
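  // Note on lookup order: the most feature-specific tables below (e.g.
  // AVX512BW) are consulted before the more generic ones (e.g. AVX512F), so a
  // subtarget always gets the cheapest sequence it can actually emit.
  // Illustrative example: a sext of <32 x i8> to <32 x i16> on an AVX512BW
  // target matches { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 } and costs
  // 1; without BWI it falls through to the AVX512F entry with cost 3.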
  static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
    { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },

    // Mask sign extend has an instruction.
    { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v64i8, MVT::v64i1, 1 },

    // Mask zero extend is a sext + shift.
    { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v64i8, MVT::v64i1, 2 },

    { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 2 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 2 }, // vpmovwb
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 }, // vpmovwb
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, // vpmovwb
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v32i1, MVT::v32i16, 2 },
    { ISD::TRUNCATE, MVT::v64i1, MVT::v64i8, 2 },
  };
  static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },

    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },

    { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 },
    { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 },

    { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
    { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
  };

  // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
  // 256-bit wide vectors.
  static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 },
    { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 },
    { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 },

    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // zmm vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // zmm vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // zmm vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 }, // vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // zmm vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // zmm vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 }, // vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v2i8, MVT::v2i32, 2 }, // vpmovdb
    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 2 }, // vpmovdb
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 2 }, // vpmovdb
    { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2 }, // vpmovdb
    { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 2 }, // vpmovqb
    { ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 1 }, // vpshufb
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 2 }, // vpmovqb
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 2 }, // vpmovqw
    { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 }, // vpmovqd
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // zmm vpmovqd
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 5 }, // 2*vpmovqd+concat+vpmovdb

    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, // extend to v16i32
    { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 8 },

    // Sign extend is zmm vpternlogd+vptruncdb.
    // Zero extend is zmm broadcast load+vptruncdw.
    { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 4 },

    // Sign extend is zmm vpternlogd+vptruncdw.
    // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw.
    { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },

    { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // zmm vpternlogd
    { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // zmm vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // zmm vpternlogd
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // zmm vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // zmm vpternlogd
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // zmm vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // zmm vpternlogq
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // zmm vpternlogq+psrlq
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // zmm vpternlogq
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // zmm vpternlogq+psrlq

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 }, // vpternlogd
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, // vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 }, // vpternlogq
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 }, // vpternlogq+psrlq

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 },

    { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
    { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right

    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },

    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 5 },

    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 },
    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f64, 7 },
    { ISD::FP_TO_SINT, MVT::v32i8, MVT::v32f64, 15 },
    { ISD::FP_TO_SINT, MVT::v64i8, MVT::v64f32, 11 },
    { ISD::FP_TO_SINT, MVT::v64i8, MVT::v64f64, 31 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f64, 3 },
    { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f64, 7 },
    { ISD::FP_TO_SINT, MVT::v32i16, MVT::v32f32, 5 },
    { ISD::FP_TO_SINT, MVT::v32i16, MVT::v32f64, 15 },
    { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 1 },
    { ISD::FP_TO_SINT, MVT::v16i32, MVT::v16f64, 3 },

    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f64, 3 },
    { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f64, 3 },
    { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 },
    { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 3 },
    { ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f32, 3 },
  };
  static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] {
    // Mask sign extend has an instruction.
    { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },

    // Mask zero extend is a sext + shift.
    { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },

    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 },
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // vpsllw+vptestmb
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // vpsllw+vptestmw
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // vpsllw+vptestmb
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // vpsllw+vptestmw
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // vpsllw+vptestmb
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // vpsllw+vptestmw
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // vpsllw+vptestmb
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // vpsllw+vptestmw
    { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // vpsllw+vptestmb
  };
  static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },

    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },

    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 },

    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
  };
  static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = {
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 8 }, // split+2*v8i8
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 8 }, // split+2*v8i16
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // vpmovqd
    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 }, // vpmovqb
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 }, // vpmovqw
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 }, // vpmovwb

    // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb
    // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb
    { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 5 },
    { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 6 },
    { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 5 },
    { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 6 },
    { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 5 },
    { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 10 },
    { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 12 },

    // sign extend is vpcmpeq+maskedmove+vpmovdw
    // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw
    { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
    { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 5 },
    { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 5 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 5 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 },

    { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // vpternlogd
    { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // vpternlogd
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // vpternlogd
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // vpternlogq
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // vpternlogq+psrlq
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // vpternlogq
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // vpternlogq+psrlq

    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },

    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 },

    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 1 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 5 },

    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 },
    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 },
    { ISD::FP_TO_SINT, MVT::v32i8, MVT::v32f32, 5 },

    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 1 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 1 },
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 },
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 },
  };
  static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 1 },

    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },

    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 },

    { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 1 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 1 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 1 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 4 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 4 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 1 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 1 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 5 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 },

    { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 },
    { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 },

    { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 1 },
    { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f32, 1 },
    { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 3 },

    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 3 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 3 },
    { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4 },
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 3 },
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 4 },

    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 3 },

    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 2 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 },
  };
  static const TypeConversionCostTblEntry AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },

    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 },

    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 4 },
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 5 },
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 4 },
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 9 },
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i64, 11 },

    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // and+extract+packuswb
    { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 5 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 5 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 3 }, // and+extract+2*packusdw
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },

    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 5 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 8 },

    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 4 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 10 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 18 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 10 },

    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 },
    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f64, 2 },
    { ISD::FP_TO_SINT, MVT::v32i8, MVT::v8f32, 2 },
    { ISD::FP_TO_SINT, MVT::v32i8, MVT::v4f64, 2 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 2 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f64, 2 },
    { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 2 },
    { ISD::FP_TO_SINT, MVT::v16i16, MVT::v4f64, 2 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 2 },
    { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f32, 2 },
    { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 5 },

    { ISD::FP_TO_UINT, MVT::v16i8, MVT::v8f32, 2 },
    { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f64, 2 },
    { ISD::FP_TO_UINT, MVT::v32i8, MVT::v8f32, 2 },
    { ISD::FP_TO_UINT, MVT::v32i8, MVT::v4f64, 2 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 2 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f64, 2 },
    { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 2 },
    { ISD::FP_TO_UINT, MVT::v16i16, MVT::v4f64, 2 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 6 },
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 7 },
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 7 },

    { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 },
    { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 },
  };
  static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 1 },

    // These truncates end up widening elements.
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 1 }, // PMOVXZBQ
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 1 }, // PMOVXZWQ
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 1 }, // PMOVXZBD

    { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 2 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 2 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 2 },

    { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 1 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 1 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 1 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },

    { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 1 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 1 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 4 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 4 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 12 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 22 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 4 },

    { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 1 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 1 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 1 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 1 },
    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 1 },

    { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 1 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 1 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 4 },
    { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 4 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
  };
  static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
    // These are somewhat magic numbers justified by comparing the
    // output of llvm-mca for our various supported scheduler models
    // and basing it off the worst case scenario.
    { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 3 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 3 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 3 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 3 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 3 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 8 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 8 },

    { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 3 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 3 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 8 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 9 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 4 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 7 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 7 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 18 },

    { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 4 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 4 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 4 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 4 },
    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f32, 6 },
    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 6 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 5 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 5 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 4 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 4 },

    { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 4 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 4 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 15 },
    { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 6 },
    { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 6 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 5 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 5 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 8 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 8 },

    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 2 },

    // These truncates are really widening elements.
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 1 }, // PSHUFD
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // PUNPCKLWD+DQ
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // PUNPCKLBW+WD+PSHUFD
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 1 }, // PUNPCKLWD
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // PUNPCKLBW+WD
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 1 }, // PUNPCKLBW

    { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 2 }, // PAND+PACKUSWB
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 3 }, // PAND+2*PACKUSWB
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 },
    { ISD::TRUNCATE, MVT::v2i16, MVT::v2i32, 1 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 3 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
    { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 4 }, // PAND+3*PACKUSWB
    { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 2 }, // PSHUFD+PSHUFLW
    { ISD::TRUNCATE, MVT::v4i32, MVT::v2i64, 1 }, // PSHUFD
  };
  // Attempt to map directly to (simple) MVT types to let us match custom entries.
  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (SrcTy.isSimple() && DstTy.isSimple()) {
    MVT SimpleSrcTy = SrcTy.getSimpleVT();
    MVT SimpleDstTy = DstTy.getSimpleVT();

    if (ST->useAVX512Regs()) {
      if (ST->hasBWI())
        if (const auto *Entry = ConvertCostTableLookup(
                AVX512BWConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
          return AdjustCost(Entry->Cost);

      if (ST->hasDQI())
        if (const auto *Entry = ConvertCostTableLookup(
                AVX512DQConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
          return AdjustCost(Entry->Cost);

      if (ST->hasAVX512())
        if (const auto *Entry = ConvertCostTableLookup(
                AVX512FConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
          return AdjustCost(Entry->Cost);
    }

    if (ST->hasBWI())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512BWVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);

    if (ST->hasDQI())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512DQVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);

    if (ST->hasAVX512())
      if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);

    if (ST->hasAVX2()) {
      if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);
    }

    if (ST->hasAVX()) {
      if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);
    }

    if (ST->hasSSE41()) {
      if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);
    }

    if (ST->hasSSE2()) {
      if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);
    }
  }
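  // Note: in the legalized-type path below, the per-entry cost is scaled by
  // std::max(LTSrc.first, LTDest.first), i.e. by the larger legalization split
  // factor of the source and destination types.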
  // Fall back to legalized types.
  std::pair<InstructionCost, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<InstructionCost, MVT> LTDest =
      TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->useAVX512Regs()) {
    if (ST->hasBWI())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512BWConversionTbl, ISD, LTDest.second, LTSrc.second))
        return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

    if (ST->hasDQI())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512DQConversionTbl, ISD, LTDest.second, LTSrc.second))
        return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

    if (ST->hasAVX512())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512FConversionTbl, ISD, LTDest.second, LTSrc.second))
        return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
  }

  if (ST->hasBWI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasDQI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasAVX512())
    if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasAVX2())
    if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasAVX())
    if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasSSE41())
    if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasSSE2())
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
  // Fallback: for i8/i16 sitofp/uitofp cases we need to extend to i32 for
  // the conversion.
  if ((ISD == ISD::SINT_TO_FP || ISD == ISD::UINT_TO_FP) &&
      1 < Src->getScalarSizeInBits() && Src->getScalarSizeInBits() < 32) {
    Type *ExtSrc = Src->getWithNewBitWidth(32);
    unsigned ExtOpc =
        (ISD == ISD::SINT_TO_FP) ? Instruction::SExt : Instruction::ZExt;

    // For scalar loads the extend would be free.
    InstructionCost ExtCost = 0;
    if (!(Src->isIntegerTy() && I && isa<LoadInst>(I->getOperand(0))))
      ExtCost = getCastInstrCost(ExtOpc, ExtSrc, Src, CCH, CostKind);

    return ExtCost + getCastInstrCost(Instruction::SIToFP, Dst, ExtSrc,
                                      TTI::CastContextHint::None, CostKind);
  }

  // Fallback: for fptosi/fptoui i8/i16 cases we need to truncate from an
  // fptosi to i32.
  if ((ISD == ISD::FP_TO_SINT || ISD == ISD::FP_TO_UINT) &&
      1 < Dst->getScalarSizeInBits() && Dst->getScalarSizeInBits() < 32) {
    Type *TruncDst = Dst->getWithNewBitWidth(32);
    return getCastInstrCost(Instruction::FPToSI, TruncDst, Src, CCH, CostKind) +
           getCastInstrCost(Instruction::Trunc, Dst, TruncDst,
                            TTI::CastContextHint::None, CostKind);
  }

  return AdjustCost(
      BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
}
InstructionCost X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                               Type *CondTy,
                                               CmpInst::Predicate VecPred,
                                               TTI::TargetCostKind CostKind,
                                               const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                     I);

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  unsigned ExtraCost = 0;
  if (I && (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)) {
    // Some vector comparison predicates cost extra instructions.
    if (MTy.isVector() &&
        !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
          (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
          ST->hasBWI())) {
      switch (cast<CmpInst>(I)->getPredicate()) {
      case CmpInst::Predicate::ICMP_NE:
        // xor(cmpeq(x,y),-1)
        ExtraCost = 1;
        break;
      case CmpInst::Predicate::ICMP_SGE:
      case CmpInst::Predicate::ICMP_SLE:
        // xor(cmpgt(x,y),-1)
        ExtraCost = 1;
        break;
      case CmpInst::Predicate::ICMP_ULT:
      case CmpInst::Predicate::ICMP_UGT:
        // cmpgt(xor(x,signbit),xor(y,signbit))
        // xor(cmpeq(pmaxu(x,y),x),-1)
        ExtraCost = 2;
        break;
      case CmpInst::Predicate::ICMP_ULE:
      case CmpInst::Predicate::ICMP_UGE:
        if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
            (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
          // cmpeq(psubus(x,y),0)
          // cmpeq(pminu(x,y),x)
          ExtraCost = 1;
        } else {
          // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
          ExtraCost = 3;
        }
        break;
      default:
        break;
      }
    }
  }
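  // The per-predicate ExtraCost computed above is added on top of the base
  // SETCC/SELECT cost looked up in the per-ISA tables below, and the sum is
  // scaled by the legalization split factor (LT.first).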
  static const CostTblEntry SLMCostTbl[] = {
    // slm pcmpeq/pcmpgt throughput is 2
    { ISD::SETCC, MVT::v2i64, 2 },
  };

  static const CostTblEntry AVX512BWCostTbl[] = {
    { ISD::SETCC, MVT::v32i16, 1 },
    { ISD::SETCC, MVT::v64i8, 1 },

    { ISD::SELECT, MVT::v32i16, 1 },
    { ISD::SELECT, MVT::v64i8, 1 },
  };

  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::SETCC, MVT::v8i64, 1 },
    { ISD::SETCC, MVT::v16i32, 1 },
    { ISD::SETCC, MVT::v8f64, 1 },
    { ISD::SETCC, MVT::v16f32, 1 },

    { ISD::SELECT, MVT::v8i64, 1 },
    { ISD::SELECT, MVT::v16i32, 1 },
    { ISD::SELECT, MVT::v8f64, 1 },
    { ISD::SELECT, MVT::v16f32, 1 },

    { ISD::SETCC, MVT::v32i16, 2 }, // FIXME: should probably be 4
    { ISD::SETCC, MVT::v64i8, 2 }, // FIXME: should probably be 4

    { ISD::SELECT, MVT::v32i16, 2 }, // FIXME: should be 3
    { ISD::SELECT, MVT::v64i8, 2 }, // FIXME: should be 3
  };

  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64, 1 },
    { ISD::SETCC, MVT::v8i32, 1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8, 1 },

    { ISD::SELECT, MVT::v4i64, 1 }, // pblendvb
    { ISD::SELECT, MVT::v8i32, 1 }, // pblendvb
    { ISD::SELECT, MVT::v16i16, 1 }, // pblendvb
    { ISD::SELECT, MVT::v32i8, 1 }, // pblendvb
  };

  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64, 1 },
    { ISD::SETCC, MVT::v8f32, 1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64, 4 },
    { ISD::SETCC, MVT::v8i32, 4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8, 4 },

    { ISD::SELECT, MVT::v4f64, 1 }, // vblendvpd
    { ISD::SELECT, MVT::v8f32, 1 }, // vblendvps
    { ISD::SELECT, MVT::v4i64, 1 }, // vblendvpd
    { ISD::SELECT, MVT::v8i32, 1 }, // vblendvps
    { ISD::SELECT, MVT::v16i16, 3 }, // vandps + vandnps + vorps
    { ISD::SELECT, MVT::v32i8, 3 }, // vandps + vandnps + vorps
  };

  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, 1 },
    { ISD::SETCC, MVT::v4f32, 1 },
    { ISD::SETCC, MVT::v2i64, 1 },
  };

  static const CostTblEntry SSE41CostTbl[] = {
    { ISD::SELECT, MVT::v2f64, 1 }, // blendvpd
    { ISD::SELECT, MVT::v4f32, 1 }, // blendvps
    { ISD::SELECT, MVT::v2i64, 1 }, // pblendvb
    { ISD::SELECT, MVT::v4i32, 1 }, // pblendvb
    { ISD::SELECT, MVT::v8i16, 1 }, // pblendvb
    { ISD::SELECT, MVT::v16i8, 1 }, // pblendvb
  };

  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, 2 },
    { ISD::SETCC, MVT::f64, 1 },
    { ISD::SETCC, MVT::v2i64, 8 },
    { ISD::SETCC, MVT::v4i32, 1 },
    { ISD::SETCC, MVT::v8i16, 1 },
    { ISD::SETCC, MVT::v16i8, 1 },

    { ISD::SELECT, MVT::v2f64, 3 }, // andpd + andnpd + orpd
    { ISD::SELECT, MVT::v2i64, 3 }, // pand + pandn + por
    { ISD::SELECT, MVT::v4i32, 3 }, // pand + pandn + por
    { ISD::SELECT, MVT::v8i16, 3 }, // pand + pandn + por
    { ISD::SELECT, MVT::v16i8, 3 }, // pand + pandn + por
  };

  static const CostTblEntry SSE1CostTbl[] = {
    { ISD::SETCC, MVT::v4f32, 2 },
    { ISD::SETCC, MVT::f32, 1 },

    { ISD::SELECT, MVT::v4f32, 3 }, // andps + andnps + orps
  };

  if (ST->isSLM())
    if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
}
unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }

InstructionCost
X86TTIImpl::getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                           TTI::TargetCostKind CostKind) {

  // Costs should match the codegen from:
  // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
  // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
  // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
  // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
  // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll

  // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not
  // specialized in these tables yet.
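  // Illustrative example: a ctpop on <8 x i64> with AVX512VPOPCNTDQ hits the
  // { ISD::CTPOP, MVT::v8i64, 1 } entry below (a single vpopcntq), whereas a
  // plain AVX-512F target pays the { ISD::CTPOP, MVT::v8i64, 16 } bit-twiddling
  // expansion from the generic AVX512 table.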
2585 static const CostTblEntry AVX512BITALGCostTbl
[] = {
2586 { ISD::CTPOP
, MVT::v32i16
, 1 },
2587 { ISD::CTPOP
, MVT::v64i8
, 1 },
2588 { ISD::CTPOP
, MVT::v16i16
, 1 },
2589 { ISD::CTPOP
, MVT::v32i8
, 1 },
2590 { ISD::CTPOP
, MVT::v8i16
, 1 },
2591 { ISD::CTPOP
, MVT::v16i8
, 1 },
2593 static const CostTblEntry AVX512VPOPCNTDQCostTbl
[] = {
2594 { ISD::CTPOP
, MVT::v8i64
, 1 },
2595 { ISD::CTPOP
, MVT::v16i32
, 1 },
2596 { ISD::CTPOP
, MVT::v4i64
, 1 },
2597 { ISD::CTPOP
, MVT::v8i32
, 1 },
2598 { ISD::CTPOP
, MVT::v2i64
, 1 },
2599 { ISD::CTPOP
, MVT::v4i32
, 1 },
2601 static const CostTblEntry AVX512CDCostTbl
[] = {
2602 { ISD::CTLZ
, MVT::v8i64
, 1 },
2603 { ISD::CTLZ
, MVT::v16i32
, 1 },
2604 { ISD::CTLZ
, MVT::v32i16
, 8 },
2605 { ISD::CTLZ
, MVT::v64i8
, 20 },
2606 { ISD::CTLZ
, MVT::v4i64
, 1 },
2607 { ISD::CTLZ
, MVT::v8i32
, 1 },
2608 { ISD::CTLZ
, MVT::v16i16
, 4 },
2609 { ISD::CTLZ
, MVT::v32i8
, 10 },
2610 { ISD::CTLZ
, MVT::v2i64
, 1 },
2611 { ISD::CTLZ
, MVT::v4i32
, 1 },
2612 { ISD::CTLZ
, MVT::v8i16
, 4 },
2613 { ISD::CTLZ
, MVT::v16i8
, 4 },
  static const CostTblEntry AVX512BWCostTbl[] = {
    { ISD::ABS,        MVT::v32i16, 1 },
    { ISD::ABS,        MVT::v64i8,  1 },
    { ISD::BITREVERSE, MVT::v8i64,  5 },
    { ISD::BITREVERSE, MVT::v16i32, 5 },
    { ISD::BITREVERSE, MVT::v32i16, 5 },
    { ISD::BITREVERSE, MVT::v64i8,  5 },
    { ISD::BSWAP,      MVT::v8i64,  1 },
    { ISD::BSWAP,      MVT::v16i32, 1 },
    { ISD::BSWAP,      MVT::v32i16, 1 },
    { ISD::CTLZ,       MVT::v8i64,  23 },
    { ISD::CTLZ,       MVT::v16i32, 22 },
    { ISD::CTLZ,       MVT::v32i16, 18 },
    { ISD::CTLZ,       MVT::v64i8,  17 },
    { ISD::CTPOP,      MVT::v8i64,  7 },
    { ISD::CTPOP,      MVT::v16i32, 11 },
    { ISD::CTPOP,      MVT::v32i16, 9 },
    { ISD::CTPOP,      MVT::v64i8,  6 },
    { ISD::CTTZ,       MVT::v8i64,  10 },
    { ISD::CTTZ,       MVT::v16i32, 14 },
    { ISD::CTTZ,       MVT::v32i16, 12 },
    { ISD::CTTZ,       MVT::v64i8,  9 },
    { ISD::SADDSAT,    MVT::v32i16, 1 },
    { ISD::SADDSAT,    MVT::v64i8,  1 },
    { ISD::SMAX,       MVT::v32i16, 1 },
    { ISD::SMAX,       MVT::v64i8,  1 },
    { ISD::SMIN,       MVT::v32i16, 1 },
    { ISD::SMIN,       MVT::v64i8,  1 },
    { ISD::SSUBSAT,    MVT::v32i16, 1 },
    { ISD::SSUBSAT,    MVT::v64i8,  1 },
    { ISD::UADDSAT,    MVT::v32i16, 1 },
    { ISD::UADDSAT,    MVT::v64i8,  1 },
    { ISD::UMAX,       MVT::v32i16, 1 },
    { ISD::UMAX,       MVT::v64i8,  1 },
    { ISD::UMIN,       MVT::v32i16, 1 },
    { ISD::UMIN,       MVT::v64i8,  1 },
    { ISD::USUBSAT,    MVT::v32i16, 1 },
    { ISD::USUBSAT,    MVT::v64i8,  1 },
  };
  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::ABS,        MVT::v8i64,  1 },
    { ISD::ABS,        MVT::v16i32, 1 },
    { ISD::ABS,        MVT::v32i16, 2 }, // FIXME: include split
    { ISD::ABS,        MVT::v64i8,  2 }, // FIXME: include split
    { ISD::ABS,        MVT::v4i64,  1 },
    { ISD::ABS,        MVT::v2i64,  1 },
    { ISD::BITREVERSE, MVT::v8i64,  36 },
    { ISD::BITREVERSE, MVT::v16i32, 24 },
    { ISD::BITREVERSE, MVT::v32i16, 10 },
    { ISD::BITREVERSE, MVT::v64i8,  10 },
    { ISD::BSWAP,      MVT::v8i64,  4 },
    { ISD::BSWAP,      MVT::v16i32, 4 },
    { ISD::BSWAP,      MVT::v32i16, 4 },
    { ISD::CTLZ,       MVT::v8i64,  29 },
    { ISD::CTLZ,       MVT::v16i32, 35 },
    { ISD::CTLZ,       MVT::v32i16, 28 },
    { ISD::CTLZ,       MVT::v64i8,  18 },
    { ISD::CTPOP,      MVT::v8i64,  16 },
    { ISD::CTPOP,      MVT::v16i32, 24 },
    { ISD::CTPOP,      MVT::v32i16, 18 },
    { ISD::CTPOP,      MVT::v64i8,  12 },
    { ISD::CTTZ,       MVT::v8i64,  20 },
    { ISD::CTTZ,       MVT::v16i32, 28 },
    { ISD::CTTZ,       MVT::v32i16, 24 },
    { ISD::CTTZ,       MVT::v64i8,  18 },
    { ISD::SMAX,       MVT::v8i64,  1 },
    { ISD::SMAX,       MVT::v16i32, 1 },
    { ISD::SMAX,       MVT::v32i16, 2 }, // FIXME: include split
    { ISD::SMAX,       MVT::v64i8,  2 }, // FIXME: include split
    { ISD::SMAX,       MVT::v4i64,  1 },
    { ISD::SMAX,       MVT::v2i64,  1 },
    { ISD::SMIN,       MVT::v8i64,  1 },
    { ISD::SMIN,       MVT::v16i32, 1 },
    { ISD::SMIN,       MVT::v32i16, 2 }, // FIXME: include split
    { ISD::SMIN,       MVT::v64i8,  2 }, // FIXME: include split
    { ISD::SMIN,       MVT::v4i64,  1 },
    { ISD::SMIN,       MVT::v2i64,  1 },
    { ISD::UMAX,       MVT::v8i64,  1 },
    { ISD::UMAX,       MVT::v16i32, 1 },
    { ISD::UMAX,       MVT::v32i16, 2 }, // FIXME: include split
    { ISD::UMAX,       MVT::v64i8,  2 }, // FIXME: include split
    { ISD::UMAX,       MVT::v4i64,  1 },
    { ISD::UMAX,       MVT::v2i64,  1 },
    { ISD::UMIN,       MVT::v8i64,  1 },
    { ISD::UMIN,       MVT::v16i32, 1 },
    { ISD::UMIN,       MVT::v32i16, 2 }, // FIXME: include split
    { ISD::UMIN,       MVT::v64i8,  2 }, // FIXME: include split
    { ISD::UMIN,       MVT::v4i64,  1 },
    { ISD::UMIN,       MVT::v2i64,  1 },
    { ISD::USUBSAT,    MVT::v16i32, 2 }, // pmaxud + psubd
    { ISD::USUBSAT,    MVT::v2i64,  2 }, // pmaxuq + psubq
    { ISD::USUBSAT,    MVT::v4i64,  2 }, // pmaxuq + psubq
    { ISD::USUBSAT,    MVT::v8i64,  2 }, // pmaxuq + psubq
    { ISD::UADDSAT,    MVT::v16i32, 3 }, // not + pminud + paddd
    { ISD::UADDSAT,    MVT::v2i64,  3 }, // not + pminuq + paddq
    { ISD::UADDSAT,    MVT::v4i64,  3 }, // not + pminuq + paddq
    { ISD::UADDSAT,    MVT::v8i64,  3 }, // not + pminuq + paddq
    { ISD::SADDSAT,    MVT::v32i16, 2 }, // FIXME: include split
    { ISD::SADDSAT,    MVT::v64i8,  2 }, // FIXME: include split
    { ISD::SSUBSAT,    MVT::v32i16, 2 }, // FIXME: include split
    { ISD::SSUBSAT,    MVT::v64i8,  2 }, // FIXME: include split
    { ISD::UADDSAT,    MVT::v32i16, 2 }, // FIXME: include split
    { ISD::UADDSAT,    MVT::v64i8,  2 }, // FIXME: include split
    { ISD::USUBSAT,    MVT::v32i16, 2 }, // FIXME: include split
    { ISD::USUBSAT,    MVT::v64i8,  2 }, // FIXME: include split
    { ISD::FMAXNUM,    MVT::f32,    2 },
    { ISD::FMAXNUM,    MVT::v4f32,  2 },
    { ISD::FMAXNUM,    MVT::v8f32,  2 },
    { ISD::FMAXNUM,    MVT::v16f32, 2 },
    { ISD::FMAXNUM,    MVT::f64,    2 },
    { ISD::FMAXNUM,    MVT::v2f64,  2 },
    { ISD::FMAXNUM,    MVT::v4f64,  2 },
    { ISD::FMAXNUM,    MVT::v8f64,  2 },
    { ISD::ISNAN,      MVT::v8f64,  1 },
    { ISD::ISNAN,      MVT::v16f32, 1 },
  };
  static const CostTblEntry XOPCostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,  4 },
    { ISD::BITREVERSE, MVT::v8i32,  4 },
    { ISD::BITREVERSE, MVT::v16i16, 4 },
    { ISD::BITREVERSE, MVT::v32i8,  4 },
    { ISD::BITREVERSE, MVT::v2i64,  1 },
    { ISD::BITREVERSE, MVT::v4i32,  1 },
    { ISD::BITREVERSE, MVT::v8i16,  1 },
    { ISD::BITREVERSE, MVT::v16i8,  1 },
    { ISD::BITREVERSE, MVT::i64,    3 },
    { ISD::BITREVERSE, MVT::i32,    3 },
    { ISD::BITREVERSE, MVT::i16,    3 },
    { ISD::BITREVERSE, MVT::i8,     3 }
  };
  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::ABS,        MVT::v4i64,  2 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
    { ISD::ABS,        MVT::v8i32,  1 },
    { ISD::ABS,        MVT::v16i16, 1 },
    { ISD::ABS,        MVT::v32i8,  1 },
    { ISD::BITREVERSE, MVT::v4i64,  5 },
    { ISD::BITREVERSE, MVT::v8i32,  5 },
    { ISD::BITREVERSE, MVT::v16i16, 5 },
    { ISD::BITREVERSE, MVT::v32i8,  5 },
    { ISD::BSWAP,      MVT::v4i64,  1 },
    { ISD::BSWAP,      MVT::v8i32,  1 },
    { ISD::BSWAP,      MVT::v16i16, 1 },
    { ISD::CTLZ,       MVT::v4i64,  23 },
    { ISD::CTLZ,       MVT::v8i32,  18 },
    { ISD::CTLZ,       MVT::v16i16, 14 },
    { ISD::CTLZ,       MVT::v32i8,  9 },
    { ISD::CTPOP,      MVT::v4i64,  7 },
    { ISD::CTPOP,      MVT::v8i32,  11 },
    { ISD::CTPOP,      MVT::v16i16, 9 },
    { ISD::CTPOP,      MVT::v32i8,  6 },
    { ISD::CTTZ,       MVT::v4i64,  10 },
    { ISD::CTTZ,       MVT::v8i32,  14 },
    { ISD::CTTZ,       MVT::v16i16, 12 },
    { ISD::CTTZ,       MVT::v32i8,  9 },
    { ISD::SADDSAT,    MVT::v16i16, 1 },
    { ISD::SADDSAT,    MVT::v32i8,  1 },
    { ISD::SMAX,       MVT::v8i32,  1 },
    { ISD::SMAX,       MVT::v16i16, 1 },
    { ISD::SMAX,       MVT::v32i8,  1 },
    { ISD::SMIN,       MVT::v8i32,  1 },
    { ISD::SMIN,       MVT::v16i16, 1 },
    { ISD::SMIN,       MVT::v32i8,  1 },
    { ISD::SSUBSAT,    MVT::v16i16, 1 },
    { ISD::SSUBSAT,    MVT::v32i8,  1 },
    { ISD::UADDSAT,    MVT::v16i16, 1 },
    { ISD::UADDSAT,    MVT::v32i8,  1 },
    { ISD::UADDSAT,    MVT::v8i32,  3 }, // not + pminud + paddd
    { ISD::UMAX,       MVT::v8i32,  1 },
    { ISD::UMAX,       MVT::v16i16, 1 },
    { ISD::UMAX,       MVT::v32i8,  1 },
    { ISD::UMIN,       MVT::v8i32,  1 },
    { ISD::UMIN,       MVT::v16i16, 1 },
    { ISD::UMIN,       MVT::v32i8,  1 },
    { ISD::USUBSAT,    MVT::v16i16, 1 },
    { ISD::USUBSAT,    MVT::v32i8,  1 },
    { ISD::USUBSAT,    MVT::v8i32,  2 }, // pmaxud + psubd
    { ISD::FMAXNUM,    MVT::v8f32,  3 }, // MAXPS + CMPUNORDPS + BLENDVPS
    { ISD::FMAXNUM,    MVT::v4f64,  3 }, // MAXPD + CMPUNORDPD + BLENDVPD
    { ISD::FSQRT,      MVT::f32,    7 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f32,  7 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };
  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::ABS,        MVT::v4i64,  5 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
    { ISD::ABS,        MVT::v8i32,  3 },
    { ISD::ABS,        MVT::v16i16, 3 },
    { ISD::ABS,        MVT::v32i8,  3 },
    { ISD::BITREVERSE, MVT::v4i64,  12 }, // 2 x 128-bit Op + extract/insert
    { ISD::BITREVERSE, MVT::v8i32,  12 }, // 2 x 128-bit Op + extract/insert
    { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
    { ISD::BITREVERSE, MVT::v32i8,  12 }, // 2 x 128-bit Op + extract/insert
    { ISD::BSWAP,      MVT::v4i64,  4 },
    { ISD::BSWAP,      MVT::v8i32,  4 },
    { ISD::BSWAP,      MVT::v16i16, 4 },
    { ISD::CTLZ,       MVT::v4i64,  48 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTLZ,       MVT::v8i32,  38 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTLZ,       MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTLZ,       MVT::v32i8,  20 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP,      MVT::v4i64,  16 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP,      MVT::v8i32,  24 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP,      MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP,      MVT::v32i8,  14 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ,       MVT::v4i64,  22 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ,       MVT::v8i32,  30 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ,       MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ,       MVT::v32i8,  20 }, // 2 x 128-bit Op + extract/insert
    { ISD::SADDSAT,    MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SADDSAT,    MVT::v32i8,  4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SMAX,       MVT::v8i32,  4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SMAX,       MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SMAX,       MVT::v32i8,  4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SMIN,       MVT::v8i32,  4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SMIN,       MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SMIN,       MVT::v32i8,  4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SSUBSAT,    MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SSUBSAT,    MVT::v32i8,  4 }, // 2 x 128-bit Op + extract/insert
    { ISD::UADDSAT,    MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::UADDSAT,    MVT::v32i8,  4 }, // 2 x 128-bit Op + extract/insert
    { ISD::UADDSAT,    MVT::v8i32,  8 }, // 2 x 128-bit Op + extract/insert
    { ISD::UMAX,       MVT::v8i32,  4 }, // 2 x 128-bit Op + extract/insert
    { ISD::UMAX,       MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::UMAX,       MVT::v32i8,  4 }, // 2 x 128-bit Op + extract/insert
    { ISD::UMIN,       MVT::v8i32,  4 }, // 2 x 128-bit Op + extract/insert
    { ISD::UMIN,       MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::UMIN,       MVT::v32i8,  4 }, // 2 x 128-bit Op + extract/insert
    { ISD::USUBSAT,    MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
    { ISD::USUBSAT,    MVT::v32i8,  4 }, // 2 x 128-bit Op + extract/insert
    { ISD::USUBSAT,    MVT::v8i32,  6 }, // 2 x 128-bit Op + extract/insert
    { ISD::FMAXNUM,    MVT::f32,    3 }, // MAXSS + CMPUNORDSS + BLENDVPS
    { ISD::FMAXNUM,    MVT::v4f32,  3 }, // MAXPS + CMPUNORDPS + BLENDVPS
    { ISD::FMAXNUM,    MVT::v8f32,  5 }, // MAXPS + CMPUNORDPS + BLENDVPS + ?
    { ISD::FMAXNUM,    MVT::f64,    3 }, // MAXSD + CMPUNORDSD + BLENDVPD
    { ISD::FMAXNUM,    MVT::v2f64,  3 }, // MAXPD + CMPUNORDPD + BLENDVPD
    { ISD::FMAXNUM,    MVT::v4f64,  5 }, // MAXPD + CMPUNORDPD + BLENDVPD + ?
    { ISD::FSQRT,      MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::f64,    21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64,  21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f64,  43 }, // SNB from http://www.agner.org/
    { ISD::ISNAN,      MVT::v4f64,  1 },
    { ISD::ISNAN,      MVT::v8f32,  1 },
  };
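  // Note: in the AVX1 table above, 256-bit integer ops are modelled as two
  // 128-bit ops plus an extract/insert pair, which is where the recurring
  // cost of 4 comes from.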
  static const CostTblEntry GLMCostTbl[] = {
    { ISD::FSQRT, MVT::f32,   19 }, // sqrtss
    { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps
    { ISD::FSQRT, MVT::f64,   34 }, // sqrtsd
    { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd
  };
  static const CostTblEntry SLMCostTbl[] = {
    { ISD::FSQRT, MVT::f32,   20 }, // sqrtss
    { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps
    { ISD::FSQRT, MVT::f64,   35 }, // sqrtsd
    { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd
  };
  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::USUBSAT, MVT::v4i32, 2 }, // pmaxud + psubd
    { ISD::UADDSAT, MVT::v4i32, 3 }, // not + pminud + paddd
    { ISD::FSQRT,   MVT::f32,   18 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT,   MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSE41CostTbl[] = {
    { ISD::ABS,  MVT::v2i64, 2 }, // BLENDVPD(X,PSUBQ(0,X),X)
    { ISD::SMAX, MVT::v4i32, 1 },
    { ISD::SMAX, MVT::v16i8, 1 },
    { ISD::SMIN, MVT::v4i32, 1 },
    { ISD::SMIN, MVT::v16i8, 1 },
    { ISD::UMAX, MVT::v4i32, 1 },
    { ISD::UMAX, MVT::v8i16, 1 },
    { ISD::UMIN, MVT::v4i32, 1 },
    { ISD::UMIN, MVT::v8i16, 1 },
  };
  static const CostTblEntry SSSE3CostTbl[] = {
    { ISD::ABS,        MVT::v4i32, 1 },
    { ISD::ABS,        MVT::v8i16, 1 },
    { ISD::ABS,        MVT::v16i8, 1 },
    { ISD::BITREVERSE, MVT::v2i64, 5 },
    { ISD::BITREVERSE, MVT::v4i32, 5 },
    { ISD::BITREVERSE, MVT::v8i16, 5 },
    { ISD::BITREVERSE, MVT::v16i8, 5 },
    { ISD::BSWAP,      MVT::v2i64, 1 },
    { ISD::BSWAP,      MVT::v4i32, 1 },
    { ISD::BSWAP,      MVT::v8i16, 1 },
    { ISD::CTLZ,       MVT::v2i64, 23 },
    { ISD::CTLZ,       MVT::v4i32, 18 },
    { ISD::CTLZ,       MVT::v8i16, 14 },
    { ISD::CTLZ,       MVT::v16i8, 9 },
    { ISD::CTPOP,      MVT::v2i64, 7 },
    { ISD::CTPOP,      MVT::v4i32, 11 },
    { ISD::CTPOP,      MVT::v8i16, 9 },
    { ISD::CTPOP,      MVT::v16i8, 6 },
    { ISD::CTTZ,       MVT::v2i64, 10 },
    { ISD::CTTZ,       MVT::v4i32, 14 },
    { ISD::CTTZ,       MVT::v8i16, 12 },
    { ISD::CTTZ,       MVT::v16i8, 9 }
  };
  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::ABS,        MVT::v2i64, 4 },
    { ISD::ABS,        MVT::v4i32, 3 },
    { ISD::ABS,        MVT::v8i16, 2 },
    { ISD::ABS,        MVT::v16i8, 2 },
    { ISD::BITREVERSE, MVT::v2i64, 29 },
    { ISD::BITREVERSE, MVT::v4i32, 27 },
    { ISD::BITREVERSE, MVT::v8i16, 27 },
    { ISD::BITREVERSE, MVT::v16i8, 20 },
    { ISD::BSWAP,      MVT::v2i64, 7 },
    { ISD::BSWAP,      MVT::v4i32, 7 },
    { ISD::BSWAP,      MVT::v8i16, 7 },
    { ISD::CTLZ,       MVT::v2i64, 25 },
    { ISD::CTLZ,       MVT::v4i32, 26 },
    { ISD::CTLZ,       MVT::v8i16, 20 },
    { ISD::CTLZ,       MVT::v16i8, 17 },
    { ISD::CTPOP,      MVT::v2i64, 12 },
    { ISD::CTPOP,      MVT::v4i32, 15 },
    { ISD::CTPOP,      MVT::v8i16, 13 },
    { ISD::CTPOP,      MVT::v16i8, 10 },
    { ISD::CTTZ,       MVT::v2i64, 14 },
    { ISD::CTTZ,       MVT::v4i32, 18 },
    { ISD::CTTZ,       MVT::v8i16, 16 },
    { ISD::CTTZ,       MVT::v16i8, 13 },
    { ISD::SADDSAT,    MVT::v8i16, 1 },
    { ISD::SADDSAT,    MVT::v16i8, 1 },
    { ISD::SMAX,       MVT::v8i16, 1 },
    { ISD::SMIN,       MVT::v8i16, 1 },
    { ISD::SSUBSAT,    MVT::v8i16, 1 },
    { ISD::SSUBSAT,    MVT::v16i8, 1 },
    { ISD::UADDSAT,    MVT::v8i16, 1 },
    { ISD::UADDSAT,    MVT::v16i8, 1 },
    { ISD::UMAX,       MVT::v8i16, 2 },
    { ISD::UMAX,       MVT::v16i8, 1 },
    { ISD::UMIN,       MVT::v8i16, 2 },
    { ISD::UMIN,       MVT::v16i8, 1 },
    { ISD::USUBSAT,    MVT::v8i16, 1 },
    { ISD::USUBSAT,    MVT::v16i8, 1 },
    { ISD::FMAXNUM,    MVT::f64,   4 },
    { ISD::FMAXNUM,    MVT::v2f64, 4 },
    { ISD::FSQRT,      MVT::f64,   32 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/
    { ISD::ISNAN,      MVT::f64,   1 },
    { ISD::ISNAN,      MVT::v2f64, 1 },
  };
  static const CostTblEntry SSE1CostTbl[] = {
    { ISD::FMAXNUM, MVT::f32,   4 },
    { ISD::FMAXNUM, MVT::v4f32, 4 },
    { ISD::FSQRT,   MVT::f32,   28 }, // Pentium III from http://www.agner.org/
    { ISD::FSQRT,   MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/
    { ISD::ISNAN,   MVT::f32,   1 },
    { ISD::ISNAN,   MVT::v4f32, 1 },
  };
  static const CostTblEntry BMI64CostTbl[] = { // 64-bit targets
    { ISD::CTTZ, MVT::i64, 1 },
  };
  static const CostTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets
    { ISD::CTTZ, MVT::i32, 1 },
    { ISD::CTTZ, MVT::i16, 1 },
    { ISD::CTTZ, MVT::i8,  1 },
  };
  static const CostTblEntry LZCNT64CostTbl[] = { // 64-bit targets
    { ISD::CTLZ, MVT::i64, 1 },
  };
  static const CostTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets
    { ISD::CTLZ, MVT::i32, 1 },
    { ISD::CTLZ, MVT::i16, 1 },
    { ISD::CTLZ, MVT::i8,  1 },
  };
  static const CostTblEntry POPCNT64CostTbl[] = { // 64-bit targets
    { ISD::CTPOP, MVT::i64, 1 },
  };
  static const CostTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets
    { ISD::CTPOP, MVT::i32, 1 },
    { ISD::CTPOP, MVT::i16, 1 },
    { ISD::CTPOP, MVT::i8,  1 },
  };
  static const CostTblEntry X64CostTbl[] = { // 64-bit targets
    { ISD::ABS,        MVT::i64, 2 }, // SUB+CMOV
    { ISD::BITREVERSE, MVT::i64, 14 },
    { ISD::BSWAP,      MVT::i64, 1 },
    { ISD::CTLZ,       MVT::i64, 4 }, // BSR+XOR or BSR+XOR+CMOV
    { ISD::CTTZ,       MVT::i64, 3 }, // TEST+BSF+CMOV/BRANCH
    { ISD::CTPOP,      MVT::i64, 10 },
    { ISD::SADDO,      MVT::i64, 1 },
    { ISD::UADDO,      MVT::i64, 1 },
    { ISD::UMULO,      MVT::i64, 2 }, // mulq + seto
  };
  static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
    { ISD::ABS,        MVT::i32, 2 }, // SUB+CMOV
    { ISD::ABS,        MVT::i16, 2 }, // SUB+CMOV
    { ISD::BITREVERSE, MVT::i32, 14 },
    { ISD::BITREVERSE, MVT::i16, 14 },
    { ISD::BITREVERSE, MVT::i8,  11 },
    { ISD::BSWAP,      MVT::i32, 1 },
    { ISD::BSWAP,      MVT::i16, 1 }, // ROL
    { ISD::CTLZ,       MVT::i32, 4 }, // BSR+XOR or BSR+XOR+CMOV
    { ISD::CTLZ,       MVT::i16, 4 }, // BSR+XOR or BSR+XOR+CMOV
    { ISD::CTLZ,       MVT::i8,  4 }, // BSR+XOR or BSR+XOR+CMOV
    { ISD::CTTZ,       MVT::i32, 3 }, // TEST+BSF+CMOV/BRANCH
    { ISD::CTTZ,       MVT::i16, 3 }, // TEST+BSF+CMOV/BRANCH
    { ISD::CTTZ,       MVT::i8,  3 }, // TEST+BSF+CMOV/BRANCH
    { ISD::CTPOP,      MVT::i32, 8 },
    { ISD::CTPOP,      MVT::i16, 9 },
    { ISD::CTPOP,      MVT::i8,  7 },
    { ISD::SADDO,      MVT::i32, 1 },
    { ISD::SADDO,      MVT::i16, 1 },
    { ISD::SADDO,      MVT::i8,  1 },
    { ISD::UADDO,      MVT::i32, 1 },
    { ISD::UADDO,      MVT::i16, 1 },
    { ISD::UADDO,      MVT::i8,  1 },
    { ISD::UMULO,      MVT::i32, 2 }, // mul + seto
    { ISD::UMULO,      MVT::i16, 2 },
    { ISD::UMULO,      MVT::i8,  2 },
  };
  Type *RetTy = ICA.getReturnType();
  Type *OpTy = RetTy;
  Intrinsic::ID IID = ICA.getID();
  unsigned ISD = ISD::DELETED_NODE;
  switch (IID) {
  default:
    break;
  case Intrinsic::abs:
    ISD = ISD::ABS;
    break;
  case Intrinsic::bitreverse:
    ISD = ISD::BITREVERSE;
    break;
  case Intrinsic::bswap:
    ISD = ISD::BSWAP;
    break;
  case Intrinsic::ctlz:
    ISD = ISD::CTLZ;
    break;
  case Intrinsic::ctpop:
    ISD = ISD::CTPOP;
    break;
  case Intrinsic::cttz:
    ISD = ISD::CTTZ;
    break;
  case Intrinsic::isnan:
    ISD = ISD::ISNAN;
    OpTy = ICA.getArgTypes()[0];
    break;
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
    // FMINNUM has same costs so don't duplicate.
    ISD = ISD::FMAXNUM;
    break;
  case Intrinsic::sadd_sat:
    ISD = ISD::SADDSAT;
    break;
  case Intrinsic::smax:
    ISD = ISD::SMAX;
    break;
  case Intrinsic::smin:
    ISD = ISD::SMIN;
    break;
  case Intrinsic::ssub_sat:
    ISD = ISD::SSUBSAT;
    break;
  case Intrinsic::uadd_sat:
    ISD = ISD::UADDSAT;
    break;
  case Intrinsic::umax:
    ISD = ISD::UMAX;
    break;
  case Intrinsic::umin:
    ISD = ISD::UMIN;
    break;
  case Intrinsic::usub_sat:
    ISD = ISD::USUBSAT;
    break;
  case Intrinsic::sqrt:
    ISD = ISD::FSQRT;
    break;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // SSUBO has same costs so don't duplicate.
    ISD = ISD::SADDO;
    OpTy = RetTy->getContainedType(0);
    break;
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::usub_with_overflow:
    // USUBO has same costs so don't duplicate.
    ISD = ISD::UADDO;
    OpTy = RetTy->getContainedType(0);
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // SMULO has same costs so don't duplicate.
    ISD = ISD::UMULO;
    OpTy = RetTy->getContainedType(0);
    break;
  }
  if (ISD != ISD::DELETED_NODE) {
    // Legalize the type.
    std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy);
    MVT MTy = LT.second;

    // Attempt to lookup cost.
    if (ISD == ISD::BITREVERSE && ST->hasGFNI() && ST->hasSSSE3() &&
        MTy.isVector()) {
      // With PSHUFB the code is very similar for all types. If we have integer
      // byte operations, we just need a GF2P8AFFINEQB for vXi8. For other types
      // we also need a PSHUFB.
      unsigned Cost = MTy.getVectorElementType() == MVT::i8 ? 1 : 2;

      // Without byte operations, we need twice as many GF2P8AFFINEQB and PSHUFB
      // instructions. We also need an extract and an insert.
      if (!(MTy.is128BitVector() || (ST->hasAVX2() && MTy.is256BitVector()) ||
            (ST->hasBWI() && MTy.is512BitVector())))
        Cost = Cost * 2 + 2;

      return LT.first * Cost;
    }
    auto adjustTableCost = [](const CostTblEntry &Entry,
                              InstructionCost LegalizationCost,
                              FastMathFlags FMF) {
      // If there are no NANs to deal with, then these are reduced to a
      // single MIN** or MAX** instruction instead of the MIN/CMP/SELECT that we
      // assume is used in the non-fast case.
      if (Entry.ISD == ISD::FMAXNUM || Entry.ISD == ISD::FMINNUM) {
        if (FMF.noNaNs())
          return LegalizationCost * 1;
      }
      return LegalizationCost * (int)Entry.Cost;
    };
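
    // For example, a maxnum on <4 x float> with the nnan flag is assumed to
    // lower to a single MAXPS, so only the legalization (split) factor is
    // charged; without nnan the full table cost (MAXPS + CMPUNORDPS +
    // BLENDVPS on AVX) is used.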
    if (ST->useGLMDivSqrtCosts())
      if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->isSLM())
      if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasBITALG())
      if (const auto *Entry = CostTableLookup(AVX512BITALGCostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasVPOPCNTDQ())
      if (const auto *Entry = CostTableLookup(AVX512VPOPCNTDQCostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasCDI())
      if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasBWI())
      if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasAVX512())
      if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasXOP())
      if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasSSE41())
      if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasSSSE3())
      if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasSSE1())
      if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasBMI()) {
      if (ST->is64Bit())
        if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy))
          return adjustTableCost(*Entry, LT.first, ICA.getFlags());

      if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());
    }

    if (ST->hasLZCNT()) {
      if (ST->is64Bit())
        if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy))
          return adjustTableCost(*Entry, LT.first, ICA.getFlags());

      if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());
    }

    if (ST->hasPOPCNT()) {
      if (ST->is64Bit())
        if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy))
          return adjustTableCost(*Entry, LT.first, ICA.getFlags());

      if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());
    }

    if (ISD == ISD::BSWAP && ST->hasMOVBE() && ST->hasFastMOVBE()) {
      if (const Instruction *II = ICA.getInst()) {
        if (II->hasOneUse() && isa<StoreInst>(II->user_back()))
          return TTI::TCC_Free;
        if (auto *LI = dyn_cast<LoadInst>(II->getOperand(0))) {
          if (LI->hasOneUse())
            return TTI::TCC_Free;
        }
      }
    }

    // TODO - add BMI (TZCNT) scalar handling

    if (ST->is64Bit())
      if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
      return adjustTableCost(*Entry, LT.first, ICA.getFlags());
  }

  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}
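
// Costs for intrinsics whose lowering depends on the actual operand values
// (e.g. fshl/fshr with identical operands are really rotates) are handled
// below; everything else defers to the type-based path above.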
InstructionCost
X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                  TTI::TargetCostKind CostKind) {
  if (ICA.isTypeBasedOnly())
    return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::ROTL, MVT::v8i64,  1 },
    { ISD::ROTL, MVT::v4i64,  1 },
    { ISD::ROTL, MVT::v2i64,  1 },
    { ISD::ROTL, MVT::v16i32, 1 },
    { ISD::ROTL, MVT::v8i32,  1 },
    { ISD::ROTL, MVT::v4i32,  1 },
    { ISD::ROTR, MVT::v8i64,  1 },
    { ISD::ROTR, MVT::v4i64,  1 },
    { ISD::ROTR, MVT::v2i64,  1 },
    { ISD::ROTR, MVT::v16i32, 1 },
    { ISD::ROTR, MVT::v8i32,  1 },
    { ISD::ROTR, MVT::v4i32,  1 }
  };
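  // AVX-512 provides native vector rotates (VPROLV/VPRORV and their immediate
  // forms), so a legal rotate is modelled as a single instruction above.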
  // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y))
  static const CostTblEntry XOPCostTbl[] = {
    { ISD::ROTL, MVT::v4i64,  4 },
    { ISD::ROTL, MVT::v8i32,  4 },
    { ISD::ROTL, MVT::v16i16, 4 },
    { ISD::ROTL, MVT::v32i8,  4 },
    { ISD::ROTL, MVT::v2i64,  1 },
    { ISD::ROTL, MVT::v4i32,  1 },
    { ISD::ROTL, MVT::v8i16,  1 },
    { ISD::ROTL, MVT::v16i8,  1 },
    { ISD::ROTR, MVT::v4i64,  6 },
    { ISD::ROTR, MVT::v8i32,  6 },
    { ISD::ROTR, MVT::v16i16, 6 },
    { ISD::ROTR, MVT::v32i8,  6 },
    { ISD::ROTR, MVT::v2i64,  2 },
    { ISD::ROTR, MVT::v4i32,  2 },
    { ISD::ROTR, MVT::v8i16,  2 },
    { ISD::ROTR, MVT::v16i8,  2 }
  };
  static const CostTblEntry X64CostTbl[] = { // 64-bit targets
    { ISD::ROTL, MVT::i64, 1 },
    { ISD::ROTR, MVT::i64, 1 },
    { ISD::FSHL, MVT::i64, 4 }
  };
  static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
    { ISD::ROTL, MVT::i32, 1 },
    { ISD::ROTL, MVT::i16, 1 },
    { ISD::ROTL, MVT::i8,  1 },
    { ISD::ROTR, MVT::i32, 1 },
    { ISD::ROTR, MVT::i16, 1 },
    { ISD::ROTR, MVT::i8,  1 },
    { ISD::FSHL, MVT::i32, 4 },
    { ISD::FSHL, MVT::i16, 4 },
    { ISD::FSHL, MVT::i8,  4 }
  };
  Intrinsic::ID IID = ICA.getID();
  Type *RetTy = ICA.getReturnType();
  const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
  unsigned ISD = ISD::DELETED_NODE;
  switch (IID) {
  default:
    break;
  case Intrinsic::fshl:
    ISD = ISD::FSHL;
    if (Args[0] == Args[1])
      ISD = ISD::ROTL;
    break;
  case Intrinsic::fshr:
    // FSHR has same costs so don't duplicate.
    ISD = ISD::FSHL;
    if (Args[0] == Args[1])
      ISD = ISD::ROTR;
    break;
  }

  if (ISD != ISD::DELETED_NODE) {
    // Legalize the type.
    std::pair<InstructionCost, MVT> LT =
        TLI->getTypeLegalizationCost(DL, RetTy);
    MVT MTy = LT.second;

    // Attempt to lookup cost.
    if (ST->hasAVX512())
      if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasXOP())
      if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->is64Bit())
      if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;
  }

  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}
InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                               unsigned Index) {
  static const CostTblEntry SLMCostTbl[] = {
    { ISD::EXTRACT_VECTOR_ELT, MVT::i8,  4 },
    { ISD::EXTRACT_VECTOR_ELT, MVT::i16, 4 },
    { ISD::EXTRACT_VECTOR_ELT, MVT::i32, 4 },
    { ISD::EXTRACT_VECTOR_ELT, MVT::i64, 7 }
  };
  assert(Val->isVectorTy() && "This must be a vector type");
  Type *ScalarType = Val->getScalarType();
  int RegisterFileMoveCost = 0;

  // Non-immediate extraction/insertion can be handled as a sequence of
  // aliased loads+stores via the stack.
  if (Index == -1U && (Opcode == Instruction::ExtractElement ||
                       Opcode == Instruction::InsertElement)) {
    // TODO: On some SSE41+ targets, we expand to cmp+splat+select patterns:
    //       inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0.

    // TODO: Move this to BasicTTIImpl.h? We'd need better gep + index handling.
    assert(isa<FixedVectorType>(Val) && "Fixed vector type expected");
    Align VecAlign = DL.getPrefTypeAlign(Val);
    Align SclAlign = DL.getPrefTypeAlign(ScalarType);

    // Extract - store vector to stack, load scalar.
    if (Opcode == Instruction::ExtractElement) {
      return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0,
                             TTI::TargetCostKind::TCK_RecipThroughput) +
             getMemoryOpCost(Instruction::Load, ScalarType, SclAlign, 0,
                             TTI::TargetCostKind::TCK_RecipThroughput);
    }
    // Insert - store vector to stack, store scalar, load vector.
    if (Opcode == Instruction::InsertElement) {
      return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0,
                             TTI::TargetCostKind::TCK_RecipThroughput) +
             getMemoryOpCost(Instruction::Store, ScalarType, SclAlign, 0,
                             TTI::TargetCostKind::TCK_RecipThroughput) +
             getMemoryOpCost(Instruction::Load, Val, VecAlign, 0,
                             TTI::TargetCostKind::TCK_RecipThroughput);
    }
  }
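
  // For a known constant index the cost below is driven by how the element
  // moves between the vector and the scalar register file: a direct
  // pinsr/pextr/insertps when the subtarget has one, otherwise a shuffle
  // to/from lane 0, plus any cross-128-bit subvector moves accumulated in
  // RegisterFileMoveCost.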
  if (Index != -1U && (Opcode == Instruction::ExtractElement ||
                       Opcode == Instruction::InsertElement)) {
    // Legalize the type.
    std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned NumElts = LT.second.getVectorNumElements();
    unsigned SubNumElts = NumElts;
    Index = Index % NumElts;

    // For >128-bit vectors, we need to extract higher 128-bit subvectors.
    // For inserts, we also need to insert the subvector back.
    if (LT.second.getSizeInBits() > 128) {
      assert((LT.second.getSizeInBits() % 128) == 0 && "Illegal vector");
      unsigned NumSubVecs = LT.second.getSizeInBits() / 128;
      SubNumElts = NumElts / NumSubVecs;
      if (SubNumElts <= Index) {
        RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1);
        Index %= SubNumElts;
      }
    }

    if (Index == 0) {
      // Floating point scalars are already located in index #0.
      // Many insertions to #0 can fold away for scalar fp-ops, so let's assume
      // they are free.
      if (ScalarType->isFloatingPointTy())
        return RegisterFileMoveCost;

      // Assume movd/movq XMM -> GPR is relatively cheap on all targets.
      if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement)
        return 1 + RegisterFileMoveCost;
    }

    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Unexpected vector opcode");
    MVT MScalarTy = LT.second.getScalarType();
    if (ST->isSLM())
      if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
        return Entry->Cost + RegisterFileMoveCost;

    // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets.
    if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
        (MScalarTy.isInteger() && ST->hasSSE41()))
      return 1 + RegisterFileMoveCost;

    // Assume insertps is relatively cheap on all targets.
    if (MScalarTy == MVT::f32 && ST->hasSSE41() &&
        Opcode == Instruction::InsertElement)
      return 1 + RegisterFileMoveCost;

    // For extractions we just need to shuffle the element to index 0, which
    // should be very cheap (assume cost = 1). For insertions we need to shuffle
    // the elements to its destination. In both cases we must handle the
    // subvector move(s).
    // If the vector type is already less than 128-bits then don't reduce it.
    // TODO: Under what circumstances should we shuffle using the full width?
    InstructionCost ShuffleCost = 1;
    if (Opcode == Instruction::InsertElement) {
      auto *SubTy = cast<VectorType>(Val);
      EVT VT = TLI->getValueType(DL, Val);
      if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128)
        SubTy = FixedVectorType::get(ScalarType, SubNumElts);
      ShuffleCost =
          getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, None, 0, SubTy);
    }
    int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1;
    return ShuffleCost + IntOrFpCost + RegisterFileMoveCost;
  }

  // Add to the base cost if we know that the extracted element of a vector is
  // destined to be moved to and used in the integer register file.
  if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
    RegisterFileMoveCost += 1;

  return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
}
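
// Scalarization overhead models the cost of building a vector element by
// element (Insert) and/or tearing one apart (Extract). For insertions it
// credits the cheap pinsr/insertps paths and charges the extra
// extracti128/inserti128 traffic needed on vectors wider than 128 bits.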
InstructionCost X86TTIImpl::getScalarizationOverhead(VectorType *Ty,
                                                     const APInt &DemandedElts,
                                                     bool Insert,
                                                     bool Extract) {
  InstructionCost Cost = 0;

  // For insertions, an ISD::BUILD_VECTOR style vector initialization can be
  // much cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
  if (Insert) {
    std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
    MVT MScalarTy = LT.second.getScalarType();

    if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
        (MScalarTy.isInteger() && ST->hasSSE41()) ||
        (MScalarTy == MVT::f32 && ST->hasSSE41())) {
      // For types we can insert directly, insertion into 128-bit sub vectors is
      // cheap, followed by a cheap chain of concatenations.
      if (LT.second.getSizeInBits() <= 128) {
        Cost +=
            BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false);
      } else {
        // In each 128-lane, if at least one index is demanded but not all
        // indices are demanded and this 128-lane is not the first 128-lane of
        // the legalized-vector, then this 128-lane needs a extracti128; If in
        // each 128-lane, there is at least one demanded index, this 128-lane
        // needs a inserti128.

        // The following cases will help you build a better understanding:
        // Assume we insert several elements into a v8i32 vector in avx2,
        // Case#1: inserting into 1th index needs vpinsrd + inserti128.
        // Case#2: inserting into 5th index needs extracti128 + vpinsrd +
        //         inserti128.
        // Case#3: inserting into 4,5,6,7 index needs 4*vpinsrd + inserti128.
        const int CostValue = *LT.first.getValue();
        assert(CostValue >= 0 && "Negative cost!");
        unsigned Num128Lanes = LT.second.getSizeInBits() / 128 * CostValue;
        unsigned NumElts = LT.second.getVectorNumElements() * CostValue;
        APInt WidenedDemandedElts = DemandedElts.zextOrSelf(NumElts);
        unsigned Scale = NumElts / Num128Lanes;
        // We iterate each 128-lane, and check if we need a
        // extracti128/inserti128 for this 128-lane.
        for (unsigned I = 0; I < NumElts; I += Scale) {
          APInt Mask = WidenedDemandedElts.getBitsSet(NumElts, I, I + Scale);
          APInt MaskedDE = Mask & WidenedDemandedElts;
          unsigned Population = MaskedDE.countPopulation();
          Cost += (Population > 0 && Population != Scale &&
                   I % LT.second.getVectorNumElements() != 0);
          Cost += Population > 0;
        }
        Cost += DemandedElts.countPopulation();

        // For vXf32 cases, insertion into the 0'th index in each v4f32
        // 128-bit vector is free.
        // NOTE: This assumes legalization widens vXf32 vectors.
        if (MScalarTy == MVT::f32)
          for (unsigned i = 0, e = cast<FixedVectorType>(Ty)->getNumElements();
               i < e; i += 4)
            if (DemandedElts[i])
              Cost--;
      }
    } else if (LT.second.isVector()) {
      // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded
      // integer element as a SCALAR_TO_VECTOR, then we build the vector as a
      // series of UNPCK followed by CONCAT_VECTORS - all of these can be
      // considered cheap.
      if (Ty->isIntOrIntVectorTy())
        Cost += DemandedElts.countPopulation();

      // Get the smaller of the legalized or original pow2-extended number of
      // vector elements, which represents the number of unpacks we'll end up
      // performing.
      unsigned NumElts = LT.second.getVectorNumElements();
      unsigned Pow2Elts =
          PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements());
      Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first;
    }
  }

  // TODO: Use default extraction for now, but we should investigate extending
  // this to handle repeated subvector extraction.
  if (Extract)
    Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract);

  return Cost;
}
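
// Plain vector loads/stores are costed by greedily covering the legalized
// vector with the widest legal memory op and halving the op width as the
// remaining element count shrinks; sub-32-bit tails are additionally charged
// as a single-lane insert/extract of the containing XMM register.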
InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                            MaybeAlign Alignment,
                                            unsigned AddressSpace,
                                            TTI::TargetCostKind CostKind,
                                            const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput) {
    if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
      // Store instruction with index and scale costs 2 Uops.
      // Check the preceding GEP to identify non-const indices.
      if (auto *GEP = dyn_cast<GetElementPtrInst>(SI->getPointerOperand())) {
        if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
          return TTI::TCC_Basic * 2;
      }
    }
    return TTI::TCC_Basic;
  }

  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");
  // Type legalization can't handle structs
  if (TLI->getValueType(DL, Src, true) == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind);

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  auto *VTy = dyn_cast<FixedVectorType>(Src);

  // Handle the simple case of non-vectors.
  // NOTE: this assumes that legalization never creates vector from scalars!
  if (!VTy || !LT.second.isVector())
    // Each load/store unit costs 1.
    return LT.first * 1;

  bool IsLoad = Opcode == Instruction::Load;

  Type *EltTy = VTy->getElementType();

  const int EltTyBits = DL.getTypeSizeInBits(EltTy);

  InstructionCost Cost = 0;

  // Source of truth: how many elements were there in the original IR vector?
  const unsigned SrcNumElt = VTy->getNumElements();

  // How far have we gotten?
  int NumEltRemaining = SrcNumElt;
  // Note that we intentionally capture by-reference, NumEltRemaining changes.
  auto NumEltDone = [&]() { return SrcNumElt - NumEltRemaining; };

  const int MaxLegalOpSizeBytes = divideCeil(LT.second.getSizeInBits(), 8);

  // Note that even if we can store 64 bits of an XMM, we still operate on XMM.
  const unsigned XMMBits = 128;
  if (XMMBits % EltTyBits != 0)
    // Vector size must be a multiple of the element size. I.e. no padding.
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind);
  const int NumEltPerXMM = XMMBits / EltTyBits;

  auto *XMMVecTy = FixedVectorType::get(EltTy, NumEltPerXMM);

  for (int CurrOpSizeBytes = MaxLegalOpSizeBytes, SubVecEltsLeft = 0;
       NumEltRemaining > 0; CurrOpSizeBytes /= 2) {
    // How many elements would a single op deal with at once?
    if ((8 * CurrOpSizeBytes) % EltTyBits != 0)
      // Vector size must be a multiple of the element size. I.e. no padding.
      return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                    CostKind);
    int CurrNumEltPerOp = (8 * CurrOpSizeBytes) / EltTyBits;

    assert(CurrOpSizeBytes > 0 && CurrNumEltPerOp > 0 && "How'd we get here?");
    assert((((NumEltRemaining * EltTyBits) < (2 * 8 * CurrOpSizeBytes)) ||
            (CurrOpSizeBytes == MaxLegalOpSizeBytes)) &&
           "Unless we haven't halved the op size yet, "
           "we have less than two op's sized units of work left.");

    auto *CurrVecTy = CurrNumEltPerOp > NumEltPerXMM
                          ? FixedVectorType::get(EltTy, CurrNumEltPerOp)
                          : XMMVecTy;

    assert(CurrVecTy->getNumElements() % CurrNumEltPerOp == 0 &&
           "After halving sizes, the vector elt count is no longer a multiple "
           "of number of elements per operation?");
    auto *CoalescedVecTy =
        CurrNumEltPerOp == 1
            ? CurrVecTy
            : FixedVectorType::get(
                  IntegerType::get(Src->getContext(),
                                   EltTyBits * CurrNumEltPerOp),
                  CurrVecTy->getNumElements() / CurrNumEltPerOp);
    assert(DL.getTypeSizeInBits(CoalescedVecTy) ==
               DL.getTypeSizeInBits(CurrVecTy) &&
           "coalescing elements doesn't change vector width.");

    while (NumEltRemaining > 0) {
      assert(SubVecEltsLeft >= 0 && "Subreg element count overconsumption?");

      // Can we use this vector size, as per the remaining element count?
      // Iff the vector is naturally aligned, we can do a wide load regardless.
      if (NumEltRemaining < CurrNumEltPerOp &&
          (!IsLoad || Alignment.valueOrOne() < CurrOpSizeBytes) &&
          CurrOpSizeBytes != 1)
        break; // Try smaller vector size.

      bool Is0thSubVec = (NumEltDone() % LT.second.getVectorNumElements()) == 0;

      // If we have fully processed the previous reg, we need to replenish it.
      if (SubVecEltsLeft == 0) {
        SubVecEltsLeft += CurrVecTy->getNumElements();
        // And that's free only for the 0'th subvector of a legalized vector.
        if (!Is0thSubVec)
          Cost += getShuffleCost(IsLoad ? TTI::ShuffleKind::SK_InsertSubvector
                                        : TTI::ShuffleKind::SK_ExtractSubvector,
                                 VTy, None, NumEltDone(), CurrVecTy);
      }

      // While we can directly load/store ZMM, YMM, and 64-bit halves of XMM,
      // for smaller widths (32/16/8) we have to insert/extract them separately.
      // Again, it's free for the 0'th subreg (if op is 32/64 bit wide,
      // but let's pretend that it is also true for 16/8 bit wide ops...)
      if (CurrOpSizeBytes <= 32 / 8 && !Is0thSubVec) {
        int NumEltDoneInCurrXMM = NumEltDone() % NumEltPerXMM;
        assert(NumEltDoneInCurrXMM % CurrNumEltPerOp == 0 && "");
        int CoalescedVecEltIdx = NumEltDoneInCurrXMM / CurrNumEltPerOp;
        APInt DemandedElts =
            APInt::getBitsSet(CoalescedVecTy->getNumElements(),
                              CoalescedVecEltIdx, CoalescedVecEltIdx + 1);
        assert(DemandedElts.countPopulation() == 1 && "Inserting single value");
        Cost += getScalarizationOverhead(CoalescedVecTy, DemandedElts, IsLoad,
                                         !IsLoad);
      }

      // This isn't exactly right. We're using slow unaligned 32-byte accesses
      // as a proxy for a double-pumped AVX memory interface such as on
      // Sandybridge.
      if (CurrOpSizeBytes == 32 && ST->isUnalignedMem32Slow())
        Cost += 2;
      else
        Cost += 1;

      SubVecEltsLeft -= CurrNumEltPerOp;
      NumEltRemaining -= CurrNumEltPerOp;
      Alignment = commonAlignment(Alignment.valueOrOne(), CurrOpSizeBytes);
    }
  }

  assert(NumEltRemaining <= 0 && "Should have processed all the elements.");

  return Cost;
}
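
// Masked loads/stores the target cannot lower natively are costed as full
// scalarization: split out the mask bytes, compare and branch per element,
// and perform one scalar memory op per element.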
InstructionCost
X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
                                  unsigned AddressSpace,
                                  TTI::TargetCostKind CostKind) {
  bool IsLoad = (Instruction::Load == Opcode);
  bool IsStore = (Instruction::Store == Opcode);

  auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy);
  if (!SrcVTy)
    // To calculate scalar take the regular cost, without mask
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind);

  unsigned NumElem = SrcVTy->getNumElements();
  auto *MaskTy =
      FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
  if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) ||
      (IsStore && !isLegalMaskedStore(SrcVTy, Alignment))) {
    // Scalarization
    APInt DemandedElts = APInt::getAllOnesValue(NumElem);
    InstructionCost MaskSplitCost =
        getScalarizationOverhead(MaskTy, DemandedElts, false, true);
    InstructionCost ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr,
        CmpInst::BAD_ICMP_PREDICATE, CostKind);
    InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
    InstructionCost MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
    InstructionCost ValueSplitCost =
        getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore);
    InstructionCost MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace, CostKind);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  auto VT = TLI->getValueType(DL, SrcVTy);
  InstructionCost Cost = 0;
  if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires extend/truncate for data and a shuffle for mask.
    Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, None, 0, nullptr) +
            getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, None, 0, nullptr);

  else if (LT.first * LT.second.getVectorNumElements() > NumElem) {
    auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(),
                                           LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, None, 0, MaskTy);
  }

  // Pre-AVX512 - each maskmov load costs 2 + store costs ~8.
  if (!ST->hasAVX512())
    return Cost + LT.first * (IsLoad ? 2 : 8);

  // AVX-512 masked load/store is cheaper
  return Cost + LT.first;
}
InstructionCost X86TTIImpl::getAddressComputationCost(Type *Ty,
                                                      ScalarEvolution *SE,
                                                      const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  const unsigned NumVectorInstToHideOverhead = 10;

  // Cost modeling of Strided Access Computation is hidden by the indexing
  // modes of X86 regardless of the stride value. We don't believe that there
  // is a difference between constant strided access in general and constant
  // strided value which is less than or equal to 64.
  // Even in the case of (loop invariant) stride whose value is not known at
  // compile time, the address computation will not incur more than one extra
  // ADD instruction.
  if (Ty->isVectorTy() && SE) {
    if (!BaseT::isStridedAccess(Ptr))
      return NumVectorInstToHideOverhead;
    if (!BaseT::getConstantStrideStep(SE, Ptr))
      return 1;
  }

  return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}
InstructionCost
X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
                                       Optional<FastMathFlags> FMF,
                                       TTI::TargetCostKind CostKind) {
  if (TTI::requiresOrderedReduction(FMF))
    return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.
  static const CostTblEntry SLMCostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 3 },
    { ISD::ADD,  MVT::v2i64, 5 },
  };

  static const CostTblEntry SSE2CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v2f32, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v2i32, 2 }, // FIXME: chosen to be less than v4i32
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,  MVT::v2i16, 2 }, // The data reported by the IACA tool is "4.3".
    { ISD::ADD,  MVT::v4i16, 3 }, // The data reported by the IACA tool is "4.3".
    { ISD::ADD,  MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
    { ISD::ADD,  MVT::v2i8,  2 },
    { ISD::ADD,  MVT::v4i8,  2 },
    { ISD::ADD,  MVT::v8i8,  2 },
    { ISD::ADD,  MVT::v16i8, 3 },
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f64,  3 },
    { ISD::FADD, MVT::v4f32,  3 },
    { ISD::FADD, MVT::v8f32,  4 },
    { ISD::ADD,  MVT::v2i64,  1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i64,  3 },
    { ISD::ADD,  MVT::v8i32,  5 },
    { ISD::ADD,  MVT::v16i16, 5 },
    { ISD::ADD,  MVT::v32i8,  4 },
  };
);
3852 assert(ISD
&& "Invalid opcode");
3854 // Before legalizing the type, give a chance to look up illegal narrow types
3856 // FIXME: Is there a better way to do this?
3857 EVT VT
= TLI
->getValueType(DL
, ValTy
);
3858 if (VT
.isSimple()) {
3859 MVT MTy
= VT
.getSimpleVT();
3861 if (const auto *Entry
= CostTableLookup(SLMCostTblNoPairWise
, ISD
, MTy
))
3865 if (const auto *Entry
= CostTableLookup(AVX1CostTblNoPairWise
, ISD
, MTy
))
3869 if (const auto *Entry
= CostTableLookup(SSE2CostTblNoPairWise
, ISD
, MTy
))
3873 std::pair
<InstructionCost
, MVT
> LT
= TLI
->getTypeLegalizationCost(DL
, ValTy
);
3875 MVT MTy
= LT
.second
;
3877 auto *ValVTy
= cast
<FixedVectorType
>(ValTy
);
3879 // Special case: vXi8 mul reductions are performed as vXi16.
3880 if (ISD
== ISD::MUL
&& MTy
.getScalarType() == MVT::i8
) {
3881 auto *WideSclTy
= IntegerType::get(ValVTy
->getContext(), 16);
3882 auto *WideVecTy
= FixedVectorType::get(WideSclTy
, ValVTy
->getNumElements());
3883 return getCastInstrCost(Instruction::ZExt
, WideVecTy
, ValTy
,
3884 TargetTransformInfo::CastContextHint::None
,
3886 getArithmeticReductionCost(Opcode
, WideVecTy
, FMF
, CostKind
);
3889 InstructionCost ArithmeticCost
= 0;
3890 if (LT
.first
!= 1 && MTy
.isVector() &&
3891 MTy
.getVectorNumElements() < ValVTy
->getNumElements()) {
3892 // Type needs to be split. We need LT.first - 1 arithmetic ops.
3893 auto *SingleOpTy
= FixedVectorType::get(ValVTy
->getElementType(),
3894 MTy
.getVectorNumElements());
3895 ArithmeticCost
= getArithmeticInstrCost(Opcode
, SingleOpTy
, CostKind
);
3896 ArithmeticCost
*= LT
.first
- 1;
3900 if (const auto *Entry
= CostTableLookup(SLMCostTblNoPairWise
, ISD
, MTy
))
3901 return ArithmeticCost
+ Entry
->Cost
;
3904 if (const auto *Entry
= CostTableLookup(AVX1CostTblNoPairWise
, ISD
, MTy
))
3905 return ArithmeticCost
+ Entry
->Cost
;
3908 if (const auto *Entry
= CostTableLookup(SSE2CostTblNoPairWise
, ISD
, MTy
))
3909 return ArithmeticCost
+ Entry
->Cost
;
  // FIXME: These assume a naive kshift+binop lowering, which is probably
  // conservative in most cases.
  static const CostTblEntry AVX512BoolReduction[] = {
    { ISD::AND, MVT::v2i1,  3 },
    { ISD::AND, MVT::v4i1,  5 },
    { ISD::AND, MVT::v8i1,  7 },
    { ISD::AND, MVT::v16i1, 9 },
    { ISD::AND, MVT::v32i1, 11 },
    { ISD::AND, MVT::v64i1, 13 },
    { ISD::OR,  MVT::v2i1,  3 },
    { ISD::OR,  MVT::v4i1,  5 },
    { ISD::OR,  MVT::v8i1,  7 },
    { ISD::OR,  MVT::v16i1, 9 },
    { ISD::OR,  MVT::v32i1, 11 },
    { ISD::OR,  MVT::v64i1, 13 },
  };

  static const CostTblEntry AVX2BoolReduction[] = {
    { ISD::AND, MVT::v16i16, 2 }, // vpmovmskb + cmp
    { ISD::AND, MVT::v32i8,  2 }, // vpmovmskb + cmp
    { ISD::OR,  MVT::v16i16, 2 }, // vpmovmskb + cmp
    { ISD::OR,  MVT::v32i8,  2 }, // vpmovmskb + cmp
  };

  static const CostTblEntry AVX1BoolReduction[] = {
    { ISD::AND, MVT::v4i64,  2 }, // vmovmskpd + cmp
    { ISD::AND, MVT::v8i32,  2 }, // vmovmskps + cmp
    { ISD::AND, MVT::v16i16, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
    { ISD::AND, MVT::v32i8,  4 }, // vextractf128 + vpand + vpmovmskb + cmp
    { ISD::OR,  MVT::v4i64,  2 }, // vmovmskpd + cmp
    { ISD::OR,  MVT::v8i32,  2 }, // vmovmskps + cmp
    { ISD::OR,  MVT::v16i16, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
    { ISD::OR,  MVT::v32i8,  4 }, // vextractf128 + vpor + vpmovmskb + cmp
  };

  static const CostTblEntry SSE2BoolReduction[] = {
    { ISD::AND, MVT::v2i64, 2 }, // movmskpd + cmp
    { ISD::AND, MVT::v4i32, 2 }, // movmskps + cmp
    { ISD::AND, MVT::v8i16, 2 }, // pmovmskb + cmp
    { ISD::AND, MVT::v16i8, 2 }, // pmovmskb + cmp
    { ISD::OR,  MVT::v2i64, 2 }, // movmskpd + cmp
    { ISD::OR,  MVT::v4i32, 2 }, // movmskps + cmp
    { ISD::OR,  MVT::v8i16, 2 }, // pmovmskb + cmp
    { ISD::OR,  MVT::v16i8, 2 }, // pmovmskb + cmp
  };
  // Handle bool allof/anyof patterns.
  if (ValVTy->getElementType()->isIntegerTy(1)) {
    InstructionCost ArithmeticCost = 0;
    if (LT.first != 1 && MTy.isVector() &&
        MTy.getVectorNumElements() < ValVTy->getNumElements()) {
      // Type needs to be split. We need LT.first - 1 arithmetic ops.
      auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
                                              MTy.getVectorNumElements());
      ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
      ArithmeticCost *= LT.first - 1;
    }

    if (ST->hasAVX512())
      if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy))
        return ArithmeticCost + Entry->Cost;
    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy))
        return ArithmeticCost + Entry->Cost;
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy))
        return ArithmeticCost + Entry->Cost;
    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy))
        return ArithmeticCost + Entry->Cost;

    return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);
  }
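
  // For ordinary arithmetic reductions the loop below models a log2(N)
  // shuffle+op tree: each step halves the element count with an
  // extract_subvector / permute / shift and applies one arithmetic op, with a
  // final extractelement charged at the end.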
  unsigned NumVecElts = ValVTy->getNumElements();
  unsigned ScalarSize = ValVTy->getScalarSizeInBits();

  // Special case power of 2 reductions where the scalar type isn't changed
  // by type legalization.
  if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits())
    return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);

  InstructionCost ReductionCost = 0;

  auto *Ty = ValVTy;
  if (LT.first != 1 && MTy.isVector() &&
      MTy.getVectorNumElements() < ValVTy->getNumElements()) {
    // Type needs to be split. We need LT.first - 1 arithmetic ops.
    Ty = FixedVectorType::get(ValVTy->getElementType(),
                              MTy.getVectorNumElements());
    ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
    ReductionCost *= LT.first - 1;
    NumVecElts = MTy.getVectorNumElements();
  }

  // Now handle reduction with the legal type, taking into account size changes
  // at each level.
  while (NumVecElts > 1) {
    // Determine the size of the remaining vector we need to reduce.
    unsigned Size = NumVecElts * ScalarSize;
    NumVecElts /= 2;
    // If we're reducing from 256/512 bits, use an extract_subvector.
    if (Size > 128) {
      auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
      ReductionCost +=
          getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
      Ty = SubTy;
    } else if (Size == 128) {
      // Reducing from 128 bits is a permute of v2f64/v2i64.
      FixedVectorType *ShufTy;
      if (ValVTy->isFloatingPointTy())
        ShufTy =
            FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2);
      else
        ShufTy =
            FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2);
      ReductionCost +=
          getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
    } else if (Size == 64) {
      // Reducing from 64 bits is a shuffle of v4f32/v4i32.
      FixedVectorType *ShufTy;
      if (ValVTy->isFloatingPointTy())
        ShufTy =
            FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4);
      else
        ShufTy =
            FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4);
      ReductionCost +=
          getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
    } else {
      // Reducing from smaller size is a shift by immediate.
      auto *ShiftTy = FixedVectorType::get(
          Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size);
      ReductionCost += getArithmeticInstrCost(
          Instruction::LShr, ShiftTy, CostKind,
          TargetTransformInfo::OK_AnyValue,
          TargetTransformInfo::OK_UniformConstantValue,
          TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    }

    // Add the arithmetic op for this level.
    ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind);
  }

  // Add the final extract element to the cost.
  return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
}
InstructionCost X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy,
                                          bool IsUnsigned) {
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  MVT MTy = LT.second;

  int ISD;
  if (Ty->isIntOrIntVectorTy()) {
    ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
  } else {
    assert(Ty->isFPOrFPVectorTy() &&
           "Expected float point or integer vector type.");
    ISD = ISD::FMINNUM;
  }
  static const CostTblEntry SSE1CostTbl[] = {
    {ISD::FMINNUM, MVT::v4f32, 1},
  };

  static const CostTblEntry SSE2CostTbl[] = {
    {ISD::FMINNUM, MVT::v2f64, 1},
    {ISD::SMIN,    MVT::v8i16, 1},
    {ISD::UMIN,    MVT::v16i8, 1},
  };

  static const CostTblEntry SSE41CostTbl[] = {
    {ISD::SMIN, MVT::v4i32, 1},
    {ISD::UMIN, MVT::v4i32, 1},
    {ISD::UMIN, MVT::v8i16, 1},
    {ISD::SMIN, MVT::v16i8, 1},
  };

  static const CostTblEntry SSE42CostTbl[] = {
    {ISD::UMIN, MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd
  };

  static const CostTblEntry AVX1CostTbl[] = {
    {ISD::FMINNUM, MVT::v8f32,  1},
    {ISD::FMINNUM, MVT::v4f64,  1},
    {ISD::SMIN,    MVT::v8i32,  3},
    {ISD::UMIN,    MVT::v8i32,  3},
    {ISD::SMIN,    MVT::v16i16, 3},
    {ISD::UMIN,    MVT::v16i16, 3},
    {ISD::SMIN,    MVT::v32i8,  3},
    {ISD::UMIN,    MVT::v32i8,  3},
  };

  static const CostTblEntry AVX2CostTbl[] = {
    {ISD::SMIN, MVT::v8i32,  1},
    {ISD::UMIN, MVT::v8i32,  1},
    {ISD::SMIN, MVT::v16i16, 1},
    {ISD::UMIN, MVT::v16i16, 1},
    {ISD::SMIN, MVT::v32i8,  1},
    {ISD::UMIN, MVT::v32i8,  1},
  };

  static const CostTblEntry AVX512CostTbl[] = {
    {ISD::FMINNUM, MVT::v16f32, 1},
    {ISD::FMINNUM, MVT::v8f64,  1},
    {ISD::SMIN,    MVT::v2i64,  1},
    {ISD::UMIN,    MVT::v2i64,  1},
    {ISD::SMIN,    MVT::v4i64,  1},
    {ISD::UMIN,    MVT::v4i64,  1},
    {ISD::SMIN,    MVT::v8i64,  1},
    {ISD::UMIN,    MVT::v8i64,  1},
    {ISD::SMIN,    MVT::v16i32, 1},
    {ISD::UMIN,    MVT::v16i32, 1},
  };

  static const CostTblEntry AVX512BWCostTbl[] = {
    {ISD::SMIN, MVT::v32i16, 1},
    {ISD::UMIN, MVT::v32i16, 1},
    {ISD::SMIN, MVT::v64i8,  1},
    {ISD::UMIN, MVT::v64i8,  1},
  };
4135 // If we have a native MIN/MAX instruction for this type, use it.
4137 if (const auto *Entry
= CostTableLookup(AVX512BWCostTbl
, ISD
, MTy
))
4138 return LT
.first
* Entry
->Cost
;
4140 if (ST
->hasAVX512())
4141 if (const auto *Entry
= CostTableLookup(AVX512CostTbl
, ISD
, MTy
))
4142 return LT
.first
* Entry
->Cost
;
4145 if (const auto *Entry
= CostTableLookup(AVX2CostTbl
, ISD
, MTy
))
4146 return LT
.first
* Entry
->Cost
;
4149 if (const auto *Entry
= CostTableLookup(AVX1CostTbl
, ISD
, MTy
))
4150 return LT
.first
* Entry
->Cost
;
4153 if (const auto *Entry
= CostTableLookup(SSE42CostTbl
, ISD
, MTy
))
4154 return LT
.first
* Entry
->Cost
;
4157 if (const auto *Entry
= CostTableLookup(SSE41CostTbl
, ISD
, MTy
))
4158 return LT
.first
* Entry
->Cost
;
4161 if (const auto *Entry
= CostTableLookup(SSE2CostTbl
, ISD
, MTy
))
4162 return LT
.first
* Entry
->Cost
;
4165 if (const auto *Entry
= CostTableLookup(SSE1CostTbl
, ISD
, MTy
))
4166 return LT
.first
* Entry
->Cost
;
4169 if (Ty
->isFPOrFPVectorTy()) {
4170 CmpOpcode
= Instruction::FCmp
;
4172 assert(Ty
->isIntOrIntVectorTy() &&
4173 "expecting floating point or integer type for min/max reduction");
4174 CmpOpcode
= Instruction::ICmp
;
4177 TTI::TargetCostKind CostKind
= TTI::TCK_RecipThroughput
;
4178 // Otherwise fall back to cmp+select.
4179 InstructionCost Result
=
4180 getCmpSelInstrCost(CmpOpcode
, Ty
, CondTy
, CmpInst::BAD_ICMP_PREDICATE
,
4182 getCmpSelInstrCost(Instruction::Select
, Ty
, CondTy
,
4183 CmpInst::BAD_ICMP_PREDICATE
, CostKind
);
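// Worked example (illustrative only): a v8i32 smin on an AVX2 subtarget hits
// the {ISD::SMIN, MVT::v8i32, 1} entry above, so the cost is LT.first * 1;
// a type with no native min/max instruction instead pays for one compare plus
// one select via the fallback at the end of getMinMaxCost.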
InstructionCost
X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy,
                                   bool IsUnsigned,
                                   TTI::TargetCostKind CostKind) {
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD;
  if (ValTy->isIntOrIntVectorTy()) {
    ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
  } else {
    assert(ValTy->isFPOrFPVectorTy() &&
           "Expected float point or integer vector type.");
    ISD = ISD::FMINNUM;
  }

  // We use the Intel Architecture Code Analyzer(IACA) to measure the throughput
  // and make it as the cost.

  static const CostTblEntry SSE2CostTblNoPairWise[] = {
      {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
      {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
      {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
  };

  static const CostTblEntry SSE41CostTblNoPairWise[] = {
      {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
      {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
      {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
      {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
      {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
      {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
      {ISD::SMIN, MVT::v2i8, 3},  // pminsb
      {ISD::SMIN, MVT::v4i8, 5},  // pminsb
      {ISD::SMIN, MVT::v8i8, 7},  // pminsb
      {ISD::SMIN, MVT::v16i8, 6},
      {ISD::UMIN, MVT::v2i8, 3},  // same as sse2
      {ISD::UMIN, MVT::v4i8, 5},  // same as sse2
      {ISD::UMIN, MVT::v8i8, 7},  // same as sse2
      {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
      {ISD::SMIN, MVT::v16i16, 6},
      {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
      {ISD::SMIN, MVT::v32i8, 8},
      {ISD::UMIN, MVT::v32i8, 8},
  };

  static const CostTblEntry AVX512BWCostTblNoPairWise[] = {
      {ISD::SMIN, MVT::v32i16, 8},
      {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
      {ISD::SMIN, MVT::v64i8, 10},
      {ISD::UMIN, MVT::v64i8, 10},
  };

  // Before legalizing the type, give a chance to look up illegal narrow types
  // in the table.
  // FIXME: Is there a better way to do this?
  EVT VT = TLI->getValueType(DL, ValTy);
  if (VT.isSimple()) {
    MVT MTy = VT.getSimpleVT();
    if (ST->hasBWI())
      if (const auto *Entry =
              CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
        return Entry->Cost;

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return Entry->Cost;

    if (ST->hasSSE41())
      if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
        return Entry->Cost;

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
        return Entry->Cost;
  }

  auto *ValVTy = cast<FixedVectorType>(ValTy);
  unsigned NumVecElts = ValVTy->getNumElements();

  auto *Ty = ValVTy;
  InstructionCost MinMaxCost = 0;
  if (LT.first != 1 && MTy.isVector() &&
      MTy.getVectorNumElements() < ValVTy->getNumElements()) {
    // Type needs to be split. We need LT.first - 1 operations ops.
    Ty = FixedVectorType::get(ValVTy->getElementType(),
                              MTy.getVectorNumElements());
    auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(),
                                           MTy.getVectorNumElements());
    MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned);
    MinMaxCost *= LT.first - 1;
    NumVecElts = MTy.getVectorNumElements();
  }

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  unsigned ScalarSize = ValTy->getScalarSizeInBits();

  // Special case power of 2 reductions where the scalar type isn't changed
  // by type legalization.
  if (!isPowerOf2_32(ValVTy->getNumElements()) ||
      ScalarSize != MTy.getScalarSizeInBits())
    return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsUnsigned, CostKind);

  // Now handle reduction with the legal type, taking into account size changes
  // at each level.
  while (NumVecElts > 1) {
    // Determine the size of the remaining vector we need to reduce.
    unsigned Size = NumVecElts * ScalarSize;
    NumVecElts /= 2;
    // If we're reducing from 256/512 bits, use an extract_subvector.
    if (Size > 128) {
      auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
      MinMaxCost +=
          getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
      Ty = SubTy;
    } else if (Size == 128) {
      // Reducing from 128 bits is a permute of v2f64/v2i64.
      VectorType *ShufTy;
      if (ValTy->isFloatingPointTy())
        ShufTy =
            FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
      else
        ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
      MinMaxCost +=
          getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
    } else if (Size == 64) {
      // Reducing from 64 bits is a shuffle of v4f32/v4i32.
      FixedVectorType *ShufTy;
      if (ValTy->isFloatingPointTy())
        ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
      else
        ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
      MinMaxCost +=
          getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
    } else {
      // Reducing from smaller size is a shift by immediate.
      auto *ShiftTy = FixedVectorType::get(
          Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
      MinMaxCost += getArithmeticInstrCost(
          Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
          TargetTransformInfo::OK_AnyValue,
          TargetTransformInfo::OK_UniformConstantValue,
          TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    }

    // Add the arithmetic op for this level.
    auto *SubCondTy =
        FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements());
    MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned);
  }

  // Add the final extract element to the cost.
  return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
}
/// Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}
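// Worked examples for getIntImmCost(int64_t): 0 is free, any value that fits
// in a sign-extended 32-bit immediate (e.g. 42 or -1) costs one basic
// instruction, and a value such as 0x100000000 that needs a full 64-bit
// materialization is modeled as two basic instructions.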
InstructionCost X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                          TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128bit, because this might lead to
  // incorrect code generation or assertions in codegen.
  // Fixme: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize % 64 != 0)
    ImmVal = Imm.sext(alignTo(BitSize, 64));

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  InstructionCost Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max<InstructionCost>(1, Cost);
}
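// Worked example (illustrative only): an i96 immediate is first sign-extended
// to 128 bits, then costed as two 64-bit chunks via the scalar helper above;
// the chunk costs are summed and the result is clamped to at least 1, since
// at least one instruction is needed to materialize any hoisted constant.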
InstructionCost X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                              const APInt &Imm, Type *Ty,
                                              TTI::TargetCostKind CostKind,
                                              Instruction *Inst) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::ICmp:
    // This is an imperfect hack to prevent constant hoisting of
    // compares that might be trying to check if a 64-bit value fits in
    // 32-bits. The backend can optimize these cases using a right shift by 32.
    // Ideally we would check the compare predicate here. There also other
    // similar immediates the backend can use shifts for.
    if (Idx == 1 && Imm.getBitWidth() == 64) {
      uint64_t ImmVal = Imm.getZExtValue();
      if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
        return TTI::TCC_Free;
    }
    ImmIdx = 1;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    ImmIdx = 1;
    break;
  case Instruction::Add:
  case Instruction::Sub:
    // For add/sub, we can use the opposite instruction for INT32_MIN.
    if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
      return TTI::TCC_Free;
    ImmIdx = 1;
    break;
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    // Division by constant is typically expanded later into a different
    // instruction sequence. This completely changes the constants.
    // Report them as "free" to stop ConstantHoist from marking them as opaque.
    return TTI::TCC_Free;
  case Instruction::Mul:
  case Instruction::Or:
  case Instruction::Xor:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = divideCeil(BitSize, 64);
    InstructionCost Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}
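// Worked example (illustrative only): for "icmp ugt i64 %x, 4294967295" the
// 64-bit immediate 0xffffffff is reported as TCC_Free above, so constant
// hoisting leaves it in place and the backend can instead test the value with
// a right shift by 32.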
InstructionCost X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                                const APInt &Imm, Type *Ty,
                                                TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}
InstructionCost X86TTIImpl::getCFInstrCost(unsigned Opcode,
                                           TTI::TargetCostKind CostKind,
                                           const Instruction *I) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return Opcode == Instruction::PHI ? 0 : 1;
  // Branches are assumed to be predicted.
  return 0;
}
int X86TTIImpl::getGatherOverhead() const {
  // Some CPUs have more overhead for gather. The specified overhead is relative
  // to the Load operation. "2" is the number provided by Intel architects. This
  // parameter is used for cost estimation of Gather Op and comparison with
  // other alternatives.
  // TODO: Remove the explicit hasAVX512()?, That would mean we would only
  // enable gather with a -march.
  if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather()))
    return 2;

  return 1024;
}

int X86TTIImpl::getScatterOverhead() const {
  if (ST->hasAVX512())
    return 2;

  return 1024;
}
// Return an average cost of Gather / Scatter instruction, maybe improved later.
// FIXME: Add TargetCostKind support.
InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy,
                                            const Value *Ptr, Align Alignment,
                                            unsigned AddressSpace) {

  assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
  unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();

  // Try to reduce index size from 64 bit (default for GEP)
  // to 32. It is essential for VF 16. If the index can't be reduced to 32, the
  // operation will use 16 x 64 indices which do not fit in a zmm and needs
  // to split. Also check that the base pointer is the same for all lanes,
  // and that there's at most one variable index.
  auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) {
    unsigned IndexSize = DL.getPointerSizeInBits();
    const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
    if (IndexSize < 64 || !GEP)
      return IndexSize;

    unsigned NumOfVarIndices = 0;
    const Value *Ptrs = GEP->getPointerOperand();
    if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
      return IndexSize;
    for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
      if (isa<Constant>(GEP->getOperand(i)))
        continue;
      Type *IndxTy = GEP->getOperand(i)->getType();
      if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy))
        IndxTy = IndexVTy->getElementType();
      if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
           !isa<SExtInst>(GEP->getOperand(i))) ||
          ++NumOfVarIndices > 1)
        return IndexSize; // 64
    }
    return (unsigned)32;
  };

  // Trying to reduce IndexSize to 32 bits for vector 16.
  // By default the IndexSize is equal to pointer size.
  unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
                           ? getIndexSizeInBits(Ptr, DL)
                           : DL.getPointerSizeInBits();

  auto *IndexVTy = FixedVectorType::get(
      IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
  std::pair<InstructionCost, MVT> IdxsLT =
      TLI->getTypeLegalizationCost(DL, IndexVTy);
  std::pair<InstructionCost, MVT> SrcLT =
      TLI->getTypeLegalizationCost(DL, SrcVTy);
  InstructionCost::CostType SplitFactor =
      *std::max(IdxsLT.first, SrcLT.first).getValue();
  if (SplitFactor > 1) {
    // Handle splitting of vector of pointers
    auto *SplitSrcTy =
        FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
    return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
                                         AddressSpace);
  }

  // The gather / scatter cost is given by Intel architects. It is a rough
  // number since we are looking at one instruction in a time.
  const int GSOverhead = (Opcode == Instruction::Load)
                             ? getGatherOverhead()
                             : getScatterOverhead();
  return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                           MaybeAlign(Alignment), AddressSpace,
                                           TTI::TCK_RecipThroughput);
}
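// Worked example (illustrative only): a 16-lane gather of floats on an
// AVX-512 subtarget can usually shrink its index type to <16 x i32>, so the
// operation is not split and the returned cost is the gather overhead (2)
// plus 16 times the scalar load cost.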
/// Return the cost of full scalarization of gather / scatter operation.
///
/// Opcode - Load or Store instruction.
/// SrcVTy - The type of the data vector that should be gathered or scattered.
/// VariableMask - The mask is non-constant at compile time.
/// Alignment - Alignment for one element.
/// AddressSpace - pointer[s] address space.
///
/// FIXME: Add TargetCostKind support.
InstructionCost X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
                                            bool VariableMask, Align Alignment,
                                            unsigned AddressSpace) {
  unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
  APInt DemandedElts = APInt::getAllOnesValue(VF);
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  InstructionCost MaskUnpackCost = 0;
  if (VariableMask) {
    auto *MaskTy =
        FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
    MaskUnpackCost =
        getScalarizationOverhead(MaskTy, DemandedElts, false, true);
    InstructionCost ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr,
        CmpInst::BAD_ICMP_PREDICATE, CostKind);
    InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
    MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
  }

  // The cost of the scalar loads/stores.
  InstructionCost MemoryOpCost =
      VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                           MaybeAlign(Alignment), AddressSpace, CostKind);

  InstructionCost InsertExtractCost = 0;
  if (Opcode == Instruction::Load)
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of inserting each scalar load into the vector
      InsertExtractCost +=
          getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
  else
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of extracting each element out of the data vector
      InsertExtractCost +=
          getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);

  return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
}
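// Worked example (illustrative only): scalarizing an 8-lane gather with a
// variable mask pays for 8 scalar loads, 8 inserts into the result vector,
// the extraction of the 8 mask bits, and 8 compare+branch pairs to test them.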
/// Calculate the cost of Gather / Scatter operation
InstructionCost X86TTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *SrcVTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind,
    const Instruction *I = nullptr) {
  if (CostKind != TTI::TCK_RecipThroughput) {
    if ((Opcode == Instruction::Load &&
         isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
        (Opcode == Instruction::Store &&
         isLegalMaskedScatter(SrcVTy, Align(Alignment))))
      return 1;
    return BaseT::getGatherScatterOpCost(Opcode, SrcVTy, Ptr, VariableMask,
                                         Alignment, CostKind, I);
  }

  assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
  PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy && Ptr->getType()->isVectorTy())
    PtrTy = dyn_cast<PointerType>(
        cast<VectorType>(Ptr->getType())->getElementType());
  assert(PtrTy && "Unexpected type for Ptr argument");
  unsigned AddressSpace = PtrTy->getAddressSpace();

  if ((Opcode == Instruction::Load &&
       !isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
      (Opcode == Instruction::Store &&
       !isLegalMaskedScatter(SrcVTy, Align(Alignment))))
    return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
                           AddressSpace);

  return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
}
bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                               TargetTransformInfo::LSRCost &C2) {
  // X86 specific here are "instruction number 1st priority".
  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
                  C1.NumIVMuls, C1.NumBaseAdds,
                  C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
         std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
                  C2.NumIVMuls, C2.NumBaseAdds,
                  C2.ScaleCost, C2.ImmCost, C2.SetupCost);
}
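// Note: the comparison above is lexicographic with Insns first, so an LSR
// candidate that needs fewer instructions is preferred even if it uses more
// registers; ties fall through to NumRegs, AddRecCost and the later fields.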
bool X86TTIImpl::canMacroFuseCmp() {
  return ST->hasMacroFusion() || ST->hasBranchFusion();
}
bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
  if (!ST->hasAVX())
    return false;

  // The backend can't handle a single element vector.
  if (isa<VectorType>(DataTy) &&
      cast<FixedVectorType>(DataTy)->getNumElements() == 1)
    return false;

  Type *ScalarTy = DataTy->getScalarType();

  if (ScalarTy->isPointerTy())
    return true;

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (ScalarTy->isHalfTy() && ST->hasBWI() && ST->hasFP16())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64 ||
         ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
}
bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) {
  return isLegalMaskedLoad(DataType, Alignment);
}
bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
  unsigned DataSize = DL.getTypeStoreSize(DataType);
  // The only supported nontemporal loads are for aligned vectors of 16 or 32
  // bytes. Note that 32-byte nontemporal vector loads are supported by AVX2
  // (the equivalent stores only require AVX).
  if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
    return DataSize == 16 ? ST->hasSSE1() : ST->hasAVX2();

  return false;
}
bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
  unsigned DataSize = DL.getTypeStoreSize(DataType);

  // SSE4A supports nontemporal stores of float and double at arbitrary
  // alignment.
  if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
    return true;

  // Besides the SSE4A subtarget exception above, only aligned stores are
  // available nontemporaly on any other subtarget. And only stores with a size
  // of 4..32 bytes (powers of 2, only) are permitted.
  if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
      !isPowerOf2_32(DataSize))
    return false;

  // 32-byte vector nontemporal stores are supported by AVX (the equivalent
  // loads require AVX2).
  if (DataSize == 32)
    return ST->hasAVX();
  else if (DataSize == 16)
    return ST->hasSSE1();
  return true;
}
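// Worked examples (illustrative only): a 32-byte aligned <8 x float> store is
// nontemporal-legal only with AVX, a 16-byte aligned vector store only needs
// SSE1, 4- and 8-byte scalar stores are accepted on any subtarget when
// sufficiently aligned, and under-aligned or non-power-of-2 sizes are
// rejected (except for the SSE4A float/double case above).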
bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
  if (!isa<VectorType>(DataTy))
    return false;

  if (!ST->hasAVX512())
    return false;

  // The backend can't handle a single element vector.
  if (cast<FixedVectorType>(DataTy)->getNumElements() == 1)
    return false;

  Type *ScalarTy = cast<VectorType>(DataTy)->getElementType();

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64 ||
         ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
}
bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
  return isLegalMaskedExpandLoad(DataTy);
}
bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
  // Some CPUs have better gather performance than others.
  // TODO: Remove the explicit ST->hasAVX512()?, That would mean we would only
  // enable gather with a -march.
  if (!(ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2())))
    return false;

  // This function is called now in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about legality of the feature,
  // the vectorization factor is not calculated yet. The Loop Vectorizer
  // sends a scalar type and the decision is based on the width of the
  // element.
  // Later on, the cost model will estimate usage this intrinsic based on
  // the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  // We also reject single element vectors as the type legalizer can't
  // scalarize it.
  if (auto *DataVTy = dyn_cast<FixedVectorType>(DataTy)) {
    unsigned NumElts = DataVTy->getNumElements();
    if (NumElts == 1)
      return false;
    // Gather / Scatter for vector 2 is not profitable on KNL / SKX
    // Vector-4 of gather/scatter instruction does not exist on KNL.
    // We can extend it to 8 elements, but zeroing upper bits of
    // the mask vector will add more instructions. Right now we give the scalar
    // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter
    // instruction is better in the VariableMask case.
    if (ST->hasAVX512() && (NumElts == 2 || (NumElts == 4 && !ST->hasVLX())))
      return false;
  }
  Type *ScalarTy = DataTy->getScalarType();
  if (ScalarTy->isPointerTy())
    return true;

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64;
}
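// Worked example (illustrative only): on an AVX-512 subtarget without VLX
// (e.g. KNL) a <4 x i32> gather is rejected above and will be scalarized,
// while a subtarget with VLX accepts it; 2-element gathers are rejected on
// any AVX-512 subtarget.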
bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) {
  // AVX2 doesn't support scatter
  if (!ST->hasAVX512())
    return false;
  return isLegalMaskedGather(DataType, Alignment);
}
bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  EVT VT = TLI->getValueType(DL, DataType);
  return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
}
bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
  return false;
}
bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
}
bool X86TTIImpl::areFunctionArgsABICompatible(
    const Function *Caller, const Function *Callee,
    SmallPtrSetImpl<Argument *> &Args) const {
  if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
    return false;

  // If we get here, we know the target features match. If one function
  // considers 512-bit vectors legal and the other does not, consider them
  // incompatible.
  const TargetMachine &TM = getTLI()->getTargetMachine();

  if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
      TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs())
    return true;

  // Consider the arguments compatible if they aren't vectors or aggregates.
  // FIXME: Look at the size of vectors.
  // FIXME: Look at the element types of aggregates to see if there are vectors.
  // FIXME: The API of this function seems intended to allow arguments
  // to be removed from the set, but the caller doesn't check if the set
  // becomes empty so that may not work in practice.
  return llvm::none_of(Args, [](Argument *A) {
    auto *EltTy = cast<PointerType>(A->getType())->getElementType();
    return EltTy->isVectorTy() || EltTy->isAggregateType();
  });
}
X86TTIImpl::TTI::MemCmpExpansionOptions
X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  Options.NumLoadsPerBlock = 2;
  // All GPR and vector loads can be unaligned.
  Options.AllowOverlappingLoads = true;
  if (IsZeroCmp) {
    // Only enable vector loads for equality comparison. Right now the vector
    // version is not as fast for three way compare (see #33329).
    const unsigned PreferredWidth = ST->getPreferVectorWidth();
    if (PreferredWidth >= 512 && ST->hasAVX512()) Options.LoadSizes.push_back(64);
    if (PreferredWidth >= 256 && ST->hasAVX()) Options.LoadSizes.push_back(32);
    if (PreferredWidth >= 128 && ST->hasSSE2()) Options.LoadSizes.push_back(16);
  }
  if (ST->is64Bit()) {
    Options.LoadSizes.push_back(8);
  }
  Options.LoadSizes.push_back(4);
  Options.LoadSizes.push_back(2);
  Options.LoadSizes.push_back(1);
  return Options;
}
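// Worked example (illustrative only, assuming the default preferred vector
// width of 256 on an AVX2 subtarget): an equality-only memcmp expansion gets
// LoadSizes = {32, 16, 8, 4, 2, 1}, while a three-way compare skips the
// vector sizes and uses only the GPR sizes {8, 4, 2, 1} on a 64-bit target.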
bool X86TTIImpl::enableInterleavedAccessVectorization() {
  // TODO: We expect this to be beneficial regardless of arch,
  // but there are currently some unexplained performance artifacts on Atom.
  // As a temporary solution, disable on Atom.
  return !(ST->isAtom());
}
// Get estimation for interleaved load/store operations for AVX2.
// \p Factor is the interleaved-access factor (stride) - number of
// (interleaved) elements in the group.
// \p Indices contains the indices for a strided load: when the
// interleaved load has gaps they indicate which elements are used.
// If Indices is empty (or if the number of indices is equal to the size
// of the interleaved-access as given in \p Factor) the access has no gaps.
//
// As opposed to AVX-512, AVX2 does not have generic shuffles that allow
// computing the cost using a generic formula as a function of generic
// shuffles. We therefore use a lookup table instead, filled according to
// the instruction sequences that codegen currently generates.
InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX2(
    unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
    ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {

  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  // We currently Support only fully-interleaved groups, with no gaps.
  // TODO: Support also strided loads (interleaved-groups with gaps).
  if (Indices.size() && Indices.size() != Factor)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind);

  // VecTy for interleave memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;

  // This function can be called with VecTy=<6xi128>, Factor=3, in which case
  // the VF=2, while v2i128 is an unsupported MVT vector type
  // (see MachineValueType.h::getVectorVT()).
  if (!LegalVT.isVector())
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind);

  unsigned VF = VecTy->getNumElements() / Factor;
  Type *ScalarTy = VecTy->getElementType();
  // Deduplicate entries, model floats/pointers as appropriately-sized integers.
  if (!ScalarTy->isIntegerTy())
    ScalarTy =
        Type::getIntNTy(ScalarTy->getContext(), DL.getTypeSizeInBits(ScalarTy));

  // Get the cost of all the memory operations.
  InstructionCost MemOpCosts = getMemoryOpCost(
      Opcode, VecTy, MaybeAlign(Alignment), AddressSpace, CostKind);

  auto *VT = FixedVectorType::get(ScalarTy, VF);
  EVT ETy = TLI->getValueType(DL, VT);
  if (!ETy.isSimple())
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind);

  // TODO: Complete for other data-types and strides.
  // Each combination of Stride, element bit width and VF results in a different
  // sequence; The cost tables are therefore accessed with:
  // Factor (stride) and VectorType=VFxiN.
  // The Cost accounts only for the shuffle sequence;
  // The cost of the loads/stores is accounted for separately.
  //
  static const CostTblEntry AVX2InterleavedLoadTbl[] = {
      {2, MVT::v4i64, 6}, // (load 8i64 and) deinterleave into 2 x 4i64

      {3, MVT::v2i8, 10},  // (load 6i8 and) deinterleave into 3 x 2i8
      {3, MVT::v4i8, 4},   // (load 12i8 and) deinterleave into 3 x 4i8
      {3, MVT::v8i8, 9},   // (load 24i8 and) deinterleave into 3 x 8i8
      {3, MVT::v16i8, 11}, // (load 48i8 and) deinterleave into 3 x 16i8
      {3, MVT::v32i8, 13}, // (load 96i8 and) deinterleave into 3 x 32i8

      {3, MVT::v8i32, 17}, // (load 24i32 and) deinterleave into 3 x 8i32

      {4, MVT::v2i8, 12},  // (load 8i8 and) deinterleave into 4 x 2i8
      {4, MVT::v4i8, 4},   // (load 16i8 and) deinterleave into 4 x 4i8
      {4, MVT::v8i8, 20},  // (load 32i8 and) deinterleave into 4 x 8i8
      {4, MVT::v16i8, 39}, // (load 64i8 and) deinterleave into 4 x 16i8
      {4, MVT::v32i8, 80}, // (load 128i8 and) deinterleave into 4 x 32i8

      {8, MVT::v8i32, 40} // (load 64i32 and) deinterleave into 8 x 8i32
  };

  static const CostTblEntry AVX2InterleavedStoreTbl[] = {
      {2, MVT::v4i64, 6}, // interleave 2 x 4i64 into 8i64 (and store)

      {3, MVT::v2i8, 7},   // interleave 3 x 2i8 into 6i8 (and store)
      {3, MVT::v4i8, 8},   // interleave 3 x 4i8 into 12i8 (and store)
      {3, MVT::v8i8, 11},  // interleave 3 x 8i8 into 24i8 (and store)
      {3, MVT::v16i8, 11}, // interleave 3 x 16i8 into 48i8 (and store)
      {3, MVT::v32i8, 13}, // interleave 3 x 32i8 into 96i8 (and store)

      {4, MVT::v2i8, 12},  // interleave 4 x 2i8 into 8i8 (and store)
      {4, MVT::v4i8, 9},   // interleave 4 x 4i8 into 16i8 (and store)
      {4, MVT::v8i8, 10},  // interleave 4 x 8i8 into 32i8 (and store)
      {4, MVT::v16i8, 10}, // interleave 4 x 16i8 into 64i8 (and store)
      {4, MVT::v32i8, 12}  // interleave 4 x 32i8 into 128i8 (and store)
  };

  if (Opcode == Instruction::Load) {
    if (const auto *Entry =
            CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT()))
      return MemOpCosts + Entry->Cost;
  } else {
    assert(Opcode == Instruction::Store &&
           "Expected Store Instruction at this point");
    if (const auto *Entry =
            CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT()))
      return MemOpCosts + Entry->Cost;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind);
}
// Get estimation for interleaved load/store operations and strided load.
// \p Indices contains indices for strided load.
// \p Factor - the factor of interleaving.
// AVX-512 provides 3-src shuffles that significantly reduces the cost.
InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512(
    unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
    ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {

  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  // VecTy for interleave memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.

  // Calculate the number of memory operations (NumOfMemOps), required
  // for load/store the VecTy.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
                                             LegalVT.getVectorNumElements());
  InstructionCost MemOpCost = getMemoryOpCost(
      Opcode, SingleMemOpTy, MaybeAlign(Alignment), AddressSpace, CostKind);

  unsigned VF = VecTy->getNumElements() / Factor;
  MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);

  if (Opcode == Instruction::Load) {
    // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
    // contain the cost of the optimized shuffle sequence that the
    // X86InterleavedAccess pass will generate.
    // The cost of loads and stores are computed separately from the table.

    // X86InterleavedAccess support only the following interleaved-access group.
    static const CostTblEntry AVX512InterleavedLoadTbl[] = {
        {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
        {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
        {3, MVT::v64i8, 22}, //(load 96i8 and) deinterleave into 3 x 32i8
    };

    if (const auto *Entry =
            CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
      return NumOfMemOps * MemOpCost + Entry->Cost;
    //If an entry does not exist, fallback to the default implementation.

    // Kind of shuffle depends on number of loaded values.
    // If we load the entire data in one register, we can use a 1-src shuffle.
    // Otherwise, we'll merge 2 sources in each operation.
    TTI::ShuffleKind ShuffleKind =
        (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;

    InstructionCost ShuffleCost =
        getShuffleCost(ShuffleKind, SingleMemOpTy, None, 0, nullptr);

    unsigned NumOfLoadsInInterleaveGrp =
        Indices.size() ? Indices.size() : Factor;
    auto *ResultTy = FixedVectorType::get(VecTy->getElementType(),
                                          VecTy->getNumElements() / Factor);
    InstructionCost NumOfResults =
        getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
        NumOfLoadsInInterleaveGrp;

    // About a half of the loads may be folded in shuffles when we have only
    // one result. If we have more than one result, we do not fold loads at all.
    unsigned NumOfUnfoldedLoads =
        NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;

    // Get a number of shuffle operations per result.
    unsigned NumOfShufflesPerResult =
        std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));

    // The SK_MergeTwoSrc shuffle clobbers one of src operands.
    // When we have more than one destination, we need additional instructions
    // to keep sources.
    InstructionCost NumOfMoves = 0;
    if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
      NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;

    InstructionCost Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
                           NumOfUnfoldedLoads * MemOpCost + NumOfMoves;

    return Cost;
  }

  // Store.
  assert(Opcode == Instruction::Store &&
         "Expected Store Instruction at this point");
  // X86InterleavedAccess support only the following interleaved-access group.
  static const CostTblEntry AVX512InterleavedStoreTbl[] = {
      {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
      {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
      {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 96i8 (and store)

      {4, MVT::v8i8, 10},  // interleave 4 x 8i8 into 32i8 (and store)
      {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
      {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
      {4, MVT::v64i8, 24}  // interleave 4 x 32i8 into 256i8 (and store)
  };

  if (const auto *Entry =
          CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
    return NumOfMemOps * MemOpCost + Entry->Cost;
  //If an entry does not exist, fallback to the default implementation.

  // There is no strided stores meanwhile. And store can't be folded in
  // shuffle.
  unsigned NumOfSources = Factor; // The number of values to be merged.
  InstructionCost ShuffleCost =
      getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, None, 0, nullptr);
  unsigned NumOfShufflesPerStore = NumOfSources - 1;

  // The SK_MergeTwoSrc shuffle clobbers one of src operands.
  // We need additional instructions to keep sources.
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  InstructionCost Cost =
      NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
      NumOfMoves;
  return Cost;
}
X86TTIImpl::getInterleavedMemoryOpCost(
5212 unsigned Opcode
, Type
*VecTy
, unsigned Factor
, ArrayRef
<unsigned> Indices
,
5213 Align Alignment
, unsigned AddressSpace
, TTI::TargetCostKind CostKind
,
5214 bool UseMaskForCond
, bool UseMaskForGaps
) {
5215 auto isSupportedOnAVX512
= [&](Type
*VecTy
, bool HasBW
) {
5216 Type
*EltTy
= cast
<VectorType
>(VecTy
)->getElementType();
5217 if (EltTy
->isFloatTy() || EltTy
->isDoubleTy() || EltTy
->isIntegerTy(64) ||
5218 EltTy
->isIntegerTy(32) || EltTy
->isPointerTy())
5220 if (EltTy
->isIntegerTy(16) || EltTy
->isIntegerTy(8) ||
5221 (!ST
->useSoftFloat() && ST
->hasFP16() && EltTy
->isHalfTy()))
5225 if (ST
->hasAVX512() && isSupportedOnAVX512(VecTy
, ST
->hasBWI()))
5226 return getInterleavedMemoryOpCostAVX512(
5227 Opcode
, cast
<FixedVectorType
>(VecTy
), Factor
, Indices
, Alignment
,
5228 AddressSpace
, CostKind
, UseMaskForCond
, UseMaskForGaps
);
5230 return getInterleavedMemoryOpCostAVX2(
5231 Opcode
, cast
<FixedVectorType
>(VecTy
), Factor
, Indices
, Alignment
,
5232 AddressSpace
, CostKind
, UseMaskForCond
, UseMaskForGaps
);
5234 return BaseT::getInterleavedMemoryOpCost(Opcode
, VecTy
, Factor
, Indices
,
5235 Alignment
, AddressSpace
, CostKind
,
5236 UseMaskForCond
, UseMaskForGaps
);