//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// About the cost model numbers used below it's necessary to say the
/// following: the numbers correspond to some "generic" X86 CPU instead of a
/// concrete CPU model. Usually the numbers correspond to the CPU where the
/// feature first appeared. For example, if we do Subtarget.hasSSE42() in
/// the lookups below the cost is based on Nehalem as that was the first CPU
/// to support that feature level and thus has most likely the worst case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   AVX     - Sandy Bridge
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency):
///                     divss   sqrtss  rsqrtss
///   Piledriver        9-24    13-15   5
///   Pentium II,III    18      30      2
///   Nehalem           7-14    7-18    3
///   Haswell           10-13   11      5
/// TODO: Develop and implement the target dependent cost model and
/// specialize cost numbers for different Cost Model Targets such as throughput,
/// code size, latency and uop count.
//===----------------------------------------------------------------------===//
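
// Illustrative note (editor's sketch, not part of the upstream file): every
// cost query below follows the same pattern. The IR type is first mapped to a
// legal MVT, which may split it across LT.first registers; a per-feature-level
// table is then probed from newest to oldest ISA, and a hit is scaled by the
// split factor:
//
//   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
//   if (const auto *Entry = CostTableLookup(Table, ISD, LT.second))
//     return LT.first * Entry->Cost;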

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
    TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    return 32 * 1024; // 32 KByte
  case TargetTransformInfo::CacheLevel::L2D:
    return 256 * 1024; // 256 KByte
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
    TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    return 8;
  case TargetTransformInfo::CacheLevel::L2D:
    return 8;
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) const {
  unsigned PreferVectorWidth = ST->getPreferVectorWidth();
  if (Vector) {
    if (ST->hasAVX512() && PreferVectorWidth >= 512)
      return 512;
    if (ST->hasAVX() && PreferVectorWidth >= 256)
      return 256;
    if (ST->hasSSE1() && PreferVectorWidth >= 128)
      return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;

  return 32;
}
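
// Example (editor's illustration): on an AVX-512 target compiled with
// -mprefer-vector-width=256, PreferVectorWidth is 256, so the hasAVX512()
// clause above is skipped and the vectorizers are steered towards 256-bit
// YMM operations even though 512-bit ZMM registers exist.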

unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
  return getRegisterBitWidth(true);
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let regular unrolling unroll the loop, which saves the overflow
  // check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry GLMCostTable[] = {
    { ISD::FDIV,  MVT::f32,   18 }, // divss
    { ISD::FDIV,  MVT::v4f32, 35 }, // divps
    { ISD::FDIV,  MVT::f64,   33 }, // divsd
    { ISD::FDIV,  MVT::v2f64, 65 }, // divpd
  };

  if (ST->isGLM())
    if (const auto *Entry = CostTableLookup(GLMCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SLMCostTable[] = {
    { ISD::MUL,   MVT::v4i32, 11 }, // pmulld
    { ISD::MUL,   MVT::v8i16,  2 }, // pmullw
    { ISD::MUL,   MVT::v16i8, 14 }, // extend/pmullw/trunc sequence.
    { ISD::FMUL,  MVT::f64,    2 }, // mulsd
    { ISD::FMUL,  MVT::v2f64,  4 }, // mulpd
    { ISD::FMUL,  MVT::v4f32,  2 }, // mulps
    { ISD::FDIV,  MVT::f32,   17 }, // divss
    { ISD::FDIV,  MVT::v4f32, 39 }, // divps
    { ISD::FDIV,  MVT::f64,   32 }, // divsd
    { ISD::FDIV,  MVT::v2f64, 69 }, // divpd
    { ISD::FADD,  MVT::v2f64,  2 }, // addpd
    { ISD::FSUB,  MVT::v2f64,  2 }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long:
    // multiplies(3), shifts(3) and adds(2)
    // slm muldq version throughput is 2 and addq throughput 4
    // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
    // 3X4 (addq throughput) = 17
    { ISD::MUL,   MVT::v2i64, 17 },
    // slm addq\subq throughput is 4
    { ISD::ADD,   MVT::v2i64,  4 },
    { ISD::SUB,   MVT::v2i64,  4 },
  };

  if (ST->isSLM()) {
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool signedMode = Op1Signed | Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!signedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!signedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }

    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
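
  // Worked example (editor's illustration): for a v4i32 multiply on SLM where
  // both operands are zero-extended from i8, minRequiredElementSize() reports
  // 8 unsigned bits, so the "!signedMode && OpMinSize <= 8" clause above
  // prices the whole operation as a pmullw/zext sequence: 3 * LT.first.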

  if ((ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV ||
       ISD == ISD::UREM) &&
      (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    if (ISD == ISD::SDIV || ISD == ISD::SREM) {
      // On X86, vector signed division by a constant power-of-two is
      // normally expanded to the sequence SRA + SRL + ADD + SRA.
      // The OperandValue properties may not be the same as that of the
      // previous operation; conservatively assume OP_None.
      int Cost =
          2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info, Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);

      if (ISD == ISD::SREM) {
        // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
        Cost += getArithmeticInstrCost(Instruction::Mul, Ty, Op1Info, Op2Info);
        Cost += getArithmeticInstrCost(Instruction::Sub, Ty, Op1Info, Op2Info);
      }

      return Cost;
    }

    // Vector unsigned division/remainder will be simplified to shifts/masks.
    if (ISD == ISD::UDIV)
      return getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    if (ISD == ISD::UREM)
      return getArithmeticInstrCost(Instruction::And, Ty, Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);
  }
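
  // Worked example (editor's illustration): `sdiv <4 x i32> %x, 8` is priced
  // above as 2*AShr + LShr + Add, i.e. the cost of the four-shift expansion;
  // the matching `srem` additionally pays for the Mul and Sub that rebuild
  // X - (X/C)*C.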

  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL,  MVT::v64i8,  2 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,  2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,  4 }, // psrlw, pand, pxor, psubb.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v2i64,  1 },
    { ISD::SRA,  MVT::v4i64,  1 },
    { ISD::SRA,  MVT::v8i64,  1 },
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v32i8,  2 }, // psllw + pand.
    { ISD::SRL,  MVT::v32i8,  2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v32i8,  4 }, // psrlw, pand, pxor, psubb.

    { ISD::SRA,  MVT::v4i64,  4 }, // 2 x psrad + shuffle.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v16i8,    2 }, // psllw + pand.
    { ISD::SRL,  MVT::v16i8,    2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v16i8,    4 }, // psrlw, pand, pxor, psubb.

    { ISD::SHL,  MVT::v32i8,  4+2 }, // 2*(psllw + pand) + split.
    { ISD::SRL,  MVT::v32i8,  4+2 }, // 2*(psrlw + pand) + split.
    { ISD::SRA,  MVT::v32i8,  8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.
  };

  // XOP has faster vXi8 shifts.
  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2() && !ST->hasXOP()) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512BWConstCostTable[] = {
    { ISD::SDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v32i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v32i16,  8 }, // vpmulhuw+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasBWI()) {
    if (const auto *Entry =
            CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512ConstCostTable[] = {
    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX512()) {
    if (const auto *Entry =
            CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v16i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v16i16,  8 }, // vpmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v8i32,  19 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v8i32,  19 }, // vpmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::SREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::UREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
    { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v8i16,     6 }, // pmulhw sequence
    { ISD::SREM, MVT::v8i16,     8 }, // pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
    { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v8i16,     6 }, // pmulhuw sequence
    { ISD::UREM, MVT::v8i16,     8 }, // pmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  38+2 }, // 2*pmuludq sequence + split.
    { ISD::SREM, MVT::v8i32,  48+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::SDIV, MVT::v4i32,    19 }, // pmuludq sequence
    { ISD::SREM, MVT::v4i32,    24 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  30+2 }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32,  40+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::UDIV, MVT::v4i32,    15 }, // pmuludq sequence
    { ISD::UREM, MVT::v4i32,    20 }, // pmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasSSE2()) {
    if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 32;
    if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 38;
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;
    if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 20;

    if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i16, 1 }, // psllw.
    { ISD::SRL,  MVT::v16i16, 1 }, // psrlw.
    { ISD::SRA,  MVT::v16i16, 1 }, // psraw.
  };

  if (ST->hasAVX2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v8i16, 1 }, // psllw.
    { ISD::SHL,  MVT::v4i32, 1 }, // pslld
    { ISD::SHL,  MVT::v2i64, 1 }, // psllq.

    { ISD::SRL,  MVT::v8i16, 1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32, 1 }, // psrld.
    { ISD::SRL,  MVT::v2i64, 1 }, // psrlq.

    { ISD::SRA,  MVT::v8i16, 1 }, // psraw.
    { ISD::SRA,  MVT::v4i32, 1 }, // psrad.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512DQCostTable[] = {
    { ISD::MUL,  MVT::v2i64, 1 },
    { ISD::MUL,  MVT::v4i64, 1 },
    { ISD::MUL,  MVT::v8i64, 1 }
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI())
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWCostTable[] = {
    { ISD::SHL,  MVT::v8i16,   1 }, // vpsllvw
    { ISD::SRL,  MVT::v8i16,   1 }, // vpsrlvw
    { ISD::SRA,  MVT::v8i16,   1 }, // vpsravw

    { ISD::SHL,  MVT::v16i16,  1 }, // vpsllvw
    { ISD::SRL,  MVT::v16i16,  1 }, // vpsrlvw
    { ISD::SRA,  MVT::v16i16,  1 }, // vpsravw

    { ISD::SHL,  MVT::v32i16,  1 }, // vpsllvw
    { ISD::SRL,  MVT::v32i16,  1 }, // vpsrlvw
    { ISD::SRA,  MVT::v32i16,  1 }, // vpsravw

    { ISD::SHL,  MVT::v64i8,  11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v64i8,  11 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v64i8,  24 }, // vpblendvb sequence.

    { ISD::MUL,  MVT::v64i8,  11 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v32i8,   4 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,   4 }, // extend/pmullw/trunc sequence.
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL,  MVT::v16i32,  1 },
    { ISD::SRL,  MVT::v16i32,  1 },
    { ISD::SRA,  MVT::v16i32,  1 },

    { ISD::SHL,  MVT::v8i64,   1 },
    { ISD::SRL,  MVT::v8i64,   1 },

    { ISD::SRA,  MVT::v2i64,   1 },
    { ISD::SRA,  MVT::v4i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },

    { ISD::MUL,  MVT::v32i8,  13 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,   5 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i32,  1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v8i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v4i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v8i64,   8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FADD, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/

    { ISD::FADD, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShiftCostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2 even though we declare them
    // custom in order to detect the cases where the shift amount is a scalar.
    { ISD::SHL,  MVT::v4i32, 1 },
    { ISD::SRL,  MVT::v4i32, 1 },
    { ISD::SRA,  MVT::v4i32, 1 },
    { ISD::SHL,  MVT::v8i32, 1 },
    { ISD::SRL,  MVT::v8i32, 1 },
    { ISD::SRA,  MVT::v8i32, 1 },
    { ISD::SHL,  MVT::v2i64, 1 },
    { ISD::SRL,  MVT::v2i64, 1 },
    { ISD::SHL,  MVT::v4i64, 1 },
    { ISD::SRL,  MVT::v4i64, 1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry XOPShiftCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL,  MVT::v16i8,    1 },
    { ISD::SRL,  MVT::v16i8,    2 },
    { ISD::SRA,  MVT::v16i8,    2 },
    { ISD::SHL,  MVT::v8i16,    1 },
    { ISD::SRL,  MVT::v8i16,    2 },
    { ISD::SRA,  MVT::v8i16,    2 },
    { ISD::SHL,  MVT::v4i32,    1 },
    { ISD::SRL,  MVT::v4i32,    2 },
    { ISD::SRA,  MVT::v4i32,    2 },
    { ISD::SHL,  MVT::v2i64,    1 },
    { ISD::SRL,  MVT::v2i64,    2 },
    { ISD::SRA,  MVT::v2i64,    2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL,  MVT::v32i8,  2+2 },
    { ISD::SRL,  MVT::v32i8,  4+2 },
    { ISD::SRA,  MVT::v32i8,  4+2 },
    { ISD::SHL,  MVT::v16i16, 2+2 },
    { ISD::SRL,  MVT::v16i16, 4+2 },
    { ISD::SRA,  MVT::v16i16, 4+2 },
    { ISD::SHL,  MVT::v8i32,  2+2 },
    { ISD::SRL,  MVT::v8i32,  4+2 },
    { ISD::SRA,  MVT::v8i32,  4+2 },
    { ISD::SHL,  MVT::v4i64,  2+2 },
    { ISD::SRL,  MVT::v4i64,  4+2 },
    { ISD::SRA,  MVT::v4i64,  4+2 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    // If the right shift is constant then we'll fold the negation so
    // it's as cheap as a left shift.
    int ShiftISD = ISD;
    if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      ShiftISD = ISD::SHL;
    if (const auto *Entry =
            CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformShiftCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i16, 2+2 }, // 2*psllw + split.
    { ISD::SHL,  MVT::v8i32,  2+2 }, // 2*pslld + split.
    { ISD::SHL,  MVT::v4i64,  2+2 }, // 2*psllq + split.

    { ISD::SRL,  MVT::v16i16, 2+2 }, // 2*psrlw + split.
    { ISD::SRL,  MVT::v8i32,  2+2 }, // 2*psrld + split.
    { ISD::SRL,  MVT::v4i64,  2+2 }, // 2*psrlq + split.

    { ISD::SRA,  MVT::v16i16, 2+2 }, // 2*psraw + split.
    { ISD::SRA,  MVT::v8i32,  2+2 }, // 2*psrad + split.
    { ISD::SRA,  MVT::v2i64,    4 }, // 2*psrad + shuffle.
    { ISD::SRA,  MVT::v4i64,  8+2 }, // 2*(2*psrad + shuffle) + split.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {

    // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
    if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
      return LT.first * 4; // 2*psrad + shuffle.

    if (const auto *Entry =
            CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // Vector shift left by non uniform constant can be lowered
    // into vector multiply.
    if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
        ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
      ISD = ISD::MUL;
  }
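
  // Worked example (editor's illustration): `shl <8 x i16> %x, <i16 1, i16 2,
  // ...>` on SSE2 is re-classified as ISD::MUL here, so it picks up the
  // pmullw cost of 1 from SSE2CostTable further down instead of the far more
  // expensive general variable-shift sequence.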

  static const CostTblEntry AVX2CostTable[] = {
    { ISD::SHL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,  24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,   4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,   4 }, // srl/xor/sub sequence.

    { ISD::SUB,  MVT::v32i8,   1 }, // psubb
    { ISD::ADD,  MVT::v32i8,   1 }, // paddb
    { ISD::SUB,  MVT::v16i16,  1 }, // psubw
    { ISD::ADD,  MVT::v16i16,  1 }, // paddw
    { ISD::SUB,  MVT::v8i32,   1 }, // psubd
    { ISD::ADD,  MVT::v8i32,   1 }, // paddd
    { ISD::SUB,  MVT::v4i64,   1 }, // psubq
    { ISD::ADD,  MVT::v4i64,   1 }, // paddq

    { ISD::MUL,  MVT::v32i8,  17 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,   7 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i16,  1 }, // pmullw
    { ISD::MUL,  MVT::v8i32,   2 }, // pmulld (Haswell from agner.org)
    { ISD::MUL,  MVT::v4i64,   8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FADD, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FADD, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/

    { ISD::FDIV, MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16,  4 },
    { ISD::MUL,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v32i8,   4 },
    { ISD::ADD,  MVT::v32i8,   4 },
    { ISD::SUB,  MVT::v16i16,  4 },
    { ISD::ADD,  MVT::v16i16,  4 },
    { ISD::SUB,  MVT::v8i32,   4 },
    { ISD::ADD,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v4i64,   4 },
    { ISD::ADD,  MVT::v4i64,   4 },

    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(3) and adds(2)
    // Because we believe v4i64 to be a legal type, we must also include the
    // extract+insert in the cost table. Therefore, the cost here is 18
    // instead of 8.
    { ISD::MUL,  MVT::v4i64,  18 },

    { ISD::MUL,  MVT::v32i8,  26 }, // extend/pmullw/trunc sequence.

    { ISD::FDIV, MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  44 }, // SNB from http://www.agner.org/
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE42CostTable[] = {
    { ISD::FADD, MVT::f64,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::f32,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v2f64,  1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,  1 }, // Nehalem from http://www.agner.org/

    { ISD::FSUB, MVT::f64,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::f32,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v2f64,  1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,  1 }, // Nehalem from http://www.agner.org/

    { ISD::FMUL, MVT::f64,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::f32,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,  1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v4f32,  1 }, // Nehalem from http://www.agner.org/

    { ISD::FDIV, MVT::f32,   14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::f64,   22 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
  };

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41CostTable[] = {
    { ISD::SHL,  MVT::v16i8,      11 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v32i8,  2*11+2 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v4i32,       4 }, // pslld/paddd/cvttps2dq/pmulld
    { ISD::SHL,  MVT::v8i32,   2*4+2 }, // pslld/paddd/cvttps2dq/pmulld + split

    { ISD::SRL,  MVT::v16i8,      12 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v32i8,  2*12+2 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v4i32,      11 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,  2*11+2 }, // Shift each lane + blend + split.

    { ISD::SRA,  MVT::v16i8,      24 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v32i8,  2*24+2 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v4i32,      12 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,  2*12+2 }, // Shift each lane + blend + split.

    { ISD::MUL,  MVT::v4i32,       2 }  // pmulld (Nehalem from agner.org)
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8,      26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,      32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,     2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,   2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRL,  MVT::v16i8,      26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,      32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,      16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64,   2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRA,  MVT::v16i8,      54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,      32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,      16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,      12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,  2*12+2 }, // srl/xor/sub sequence+split.

    { ISD::MUL,  MVT::v16i8,      12 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v8i16,       1 }, // pmullw
    { ISD::MUL,  MVT::v4i32,       6 }, // 3*pmuludq/4*shuffle
    { ISD::MUL,  MVT::v2i64,       8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,        23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,      39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,        38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,      69 }, // Pentium IV from http://www.agner.org/

    { ISD::FADD, MVT::f32,         2 }, // Pentium IV from http://www.agner.org/
    { ISD::FADD, MVT::f64,         2 }, // Pentium IV from http://www.agner.org/

    { ISD::FSUB, MVT::f32,         2 }, // Pentium IV from http://www.agner.org/
    { ISD::FSUB, MVT::f64,         2 }, // Pentium IV from http://www.agner.org/
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/

    { ISD::FADD, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/

    { ISD::FSUB, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/

    { ISD::ADD,  MVT::i8,     1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD,  MVT::i16,    1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD,  MVT::i32,    1 }, // Pentium III from http://www.agner.org/

    { ISD::SUB,  MVT::i8,     1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB,  MVT::i16,    1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB,  MVT::i32,    1 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  // It is not a good idea to vectorize division. We have to scalarize it and
  // in the process we will often end up having to spill regular
  // registers. The overhead of division is going to dominate most kernels
  // anyways so try hard to prevent vectorization of division - it is
  // generally a bad idea. Assume somewhat arbitrarily that we have to be able
  // to hide "20 cycles" for each lane.
  if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
                               ISD == ISD::UDIV || ISD == ISD::UREM)) {
    int ScalarCost = getArithmeticInstrCost(
        Opcode, Ty->getScalarType(), Op1Info, Op2Info,
        TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
  }
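
  // Worked example (editor's illustration): a v8i32 sdiv on an SSE2 target
  // splits into two v4i32 halves (LT.first == 2, 4 lanes each), so the
  // returned cost is 20 * 2 * 4 * ScalarCost - deliberately large enough to
  // discourage the vectorizers from touching division-heavy loops.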

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // Treat Transpose as 2-op shuffles - there's no difference in lowering.
  if (Kind == TTI::SK_Transpose)
    Kind = TTI::SK_PermuteTwoSrc;

  // For Broadcasts we are splatting the first element from the first input
  // register, so only need to reference that input and all the output
  // registers are the same.
  if (Kind == TTI::SK_Broadcast)
    LT.first = 1;

  // Subvector extractions are free if they start at the beginning of a
  // vector and cheap if the subvectors are aligned.
  if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
    int NumElts = LT.second.getVectorNumElements();
    if ((Index % NumElts) == 0)
      return 0;
    std::pair<int, MVT> SubLT = TLI->getTypeLegalizationCost(DL, SubTp);
    if (SubLT.second.isVector()) {
      int NumSubElts = SubLT.second.getVectorNumElements();
      if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
        return SubLT.first;
    }
  }
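
  // Worked example (editor's illustration): extracting a v2i64 subvector at
  // Index 0 from a legal v4i64 is free (it is just the low half), while
  // extracting it at Index 2 is lane-aligned and costs SubLT.first.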

  // We are going to permute multiple sources and the result will be in
  // multiple destinations. We provide an accurate cost only for splits where
  // the element type remains the same.
  if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
    MVT LegalVT = LT.second;
    if (LegalVT.isVector() &&
        LegalVT.getVectorElementType().getSizeInBits() ==
            Tp->getVectorElementType()->getPrimitiveSizeInBits() &&
        LegalVT.getVectorNumElements() < Tp->getVectorNumElements()) {

      unsigned VecTySize = DL.getTypeStoreSize(Tp);
      unsigned LegalVTSize = LegalVT.getStoreSize();
      // Number of source vectors after legalization:
      unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
      // Number of destination vectors after legalization:
      unsigned NumOfDests = LT.first;

      Type *SingleOpTy = VectorType::get(Tp->getVectorElementType(),
                                         LegalVT.getVectorNumElements());

      unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
      return NumOfShuffles *
             getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr);
    }

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }
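
  // Worked example (editor's illustration): a single-source v32i8 permute on
  // an SSE2-only target legalizes to two v16i8 registers (NumOfSrcs == 2,
  // NumOfDests == 2), so it is priced as (2-1)*2 = 2 two-source v16i8
  // shuffles.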

  // For 2-input shuffles, we must account for splitting the 2 inputs into many.
  if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
    // We assume that source and destination have the same vector type.
    int NumOfDests = LT.first;
    int NumOfShufflesPerDest = LT.first * 2 - 1;
    LT.first = NumOfDests * NumOfShufflesPerDest;
  }
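
  // Worked example (editor's illustration): if each input splits into
  // LT.first == 2 legal registers, every destination register may draw
  // elements from any of the four input halves, so each one needs
  // 2*2-1 = 3 shuffles and LT.first becomes 2*3 = 6 before the tables
  // below are consulted.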

  static const CostTblEntry AVX512VBMIShuffleTbl[] = {
      {TTI::SK_Reverse,          MVT::v64i8,  1}, // vpermb
      {TTI::SK_Reverse,          MVT::v32i8,  1}, // vpermb

      {TTI::SK_PermuteSingleSrc, MVT::v64i8,  1}, // vpermb
      {TTI::SK_PermuteSingleSrc, MVT::v32i8,  1}, // vpermb

      {TTI::SK_PermuteTwoSrc,    MVT::v64i8,  1}, // vpermt2b
      {TTI::SK_PermuteTwoSrc,    MVT::v32i8,  1}, // vpermt2b
      {TTI::SK_PermuteTwoSrc,    MVT::v16i8,  1}  // vpermt2b
  };

  if (ST->hasVBMI())
    if (const auto *Entry =
            CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWShuffleTbl[] = {
      {TTI::SK_Broadcast,        MVT::v32i16, 1}, // vpbroadcastw
      {TTI::SK_Broadcast,        MVT::v64i8,  1}, // vpbroadcastb

      {TTI::SK_Reverse,          MVT::v32i16, 1}, // vpermw
      {TTI::SK_Reverse,          MVT::v16i16, 1}, // vpermw
      {TTI::SK_Reverse,          MVT::v64i8,  2}, // pshufb + vshufi64x2

      {TTI::SK_PermuteSingleSrc, MVT::v32i16, 1}, // vpermw
      {TTI::SK_PermuteSingleSrc, MVT::v16i16, 1}, // vpermw
      {TTI::SK_PermuteSingleSrc, MVT::v8i16,  1}, // vpermw
      {TTI::SK_PermuteSingleSrc, MVT::v64i8,  8}, // extend to v32i16
      {TTI::SK_PermuteSingleSrc, MVT::v32i8,  3}, // vpermw + zext/trunc

      {TTI::SK_PermuteTwoSrc,    MVT::v32i16, 1}, // vpermt2w
      {TTI::SK_PermuteTwoSrc,    MVT::v16i16, 1}, // vpermt2w
      {TTI::SK_PermuteTwoSrc,    MVT::v8i16,  1}, // vpermt2w
      {TTI::SK_PermuteTwoSrc,    MVT::v32i8,  3}, // zext + vpermt2w + trunc
      {TTI::SK_PermuteTwoSrc,    MVT::v64i8, 19}, // 6 * v32i8 + 1
      {TTI::SK_PermuteTwoSrc,    MVT::v16i8,  3}  // zext + vpermt2w + trunc
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512ShuffleTbl[] = {
      {TTI::SK_Broadcast,        MVT::v8f64,  1}, // vbroadcastpd
      {TTI::SK_Broadcast,        MVT::v16f32, 1}, // vbroadcastps
      {TTI::SK_Broadcast,        MVT::v8i64,  1}, // vpbroadcastq
      {TTI::SK_Broadcast,        MVT::v16i32, 1}, // vpbroadcastd

      {TTI::SK_Reverse,          MVT::v8f64,  1}, // vpermpd
      {TTI::SK_Reverse,          MVT::v16f32, 1}, // vpermps
      {TTI::SK_Reverse,          MVT::v8i64,  1}, // vpermq
      {TTI::SK_Reverse,          MVT::v16i32, 1}, // vpermd

      {TTI::SK_PermuteSingleSrc, MVT::v8f64,  1}, // vpermpd
      {TTI::SK_PermuteSingleSrc, MVT::v4f64,  1}, // vpermpd
      {TTI::SK_PermuteSingleSrc, MVT::v2f64,  1}, // vpermpd
      {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps
      {TTI::SK_PermuteSingleSrc, MVT::v8f32,  1}, // vpermps
      {TTI::SK_PermuteSingleSrc, MVT::v4f32,  1}, // vpermps
      {TTI::SK_PermuteSingleSrc, MVT::v8i64,  1}, // vpermq
      {TTI::SK_PermuteSingleSrc, MVT::v4i64,  1}, // vpermq
      {TTI::SK_PermuteSingleSrc, MVT::v2i64,  1}, // vpermq
      {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd
      {TTI::SK_PermuteSingleSrc, MVT::v8i32,  1}, // vpermd
      {TTI::SK_PermuteSingleSrc, MVT::v4i32,  1}, // vpermd
      {TTI::SK_PermuteSingleSrc, MVT::v16i8,  1}, // pshufb

      {TTI::SK_PermuteTwoSrc,    MVT::v8f64,  1}, // vpermt2pd
      {TTI::SK_PermuteTwoSrc,    MVT::v16f32, 1}, // vpermt2ps
      {TTI::SK_PermuteTwoSrc,    MVT::v8i64,  1}, // vpermt2q
      {TTI::SK_PermuteTwoSrc,    MVT::v16i32, 1}, // vpermt2d
      {TTI::SK_PermuteTwoSrc,    MVT::v4f64,  1}, // vpermt2pd
      {TTI::SK_PermuteTwoSrc,    MVT::v8f32,  1}, // vpermt2ps
      {TTI::SK_PermuteTwoSrc,    MVT::v4i64,  1}, // vpermt2q
      {TTI::SK_PermuteTwoSrc,    MVT::v8i32,  1}, // vpermt2d
      {TTI::SK_PermuteTwoSrc,    MVT::v2f64,  1}, // vpermt2pd
      {TTI::SK_PermuteTwoSrc,    MVT::v4f32,  1}, // vpermt2ps
      {TTI::SK_PermuteTwoSrc,    MVT::v2i64,  1}, // vpermt2q
      {TTI::SK_PermuteTwoSrc,    MVT::v4i32,  1}  // vpermt2d
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShuffleTbl[] = {
      {TTI::SK_Broadcast,        MVT::v4f64,  1}, // vbroadcastpd
      {TTI::SK_Broadcast,        MVT::v8f32,  1}, // vbroadcastps
      {TTI::SK_Broadcast,        MVT::v4i64,  1}, // vpbroadcastq
      {TTI::SK_Broadcast,        MVT::v8i32,  1}, // vpbroadcastd
      {TTI::SK_Broadcast,        MVT::v16i16, 1}, // vpbroadcastw
      {TTI::SK_Broadcast,        MVT::v32i8,  1}, // vpbroadcastb

      {TTI::SK_Reverse,          MVT::v4f64,  1}, // vpermpd
      {TTI::SK_Reverse,          MVT::v8f32,  1}, // vpermps
      {TTI::SK_Reverse,          MVT::v4i64,  1}, // vpermq
      {TTI::SK_Reverse,          MVT::v8i32,  1}, // vpermd
      {TTI::SK_Reverse,          MVT::v16i16, 2}, // vperm2i128 + pshufb
      {TTI::SK_Reverse,          MVT::v32i8,  2}, // vperm2i128 + pshufb

      {TTI::SK_Select,           MVT::v16i16, 1}, // vpblendvb
      {TTI::SK_Select,           MVT::v32i8,  1}, // vpblendvb

      {TTI::SK_PermuteSingleSrc, MVT::v4f64,  1}, // vpermpd
      {TTI::SK_PermuteSingleSrc, MVT::v8f32,  1}, // vpermps
      {TTI::SK_PermuteSingleSrc, MVT::v4i64,  1}, // vpermq
      {TTI::SK_PermuteSingleSrc, MVT::v8i32,  1}, // vpermd
      {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
                                                  // + vpblendvb
      {TTI::SK_PermuteSingleSrc, MVT::v32i8,  4}, // vperm2i128 + 2*vpshufb
                                                  // + vpblendvb

      {TTI::SK_PermuteTwoSrc,    MVT::v4f64,  3}, // 2*vpermpd + vblendpd
      {TTI::SK_PermuteTwoSrc,    MVT::v8f32,  3}, // 2*vpermps + vblendps
      {TTI::SK_PermuteTwoSrc,    MVT::v4i64,  3}, // 2*vpermq + vpblendd
      {TTI::SK_PermuteTwoSrc,    MVT::v8i32,  3}, // 2*vpermd + vpblendd
      {TTI::SK_PermuteTwoSrc,    MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
                                                  // + vpblendvb
      {TTI::SK_PermuteTwoSrc,    MVT::v32i8,  7}, // 2*vperm2i128 + 4*vpshufb
                                                  // + vpblendvb
  };

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry XOPShuffleTbl[] = {
      {TTI::SK_PermuteSingleSrc, MVT::v4f64,  2}, // vperm2f128 + vpermil2pd
      {TTI::SK_PermuteSingleSrc, MVT::v8f32,  2}, // vperm2f128 + vpermil2ps
      {TTI::SK_PermuteSingleSrc, MVT::v4i64,  2}, // vperm2f128 + vpermil2pd
      {TTI::SK_PermuteSingleSrc, MVT::v8i32,  2}, // vperm2f128 + vpermil2ps
      {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
                                                  // + vinsertf128
      {TTI::SK_PermuteSingleSrc, MVT::v32i8,  4}, // vextractf128 + 2*vpperm
                                                  // + vinsertf128

      {TTI::SK_PermuteTwoSrc,    MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm
                                                  // + vinsertf128
      {TTI::SK_PermuteTwoSrc,    MVT::v8i16,  1}, // vpperm
      {TTI::SK_PermuteTwoSrc,    MVT::v32i8,  9}, // 2*vextractf128 + 6*vpperm
                                                  // + vinsertf128
      {TTI::SK_PermuteTwoSrc,    MVT::v16i8,  1}, // vpperm
  };

  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1ShuffleTbl[] = {
      {TTI::SK_Broadcast,        MVT::v4f64,  2}, // vperm2f128 + vpermilpd
      {TTI::SK_Broadcast,        MVT::v8f32,  2}, // vperm2f128 + vpermilps
      {TTI::SK_Broadcast,        MVT::v4i64,  2}, // vperm2f128 + vpermilpd
      {TTI::SK_Broadcast,        MVT::v8i32,  2}, // vperm2f128 + vpermilps
      {TTI::SK_Broadcast,        MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128
      {TTI::SK_Broadcast,        MVT::v32i8,  2}, // vpshufb + vinsertf128

      {TTI::SK_Reverse,          MVT::v4f64,  2}, // vperm2f128 + vpermilpd
      {TTI::SK_Reverse,          MVT::v8f32,  2}, // vperm2f128 + vpermilps
      {TTI::SK_Reverse,          MVT::v4i64,  2}, // vperm2f128 + vpermilpd
      {TTI::SK_Reverse,          MVT::v8i32,  2}, // vperm2f128 + vpermilps
      {TTI::SK_Reverse,          MVT::v16i16, 4}, // vextractf128 + 2*pshufb
                                                  // + vinsertf128
      {TTI::SK_Reverse,          MVT::v32i8,  4}, // vextractf128 + 2*pshufb
                                                  // + vinsertf128

      {TTI::SK_Select,           MVT::v4i64,  1}, // vblendpd
      {TTI::SK_Select,           MVT::v4f64,  1}, // vblendpd
      {TTI::SK_Select,           MVT::v8i32,  1}, // vblendps
      {TTI::SK_Select,           MVT::v8f32,  1}, // vblendps
      {TTI::SK_Select,           MVT::v16i16, 3}, // vpand + vpandn + vpor
      {TTI::SK_Select,           MVT::v32i8,  3}, // vpand + vpandn + vpor

      {TTI::SK_PermuteSingleSrc, MVT::v4f64,  2}, // vperm2f128 + vshufpd
      {TTI::SK_PermuteSingleSrc, MVT::v4i64,  2}, // vperm2f128 + vshufpd
      {TTI::SK_PermuteSingleSrc, MVT::v8f32,  4}, // 2*vperm2f128 + 2*vshufps
      {TTI::SK_PermuteSingleSrc, MVT::v8i32,  4}, // 2*vperm2f128 + 2*vshufps
      {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb
                                                  // + 2*por + vinsertf128
      {TTI::SK_PermuteSingleSrc, MVT::v32i8,  8}, // vextractf128 + 4*pshufb
                                                  // + 2*por + vinsertf128

      {TTI::SK_PermuteTwoSrc,    MVT::v4f64,   3}, // 2*vperm2f128 + vshufpd
      {TTI::SK_PermuteTwoSrc,    MVT::v4i64,   3}, // 2*vperm2f128 + vshufpd
      {TTI::SK_PermuteTwoSrc,    MVT::v8f32,   4}, // 2*vperm2f128 + 2*vshufps
      {TTI::SK_PermuteTwoSrc,    MVT::v8i32,   4}, // 2*vperm2f128 + 2*vshufps
      {TTI::SK_PermuteTwoSrc,    MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb
                                                   // + 4*por + vinsertf128
      {TTI::SK_PermuteTwoSrc,    MVT::v32i8,  15}, // 2*vextractf128 + 8*pshufb
                                                   // + 4*por + vinsertf128
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41ShuffleTbl[] = {
      {TTI::SK_Select,           MVT::v2i64,  1}, // pblendw
      {TTI::SK_Select,           MVT::v2f64,  1}, // movsd
      {TTI::SK_Select,           MVT::v4i32,  1}, // pblendw
      {TTI::SK_Select,           MVT::v4f32,  1}, // blendps
      {TTI::SK_Select,           MVT::v8i16,  1}, // pblendw
      {TTI::SK_Select,           MVT::v16i8,  1}  // pblendvb
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSSE3ShuffleTbl[] = {
      {TTI::SK_Broadcast,        MVT::v8i16, 1}, // pshufb
      {TTI::SK_Broadcast,        MVT::v16i8, 1}, // pshufb

      {TTI::SK_Reverse,          MVT::v8i16, 1}, // pshufb
      {TTI::SK_Reverse,          MVT::v16i8, 1}, // pshufb

      {TTI::SK_Select,           MVT::v8i16, 3}, // 2*pshufb + por
      {TTI::SK_Select,           MVT::v16i8, 3}, // 2*pshufb + por

      {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
      {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb

      {TTI::SK_PermuteTwoSrc,    MVT::v8i16, 3}, // 2*pshufb + por
      {TTI::SK_PermuteTwoSrc,    MVT::v16i8, 3}, // 2*pshufb + por
  };

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2ShuffleTbl[] = {
      {TTI::SK_Broadcast,        MVT::v2f64, 1}, // shufpd
      {TTI::SK_Broadcast,        MVT::v2i64, 1}, // pshufd
      {TTI::SK_Broadcast,        MVT::v4i32, 1}, // pshufd
      {TTI::SK_Broadcast,        MVT::v8i16, 2}, // pshuflw + pshufd
      {TTI::SK_Broadcast,        MVT::v16i8, 3}, // unpck + pshuflw + pshufd

      {TTI::SK_Reverse,          MVT::v2f64, 1}, // shufpd
      {TTI::SK_Reverse,          MVT::v2i64, 1}, // pshufd
      {TTI::SK_Reverse,          MVT::v4i32, 1}, // pshufd
      {TTI::SK_Reverse,          MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
      {TTI::SK_Reverse,          MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
                                                 // + 2*pshufd + 2*unpck + packus

      {TTI::SK_Select,           MVT::v2i64, 1}, // movsd
      {TTI::SK_Select,           MVT::v2f64, 1}, // movsd
      {TTI::SK_Select,           MVT::v4i32, 2}, // 2*shufps
      {TTI::SK_Select,           MVT::v8i16, 3}, // pand + pandn + por
      {TTI::SK_Select,           MVT::v16i8, 3}, // pand + pandn + por

      {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd
      {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd
      {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd
      {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw
                                                 // + pshufd/unpck
      {TTI::SK_PermuteSingleSrc, MVT::v16i8, 10}, // 2*pshuflw + 2*pshufhw
                                                  // + 2*pshufd + 2*unpck + 2*packus

      {TTI::SK_PermuteTwoSrc,    MVT::v2f64,  1}, // shufpd
      {TTI::SK_PermuteTwoSrc,    MVT::v2i64,  1}, // shufpd
      {TTI::SK_PermuteTwoSrc,    MVT::v4i32,  2}, // 2*{unpck,movsd,pshufd}
      {TTI::SK_PermuteTwoSrc,    MVT::v8i16,  8}, // blend+permute
      {TTI::SK_PermuteTwoSrc,    MVT::v16i8, 13}, // blend+permute
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1ShuffleTbl[] = {
    { TTI::SK_Broadcast,        MVT::v4f32, 1 }, // shufps
    { TTI::SK_Reverse,          MVT::v4f32, 1 }, // shufps
    { TTI::SK_Select,           MVT::v4f32, 2 }, // 2*shufps
    { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
    { TTI::SK_PermuteTwoSrc,    MVT::v4f32, 2 }, // 2*shufps
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // FIXME: Need a better design of the cost table to handle non-simple types of
  // potential massive combinations (elem_num x src_type x dst_type).

  static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
    { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },

    // Mask sign extend has an instruction.
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v32i8,  MVT::v32i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v64i8,  MVT::v64i1, 1 },

    // Mask zero extend is a load + broadcast.
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v32i8,  MVT::v32i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v64i8,  MVT::v64i1, 2 },
  };

  static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
    { ISD::SINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },

    { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },

    { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v2f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v2f64,  1 },
    { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f64,  1 },
    { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f64,  1 },

    { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v2f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v2f64,  1 },
    { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f64,  1 },
    { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f64,  1 },
  };

  // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
  // 256-bit wide vectors.

  static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  1 },
    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v16f32, 3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  1 },

    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 1 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  1 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },

    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i8,   2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  2 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64, 26 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  5 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  5 },

    { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    1 },

    { ISD::FP_TO_UINT,  MVT::v2i32,  MVT::v2f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  1 },
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v8f64,  2 },
    { ISD::FP_TO_UINT,  MVT::v8i8,   MVT::v8f64,  2 },
    { ISD::FP_TO_UINT,  MVT::v16i32, MVT::v16f32, 1 },
    { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 2 },
    { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v16f32, 2 },
  };

  static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  8 },
  };
  static const TypeConversionCostTblEntry AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },

    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   8 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   5 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  9 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  5 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  6 },
    // The generic code to compute the scalar overhead is currently broken.
    // Work around this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 13 },

    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32,  7 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic when estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  4*4 },

    { ISD::FP_EXTEND,   MVT::v4f64,  MVT::v4f32,  1 },
    { ISD::FP_ROUND,    MVT::v4f32,  MVT::v4f64,  1 },
  };
  static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },

    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  1 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 6 },

    { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    4 },
  };
  static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v16i8,  8 },
    { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  16*10 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v8i16,  15 },
    { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  8*10 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  5 },
    { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v4i32,  4*10 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v2i64,  15 },
    { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  2*10 },

    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  16*10 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v16i8,  8 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v8i16,  15 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  8*10 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v4i32,  4*10 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  8 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v2i64,  15 },

    { ISD::FP_TO_SINT,  MVT::v2i32,  MVT::v2f64,  3 },

    { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    6 },

    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   8 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   6 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  9 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  12 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  10 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  5 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  2 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 3 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  3 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  3 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 7 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 10 },
  };
  std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->hasSSE2() && !ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return LTSrc.first * Entry->Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  MVT SimpleSrcTy = SrcTy.getSimpleVT();
  MVT SimpleDstTy = DstTy.getSimpleVT();

  // Make sure that neither type is going to be split before using the
  // AVX512 tables. This handles -mprefer-vector-width=256
  // with -min-legal-vector-width<=256
  if (TLI->getTypeAction(SimpleSrcTy) != TargetLowering::TypeSplitVector &&
      TLI->getTypeAction(SimpleDstTy) != TargetLowering::TypeSplitVector) {
    if (ST->hasBWI())
      if (const auto *Entry = ConvertCostTableLookup(AVX512BWConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return Entry->Cost;

    if (ST->hasDQI())
      if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return Entry->Cost;

    if (ST->hasAVX512())
      if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return Entry->Cost;
  }

  if (ST->hasAVX2()) {
    if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                   SimpleDstTy, SimpleSrcTy))
      return Entry->Cost;
  }

  if (ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                   SimpleDstTy, SimpleSrcTy))
      return Entry->Cost;
  }

  if (ST->hasSSE41()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
                                                   SimpleDstTy, SimpleSrcTy))
      return Entry->Cost;
  }

  if (ST->hasSSE2()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   SimpleDstTy, SimpleSrcTy))
      return Entry->Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src, I);
}
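
// Compare/select costs. After legalizing ValTy, some predicates need extra
// fixup instructions on older subtargets (tracked in ExtraCost below); the
// final cost scales with the number of legalized vector pieces (LT.first).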
int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  unsigned ExtraCost = 0;
  if (I && (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)) {
    // Some vector comparison predicates cost extra instructions.
    if (MTy.isVector() &&
        !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
          (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
          ST->hasBWI())) {
      switch (cast<CmpInst>(I)->getPredicate()) {
      case CmpInst::Predicate::ICMP_NE:
        // xor(cmpeq(x,y),-1)
        ExtraCost = 1;
        break;
      case CmpInst::Predicate::ICMP_SGE:
      case CmpInst::Predicate::ICMP_SLE:
        // xor(cmpgt(x,y),-1)
        ExtraCost = 1;
        break;
      case CmpInst::Predicate::ICMP_ULT:
      case CmpInst::Predicate::ICMP_UGT:
        // cmpgt(xor(x,signbit),xor(y,signbit))
        // xor(cmpeq(pmaxu(x,y),x),-1)
        ExtraCost = 2;
        break;
      case CmpInst::Predicate::ICMP_ULE:
      case CmpInst::Predicate::ICMP_UGE:
        if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
            (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
          // cmpeq(psubus(x,y),0)
          // cmpeq(pminu(x,y),x)
          ExtraCost = 2;
        } else {
          // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
          ExtraCost = 3;
        }
        break;
      default:
        break;
      }
    }
  }
  static const CostTblEntry AVX512BWCostTbl[] = {
    { ISD::SETCC,   MVT::v32i16,  1 },
    { ISD::SETCC,   MVT::v64i8,   1 },

    { ISD::SELECT,  MVT::v32i16,  1 },
    { ISD::SELECT,  MVT::v64i8,   1 },
  };

  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::SETCC,   MVT::v8i64,   1 },
    { ISD::SETCC,   MVT::v16i32,  1 },
    { ISD::SETCC,   MVT::v8f64,   1 },
    { ISD::SETCC,   MVT::v16f32,  1 },

    { ISD::SELECT,  MVT::v8i64,   1 },
    { ISD::SELECT,  MVT::v16i32,  1 },
    { ISD::SELECT,  MVT::v8f64,   1 },
    { ISD::SELECT,  MVT::v16f32,  1 },
  };

  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::SETCC,   MVT::v4i64,   1 },
    { ISD::SETCC,   MVT::v8i32,   1 },
    { ISD::SETCC,   MVT::v16i16,  1 },
    { ISD::SETCC,   MVT::v32i8,   1 },

    { ISD::SELECT,  MVT::v4i64,   1 }, // pblendvb
    { ISD::SELECT,  MVT::v8i32,   1 }, // pblendvb
    { ISD::SELECT,  MVT::v16i16,  1 }, // pblendvb
    { ISD::SELECT,  MVT::v32i8,   1 }, // pblendvb
  };

  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::SETCC,   MVT::v4f64,   1 },
    { ISD::SETCC,   MVT::v8f32,   1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC,   MVT::v4i64,   4 },
    { ISD::SETCC,   MVT::v8i32,   4 },
    { ISD::SETCC,   MVT::v16i16,  4 },
    { ISD::SETCC,   MVT::v32i8,   4 },

    { ISD::SELECT,  MVT::v4f64,   1 }, // vblendvpd
    { ISD::SELECT,  MVT::v8f32,   1 }, // vblendvps
    { ISD::SELECT,  MVT::v4i64,   1 }, // vblendvpd
    { ISD::SELECT,  MVT::v8i32,   1 }, // vblendvps
    { ISD::SELECT,  MVT::v16i16,  3 }, // vandps + vandnps + vorps
    { ISD::SELECT,  MVT::v32i8,   3 }, // vandps + vandnps + vorps
  };

  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::SETCC,   MVT::v2f64,   1 },
    { ISD::SETCC,   MVT::v4f32,   1 },
    { ISD::SETCC,   MVT::v2i64,   1 },
  };

  static const CostTblEntry SSE41CostTbl[] = {
    { ISD::SELECT,  MVT::v2f64,   1 }, // blendvpd
    { ISD::SELECT,  MVT::v4f32,   1 }, // blendvps
    { ISD::SELECT,  MVT::v2i64,   1 }, // pblendvb
    { ISD::SELECT,  MVT::v4i32,   1 }, // pblendvb
    { ISD::SELECT,  MVT::v8i16,   1 }, // pblendvb
    { ISD::SELECT,  MVT::v16i8,   1 }, // pblendvb
  };

  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::SETCC,   MVT::v2f64,   2 },
    { ISD::SETCC,   MVT::f64,     1 },
    { ISD::SETCC,   MVT::v2i64,   8 },
    { ISD::SETCC,   MVT::v4i32,   1 },
    { ISD::SETCC,   MVT::v8i16,   1 },
    { ISD::SETCC,   MVT::v16i8,   1 },

    { ISD::SELECT,  MVT::v2f64,   3 }, // andpd + andnpd + orpd
    { ISD::SELECT,  MVT::v2i64,   3 }, // pand + pandn + por
    { ISD::SELECT,  MVT::v4i32,   3 }, // pand + pandn + por
    { ISD::SELECT,  MVT::v8i16,   3 }, // pand + pandn + por
    { ISD::SELECT,  MVT::v16i8,   3 }, // pand + pandn + por
  };

  static const CostTblEntry SSE1CostTbl[] = {
    { ISD::SETCC,   MVT::v4f32,   2 },
    { ISD::SETCC,   MVT::f32,     1 },

    { ISD::SELECT,  MVT::v4f32,   3 }, // andps + andnps + orps
  };
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}
unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }
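
// Intrinsic cost when only the operand types are known. Each supported
// intrinsic is mapped onto the equivalent ISD opcode and looked up in the
// per-subtarget cost tables below, from most to least feature-specific.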
int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                      ArrayRef<Type *> Tys, FastMathFlags FMF,
                                      unsigned ScalarizationCostPassed) {
  // Costs should match the codegen from:
  // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
  // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
  // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
  // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
  // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
  static const CostTblEntry AVX512CDCostTbl[] = {
    { ISD::CTLZ,       MVT::v8i64,   1 },
    { ISD::CTLZ,       MVT::v16i32,  1 },
    { ISD::CTLZ,       MVT::v32i16,  8 },
    { ISD::CTLZ,       MVT::v64i8,  20 },
    { ISD::CTLZ,       MVT::v4i64,   1 },
    { ISD::CTLZ,       MVT::v8i32,   1 },
    { ISD::CTLZ,       MVT::v16i16,  4 },
    { ISD::CTLZ,       MVT::v32i8,  10 },
    { ISD::CTLZ,       MVT::v2i64,   1 },
    { ISD::CTLZ,       MVT::v4i32,   1 },
    { ISD::CTLZ,       MVT::v8i16,   4 },
    { ISD::CTLZ,       MVT::v16i8,   4 },
  };
  static const CostTblEntry AVX512BWCostTbl[] = {
    { ISD::BITREVERSE, MVT::v8i64,   5 },
    { ISD::BITREVERSE, MVT::v16i32,  5 },
    { ISD::BITREVERSE, MVT::v32i16,  5 },
    { ISD::BITREVERSE, MVT::v64i8,   5 },
    { ISD::CTLZ,       MVT::v8i64,  23 },
    { ISD::CTLZ,       MVT::v16i32, 22 },
    { ISD::CTLZ,       MVT::v32i16, 18 },
    { ISD::CTLZ,       MVT::v64i8,  17 },
    { ISD::CTPOP,      MVT::v8i64,   7 },
    { ISD::CTPOP,      MVT::v16i32, 11 },
    { ISD::CTPOP,      MVT::v32i16,  9 },
    { ISD::CTPOP,      MVT::v64i8,   6 },
    { ISD::CTTZ,       MVT::v8i64,  10 },
    { ISD::CTTZ,       MVT::v16i32, 14 },
    { ISD::CTTZ,       MVT::v32i16, 12 },
    { ISD::CTTZ,       MVT::v64i8,   9 },
    { ISD::SADDSAT,    MVT::v32i16,  1 },
    { ISD::SADDSAT,    MVT::v64i8,   1 },
    { ISD::SSUBSAT,    MVT::v32i16,  1 },
    { ISD::SSUBSAT,    MVT::v64i8,   1 },
    { ISD::UADDSAT,    MVT::v32i16,  1 },
    { ISD::UADDSAT,    MVT::v64i8,   1 },
    { ISD::USUBSAT,    MVT::v32i16,  1 },
    { ISD::USUBSAT,    MVT::v64i8,   1 },
  };
  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::BITREVERSE, MVT::v8i64,  36 },
    { ISD::BITREVERSE, MVT::v16i32, 24 },
    { ISD::CTLZ,       MVT::v8i64,  29 },
    { ISD::CTLZ,       MVT::v16i32, 35 },
    { ISD::CTPOP,      MVT::v8i64,  16 },
    { ISD::CTPOP,      MVT::v16i32, 24 },
    { ISD::CTTZ,       MVT::v8i64,  20 },
    { ISD::CTTZ,       MVT::v16i32, 28 },
    { ISD::USUBSAT,    MVT::v16i32,  2 }, // pmaxud + psubd
    { ISD::USUBSAT,    MVT::v2i64,   2 }, // pmaxuq + psubq
    { ISD::USUBSAT,    MVT::v4i64,   2 }, // pmaxuq + psubq
    { ISD::USUBSAT,    MVT::v8i64,   2 }, // pmaxuq + psubq
    { ISD::UADDSAT,    MVT::v16i32,  3 }, // not + pminud + paddd
    { ISD::UADDSAT,    MVT::v2i64,   3 }, // not + pminuq + paddq
    { ISD::UADDSAT,    MVT::v4i64,   3 }, // not + pminuq + paddq
    { ISD::UADDSAT,    MVT::v8i64,   3 }, // not + pminuq + paddq
  };
  static const CostTblEntry XOPCostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,   4 },
    { ISD::BITREVERSE, MVT::v8i32,   4 },
    { ISD::BITREVERSE, MVT::v16i16,  4 },
    { ISD::BITREVERSE, MVT::v32i8,   4 },
    { ISD::BITREVERSE, MVT::v2i64,   1 },
    { ISD::BITREVERSE, MVT::v4i32,   1 },
    { ISD::BITREVERSE, MVT::v8i16,   1 },
    { ISD::BITREVERSE, MVT::v16i8,   1 },
    { ISD::BITREVERSE, MVT::i64,     3 },
    { ISD::BITREVERSE, MVT::i32,     3 },
    { ISD::BITREVERSE, MVT::i16,     3 },
    { ISD::BITREVERSE, MVT::i8,      3 }
  };
  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,   5 },
    { ISD::BITREVERSE, MVT::v8i32,   5 },
    { ISD::BITREVERSE, MVT::v16i16,  5 },
    { ISD::BITREVERSE, MVT::v32i8,   5 },
    { ISD::BSWAP,      MVT::v4i64,   1 },
    { ISD::BSWAP,      MVT::v8i32,   1 },
    { ISD::BSWAP,      MVT::v16i16,  1 },
    { ISD::CTLZ,       MVT::v4i64,  23 },
    { ISD::CTLZ,       MVT::v8i32,  18 },
    { ISD::CTLZ,       MVT::v16i16, 14 },
    { ISD::CTLZ,       MVT::v32i8,   9 },
    { ISD::CTPOP,      MVT::v4i64,   7 },
    { ISD::CTPOP,      MVT::v8i32,  11 },
    { ISD::CTPOP,      MVT::v16i16,  9 },
    { ISD::CTPOP,      MVT::v32i8,   6 },
    { ISD::CTTZ,       MVT::v4i64,  10 },
    { ISD::CTTZ,       MVT::v8i32,  14 },
    { ISD::CTTZ,       MVT::v16i16, 12 },
    { ISD::CTTZ,       MVT::v32i8,   9 },
    { ISD::SADDSAT,    MVT::v16i16,  1 },
    { ISD::SADDSAT,    MVT::v32i8,   1 },
    { ISD::SSUBSAT,    MVT::v16i16,  1 },
    { ISD::SSUBSAT,    MVT::v32i8,   1 },
    { ISD::UADDSAT,    MVT::v16i16,  1 },
    { ISD::UADDSAT,    MVT::v32i8,   1 },
    { ISD::UADDSAT,    MVT::v8i32,   3 }, // not + pminud + paddd
    { ISD::USUBSAT,    MVT::v16i16,  1 },
    { ISD::USUBSAT,    MVT::v32i8,   1 },
    { ISD::USUBSAT,    MVT::v8i32,   2 }, // pmaxud + psubd
    { ISD::FSQRT,      MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };
  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,  12 }, // 2 x 128-bit Op + extract/insert
    { ISD::BITREVERSE, MVT::v8i32,  12 }, // 2 x 128-bit Op + extract/insert
    { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
    { ISD::BITREVERSE, MVT::v32i8,  12 }, // 2 x 128-bit Op + extract/insert
    { ISD::BSWAP,      MVT::v4i64,   4 },
    { ISD::BSWAP,      MVT::v8i32,   4 },
    { ISD::BSWAP,      MVT::v16i16,  4 },
    { ISD::CTLZ,       MVT::v4i64,  48 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTLZ,       MVT::v8i32,  38 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTLZ,       MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTLZ,       MVT::v32i8,  20 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP,      MVT::v4i64,  16 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP,      MVT::v8i32,  24 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP,      MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP,      MVT::v32i8,  14 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ,       MVT::v4i64,  22 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ,       MVT::v8i32,  30 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ,       MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ,       MVT::v32i8,  20 }, // 2 x 128-bit Op + extract/insert
    { ISD::SADDSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SADDSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SSUBSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
    { ISD::SSUBSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
    { ISD::UADDSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
    { ISD::UADDSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
    { ISD::UADDSAT,    MVT::v8i32,   8 }, // 2 x 128-bit Op + extract/insert
    { ISD::USUBSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
    { ISD::USUBSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
    { ISD::USUBSAT,    MVT::v8i32,   6 }, // 2 x 128-bit Op + extract/insert
    { ISD::FSQRT,      MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::f64,    21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64,  21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f64,  43 }, // SNB from http://www.agner.org/
  };
  static const CostTblEntry GLMCostTbl[] = {
    { ISD::FSQRT,      MVT::f32,    19 }, // sqrtss
    { ISD::FSQRT,      MVT::v4f32,  37 }, // sqrtps
    { ISD::FSQRT,      MVT::f64,    34 }, // sqrtsd
    { ISD::FSQRT,      MVT::v2f64,  67 }, // sqrtpd
  };
  static const CostTblEntry SLMCostTbl[] = {
    { ISD::FSQRT,      MVT::f32,    20 }, // sqrtss
    { ISD::FSQRT,      MVT::v4f32,  40 }, // sqrtps
    { ISD::FSQRT,      MVT::f64,    35 }, // sqrtsd
    { ISD::FSQRT,      MVT::v2f64,  70 }, // sqrtpd
  };
  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::USUBSAT,    MVT::v4i32,   2 }, // pmaxud + psubd
    { ISD::UADDSAT,    MVT::v4i32,   3 }, // not + pminud + paddd
    { ISD::FSQRT,      MVT::f32,    18 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f32,  18 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSSE3CostTbl[] = {
    { ISD::BITREVERSE, MVT::v2i64,   5 },
    { ISD::BITREVERSE, MVT::v4i32,   5 },
    { ISD::BITREVERSE, MVT::v8i16,   5 },
    { ISD::BITREVERSE, MVT::v16i8,   5 },
    { ISD::BSWAP,      MVT::v2i64,   1 },
    { ISD::BSWAP,      MVT::v4i32,   1 },
    { ISD::BSWAP,      MVT::v8i16,   1 },
    { ISD::CTLZ,       MVT::v2i64,  23 },
    { ISD::CTLZ,       MVT::v4i32,  18 },
    { ISD::CTLZ,       MVT::v8i16,  14 },
    { ISD::CTLZ,       MVT::v16i8,   9 },
    { ISD::CTPOP,      MVT::v2i64,   7 },
    { ISD::CTPOP,      MVT::v4i32,  11 },
    { ISD::CTPOP,      MVT::v8i16,   9 },
    { ISD::CTPOP,      MVT::v16i8,   6 },
    { ISD::CTTZ,       MVT::v2i64,  10 },
    { ISD::CTTZ,       MVT::v4i32,  14 },
    { ISD::CTTZ,       MVT::v8i16,  12 },
    { ISD::CTTZ,       MVT::v16i8,   9 }
  };
  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::BITREVERSE, MVT::v2i64,  29 },
    { ISD::BITREVERSE, MVT::v4i32,  27 },
    { ISD::BITREVERSE, MVT::v8i16,  27 },
    { ISD::BITREVERSE, MVT::v16i8,  20 },
    { ISD::BSWAP,      MVT::v2i64,   7 },
    { ISD::BSWAP,      MVT::v4i32,   7 },
    { ISD::BSWAP,      MVT::v8i16,   7 },
    { ISD::CTLZ,       MVT::v2i64,  25 },
    { ISD::CTLZ,       MVT::v4i32,  26 },
    { ISD::CTLZ,       MVT::v8i16,  20 },
    { ISD::CTLZ,       MVT::v16i8,  17 },
    { ISD::CTPOP,      MVT::v2i64,  12 },
    { ISD::CTPOP,      MVT::v4i32,  15 },
    { ISD::CTPOP,      MVT::v8i16,  13 },
    { ISD::CTPOP,      MVT::v16i8,  10 },
    { ISD::CTTZ,       MVT::v2i64,  14 },
    { ISD::CTTZ,       MVT::v4i32,  18 },
    { ISD::CTTZ,       MVT::v8i16,  16 },
    { ISD::CTTZ,       MVT::v16i8,  13 },
    { ISD::SADDSAT,    MVT::v8i16,   1 },
    { ISD::SADDSAT,    MVT::v16i8,   1 },
    { ISD::SSUBSAT,    MVT::v8i16,   1 },
    { ISD::SSUBSAT,    MVT::v16i8,   1 },
    { ISD::UADDSAT,    MVT::v8i16,   1 },
    { ISD::UADDSAT,    MVT::v16i8,   1 },
    { ISD::USUBSAT,    MVT::v8i16,   1 },
    { ISD::USUBSAT,    MVT::v16i8,   1 },
    { ISD::FSQRT,      MVT::f64,    32 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64,  32 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSE1CostTbl[] = {
    { ISD::FSQRT,      MVT::f32,    28 }, // Pentium III from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f32,  56 }, // Pentium III from http://www.agner.org/
  };
  static const CostTblEntry X64CostTbl[] = { // 64-bit targets
    { ISD::BITREVERSE, MVT::i64,    14 },
    { ISD::SADDO,      MVT::i64,     1 },
    { ISD::UADDO,      MVT::i64,     1 },
  };
  static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
    { ISD::BITREVERSE, MVT::i32,    14 },
    { ISD::BITREVERSE, MVT::i16,    14 },
    { ISD::BITREVERSE, MVT::i8,     11 },
    { ISD::SADDO,      MVT::i32,     1 },
    { ISD::SADDO,      MVT::i16,     1 },
    { ISD::SADDO,      MVT::i8,      1 },
    { ISD::UADDO,      MVT::i32,     1 },
    { ISD::UADDO,      MVT::i16,     1 },
    { ISD::UADDO,      MVT::i8,      1 },
  };
  Type *OpTy = RetTy;
  unsigned ISD = ISD::DELETED_NODE;
  switch (IID) {
  default:
    break;
  case Intrinsic::bitreverse:
    ISD = ISD::BITREVERSE;
    break;
  case Intrinsic::bswap:
    ISD = ISD::BSWAP;
    break;
  case Intrinsic::ctlz:
    ISD = ISD::CTLZ;
    break;
  case Intrinsic::ctpop:
    ISD = ISD::CTPOP;
    break;
  case Intrinsic::cttz:
    ISD = ISD::CTTZ;
    break;
  case Intrinsic::sadd_sat:
    ISD = ISD::SADDSAT;
    break;
  case Intrinsic::ssub_sat:
    ISD = ISD::SSUBSAT;
    break;
  case Intrinsic::uadd_sat:
    ISD = ISD::UADDSAT;
    break;
  case Intrinsic::usub_sat:
    ISD = ISD::USUBSAT;
    break;
  case Intrinsic::sqrt:
    ISD = ISD::FSQRT;
    break;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // SSUBO has same costs so don't duplicate.
    ISD = ISD::SADDO;
    OpTy = RetTy->getContainedType(0);
    break;
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::usub_with_overflow:
    // USUBO has same costs so don't duplicate.
    ISD = ISD::UADDO;
    OpTy = RetTy->getContainedType(0);
    break;
  }
  if (ISD != ISD::DELETED_NODE) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy);
    MVT MTy = LT.second;

    // Attempt to lookup cost.
    if (ST->isGLM())
      if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->isSLM())
      if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasCDI())
      if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasBWI())
      if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasAVX512())
      if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasXOP())
      if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSSE3())
      if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE1())
      if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->is64Bit())
      if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;
  }

  return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF,
                                      ScalarizationCostPassed);
}
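
// Intrinsic cost overload that also sees the operand values: a funnel shift
// whose two value operands match is really a rotate, which is considerably
// cheaper on targets with native rotate support.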
int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                      ArrayRef<Value *> Args, FastMathFlags FMF,
                                      unsigned VF) {
  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::ROTL,       MVT::v8i64,   1 },
    { ISD::ROTL,       MVT::v4i64,   1 },
    { ISD::ROTL,       MVT::v2i64,   1 },
    { ISD::ROTL,       MVT::v16i32,  1 },
    { ISD::ROTL,       MVT::v8i32,   1 },
    { ISD::ROTL,       MVT::v4i32,   1 },
    { ISD::ROTR,       MVT::v8i64,   1 },
    { ISD::ROTR,       MVT::v4i64,   1 },
    { ISD::ROTR,       MVT::v2i64,   1 },
    { ISD::ROTR,       MVT::v16i32,  1 },
    { ISD::ROTR,       MVT::v8i32,   1 },
    { ISD::ROTR,       MVT::v4i32,   1 }
  };
  // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y))
  static const CostTblEntry XOPCostTbl[] = {
    { ISD::ROTL,       MVT::v4i64,   4 },
    { ISD::ROTL,       MVT::v8i32,   4 },
    { ISD::ROTL,       MVT::v16i16,  4 },
    { ISD::ROTL,       MVT::v32i8,   4 },
    { ISD::ROTL,       MVT::v2i64,   1 },
    { ISD::ROTL,       MVT::v4i32,   1 },
    { ISD::ROTL,       MVT::v8i16,   1 },
    { ISD::ROTL,       MVT::v16i8,   1 },
    { ISD::ROTR,       MVT::v4i64,   6 },
    { ISD::ROTR,       MVT::v8i32,   6 },
    { ISD::ROTR,       MVT::v16i16,  6 },
    { ISD::ROTR,       MVT::v32i8,   6 },
    { ISD::ROTR,       MVT::v2i64,   2 },
    { ISD::ROTR,       MVT::v4i32,   2 },
    { ISD::ROTR,       MVT::v8i16,   2 },
    { ISD::ROTR,       MVT::v16i8,   2 }
  };
  static const CostTblEntry X64CostTbl[] = { // 64-bit targets
    { ISD::ROTL,       MVT::i64,     1 },
    { ISD::ROTR,       MVT::i64,     1 },
    { ISD::FSHL,       MVT::i64,     4 }
  };
  static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
    { ISD::ROTL,       MVT::i32,     1 },
    { ISD::ROTL,       MVT::i16,     1 },
    { ISD::ROTL,       MVT::i8,      1 },
    { ISD::ROTR,       MVT::i32,     1 },
    { ISD::ROTR,       MVT::i16,     1 },
    { ISD::ROTR,       MVT::i8,      1 },
    { ISD::FSHL,       MVT::i32,     4 },
    { ISD::FSHL,       MVT::i16,     4 },
    { ISD::FSHL,       MVT::i8,      4 }
  };
  unsigned ISD = ISD::DELETED_NODE;
  switch (IID) {
  default:
    break;
  case Intrinsic::fshl:
    ISD = ISD::FSHL;
    if (Args[0] == Args[1])
      ISD = ISD::ROTL;
    break;
  case Intrinsic::fshr:
    // FSHR has same costs so don't duplicate.
    ISD = ISD::FSHL;
    if (Args[0] == Args[1])
      ISD = ISD::ROTR;
    break;
  }
  if (ISD != ISD::DELETED_NODE) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
    MVT MTy = LT.second;

    // Attempt to lookup cost.
    if (ST->hasAVX512())
      if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasXOP())
      if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->is64Bit())
      if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;
  }

  return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF, VF);
}
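
// Cost of inserting or extracting a single vector element at a known index.
// A floating-point scalar in lane 0 is free (scalars already live there);
// extracting a pointer costs an extra move into the integer register file.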
int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  Type *ScalarType = Val->getScalarType();

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (ScalarType->isFloatingPointTy() && Index == 0)
      return 0;
  }

  // Add to the base cost if we know that the extracted element of a vector is
  // destined to be moved to and used in the integer register file.
  int RegisterFileMoveCost = 0;
  if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
    RegisterFileMoveCost = 1;

  return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
}
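
// Plain load/store cost. Common non-power-of-two vectors such as <3 x float>
// get hand-tuned costs, other odd element counts are scalarized, and
// everything else pays one unit per legalized memory operation.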
int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace, const Instruction *I) {
  // Handle non-power-of-two vectors such as <3 x float>
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
                                        AddressSpace);
      int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
                                               Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  int Cost = LT.first * 1;

  // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
  // proxy for a double-pumped AVX memory interface such as on Sandybridge.
  if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
    Cost *= 2;

  return Cost;
}
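
// Masked load/store cost. Without a legal masked operation for this type the
// sequence is fully scalarized: a per-lane compare, branch and scalar memory
// access, plus the cost of splitting up the value and mask vectors.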
int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
  bool IsLoad = (Instruction::Load == Opcode);
  bool IsStore = (Instruction::Store == Opcode);

  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // To calculate scalar take the regular cost, without mask
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
  if ((IsLoad && !isLegalMaskedLoad(SrcVTy)) ||
      (IsStore && !isLegalMaskedStore(SrcVTy)) || !isPowerOf2_32(NumElem)) {
    // Scalarization
    int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    int ValueSplitCost = getScalarizationOverhead(SrcVTy, IsLoad, IsStore);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  auto VT = TLI->getValueType(DL, SrcVTy);
  int Cost = 0;
  if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires expand/truncate for data and a shuffle for mask.
    Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, 0, nullptr) +
            getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, 0, nullptr);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }

  // Pre-AVX512 - each maskmov load costs 2 + store costs ~8.
  if (!ST->hasAVX512())
    return Cost + LT.first * (IsLoad ? 2 : 8);

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}
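
// Cost of computing an address inside vectorized code. Scalar strided
// accesses are effectively hidden by x86 addressing modes; only non-strided
// (gather-like) vector addressing carries real per-element overhead.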
int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  const unsigned NumVectorInstToHideOverhead = 10;

  // Cost modeling of Strided Access Computation is hidden by the indexing
  // modes of X86 regardless of the stride value. We don't believe that there
  // is a difference between constant strided access in general and a constant
  // stride value which is less than or equal to 64.
  // Even in the case of (loop invariant) stride whose value is not known at
  // compile time, the address computation will not incur more than one extra
  // ADD instruction.
  if (Ty->isVectorTy() && SE) {
    if (!BaseT::isStridedAccess(Ptr))
      return NumVectorInstToHideOverhead;
    if (!BaseT::getConstantStrideStep(SE, Ptr))
      return 1;
  }

  return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}
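
// Cost of a horizontal add/fadd reduction, with separate tables for the
// pairwise and split (tree) reduction strategies, plus movmsk-based
// shortcuts for i1 all-of/any-of reductions.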
int X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, Type *ValTy,
                                           bool IsPairwise) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.
  static const CostTblEntry SSE42CostTblPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 },      // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v8i16,   5 },
  };

  static const CostTblEntry AVX1CostTblPairWise[] = {
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::FADD,  MVT::v4f64,   5 },
    { ISD::FADD,  MVT::v8f32,   7 },
    { ISD::ADD,   MVT::v2i64,   1 },      // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v4i64,   5 },      // The data reported by the IACA tool is "4.8".
    { ISD::ADD,   MVT::v8i16,   5 },
    { ISD::ADD,   MVT::v8i32,   5 },
  };

  static const CostTblEntry SSE42CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 },      // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.3".
    { ISD::ADD,   MVT::v8i16,   4 },      // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v4f32,   3 },
    { ISD::FADD,  MVT::v4f64,   3 },
    { ISD::FADD,  MVT::v8f32,   4 },
    { ISD::ADD,   MVT::v2i64,   1 },      // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "2.8".
    { ISD::ADD,   MVT::v4i64,   3 },
    { ISD::ADD,   MVT::v8i16,   4 },
    { ISD::ADD,   MVT::v8i32,   5 },
  };
  if (IsPairwise) {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  } else {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  }
  static const CostTblEntry AVX2BoolReduction[] = {
    { ISD::AND,  MVT::v16i16,  2 }, // vpmovmskb + cmp
    { ISD::AND,  MVT::v32i8,   2 }, // vpmovmskb + cmp
    { ISD::OR,   MVT::v16i16,  2 }, // vpmovmskb + cmp
    { ISD::OR,   MVT::v32i8,   2 }, // vpmovmskb + cmp
  };

  static const CostTblEntry AVX1BoolReduction[] = {
    { ISD::AND,  MVT::v4i64,   2 }, // vmovmskpd + cmp
    { ISD::AND,  MVT::v8i32,   2 }, // vmovmskps + cmp
    { ISD::AND,  MVT::v16i16,  4 }, // vextractf128 + vpand + vpmovmskb + cmp
    { ISD::AND,  MVT::v32i8,   4 }, // vextractf128 + vpand + vpmovmskb + cmp
    { ISD::OR,   MVT::v4i64,   2 }, // vmovmskpd + cmp
    { ISD::OR,   MVT::v8i32,   2 }, // vmovmskps + cmp
    { ISD::OR,   MVT::v16i16,  4 }, // vextractf128 + vpor + vpmovmskb + cmp
    { ISD::OR,   MVT::v32i8,   4 }, // vextractf128 + vpor + vpmovmskb + cmp
  };

  static const CostTblEntry SSE2BoolReduction[] = {
    { ISD::AND,  MVT::v2i64,   2 }, // movmskpd + cmp
    { ISD::AND,  MVT::v4i32,   2 }, // movmskps + cmp
    { ISD::AND,  MVT::v8i16,   2 }, // pmovmskb + cmp
    { ISD::AND,  MVT::v16i8,   2 }, // pmovmskb + cmp
    { ISD::OR,   MVT::v2i64,   2 }, // movmskpd + cmp
    { ISD::OR,   MVT::v4i32,   2 }, // movmskps + cmp
    { ISD::OR,   MVT::v8i16,   2 }, // pmovmskb + cmp
    { ISD::OR,   MVT::v16i8,   2 }, // pmovmskb + cmp
  };
  // Handle bool allof/anyof patterns.
  if (ValTy->getVectorElementType()->isIntegerTy(1)) {
    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy))
        return LT.first * Entry->Cost;
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy))
        return LT.first * Entry->Cost;
    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy))
        return LT.first * Entry->Cost;
  }

  return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwise);
}
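
// Cost of a horizontal min/max reduction. ISD::SMIN/UMIN/FMINNUM stand in
// for the corresponding max opcodes as well, since the costs match.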
int X86TTIImpl::getMinMaxReductionCost(Type *ValTy, Type *CondTy,
                                       bool IsPairwise, bool IsUnsigned) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD;
  if (ValTy->isIntOrIntVectorTy()) {
    ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
  } else {
    assert(ValTy->isFPOrFPVectorTy() &&
           "Expected floating point or integer vector type.");
    ISD = ISD::FMINNUM;
  }

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.
  static const CostTblEntry SSE1CostTblPairWise[] = {
      {ISD::FMINNUM, MVT::v4f32, 4},
  };

  static const CostTblEntry SSE2CostTblPairWise[] = {
      {ISD::FMINNUM, MVT::v2f64, 3},
      {ISD::SMIN, MVT::v2i64, 6},
      {ISD::UMIN, MVT::v2i64, 8},
      {ISD::SMIN, MVT::v4i32, 6},
      {ISD::UMIN, MVT::v4i32, 8},
      {ISD::SMIN, MVT::v8i16, 4},
      {ISD::UMIN, MVT::v8i16, 6},
      {ISD::SMIN, MVT::v16i8, 8},
      {ISD::UMIN, MVT::v16i8, 6},
  };

  static const CostTblEntry SSE41CostTblPairWise[] = {
      {ISD::FMINNUM, MVT::v4f32, 2},
      {ISD::SMIN, MVT::v2i64, 9},
      {ISD::UMIN, MVT::v2i64, 10},
      {ISD::SMIN, MVT::v4i32, 1}, // The data reported by the IACA is "1.5"
      {ISD::UMIN, MVT::v4i32, 2}, // The data reported by the IACA is "1.8"
      {ISD::SMIN, MVT::v8i16, 2},
      {ISD::UMIN, MVT::v8i16, 2},
      {ISD::SMIN, MVT::v16i8, 3},
      {ISD::UMIN, MVT::v16i8, 3},
  };

  static const CostTblEntry SSE42CostTblPairWise[] = {
      {ISD::SMIN, MVT::v2i64, 7}, // The data reported by the IACA is "6.8"
      {ISD::UMIN, MVT::v2i64, 8}, // The data reported by the IACA is "8.6"
  };

  static const CostTblEntry AVX1CostTblPairWise[] = {
      {ISD::FMINNUM, MVT::v4f32, 1},
      {ISD::FMINNUM, MVT::v4f64, 1},
      {ISD::FMINNUM, MVT::v8f32, 2},
      {ISD::SMIN, MVT::v2i64, 3},
      {ISD::UMIN, MVT::v2i64, 3},
      {ISD::SMIN, MVT::v4i32, 1},
      {ISD::UMIN, MVT::v4i32, 1},
      {ISD::SMIN, MVT::v8i16, 1},
      {ISD::UMIN, MVT::v8i16, 1},
      {ISD::SMIN, MVT::v16i8, 2},
      {ISD::UMIN, MVT::v16i8, 2},
      {ISD::SMIN, MVT::v4i64, 7},
      {ISD::UMIN, MVT::v4i64, 7},
      {ISD::SMIN, MVT::v8i32, 3},
      {ISD::UMIN, MVT::v8i32, 3},
      {ISD::SMIN, MVT::v16i16, 3},
      {ISD::UMIN, MVT::v16i16, 3},
      {ISD::SMIN, MVT::v32i8, 3},
      {ISD::UMIN, MVT::v32i8, 3},
  };

  static const CostTblEntry AVX2CostTblPairWise[] = {
      {ISD::SMIN, MVT::v4i64, 2},
      {ISD::UMIN, MVT::v4i64, 2},
      {ISD::SMIN, MVT::v8i32, 1},
      {ISD::UMIN, MVT::v8i32, 1},
      {ISD::SMIN, MVT::v16i16, 1},
      {ISD::UMIN, MVT::v16i16, 1},
      {ISD::SMIN, MVT::v32i8, 2},
      {ISD::UMIN, MVT::v32i8, 2},
  };

  static const CostTblEntry AVX512CostTblPairWise[] = {
      {ISD::FMINNUM, MVT::v8f64, 1},
      {ISD::FMINNUM, MVT::v16f32, 2},
      {ISD::SMIN, MVT::v8i64, 2},
      {ISD::UMIN, MVT::v8i64, 2},
      {ISD::SMIN, MVT::v16i32, 1},
      {ISD::UMIN, MVT::v16i32, 1},
  };

  static const CostTblEntry SSE1CostTblNoPairWise[] = {
      {ISD::FMINNUM, MVT::v4f32, 4},
  };

  static const CostTblEntry SSE2CostTblNoPairWise[] = {
      {ISD::FMINNUM, MVT::v2f64, 3},
      {ISD::SMIN, MVT::v2i64, 6},
      {ISD::UMIN, MVT::v2i64, 8},
      {ISD::SMIN, MVT::v4i32, 6},
      {ISD::UMIN, MVT::v4i32, 8},
      {ISD::SMIN, MVT::v8i16, 4},
      {ISD::UMIN, MVT::v8i16, 6},
      {ISD::SMIN, MVT::v16i8, 8},
      {ISD::UMIN, MVT::v16i8, 6},
  };

  static const CostTblEntry SSE41CostTblNoPairWise[] = {
      {ISD::FMINNUM, MVT::v4f32, 3},
      {ISD::SMIN, MVT::v2i64, 9},
      {ISD::UMIN, MVT::v2i64, 11},
      {ISD::SMIN, MVT::v4i32, 1}, // The data reported by the IACA is "1.5"
      {ISD::UMIN, MVT::v4i32, 2}, // The data reported by the IACA is "1.8"
      {ISD::SMIN, MVT::v8i16, 1}, // The data reported by the IACA is "1.5"
      {ISD::UMIN, MVT::v8i16, 2}, // The data reported by the IACA is "1.8"
      {ISD::SMIN, MVT::v16i8, 3},
      {ISD::UMIN, MVT::v16i8, 3},
  };

  static const CostTblEntry SSE42CostTblNoPairWise[] = {
      {ISD::SMIN, MVT::v2i64, 7}, // The data reported by the IACA is "6.8"
      {ISD::UMIN, MVT::v2i64, 9}, // The data reported by the IACA is "8.6"
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
      {ISD::FMINNUM, MVT::v4f32, 1},
      {ISD::FMINNUM, MVT::v4f64, 1},
      {ISD::FMINNUM, MVT::v8f32, 1},
      {ISD::SMIN, MVT::v2i64, 3},
      {ISD::UMIN, MVT::v2i64, 3},
      {ISD::SMIN, MVT::v4i32, 1},
      {ISD::UMIN, MVT::v4i32, 1},
      {ISD::SMIN, MVT::v8i16, 1},
      {ISD::UMIN, MVT::v8i16, 1},
      {ISD::SMIN, MVT::v16i8, 2},
      {ISD::UMIN, MVT::v16i8, 2},
      {ISD::SMIN, MVT::v4i64, 7},
      {ISD::UMIN, MVT::v4i64, 7},
      {ISD::SMIN, MVT::v8i32, 2},
      {ISD::UMIN, MVT::v8i32, 2},
      {ISD::SMIN, MVT::v16i16, 2},
      {ISD::UMIN, MVT::v16i16, 2},
      {ISD::SMIN, MVT::v32i8, 2},
      {ISD::UMIN, MVT::v32i8, 2},
  };

  static const CostTblEntry AVX2CostTblNoPairWise[] = {
      {ISD::SMIN, MVT::v4i64, 1},
      {ISD::UMIN, MVT::v4i64, 1},
      {ISD::SMIN, MVT::v8i32, 1},
      {ISD::UMIN, MVT::v8i32, 1},
      {ISD::SMIN, MVT::v16i16, 1},
      {ISD::UMIN, MVT::v16i16, 1},
      {ISD::SMIN, MVT::v32i8, 1},
      {ISD::UMIN, MVT::v32i8, 1},
  };

  static const CostTblEntry AVX512CostTblNoPairWise[] = {
      {ISD::FMINNUM, MVT::v8f64, 1},
      {ISD::FMINNUM, MVT::v16f32, 2},
      {ISD::SMIN, MVT::v8i64, 1},
      {ISD::UMIN, MVT::v8i64, 1},
      {ISD::SMIN, MVT::v16i32, 1},
      {ISD::UMIN, MVT::v16i32, 1},
  };
  if (IsPairwise) {
    if (ST->hasAVX512())
      if (const auto *Entry = CostTableLookup(AVX512CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE41())
      if (const auto *Entry = CostTableLookup(SSE41CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE1())
      if (const auto *Entry = CostTableLookup(SSE1CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  } else {
    if (ST->hasAVX512())
      if (const auto *Entry =
              CostTableLookup(AVX512CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE41())
      if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE1())
      if (const auto *Entry = CostTableLookup(SSE1CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  }

  return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned);
}
/// Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}
int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128bit, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize % 64 != 0)
    ImmVal = Imm.sext(alignTo(BitSize, 64));

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}
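
// Cost of an immediate when it appears as operand Idx of an instruction.
// Immediates that can be folded into the instruction encoding are reported
// as free so that constant hoisting leaves them in place.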
int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::ICmp:
    // This is an imperfect hack to prevent constant hoisting of
    // compares that might be trying to check if a 64-bit value fits in
    // 32-bits. The backend can optimize these cases using a right shift by 32.
    // Ideally we would check the compare predicate here. There are also other
    // similar immediates the backend can use shifts for.
    if (Idx == 1 && Imm.getBitWidth() == 64) {
      uint64_t ImmVal = Imm.getZExtValue();
      if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
        return TTI::TCC_Free;
    }
    ImmIdx = 1;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    ImmIdx = 1;
    break;
  case Instruction::Add:
  case Instruction::Sub:
    // For add/sub, we can use the opposite instruction for INT32_MIN.
    if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
      return TTI::TCC_Free;
    ImmIdx = 1;
    break;
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    // Division by constant is typically expanded later into a different
    // instruction sequence. This completely changes the constants.
    // Report them as "free" to stop ConstantHoist from marking them as opaque.
    return TTI::TCC_Free;
  case Instruction::Mul:
  case Instruction::Or:
  case Instruction::Xor:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = divideCeil(BitSize, 64);
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}
2914 int X86TTIImpl::getIntImmCost(Intrinsic::ID IID
, unsigned Idx
, const APInt
&Imm
,
2916 assert(Ty
->isIntegerTy());
2918 unsigned BitSize
= Ty
->getPrimitiveSizeInBits();
2919 // There is no cost model for constants with a bit size of 0. Return TCC_Free
2920 // here, so that constant hoisting will ignore this constant.
2922 return TTI::TCC_Free
;
2926 return TTI::TCC_Free
;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}
unsigned X86TTIImpl::getUserCost(const User *U,
                                 ArrayRef<const Value *> Operands) {
  if (isa<StoreInst>(U)) {
    Value *Ptr = U->getOperand(1);
    // Store instruction with index and scale costs 2 Uops.
    // Check the preceding GEP to identify non-const indices.
    if (auto GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
        return TTI::TCC_Basic * 2;
    }
    return TTI::TCC_Basic;
  }
  return BaseT::getUserCost(U, Operands);
}
// Return an average cost of Gather / Scatter instruction, maybe improved later.
int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
                                unsigned Alignment, unsigned AddressSpace) {

  assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
  unsigned VF = SrcVTy->getVectorNumElements();
  // Try to reduce index size from 64 bit (default for GEP)
  // to 32. It is essential for VF 16. If the index can't be reduced to 32, the
  // operation will use 16 x 64 indices, which do not fit in a zmm register and
  // force the operation to be split. Also check that the base pointer is the
  // same for all lanes, and that there's at most one variable index.
  auto getIndexSizeInBits = [](Value *Ptr, const DataLayout &DL) {
    unsigned IndexSize = DL.getPointerSizeInBits();
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
    if (IndexSize < 64 || !GEP)
      return IndexSize;

    unsigned NumOfVarIndices = 0;
    Value *Ptrs = GEP->getPointerOperand();
    if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
      return IndexSize;
    for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
      if (isa<Constant>(GEP->getOperand(i)))
        continue;
      Type *IndxTy = GEP->getOperand(i)->getType();
      if (IndxTy->isVectorTy())
        IndxTy = IndxTy->getVectorElementType();
      if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
           !isa<SExtInst>(GEP->getOperand(i))) ||
          ++NumOfVarIndices > 1)
        return IndexSize; // 64
    }
    return (unsigned)32;
  };
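  // Worked example (illustrative): with AVX-512 and VF = 16, <16 x i64>
  // indices occupy two zmm registers, so the gather/scatter must be split in
  // two. If every variable index is 32-bit or a sign-extended 32-bit value,
  // the lambda above returns 32 and <16 x i32> indices fit in a single zmm
  // register, keeping one operation.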
  // Trying to reduce IndexSize to 32 bits for vector 16.
  // By default the IndexSize is equal to pointer size.
  unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
                           ? getIndexSizeInBits(Ptr, DL)
                           : DL.getPointerSizeInBits();

  Type *IndexVTy = VectorType::get(
      IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
  std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
  std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
  if (SplitFactor > 1) {
    // Handle splitting of vector of pointers
    Type *SplitSrcTy =
        VectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
    return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
                                         AddressSpace);
  }

  // The gather / scatter cost is given by Intel architects. It is a rough
  // number since we are looking at one instruction at a time.
  const int GSOverhead = (Opcode == Instruction::Load)
                             ? ST->getGatherOverhead()
                             : ST->getScatterOverhead();
  return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                           Alignment, AddressSpace);
}
/// Return the cost of full scalarization of gather / scatter operation.
///
/// Opcode - Load or Store instruction.
/// SrcVTy - The type of the data vector that should be gathered or scattered.
/// VariableMask - The mask is non-constant at compile time.
/// Alignment - Alignment for one element.
/// AddressSpace - pointer[s] address space.
///
int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
                                bool VariableMask, unsigned Alignment,
                                unsigned AddressSpace) {
  unsigned VF = SrcVTy->getVectorNumElements();
  int MaskUnpackCost = 0;
  if (VariableMask) {
    VectorType *MaskTy =
        VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
    MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
  }

  // The cost of the scalar loads/stores.
  int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                          Alignment, AddressSpace);

  int InsertExtractCost = 0;
  if (Opcode == Instruction::Load)
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of inserting each scalar load into the vector
      InsertExtractCost +=
          getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
  else
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of extracting each element out of the data vector
      InsertExtractCost +=
          getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);

  return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
}
/// Calculate the cost of Gather / Scatter operation
int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
                                       Value *Ptr, bool VariableMask,
                                       unsigned Alignment) {
  assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
  unsigned VF = SrcVTy->getVectorNumElements();
  PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy && Ptr->getType()->isVectorTy())
    PtrTy = dyn_cast<PointerType>(Ptr->getType()->getVectorElementType());
  assert(PtrTy && "Unexpected type for Ptr argument");
  unsigned AddressSpace = PtrTy->getAddressSpace();

  bool Scalarize = false;
  if ((Opcode == Instruction::Load && !isLegalMaskedGather(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedScatter(SrcVTy)))
    Scalarize = true;
  // Gather / Scatter for vector 2 is not profitable on KNL / SKX.
  // Vector-4 of gather/scatter instruction does not exist on KNL.
  // We can extend it to 8 elements, but zeroing upper bits of
  // the mask vector will add more instructions. Right now we give the scalar
  // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter
  // instruction is better in the VariableMask case.
  if (ST->hasAVX512() && (VF == 2 || (VF == 4 && !ST->hasVLX())))
    Scalarize = true;

  if (Scalarize)
    return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
                           AddressSpace);

  return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
}
bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                               TargetTransformInfo::LSRCost &C2) {
  // X86 specific here is "instruction number 1st priority".
  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
                  C1.NumIVMuls, C1.NumBaseAdds,
                  C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
         std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
                  C2.NumIVMuls, C2.NumBaseAdds,
                  C2.ScaleCost, C2.ImmCost, C2.SetupCost);
}
bool X86TTIImpl::canMacroFuseCmp() {
  return ST->hasMacroFusion() || ST->hasBranchFusion();
}
bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
  if (!ST->hasAVX())
    return false;

  // The backend can't handle a single element vector.
  if (isa<VectorType>(DataTy) && DataTy->getVectorNumElements() == 1)
    return false;

  Type *ScalarTy = DataTy->getScalarType();

  if (ScalarTy->isPointerTy())
    return true;

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64 ||
         ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
  return isLegalMaskedLoad(DataType);
}
bool X86TTIImpl::isLegalNTLoad(Type *DataType, unsigned Alignment) {
  unsigned DataSize = DL.getTypeStoreSize(DataType);
  // The only supported nontemporal loads are for aligned vectors of 16 or 32
  // bytes. Note that 32-byte nontemporal vector loads are supported by AVX2
  // (the equivalent stores only require AVX).
  if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
    return DataSize == 16 ? ST->hasSSE1() : ST->hasAVX2();

  return false;
}
bool X86TTIImpl::isLegalNTStore(Type *DataType, unsigned Alignment) {
  unsigned DataSize = DL.getTypeStoreSize(DataType);

  // SSE4A supports nontemporal stores of float and double at arbitrary
  // alignment.
  if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
    return true;

  // Besides the SSE4A subtarget exception above, only aligned stores are
  // available nontemporally on any other subtarget. And only stores with a
  // size of 4..32 bytes (powers of 2, only) are permitted.
  if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
      !isPowerOf2_32(DataSize))
    return false;

  // 32-byte vector nontemporal stores are supported by AVX (the equivalent
  // loads require AVX2).
  if (DataSize == 32)
    return ST->hasAVX();
  else if (DataSize == 16)
    return ST->hasSSE1();
  return true;
}
bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
  if (!isa<VectorType>(DataTy))
    return false;

  if (!ST->hasAVX512())
    return false;

  // The backend can't handle a single element vector.
  if (DataTy->getVectorNumElements() == 1)
    return false;

  Type *ScalarTy = DataTy->getVectorElementType();

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64 ||
         ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
}

bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
  return isLegalMaskedExpandLoad(DataTy);
}
bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
  // Some CPUs have better gather performance than others.
  // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
  // enable gather with a -march.
  if (!(ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2())))
    return false;

  // This function is called now in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about legality of the feature,
  // the vectorization factor is not calculated yet. The Loop Vectorizer
  // sends a scalar type and the decision is based on the width of the
  // scalar element.
  // Later on, the cost model will estimate usage of this intrinsic based on
  // the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  // We also reject single element vectors as the type legalizer can't
  // scalarize them.
  if (isa<VectorType>(DataTy)) {
    unsigned NumElts = DataTy->getVectorNumElements();
    if (NumElts == 1 || !isPowerOf2_32(NumElts))
      return false;
  }
  Type *ScalarTy = DataTy->getScalarType();
  if (ScalarTy->isPointerTy())
    return true;

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64;
}
bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
  // AVX2 doesn't support scatter.
  if (!ST->hasAVX512())
    return false;
  return isLegalMaskedGather(DataType);
}
bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  EVT VT = TLI->getValueType(DL, DataType);
  return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
}

bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
  return false;
}
bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
}
bool X86TTIImpl::areFunctionArgsABICompatible(
    const Function *Caller, const Function *Callee,
    SmallPtrSetImpl<Argument *> &Args) const {
  if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
    return false;

  // If we get here, we know the target features match. If one function
  // considers 512-bit vectors legal and the other does not, consider them
  // incompatible.
  // FIXME Look at the arguments and only consider 512 bit or larger vectors?
  const TargetMachine &TM = getTLI()->getTargetMachine();

  return TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
         TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs();
}
X86TTIImpl::TTI::MemCmpExpansionOptions
X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  Options.NumLoadsPerBlock = 2;
  if (IsZeroCmp) {
    // Only enable vector loads for equality comparison. Right now the vector
    // version is not as fast for three way compare (see #33329).
    // TODO: enable AVX512 when the DAG is ready.
    // if (ST->hasAVX512()) Options.LoadSizes.push_back(64);
    const unsigned PreferredWidth = ST->getPreferVectorWidth();
    if (PreferredWidth >= 256 && ST->hasAVX2()) Options.LoadSizes.push_back(32);
    if (PreferredWidth >= 128 && ST->hasSSE2()) Options.LoadSizes.push_back(16);
    // All GPR and vector loads can be unaligned. SIMD compare requires integer
    // vectors (SSE2/AVX2).
    Options.AllowOverlappingLoads = true;
  }
  if (ST->is64Bit()) {
    Options.LoadSizes.push_back(8);
  }
  Options.LoadSizes.push_back(4);
  Options.LoadSizes.push_back(2);
  Options.LoadSizes.push_back(1);
  return Options;
}
bool X86TTIImpl::enableInterleavedAccessVectorization() {
  // TODO: We expect this to be beneficial regardless of arch,
  // but there are currently some unexplained performance artifacts on Atom.
  // As a temporary solution, disable on Atom.
  return !(ST->isAtom());
}
// Get estimation for interleaved load/store operations for AVX2.
// \p Factor is the interleaved-access factor (stride) - number of
// (interleaved) elements in the group.
// \p Indices contains the indices for a strided load: when the
// interleaved load has gaps they indicate which elements are used.
// If Indices is empty (or if the number of indices is equal to the size
// of the interleaved-access as given in \p Factor) the access has no gaps.
//
// As opposed to AVX-512, AVX2 does not have generic shuffles that allow
// computing the cost using a generic formula as a function of generic
// shuffles. We therefore use a lookup table instead, filled according to
// the instruction sequences that codegen currently generates.
int X86TTIImpl::getInterleavedMemoryOpCostAVX2(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace,
                                               bool UseMaskForCond,
                                               bool UseMaskForGaps) {

  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);
  // We currently support only fully-interleaved groups, with no gaps.
  // TODO: Support also strided loads (interleaved-groups with gaps).
  if (Indices.size() && Indices.size() != Factor)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace);
  // VecTy for interleave memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;

  // This function can be called with VecTy=<6xi128>, Factor=3, in which case
  // the VF=2, while v2i128 is an unsupported MVT vector type
  // (see MachineValueType.h::getVectorVT()).
  if (!LegalVT.isVector())
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace);

  unsigned VF = VecTy->getVectorNumElements() / Factor;
  Type *ScalarTy = VecTy->getVectorElementType();
  // Calculate the number of memory operations (NumOfMemOps) required
  // to load/store the VecTy.
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
                                        LegalVT.getVectorNumElements());
  unsigned MemOpCost =
      getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);

  VectorType *VT = VectorType::get(ScalarTy, VF);
  EVT ETy = TLI->getValueType(DL, VT);
  if (!ETy.isSimple())
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace);
  // TODO: Complete for other data-types and strides.
  // Each combination of Stride, ElementTy and VF results in a different
  // sequence; the cost tables are therefore accessed with:
  // Factor (stride) and VectorType=VFxElemType.
  // The Cost accounts only for the shuffle sequence;
  // the cost of the loads/stores is accounted for separately.
  //
  static const CostTblEntry AVX2InterleavedLoadTbl[] = {
    { 2, MVT::v4i64, 6 },  //(load 8i64 and) deinterleave into 2 x 4i64
    { 2, MVT::v4f64, 6 },  //(load 8f64 and) deinterleave into 2 x 4f64

    { 3, MVT::v2i8, 10 },  //(load 6i8 and) deinterleave into 3 x 2i8
    { 3, MVT::v4i8, 4 },   //(load 12i8 and) deinterleave into 3 x 4i8
    { 3, MVT::v8i8, 9 },   //(load 24i8 and) deinterleave into 3 x 8i8
    { 3, MVT::v16i8, 11 }, //(load 48i8 and) deinterleave into 3 x 16i8
    { 3, MVT::v32i8, 13 }, //(load 96i8 and) deinterleave into 3 x 32i8
    { 3, MVT::v8f32, 17 }, //(load 24f32 and) deinterleave into 3 x 8f32

    { 4, MVT::v2i8, 12 },  //(load 8i8 and) deinterleave into 4 x 2i8
    { 4, MVT::v4i8, 4 },   //(load 16i8 and) deinterleave into 4 x 4i8
    { 4, MVT::v8i8, 20 },  //(load 32i8 and) deinterleave into 4 x 8i8
    { 4, MVT::v16i8, 39 }, //(load 64i8 and) deinterleave into 4 x 16i8
    { 4, MVT::v32i8, 80 }, //(load 128i8 and) deinterleave into 4 x 32i8

    { 8, MVT::v8f32, 40 }  //(load 64f32 and) deinterleave into 8 x 8f32
  };
  static const CostTblEntry AVX2InterleavedStoreTbl[] = {
    { 2, MVT::v4i64, 6 },  //interleave 2 x 4i64 into 8i64 (and store)
    { 2, MVT::v4f64, 6 },  //interleave 2 x 4f64 into 8f64 (and store)

    { 3, MVT::v2i8, 7 },   //interleave 3 x 2i8 into 6i8 (and store)
    { 3, MVT::v4i8, 8 },   //interleave 3 x 4i8 into 12i8 (and store)
    { 3, MVT::v8i8, 11 },  //interleave 3 x 8i8 into 24i8 (and store)
    { 3, MVT::v16i8, 11 }, //interleave 3 x 16i8 into 48i8 (and store)
    { 3, MVT::v32i8, 13 }, //interleave 3 x 32i8 into 96i8 (and store)

    { 4, MVT::v2i8, 12 },  //interleave 4 x 2i8 into 8i8 (and store)
    { 4, MVT::v4i8, 9 },   //interleave 4 x 4i8 into 16i8 (and store)
    { 4, MVT::v8i8, 10 },  //interleave 4 x 8i8 into 32i8 (and store)
    { 4, MVT::v16i8, 10 }, //interleave 4 x 16i8 into 64i8 (and store)
    { 4, MVT::v32i8, 12 }  //interleave 4 x 32i8 into 128i8 (and store)
  };
  if (Opcode == Instruction::Load) {
    if (const auto *Entry =
            CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT()))
      return NumOfMemOps * MemOpCost + Entry->Cost;
  } else {
    assert(Opcode == Instruction::Store &&
           "Expected Store Instruction at this point");
    if (const auto *Entry =
            CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT()))
      return NumOfMemOps * MemOpCost + Entry->Cost;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}
// Get estimation for interleaved load/store operations and strided load.
// \p Indices contains indices for strided load.
// \p Factor - the factor of interleaving.
// AVX-512 provides 3-src shuffles that significantly reduce the cost.
int X86TTIImpl::getInterleavedMemoryOpCostAVX512(unsigned Opcode, Type *VecTy,
                                                 unsigned Factor,
                                                 ArrayRef<unsigned> Indices,
                                                 unsigned Alignment,
                                                 unsigned AddressSpace,
                                                 bool UseMaskForCond,
                                                 bool UseMaskForGaps) {

  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);
  // VecTy for interleave memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.

  // Calculate the number of memory operations (NumOfMemOps) required
  // to load/store the VecTy.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
                                        LegalVT.getVectorNumElements());
  unsigned MemOpCost =
      getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);

  unsigned VF = VecTy->getVectorNumElements() / Factor;
  MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);
  if (Opcode == Instruction::Load) {
    // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
    // contain the cost of the optimized shuffle sequence that the
    // X86InterleavedAccess pass will generate.
    // The cost of loads and stores is computed separately from the table.

    // X86InterleavedAccess supports only the following interleaved-access
    // groups.
    static const CostTblEntry AVX512InterleavedLoadTbl[] = {
        {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
        {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
        {3, MVT::v64i8, 22}, //(load 192i8 and) deinterleave into 3 x 64i8
    };

    if (const auto *Entry =
            CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
      return NumOfMemOps * MemOpCost + Entry->Cost;
    // If an entry does not exist, fall back to the default implementation.
    // Kind of shuffle depends on number of loaded values.
    // If we load the entire data in one register, we can use a 1-src shuffle.
    // Otherwise, we'll merge 2 sources in each operation.
    TTI::ShuffleKind ShuffleKind =
        (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;

    unsigned ShuffleCost =
        getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);

    unsigned NumOfLoadsInInterleaveGrp =
        Indices.size() ? Indices.size() : Factor;
    Type *ResultTy = VectorType::get(VecTy->getVectorElementType(),
                                     VecTy->getVectorNumElements() / Factor);
    unsigned NumOfResults =
        getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
        NumOfLoadsInInterleaveGrp;

    // About a half of the loads may be folded in shuffles when we have only
    // one result. If we have more than one result, we do not fold loads at
    // all.
    unsigned NumOfUnfoldedLoads =
        NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;

    // Get a number of shuffle operations per result.
    unsigned NumOfShufflesPerResult =
        std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));

    // The SK_MergeTwoSrc shuffle clobbers one of src operands.
    // When we have more than one destination, we need additional instructions
    // to keep sources.
    unsigned NumOfMoves = 0;
    if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
      NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;

    int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
               NumOfUnfoldedLoads * MemOpCost + NumOfMoves;

    return Cost;
  }
  assert(Opcode == Instruction::Store &&
         "Expected Store Instruction at this point");
  // X86InterleavedAccess supports only the following interleaved-access
  // groups.
  static const CostTblEntry AVX512InterleavedStoreTbl[] = {
      {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
      {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
      {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 192i8 (and store)

      {4, MVT::v8i8, 10},  // interleave 4 x 8i8 into 32i8 (and store)
      {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
      {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
      {4, MVT::v64i8, 24}  // interleave 4 x 64i8 into 256i8 (and store)
  };

  if (const auto *Entry =
          CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
    return NumOfMemOps * MemOpCost + Entry->Cost;
  // If an entry does not exist, fall back to the default implementation.

  // There are no strided stores meanwhile, and a store can't be folded into
  // a shuffle.
  unsigned NumOfSources = Factor; // The number of values to be merged.
  unsigned ShuffleCost =
      getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
  unsigned NumOfShufflesPerStore = NumOfSources - 1;

  // The SK_MergeTwoSrc shuffle clobbers one of src operands.
  // We need additional instructions to keep sources.
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
             NumOfMoves;
  return Cost;
}
int X86TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace,
                                           bool UseMaskForCond,
                                           bool UseMaskForGaps) {
  auto isSupportedOnAVX512 = [](Type *VecTy, bool HasBW) {
    Type *EltTy = VecTy->getVectorElementType();
    if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
        EltTy->isIntegerTy(32) || EltTy->isPointerTy())
      return true;
    if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8))
      return HasBW;
    return false;
  };
  if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
    return getInterleavedMemoryOpCostAVX512(Opcode, VecTy, Factor, Indices,
                                            Alignment, AddressSpace,
                                            UseMaskForCond, UseMaskForGaps);
  if (ST->hasAVX2())
    return getInterleavedMemoryOpCostAVX2(Opcode, VecTy, Factor, Indices,
                                          Alignment, AddressSpace,
                                          UseMaskForCond, UseMaskForGaps);

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace,
                                           UseMaskForCond, UseMaskForGaps);
}