//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// A note on the cost-model numbers used below: the numbers correspond to a
/// "generic" X86 CPU rather than a specific CPU model. Usually the numbers
/// correspond to the CPU where the feature first appeared. For example, if we
/// check Subtarget.hasSSE42() in the lookups below, the cost is based on
/// Nehalem, as that was the first CPU to support that feature level and thus
/// most likely has the worst-case cost, although we may discard an outlying
/// worst cost from one CPU (e.g. Atom).
///
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem / Silvermont
///   AVX     - Sandy Bridge / Jaguar / Bulldozer
///   AVX2    - Haswell / Ryzen
///   AVX-512 - Xeon Phi / Skylake
///
/// And some examples of instruction target dependent costs (latency):
///                     divss   sqrtss   rsqrtss
///   AMD K7            11-16   19       3
///   Piledriver        9-24    13-15    5
///   Jaguar            14      16       2
///   Pentium II,III    18      30       2
///   Nehalem           7-14    7-18     3
///   Haswell           10-13   11       5
///
/// Interpreting the 4 TargetCostKind types:
/// TCK_RecipThroughput and TCK_Latency should try to match the worst case
/// values reported by the CPU scheduler models (and llvm-mca).
/// TCK_CodeSize should match the instruction count (e.g. divss = 1), NOT the
/// actual encoding size of the instruction.
/// TCK_SizeAndLatency should match the worst case micro-op counts reported by
/// the CPU scheduler models (and llvm-mca), to ensure that they are
/// compatible with the MicroOpBufferSize and LoopMicroOpBufferSize values
/// which are often used as the cost thresholds where TCK_SizeAndLatency is
/// requested.
//===----------------------------------------------------------------------===//
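
// Worked example of the four cost kinds: the Goldmont table further below
// contains { ISD::FDIV, MVT::f32, { 18, 19, 1, 1 } } for divss, i.e. a
// reciprocal throughput of 18 cycles, a latency of 19 cycles, a code size of
// 1 instruction, and a size-and-latency (micro-op) cost of 1.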

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
// X86 cost model.
//===----------------------------------------------------------------------===//

// Helper struct to store/access costs for each cost kind.
// TODO: Move this to allow other targets to use it?
struct CostKindCosts {
  unsigned RecipThroughputCost = ~0U;
  unsigned LatencyCost = ~0U;
  unsigned CodeSizeCost = ~0U;
  unsigned SizeAndLatencyCost = ~0U;

  std::optional<unsigned>
  operator[](TargetTransformInfo::TargetCostKind Kind) const {
    unsigned Cost = ~0U;
    switch (Kind) {
    case TargetTransformInfo::TCK_RecipThroughput:
      Cost = RecipThroughputCost;
      break;
    case TargetTransformInfo::TCK_Latency:
      Cost = LatencyCost;
      break;
    case TargetTransformInfo::TCK_CodeSize:
      Cost = CodeSizeCost;
      break;
    case TargetTransformInfo::TCK_SizeAndLatency:
      Cost = SizeAndLatencyCost;
      break;
    }
    if (Cost == ~0U)
      return std::nullopt;
    return Cost;
  }
};
using CostKindTblEntry = CostTblEntryT<CostKindCosts>;
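
// Usage sketch: the tables below map an (ISD opcode, MVT) pair to a
// CostKindCosts record, e.g. { ISD::SHL, MVT::v4i32, { 1, 1, 1, 1 } }.
// A lookup via CostTableLookup(Table, ISD, LT.second) returns the matching
// entry, and Entry->Cost[CostKind] selects the value for the requested cost
// kind (std::nullopt if the table left that kind unspecified).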

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

std::optional<unsigned> X86TTIImpl::getCacheSize(
    TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 32 * 1024; // 32 KByte
  case TargetTransformInfo::CacheLevel::L2D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 256 * 1024; // 256 KByte
  }
  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

std::optional<unsigned> X86TTIImpl::getCacheAssociativity(
    TargetTransformInfo::CacheLevel Level) const {
  //   - Penryn
  //   - Nehalem
  //   - Westmere
  //   - Sandy Bridge
  //   - Ivy Bridge
  //   - Haswell
  //   - Broadwell
  //   - Skylake
  //   - Kabylake
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    [[fallthrough]];
  case TargetTransformInfo::CacheLevel::L2D:
    return 8;
  }
  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  bool Vector = (ClassID == 1);
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

TypeSize
X86TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  unsigned PreferVectorWidth = ST->getPreferVectorWidth();
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(ST->is64Bit() ? 64 : 32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    if (ST->hasAVX512() && ST->hasEVEX512() && PreferVectorWidth >= 512)
      return TypeSize::getFixed(512);
    if (ST->hasAVX() && PreferVectorWidth >= 256)
      return TypeSize::getFixed(256);
    if (ST->hasSSE1() && PreferVectorWidth >= 128)
      return TypeSize::getFixed(128);
    return TypeSize::getFixed(0);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }
  llvm_unreachable("Unsupported register kind");
}
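
// For example, on an AVX2-only subtarget with the default prefer-vector-width
// (>= 256), RGK_FixedWidthVector yields 256 bits; lowering the preference
// (e.g. via -mprefer-vector-width=128) drops the answer to 128 bits even
// though wider registers exist.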

unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
  return getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
      .getFixedValue();
}

unsigned X86TTIImpl::getMaxInterleaveFactor(ElementCount VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller handle the loop, which saves the overflow
  // check and memory check cost.
  if (VF.isScalar())
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}
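
// For example, the loop vectorizer asking with VF = 8 on a Haswell-class
// (AVX-capable) subtarget gets an interleave factor of 4, while an Atom or a
// scalar (VF = 1) query gets 1.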

InstructionCost X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
    ArrayRef<const Value *> Args,
    const Instruction *CxtI) {

  // vXi8 multiplications are always promoted to vXi16.
  // Sub-128-bit types can be extended/packed more efficiently.
  if (Opcode == Instruction::Mul && Ty->isVectorTy() &&
      Ty->getPrimitiveSizeInBits() <= 64 && Ty->getScalarSizeInBits() == 8) {
    Type *WideVecTy =
        VectorType::getExtendedElementVectorType(cast<VectorType>(Ty));
    return getCastInstrCost(Instruction::ZExt, WideVecTy, Ty,
                            TargetTransformInfo::CastContextHint::None,
                            CostKind) +
           getCastInstrCost(Instruction::Trunc, Ty, WideVecTy,
                            TargetTransformInfo::CastContextHint::None,
                            CostKind) +
           getArithmeticInstrCost(Opcode, WideVecTy, CostKind, Op1Info, Op2Info);
  }
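
  // E.g. a <4 x i8> multiply is therefore costed as a zext to <4 x i16>, a
  // <4 x i16> multiply, and a trunc back to <4 x i8>.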

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD == ISD::MUL && Args.size() == 2 && LT.second.isVector() &&
      (LT.second.getScalarType() == MVT::i32 ||
       LT.second.getScalarType() == MVT::i64)) {
    // Check if the operands can be represented as a smaller datatype.
    bool Op1Signed = false, Op2Signed = false;
    unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
    unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
    unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);
    bool SignedMode = Op1Signed || Op2Signed;

    // If both vXi32 are representable as i15 and at least one is constant,
    // zero-extended, or sign-extended from vXi16 (or less pre-SSE41) then we
    // can treat this as PMADDWD which has the same costs as a vXi16 multiply.
    if (OpMinSize <= 15 && !ST->isPMADDWDSlow() &&
        LT.second.getScalarType() == MVT::i32) {
      bool Op1Constant =
          isa<ConstantDataVector>(Args[0]) || isa<ConstantVector>(Args[0]);
      bool Op2Constant =
          isa<ConstantDataVector>(Args[1]) || isa<ConstantVector>(Args[1]);
      bool Op1Sext = isa<SExtInst>(Args[0]) &&
                     (Op1MinSize == 15 || (Op1MinSize < 15 && !ST->hasSSE41()));
      bool Op2Sext = isa<SExtInst>(Args[1]) &&
                     (Op2MinSize == 15 || (Op2MinSize < 15 && !ST->hasSSE41()));

      bool IsZeroExtended = !Op1Signed || !Op2Signed;
      bool IsConstant = Op1Constant || Op2Constant;
      bool IsSext = Op1Sext || Op2Sext;
      if (IsConstant || IsZeroExtended || IsSext)
        LT.second =
            MVT::getVectorVT(MVT::i16, 2 * LT.second.getVectorNumElements());
    }

    // Check if the vXi32 operands can be shrunk into a smaller datatype.
    // This should match the codegen from reduceVMULWidth.
    // TODO: Make this generic (!ST->SSE41 || ST->isPMULLDSlow()).
    if (ST->useSLMArithCosts() && LT.second == MVT::v4i32) {
      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!SignedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!SignedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }

    // If both vXi64 are representable as (unsigned) i32, then we can perform
    // the multiply with a single PMULUDQ instruction.
    // TODO: Add (SSE41+) PMULDQ handling for signed extensions.
    if (!SignedMode && OpMinSize <= 32 && LT.second.getScalarType() == MVT::i64)
      ISD = X86ISD::PMULUDQ;
  }

  // Vector multiply by pow2 will be simplified to shifts.
  // Vector multiply by -pow2 will be simplified to shifts/negates.
  if (ISD == ISD::MUL && Op2Info.isConstant() &&
      (Op2Info.isPowerOf2() || Op2Info.isNegatedPowerOf2())) {
    InstructionCost Cost =
        getArithmeticInstrCost(Instruction::Shl, Ty, CostKind,
                               Op1Info.getNoProps(), Op2Info.getNoProps());
    if (Op2Info.isNegatedPowerOf2())
      Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
    return Cost;
  }
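
  // E.g. mul X, 8 is costed as shl X, 3, and mul X, -8 as sub 0, (shl X, 3).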

  // On X86, vector signed division by a power-of-two constant is normally
  // expanded to the sequence SRA + SRL + ADD + SRA.
  // The OperandValue properties may not be the same as that of the previous
  // operation; conservatively assume OP_None.
  if ((ISD == ISD::SDIV || ISD == ISD::SREM) &&
      Op2Info.isConstant() && Op2Info.isPowerOf2()) {
    InstructionCost Cost =
        2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
                                   Op1Info.getNoProps(), Op2Info.getNoProps());
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind,
                                   Op1Info.getNoProps(), Op2Info.getNoProps());
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind,
                                   Op1Info.getNoProps(), Op2Info.getNoProps());

    if (ISD == ISD::SREM) {
      // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
      Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                     Op1Info.getNoProps(), Op2Info.getNoProps());
      Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind,
                                     Op1Info.getNoProps(), Op2Info.getNoProps());
    }

    return Cost;
  }
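
  // E.g. for vXi32, (X sdiv 4) expands to (X + ((X >> 31) >>u 30)) >> 2: the
  // first SRA extracts the sign, the SRL turns it into the rounding bias, the
  // ADD applies it, and the final SRA performs the division.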

  // Vector unsigned division/remainder will be simplified to shifts/masks.
  if ((ISD == ISD::UDIV || ISD == ISD::UREM) &&
      Op2Info.isConstant() && Op2Info.isPowerOf2()) {
    if (ISD == ISD::UDIV)
      return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind,
                                    Op1Info.getNoProps(), Op2Info.getNoProps());
    // UREM
    return getArithmeticInstrCost(Instruction::And, Ty, CostKind,
                                  Op1Info.getNoProps(), Op2Info.getNoProps());
  }
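
  // E.g. (X udiv 8) is costed as (X >>u 3) and (X urem 8) as (X & 7).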

  static const CostKindTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL, MVT::v16i8,  { 1, 7, 2, 3 } }, // psllw + pand.
    { ISD::SRL, MVT::v16i8,  { 1, 7, 2, 3 } }, // psrlw + pand.
    { ISD::SRA, MVT::v16i8,  { 1, 8, 4, 5 } }, // psrlw, pand, pxor, psubb.
    { ISD::SHL, MVT::v32i8,  { 1, 8, 2, 3 } }, // psllw + pand.
    { ISD::SRL, MVT::v32i8,  { 1, 8, 2, 3 } }, // psrlw + pand.
    { ISD::SRA, MVT::v32i8,  { 1, 9, 4, 5 } }, // psrlw, pand, pxor, psubb.
    { ISD::SHL, MVT::v64i8,  { 1, 8, 2, 3 } }, // psllw + pand.
    { ISD::SRL, MVT::v64i8,  { 1, 8, 2, 3 } }, // psrlw + pand.
    { ISD::SRA, MVT::v64i8,  { 1, 9, 4, 6 } }, // psrlw, pand, pxor, psubb.

    { ISD::SHL, MVT::v16i16, { 1, 1, 1, 1 } }, // psllw
    { ISD::SRL, MVT::v16i16, { 1, 1, 1, 1 } }, // psrlw
    { ISD::SRA, MVT::v16i16, { 1, 1, 1, 1 } }, // psraw
    { ISD::SHL, MVT::v32i16, { 1, 1, 1, 1 } }, // psllw
    { ISD::SRL, MVT::v32i16, { 1, 1, 1, 1 } }, // psrlw
    { ISD::SRA, MVT::v32i16, { 1, 1, 1, 1 } }, // psraw
  };

  if (Op2Info.isUniform() && Op2Info.isConstant() && ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWUniformConstCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v64i8,  { 2, 12,  5,  6 } }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,  { 2, 12,  5,  6 } }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,  { 3, 10, 12, 12 } }, // psrlw, pand, pxor, psubb.

    { ISD::SHL,  MVT::v16i16, { 2,  7,  4,  4 } }, // psllw + split.
    { ISD::SRL,  MVT::v16i16, { 2,  7,  4,  4 } }, // psrlw + split.
    { ISD::SRA,  MVT::v16i16, { 2,  7,  4,  4 } }, // psraw + split.

    { ISD::SHL,  MVT::v8i32,  { 1,  1,  1,  1 } }, // pslld
    { ISD::SRL,  MVT::v8i32,  { 1,  1,  1,  1 } }, // psrld
    { ISD::SRA,  MVT::v8i32,  { 1,  1,  1,  1 } }, // psrad
    { ISD::SHL,  MVT::v16i32, { 1,  1,  1,  1 } }, // pslld
    { ISD::SRL,  MVT::v16i32, { 1,  1,  1,  1 } }, // psrld
    { ISD::SRA,  MVT::v16i32, { 1,  1,  1,  1 } }, // psrad

    { ISD::SRA,  MVT::v2i64,  { 1,  1,  1,  1 } }, // psraq
    { ISD::SHL,  MVT::v4i64,  { 1,  1,  1,  1 } }, // psllq
    { ISD::SRL,  MVT::v4i64,  { 1,  1,  1,  1 } }, // psrlq
    { ISD::SRA,  MVT::v4i64,  { 1,  1,  1,  1 } }, // psraq
    { ISD::SHL,  MVT::v8i64,  { 1,  1,  1,  1 } }, // psllq
    { ISD::SRL,  MVT::v8i64,  { 1,  1,  1,  1 } }, // psrlq
    { ISD::SRA,  MVT::v8i64,  { 1,  1,  1,  1 } }, // psraq

    { ISD::SDIV, MVT::v16i32, { 6 } }, // pmuludq sequence
    { ISD::SREM, MVT::v16i32, { 8 } }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32, { 5 } }, // pmuludq sequence
    { ISD::UREM, MVT::v16i32, { 7 } }, // pmuludq+mul+sub sequence
  };

  if (Op2Info.isUniform() && Op2Info.isConstant() && ST->hasAVX512())
    if (const auto *Entry =
            CostTableLookup(AVX512UniformConstCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v16i8,  { 1,  8, 2, 3 } }, // psllw + pand.
    { ISD::SRL,  MVT::v16i8,  { 1,  8, 2, 3 } }, // psrlw + pand.
    { ISD::SRA,  MVT::v16i8,  { 2, 10, 5, 6 } }, // psrlw, pand, pxor, psubb.
    { ISD::SHL,  MVT::v32i8,  { 2,  8, 2, 4 } }, // psllw + pand.
    { ISD::SRL,  MVT::v32i8,  { 2,  8, 2, 4 } }, // psrlw + pand.
    { ISD::SRA,  MVT::v32i8,  { 3, 10, 5, 9 } }, // psrlw, pand, pxor, psubb.

    { ISD::SHL,  MVT::v8i16,  { 1,  1, 1, 1 } }, // psllw
    { ISD::SRL,  MVT::v8i16,  { 1,  1, 1, 1 } }, // psrlw
    { ISD::SRA,  MVT::v8i16,  { 1,  1, 1, 1 } }, // psraw
    { ISD::SHL,  MVT::v16i16, { 2,  2, 1, 2 } }, // psllw
    { ISD::SRL,  MVT::v16i16, { 2,  2, 1, 2 } }, // psrlw
    { ISD::SRA,  MVT::v16i16, { 2,  2, 1, 2 } }, // psraw

    { ISD::SHL,  MVT::v4i32,  { 1,  1, 1, 1 } }, // pslld
    { ISD::SRL,  MVT::v4i32,  { 1,  1, 1, 1 } }, // psrld
    { ISD::SRA,  MVT::v4i32,  { 1,  1, 1, 1 } }, // psrad
    { ISD::SHL,  MVT::v8i32,  { 2,  2, 1, 2 } }, // pslld
    { ISD::SRL,  MVT::v8i32,  { 2,  2, 1, 2 } }, // psrld
    { ISD::SRA,  MVT::v8i32,  { 2,  2, 1, 2 } }, // psrad

    { ISD::SHL,  MVT::v2i64,  { 1,  1, 1, 1 } }, // psllq
    { ISD::SRL,  MVT::v2i64,  { 1,  1, 1, 1 } }, // psrlq
    { ISD::SRA,  MVT::v2i64,  { 2,  3, 3, 3 } }, // psrad + shuffle.
    { ISD::SHL,  MVT::v4i64,  { 2,  2, 1, 2 } }, // psllq
    { ISD::SRL,  MVT::v4i64,  { 2,  2, 1, 2 } }, // psrlq
    { ISD::SRA,  MVT::v4i64,  { 4,  4, 3, 6 } }, // psrad + shuffle + split.

    { ISD::SDIV, MVT::v8i32,  { 6 } }, // pmuludq sequence
    { ISD::SREM, MVT::v8i32,  { 8 } }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  { 5 } }, // pmuludq sequence
    { ISD::UREM, MVT::v8i32,  { 7 } }, // pmuludq+mul+sub sequence
  };

  if (Op2Info.isUniform() && Op2Info.isConstant() && ST->hasAVX2())
    if (const auto *Entry =
            CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry AVXUniformConstCostTable[] = {
    { ISD::SHL,  MVT::v16i8,  { 2,  7,  2,  3 } }, // psllw + pand.
    { ISD::SRL,  MVT::v16i8,  { 2,  7,  2,  3 } }, // psrlw + pand.
    { ISD::SRA,  MVT::v16i8,  { 3,  9,  5,  6 } }, // psrlw, pand, pxor, psubb.
    { ISD::SHL,  MVT::v32i8,  { 4,  7,  7,  8 } }, // 2*(psllw + pand) + split.
    { ISD::SRL,  MVT::v32i8,  { 4,  7,  7,  8 } }, // 2*(psrlw + pand) + split.
    { ISD::SRA,  MVT::v32i8,  { 7,  7, 12, 13 } }, // 2*(psrlw, pand, pxor, psubb) + split.

    { ISD::SHL,  MVT::v8i16,  { 1,  2,  1,  1 } }, // psllw.
    { ISD::SRL,  MVT::v8i16,  { 1,  2,  1,  1 } }, // psrlw.
    { ISD::SRA,  MVT::v8i16,  { 1,  2,  1,  1 } }, // psraw.
    { ISD::SHL,  MVT::v16i16, { 3,  6,  4,  5 } }, // psllw + split.
    { ISD::SRL,  MVT::v16i16, { 3,  6,  4,  5 } }, // psrlw + split.
    { ISD::SRA,  MVT::v16i16, { 3,  6,  4,  5 } }, // psraw + split.

    { ISD::SHL,  MVT::v4i32,  { 1,  2,  1,  1 } }, // pslld.
    { ISD::SRL,  MVT::v4i32,  { 1,  2,  1,  1 } }, // psrld.
    { ISD::SRA,  MVT::v4i32,  { 1,  2,  1,  1 } }, // psrad.
    { ISD::SHL,  MVT::v8i32,  { 3,  6,  4,  5 } }, // pslld + split.
    { ISD::SRL,  MVT::v8i32,  { 3,  6,  4,  5 } }, // psrld + split.
    { ISD::SRA,  MVT::v8i32,  { 3,  6,  4,  5 } }, // psrad + split.

    { ISD::SHL,  MVT::v2i64,  { 1,  2,  1,  1 } }, // psllq.
    { ISD::SRL,  MVT::v2i64,  { 1,  2,  1,  1 } }, // psrlq.
    { ISD::SRA,  MVT::v2i64,  { 2,  3,  3,  3 } }, // psrad + shuffle.
    { ISD::SHL,  MVT::v4i64,  { 3,  6,  4,  5 } }, // 2 x psllq + split.
    { ISD::SRL,  MVT::v4i64,  { 3,  6,  4,  5 } }, // 2 x psrlq + split.
    { ISD::SRA,  MVT::v4i64,  { 5,  7,  8,  9 } }, // 2 x psrad + shuffle + split.

    { ISD::SDIV, MVT::v8i32,  { 14 } }, // 2*pmuludq sequence + split.
    { ISD::SREM, MVT::v8i32,  { 18 } }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::UDIV, MVT::v8i32,  { 12 } }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32,  { 16 } }, // 2*pmuludq+mul+sub sequence + split.
  };

  // XOP has faster vXi8 shifts.
  if (Op2Info.isUniform() && Op2Info.isConstant() && ST->hasAVX() &&
      (!ST->hasXOP() || LT.second.getScalarSizeInBits() != 8))
    if (const auto *Entry =
            CostTableLookup(AVXUniformConstCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v16i8, { 1, 7, 2, 3 } }, // psllw + pand.
    { ISD::SRL,  MVT::v16i8, { 1, 7, 2, 3 } }, // psrlw + pand.
    { ISD::SRA,  MVT::v16i8, { 3, 9, 5, 6 } }, // psrlw, pand, pxor, psubb.

    { ISD::SHL,  MVT::v8i16, { 1, 1, 1, 1 } }, // psllw.
    { ISD::SRL,  MVT::v8i16, { 1, 1, 1, 1 } }, // psrlw.
    { ISD::SRA,  MVT::v8i16, { 1, 1, 1, 1 } }, // psraw.

    { ISD::SHL,  MVT::v4i32, { 1, 1, 1, 1 } }, // pslld
    { ISD::SRL,  MVT::v4i32, { 1, 1, 1, 1 } }, // psrld.
    { ISD::SRA,  MVT::v4i32, { 1, 1, 1, 1 } }, // psrad.

    { ISD::SHL,  MVT::v2i64, { 1, 1, 1, 1 } }, // psllq.
    { ISD::SRL,  MVT::v2i64, { 1, 1, 1, 1 } }, // psrlq.
    { ISD::SRA,  MVT::v2i64, { 3, 5, 6, 6 } }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v4i32, { 6 } }, // pmuludq sequence
    { ISD::SREM, MVT::v4i32, { 8 } }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v4i32, { 5 } }, // pmuludq sequence
    { ISD::UREM, MVT::v4i32, { 7 } }, // pmuludq+mul+sub sequence
  };

  // XOP has faster vXi8 shifts.
  if (Op2Info.isUniform() && Op2Info.isConstant() && ST->hasSSE2() &&
      (!ST->hasXOP() || LT.second.getScalarSizeInBits() != 8))
    if (const auto *Entry =
            CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry AVX512BWConstCostTable[] = {
    { ISD::SDIV, MVT::v64i8,  { 14 } }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  { 16 } }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  { 14 } }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  { 16 } }, // 2*ext+2*pmulhw+mul+sub sequence

    { ISD::SDIV, MVT::v32i16, {  6 } }, // vpmulhw sequence
    { ISD::SREM, MVT::v32i16, {  8 } }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16, {  6 } }, // vpmulhuw sequence
    { ISD::UREM, MVT::v32i16, {  8 } }, // vpmulhuw+mul+sub sequence
  };

  if (Op2Info.isConstant() && ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry AVX512ConstCostTable[] = {
    { ISD::SDIV, MVT::v64i8,  { 28 } }, // 4*ext+4*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  { 32 } }, // 4*ext+4*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  { 28 } }, // 4*ext+4*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  { 32 } }, // 4*ext+4*pmulhw+mul+sub sequence

    { ISD::SDIV, MVT::v32i16, { 12 } }, // 2*vpmulhw sequence
    { ISD::SREM, MVT::v32i16, { 16 } }, // 2*vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16, { 12 } }, // 2*vpmulhuw sequence
    { ISD::UREM, MVT::v32i16, { 16 } }, // 2*vpmulhuw+mul+sub sequence

    { ISD::SDIV, MVT::v16i32, { 15 } }, // vpmuldq sequence
    { ISD::SREM, MVT::v16i32, { 17 } }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32, { 15 } }, // vpmuludq sequence
    { ISD::UREM, MVT::v16i32, { 17 } }, // vpmuludq+mul+sub sequence
  };

  if (Op2Info.isConstant() && ST->hasAVX512())
    if (const auto *Entry =
            CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry AVX2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  { 14 } }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v32i8,  { 16 } }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  { 14 } }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v32i8,  { 16 } }, // 2*ext+2*pmulhw+mul+sub sequence

    { ISD::SDIV, MVT::v16i16, {  6 } }, // vpmulhw sequence
    { ISD::SREM, MVT::v16i16, {  8 } }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16, {  6 } }, // vpmulhuw sequence
    { ISD::UREM, MVT::v16i16, {  8 } }, // vpmulhuw+mul+sub sequence

    { ISD::SDIV, MVT::v8i32,  { 15 } }, // vpmuldq sequence
    { ISD::SREM, MVT::v8i32,  { 19 } }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  { 15 } }, // vpmuludq sequence
    { ISD::UREM, MVT::v8i32,  { 19 } }, // vpmuludq+mul+sub sequence
  };

  if (Op2Info.isConstant() && ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry AVXConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  { 30 } }, // 4*ext+4*pmulhw sequence + split.
    { ISD::SREM, MVT::v32i8,  { 34 } }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v32i8,  { 30 } }, // 4*ext+4*pmulhw sequence + split.
    { ISD::UREM, MVT::v32i8,  { 34 } }, // 4*ext+4*pmulhw+mul+sub sequence + split.

    { ISD::SDIV, MVT::v16i16, { 14 } }, // 2*pmulhw sequence + split.
    { ISD::SREM, MVT::v16i16, { 18 } }, // 2*pmulhw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v16i16, { 14 } }, // 2*pmulhuw sequence + split.
    { ISD::UREM, MVT::v16i16, { 18 } }, // 2*pmulhuw+mul+sub sequence + split.

    { ISD::SDIV, MVT::v8i32,  { 32 } }, // vpmuludq sequence
    { ISD::SREM, MVT::v8i32,  { 38 } }, // vpmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  { 32 } }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32,  { 42 } }, // 2*pmuludq+mul+sub sequence + split.
  };

  if (Op2Info.isConstant() && ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVXConstCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry SSE41ConstCostTable[] = {
    { ISD::SDIV, MVT::v4i32,  { 15 } }, // vpmuludq sequence
    { ISD::SREM, MVT::v4i32,  { 20 } }, // vpmuludq+mul+sub sequence
  };

  if (Op2Info.isConstant() && ST->hasSSE41())
    if (const auto *Entry =
            CostTableLookup(SSE41ConstCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry SSE2ConstCostTable[] = {
    { ISD::SDIV, MVT::v16i8, { 14 } }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v16i8, { 16 } }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i8, { 14 } }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v16i8, { 16 } }, // 2*ext+2*pmulhw+mul+sub sequence

    { ISD::SDIV, MVT::v8i16, {  6 } }, // pmulhw sequence
    { ISD::SREM, MVT::v8i16, {  8 } }, // pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v8i16, {  6 } }, // pmulhuw sequence
    { ISD::UREM, MVT::v8i16, {  8 } }, // pmulhuw+mul+sub sequence

    { ISD::SDIV, MVT::v4i32, { 19 } }, // pmuludq sequence
    { ISD::SREM, MVT::v4i32, { 24 } }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v4i32, { 15 } }, // pmuludq sequence
    { ISD::UREM, MVT::v4i32, { 20 } }, // pmuludq+mul+sub sequence
  };

  if (Op2Info.isConstant() && ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;
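
  // Note: the "pmulhw sequence" costs above model division by an arbitrary
  // constant via multiplication by a magic constant, e.g. for vXi16,
  // (X sdiv C) is lowered to a pmulhw with a precomputed magic value plus
  // shift/add fix-ups (the classic magic-number division expansion).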

  static const CostKindTblEntry AVX512BWUniformCostTable[] = {
    { ISD::SHL, MVT::v16i8,  { 3, 5, 5, 7 } }, // psllw + pand.
    { ISD::SRL, MVT::v16i8,  { 3,10, 5, 8 } }, // psrlw + pand.
    { ISD::SRA, MVT::v16i8,  { 4,12, 8,12 } }, // psrlw, pand, pxor, psubb.
    { ISD::SHL, MVT::v32i8,  { 4, 7, 6, 8 } }, // psllw + pand.
    { ISD::SRL, MVT::v32i8,  { 4, 8, 7, 9 } }, // psrlw + pand.
    { ISD::SRA, MVT::v32i8,  { 5,10,10,13 } }, // psrlw, pand, pxor, psubb.
    { ISD::SHL, MVT::v64i8,  { 4, 7, 6, 8 } }, // psllw + pand.
    { ISD::SRL, MVT::v64i8,  { 4, 8, 7,10 } }, // psrlw + pand.
    { ISD::SRA, MVT::v64i8,  { 5,10,10,15 } }, // psrlw, pand, pxor, psubb.

    { ISD::SHL, MVT::v32i16, { 2, 4, 2, 3 } }, // psllw
    { ISD::SRL, MVT::v32i16, { 2, 4, 2, 3 } }, // psrlw
    { ISD::SRA, MVT::v32i16, { 2, 4, 2, 3 } }, // psraw
  };

  if (ST->hasBWI() && Op2Info.isUniform())
    if (const auto *Entry =
            CostTableLookup(AVX512BWUniformCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry AVX512UniformCostTable[] = {
    { ISD::SHL, MVT::v32i16, { 5,10, 5, 7 } }, // psllw + split.
    { ISD::SRL, MVT::v32i16, { 5,10, 5, 7 } }, // psrlw + split.
    { ISD::SRA, MVT::v32i16, { 5,10, 5, 7 } }, // psraw + split.

    { ISD::SHL, MVT::v16i32, { 2, 4, 2, 3 } }, // pslld
    { ISD::SRL, MVT::v16i32, { 2, 4, 2, 3 } }, // psrld
    { ISD::SRA, MVT::v16i32, { 2, 4, 2, 3 } }, // psrad

    { ISD::SRA, MVT::v2i64,  { 1, 2, 1, 2 } }, // psraq
    { ISD::SHL, MVT::v4i64,  { 1, 4, 1, 2 } }, // psllq
    { ISD::SRL, MVT::v4i64,  { 1, 4, 1, 2 } }, // psrlq
    { ISD::SRA, MVT::v4i64,  { 1, 4, 1, 2 } }, // psraq
    { ISD::SHL, MVT::v8i64,  { 1, 4, 1, 2 } }, // psllq
    { ISD::SRL, MVT::v8i64,  { 1, 4, 1, 2 } }, // psrlq
    { ISD::SRA, MVT::v8i64,  { 1, 4, 1, 2 } }, // psraq
  };

  if (ST->hasAVX512() && Op2Info.isUniform())
    if (const auto *Entry =
            CostTableLookup(AVX512UniformCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry AVX2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i8,  { 3, 5, 5, 7 } }, // psllw + pand.
    { ISD::SRL, MVT::v16i8,  { 3, 9, 5, 8 } }, // psrlw + pand.
    { ISD::SRA, MVT::v16i8,  { 4, 5, 9,13 } }, // psrlw, pand, pxor, psubb.
    { ISD::SHL, MVT::v32i8,  { 4, 7, 6, 8 } }, // psllw + pand.
    { ISD::SRL, MVT::v32i8,  { 4, 8, 7, 9 } }, // psrlw + pand.
    { ISD::SRA, MVT::v32i8,  { 6, 9,11,16 } }, // psrlw, pand, pxor, psubb.

    { ISD::SHL, MVT::v8i16,  { 1, 2, 1, 2 } }, // psllw.
    { ISD::SRL, MVT::v8i16,  { 1, 2, 1, 2 } }, // psrlw.
    { ISD::SRA, MVT::v8i16,  { 1, 2, 1, 2 } }, // psraw.
    { ISD::SHL, MVT::v16i16, { 2, 4, 2, 3 } }, // psllw.
    { ISD::SRL, MVT::v16i16, { 2, 4, 2, 3 } }, // psrlw.
    { ISD::SRA, MVT::v16i16, { 2, 4, 2, 3 } }, // psraw.

    { ISD::SHL, MVT::v4i32,  { 1, 2, 1, 2 } }, // pslld
    { ISD::SRL, MVT::v4i32,  { 1, 2, 1, 2 } }, // psrld
    { ISD::SRA, MVT::v4i32,  { 1, 2, 1, 2 } }, // psrad
    { ISD::SHL, MVT::v8i32,  { 2, 4, 2, 3 } }, // pslld
    { ISD::SRL, MVT::v8i32,  { 2, 4, 2, 3 } }, // psrld
    { ISD::SRA, MVT::v8i32,  { 2, 4, 2, 3 } }, // psrad

    { ISD::SHL, MVT::v2i64,  { 1, 2, 1, 2 } }, // psllq
    { ISD::SRL, MVT::v2i64,  { 1, 2, 1, 2 } }, // psrlq
    { ISD::SRA, MVT::v2i64,  { 2, 4, 5, 7 } }, // 2 x psrad + shuffle.
    { ISD::SHL, MVT::v4i64,  { 2, 4, 1, 2 } }, // psllq
    { ISD::SRL, MVT::v4i64,  { 2, 4, 1, 2 } }, // psrlq
    { ISD::SRA, MVT::v4i64,  { 4, 6, 5, 9 } }, // 2 x psrad + shuffle.
  };

  if (ST->hasAVX2() && Op2Info.isUniform())
    if (const auto *Entry =
            CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry AVXUniformCostTable[] = {
    { ISD::SHL, MVT::v16i8,  {  4,  4, 6, 8 } }, // psllw + pand.
    { ISD::SRL, MVT::v16i8,  {  4,  8, 5, 8 } }, // psrlw + pand.
    { ISD::SRA, MVT::v16i8,  {  6,  6, 9,13 } }, // psrlw, pand, pxor, psubb.
    { ISD::SHL, MVT::v32i8,  {  7,  8,11,14 } }, // psllw + pand + split.
    { ISD::SRL, MVT::v32i8,  {  7,  9,10,14 } }, // psrlw + pand + split.
    { ISD::SRA, MVT::v32i8,  { 10, 11,16,21 } }, // psrlw, pand, pxor, psubb + split.

    { ISD::SHL, MVT::v8i16,  {  1,  3, 1, 2 } }, // psllw.
    { ISD::SRL, MVT::v8i16,  {  1,  3, 1, 2 } }, // psrlw.
    { ISD::SRA, MVT::v8i16,  {  1,  3, 1, 2 } }, // psraw.
    { ISD::SHL, MVT::v16i16, {  3,  7, 5, 7 } }, // psllw + split.
    { ISD::SRL, MVT::v16i16, {  3,  7, 5, 7 } }, // psrlw + split.
    { ISD::SRA, MVT::v16i16, {  3,  7, 5, 7 } }, // psraw + split.

    { ISD::SHL, MVT::v4i32,  {  1,  3, 1, 2 } }, // pslld.
    { ISD::SRL, MVT::v4i32,  {  1,  3, 1, 2 } }, // psrld.
    { ISD::SRA, MVT::v4i32,  {  1,  3, 1, 2 } }, // psrad.
    { ISD::SHL, MVT::v8i32,  {  3,  7, 5, 7 } }, // pslld + split.
    { ISD::SRL, MVT::v8i32,  {  3,  7, 5, 7 } }, // psrld + split.
    { ISD::SRA, MVT::v8i32,  {  3,  7, 5, 7 } }, // psrad + split.

    { ISD::SHL, MVT::v2i64,  {  1,  3, 1, 2 } }, // psllq.
    { ISD::SRL, MVT::v2i64,  {  1,  3, 1, 2 } }, // psrlq.
    { ISD::SRA, MVT::v2i64,  {  3,  4, 5, 7 } }, // 2 x psrad + shuffle.
    { ISD::SHL, MVT::v4i64,  {  3,  7, 4, 6 } }, // psllq + split.
    { ISD::SRL, MVT::v4i64,  {  3,  7, 4, 6 } }, // psrlq + split.
    { ISD::SRA, MVT::v4i64,  {  6,  7,10,13 } }, // 2 x (2 x psrad + shuffle) + split.
  };

  // XOP has faster vXi8 shifts.
  if (ST->hasAVX() && Op2Info.isUniform() &&
      (!ST->hasXOP() || LT.second.getScalarSizeInBits() != 8))
    if (const auto *Entry =
            CostTableLookup(AVXUniformCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i8, {  9, 10, 6, 9 } }, // psllw + pand.
    { ISD::SRL, MVT::v16i8, {  9, 13, 5, 9 } }, // psrlw + pand.
    { ISD::SRA, MVT::v16i8, { 11, 15, 9,13 } }, // pcmpgtb sequence.

    { ISD::SHL, MVT::v8i16, {  2,  2, 1, 2 } }, // psllw.
    { ISD::SRL, MVT::v8i16, {  2,  2, 1, 2 } }, // psrlw.
    { ISD::SRA, MVT::v8i16, {  2,  2, 1, 2 } }, // psraw.

    { ISD::SHL, MVT::v4i32, {  2,  2, 1, 2 } }, // pslld
    { ISD::SRL, MVT::v4i32, {  2,  2, 1, 2 } }, // psrld.
    { ISD::SRA, MVT::v4i32, {  2,  2, 1, 2 } }, // psrad.

    { ISD::SHL, MVT::v2i64, {  2,  2, 1, 2 } }, // psllq.
    { ISD::SRL, MVT::v2i64, {  2,  2, 1, 2 } }, // psrlq.
    { ISD::SRA, MVT::v2i64, {  5,  9, 5, 7 } }, // 2*psrlq + xor + sub.
  };

  if (ST->hasSSE2() && Op2Info.isUniform() &&
      (!ST->hasXOP() || LT.second.getScalarSizeInBits() != 8))
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry AVX512DQCostTable[] = {
    { ISD::MUL, MVT::v2i64, { 2, 15, 1, 3 } }, // pmullq
    { ISD::MUL, MVT::v4i64, { 2, 15, 1, 3 } }, // pmullq
    { ISD::MUL, MVT::v8i64, { 3, 15, 1, 3 } }  // pmullq
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI())
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry AVX512BWCostTable[] = {
    { ISD::SHL, MVT::v16i8,  {  4,  8, 4, 5 } }, // extend/vpsllvw/pack sequence.
    { ISD::SRL, MVT::v16i8,  {  4,  8, 4, 5 } }, // extend/vpsrlvw/pack sequence.
    { ISD::SRA, MVT::v16i8,  {  4,  8, 4, 5 } }, // extend/vpsravw/pack sequence.
    { ISD::SHL, MVT::v32i8,  {  4, 23,11,16 } }, // extend/vpsllvw/pack sequence.
    { ISD::SRL, MVT::v32i8,  {  4, 30,12,18 } }, // extend/vpsrlvw/pack sequence.
    { ISD::SRA, MVT::v32i8,  {  6, 13,24,30 } }, // extend/vpsravw/pack sequence.
    { ISD::SHL, MVT::v64i8,  {  6, 19,13,15 } }, // extend/vpsllvw/pack sequence.
    { ISD::SRL, MVT::v64i8,  {  7, 27,15,18 } }, // extend/vpsrlvw/pack sequence.
    { ISD::SRA, MVT::v64i8,  { 15, 15,30,30 } }, // extend/vpsravw/pack sequence.

    { ISD::SHL, MVT::v8i16,  {  1,  1, 1, 1 } }, // vpsllvw
    { ISD::SRL, MVT::v8i16,  {  1,  1, 1, 1 } }, // vpsrlvw
    { ISD::SRA, MVT::v8i16,  {  1,  1, 1, 1 } }, // vpsravw
    { ISD::SHL, MVT::v16i16, {  1,  1, 1, 1 } }, // vpsllvw
    { ISD::SRL, MVT::v16i16, {  1,  1, 1, 1 } }, // vpsrlvw
    { ISD::SRA, MVT::v16i16, {  1,  1, 1, 1 } }, // vpsravw
    { ISD::SHL, MVT::v32i16, {  1,  1, 1, 1 } }, // vpsllvw
    { ISD::SRL, MVT::v32i16, {  1,  1, 1, 1 } }, // vpsrlvw
    { ISD::SRA, MVT::v32i16, {  1,  1, 1, 1 } }, // vpsravw

    { ISD::ADD, MVT::v64i8,  {  1,  1, 1, 1 } }, // paddb
    { ISD::ADD, MVT::v32i16, {  1,  1, 1, 1 } }, // paddw

    { ISD::ADD, MVT::v32i8,  {  1,  1, 1, 1 } }, // paddb
    { ISD::ADD, MVT::v16i16, {  1,  1, 1, 1 } }, // paddw
    { ISD::ADD, MVT::v8i32,  {  1,  1, 1, 1 } }, // paddd
    { ISD::ADD, MVT::v4i64,  {  1,  1, 1, 1 } }, // paddq

    { ISD::SUB, MVT::v64i8,  {  1,  1, 1, 1 } }, // psubb
    { ISD::SUB, MVT::v32i16, {  1,  1, 1, 1 } }, // psubw

    { ISD::MUL, MVT::v64i8,  {  5, 10,10,11 } },
    { ISD::MUL, MVT::v32i16, {  1,  5, 1, 1 } }, // pmullw

    { ISD::SUB, MVT::v32i8,  {  1,  1, 1, 1 } }, // psubb
    { ISD::SUB, MVT::v16i16, {  1,  1, 1, 1 } }, // psubw
    { ISD::SUB, MVT::v8i32,  {  1,  1, 1, 1 } }, // psubd
    { ISD::SUB, MVT::v4i64,  {  1,  1, 1, 1 } }, // psubq
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry AVX512CostTable[] = {
    { ISD::SHL,     MVT::v64i8,  { 15, 19,27,33 } }, // vpblendv+split sequence.
    { ISD::SRL,     MVT::v64i8,  { 15, 19,30,36 } }, // vpblendv+split sequence.
    { ISD::SRA,     MVT::v64i8,  { 37, 37,51,63 } }, // vpblendv+split sequence.

    { ISD::SHL,     MVT::v32i16, { 11, 16,11,15 } }, // 2*extend/vpsrlvd/pack sequence.
    { ISD::SRL,     MVT::v32i16, { 11, 16,11,15 } }, // 2*extend/vpsrlvd/pack sequence.
    { ISD::SRA,     MVT::v32i16, { 11, 16,11,15 } }, // 2*extend/vpsravd/pack sequence.

    { ISD::SHL,     MVT::v4i32,  {  1,  1, 1, 1 } },
    { ISD::SRL,     MVT::v4i32,  {  1,  1, 1, 1 } },
    { ISD::SRA,     MVT::v4i32,  {  1,  1, 1, 1 } },
    { ISD::SHL,     MVT::v8i32,  {  1,  1, 1, 1 } },
    { ISD::SRL,     MVT::v8i32,  {  1,  1, 1, 1 } },
    { ISD::SRA,     MVT::v8i32,  {  1,  1, 1, 1 } },
    { ISD::SHL,     MVT::v16i32, {  1,  1, 1, 1 } },
    { ISD::SRL,     MVT::v16i32, {  1,  1, 1, 1 } },
    { ISD::SRA,     MVT::v16i32, {  1,  1, 1, 1 } },

    { ISD::SHL,     MVT::v2i64,  {  1,  1, 1, 1 } },
    { ISD::SRL,     MVT::v2i64,  {  1,  1, 1, 1 } },
    { ISD::SRA,     MVT::v2i64,  {  1,  1, 1, 1 } },
    { ISD::SHL,     MVT::v4i64,  {  1,  1, 1, 1 } },
    { ISD::SRL,     MVT::v4i64,  {  1,  1, 1, 1 } },
    { ISD::SRA,     MVT::v4i64,  {  1,  1, 1, 1 } },
    { ISD::SHL,     MVT::v8i64,  {  1,  1, 1, 1 } },
    { ISD::SRL,     MVT::v8i64,  {  1,  1, 1, 1 } },
    { ISD::SRA,     MVT::v8i64,  {  1,  1, 1, 1 } },

    { ISD::ADD,     MVT::v64i8,  {  3,  7, 5, 5 } }, // 2*paddb + split
    { ISD::ADD,     MVT::v32i16, {  3,  7, 5, 5 } }, // 2*paddw + split

    { ISD::SUB,     MVT::v64i8,  {  3,  7, 5, 5 } }, // 2*psubb + split
    { ISD::SUB,     MVT::v32i16, {  3,  7, 5, 5 } }, // 2*psubw + split

    { ISD::AND,     MVT::v32i8,  {  1,  1, 1, 1 } },
    { ISD::AND,     MVT::v16i16, {  1,  1, 1, 1 } },
    { ISD::AND,     MVT::v8i32,  {  1,  1, 1, 1 } },
    { ISD::AND,     MVT::v4i64,  {  1,  1, 1, 1 } },

    { ISD::OR,      MVT::v32i8,  {  1,  1, 1, 1 } },
    { ISD::OR,      MVT::v16i16, {  1,  1, 1, 1 } },
    { ISD::OR,      MVT::v8i32,  {  1,  1, 1, 1 } },
    { ISD::OR,      MVT::v4i64,  {  1,  1, 1, 1 } },

    { ISD::XOR,     MVT::v32i8,  {  1,  1, 1, 1 } },
    { ISD::XOR,     MVT::v16i16, {  1,  1, 1, 1 } },
    { ISD::XOR,     MVT::v8i32,  {  1,  1, 1, 1 } },
    { ISD::XOR,     MVT::v4i64,  {  1,  1, 1, 1 } },

    { ISD::MUL,     MVT::v16i32, {  1, 10, 1, 2 } }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v8i32,  {  1, 10, 1, 2 } }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v4i32,  {  1, 10, 1, 2 } }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v8i64,  {  6,  9, 8, 8 } }, // 3*pmuludq/3*shift/2*add
    { ISD::MUL,     MVT::i64,    {  1 } },           // Skylake from http://www.agner.org/

    { X86ISD::PMULUDQ, MVT::v8i64, { 1,  5, 1, 1 } },

    { ISD::FNEG,    MVT::v8f64,  {  1,  1, 1, 2 } }, // Skylake from http://www.agner.org/
    { ISD::FADD,    MVT::v8f64,  {  1,  4, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FADD,    MVT::v4f64,  {  1,  4, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FSUB,    MVT::v8f64,  {  1,  4, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FSUB,    MVT::v4f64,  {  1,  4, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::v8f64,  {  1,  4, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::v4f64,  {  1,  4, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::v2f64,  {  1,  4, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::f64,    {  1,  4, 1, 1 } }, // Skylake from http://www.agner.org/

    { ISD::FDIV,    MVT::f64,    {  4, 14, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::v2f64,  {  4, 14, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::v4f64,  {  8, 14, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::v8f64,  { 16, 23, 1, 3 } }, // Skylake from http://www.agner.org/

    { ISD::FNEG,    MVT::v16f32, {  1,  1, 1, 2 } }, // Skylake from http://www.agner.org/
    { ISD::FADD,    MVT::v16f32, {  1,  4, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FADD,    MVT::v8f32,  {  1,  4, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FSUB,    MVT::v16f32, {  1,  4, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FSUB,    MVT::v8f32,  {  1,  4, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::v16f32, {  1,  4, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::v8f32,  {  1,  4, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::v4f32,  {  1,  4, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::f32,    {  1,  4, 1, 1 } }, // Skylake from http://www.agner.org/

    { ISD::FDIV,    MVT::f32,    {  3, 11, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::v4f32,  {  3, 11, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::v8f32,  {  5, 11, 1, 1 } }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::v16f32, { 10, 18, 1, 3 } }, // Skylake from http://www.agner.org/
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry AVX2ShiftCostTable[] = {
    // Shifts on vXi64/vXi32 on AVX2 are legal even though we declare them
    // custom in order to detect the cases where the shift amount is a scalar.
    { ISD::SHL, MVT::v4i32, { 2, 3, 1, 3 } }, // vpsllvd (Haswell from agner.org)
    { ISD::SRL, MVT::v4i32, { 2, 3, 1, 3 } }, // vpsrlvd (Haswell from agner.org)
    { ISD::SRA, MVT::v4i32, { 2, 3, 1, 3 } }, // vpsravd (Haswell from agner.org)
    { ISD::SHL, MVT::v8i32, { 4, 4, 1, 3 } }, // vpsllvd (Haswell from agner.org)
    { ISD::SRL, MVT::v8i32, { 4, 4, 1, 3 } }, // vpsrlvd (Haswell from agner.org)
    { ISD::SRA, MVT::v8i32, { 4, 4, 1, 3 } }, // vpsravd (Haswell from agner.org)
    { ISD::SHL, MVT::v2i64, { 2, 3, 1, 1 } }, // vpsllvq (Haswell from agner.org)
    { ISD::SRL, MVT::v2i64, { 2, 3, 1, 1 } }, // vpsrlvq (Haswell from agner.org)
    { ISD::SHL, MVT::v4i64, { 4, 4, 1, 2 } }, // vpsllvq (Haswell from agner.org)
    { ISD::SRL, MVT::v4i64, { 4, 4, 1, 2 } }, // vpsrlvq (Haswell from agner.org)
  };

  if (ST->hasAVX512()) {
    if (ISD == ISD::SHL && LT.second == MVT::v32i16 && Op2Info.isConstant())
      // On AVX512, a packed v32i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                    Op1Info.getNoProps(), Op2Info.getNoProps());
  }

  // Look for AVX2 lowering tricks (XOP is always better at v4i32 shifts).
  if (ST->hasAVX2() && !(ST->hasXOP() && LT.second == MVT::v4i32)) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        Op2Info.isConstant())
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                    Op1Info.getNoProps(), Op2Info.getNoProps());

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;
  }

  static const CostKindTblEntry XOPShiftCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL, MVT::v16i8,  { 1, 3, 1, 1 } },
    { ISD::SRL, MVT::v16i8,  { 2, 3, 1, 1 } },
    { ISD::SRA, MVT::v16i8,  { 2, 3, 1, 1 } },
    { ISD::SHL, MVT::v8i16,  { 1, 3, 1, 1 } },
    { ISD::SRL, MVT::v8i16,  { 2, 3, 1, 1 } },
    { ISD::SRA, MVT::v8i16,  { 2, 3, 1, 1 } },
    { ISD::SHL, MVT::v4i32,  { 1, 3, 1, 1 } },
    { ISD::SRL, MVT::v4i32,  { 2, 3, 1, 1 } },
    { ISD::SRA, MVT::v4i32,  { 2, 3, 1, 1 } },
    { ISD::SHL, MVT::v2i64,  { 1, 3, 1, 1 } },
    { ISD::SRL, MVT::v2i64,  { 2, 3, 1, 1 } },
    { ISD::SRA, MVT::v2i64,  { 2, 3, 1, 1 } },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL, MVT::v32i8,  { 4, 7, 5, 6 } },
    { ISD::SRL, MVT::v32i8,  { 6, 7, 5, 6 } },
    { ISD::SRA, MVT::v32i8,  { 6, 7, 5, 6 } },
    { ISD::SHL, MVT::v16i16, { 4, 7, 5, 6 } },
    { ISD::SRL, MVT::v16i16, { 6, 7, 5, 6 } },
    { ISD::SRA, MVT::v16i16, { 6, 7, 5, 6 } },
    { ISD::SHL, MVT::v8i32,  { 4, 7, 5, 6 } },
    { ISD::SRL, MVT::v8i32,  { 6, 7, 5, 6 } },
    { ISD::SRA, MVT::v8i32,  { 6, 7, 5, 6 } },
    { ISD::SHL, MVT::v4i64,  { 4, 7, 5, 6 } },
    { ISD::SRL, MVT::v4i64,  { 6, 7, 5, 6 } },
    { ISD::SRA, MVT::v4i64,  { 6, 7, 5, 6 } },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    // If the right shift is constant then we'll fold the negation so
    // it's as cheap as a left shift.
    int ShiftISD = ISD;
    if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) && Op2Info.isConstant())
      ShiftISD = ISD::SHL;
    if (const auto *Entry =
            CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;
  }

  if (ISD == ISD::SHL && !Op2Info.isUniform() && Op2Info.isConstant()) {
    MVT VT = LT.second;
    // Vector shift left by a non-uniform constant can be lowered
    // into a vector multiply.
    if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
        ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
      ISD = ISD::MUL;
  }
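
  // E.g. shl <4 x i32> X, <i32 1, i32 2, i32 3, i32 4> is costed as
  // mul X, <i32 2, i32 4, i32 8, i32 16>, matching the lowering.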

  static const CostKindTblEntry GLMCostTable[] = {
    { ISD::FDIV, MVT::f32,   { 18, 19, 1, 1 } }, // divss
    { ISD::FDIV, MVT::v4f32, { 35, 36, 1, 1 } }, // divps
    { ISD::FDIV, MVT::f64,   { 33, 34, 1, 1 } }, // divsd
    { ISD::FDIV, MVT::v2f64, { 65, 66, 1, 1 } }, // divpd
  };

  if (ST->useGLMDivSqrtCosts())
    if (const auto *Entry = CostTableLookup(GLMCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry SLMCostTable[] = {
    { ISD::MUL,  MVT::v4i32, { 11, 11, 1, 7 } }, // pmulld
    { ISD::MUL,  MVT::v8i16, {  2,  5, 1, 1 } }, // pmullw
    { ISD::FMUL, MVT::f64,   {  2,  5, 1, 1 } }, // mulsd
    { ISD::FMUL, MVT::f32,   {  1,  4, 1, 1 } }, // mulss
    { ISD::FMUL, MVT::v2f64, {  4,  7, 1, 1 } }, // mulpd
    { ISD::FMUL, MVT::v4f32, {  2,  5, 1, 1 } }, // mulps
    { ISD::FDIV, MVT::f32,   { 17, 19, 1, 1 } }, // divss
    { ISD::FDIV, MVT::v4f32, { 39, 39, 1, 6 } }, // divps
    { ISD::FDIV, MVT::f64,   { 32, 34, 1, 1 } }, // divsd
    { ISD::FDIV, MVT::v2f64, { 69, 69, 1, 6 } }, // divpd
    { ISD::FADD, MVT::v2f64, {  2,  4, 1, 1 } }, // addpd
    { ISD::FSUB, MVT::v2f64, {  2,  4, 1, 1 } }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long:
    // multiplies(3), shifts(3) and adds(2)
    // slm muldq version throughput is 2 and addq throughput 4
    // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
    // 2X4 (addq throughput) = 17
    { ISD::MUL,  MVT::v2i64, { 17, 22, 9, 9 } },
    // slm addq\subq throughput is 4
    { ISD::ADD,  MVT::v2i64, {  4,  2, 1, 2 } },
    { ISD::SUB,  MVT::v2i64, {  4,  2, 1, 2 } },
  };

  if (ST->useSLMArithCosts())
    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;

  static const CostKindTblEntry AVX2CostTable[] = {
    { ISD::SHL,  MVT::v16i8,  {  6, 21,11,16 } }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v32i8,  {  6, 23,11,22 } }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v8i16,  {  5, 18, 5,10 } }, // extend/vpsrlvd/pack sequence.
    { ISD::SHL,  MVT::v16i16, {  8, 10,10,14 } }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v16i8,  {  6, 27,12,18 } }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v32i8,  {  8, 30,12,24 } }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v8i16,  {  5, 11, 5,10 } }, // extend/vpsrlvd/pack sequence.
    { ISD::SRL,  MVT::v16i16, {  8, 10,10,14 } }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v16i8,  { 17, 17,24,30 } }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v32i8,  { 18, 20,24,43 } }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v8i16,  {  5, 11, 5,10 } }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v16i16, {  8, 10,10,14 } }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,  {  4,  5, 5, 5 } }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,  {  8,  8, 5, 9 } }, // srl/xor/sub sequence.

    { ISD::SUB,  MVT::v32i8,  {  1,  1, 1, 2 } }, // psubb
    { ISD::ADD,  MVT::v32i8,  {  1,  1, 1, 2 } }, // paddb
    { ISD::SUB,  MVT::v16i16, {  1,  1, 1, 2 } }, // psubw
    { ISD::ADD,  MVT::v16i16, {  1,  1, 1, 2 } }, // paddw
    { ISD::SUB,  MVT::v8i32,  {  1,  1, 1, 2 } }, // psubd
    { ISD::ADD,  MVT::v8i32,  {  1,  1, 1, 2 } }, // paddd
    { ISD::SUB,  MVT::v4i64,  {  1,  1, 1, 2 } }, // psubq
    { ISD::ADD,  MVT::v4i64,  {  1,  1, 1, 2 } }, // paddq

    { ISD::MUL,  MVT::v16i8,  {  5, 18, 6,12 } }, // extend/pmullw/pack
    { ISD::MUL,  MVT::v32i8,  {  6, 11,10,19 } }, // unpack/pmullw
    { ISD::MUL,  MVT::v16i16, {  2,  5, 1, 2 } }, // pmullw
    { ISD::MUL,  MVT::v8i32,  {  4, 10, 1, 2 } }, // pmulld
    { ISD::MUL,  MVT::v4i32,  {  2, 10, 1, 2 } }, // pmulld
    { ISD::MUL,  MVT::v4i64,  {  6, 10, 8,13 } }, // 3*pmuludq/3*shift/2*add
    { ISD::MUL,  MVT::v2i64,  {  6, 10, 8, 8 } }, // 3*pmuludq/3*shift/2*add

    { X86ISD::PMULUDQ, MVT::v4i64, { 1, 5, 1, 1 } },

    { ISD::FNEG, MVT::v4f64,  {  1,  1, 1, 2 } }, // vxorpd
    { ISD::FNEG, MVT::v8f32,  {  1,  1, 1, 2 } }, // vxorps

    { ISD::FADD, MVT::f64,    {  1,  4, 1, 1 } }, // vaddsd
    { ISD::FADD, MVT::f32,    {  1,  4, 1, 1 } }, // vaddss
    { ISD::FADD, MVT::v2f64,  {  1,  4, 1, 1 } }, // vaddpd
    { ISD::FADD, MVT::v4f32,  {  1,  4, 1, 1 } }, // vaddps
    { ISD::FADD, MVT::v4f64,  {  1,  4, 1, 2 } }, // vaddpd
    { ISD::FADD, MVT::v8f32,  {  1,  4, 1, 2 } }, // vaddps

    { ISD::FSUB, MVT::f64,    {  1,  4, 1, 1 } }, // vsubsd
    { ISD::FSUB, MVT::f32,    {  1,  4, 1, 1 } }, // vsubss
    { ISD::FSUB, MVT::v2f64,  {  1,  4, 1, 1 } }, // vsubpd
    { ISD::FSUB, MVT::v4f32,  {  1,  4, 1, 1 } }, // vsubps
    { ISD::FSUB, MVT::v4f64,  {  1,  4, 1, 2 } }, // vsubpd
    { ISD::FSUB, MVT::v8f32,  {  1,  4, 1, 2 } }, // vsubps

    { ISD::FMUL, MVT::f64,    {  1,  5, 1, 1 } }, // vmulsd
    { ISD::FMUL, MVT::f32,    {  1,  5, 1, 1 } }, // vmulss
    { ISD::FMUL, MVT::v2f64,  {  1,  5, 1, 1 } }, // vmulpd
    { ISD::FMUL, MVT::v4f32,  {  1,  5, 1, 1 } }, // vmulps
    { ISD::FMUL, MVT::v4f64,  {  1,  5, 1, 2 } }, // vmulpd
    { ISD::FMUL, MVT::v8f32,  {  1,  5, 1, 2 } }, // vmulps

    { ISD::FDIV, MVT::f32,    {  7, 13, 1, 1 } }, // vdivss
    { ISD::FDIV, MVT::v4f32,  {  7, 13, 1, 1 } }, // vdivps
    { ISD::FDIV, MVT::v8f32,  { 14, 21, 1, 3 } }, // vdivps
    { ISD::FDIV, MVT::f64,    { 14, 20, 1, 1 } }, // vdivsd
    { ISD::FDIV, MVT::v2f64,  { 14, 20, 1, 1 } }, // vdivpd
    { ISD::FDIV, MVT::v4f64,  { 28, 35, 1, 3 } }, // vdivpd
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      if (auto KindCost = Entry->Cost[CostKind])
        return LT.first * *KindCost;
1146 static const CostKindTblEntry AVX1CostTable[] = {
1147 // We don't have to scalarize unsupported ops. We can issue two half-sized
1148 // operations and we only need to extract the upper YMM half.
1149 // Two ops + 1 extract + 1 insert = 4.
1150 { ISD::MUL, MVT::v32i8, { 12, 13, 22, 23 } }, // unpack/pmullw + split
1151 { ISD::MUL, MVT::v16i16, { 4, 8, 5, 6 } }, // pmullw + split
1152 { ISD::MUL, MVT::v8i32, { 5, 8, 5, 10 } }, // pmulld + split
1153 { ISD::MUL, MVT::v4i32, { 2, 5, 1, 3 } }, // pmulld
1154 { ISD::MUL, MVT::v4i64, { 12, 15, 19, 20 } },
1156 { ISD::AND, MVT::v32i8, { 1, 1, 1, 2 } }, // vandps
1157 { ISD::AND, MVT::v16i16, { 1, 1, 1, 2 } }, // vandps
1158 { ISD::AND, MVT::v8i32, { 1, 1, 1, 2 } }, // vandps
1159 { ISD::AND, MVT::v4i64, { 1, 1, 1, 2 } }, // vandps
1161 { ISD::OR, MVT::v32i8, { 1, 1, 1, 2 } }, // vorps
1162 { ISD::OR, MVT::v16i16, { 1, 1, 1, 2 } }, // vorps
1163 { ISD::OR, MVT::v8i32, { 1, 1, 1, 2 } }, // vorps
1164 { ISD::OR, MVT::v4i64, { 1, 1, 1, 2 } }, // vorps
1166 { ISD::XOR, MVT::v32i8, { 1, 1, 1, 2 } }, // vxorps
1167 { ISD::XOR, MVT::v16i16, { 1, 1, 1, 2 } }, // vxorps
1168 { ISD::XOR, MVT::v8i32, { 1, 1, 1, 2 } }, // vxorps
1169 { ISD::XOR, MVT::v4i64, { 1, 1, 1, 2 } }, // vxorps
1171 { ISD::SUB, MVT::v32i8, { 4, 2, 5, 6 } }, // psubb + split
1172 { ISD::ADD, MVT::v32i8, { 4, 2, 5, 6 } }, // paddb + split
1173 { ISD::SUB, MVT::v16i16, { 4, 2, 5, 6 } }, // psubw + split
1174 { ISD::ADD, MVT::v16i16, { 4, 2, 5, 6 } }, // paddw + split
1175 { ISD::SUB, MVT::v8i32, { 4, 2, 5, 6 } }, // psubd + split
1176 { ISD::ADD, MVT::v8i32, { 4, 2, 5, 6 } }, // paddd + split
1177 { ISD::SUB, MVT::v4i64, { 4, 2, 5, 6 } }, // psubq + split
1178 { ISD::ADD, MVT::v4i64, { 4, 2, 5, 6 } }, // paddq + split
1179 { ISD::SUB, MVT::v2i64, { 1, 1, 1, 1 } }, // psubq
1180 { ISD::ADD, MVT::v2i64, { 1, 1, 1, 1 } }, // paddq
1182 { ISD::SHL, MVT::v16i8, { 10, 21,11,17 } }, // pblendvb sequence.
1183 { ISD::SHL, MVT::v32i8, { 22, 22,27,40 } }, // pblendvb sequence + split.
1184 { ISD::SHL, MVT::v8i16, { 6, 9,11,11 } }, // pblendvb sequence.
1185 { ISD::SHL, MVT::v16i16, { 13, 16,24,25 } }, // pblendvb sequence + split.
1186 { ISD::SHL, MVT::v4i32, { 3, 11, 4, 6 } }, // pslld/paddd/cvttps2dq/pmulld
1187 { ISD::SHL, MVT::v8i32, { 9, 11,12,17 } }, // pslld/paddd/cvttps2dq/pmulld + split
1188 { ISD::SHL, MVT::v2i64, { 2, 4, 4, 6 } }, // Shift each lane + blend.
1189 { ISD::SHL, MVT::v4i64, { 6, 7,11,15 } }, // Shift each lane + blend + split.
1191 { ISD::SRL, MVT::v16i8, { 11, 27,12,18 } }, // pblendvb sequence.
1192 { ISD::SRL, MVT::v32i8, { 23, 23,30,43 } }, // pblendvb sequence + split.
1193 { ISD::SRL, MVT::v8i16, { 13, 16,14,22 } }, // pblendvb sequence.
1194 { ISD::SRL, MVT::v16i16, { 28, 30,31,48 } }, // pblendvb sequence + split.
1195 { ISD::SRL, MVT::v4i32, { 6, 7,12,16 } }, // Shift each lane + blend.
1196 { ISD::SRL, MVT::v8i32, { 14, 14,26,34 } }, // Shift each lane + blend + split.
1197 { ISD::SRL, MVT::v2i64, { 2, 4, 4, 6 } }, // Shift each lane + blend.
1198 { ISD::SRL, MVT::v4i64, { 6, 7,11,15 } }, // Shift each lane + blend + split.
1200 { ISD::SRA, MVT::v16i8, { 21, 22,24,36 } }, // pblendvb sequence.
1201 { ISD::SRA, MVT::v32i8, { 44, 45,51,76 } }, // pblendvb sequence + split.
1202 { ISD::SRA, MVT::v8i16, { 13, 16,14,22 } }, // pblendvb sequence.
1203 { ISD::SRA, MVT::v16i16, { 28, 30,31,48 } }, // pblendvb sequence + split.
1204 { ISD::SRA, MVT::v4i32, { 6, 7,12,16 } }, // Shift each lane + blend.
1205 { ISD::SRA, MVT::v8i32, { 14, 14,26,34 } }, // Shift each lane + blend + split.
1206 { ISD::SRA, MVT::v2i64, { 5, 6,10,14 } }, // Shift each lane + blend.
1207 { ISD::SRA, MVT::v4i64, { 12, 12,22,30 } }, // Shift each lane + blend + split.
1209 { ISD::FNEG, MVT::v4f64, { 2, 2, 1, 2 } }, // BTVER2 from http://www.agner.org/
1210 { ISD::FNEG, MVT::v8f32, { 2, 2, 1, 2 } }, // BTVER2 from http://www.agner.org/
1212 { ISD::FADD, MVT::f64, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
1213 { ISD::FADD, MVT::f32, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
1214 { ISD::FADD, MVT::v2f64, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
1215 { ISD::FADD, MVT::v4f32, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
1216 { ISD::FADD, MVT::v4f64, { 2, 5, 1, 2 } }, // BDVER2 from http://www.agner.org/
1217 { ISD::FADD, MVT::v8f32, { 2, 5, 1, 2 } }, // BDVER2 from http://www.agner.org/
1219 { ISD::FSUB, MVT::f64, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
1220 { ISD::FSUB, MVT::f32, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
1221 { ISD::FSUB, MVT::v2f64, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
1222 { ISD::FSUB, MVT::v4f32, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
1223 { ISD::FSUB, MVT::v4f64, { 2, 5, 1, 2 } }, // BDVER2 from http://www.agner.org/
1224 { ISD::FSUB, MVT::v8f32, { 2, 5, 1, 2 } }, // BDVER2 from http://www.agner.org/
1226 { ISD::FMUL, MVT::f64, { 2, 5, 1, 1 } }, // BTVER2 from http://www.agner.org/
1227 { ISD::FMUL, MVT::f32, { 1, 5, 1, 1 } }, // BTVER2 from http://www.agner.org/
1228 { ISD::FMUL, MVT::v2f64, { 2, 5, 1, 1 } }, // BTVER2 from http://www.agner.org/
1229 { ISD::FMUL, MVT::v4f32, { 1, 5, 1, 1 } }, // BTVER2 from http://www.agner.org/
1230 { ISD::FMUL, MVT::v4f64, { 4, 5, 1, 2 } }, // BTVER2 from http://www.agner.org/
1231 { ISD::FMUL, MVT::v8f32, { 2, 5, 1, 2 } }, // BTVER2 from http://www.agner.org/
1233 { ISD::FDIV, MVT::f32, { 14, 14, 1, 1 } }, // SNB from http://www.agner.org/
1234 { ISD::FDIV, MVT::v4f32, { 14, 14, 1, 1 } }, // SNB from http://www.agner.org/
1235 { ISD::FDIV, MVT::v8f32, { 28, 29, 1, 3 } }, // SNB from http://www.agner.org/
1236 { ISD::FDIV, MVT::f64, { 22, 22, 1, 1 } }, // SNB from http://www.agner.org/
1237 { ISD::FDIV, MVT::v2f64, { 22, 22, 1, 1 } }, // SNB from http://www.agner.org/
1238 { ISD::FDIV, MVT::v4f64, { 44, 45, 1, 3 } }, // SNB from http://www.agner.org/
1239 };
1241 if (ST->hasAVX())
1242 if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
1243 if (auto KindCost = Entry->Cost[CostKind])
1244 return LT.first * *KindCost;
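// Worked example (illustrative): an ADD on MVT::v4i64 hits the
// { 4, 2, 5, 6 } entry above, so TCK_RecipThroughput returns 4 and
// TCK_Latency returns 2; an IR type like <8 x i64> legalizes to two v4i64
// registers, and LT.first == 2 doubles the returned cost.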
1246 static const CostKindTblEntry SSE42CostTable[] = {
1247 { ISD::FADD, MVT::f64, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
1248 { ISD::FADD, MVT::f32, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
1249 { ISD::FADD, MVT::v2f64, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
1250 { ISD::FADD, MVT::v4f32, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
1252 { ISD::FSUB, MVT::f64, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
1253 { ISD::FSUB, MVT::f32 , { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
1254 { ISD::FSUB, MVT::v2f64, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
1255 { ISD::FSUB, MVT::v4f32, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
1257 { ISD::FMUL, MVT::f64, { 1, 5, 1, 1 } }, // Nehalem from http://www.agner.org/
1258 { ISD::FMUL, MVT::f32, { 1, 5, 1, 1 } }, // Nehalem from http://www.agner.org/
1259 { ISD::FMUL, MVT::v2f64, { 1, 5, 1, 1 } }, // Nehalem from http://www.agner.org/
1260 { ISD::FMUL, MVT::v4f32, { 1, 5, 1, 1 } }, // Nehalem from http://www.agner.org/
1262 { ISD::FDIV, MVT::f32, { 14, 14, 1, 1 } }, // Nehalem from http://www.agner.org/
1263 { ISD::FDIV, MVT::v4f32, { 14, 14, 1, 1 } }, // Nehalem from http://www.agner.org/
1264 { ISD::FDIV, MVT::f64, { 22, 22, 1, 1 } }, // Nehalem from http://www.agner.org/
1265 { ISD::FDIV, MVT::v2f64, { 22, 22, 1, 1 } }, // Nehalem from http://www.agner.org/
1267 { ISD::MUL, MVT::v2i64, { 6, 10,10,10 } } // 3*pmuludq/3*shift/2*add
1268 };
1270 if (ST->hasSSE42())
1271 if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
1272 if (auto KindCost = Entry->Cost[CostKind])
1273 return LT.first * *KindCost;
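// The v2i64 MUL entry above is priced as the usual unsigned-widening
// expansion; roughly (a sketch of the lane-wise lowering):
//   lo  = pmuludq(a, b)                        // lo(a) * lo(b)
//   t1  = pmuludq(a, psrlq(b, 32))             // lo(a) * hi(b)
//   t2  = pmuludq(psrlq(a, 32), b)             // hi(a) * lo(b)
//   res = paddq(lo, psllq(paddq(t1, t2), 32))
// i.e. the "3*pmuludq/3*shift/2*add" annotated on the entry.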
1275 static const CostKindTblEntry SSE41CostTable[] = {
1276 { ISD::SHL, MVT::v16i8, { 15, 24,17,22 } }, // pblendvb sequence.
1277 { ISD::SHL, MVT::v8i16, { 11, 14,11,11 } }, // pblendvb sequence.
1278 { ISD::SHL, MVT::v4i32, { 14, 20, 4,10 } }, // pslld/paddd/cvttps2dq/pmulld
1280 { ISD::SRL, MVT::v16i8, { 16, 27,18,24 } }, // pblendvb sequence.
1281 { ISD::SRL, MVT::v8i16, { 22, 26,23,27 } }, // pblendvb sequence.
1282 { ISD::SRL, MVT::v4i32, { 16, 17,15,19 } }, // Shift each lane + blend.
1283 { ISD::SRL, MVT::v2i64, { 4, 6, 5, 7 } }, // splat+shuffle sequence.
1285 { ISD::SRA, MVT::v16i8, { 38, 41,30,36 } }, // pblendvb sequence.
1286 { ISD::SRA, MVT::v8i16, { 22, 26,23,27 } }, // pblendvb sequence.
1287 { ISD::SRA, MVT::v4i32, { 16, 17,15,19 } }, // Shift each lane + blend.
1288 { ISD::SRA, MVT::v2i64, { 8, 17, 5, 7 } }, // splat+shuffle sequence.
1290 { ISD::MUL, MVT::v16i8, { 5, 18,10,12 } }, // 2*unpack/2*pmullw/2*and/pack
1291 { ISD::MUL, MVT::v4i32, { 2, 11, 1, 1 } } // pmulld (Nehalem from agner.org)
1292 };
1294 if (ST->hasSSE41())
1295 if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
1296 if (auto KindCost = Entry->Cost[CostKind])
1297 return LT.first * *KindCost;
1299 static const CostKindTblEntry SSE2CostTable[] = {
1300 // We don't correctly identify costs of casts because they are marked as
1301 // custom.
1302 { ISD::SHL, MVT::v16i8, { 13, 21,26,28 } }, // cmpgtb sequence.
1303 { ISD::SHL, MVT::v8i16, { 24, 27,16,20 } }, // cmpgtw sequence.
1304 { ISD::SHL, MVT::v4i32, { 17, 19,10,12 } }, // pslld/paddd/cvttps2dq/pmuludq.
1305 { ISD::SHL, MVT::v2i64, { 4, 6, 5, 7 } }, // splat+shuffle sequence.
1307 { ISD::SRL, MVT::v16i8, { 14, 28,27,30 } }, // cmpgtb sequence.
1308 { ISD::SRL, MVT::v8i16, { 16, 19,31,31 } }, // cmpgtw sequence.
1309 { ISD::SRL, MVT::v4i32, { 12, 12,15,19 } }, // Shift each lane + blend.
1310 { ISD::SRL, MVT::v2i64, { 4, 6, 5, 7 } }, // splat+shuffle sequence.
1312 { ISD::SRA, MVT::v16i8, { 27, 30,54,54 } }, // unpacked cmpgtb sequence.
1313 { ISD::SRA, MVT::v8i16, { 16, 19,31,31 } }, // cmpgtw sequence.
1314 { ISD::SRA, MVT::v4i32, { 12, 12,15,19 } }, // Shift each lane + blend.
1315 { ISD::SRA, MVT::v2i64, { 8, 11,12,16 } }, // srl/xor/sub splat+shuffle sequence.
1317 { ISD::AND, MVT::v16i8, { 1, 1, 1, 1 } }, // pand
1318 { ISD::AND, MVT::v8i16, { 1, 1, 1, 1 } }, // pand
1319 { ISD::AND, MVT::v4i32, { 1, 1, 1, 1 } }, // pand
1320 { ISD::AND, MVT::v2i64, { 1, 1, 1, 1 } }, // pand
1322 { ISD::OR, MVT::v16i8, { 1, 1, 1, 1 } }, // por
1323 { ISD::OR, MVT::v8i16, { 1, 1, 1, 1 } }, // por
1324 { ISD::OR, MVT::v4i32, { 1, 1, 1, 1 } }, // por
1325 { ISD::OR, MVT::v2i64, { 1, 1, 1, 1 } }, // por
1327 { ISD::XOR, MVT::v16i8, { 1, 1, 1, 1 } }, // pxor
1328 { ISD::XOR, MVT::v8i16, { 1, 1, 1, 1 } }, // pxor
1329 { ISD::XOR, MVT::v4i32, { 1, 1, 1, 1 } }, // pxor
1330 { ISD::XOR, MVT::v2i64, { 1, 1, 1, 1 } }, // pxor
1332 { ISD::ADD, MVT::v2i64, { 1, 2, 1, 2 } }, // paddq
1333 { ISD::SUB, MVT::v2i64, { 1, 2, 1, 2 } }, // psubq
1335 { ISD::MUL, MVT::v16i8, { 5, 18,12,12 } }, // 2*unpack/2*pmullw/2*and/pack
1336 { ISD::MUL, MVT::v8i16, { 1, 5, 1, 1 } }, // pmullw
1337 { ISD::MUL, MVT::v4i32, { 6, 8, 7, 7 } }, // 3*pmuludq/4*shuffle
1338 { ISD::MUL, MVT::v2i64, { 7, 10,10,10 } }, // 3*pmuludq/3*shift/2*add
1340 { X86ISD::PMULUDQ, MVT::v2i64, { 1, 5, 1, 1 } },
1342 { ISD::FDIV, MVT::f32, { 23, 23, 1, 1 } }, // Pentium IV from http://www.agner.org/
1343 { ISD::FDIV, MVT::v4f32, { 39, 39, 1, 1 } }, // Pentium IV from http://www.agner.org/
1344 { ISD::FDIV, MVT::f64, { 38, 38, 1, 1 } }, // Pentium IV from http://www.agner.org/
1345 { ISD::FDIV, MVT::v2f64, { 69, 69, 1, 1 } }, // Pentium IV from http://www.agner.org/
1347 { ISD::FNEG, MVT::f32, { 1, 1, 1, 1 } }, // Pentium IV from http://www.agner.org/
1348 { ISD::FNEG, MVT::f64, { 1, 1, 1, 1 } }, // Pentium IV from http://www.agner.org/
1349 { ISD::FNEG, MVT::v4f32, { 1, 1, 1, 1 } }, // Pentium IV from http://www.agner.org/
1350 { ISD::FNEG, MVT::v2f64, { 1, 1, 1, 1 } }, // Pentium IV from http://www.agner.org/
1352 { ISD::FADD, MVT::f32, { 2, 3, 1, 1 } }, // Pentium IV from http://www.agner.org/
1353 { ISD::FADD, MVT::f64, { 2, 3, 1, 1 } }, // Pentium IV from http://www.agner.org/
1354 { ISD::FADD, MVT::v2f64, { 2, 3, 1, 1 } }, // Pentium IV from http://www.agner.org/
1356 { ISD::FSUB, MVT::f32, { 2, 3, 1, 1 } }, // Pentium IV from http://www.agner.org/
1357 { ISD::FSUB, MVT::f64, { 2, 3, 1, 1 } }, // Pentium IV from http://www.agner.org/
1358 { ISD::FSUB, MVT::v2f64, { 2, 3, 1, 1 } }, // Pentium IV from http://www.agner.org/
1360 { ISD::FMUL, MVT::f64, { 2, 5, 1, 1 } }, // Pentium IV from http://www.agner.org/
1361 { ISD::FMUL, MVT::v2f64, { 2, 5, 1, 1 } }, // Pentium IV from http://www.agner.org/
1362 };
1364 if (ST->hasSSE2())
1365 if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
1366 if (auto KindCost = Entry->Cost[CostKind])
1367 return LT.first * *KindCost;
1369 static const CostKindTblEntry SSE1CostTable[] = {
1370 { ISD::FDIV, MVT::f32, { 17, 18, 1, 1 } }, // Pentium III from http://www.agner.org/
1371 { ISD::FDIV, MVT::v4f32, { 34, 48, 1, 1 } }, // Pentium III from http://www.agner.org/
1373 { ISD::FNEG, MVT::f32, { 2, 2, 1, 2 } }, // Pentium III from http://www.agner.org/
1374 { ISD::FNEG, MVT::v4f32, { 2, 2, 1, 2 } }, // Pentium III from http://www.agner.org/
1376 { ISD::FADD, MVT::f32, { 1, 3, 1, 1 } }, // Pentium III from http://www.agner.org/
1377 { ISD::FADD, MVT::v4f32, { 2, 3, 1, 1 } }, // Pentium III from http://www.agner.org/
1379 { ISD::FSUB, MVT::f32, { 1, 3, 1, 1 } }, // Pentium III from http://www.agner.org/
1380 { ISD::FSUB, MVT::v4f32, { 2, 3, 1, 1 } }, // Pentium III from http://www.agner.org/
1382 { ISD::FMUL, MVT::f32, { 2, 5, 1, 1 } }, // Pentium III from http://www.agner.org/
1383 { ISD::FMUL, MVT::v4f32, { 2, 5, 1, 1 } }, // Pentium III from http://www.agner.org/
1384 };
1386 if (ST->hasSSE1())
1387 if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
1388 if (auto KindCost = Entry->Cost[CostKind])
1389 return LT.first * *KindCost;
1391 static const CostKindTblEntry X64CostTbl[] = { // 64-bit targets
1392 { ISD::ADD, MVT::i64, { 1 } }, // Core (Merom) from http://www.agner.org/
1393 { ISD::SUB, MVT::i64, { 1 } }, // Core (Merom) from http://www.agner.org/
1394 { ISD::MUL, MVT::i64, { 2, 6, 1, 2 } },
1395 };
1397 if (ST->is64Bit())
1398 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, LT.second))
1399 if (auto KindCost = Entry->Cost[CostKind])
1400 return LT.first * *KindCost;
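// Note: entries written as { 1 } in these scalar integer tables only supply
// a TCK_RecipThroughput value; the remaining cost kinds stay unset, so
// non-throughput queries fall through to the later tables and fallbacks.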
1402 static const CostKindTblEntry X86CostTbl[] = { // 32 or 64-bit targets
1403 { ISD::ADD, MVT::i8, { 1 } }, // Pentium III from http://www.agner.org/
1404 { ISD::ADD, MVT::i16, { 1 } }, // Pentium III from http://www.agner.org/
1405 { ISD::ADD, MVT::i32, { 1 } }, // Pentium III from http://www.agner.org/
1407 { ISD::SUB, MVT::i8, { 1 } }, // Pentium III from http://www.agner.org/
1408 { ISD::SUB, MVT::i16, { 1 } }, // Pentium III from http://www.agner.org/
1409 { ISD::SUB, MVT::i32, { 1 } }, // Pentium III from http://www.agner.org/
1411 { ISD::MUL, MVT::i8, { 3, 4, 1, 1 } },
1412 { ISD::MUL, MVT::i16, { 2, 4, 1, 1 } },
1413 { ISD::MUL, MVT::i32, { 1, 4, 1, 1 } },
1415 { ISD::FNEG, MVT::f64, { 2, 2, 1, 3 } }, // (x87)
1416 { ISD::FADD, MVT::f64, { 2, 3, 1, 1 } }, // (x87)
1417 { ISD::FSUB, MVT::f64, { 2, 3, 1, 1 } }, // (x87)
1418 { ISD::FMUL, MVT::f64, { 2, 5, 1, 1 } }, // (x87)
1419 { ISD::FDIV, MVT::f64, { 38, 38, 1, 1 } }, // (x87)
1420 };
1422 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, LT.second))
1423 if (auto KindCost = Entry->Cost[CostKind])
1424 return LT.first * *KindCost;
1426 // It is not a good idea to vectorize division. We have to scalarize it and
1427 // in the process we will often end up having to spill regular
1428 // registers. The overhead of division is going to dominate most kernels
1429 // anyway, so try hard to prevent vectorization of division - it is
1430 // generally a bad idea. Assume somewhat arbitrarily that we have to be able
1431 // to hide "20 cycles" for each lane.
1432 if (CostKind == TTI::TCK_RecipThroughput && LT.second.isVector() &&
1433 (ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV ||
1434 ISD == ISD::UREM)) {
1435 InstructionCost ScalarCost =
1436 getArithmeticInstrCost(Opcode, Ty->getScalarType(), CostKind,
1437 Op1Info.getNoProps(), Op2Info.getNoProps());
1438 return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
1439 }
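// Worked example (illustrative): a <4 x i32> SDIV queried for
// TCK_RecipThroughput has LT.first == 1 and 4 lanes, so it is priced at
// 80x the scalar SDIV cost - deliberately discouraging vectorization.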
1441 // Handle some basic single instruction code size cases.
1442 if (CostKind == TTI::TCK_CodeSize) {
1443 switch (ISD) {
1444 case ISD::FADD:
1445 case ISD::FSUB:
1446 case ISD::FMUL:
1447 case ISD::FDIV:
1448 case ISD::FNEG:
1449 case ISD::AND:
1450 case ISD::OR:
1451 case ISD::XOR:
1452 return LT.first;
1454 }
1455 }
1457 // Fallback to the default implementation.
1458 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
1459 Args, CxtI);
1460 }
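// Illustrative usage (a sketch, not from this file): clients normally reach
// this hook through the TargetTransformInfo facade, e.g.
//   InstructionCost C = TTI.getArithmeticInstrCost(
//       Instruction::FDiv, FixedVectorType::get(FloatTy, 4),
//       TTI::TCK_RecipThroughput);
// (TTI and FloatTy are placeholders for a TargetTransformInfo instance and a
// float Type*.) The query legalizes the type, walks the feature-gated tables
// above from newest to oldest ISA, and falls back to BasicTTIImpl otherwise.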
1462 InstructionCost
1463 X86TTIImpl::getAltInstrCost(VectorType *VecTy, unsigned Opcode0,
1464 unsigned Opcode1, const SmallBitVector &OpcodeMask,
1465 TTI::TargetCostKind CostKind) const {
1466 if (isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask))
1467 return TTI::TCC_Basic;
1468 return InstructionCost::getInvalid();
1469 }
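// Illustrative: an alternating fsub/fadd over <4 x float> (Opcode0/Opcode1
// with an even/odd OpcodeMask) is the pattern SSE3's addsubps lowers in one
// instruction, which is why a legal alternating pair costs a flat TCC_Basic.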
1471 InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1472 VectorType *BaseTp,
1473 ArrayRef<int> Mask,
1474 TTI::TargetCostKind CostKind,
1475 int Index, VectorType *SubTp,
1476 ArrayRef<const Value *> Args) {
1477 // 64-bit packed float vectors (v2f32) are widened to type v4f32.
1478 // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
1479 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(BaseTp);
1481 Kind = improveShuffleKindFromMask(Kind, Mask, BaseTp, Index, SubTp);
1483 // Treat Transpose as 2-op shuffles - there's no difference in lowering.
1484 if (Kind == TTI::SK_Transpose)
1485 Kind = TTI::SK_PermuteTwoSrc;
1487 // For Broadcasts we are splatting the first element from the first input
1488 // register, so we only need to reference that input; all the output
1489 // registers are the same.
1490 if (Kind == TTI::SK_Broadcast)
1491 LT.first = 1;
1493 // Treat <X x bfloat> shuffles as <X x half>.
1494 if (LT.second.isVector() && LT.second.getScalarType() == MVT::bf16)
1495 LT.second = LT.second.changeVectorElementType(MVT::f16);
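// (Shuffles only move 16-bit lanes; they are insensitive to the elements'
// floating-point semantics, so bf16 can reuse the f16 table entries.)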
1497 // Subvector extractions are free if they start at the beginning of a
1498 // vector and cheap if the subvectors are aligned.
1499 if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
1500 int NumElts = LT.second.getVectorNumElements();
1501 if ((Index % NumElts) == 0)
1502 return 0;
1503 std::pair<InstructionCost, MVT> SubLT = getTypeLegalizationCost(SubTp);
1504 if (SubLT.second.isVector()) {
1505 int NumSubElts = SubLT.second.getVectorNumElements();
1506 if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
1507 return SubLT.first;
1508 // Handle some cases for widening legalization. For now we only handle
1509 // cases where the original subvector was naturally aligned and evenly
1510 // fit in its legalized subvector type.
1511 // FIXME: Remove some of the alignment restrictions.
1512 // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
1513 // vectors.
1514 int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements();
1515 if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
1516 (NumSubElts % OrigSubElts) == 0 &&
1517 LT.second.getVectorElementType() ==
1518 SubLT.second.getVectorElementType() &&
1519 LT.second.getVectorElementType().getSizeInBits() ==
1520 BaseTp->getElementType()->getPrimitiveSizeInBits()) {
1521 assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
1522 "Unexpected number of elements!");
1523 auto *VecTy = FixedVectorType::get(BaseTp->getElementType(),
1524 LT.second.getVectorNumElements());
1525 auto *SubTy = FixedVectorType::get(BaseTp->getElementType(),
1526 SubLT.second.getVectorNumElements());
1527 int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
1528 InstructionCost ExtractCost =
1529 getShuffleCost(TTI::SK_ExtractSubvector, VecTy, std::nullopt,
1530 CostKind, ExtractIndex, SubTy);
1532 // If the original size is 32 bits or more, we can use pshufd. Otherwise,
1533 // if we have SSSE3 we can use pshufb.
1534 if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
1535 return ExtractCost + 1; // pshufd or pshufb
1537 assert(SubTp->getPrimitiveSizeInBits() == 16 &&
1538 "Unexpected vector size");
1540 return ExtractCost + 2; // worst case pshufhw + pshufd
1541 }
1542 }
1543 }
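// Worked example (illustrative): extracting the low <2 x i64> half of a
// <4 x i64> vector at Index 0 is free, while extracting the upper half at
// Index 2 returns SubLT.first - a single vextracti128 on AVX2, for example.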
1545 // Subvector insertions are cheap if the subvectors are aligned.
1546 // Note that in general, the insertion starting at the beginning of a vector
1547 // isn't free, because we need to preserve the rest of the wide vector.
1548 if (Kind == TTI::SK_InsertSubvector && LT.second.isVector()) {
1549 int NumElts = LT.second.getVectorNumElements();
1550 std::pair<InstructionCost, MVT> SubLT = getTypeLegalizationCost(SubTp);
1551 if (SubLT.second.isVector()) {
1552 int NumSubElts = SubLT.second.getVectorNumElements();
1553 if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
1554 return SubLT.first;
1555 }
1557 // If the insertion isn't aligned, treat it like a 2-op shuffle.
1558 Kind = TTI::SK_PermuteTwoSrc;
1559 }
1561 // Handle some common (illegal) sub-vector types as they are often very cheap
1562 // to shuffle even on targets without PSHUFB.
1563 EVT VT = TLI->getValueType(DL, BaseTp);
1564 if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
1565 !ST->hasSSSE3()) {
1566 static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
1567 {TTI::SK_Broadcast, MVT::v4i16, 1}, // pshuflw
1568 {TTI::SK_Broadcast, MVT::v2i16, 1}, // pshuflw
1569 {TTI::SK_Broadcast, MVT::v8i8, 2}, // punpck/pshuflw
1570 {TTI::SK_Broadcast, MVT::v4i8, 2}, // punpck/pshuflw
1571 {TTI::SK_Broadcast, MVT::v2i8, 1}, // punpck
1573 {TTI::SK_Reverse, MVT::v4i16, 1}, // pshuflw
1574 {TTI::SK_Reverse, MVT::v2i16, 1}, // pshuflw
1575 {TTI::SK_Reverse, MVT::v4i8, 3}, // punpck/pshuflw/packus
1576 {TTI::SK_Reverse, MVT::v2i8, 1}, // punpck
1578 {TTI::SK_Splice, MVT::v4i16, 2}, // punpck+psrldq
1579 {TTI::SK_Splice, MVT::v2i16, 2}, // punpck+psrldq
1580 {TTI::SK_Splice, MVT::v4i8, 2}, // punpck+psrldq
1581 {TTI::SK_Splice, MVT::v2i8, 2}, // punpck+psrldq
1583 {TTI::SK_PermuteTwoSrc, MVT::v4i16, 2}, // punpck/pshuflw
1584 {TTI::SK_PermuteTwoSrc, MVT::v2i16, 2}, // punpck/pshuflw
1585 {TTI::SK_PermuteTwoSrc, MVT::v8i8, 7}, // punpck/pshuflw
1586 {TTI::SK_PermuteTwoSrc, MVT::v4i8, 4}, // punpck/pshuflw
1587 {TTI::SK_PermuteTwoSrc, MVT::v2i8, 2}, // punpck
1589 {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
1590 {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
1591 {TTI::SK_PermuteSingleSrc, MVT::v8i8, 5}, // punpck/pshuflw
1592 {TTI::SK_PermuteSingleSrc, MVT::v4i8, 3}, // punpck/pshuflw
1593 {TTI::SK_PermuteSingleSrc, MVT::v2i8, 1}, // punpck
1594 };
1596 if (ST->hasSSE2())
1597 if (const auto *Entry =
1598 CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
1599 return Entry->Cost;
1600 }
1602 // We are going to permute multiple sources and the result will be in multiple
1603 // destinations. We provide an accurate cost only for splits where the element
1604 // type remains the same.
1605 if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
1606 MVT LegalVT = LT.second;
1607 if (LegalVT.isVector() &&
1608 LegalVT.getVectorElementType().getSizeInBits() ==
1609 BaseTp->getElementType()->getPrimitiveSizeInBits() &&
1610 LegalVT.getVectorNumElements() <
1611 cast<FixedVectorType>(BaseTp)->getNumElements()) {
1612 unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
1613 unsigned LegalVTSize = LegalVT.getStoreSize();
1614 // Number of source vectors after legalization:
1615 unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
1616 // Number of destination vectors after legalization:
1617 InstructionCost NumOfDests = LT.first;
1619 auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(),
1620 LegalVT.getVectorNumElements());
1622 if (!Mask.empty() && NumOfDests.isValid()) {
1623 // Try to perform a better estimation of the permutation.
1624 // 1. Split the source/destination vectors into real registers.
1625 // 2. Do the mask analysis to identify which real registers are
1626 // permuted. If more than one source register is used to build a
1627 // destination register, the cost for this destination register is
1628 // (Number_of_source_registers - 1) * Cost_PermuteTwoSrc. If only one
1629 // source register is used, build the mask and calculate the cost as
1630 // a cost of PermuteSingleSrc.
1631 // Also, for the single register permute we try to identify if the
1632 // destination register is just a copy of the source register or the
1633 // copy of the previous destination register (the cost is
1634 // TTI::TCC_Basic). If the source register is just reused, the cost for
1635 // this operation is 0.
1636 NumOfDests =
1637 getTypeLegalizationCost(
1638 FixedVectorType::get(BaseTp->getElementType(), Mask.size()))
1639 .first;
1640 unsigned E = *NumOfDests.getValue();
1641 unsigned NormalizedVF =
1642 LegalVT.getVectorNumElements() * std::max(NumOfSrcs, E);
1643 unsigned NumOfSrcRegs = NormalizedVF / LegalVT.getVectorNumElements();
1644 unsigned NumOfDestRegs = NormalizedVF / LegalVT.getVectorNumElements();
1645 SmallVector<int> NormalizedMask(NormalizedVF, PoisonMaskElem);
1646 copy(Mask, NormalizedMask.begin());
1647 unsigned PrevSrcReg = 0;
1648 ArrayRef<int> PrevRegMask;
1649 InstructionCost Cost = 0;
1650 processShuffleMasks(
1651 NormalizedMask, NumOfSrcRegs, NumOfDestRegs, NumOfDestRegs, []() {},
1652 [this, SingleOpTy, CostKind, &PrevSrcReg, &PrevRegMask,
1653 &Cost](ArrayRef<int> RegMask, unsigned SrcReg, unsigned DestReg) {
1654 if (!ShuffleVectorInst::isIdentityMask(RegMask, RegMask.size())) {
1655 // Check if the previous register can be just copied to the next
1656 // one.
1657 if (PrevRegMask.empty() || PrevSrcReg != SrcReg ||
1658 PrevRegMask != RegMask)
1659 Cost += getShuffleCost(TTI::SK_PermuteSingleSrc, SingleOpTy,
1660 RegMask, CostKind, 0, nullptr);
1661 else
1662 // Just a copy of previous destination register.
1663 Cost += TTI::TCC_Basic;
1664 return;
1665 }
1666 if (SrcReg != DestReg &&
1667 any_of(RegMask, [](int I) { return I != PoisonMaskElem; })) {
1668 // Just a copy of the source register.
1669 Cost += TTI::TCC_Basic;
1670 }
1671 PrevSrcReg = SrcReg;
1672 PrevRegMask = RegMask;
1673 },
1674 [this, SingleOpTy, CostKind, &Cost](ArrayRef<int> RegMask,
1675 unsigned /*Unused*/,
1676 unsigned /*Unused*/) {
1677 Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, RegMask,
1678 CostKind, 0, nullptr);
1679 });
1680 return Cost;
1681 }
1683 InstructionCost NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
1684 return NumOfShuffles * getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy,
1685 std::nullopt, CostKind, 0, nullptr);
1686 }
1688 return BaseT::getShuffleCost(Kind, BaseTp, Mask, CostKind, Index, SubTp);
1689 }
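// Illustrative: a <16 x i32> single-source shuffle on an SSE2 target splits
// into four v4i32 registers; when a mask is available, the
// processShuffleMasks walk above prices each destination register as a plain
// copy (TCC_Basic), one SK_PermuteSingleSrc, or a chain of SK_PermuteTwoSrc
// shuffles, which is usually much tighter than the mask-less
// (NumOfSrcs - 1) * NumOfDests estimate.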
1691 // For 2-input shuffles, we must account for splitting the 2 inputs into many.
1692 if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
1693 // We assume that source and destination have the same vector type.
1694 InstructionCost NumOfDests = LT.first;
1695 InstructionCost NumOfShufflesPerDest = LT.first * 2 - 1;
1696 LT.first = NumOfDests * NumOfShufflesPerDest;
1697 }
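// E.g. a two-source shuffle whose type splits in half has LT.first == 2:
// 2 destinations x (2*2 - 1) = 3 shuffles per destination, i.e. 6
// legal-width shuffles, which then scale the per-entry costs below.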
1699 static const CostTblEntry AVX512VBMIShuffleTbl[] = {
1700 {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb
1701 {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb
1703 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
1704 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb
1706 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 2}, // vpermt2b
1707 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 2}, // vpermt2b
1708 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 2} // vpermt2b
1709 };
1711 if (ST->hasVBMI())
1712 if (const auto *Entry =
1713 CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
1714 return LT.first * Entry->Cost;
1716 static const CostTblEntry AVX512BWShuffleTbl[] = {
1717 {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1718 {TTI::SK_Broadcast, MVT::v32f16, 1}, // vpbroadcastw
1719 {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb
1721 {TTI::SK_Reverse, MVT::v32i16, 2}, // vpermw
1722 {TTI::SK_Reverse, MVT::v32f16, 2}, // vpermw
1723 {TTI::SK_Reverse, MVT::v16i16, 2}, // vpermw
1724 {TTI::SK_Reverse, MVT::v64i8, 2}, // pshufb + vshufi64x2
1726 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
1727 {TTI::SK_PermuteSingleSrc, MVT::v32f16, 2}, // vpermw
1728 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
1729 {TTI::SK_PermuteSingleSrc, MVT::v16f16, 2}, // vpermw
1730 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 8}, // extend to v32i16
1732 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 2}, // vpermt2w
1733 {TTI::SK_PermuteTwoSrc, MVT::v32f16, 2}, // vpermt2w
1734 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 2}, // vpermt2w
1735 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 2}, // vpermt2w
1736 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 19}, // 6 * v32i8 + 1
1738 {TTI::SK_Select, MVT::v32i16, 1}, // vblendmw
1739 {TTI::SK_Select, MVT::v64i8, 1}, // vblendmb
1741 {TTI::SK_Splice, MVT::v32i16, 2}, // vshufi64x2 + palignr
1742 {TTI::SK_Splice, MVT::v32f16, 2}, // vshufi64x2 + palignr
1743 {TTI::SK_Splice, MVT::v64i8, 2}, // vshufi64x2 + palignr
1744 };
1746 if (ST->hasBWI())
1747 if (const auto *Entry =
1748 CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
1749 return LT.first * Entry->Cost;
1751 static const CostKindTblEntry AVX512ShuffleTbl[] = {
1752 {TTI::SK_Broadcast, MVT::v8f64, { 1, 1, 1, 1 } }, // vbroadcastsd
1753 {TTI::SK_Broadcast, MVT::v16f32, { 1, 1, 1, 1 } }, // vbroadcastss
1754 {TTI::SK_Broadcast, MVT::v8i64, { 1, 1, 1, 1 } }, // vpbroadcastq
1755 {TTI::SK_Broadcast, MVT::v16i32, { 1, 1, 1, 1 } }, // vpbroadcastd
1756 {TTI::SK_Broadcast, MVT::v32i16, { 1, 1, 1, 1 } }, // vpbroadcastw
1757 {TTI::SK_Broadcast, MVT::v32f16, { 1, 1, 1, 1 } }, // vpbroadcastw
1758 {TTI::SK_Broadcast, MVT::v64i8, { 1, 1, 1, 1 } }, // vpbroadcastb
1760 {TTI::SK_Reverse, MVT::v8f64, { 1, 3, 1, 1 } }, // vpermpd
1761 {TTI::SK_Reverse, MVT::v16f32, { 1, 3, 1, 1 } }, // vpermps
1762 {TTI::SK_Reverse, MVT::v8i64, { 1, 3, 1, 1 } }, // vpermq
1763 {TTI::SK_Reverse, MVT::v16i32, { 1, 3, 1, 1 } }, // vpermd
1764 {TTI::SK_Reverse, MVT::v32i16, { 7, 7, 7, 7 } }, // per mca
1765 {TTI::SK_Reverse, MVT::v32f16, { 7, 7, 7, 7 } }, // per mca
1766 {TTI::SK_Reverse, MVT::v64i8, { 7, 7, 7, 7 } }, // per mca
1768 {TTI::SK_Splice, MVT::v8f64, { 1, 1, 1, 1 } }, // vpalignd
1769 {TTI::SK_Splice, MVT::v4f64, { 1, 1, 1, 1 } }, // vpalignd
1770 {TTI::SK_Splice, MVT::v16f32, { 1, 1, 1, 1 } }, // vpalignd
1771 {TTI::SK_Splice, MVT::v8f32, { 1, 1, 1, 1 } }, // vpalignd
1772 {TTI::SK_Splice, MVT::v8i64, { 1, 1, 1, 1 } }, // vpalignd
1773 {TTI::SK_Splice, MVT::v4i64, { 1, 1, 1, 1 } }, // vpalignd
1774 {TTI::SK_Splice, MVT::v16i32, { 1, 1, 1, 1 } }, // vpalignd
1775 {TTI::SK_Splice, MVT::v8i32, { 1, 1, 1, 1 } }, // vpalignd
1776 {TTI::SK_Splice, MVT::v32i16, { 4, 4, 4, 4 } }, // split + palignr
1777 {TTI::SK_Splice, MVT::v32f16, { 4, 4, 4, 4 } }, // split + palignr
1778 {TTI::SK_Splice, MVT::v64i8, { 4, 4, 4, 4 } }, // split + palignr
1780 {TTI::SK_PermuteSingleSrc, MVT::v8f64, { 1, 3, 1, 1 } }, // vpermpd
1781 {TTI::SK_PermuteSingleSrc, MVT::v4f64, { 1, 3, 1, 1 } }, // vpermpd
1782 {TTI::SK_PermuteSingleSrc, MVT::v2f64, { 1, 3, 1, 1 } }, // vpermpd
1783 {TTI::SK_PermuteSingleSrc, MVT::v16f32, { 1, 3, 1, 1 } }, // vpermps
1784 {TTI::SK_PermuteSingleSrc, MVT::v8f32, { 1, 3, 1, 1 } }, // vpermps
1785 {TTI::SK_PermuteSingleSrc, MVT::v4f32, { 1, 3, 1, 1 } }, // vpermps
1786 {TTI::SK_PermuteSingleSrc, MVT::v8i64, { 1, 3, 1, 1 } }, // vpermq
1787 {TTI::SK_PermuteSingleSrc, MVT::v4i64, { 1, 3, 1, 1 } }, // vpermq
1788 {TTI::SK_PermuteSingleSrc, MVT::v2i64, { 1, 3, 1, 1 } }, // vpermq
1789 {TTI::SK_PermuteSingleSrc, MVT::v16i32, { 1, 3, 1, 1 } }, // vpermd
1790 {TTI::SK_PermuteSingleSrc, MVT::v8i32, { 1, 3, 1, 1 } }, // vpermd
1791 {TTI::SK_PermuteSingleSrc, MVT::v4i32, { 1, 3, 1, 1 } }, // vpermd
1792 {TTI::SK_PermuteSingleSrc, MVT::v16i8, { 1, 3, 1, 1 } }, // pshufb
1794 {TTI::SK_PermuteTwoSrc, MVT::v8f64, { 1, 3, 1, 1 } }, // vpermt2pd
1795 {TTI::SK_PermuteTwoSrc, MVT::v16f32, { 1, 3, 1, 1 } }, // vpermt2ps
1796 {TTI::SK_PermuteTwoSrc, MVT::v8i64, { 1, 3, 1, 1 } }, // vpermt2q
1797 {TTI::SK_PermuteTwoSrc, MVT::v16i32, { 1, 3, 1, 1 } }, // vpermt2d
1798 {TTI::SK_PermuteTwoSrc, MVT::v4f64, { 1, 3, 1, 1 } }, // vpermt2pd
1799 {TTI::SK_PermuteTwoSrc, MVT::v8f32, { 1, 3, 1, 1 } }, // vpermt2ps
1800 {TTI::SK_PermuteTwoSrc, MVT::v4i64, { 1, 3, 1, 1 } }, // vpermt2q
1801 {TTI::SK_PermuteTwoSrc, MVT::v8i32, { 1, 3, 1, 1 } }, // vpermt2d
1802 {TTI::SK_PermuteTwoSrc, MVT::v2f64, { 1, 3, 1, 1 } }, // vpermt2pd
1803 {TTI::SK_PermuteTwoSrc, MVT::v4f32, { 1, 3, 1, 1 } }, // vpermt2ps
1804 {TTI::SK_PermuteTwoSrc, MVT::v2i64, { 1, 3, 1, 1 } }, // vpermt2q
1805 {TTI::SK_PermuteTwoSrc, MVT::v4i32, { 1, 3, 1, 1 } }, // vpermt2d
1807 // FIXME: This just applies the type legalization cost rules above
1808 // assuming these completely split.
1809 {TTI::SK_PermuteSingleSrc, MVT::v32i16, { 14, 14, 14, 14 } },
1810 {TTI::SK_PermuteSingleSrc, MVT::v32f16, { 14, 14, 14, 14 } },
1811 {TTI::SK_PermuteSingleSrc, MVT::v64i8, { 14, 14, 14, 14 } },
1812 {TTI::SK_PermuteTwoSrc, MVT::v32i16, { 42, 42, 42, 42 } },
1813 {TTI::SK_PermuteTwoSrc, MVT::v32f16, { 42, 42, 42, 42 } },
1814 {TTI::SK_PermuteTwoSrc, MVT::v64i8, { 42, 42, 42, 42 } },
1816 {TTI::SK_Select, MVT::v32i16, { 1, 1, 1, 1 } }, // vpternlogq
1817 {TTI::SK_Select, MVT::v32f16, { 1, 1, 1, 1 } }, // vpternlogq
1818 {TTI::SK_Select, MVT::v64i8, { 1, 1, 1, 1 } }, // vpternlogq
1819 {TTI::SK_Select, MVT::v8f64, { 1, 1, 1, 1 } }, // vblendmpd
1820 {TTI::SK_Select, MVT::v16f32, { 1, 1, 1, 1 } }, // vblendmps
1821 {TTI::SK_Select, MVT::v8i64, { 1, 1, 1, 1 } }, // vblendmq
1822 {TTI::SK_Select, MVT::v16i32, { 1, 1, 1, 1 } }, // vblendmd
1823 };
1825 if (ST->hasAVX512())
1826 if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
1827 if (auto KindCost = Entry->Cost[CostKind])
1828 return LT.first * *KindCost;
1830 static const CostTblEntry AVX2ShuffleTbl[] = {
1831 {TTI::SK_Broadcast, MVT::v4f64, 1}, // vbroadcastpd
1832 {TTI::SK_Broadcast, MVT::v8f32, 1}, // vbroadcastps
1833 {TTI::SK_Broadcast, MVT::v4i64, 1}, // vpbroadcastq
1834 {TTI::SK_Broadcast, MVT::v8i32, 1}, // vpbroadcastd
1835 {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw
1836 {TTI::SK_Broadcast, MVT::v16f16, 1}, // vpbroadcastw
1837 {TTI::SK_Broadcast, MVT::v32i8, 1}, // vpbroadcastb
1839 {TTI::SK_Reverse, MVT::v4f64, 1}, // vpermpd
1840 {TTI::SK_Reverse, MVT::v8f32, 1}, // vpermps
1841 {TTI::SK_Reverse, MVT::v4i64, 1}, // vpermq
1842 {TTI::SK_Reverse, MVT::v8i32, 1}, // vpermd
1843 {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb
1844 {TTI::SK_Reverse, MVT::v16f16, 2}, // vperm2i128 + pshufb
1845 {TTI::SK_Reverse, MVT::v32i8, 2}, // vperm2i128 + pshufb
1847 {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb
1848 {TTI::SK_Select, MVT::v16f16, 1}, // vpblendvb
1849 {TTI::SK_Select, MVT::v32i8, 1}, // vpblendvb
1851 {TTI::SK_Splice, MVT::v8i32, 2}, // vperm2i128 + vpalignr
1852 {TTI::SK_Splice, MVT::v8f32, 2}, // vperm2i128 + vpalignr
1853 {TTI::SK_Splice, MVT::v16i16, 2}, // vperm2i128 + vpalignr
1854 {TTI::SK_Splice, MVT::v16f16, 2}, // vperm2i128 + vpalignr
1855 {TTI::SK_Splice, MVT::v32i8, 2}, // vperm2i128 + vpalignr
1857 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd
1858 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps
1859 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq
1860 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd
1861 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
1862 // + vpblendvb
1863 {TTI::SK_PermuteSingleSrc, MVT::v16f16, 4}, // vperm2i128 + 2*vpshufb
1864 // + vpblendvb
1865 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vperm2i128 + 2*vpshufb
1866 // + vpblendvb
1868 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vpermpd + vblendpd
1869 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 3}, // 2*vpermps + vblendps
1870 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vpermq + vpblendd
1871 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3}, // 2*vpermd + vpblendd
1872 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
1873 // + vpblendvb
1874 {TTI::SK_PermuteTwoSrc, MVT::v16f16, 7}, // 2*vperm2i128 + 4*vpshufb
1875 // + vpblendvb
1876 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7}, // 2*vperm2i128 + 4*vpshufb
1877 // + vpblendvb
1878 };
1880 if (ST->hasAVX2())
1881 if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
1882 return LT.first * Entry->Cost;
1884 static const CostTblEntry XOPShuffleTbl[] = {
1885 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vpermil2pd
1886 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2}, // vperm2f128 + vpermil2ps
1887 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vpermil2pd
1888 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2}, // vperm2f128 + vpermil2ps
1889 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
1890 // + vinsertf128
1891 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vextractf128 + 2*vpperm
1892 // + vinsertf128
1894 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm
1895 // + vinsertf128
1896 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1}, // vpperm
1897 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9}, // 2*vextractf128 + 6*vpperm
1898 // + vinsertf128
1899 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1}, // vpperm
1900 };
1902 if (ST->hasXOP())
1903 if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
1904 return LT.first * Entry->Cost;
1906 static const CostTblEntry AVX1ShuffleTbl[] = {
1907 {TTI::SK_Broadcast, MVT::v4f64, 2}, // vperm2f128 + vpermilpd
1908 {TTI::SK_Broadcast, MVT::v8f32, 2}, // vperm2f128 + vpermilps
1909 {TTI::SK_Broadcast, MVT::v4i64, 2}, // vperm2f128 + vpermilpd
1910 {TTI::SK_Broadcast, MVT::v8i32, 2}, // vperm2f128 + vpermilps
1911 {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128
1912 {TTI::SK_Broadcast, MVT::v16f16, 3}, // vpshuflw + vpshufd + vinsertf128
1913 {TTI::SK_Broadcast, MVT::v32i8, 2}, // vpshufb + vinsertf128
1915 {TTI::SK_Reverse, MVT::v4f64, 2}, // vperm2f128 + vpermilpd
1916 {TTI::SK_Reverse, MVT::v8f32, 2}, // vperm2f128 + vpermilps
1917 {TTI::SK_Reverse, MVT::v4i64, 2}, // vperm2f128 + vpermilpd
1918 {TTI::SK_Reverse, MVT::v8i32, 2}, // vperm2f128 + vpermilps
1919 {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb
1920 // + vinsertf128
1921 {TTI::SK_Reverse, MVT::v16f16, 4}, // vextractf128 + 2*pshufb
1922 // + vinsertf128
1923 {TTI::SK_Reverse, MVT::v32i8, 4}, // vextractf128 + 2*pshufb
1924 // + vinsertf128
1926 {TTI::SK_Select, MVT::v4i64, 1}, // vblendpd
1927 {TTI::SK_Select, MVT::v4f64, 1}, // vblendpd
1928 {TTI::SK_Select, MVT::v8i32, 1}, // vblendps
1929 {TTI::SK_Select, MVT::v8f32, 1}, // vblendps
1930 {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor
1931 {TTI::SK_Select, MVT::v16f16, 3}, // vpand + vpandn + vpor
1932 {TTI::SK_Select, MVT::v32i8, 3}, // vpand + vpandn + vpor
1934 {TTI::SK_Splice, MVT::v4i64, 2}, // vperm2f128 + shufpd
1935 {TTI::SK_Splice, MVT::v4f64, 2}, // vperm2f128 + shufpd
1936 {TTI::SK_Splice, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
1937 {TTI::SK_Splice, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
1938 {TTI::SK_Splice, MVT::v16i16, 5}, // 2*vperm2f128 + 2*vpalignr + vinsertf128
1939 {TTI::SK_Splice, MVT::v16f16, 5}, // 2*vperm2f128 + 2*vpalignr + vinsertf128
1940 {TTI::SK_Splice, MVT::v32i8, 5}, // 2*vperm2f128 + 2*vpalignr + vinsertf128
1942 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vshufpd
1943 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vshufpd
1944 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
1945 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
1946 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb
1947 // + 2*por + vinsertf128
1948 {TTI::SK_PermuteSingleSrc, MVT::v16f16, 8}, // vextractf128 + 4*pshufb
1949 // + 2*por + vinsertf128
1950 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8}, // vextractf128 + 4*pshufb
1951 // + 2*por + vinsertf128
1953 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vperm2f128 + vshufpd
1954 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vperm2f128 + vshufpd
1955 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
1956 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
1957 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb
1958 // + 4*por + vinsertf128
1959 {TTI::SK_PermuteTwoSrc, MVT::v16f16, 15}, // 2*vextractf128 + 8*pshufb
1960 // + 4*por + vinsertf128
1961 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15}, // 2*vextractf128 + 8*pshufb
1962 // + 4*por + vinsertf128
1963 };
1965 if (ST->hasAVX())
1966 if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
1967 return LT.first * Entry->Cost;
1969 static const CostTblEntry SSE41ShuffleTbl[] = {
1970 {TTI::SK_Select, MVT::v2i64, 1}, // pblendw
1971 {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1972 {TTI::SK_Select, MVT::v4i32, 1}, // pblendw
1973 {TTI::SK_Select, MVT::v4f32, 1}, // blendps
1974 {TTI::SK_Select, MVT::v8i16, 1}, // pblendw
1975 {TTI::SK_Select, MVT::v8f16, 1}, // pblendw
1976 {TTI::SK_Select, MVT::v16i8, 1} // pblendvb
1977 };
1979 if (ST->hasSSE41())
1980 if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
1981 return LT.first * Entry->Cost;
1983 static const CostTblEntry SSSE3ShuffleTbl[] = {
1984 {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb
1985 {TTI::SK_Broadcast, MVT::v8f16, 1}, // pshufb
1986 {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb
1988 {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb
1989 {TTI::SK_Reverse, MVT::v8f16, 1}, // pshufb
1990 {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb
1992 {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por
1993 {TTI::SK_Select, MVT::v8f16, 3}, // 2*pshufb + por
1994 {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por
1996 {TTI::SK_Splice, MVT::v4i32, 1}, // palignr
1997 {TTI::SK_Splice, MVT::v4f32, 1}, // palignr
1998 {TTI::SK_Splice, MVT::v8i16, 1}, // palignr
1999 {TTI::SK_Splice, MVT::v8f16, 1}, // palignr
2000 {TTI::SK_Splice, MVT::v16i8, 1}, // palignr
2002 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
2003 {TTI::SK_PermuteSingleSrc, MVT::v8f16, 1}, // pshufb
2004 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
2006 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por
2007 {TTI::SK_PermuteTwoSrc, MVT::v8f16, 3}, // 2*pshufb + por
2008 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por
2009 };
2011 if (ST->hasSSSE3())
2012 if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
2013 return LT.first * Entry->Cost;
2015 static const CostTblEntry SSE2ShuffleTbl[] = {
2016 {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd
2017 {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd
2018 {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd
2019 {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd
2020 {TTI::SK_Broadcast, MVT::v8f16, 2}, // pshuflw + pshufd
2021 {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd
2023 {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd
2024 {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd
2025 {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd
2026 {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
2027 {TTI::SK_Reverse, MVT::v8f16, 3}, // pshuflw + pshufhw + pshufd
2028 {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
2029 // + 2*pshufd + 2*unpck + packus
2031 {TTI::SK_Select, MVT::v2i64, 1}, // movsd
2032 {TTI::SK_Select, MVT::v2f64, 1}, // movsd
2033 {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps
2034 {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por
2035 {TTI::SK_Select, MVT::v8f16, 3}, // pand + pandn + por
2036 {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por
2038 {TTI::SK_Splice, MVT::v2i64, 1}, // shufpd
2039 {TTI::SK_Splice, MVT::v2f64, 1}, // shufpd
2040 {TTI::SK_Splice, MVT::v4i32, 2}, // 2*{unpck,movsd,pshufd}
2041 {TTI::SK_Splice, MVT::v8i16, 3}, // psrldq + pslldq + por
2042 {TTI::SK_Splice, MVT::v8f16, 3}, // psrldq + pslldq + por
2043 {TTI::SK_Splice, MVT::v16i8, 3}, // psrldq + pslldq + por
2045 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd
2046 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd
2047 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd
2048 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw
2049 // + pshufd/unpck
2050 {TTI::SK_PermuteSingleSrc, MVT::v8f16, 5}, // 2*pshuflw + 2*pshufhw
2051 // + pshufd/unpck
2052 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw
2053 // + 2*pshufd + 2*unpck + 2*packus
2055 { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // shufpd
2056 { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // shufpd
2057 { TTI::SK_PermuteTwoSrc, MVT::v4i32, 2 }, // 2*{unpck,movsd,pshufd}
2058 { TTI::SK_PermuteTwoSrc, MVT::v8i16, 8 }, // blend+permute
2059 { TTI::SK_PermuteTwoSrc, MVT::v8f16, 8 }, // blend+permute
2060 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 13 }, // blend+permute
2061 };
2063 static const CostTblEntry SSE3BroadcastLoadTbl[] = {
2064 {TTI::SK_Broadcast, MVT::v2f64, 0}, // broadcast handled by movddup
2065 };
2067 if (ST->hasSSE2()) {
2068 bool IsLoad =
2069 llvm::any_of(Args, [](const auto &V) { return isa<LoadInst>(V); });
2070 if (ST->hasSSE3() && IsLoad)
2071 if (const auto *Entry =
2072 CostTableLookup(SSE3BroadcastLoadTbl, Kind, LT.second)) {
2073 assert(isLegalBroadcastLoad(BaseTp->getElementType(),
2074 LT.second.getVectorElementCount()) &&
2075 "Table entry missing from isLegalBroadcastLoad()");
2076 return LT.first * Entry->Cost;
2077 }
2079 if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
2080 return LT.first * Entry->Cost;
2081 }
2083 static const CostTblEntry SSE1ShuffleTbl[] = {
2084 { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps
2085 { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps
2086 { TTI::SK_Select, MVT::v4f32, 2 }, // 2*shufps
2087 { TTI::SK_Splice, MVT::v4f32, 2 }, // 2*shufps
2088 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
2089 { TTI::SK_PermuteTwoSrc, MVT::v4f32, 2 }, // 2*shufps
2090 };
2092 if (ST->hasSSE1())
2093 if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
2094 return LT.first * Entry->Cost;
2096 return BaseT::getShuffleCost(Kind, BaseTp, Mask, CostKind, Index, SubTp);
2097 }
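// Illustrative usage (a sketch; VecTy is a placeholder VectorType*):
//   InstructionCost C = TTI.getShuffleCost(TTI::SK_Broadcast, VecTy,
//                                          std::nullopt, CostKind, 0,
//                                          /*SubTp=*/nullptr);
// The kind/mask normalization above runs first, and the most specific ISA
// table matching the legalized type then decides the cost.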
2099 InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
2100 Type *Src,
2101 TTI::CastContextHint CCH,
2102 TTI::TargetCostKind CostKind,
2103 const Instruction *I) {
2104 int ISD = TLI->InstructionOpcodeToISD(Opcode);
2105 assert(ISD && "Invalid opcode");
2107 // TODO: Allow non-throughput costs that aren't binary.
2108 auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
2109 if (CostKind != TTI::TCK_RecipThroughput)
2110 return Cost == 0 ? 0 : 1;
2111 return Cost;
2112 };
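// E.g. a conversion priced at 3 in the tables below is reported as 1 for a
// TCK_CodeSize or TCK_Latency query, while a genuinely free conversion stays
// at 0; only TCK_RecipThroughput sees the full table value.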
2114 // The cost tables include both specific, custom (non-legal) src/dst type
2115 // conversions and generic, legalized types. We test for customs first, before
2116 // falling back to legalization.
2117 // FIXME: Need a better design of the cost table to handle non-simple types of
2118 // potential massive combinations (elem_num x src_type x dst_type).
2119 static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
2120 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
2121 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
2123 // Mask sign extend has an instruction.
2124 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
2125 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v2i1, 1 },
2126 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
2127 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v2i1, 1 },
2128 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
2129 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v4i1, 1 },
2130 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
2131 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v4i1, 1 },
2132 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
2133 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v8i1, 1 },
2134 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
2135 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
2136 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
2137 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
2138 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 },
2139 { ISD::SIGN_EXTEND, MVT::v64i8, MVT::v64i1, 1 },
2140 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v64i1, 1 },
2142 // Mask zero extend is a sext + shift.
2143 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
2144 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v2i1, 2 },
2145 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
2146 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v2i1, 2 },
2147 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
2148 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v4i1, 2 },
2149 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
2150 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v4i1, 2 },
2151 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
2152 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v8i1, 2 },
2153 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
2154 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
2155 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
2156 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
2157 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 },
2158 { ISD::ZERO_EXTEND, MVT::v64i8, MVT::v64i1, 2 },
2159 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v64i1, 2 },
2161 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 },
2162 { ISD::TRUNCATE, MVT::v2i1, MVT::v16i8, 2 },
2163 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 },
2164 { ISD::TRUNCATE, MVT::v2i1, MVT::v8i16, 2 },
2165 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 },
2166 { ISD::TRUNCATE, MVT::v4i1, MVT::v16i8, 2 },
2167 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 },
2168 { ISD::TRUNCATE, MVT::v4i1, MVT::v8i16, 2 },
2169 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 },
2170 { ISD::TRUNCATE, MVT::v8i1, MVT::v16i8, 2 },
2171 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 },
2172 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 },
2173 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 },
2174 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 },
2175 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i16, 2 },
2176 { ISD::TRUNCATE, MVT::v64i1, MVT::v64i8, 2 },
2177 { ISD::TRUNCATE, MVT::v64i1, MVT::v32i16, 2 },
2179 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 2 },
2180 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // widen to zmm
2181 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 2 }, // vpmovwb
2182 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 }, // vpmovwb
2183 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, // vpmovwb
2184 };
2186 static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
2187 // Mask sign extend has an instruction.
2188 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 },
2189 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v2i1, 1 },
2190 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 },
2191 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 },
2192 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 },
2193 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v16i1, 1 },
2194 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 },
2195 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 },
2197 // Mask zero extend is a sext + shift.
2198 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 },
2199 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v2i1, 2 },
2200 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 },
2201 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 },
2202 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 },
2203 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v16i1, 2 },
2204 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 },
2205 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 },
2207 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 },
2208 { ISD::TRUNCATE, MVT::v2i1, MVT::v4i32, 2 },
2209 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 },
2210 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 },
2211 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 },
2212 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 },
2213 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 },
2214 { ISD::TRUNCATE, MVT::v16i1, MVT::v8i64, 2 },
2216 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
2217 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
2219 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
2220 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
2222 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 },
2223 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 },
2225 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
2226 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
2227 };
2229 // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
2230 // 256-bit wide vectors.
2232 static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
2233 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 },
2234 { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 },
2235 { ISD::FP_EXTEND, MVT::v16f64, MVT::v16f32, 4 }, // 2*vcvtps2pd+vextractf64x4
2236 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 },
2238 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
2239 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
2240 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
2241 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 3 }, // sext+vpslld+vptestmd
2242 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
2243 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
2244 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
2245 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 3 }, // sext+vpslld+vptestmd
2246 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // zmm vpslld+vptestmd
2247 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // zmm vpslld+vptestmd
2248 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // zmm vpslld+vptestmd
2249 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 }, // vpslld+vptestmd
2250 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // zmm vpsllq+vptestmq
2251 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // zmm vpsllq+vptestmq
2252 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 }, // vpsllq+vptestmq
2253 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i32, 2 }, // vpmovdb
2254 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 2 }, // vpmovdb
2255 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 2 }, // vpmovdb
2256 { ISD::TRUNCATE, MVT::v32i8, MVT::v16i32, 2 }, // vpmovdb
2257 { ISD::TRUNCATE, MVT::v64i8, MVT::v16i32, 2 }, // vpmovdb
2258 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2 }, // vpmovdw
2259 { ISD::TRUNCATE, MVT::v32i16, MVT::v16i32, 2 }, // vpmovdw
2260 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 2 }, // vpmovqb
2261 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 1 }, // vpshufb
2262 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 2 }, // vpmovqb
2263 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i64, 2 }, // vpmovqb
2264 { ISD::TRUNCATE, MVT::v32i8, MVT::v8i64, 2 }, // vpmovqb
2265 { ISD::TRUNCATE, MVT::v64i8, MVT::v8i64, 2 }, // vpmovqb
2266 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 2 }, // vpmovqw
2267 { ISD::TRUNCATE, MVT::v16i16, MVT::v8i64, 2 }, // vpmovqw
2268 { ISD::TRUNCATE, MVT::v32i16, MVT::v8i64, 2 }, // vpmovqw
2269 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 }, // vpmovqd
2270 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // zmm vpmovqd
2271 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 5 },// 2*vpmovqd+concat+vpmovdb
2273 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, // extend to v16i32
2274 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 8 },
2275 { ISD::TRUNCATE, MVT::v64i8, MVT::v32i16, 8 },
2277 // Sign extend is zmm vpternlogd+vptruncdb.
2278 // Zero extend is zmm broadcast load+vptruncdw.
2279 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 3 },
2280 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 4 },
2281 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 3 },
2282 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 4 },
2283 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 3 },
2284 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 4 },
2285 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 3 },
2286 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 4 },
2288 // Sign extend is zmm vpternlogd+vptruncdw.
2289 // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw.
2290 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 3 },
2291 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
2292 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 3 },
2293 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
2294 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 3 },
2295 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
2296 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 3 },
2297 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
2299 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // zmm vpternlogd
2300 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // zmm vpternlogd+psrld
2301 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // zmm vpternlogd
2302 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // zmm vpternlogd+psrld
2303 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // zmm vpternlogd
2304 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // zmm vpternlogd+psrld
2305 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // zmm vpternlogq
2306 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // zmm vpternlogq+psrlq
2307 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // zmm vpternlogq
2308 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // zmm vpternlogq+psrlq
2310 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 }, // vpternlogd
2311 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, // vpternlogd+psrld
2312 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 }, // vpternlogq
2313 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 }, // vpternlogq+psrlq
2315 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
2316 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
2317 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
2318 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
2319 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
2320 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
2321 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
2322 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
2323 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
2324 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
2326 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
2327 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
2329 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
2330 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
2331 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 },
2332 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 },
2333 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
2334 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 },
2335 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
2336 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
2338 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
2339 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
2340 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 },
2341 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 },
2342 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
2343 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 },
2344 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
2345 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
2346 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 },
2347 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 5 },
2349 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 },
2350 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f64, 7 },
2351 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v32f64,15 },
2352 { ISD::FP_TO_SINT, MVT::v64i8, MVT::v64f32,11 },
2353 { ISD::FP_TO_SINT, MVT::v64i8, MVT::v64f64,31 },
2354 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f64, 3 },
2355 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f64, 7 },
2356 { ISD::FP_TO_SINT, MVT::v32i16, MVT::v32f32, 5 },
2357 { ISD::FP_TO_SINT, MVT::v32i16, MVT::v32f64,15 },
2358 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 1 },
2359 { ISD::FP_TO_SINT, MVT::v16i32, MVT::v16f64, 3 },
2361 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 },
2362 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f64, 3 },
2363 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f64, 3 },
2364 { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 },
2365 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 3 },
2366 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f32, 3 },
2367 };
2369 static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] {
2370 // Mask sign extend has an instruction.
2371 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
2372 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v2i1, 1 },
2373 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
2374 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v2i1, 1 },
2375 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
2376 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v4i1, 1 },
2377 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
2378 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v4i1, 1 },
2379 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
2380 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v8i1, 1 },
2381 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
2382 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
2383 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
2384 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
2385 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v32i1, 1 },
2386 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v64i1, 1 },
2387 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v64i1, 1 },
2389 // Mask zero extend is a sext + shift.
2390 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
2391 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v2i1, 2 },
2392 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
2393 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v2i1, 2 },
2394 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
2395 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v4i1, 2 },
2396 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
2397 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v4i1, 2 },
2398 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
2399 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v8i1, 2 },
2400 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
2401 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
2402 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
2403 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
2404 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v32i1, 2 },
2405 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v64i1, 2 },
2406 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v64i1, 2 },
2408 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 },
2409 { ISD::TRUNCATE, MVT::v2i1, MVT::v16i8, 2 },
2410 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 },
2411 { ISD::TRUNCATE, MVT::v2i1, MVT::v8i16, 2 },
2412 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 },
2413 { ISD::TRUNCATE, MVT::v4i1, MVT::v16i8, 2 },
2414 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 },
2415 { ISD::TRUNCATE, MVT::v4i1, MVT::v8i16, 2 },
2416 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 },
2417 { ISD::TRUNCATE, MVT::v8i1, MVT::v16i8, 2 },
2418 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 },
2419 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 },
2420 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 },
2421 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 },
2422 { ISD::TRUNCATE, MVT::v32i1, MVT::v16i16, 2 },
2423 { ISD::TRUNCATE, MVT::v64i1, MVT::v32i8, 2 },
2424 { ISD::TRUNCATE, MVT::v64i1, MVT::v16i16, 2 },
2426 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 },
2427 };
2429 static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = {
2430 // Mask sign extend has an instruction.
2431 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 },
2432 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v2i1, 1 },
2433 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 },
2434 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i1, 1 },
2435 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 },
2436 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i1, 1 },
2437 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i1, 1 },
2438 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 },
2440 // Mask zero extend is a sext + shift.
2441 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 },
2442 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v2i1, 2 },
2443 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 },
2444 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i1, 2 },
2445 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 },
2446 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i1, 2 },
2447 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i1, 2 },
2448 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 },
2450 { ISD::TRUNCATE, MVT::v16i1, MVT::v4i64, 2 },
2451 { ISD::TRUNCATE, MVT::v16i1, MVT::v8i32, 2 },
2452 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 },
2453 { ISD::TRUNCATE, MVT::v2i1, MVT::v4i32, 2 },
2454 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 },
2455 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 },
2456 { ISD::TRUNCATE, MVT::v8i1, MVT::v4i64, 2 },
2457 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 },
2459 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
2460 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
2461 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
2462 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
2464 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
2465 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
2466 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
2467 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
2469 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v4f32, 1 },
2470 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 },
2471 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
2472 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 },
2474 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v4f32, 1 },
2475 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
2476 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
2477 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
2478 };
2480 static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = {
2481 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
2482 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
2483 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
2484 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 8 }, // split+2*v8i8
2485 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
2486 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
2487 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
2488 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 8 }, // split+2*v8i16
2489 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // vpslld+vptestmd
2490 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // vpslld+vptestmd
2491 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // vpslld+vptestmd
2492 { ISD::TRUNCATE, MVT::v16i1, MVT::v8i32, 2 }, // vpslld+vptestmd
2493 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // vpsllq+vptestmq
2494 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // vpsllq+vptestmq
2495 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // vpmovqd
2496 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 }, // vpmovqb
2497 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 }, // vpmovqw
2498 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 }, // vpmovwb
2500 // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb
2501 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb
2502 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 5 },
2503 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 6 },
2504 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 5 },
2505 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 6 },
2506 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 5 },
2507 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 6 },
2508 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 10 },
2509 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 12 },
2511 // sign extend is vpcmpeq+maskedmove+vpmovdw
2512 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw
2513 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
2514 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 5 },
2515 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
2516 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 5 },
2517 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
2518 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 5 },
2519 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 },
2520 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 },
2522 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // vpternlogd
2523 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // vpternlogd+psrld
2524 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // vpternlogd
2525 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // vpternlogd+psrld
2526 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // vpternlogd
2527 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // vpternlogd+psrld
2528 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i1, 1 }, // vpternlogd
2529 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i1, 2 }, // vpternlogd+psrld
2531 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // vpternlogq
2532 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // vpternlogq+psrlq
2533 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // vpternlogq
2534 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // vpternlogq+psrlq
2536 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 1 },
2537 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 1 },
2538 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 1 },
2539 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 1 },
2540 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
2541 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
2542 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 1 },
2543 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 1 },
2544 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
2545 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
2546 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
2547 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
2549 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
2550 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 1 },
2551 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
2552 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 },
2554 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 1 },
2555 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 1 },
2556 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
2557 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 1 },
2558 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
2559 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 },
2560 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
2561 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
2562 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
2563 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
2564 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 },
2565 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
2566 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 5 },
2568 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 },
2569 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 },
2570 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v32f32, 5 },
2572 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 1 },
2573 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 1 },
2574 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
2575 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 1 },
2576 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 1 },
2577 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 },
2578 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 },
2579 };
2581 static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
2582 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
2583 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
2584 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
2585 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
2586 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
2587 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
2589 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 2 },
2590 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 2 },
2591 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 2 },
2592 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 2 },
2593 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
2594 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
2595 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 2 },
2596 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 2 },
2597 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
2598 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
2599 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
2600 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
2601 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
2602 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
2604 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 },
2606 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 4 },
2607 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 4 },
2608 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 1 },
2609 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 1 },
2610 { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 1 },
2611 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 4 },
2612 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 4 },
2613 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 1 },
2614 { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 1 },
2615 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 5 },
2616 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 },
2617 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 },
2619 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 },
2620 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 },
2622 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 1 },
2623 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 1 },
2624 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f32, 1 },
2625 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 3 },
2627 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 3 },
2628 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 3 },
2629 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 1 },
2630 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 },
2631 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
2632 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4 },
2633 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 3 },
2634 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 4 },
2636 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 },
2637 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 },
2638 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 },
2639 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
2640 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
2641 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
2642 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 3 },
2644 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 },
2645 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 },
2646 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 },
2647 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
2648 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 },
2649 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 },
2650 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 2 },
2651 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },
2652 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
2653 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 },
2654 };
2656 static const TypeConversionCostTblEntry AVXConversionTbl[] = {
2657 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
2658 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
2659 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
2660 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
2661 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
2662 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
2664 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 3 },
2665 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 3 },
2666 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 3 },
2667 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 3 },
2668 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
2669 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
2670 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 3 },
2671 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 3 },
2672 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
2673 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
2674 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
2675 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
2677 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 4 },
2678 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 5 },
2679 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 4 },
2680 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 9 },
2681 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i64, 11 },
2683 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 },
2684 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },
2685 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // and+extract+packuswb
2686 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 5 },
2687 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
2688 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 5 },
2689 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 3 }, // and+extract+2*packusdw
2690 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },
2692 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
2693 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
2694 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
2695 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 },
2696 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 },
2697 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
2698 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 },
2699 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },
2700 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
2701 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 },
2702 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 5 },
2703 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 8 },
2705 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
2706 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
2707 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
2708 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 },
2709 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 },
2710 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
2711 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 },
2712 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 4 },
2713 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 4 },
2714 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
2715 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
2716 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 },
2717 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 10 },
2718 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 10 },
2719 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 18 },
2720 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
2721 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 10 },
2723 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 },
2724 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f64, 2 },
2725 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v8f32, 2 },
2726 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v4f64, 2 },
2727 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 2 },
2728 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f64, 2 },
2729 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 2 },
2730 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v4f64, 2 },
2731 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 2 },
2732 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f32, 2 },
2733 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 5 },
2735 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v8f32, 2 },
2736 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f64, 2 },
2737 { ISD::FP_TO_UINT, MVT::v32i8, MVT::v8f32, 2 },
2738 { ISD::FP_TO_UINT, MVT::v32i8, MVT::v4f64, 2 },
2739 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 2 },
2740 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f64, 2 },
2741 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 2 },
2742 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v4f64, 2 },
2743 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 },
2744 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
2745 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 6 },
2746 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 7 },
2747 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 7 },
2749 { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 },
2750 { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 },
2751 };
2753 static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
2754 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 1 },
2755 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 1 },
2756 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 1 },
2757 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 1 },
2758 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
2759 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
2760 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 1 },
2761 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 1 },
2762 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
2763 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
2764 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
2765 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
2767 // These truncates end up widening elements.
2768 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 1 }, // PMOVZXBQ
2769 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 1 }, // PMOVZXWQ
2770 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 1 }, // PMOVZXBD
2772 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 2 },
2773 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 2 },
2774 { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 2 },
2776 { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 1 },
2777 { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 1 },
2778 { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 1 },
2779 { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 1 },
2780 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 },
2781 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
2782 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 },
2783 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
2784 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
2785 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 1 },
2786 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },
2788 { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 1 },
2789 { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 1 },
2790 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 4 },
2791 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 4 },
2792 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 },
2793 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
2794 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 },
2795 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
2796 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 3 },
2797 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 },
2798 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 2 },
2799 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 12 },
2800 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 22 },
2801 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 4 },
2803 { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 1 },
2804 { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 1 },
2805 { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 1 },
2806 { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 1 },
2807 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f32, 2 },
2808 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 2 },
2809 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 1 },
2810 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 1 },
2811 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
2812 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 1 },
2814 { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 1 },
2815 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 },
2816 { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 1 },
2817 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 4 },
2818 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 2 },
2819 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 2 },
2820 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 1 },
2821 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 1 },
2822 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 4 },
2823 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
2824 };
2826 static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
2827 // These are somewhat magic numbers justified by comparing the
2828 // output of llvm-mca for our various supported scheduler models
2829 // and taking the worst case scenario.
2830 { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 3 },
2831 { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 3 },
2832 { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 3 },
2833 { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 3 },
2834 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 3 },
2835 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 },
2836 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 3 },
2837 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 },
2838 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 },
2839 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4 },
2840 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 8 },
2841 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 8 },
2843 { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 3 },
2844 { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 3 },
2845 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 8 },
2846 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 9 },
2847 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 },
2848 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 4 },
2849 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 4 },
2850 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 },
2851 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 7 },
2852 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 7 },
2853 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
2854 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 15 },
2855 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 18 },
2857 { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 4 },
2858 { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 4 },
2859 { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 4 },
2860 { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 4 },
2861 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f32, 6 },
2862 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 6 },
2863 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 5 },
2864 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 5 },
2865 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 4 },
2866 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 4 },
2868 { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 4 },
2869 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 },
2870 { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 4 },
2871 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 15 },
2872 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 6 },
2873 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 6 },
2874 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 5 },
2875 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 5 },
2876 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 8 },
2877 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 8 },
2879 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 4 },
2880 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 4 },
2881 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 2 },
2882 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 3 },
2883 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
2884 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 2 },
2885 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 2 },
2886 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 3 },
2887 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
2888 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 2 },
2889 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
2890 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 2 },
2892 // These truncates are really widening elements.
2893 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 1 }, // PSHUFD
2894 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // PUNPCKLWD+DQ
2895 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // PUNPCKLBW+WD+PSHUFD
2896 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 1 }, // PUNPCKLWD
2897 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // PUNPCKLBW+WD
2898 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 1 }, // PUNPCKLBW
2900 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 2 }, // PAND+PACKUSWB
2901 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
2902 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 3 }, // PAND+2*PACKUSWB
2903 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 },
2904 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i32, 1 },
2905 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 3 },
2906 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
2907 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 },
2908 { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 4 }, // PAND+3*PACKUSWB
2909 { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 2 }, // PSHUFD+PSHUFLW
2910 { ISD::TRUNCATE, MVT::v4i32, MVT::v2i64, 1 }, // PSHUFD
2911 };
2913 // Attempt to map directly to (simple) MVT types to let us match custom entries.
2914 EVT SrcTy = TLI->getValueType(DL, Src);
2915 EVT DstTy = TLI->getValueType(DL, Dst);
2917 // getSimpleVT() can only be called on simple value types, so check first.
2918 if (SrcTy.isSimple() && DstTy.isSimple()) {
2919 MVT SimpleSrcTy = SrcTy.getSimpleVT();
2920 MVT SimpleDstTy = DstTy.getSimpleVT();
2922 if (ST->useAVX512Regs()) {
2923 if (ST->hasBWI())
2924 if (const auto *Entry = ConvertCostTableLookup(
2925 AVX512BWConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2926 return AdjustCost(Entry->Cost);
2928 if (ST->hasDQI())
2929 if (const auto *Entry = ConvertCostTableLookup(
2930 AVX512DQConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2931 return AdjustCost(Entry->Cost);
2933 if (ST->hasAVX512())
2934 if (const auto *Entry = ConvertCostTableLookup(
2935 AVX512FConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2936 return AdjustCost(Entry->Cost);
2937 }
2939 if (ST->hasBWI())
2940 if (const auto *Entry = ConvertCostTableLookup(
2941 AVX512BWVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2942 return AdjustCost(Entry->Cost);
2944 if (ST->hasDQI())
2945 if (const auto *Entry = ConvertCostTableLookup(
2946 AVX512DQVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2947 return AdjustCost(Entry->Cost);
2949 if (ST->hasAVX512())
2950 if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
2951 SimpleDstTy, SimpleSrcTy))
2952 return AdjustCost(Entry->Cost);
2954 if (ST->hasAVX2()) {
2955 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
2956 SimpleDstTy, SimpleSrcTy))
2957 return AdjustCost(Entry->Cost);
2958 }
2960 if (ST->hasAVX()) {
2961 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
2962 SimpleDstTy, SimpleSrcTy))
2963 return AdjustCost(Entry->Cost);
2964 }
2966 if (ST->hasSSE41()) {
2967 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2968 SimpleDstTy, SimpleSrcTy))
2969 return AdjustCost(Entry->Cost);
2970 }
2972 if (ST->hasSSE2()) {
2973 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2974 SimpleDstTy, SimpleSrcTy))
2975 return AdjustCost(Entry->Cost);
2976 }
2977 }
2979 // Fall back to legalized types.
2980 std::pair<InstructionCost, MVT> LTSrc = getTypeLegalizationCost(Src);
2981 std::pair<InstructionCost, MVT> LTDest = getTypeLegalizationCost(Dst);
2983 // If we're truncating to the same legalized type, just assume it's free.
2984 if (ISD == ISD::TRUNCATE && LTSrc.second == LTDest.second)
2985 return TTI::TCC_Free;
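// Illustrative example (an assumption for clarity, not a table entry): on
// x86-64 a trunc i128 -> i64 expands the source into a pair of i64s, so both
// sides legalize to MVT::i64 and the truncate simply takes the low half.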
2987 if (ST->useAVX512Regs()) {
2988 if (ST->hasBWI())
2989 if (const auto *Entry = ConvertCostTableLookup(
2990 AVX512BWConversionTbl, ISD, LTDest.second, LTSrc.second))
2991 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2993 if (ST->hasDQI())
2994 if (const auto *Entry = ConvertCostTableLookup(
2995 AVX512DQConversionTbl, ISD, LTDest.second, LTSrc.second))
2996 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2998 if (ST->hasAVX512())
2999 if (const auto *Entry = ConvertCostTableLookup(
3000 AVX512FConversionTbl, ISD, LTDest.second, LTSrc.second))
3001 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
3002 }
3004 if (ST->hasBWI())
3005 if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD,
3006 LTDest.second, LTSrc.second))
3007 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
3009 if (ST->hasDQI())
3010 if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD,
3011 LTDest.second, LTSrc.second))
3012 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
3014 if (ST->hasAVX512())
3015 if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
3016 LTDest.second, LTSrc.second))
3017 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
3019 if (ST->hasAVX2())
3020 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
3021 LTDest.second, LTSrc.second))
3022 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
3024 if (ST->hasAVX())
3025 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
3026 LTDest.second, LTSrc.second))
3027 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
3029 if (ST->hasSSE41())
3030 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
3031 LTDest.second, LTSrc.second))
3032 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
3034 if (ST->hasSSE2())
3035 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
3036 LTDest.second, LTSrc.second))
3037 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
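// A note on the std::max scaling above: getTypeLegalizationCost returns the
// number of legal-type pieces a type splits into (LTSrc.first / LTDest.first),
// so e.g. a conversion on <8 x i64> under SSE2 is performed on <2 x i64>
// quarters and the per-piece table cost is scaled by the larger split factor.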
3039 // Fallback for i8/i16 sitofp/uitofp cases: extend the source to i32 first,
3040 // then cost it as an i32 sitofp.
3041 if ((ISD == ISD::SINT_TO_FP || ISD == ISD::UINT_TO_FP) &&
3042 1 < Src->getScalarSizeInBits() && Src->getScalarSizeInBits() < 32) {
3043 Type *ExtSrc = Src->getWithNewBitWidth(32);
3044 unsigned ExtOpc =
3045 (ISD == ISD::SINT_TO_FP) ? Instruction::SExt : Instruction::ZExt;
3047 // For scalar loads the extend would be free.
3048 InstructionCost ExtCost = 0;
3049 if (!(Src->isIntegerTy() && I && isa<LoadInst>(I->getOperand(0))))
3050 ExtCost = getCastInstrCost(ExtOpc, ExtSrc, Src, CCH, CostKind);
3052 return ExtCost + getCastInstrCost(Instruction::SIToFP, Dst, ExtSrc,
3053 TTI::CastContextHint::None, CostKind);
3054 }
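// Illustrative costing under this fallback: sitofp i16 -> float becomes
// (sext i16 -> i32) + (sitofp i32 -> float), and uitofp i16 -> float becomes
// (zext i16 -> i32) + (sitofp i32 -> float); the zero-extended value is
// non-negative, so the signed conversion is safe.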
3056 // Fallback for i8/i16 fptosi/fptoui cases: convert to i32 via fptosi, then
3057 // truncate the result.
3058 if ((ISD == ISD::FP_TO_SINT || ISD == ISD::FP_TO_UINT) &&
3059 1 < Dst->getScalarSizeInBits() && Dst->getScalarSizeInBits() < 32) {
3060 Type *TruncDst = Dst->getWithNewBitWidth(32);
3061 return getCastInstrCost(Instruction::FPToSI, TruncDst, Src, CCH, CostKind) +
3062 getCastInstrCost(Instruction::Trunc, Dst, TruncDst,
3063 TTI::CastContextHint::None, CostKind);
3064 }
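// Illustrative costing under this fallback: fptoui float -> i16 becomes
// (fptosi float -> i32) + (trunc i32 -> i16); FPToSI covers the unsigned case
// too, because the truncated result discards the high bits.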
3066 return AdjustCost(
3067 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
3068 }
3070 InstructionCost X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
3071 Type *CondTy,
3072 CmpInst::Predicate VecPred,
3073 TTI::TargetCostKind CostKind,
3074 const Instruction *I) {
3075 // Early out if this type isn't scalar/vector integer/float.
3076 if (!(ValTy->isIntOrIntVectorTy() || ValTy->isFPOrFPVectorTy()))
3077 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
3078 I);
3080 // Legalize the type.
3081 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
3083 MVT MTy = LT.second;
3085 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3086 assert(ISD && "Invalid opcode");
3088 InstructionCost ExtraCost = 0;
3089 if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) {
3090 // Some vector comparison predicates cost extra instructions.
3091 // TODO: Should we invert this and assume worst case cmp costs
3092 // and reduce for particular predicates?
3093 if (MTy.isVector() &&
3094 !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
3095 (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
3096 ST->hasBWI())) {
3097 // Fall back to I's predicate if a specific one wasn't specified.
3098 CmpInst::Predicate Pred = VecPred;
3099 if (I && (Pred == CmpInst::BAD_ICMP_PREDICATE ||
3100 Pred == CmpInst::BAD_FCMP_PREDICATE))
3101 Pred = cast<CmpInst>(I)->getPredicate();
3103 switch (Pred) {
3104 case CmpInst::Predicate::ICMP_NE:
3105 // xor(cmpeq(x,y),-1)
3106 ExtraCost = 1;
3107 break;
3108 case CmpInst::Predicate::ICMP_SGE:
3109 case CmpInst::Predicate::ICMP_SLE:
3110 // xor(cmpgt(x,y),-1)
3111 ExtraCost = 1;
3112 break;
3113 case CmpInst::Predicate::ICMP_ULT:
3114 case CmpInst::Predicate::ICMP_UGT:
3115 // cmpgt(xor(x,signbit),xor(y,signbit))
3116 // xor(cmpeq(pmaxu(x,y),x),-1)
3117 ExtraCost = 2;
3118 break;
3119 case CmpInst::Predicate::ICMP_ULE:
3120 case CmpInst::Predicate::ICMP_UGE:
3121 if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
3122 (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
3123 // cmpeq(psubus(x,y),0)
3124 // cmpeq(pminu(x,y),x)
3125 ExtraCost = 1;
3126 } else {
3127 // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
3128 ExtraCost = 3;
3129 }
3130 break;
3131 case CmpInst::Predicate::FCMP_ONE:
3132 case CmpInst::Predicate::FCMP_UEQ:
3133 // Without AVX we need to expand FCMP_ONE/FCMP_UEQ cases.
3134 // Use FCMP_UEQ expansion - FCMP_ONE should be the same.
3135 if (CondTy && !ST->hasAVX())
3136 return getCmpSelInstrCost(Opcode, ValTy, CondTy,
3137 CmpInst::Predicate::FCMP_UNO, CostKind) +
3138 getCmpSelInstrCost(Opcode, ValTy, CondTy,
3139 CmpInst::Predicate::FCMP_OEQ, CostKind) +
3140 getArithmeticInstrCost(Instruction::Or, CondTy, CostKind);
3142 break;
3143 case CmpInst::Predicate::BAD_ICMP_PREDICATE:
3144 case CmpInst::Predicate::BAD_FCMP_PREDICATE:
3145 // Assume worst case scenario and add the maximum extra cost.
3146 ExtraCost = 3;
3147 break;
3148 default:
3149 break;
3150 }
3151 }
3152 }
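// Worked example (sketch): with only SSE2, icmp ule <16 x i8> is expanded
// roughly as pcmpeqb(pminub(x,y),x), so the v16i8 SETCC entry below gains
// ExtraCost = 1 before the LT.first scaling applied in the table lookups.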
3154 static const CostKindTblEntry SLMCostTbl[] = {
3155 // slm pcmpeq/pcmpgt throughput is 2
3156 { ISD::SETCC, MVT::v2i64, { 2, 5, 1, 2 } },
3157 // slm pblendvb/blendvpd/blendvps throughput is 4
3158 { ISD::SELECT, MVT::v2f64, { 4, 4, 1, 3 } }, // vblendvpd
3159 { ISD::SELECT, MVT::v4f32, { 4, 4, 1, 3 } }, // vblendvps
3160 { ISD::SELECT, MVT::v2i64, { 4, 4, 1, 3 } }, // pblendvb
3161 { ISD::SELECT, MVT::v4i32, { 4, 4, 1, 3 } }, // pblendvb
3162 { ISD::SELECT, MVT::v8i16, { 4, 4, 1, 3 } }, // pblendvb
3163 { ISD::SELECT, MVT::v16i8, { 4, 4, 1, 3 } }, // pblendvb
3164 };
3166 static const CostKindTblEntry AVX512BWCostTbl[] = {
3167 { ISD::SETCC, MVT::v32i16, { 1, 1, 1, 1 } },
3168 { ISD::SETCC, MVT::v16i16, { 1, 1, 1, 1 } },
3169 { ISD::SETCC, MVT::v64i8, { 1, 1, 1, 1 } },
3170 { ISD::SETCC, MVT::v32i8, { 1, 1, 1, 1 } },
3172 { ISD::SELECT, MVT::v32i16, { 1, 1, 1, 1 } },
3173 { ISD::SELECT, MVT::v64i8, { 1, 1, 1, 1 } },
3174 };
3176 static const CostKindTblEntry AVX512CostTbl[] = {
3177 { ISD::SETCC, MVT::v8f64, { 1, 4, 1, 1 } },
3178 { ISD::SETCC, MVT::v4f64, { 1, 4, 1, 1 } },
3179 { ISD::SETCC, MVT::v16f32, { 1, 4, 1, 1 } },
3180 { ISD::SETCC, MVT::v8f32, { 1, 4, 1, 1 } },
3182 { ISD::SETCC, MVT::v8i64, { 1, 1, 1, 1 } },
3183 { ISD::SETCC, MVT::v4i64, { 1, 1, 1, 1 } },
3184 { ISD::SETCC, MVT::v2i64, { 1, 1, 1, 1 } },
3185 { ISD::SETCC, MVT::v16i32, { 1, 1, 1, 1 } },
3186 { ISD::SETCC, MVT::v8i32, { 1, 1, 1, 1 } },
3187 { ISD::SETCC, MVT::v32i16, { 3, 7, 5, 5 } },
3188 { ISD::SETCC, MVT::v64i8, { 3, 7, 5, 5 } },
3190 { ISD::SELECT, MVT::v8i64, { 1, 1, 1, 1 } },
3191 { ISD::SELECT, MVT::v4i64, { 1, 1, 1, 1 } },
3192 { ISD::SELECT, MVT::v2i64, { 1, 1, 1, 1 } },
3193 { ISD::SELECT, MVT::v16i32, { 1, 1, 1, 1 } },
3194 { ISD::SELECT, MVT::v8i32, { 1, 1, 1, 1 } },
3195 { ISD::SELECT, MVT::v4i32, { 1, 1, 1, 1 } },
3196 { ISD::SELECT, MVT::v8f64, { 1, 1, 1, 1 } },
3197 { ISD::SELECT, MVT::v4f64, { 1, 1, 1, 1 } },
3198 { ISD::SELECT, MVT::v2f64, { 1, 1, 1, 1 } },
3199 { ISD::SELECT, MVT::f64, { 1, 1, 1, 1 } },
3200 { ISD::SELECT, MVT::v16f32, { 1, 1, 1, 1 } },
3201 { ISD::SELECT, MVT::v8f32, { 1, 1, 1, 1 } },
3202 { ISD::SELECT, MVT::v4f32, { 1, 1, 1, 1 } },
3203 { ISD::SELECT, MVT::f32, { 1, 1, 1, 1 } },
3205 { ISD::SELECT, MVT::v32i16, { 2, 2, 4, 4 } },
3206 { ISD::SELECT, MVT::v16i16, { 1, 1, 1, 1 } },
3207 { ISD::SELECT, MVT::v8i16, { 1, 1, 1, 1 } },
3208 { ISD::SELECT, MVT::v64i8, { 2, 2, 4, 4 } },
3209 { ISD::SELECT, MVT::v32i8, { 1, 1, 1, 1 } },
3210 { ISD::SELECT, MVT::v16i8, { 1, 1, 1, 1 } },
3211 };
3213 static const CostKindTblEntry AVX2CostTbl[] = {
3214 { ISD::SETCC, MVT::v4f64, { 1, 4, 1, 2 } },
3215 { ISD::SETCC, MVT::v2f64, { 1, 4, 1, 1 } },
3216 { ISD::SETCC, MVT::f64, { 1, 4, 1, 1 } },
3217 { ISD::SETCC, MVT::v8f32, { 1, 4, 1, 2 } },
3218 { ISD::SETCC, MVT::v4f32, { 1, 4, 1, 1 } },
3219 { ISD::SETCC, MVT::f32, { 1, 4, 1, 1 } },
3221 { ISD::SETCC, MVT::v4i64, { 1, 1, 1, 2 } },
3222 { ISD::SETCC, MVT::v8i32, { 1, 1, 1, 2 } },
3223 { ISD::SETCC, MVT::v16i16, { 1, 1, 1, 2 } },
3224 { ISD::SETCC, MVT::v32i8, { 1, 1, 1, 2 } },
3226 { ISD::SELECT, MVT::v4f64, { 2, 2, 1, 2 } }, // vblendvpd
3227 { ISD::SELECT, MVT::v8f32, { 2, 2, 1, 2 } }, // vblendvps
3228 { ISD::SELECT, MVT::v4i64, { 2, 2, 1, 2 } }, // pblendvb
3229 { ISD::SELECT, MVT::v8i32, { 2, 2, 1, 2 } }, // pblendvb
3230 { ISD::SELECT, MVT::v16i16, { 2, 2, 1, 2 } }, // pblendvb
3231 { ISD::SELECT, MVT::v32i8, { 2, 2, 1, 2 } }, // pblendvb
3232 };
3234 static const CostKindTblEntry XOPCostTbl[] = {
3235 { ISD::SETCC, MVT::v4i64, { 4, 2, 5, 6 } },
3236 { ISD::SETCC, MVT::v2i64, { 1, 1, 1, 1 } },
3237 };
3239 static const CostKindTblEntry AVX1CostTbl[] = {
3240 { ISD::SETCC, MVT::v4f64, { 2, 3, 1, 2 } },
3241 { ISD::SETCC, MVT::v2f64, { 1, 3, 1, 1 } },
3242 { ISD::SETCC, MVT::f64, { 1, 3, 1, 1 } },
3243 { ISD::SETCC, MVT::v8f32, { 2, 3, 1, 2 } },
3244 { ISD::SETCC, MVT::v4f32, { 1, 3, 1, 1 } },
3245 { ISD::SETCC, MVT::f32, { 1, 3, 1, 1 } },
3247 // AVX1 does not support 256-bit integer compares; they are split into
// 128-bit halves.
3248 { ISD::SETCC, MVT::v4i64, { 4, 2, 5, 6 } },
3249 { ISD::SETCC, MVT::v8i32, { 4, 2, 5, 6 } },
3250 { ISD::SETCC, MVT::v16i16, { 4, 2, 5, 6 } },
3251 { ISD::SETCC, MVT::v32i8, { 4, 2, 5, 6 } },
3253 { ISD::SELECT, MVT::v4f64, { 3, 3, 1, 2 } }, // vblendvpd
3254 { ISD::SELECT, MVT::v8f32, { 3, 3, 1, 2 } }, // vblendvps
3255 { ISD::SELECT, MVT::v4i64, { 3, 3, 1, 2 } }, // vblendvpd
3256 { ISD::SELECT, MVT::v8i32, { 3, 3, 1, 2 } }, // vblendvps
3257 { ISD::SELECT, MVT::v16i16, { 3, 3, 3, 3 } }, // vandps + vandnps + vorps
3258 { ISD::SELECT, MVT::v32i8, { 3, 3, 3, 3 } }, // vandps + vandnps + vorps
3259 };
3261 static const CostKindTblEntry SSE42CostTbl[] = {
3262 { ISD::SETCC, MVT::v2i64, { 1, 2, 1, 2 } },
3263 };
3265 static const CostKindTblEntry SSE41CostTbl[] = {
3266 { ISD::SETCC, MVT::v2f64, { 1, 5, 1, 1 } },
3267 { ISD::SETCC, MVT::v4f32, { 1, 5, 1, 1 } },
3269 { ISD::SELECT, MVT::v2f64, { 2, 2, 1, 2 } }, // blendvpd
3270 { ISD::SELECT, MVT::f64, { 2, 2, 1, 2 } }, // blendvpd
3271 { ISD::SELECT, MVT::v4f32, { 2, 2, 1, 2 } }, // blendvps
3272 { ISD::SELECT, MVT::f32, { 2, 2, 1, 2 } }, // blendvps
3273 { ISD::SELECT, MVT::v2i64, { 2, 2, 1, 2 } }, // pblendvb
3274 { ISD::SELECT, MVT::v4i32, { 2, 2, 1, 2 } }, // pblendvb
3275 { ISD::SELECT, MVT::v8i16, { 2, 2, 1, 2 } }, // pblendvb
3276 { ISD::SELECT, MVT::v16i8, { 2, 2, 1, 2 } }, // pblendvb
3277 };
3279 static const CostKindTblEntry SSE2CostTbl[] = {
3280 { ISD::SETCC, MVT::v2f64, { 2, 5, 1, 1 } },
3281 { ISD::SETCC, MVT::f64, { 1, 5, 1, 1 } },
3283 { ISD::SETCC, MVT::v2i64, { 5, 4, 5, 5 } }, // pcmpeqd/pcmpgtd expansion
3284 { ISD::SETCC, MVT::v4i32, { 1, 1, 1, 1 } },
3285 { ISD::SETCC, MVT::v8i16, { 1, 1, 1, 1 } },
3286 { ISD::SETCC, MVT::v16i8, { 1, 1, 1, 1 } },
3288 { ISD::SELECT, MVT::v2f64, { 2, 2, 3, 3 } }, // andpd + andnpd + orpd
3289 { ISD::SELECT, MVT::f64, { 2, 2, 3, 3 } }, // andpd + andnpd + orpd
3290 { ISD::SELECT, MVT::v2i64, { 2, 2, 3, 3 } }, // pand + pandn + por
3291 { ISD::SELECT, MVT::v4i32, { 2, 2, 3, 3 } }, // pand + pandn + por
3292 { ISD::SELECT, MVT::v8i16, { 2, 2, 3, 3 } }, // pand + pandn + por
3293 { ISD::SELECT, MVT::v16i8, { 2, 2, 3, 3 } }, // pand + pandn + por
3294 };
3296 static const CostKindTblEntry SSE1CostTbl[] = {
3297 { ISD::SETCC, MVT::v4f32, { 2, 5, 1, 1 } },
3298 { ISD::SETCC, MVT::f32, { 1, 5, 1, 1 } },
3300 { ISD::SELECT, MVT::v4f32, { 2, 2, 3, 3 } }, // andps + andnps + orps
3301 { ISD::SELECT, MVT::f32, { 2, 2, 3, 3 } }, // andps + andnps + orps
3302 };
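// Each CostKindTblEntry above holds up to four values, one per cost kind
// ({RecipThroughput, Latency, CodeSize, SizeAndLatency}); Entry->Cost[CostKind]
// yields an empty optional when a kind has no data, so the lookups below fall
// through from the most specific feature table to the base implementation.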
3304 if (ST->useSLMArithCosts())
3305 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
3306 if (auto KindCost = Entry->Cost[CostKind])
3307 return LT.first * (ExtraCost + *KindCost);
3309 if (ST->hasBWI())
3310 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
3311 if (auto KindCost = Entry->Cost[CostKind])
3312 return LT.first * (ExtraCost + *KindCost);
3314 if (ST->hasAVX512())
3315 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3316 if (auto KindCost = Entry->Cost[CostKind])
3317 return LT.first * (ExtraCost + *KindCost);
3319 if (ST->hasAVX2())
3320 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
3321 if (auto KindCost = Entry->Cost[CostKind])
3322 return LT.first * (ExtraCost + *KindCost);
3324 if (ST->hasXOP())
3325 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
3326 if (auto KindCost = Entry->Cost[CostKind])
3327 return LT.first * (ExtraCost + *KindCost);
3329 if (ST->hasAVX())
3330 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
3331 if (auto KindCost = Entry->Cost[CostKind])
3332 return LT.first * (ExtraCost + *KindCost);
3334 if (ST->hasSSE42())
3335 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
3336 if (auto KindCost = Entry->Cost[CostKind])
3337 return LT.first * (ExtraCost + *KindCost);
3339 if (ST->hasSSE41())
3340 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
3341 if (auto KindCost = Entry->Cost[CostKind])
3342 return LT.first * (ExtraCost + *KindCost);
3344 if (ST->hasSSE2())
3345 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
3346 if (auto KindCost = Entry->Cost[CostKind])
3347 return LT.first * (ExtraCost + *KindCost);
3349 if (ST->hasSSE1())
3350 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
3351 if (auto KindCost = Entry->Cost[CostKind])
3352 return LT.first * (ExtraCost + *KindCost);
3354 // Assume a 3cy latency for fp select ops.
3355 if (CostKind == TTI::TCK_Latency && Opcode == Instruction::Select)
3356 if (ValTy->getScalarType()->isFloatingPointTy())
3357 return 3;
3359 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
3360 }
3362 unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }
3364 InstructionCost
3365 X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
3366 TTI::TargetCostKind CostKind) {
3367 // Costs should match the codegen from:
3368 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
3369 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
3370 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
3371 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
3372 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
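// The table values can be cross-checked by feeding the codegen from those
// tests through llvm-mca, e.g. `llvm-mca -mtriple=x86_64-- -mcpu=haswell`
// on the emitted assembly (invocation shown as an illustrative sketch).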
3374 // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not
3375 // specialized in these tables yet.
3376 static const CostKindTblEntry AVX512VBMI2CostTbl[] = {
3377 { ISD::FSHL, MVT::v8i64, { 1, 1, 1, 1 } },
3378 { ISD::FSHL, MVT::v4i64, { 1, 1, 1, 1 } },
3379 { ISD::FSHL, MVT::v2i64, { 1, 1, 1, 1 } },
3380 { ISD::FSHL, MVT::v16i32, { 1, 1, 1, 1 } },
3381 { ISD::FSHL, MVT::v8i32, { 1, 1, 1, 1 } },
3382 { ISD::FSHL, MVT::v4i32, { 1, 1, 1, 1 } },
3383 { ISD::FSHL, MVT::v32i16, { 1, 1, 1, 1 } },
3384 { ISD::FSHL, MVT::v16i16, { 1, 1, 1, 1 } },
3385 { ISD::FSHL, MVT::v8i16, { 1, 1, 1, 1 } },
3386 { ISD::ROTL, MVT::v32i16, { 1, 1, 1, 1 } },
3387 { ISD::ROTL, MVT::v16i16, { 1, 1, 1, 1 } },
3388 { ISD::ROTL, MVT::v8i16, { 1, 1, 1, 1 } },
3389 { ISD::ROTR, MVT::v32i16, { 1, 1, 1, 1 } },
3390 { ISD::ROTR, MVT::v16i16, { 1, 1, 1, 1 } },
3391 { ISD::ROTR, MVT::v8i16, { 1, 1, 1, 1 } },
3392 };
3393 static const CostKindTblEntry AVX512BITALGCostTbl[] = {
3394 { ISD::CTPOP, MVT::v32i16, { 1, 1, 1, 1 } },
3395 { ISD::CTPOP, MVT::v64i8, { 1, 1, 1, 1 } },
3396 { ISD::CTPOP, MVT::v16i16, { 1, 1, 1, 1 } },
3397 { ISD::CTPOP, MVT::v32i8, { 1, 1, 1, 1 } },
3398 { ISD::CTPOP, MVT::v8i16, { 1, 1, 1, 1 } },
3399 { ISD::CTPOP, MVT::v16i8, { 1, 1, 1, 1 } },
3400 };
3401 static const CostKindTblEntry AVX512VPOPCNTDQCostTbl[] = {
3402 { ISD::CTPOP, MVT::v8i64, { 1, 1, 1, 1 } },
3403 { ISD::CTPOP, MVT::v16i32, { 1, 1, 1, 1 } },
3404 { ISD::CTPOP, MVT::v4i64, { 1, 1, 1, 1 } },
3405 { ISD::CTPOP, MVT::v8i32, { 1, 1, 1, 1 } },
3406 { ISD::CTPOP, MVT::v2i64, { 1, 1, 1, 1 } },
3407 { ISD::CTPOP, MVT::v4i32, { 1, 1, 1, 1 } },
3408 };
3409 static const CostKindTblEntry AVX512CDCostTbl[] = {
3410 { ISD::CTLZ, MVT::v8i64, { 1, 5, 1, 1 } },
3411 { ISD::CTLZ, MVT::v16i32, { 1, 5, 1, 1 } },
3412 { ISD::CTLZ, MVT::v32i16, { 18, 27, 23, 27 } },
3413 { ISD::CTLZ, MVT::v64i8, { 3, 16, 9, 11 } },
3414 { ISD::CTLZ, MVT::v4i64, { 1, 5, 1, 1 } },
3415 { ISD::CTLZ, MVT::v8i32, { 1, 5, 1, 1 } },
3416 { ISD::CTLZ, MVT::v16i16, { 8, 19, 11, 13 } },
3417 { ISD::CTLZ, MVT::v32i8, { 2, 11, 9, 10 } },
3418 { ISD::CTLZ, MVT::v2i64, { 1, 5, 1, 1 } },
3419 { ISD::CTLZ, MVT::v4i32, { 1, 5, 1, 1 } },
3420 { ISD::CTLZ, MVT::v8i16, { 3, 15, 4, 6 } },
3421 { ISD::CTLZ, MVT::v16i8, { 2, 10, 9, 10 } },
3423 { ISD::CTTZ, MVT::v8i64, { 2, 8, 6, 7 } },
3424 { ISD::CTTZ, MVT::v16i32, { 2, 8, 6, 7 } },
3425 { ISD::CTTZ, MVT::v4i64, { 1, 8, 6, 6 } },
3426 { ISD::CTTZ, MVT::v8i32, { 1, 8, 6, 6 } },
3427 { ISD::CTTZ, MVT::v2i64, { 1, 8, 6, 6 } },
3428 { ISD::CTTZ, MVT::v4i32, { 1, 8, 6, 6 } },
3429 };
3430 static const CostKindTblEntry AVX512BWCostTbl[] = {
3431 { ISD::ABS, MVT::v32i16, { 1, 1, 1, 1 } },
3432 { ISD::ABS, MVT::v64i8, { 1, 1, 1, 1 } },
3433 { ISD::BITREVERSE, MVT::v2i64, { 3, 10, 10, 11 } },
3434 { ISD::BITREVERSE, MVT::v4i64, { 3, 11, 10, 11 } },
3435 { ISD::BITREVERSE, MVT::v8i64, { 3, 12, 10, 14 } },
3436 { ISD::BITREVERSE, MVT::v4i32, { 3, 10, 10, 11 } },
3437 { ISD::BITREVERSE, MVT::v8i32, { 3, 11, 10, 11 } },
3438 { ISD::BITREVERSE, MVT::v16i32, { 3, 12, 10, 14 } },
3439 { ISD::BITREVERSE, MVT::v8i16, { 3, 10, 10, 11 } },
3440 { ISD::BITREVERSE, MVT::v16i16, { 3, 11, 10, 11 } },
3441 { ISD::BITREVERSE, MVT::v32i16, { 3, 12, 10, 14 } },
3442 { ISD::BITREVERSE, MVT::v16i8, { 2, 5, 9, 9 } },
3443 { ISD::BITREVERSE, MVT::v32i8, { 2, 5, 9, 9 } },
3444 { ISD::BITREVERSE, MVT::v64i8, { 2, 5, 9, 12 } },
3445 { ISD::BSWAP, MVT::v2i64, { 1, 1, 1, 2 } },
3446 { ISD::BSWAP, MVT::v4i64, { 1, 1, 1, 2 } },
3447 { ISD::BSWAP, MVT::v8i64, { 1, 1, 1, 2 } },
3448 { ISD::BSWAP, MVT::v4i32, { 1, 1, 1, 2 } },
3449 { ISD::BSWAP, MVT::v8i32, { 1, 1, 1, 2 } },
3450 { ISD::BSWAP, MVT::v16i32, { 1, 1, 1, 2 } },
3451 { ISD::BSWAP, MVT::v8i16, { 1, 1, 1, 2 } },
3452 { ISD::BSWAP, MVT::v16i16, { 1, 1, 1, 2 } },
3453 { ISD::BSWAP, MVT::v32i16, { 1, 1, 1, 2 } },
3454 { ISD::CTLZ, MVT::v8i64, { 8, 22, 23, 23 } },
3455 { ISD::CTLZ, MVT::v16i32, { 8, 23, 25, 25 } },
3456 { ISD::CTLZ, MVT::v32i16, { 4, 15, 15, 16 } },
3457 { ISD::CTLZ, MVT::v64i8, { 3, 12, 10, 9 } },
3458 { ISD::CTPOP, MVT::v2i64, { 3, 7, 10, 10 } },
3459 { ISD::CTPOP, MVT::v4i64, { 3, 7, 10, 10 } },
3460 { ISD::CTPOP, MVT::v8i64, { 3, 8, 10, 12 } },
3461 { ISD::CTPOP, MVT::v4i32, { 7, 11, 14, 14 } },
3462 { ISD::CTPOP, MVT::v8i32, { 7, 11, 14, 14 } },
3463 { ISD::CTPOP, MVT::v16i32, { 7, 12, 14, 16 } },
3464 { ISD::CTPOP, MVT::v8i16, { 2, 7, 11, 11 } },
3465 { ISD::CTPOP, MVT::v16i16, { 2, 7, 11, 11 } },
3466 { ISD::CTPOP, MVT::v32i16, { 3, 7, 11, 13 } },
3467 { ISD::CTPOP, MVT::v16i8, { 2, 4, 8, 8 } },
3468 { ISD::CTPOP, MVT::v32i8, { 2, 4, 8, 8 } },
3469 { ISD::CTPOP, MVT::v64i8, { 2, 5, 8, 10 } },
3470 { ISD::CTTZ, MVT::v8i16, { 3, 9, 14, 14 } },
3471 { ISD::CTTZ, MVT::v16i16, { 3, 9, 14, 14 } },
3472 { ISD::CTTZ, MVT::v32i16, { 3, 10, 14, 16 } },
3473 { ISD::CTTZ, MVT::v16i8, { 2, 6, 11, 11 } },
3474 { ISD::CTTZ, MVT::v32i8, { 2, 6, 11, 11 } },
3475 { ISD::CTTZ, MVT::v64i8, { 3, 7, 11, 13 } },
3476 { ISD::ROTL, MVT::v32i16, { 2, 8, 6, 8 } },
3477 { ISD::ROTL, MVT::v16i16, { 2, 8, 6, 7 } },
3478 { ISD::ROTL, MVT::v8i16, { 2, 7, 6, 7 } },
3479 { ISD::ROTL, MVT::v64i8, { 5, 6, 11, 12 } },
3480 { ISD::ROTL, MVT::v32i8, { 5, 15, 7, 10 } },
3481 { ISD::ROTL, MVT::v16i8, { 5, 15, 7, 10 } },
3482 { ISD::ROTR, MVT::v32i16, { 2, 8, 6, 8 } },
3483 { ISD::ROTR, MVT::v16i16, { 2, 8, 6, 7 } },
3484 { ISD::ROTR, MVT::v8i16, { 2, 7, 6, 7 } },
3485 { ISD::ROTR, MVT::v64i8, { 5, 6, 12, 14 } },
3486 { ISD::ROTR, MVT::v32i8, { 5, 14, 6, 9 } },
3487 { ISD::ROTR, MVT::v16i8, { 5, 14, 6, 9 } },
3488 { ISD::SADDSAT, MVT::v32i16, { 1 } },
3489 { ISD::SADDSAT, MVT::v64i8, { 1 } },
3490 { ISD::SMAX, MVT::v32i16, { 1, 1, 1, 1 } },
3491 { ISD::SMAX, MVT::v64i8, { 1, 1, 1, 1 } },
3492 { ISD::SMIN, MVT::v32i16, { 1, 1, 1, 1 } },
3493 { ISD::SMIN, MVT::v64i8, { 1, 1, 1, 1 } },
3494 { ISD::SSUBSAT, MVT::v32i16, { 1 } },
3495 { ISD::SSUBSAT, MVT::v64i8, { 1 } },
3496 { ISD::UADDSAT, MVT::v32i16, { 1 } },
3497 { ISD::UADDSAT, MVT::v64i8, { 1 } },
3498 { ISD::UMAX, MVT::v32i16, { 1, 1, 1, 1 } },
3499 { ISD::UMAX, MVT::v64i8, { 1, 1, 1, 1 } },
3500 { ISD::UMIN, MVT::v32i16, { 1, 1, 1, 1 } },
3501 { ISD::UMIN, MVT::v64i8, { 1, 1, 1, 1 } },
3502 { ISD::USUBSAT, MVT::v32i16, { 1 } },
3503 { ISD::USUBSAT, MVT::v64i8, { 1 } },
3505 static const CostKindTblEntry AVX512CostTbl[] = {
3506 { ISD::ABS, MVT::v8i64, { 1, 1, 1, 1 } },
3507 { ISD::ABS, MVT::v4i64, { 1, 1, 1, 1 } },
3508 { ISD::ABS, MVT::v2i64, { 1, 1, 1, 1 } },
3509 { ISD::ABS, MVT::v16i32, { 1, 1, 1, 1 } },
3510 { ISD::ABS, MVT::v8i32, { 1, 1, 1, 1 } },
3511 { ISD::ABS, MVT::v32i16, { 2, 7, 4, 4 } },
3512 { ISD::ABS, MVT::v16i16, { 1, 1, 1, 1 } },
3513 { ISD::ABS, MVT::v64i8, { 2, 7, 4, 4 } },
3514 { ISD::ABS, MVT::v32i8, { 1, 1, 1, 1 } },
3515 { ISD::BITREVERSE, MVT::v8i64, { 9, 13, 20, 20 } },
3516 { ISD::BITREVERSE, MVT::v16i32, { 9, 13, 20, 20 } },
3517 { ISD::BITREVERSE, MVT::v32i16, { 9, 13, 20, 20 } },
3518 { ISD::BITREVERSE, MVT::v64i8, { 6, 11, 17, 17 } },
3519 { ISD::BSWAP, MVT::v8i64, { 4, 7, 5, 5 } },
3520 { ISD::BSWAP, MVT::v16i32, { 4, 7, 5, 5 } },
3521 { ISD::BSWAP, MVT::v32i16, { 4, 7, 5, 5 } },
3522 { ISD::CTLZ, MVT::v8i64, { 10, 28, 32, 32 } },
3523 { ISD::CTLZ, MVT::v16i32, { 12, 30, 38, 38 } },
3524 { ISD::CTLZ, MVT::v32i16, { 8, 15, 29, 29 } },
3525 { ISD::CTLZ, MVT::v64i8, { 6, 11, 19, 19 } },
3526 { ISD::CTPOP, MVT::v8i64, { 16, 16, 19, 19 } },
3527 { ISD::CTPOP, MVT::v16i32, { 24, 19, 27, 27 } },
3528 { ISD::CTPOP, MVT::v32i16, { 18, 15, 22, 22 } },
3529 { ISD::CTPOP, MVT::v64i8, { 12, 11, 16, 16 } },
3530 { ISD::CTTZ, MVT::v8i64, { 2, 8, 6, 7 } },
3531 { ISD::CTTZ, MVT::v16i32, { 2, 8, 6, 7 } },
3532 { ISD::CTTZ, MVT::v32i16, { 7, 17, 27, 27 } },
3533 { ISD::CTTZ, MVT::v64i8, { 6, 13, 21, 21 } },
3534 { ISD::ROTL, MVT::v8i64, { 1, 1, 1, 1 } },
3535 { ISD::ROTL, MVT::v4i64, { 1, 1, 1, 1 } },
3536 { ISD::ROTL, MVT::v2i64, { 1, 1, 1, 1 } },
3537 { ISD::ROTL, MVT::v16i32, { 1, 1, 1, 1 } },
3538 { ISD::ROTL, MVT::v8i32, { 1, 1, 1, 1 } },
3539 { ISD::ROTL, MVT::v4i32, { 1, 1, 1, 1 } },
3540 { ISD::ROTR, MVT::v8i64, { 1, 1, 1, 1 } },
3541 { ISD::ROTR, MVT::v4i64, { 1, 1, 1, 1 } },
3542 { ISD::ROTR, MVT::v2i64, { 1, 1, 1, 1 } },
3543 { ISD::ROTR, MVT::v16i32, { 1, 1, 1, 1 } },
3544 { ISD::ROTR, MVT::v8i32, { 1, 1, 1, 1 } },
3545 { ISD::ROTR, MVT::v4i32, { 1, 1, 1, 1 } },
3546 { ISD::SMAX, MVT::v8i64, { 1, 3, 1, 1 } },
3547 { ISD::SMAX, MVT::v16i32, { 1, 1, 1, 1 } },
3548 { ISD::SMAX, MVT::v32i16, { 3, 7, 5, 5 } },
3549 { ISD::SMAX, MVT::v64i8, { 3, 7, 5, 5 } },
3550 { ISD::SMAX, MVT::v4i64, { 1, 3, 1, 1 } },
3551 { ISD::SMAX, MVT::v2i64, { 1, 3, 1, 1 } },
3552 { ISD::SMIN, MVT::v8i64, { 1, 3, 1, 1 } },
3553 { ISD::SMIN, MVT::v16i32, { 1, 1, 1, 1 } },
3554 { ISD::SMIN, MVT::v32i16, { 3, 7, 5, 5 } },
3555 { ISD::SMIN, MVT::v64i8, { 3, 7, 5, 5 } },
3556 { ISD::SMIN, MVT::v4i64, { 1, 3, 1, 1 } },
3557 { ISD::SMIN, MVT::v2i64, { 1, 3, 1, 1 } },
3558 { ISD::UMAX, MVT::v8i64, { 1, 3, 1, 1 } },
3559 { ISD::UMAX, MVT::v16i32, { 1, 1, 1, 1 } },
3560 { ISD::UMAX, MVT::v32i16, { 3, 7, 5, 5 } },
3561 { ISD::UMAX, MVT::v64i8, { 3, 7, 5, 5 } },
3562 { ISD::UMAX, MVT::v4i64, { 1, 3, 1, 1 } },
3563 { ISD::UMAX, MVT::v2i64, { 1, 3, 1, 1 } },
3564 { ISD::UMIN, MVT::v8i64, { 1, 3, 1, 1 } },
3565 { ISD::UMIN, MVT::v16i32, { 1, 1, 1, 1 } },
3566 { ISD::UMIN, MVT::v32i16, { 3, 7, 5, 5 } },
3567 { ISD::UMIN, MVT::v64i8, { 3, 7, 5, 5 } },
3568 { ISD::UMIN, MVT::v4i64, { 1, 3, 1, 1 } },
3569 { ISD::UMIN, MVT::v2i64, { 1, 3, 1, 1 } },
3570 { ISD::USUBSAT, MVT::v16i32, { 2 } }, // pmaxud + psubd
3571 { ISD::USUBSAT, MVT::v2i64, { 2 } }, // pmaxuq + psubq
3572 { ISD::USUBSAT, MVT::v4i64, { 2 } }, // pmaxuq + psubq
3573 { ISD::USUBSAT, MVT::v8i64, { 2 } }, // pmaxuq + psubq
3574 { ISD::UADDSAT, MVT::v16i32, { 3 } }, // not + pminud + paddd
3575 { ISD::UADDSAT, MVT::v2i64, { 3 } }, // not + pminuq + paddq
3576 { ISD::UADDSAT, MVT::v4i64, { 3 } }, // not + pminuq + paddq
3577 { ISD::UADDSAT, MVT::v8i64, { 3 } }, // not + pminuq + paddq
3578 { ISD::SADDSAT, MVT::v32i16, { 2 } },
3579 { ISD::SADDSAT, MVT::v64i8, { 2 } },
3580 { ISD::SSUBSAT, MVT::v32i16, { 2 } },
3581 { ISD::SSUBSAT, MVT::v64i8, { 2 } },
3582 { ISD::UADDSAT, MVT::v32i16, { 2 } },
3583 { ISD::UADDSAT, MVT::v64i8, { 2 } },
3584 { ISD::USUBSAT, MVT::v32i16, { 2 } },
3585 { ISD::USUBSAT, MVT::v64i8, { 2 } },
3586 { ISD::FMAXNUM, MVT::f32, { 2, 2, 3, 3 } },
3587 { ISD::FMAXNUM, MVT::v4f32, { 1, 1, 3, 3 } },
3588 { ISD::FMAXNUM, MVT::v8f32, { 2, 2, 3, 3 } },
3589 { ISD::FMAXNUM, MVT::v16f32, { 4, 4, 3, 3 } },
3590 { ISD::FMAXNUM, MVT::f64, { 2, 2, 3, 3 } },
3591 { ISD::FMAXNUM, MVT::v2f64, { 1, 1, 3, 3 } },
3592 { ISD::FMAXNUM, MVT::v4f64, { 2, 2, 3, 3 } },
3593 { ISD::FMAXNUM, MVT::v8f64, { 3, 3, 3, 3 } },
3594 { ISD::FSQRT, MVT::f32, { 3, 12, 1, 1 } }, // Skylake from http://www.agner.org/
3595 { ISD::FSQRT, MVT::v4f32, { 3, 12, 1, 1 } }, // Skylake from http://www.agner.org/
3596 { ISD::FSQRT, MVT::v8f32, { 6, 12, 1, 1 } }, // Skylake from http://www.agner.org/
3597 { ISD::FSQRT, MVT::v16f32, { 12, 20, 1, 3 } }, // Skylake from http://www.agner.org/
3598 { ISD::FSQRT, MVT::f64, { 6, 18, 1, 1 } }, // Skylake from http://www.agner.org/
3599 { ISD::FSQRT, MVT::v2f64, { 6, 18, 1, 1 } }, // Skylake from http://www.agner.org/
3600 { ISD::FSQRT, MVT::v4f64, { 12, 18, 1, 1 } }, // Skylake from http://www.agner.org/
3601 { ISD::FSQRT, MVT::v8f64, { 24, 32, 1, 3 } }, // Skylake from http://www.agner.org/
3602 };
3603 static const CostKindTblEntry XOPCostTbl[] = {
3604 { ISD::BITREVERSE, MVT::v4i64, { 3, 6, 5, 6 } },
3605 { ISD::BITREVERSE, MVT::v8i32, { 3, 6, 5, 6 } },
3606 { ISD::BITREVERSE, MVT::v16i16, { 3, 6, 5, 6 } },
3607 { ISD::BITREVERSE, MVT::v32i8, { 3, 6, 5, 6 } },
3608 { ISD::BITREVERSE, MVT::v2i64, { 2, 7, 1, 1 } },
3609 { ISD::BITREVERSE, MVT::v4i32, { 2, 7, 1, 1 } },
3610 { ISD::BITREVERSE, MVT::v8i16, { 2, 7, 1, 1 } },
3611 { ISD::BITREVERSE, MVT::v16i8, { 2, 7, 1, 1 } },
3612 { ISD::BITREVERSE, MVT::i64, { 2, 2, 3, 4 } },
3613 { ISD::BITREVERSE, MVT::i32, { 2, 2, 3, 4 } },
3614 { ISD::BITREVERSE, MVT::i16, { 2, 2, 3, 4 } },
3615 { ISD::BITREVERSE, MVT::i8, { 2, 2, 3, 4 } },
3616 // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y))
3617 { ISD::ROTL, MVT::v4i64, { 4, 7, 5, 6 } },
3618 { ISD::ROTL, MVT::v8i32, { 4, 7, 5, 6 } },
3619 { ISD::ROTL, MVT::v16i16, { 4, 7, 5, 6 } },
3620 { ISD::ROTL, MVT::v32i8, { 4, 7, 5, 6 } },
3621 { ISD::ROTL, MVT::v2i64, { 1, 3, 1, 1 } },
3622 { ISD::ROTL, MVT::v4i32, { 1, 3, 1, 1 } },
3623 { ISD::ROTL, MVT::v8i16, { 1, 3, 1, 1 } },
3624 { ISD::ROTL, MVT::v16i8, { 1, 3, 1, 1 } },
3625 { ISD::ROTR, MVT::v4i64, { 4, 7, 8, 9 } },
3626 { ISD::ROTR, MVT::v8i32, { 4, 7, 8, 9 } },
3627 { ISD::ROTR, MVT::v16i16, { 4, 7, 8, 9 } },
3628 { ISD::ROTR, MVT::v32i8, { 4, 7, 8, 9 } },
3629 { ISD::ROTR, MVT::v2i64, { 1, 3, 3, 3 } },
3630 { ISD::ROTR, MVT::v4i32, { 1, 3, 3, 3 } },
3631 { ISD::ROTR, MVT::v8i16, { 1, 3, 3, 3 } },
3632 { ISD::ROTR, MVT::v16i8, { 1, 3, 3, 3 } }
3633 };
3634 static const CostKindTblEntry AVX2CostTbl[] = {
3635 { ISD::ABS, MVT::v2i64, { 2, 4, 3, 5 } }, // VBLENDVPD(X,VPSUBQ(0,X),X)
3636 { ISD::ABS, MVT::v4i64, { 2, 4, 3, 5 } }, // VBLENDVPD(X,VPSUBQ(0,X),X)
3637 { ISD::ABS, MVT::v4i32, { 1, 1, 1, 1 } },
3638 { ISD::ABS, MVT::v8i32, { 1, 1, 1, 2 } },
3639 { ISD::ABS, MVT::v8i16, { 1, 1, 1, 1 } },
3640 { ISD::ABS, MVT::v16i16, { 1, 1, 1, 2 } },
3641 { ISD::ABS, MVT::v16i8, { 1, 1, 1, 1 } },
3642 { ISD::ABS, MVT::v32i8, { 1, 1, 1, 2 } },
3643 { ISD::BITREVERSE, MVT::v2i64, { 3, 11, 10, 11 } },
3644 { ISD::BITREVERSE, MVT::v4i64, { 5, 11, 10, 17 } },
3645 { ISD::BITREVERSE, MVT::v4i32, { 3, 11, 10, 11 } },
3646 { ISD::BITREVERSE, MVT::v8i32, { 5, 11, 10, 17 } },
3647 { ISD::BITREVERSE, MVT::v8i16, { 3, 11, 10, 11 } },
3648 { ISD::BITREVERSE, MVT::v16i16, { 5, 11, 10, 17 } },
3649 { ISD::BITREVERSE, MVT::v16i8, { 3, 6, 9, 9 } },
3650 { ISD::BITREVERSE, MVT::v32i8, { 4, 5, 9, 15 } },
3651 { ISD::BSWAP, MVT::v2i64, { 1, 2, 1, 2 } },
3652 { ISD::BSWAP, MVT::v4i64, { 1, 3, 1, 2 } },
3653 { ISD::BSWAP, MVT::v4i32, { 1, 2, 1, 2 } },
3654 { ISD::BSWAP, MVT::v8i32, { 1, 3, 1, 2 } },
3655 { ISD::BSWAP, MVT::v8i16, { 1, 2, 1, 2 } },
3656 { ISD::BSWAP, MVT::v16i16, { 1, 3, 1, 2 } },
3657 { ISD::CTLZ, MVT::v2i64, { 7, 18, 24, 25 } },
3658 { ISD::CTLZ, MVT::v4i64, { 14, 18, 24, 44 } },
3659 { ISD::CTLZ, MVT::v4i32, { 5, 16, 19, 20 } },
3660 { ISD::CTLZ, MVT::v8i32, { 10, 16, 19, 34 } },
3661 { ISD::CTLZ, MVT::v8i16, { 4, 13, 14, 15 } },
3662 { ISD::CTLZ, MVT::v16i16, { 6, 14, 14, 24 } },
3663 { ISD::CTLZ, MVT::v16i8, { 3, 12, 9, 10 } },
3664 { ISD::CTLZ, MVT::v32i8, { 4, 12, 9, 14 } },
3665 { ISD::CTPOP, MVT::v2i64, { 3, 9, 10, 10 } },
3666 { ISD::CTPOP, MVT::v4i64, { 4, 9, 10, 14 } },
3667 { ISD::CTPOP, MVT::v4i32, { 7, 12, 14, 14 } },
3668 { ISD::CTPOP, MVT::v8i32, { 7, 12, 14, 18 } },
3669 { ISD::CTPOP, MVT::v8i16, { 3, 7, 11, 11 } },
3670 { ISD::CTPOP, MVT::v16i16, { 6, 8, 11, 18 } },
3671 { ISD::CTPOP, MVT::v16i8, { 2, 5, 8, 8 } },
3672 { ISD::CTPOP, MVT::v32i8, { 3, 5, 8, 12 } },
3673 { ISD::CTTZ, MVT::v2i64, { 4, 11, 13, 13 } },
3674 { ISD::CTTZ, MVT::v4i64, { 5, 11, 13, 20 } },
3675 { ISD::CTTZ, MVT::v4i32, { 7, 14, 17, 17 } },
3676 { ISD::CTTZ, MVT::v8i32, { 7, 15, 17, 24 } },
3677 { ISD::CTTZ, MVT::v8i16, { 4, 9, 14, 14 } },
3678 { ISD::CTTZ, MVT::v16i16, { 6, 9, 14, 24 } },
3679 { ISD::CTTZ, MVT::v16i8, { 3, 7, 11, 11 } },
3680 { ISD::CTTZ, MVT::v32i8, { 5, 7, 11, 18 } },
3681 { ISD::SADDSAT, MVT::v16i16, { 1 } },
3682 { ISD::SADDSAT, MVT::v32i8, { 1 } },
3683 { ISD::SMAX, MVT::v2i64, { 2, 7, 2, 3 } },
3684 { ISD::SMAX, MVT::v4i64, { 2, 7, 2, 3 } },
3685 { ISD::SMAX, MVT::v8i32, { 1, 1, 1, 2 } },
3686 { ISD::SMAX, MVT::v16i16, { 1, 1, 1, 2 } },
3687 { ISD::SMAX, MVT::v32i8, { 1, 1, 1, 2 } },
3688 { ISD::SMIN, MVT::v2i64, { 2, 7, 2, 3 } },
3689 { ISD::SMIN, MVT::v4i64, { 2, 7, 2, 3 } },
3690 { ISD::SMIN, MVT::v8i32, { 1, 1, 1, 2 } },
3691 { ISD::SMIN, MVT::v16i16, { 1, 1, 1, 2 } },
3692 { ISD::SMIN, MVT::v32i8, { 1, 1, 1, 2 } },
3693 { ISD::SSUBSAT, MVT::v16i16, { 1 } },
3694 { ISD::SSUBSAT, MVT::v32i8, { 1 } },
3695 { ISD::UADDSAT, MVT::v16i16, { 1 } },
3696 { ISD::UADDSAT, MVT::v32i8, { 1 } },
3697 { ISD::UADDSAT, MVT::v8i32, { 3 } }, // not + pminud + paddd
3698 { ISD::UMAX, MVT::v2i64, { 2, 8, 5, 6 } },
3699 { ISD::UMAX, MVT::v4i64, { 2, 8, 5, 8 } },
3700 { ISD::UMAX, MVT::v8i32, { 1, 1, 1, 2 } },
3701 { ISD::UMAX, MVT::v16i16, { 1, 1, 1, 2 } },
3702 { ISD::UMAX, MVT::v32i8, { 1, 1, 1, 2 } },
3703 { ISD::UMIN, MVT::v2i64, { 2, 8, 5, 6 } },
3704 { ISD::UMIN, MVT::v4i64, { 2, 8, 5, 8 } },
3705 { ISD::UMIN, MVT::v8i32, { 1, 1, 1, 2 } },
3706 { ISD::UMIN, MVT::v16i16, { 1, 1, 1, 2 } },
3707 { ISD::UMIN, MVT::v32i8, { 1, 1, 1, 2 } },
3708 { ISD::USUBSAT, MVT::v16i16, { 1 } },
3709 { ISD::USUBSAT, MVT::v32i8, { 1 } },
3710 { ISD::USUBSAT, MVT::v8i32, { 2 } }, // pmaxud + psubd
3711 { ISD::FMAXNUM, MVT::f32, { 2, 7, 3, 5 } }, // MAXSS + CMPUNORDSS + BLENDVPS
3712 { ISD::FMAXNUM, MVT::v4f32, { 2, 7, 3, 5 } }, // MAXPS + CMPUNORDPS + BLENDVPS
3713 { ISD::FMAXNUM, MVT::v8f32, { 3, 7, 3, 6 } }, // MAXPS + CMPUNORDPS + BLENDVPS
3714 { ISD::FMAXNUM, MVT::f64, { 2, 7, 3, 5 } }, // MAXSD + CMPUNORDSD + BLENDVPD
3715 { ISD::FMAXNUM, MVT::v2f64, { 2, 7, 3, 5 } }, // MAXPD + CMPUNORDPD + BLENDVPD
3716 { ISD::FMAXNUM, MVT::v4f64, { 3, 7, 3, 6 } }, // MAXPD + CMPUNORDPD + BLENDVPD
3717 { ISD::FSQRT, MVT::f32, { 7, 15, 1, 1 } }, // vsqrtss
3718 { ISD::FSQRT, MVT::v4f32, { 7, 15, 1, 1 } }, // vsqrtps
3719 { ISD::FSQRT, MVT::v8f32, { 14, 21, 1, 3 } }, // vsqrtps
3720 { ISD::FSQRT, MVT::f64, { 14, 21, 1, 1 } }, // vsqrtsd
3721 { ISD::FSQRT, MVT::v2f64, { 14, 21, 1, 1 } }, // vsqrtpd
3722 { ISD::FSQRT, MVT::v4f64, { 28, 35, 1, 3 } }, // vsqrtpd
3723 };
3724 static const CostKindTblEntry AVX1CostTbl[] = {
3725 { ISD::ABS, MVT::v4i64, { 6, 8, 6, 12 } }, // VBLENDVPD(X,VPSUBQ(0,X),X)
3726 { ISD::ABS, MVT::v8i32, { 3, 6, 4, 5 } },
3727 { ISD::ABS, MVT::v16i16, { 3, 6, 4, 5 } },
3728 { ISD::ABS, MVT::v32i8, { 3, 6, 4, 5 } },
3729 { ISD::BITREVERSE, MVT::v4i64, { 17, 20, 20, 33 } }, // 2 x 128-bit Op + extract/insert
3730 { ISD::BITREVERSE, MVT::v2i64, { 8, 13, 10, 16 } },
3731 { ISD::BITREVERSE, MVT::v8i32, { 17, 20, 20, 33 } }, // 2 x 128-bit Op + extract/insert
3732 { ISD::BITREVERSE, MVT::v4i32, { 8, 13, 10, 16 } },
3733 { ISD::BITREVERSE, MVT::v16i16, { 17, 20, 20, 33 } }, // 2 x 128-bit Op + extract/insert
3734 { ISD::BITREVERSE, MVT::v8i16, { 8, 13, 10, 16 } },
3735 { ISD::BITREVERSE, MVT::v32i8, { 13, 15, 17, 26 } }, // 2 x 128-bit Op + extract/insert
3736 { ISD::BITREVERSE, MVT::v16i8, { 7, 7, 9, 13 } },
3737 { ISD::BSWAP, MVT::v4i64, { 5, 6, 5, 10 } },
3738 { ISD::BSWAP, MVT::v2i64, { 2, 2, 1, 3 } },
3739 { ISD::BSWAP, MVT::v8i32, { 5, 6, 5, 10 } },
3740 { ISD::BSWAP, MVT::v4i32, { 2, 2, 1, 3 } },
3741 { ISD::BSWAP, MVT::v16i16, { 5, 6, 5, 10 } },
3742 { ISD::BSWAP, MVT::v8i16, { 2, 2, 1, 3 } },
3743 { ISD::CTLZ, MVT::v4i64, { 29, 33, 49, 58 } }, // 2 x 128-bit Op + extract/insert
3744 { ISD::CTLZ, MVT::v2i64, { 14, 24, 24, 28 } },
3745 { ISD::CTLZ, MVT::v8i32, { 24, 28, 39, 48 } }, // 2 x 128-bit Op + extract/insert
3746 { ISD::CTLZ, MVT::v4i32, { 12, 20, 19, 23 } },
3747 { ISD::CTLZ, MVT::v16i16, { 19, 22, 29, 38 } }, // 2 x 128-bit Op + extract/insert
3748 { ISD::CTLZ, MVT::v8i16, { 9, 16, 14, 18 } },
3749 { ISD::CTLZ, MVT::v32i8, { 14, 15, 19, 28 } }, // 2 x 128-bit Op + extract/insert
3750 { ISD::CTLZ, MVT::v16i8, { 7, 12, 9, 13 } },
3751 { ISD::CTPOP, MVT::v4i64, { 14, 18, 19, 28 } }, // 2 x 128-bit Op + extract/insert
3752 { ISD::CTPOP, MVT::v2i64, { 7, 14, 10, 14 } },
3753 { ISD::CTPOP, MVT::v8i32, { 18, 24, 27, 36 } }, // 2 x 128-bit Op + extract/insert
3754 { ISD::CTPOP, MVT::v4i32, { 9, 20, 14, 18 } },
3755 { ISD::CTPOP, MVT::v16i16, { 16, 21, 22, 31 } }, // 2 x 128-bit Op + extract/insert
3756 { ISD::CTPOP, MVT::v8i16, { 8, 18, 11, 15 } },
3757 { ISD::CTPOP, MVT::v32i8, { 13, 15, 16, 25 } }, // 2 x 128-bit Op + extract/insert
3758 { ISD::CTPOP, MVT::v16i8, { 6, 12, 8, 12 } },
3759 { ISD::CTTZ, MVT::v4i64, { 17, 22, 24, 33 } }, // 2 x 128-bit Op + extract/insert
3760 { ISD::CTTZ, MVT::v2i64, { 9, 19, 13, 17 } },
3761 { ISD::CTTZ, MVT::v8i32, { 21, 27, 32, 41 } }, // 2 x 128-bit Op + extract/insert
3762 { ISD::CTTZ, MVT::v4i32, { 11, 24, 17, 21 } },
3763 { ISD::CTTZ, MVT::v16i16, { 18, 24, 27, 36 } }, // 2 x 128-bit Op + extract/insert
3764 { ISD::CTTZ, MVT::v8i16, { 9, 21, 14, 18 } },
3765 { ISD::CTTZ, MVT::v32i8, { 15, 18, 21, 30 } }, // 2 x 128-bit Op + extract/insert
3766 { ISD::CTTZ, MVT::v16i8, { 8, 16, 11, 15 } },
3767 { ISD::SADDSAT, MVT::v16i16, { 4 } }, // 2 x 128-bit Op + extract/insert
3768 { ISD::SADDSAT, MVT::v32i8, { 4 } }, // 2 x 128-bit Op + extract/insert
3769 { ISD::SMAX, MVT::v4i64, { 6, 9, 6, 12 } }, // 2 x 128-bit Op + extract/insert
3770 { ISD::SMAX, MVT::v2i64, { 3, 7, 2, 4 } },
3771 { ISD::SMAX, MVT::v8i32, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3772 { ISD::SMAX, MVT::v16i16, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3773 { ISD::SMAX, MVT::v32i8, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3774 { ISD::SMIN, MVT::v4i64, { 6, 9, 6, 12 } }, // 2 x 128-bit Op + extract/insert
3775 { ISD::SMIN, MVT::v2i64, { 3, 7, 2, 3 } },
3776 { ISD::SMIN, MVT::v8i32, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3777 { ISD::SMIN, MVT::v16i16, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3778 { ISD::SMIN, MVT::v32i8, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3779 { ISD::SSUBSAT, MVT::v16i16, { 4 } }, // 2 x 128-bit Op + extract/insert
3780 { ISD::SSUBSAT, MVT::v32i8, { 4 } }, // 2 x 128-bit Op + extract/insert
3781 { ISD::UADDSAT, MVT::v16i16, { 4 } }, // 2 x 128-bit Op + extract/insert
3782 { ISD::UADDSAT, MVT::v32i8, { 4 } }, // 2 x 128-bit Op + extract/insert
3783 { ISD::UADDSAT, MVT::v8i32, { 8 } }, // 2 x 128-bit Op + extract/insert
3784 { ISD::UMAX, MVT::v4i64, { 9, 10, 11, 17 } }, // 2 x 128-bit Op + extract/insert
3785 { ISD::UMAX, MVT::v2i64, { 4, 8, 5, 7 } },
3786 { ISD::UMAX, MVT::v8i32, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3787 { ISD::UMAX, MVT::v16i16, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3788 { ISD::UMAX, MVT::v32i8, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3789 { ISD::UMIN, MVT::v4i64, { 9, 10, 11, 17 } }, // 2 x 128-bit Op + extract/insert
3790 { ISD::UMIN, MVT::v2i64, { 4, 8, 5, 7 } },
3791 { ISD::UMIN, MVT::v8i32, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3792 { ISD::UMIN, MVT::v16i16, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3793 { ISD::UMIN, MVT::v32i8, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3794 { ISD::USUBSAT, MVT::v16i16, { 4 } }, // 2 x 128-bit Op + extract/insert
3795 { ISD::USUBSAT, MVT::v32i8, { 4 } }, // 2 x 128-bit Op + extract/insert
3796 { ISD::USUBSAT, MVT::v8i32, { 6 } }, // 2 x 128-bit Op + extract/insert
3797 { ISD::FMAXNUM, MVT::f32, { 3, 6, 3, 5 } }, // MAXSS + CMPUNORDSS + BLENDVPS
3798 { ISD::FMAXNUM, MVT::v4f32, { 3, 6, 3, 5 } }, // MAXPS + CMPUNORDPS + BLENDVPS
3799 { ISD::FMAXNUM, MVT::v8f32, { 5, 7, 3, 10 } }, // MAXPS + CMPUNORDPS + BLENDVPS
3800 { ISD::FMAXNUM, MVT::f64, { 3, 6, 3, 5 } }, // MAXSD + CMPUNORDSD + BLENDVPD
3801 { ISD::FMAXNUM, MVT::v2f64, { 3, 6, 3, 5 } }, // MAXPD + CMPUNORDPD + BLENDVPD
3802 { ISD::FMAXNUM, MVT::v4f64, { 5, 7, 3, 10 } }, // MAXPD + CMPUNORDPD + BLENDVPD
3803 { ISD::FSQRT, MVT::f32, { 21, 21, 1, 1 } }, // vsqrtss
3804 { ISD::FSQRT, MVT::v4f32, { 21, 21, 1, 1 } }, // vsqrtps
3805 { ISD::FSQRT, MVT::v8f32, { 42, 42, 1, 3 } }, // vsqrtps
3806 { ISD::FSQRT, MVT::f64, { 27, 27, 1, 1 } }, // vsqrtsd
3807 { ISD::FSQRT, MVT::v2f64, { 27, 27, 1, 1 } }, // vsqrtpd
3808 { ISD::FSQRT, MVT::v4f64, { 54, 54, 1, 3 } }, // vsqrtpd
3809 };
3810 static const CostKindTblEntry GLMCostTbl[] = {
3811 { ISD::FSQRT, MVT::f32, { 19, 20, 1, 1 } }, // sqrtss
3812 { ISD::FSQRT, MVT::v4f32, { 37, 41, 1, 5 } }, // sqrtps
3813 { ISD::FSQRT, MVT::f64, { 34, 35, 1, 1 } }, // sqrtsd
3814 { ISD::FSQRT, MVT::v2f64, { 67, 71, 1, 5 } }, // sqrtpd
3815 };
3816 static const CostKindTblEntry SLMCostTbl[] = {
3817 { ISD::BSWAP, MVT::v2i64, { 5, 5, 1, 5 } },
3818 { ISD::BSWAP, MVT::v4i32, { 5, 5, 1, 5 } },
3819 { ISD::BSWAP, MVT::v8i16, { 5, 5, 1, 5 } },
3820 { ISD::FSQRT, MVT::f32, { 20, 20, 1, 1 } }, // sqrtss
3821 { ISD::FSQRT, MVT::v4f32, { 40, 41, 1, 5 } }, // sqrtps
3822 { ISD::FSQRT, MVT::f64, { 35, 35, 1, 1 } }, // sqrtsd
3823 { ISD::FSQRT, MVT::v2f64, { 70, 71, 1, 5 } }, // sqrtpd
3824 };
3825 static const CostKindTblEntry SSE42CostTbl[] = {
3826 { ISD::USUBSAT, MVT::v4i32, { 2 } }, // pmaxud + psubd
3827 { ISD::UADDSAT, MVT::v4i32, { 3 } }, // not + pminud + paddd
3828 { ISD::FMAXNUM, MVT::f32, { 5, 5, 7, 7 } }, // MAXSS + CMPUNORDSS + BLENDVPS
3829 { ISD::FMAXNUM, MVT::v4f32, { 4, 4, 4, 5 } }, // MAXPS + CMPUNORDPS + BLENDVPS
3830 { ISD::FMAXNUM, MVT::f64, { 5, 5, 7, 7 } }, // MAXSD + CMPUNORDSD + BLENDVPD
3831 { ISD::FMAXNUM, MVT::v2f64, { 4, 4, 4, 5 } }, // MAXPD + CMPUNORDPD + BLENDVPD
3832 { ISD::FSQRT, MVT::f32, { 18, 18, 1, 1 } }, // Nehalem from http://www.agner.org/
3833 { ISD::FSQRT, MVT::v4f32, { 18, 18, 1, 1 } }, // Nehalem from http://www.agner.org/
3834 };
3835 static const CostKindTblEntry SSE41CostTbl[] = {
3836 { ISD::ABS, MVT::v2i64, { 3, 4, 3, 5 } }, // BLENDVPD(X,PSUBQ(0,X),X)
3837 { ISD::SMAX, MVT::v2i64, { 3, 7, 2, 3 } },
3838 { ISD::SMAX, MVT::v4i32, { 1, 1, 1, 1 } },
3839 { ISD::SMAX, MVT::v16i8, { 1, 1, 1, 1 } },
3840 { ISD::SMIN, MVT::v2i64, { 3, 7, 2, 3 } },
3841 { ISD::SMIN, MVT::v4i32, { 1, 1, 1, 1 } },
3842 { ISD::SMIN, MVT::v16i8, { 1, 1, 1, 1 } },
3843 { ISD::UMAX, MVT::v2i64, { 2, 11, 6, 7 } },
3844 { ISD::UMAX, MVT::v4i32, { 1, 1, 1, 1 } },
3845 { ISD::UMAX, MVT::v8i16, { 1, 1, 1, 1 } },
3846 { ISD::UMIN, MVT::v2i64, { 2, 11, 6, 7 } },
3847 { ISD::UMIN, MVT::v4i32, { 1, 1, 1, 1 } },
3848 { ISD::UMIN, MVT::v8i16, { 1, 1, 1, 1 } },
3849 };
3850 static const CostKindTblEntry SSSE3CostTbl[] = {
3851 { ISD::ABS, MVT::v4i32, { 1, 2, 1, 1 } },
3852 { ISD::ABS, MVT::v8i16, { 1, 2, 1, 1 } },
3853 { ISD::ABS, MVT::v16i8, { 1, 2, 1, 1 } },
3854 { ISD::BITREVERSE, MVT::v2i64, { 16, 20, 11, 21 } },
3855 { ISD::BITREVERSE, MVT::v4i32, { 16, 20, 11, 21 } },
3856 { ISD::BITREVERSE, MVT::v8i16, { 16, 20, 11, 21 } },
3857 { ISD::BITREVERSE, MVT::v16i8, { 11, 12, 10, 16 } },
3858 { ISD::BSWAP, MVT::v2i64, { 2, 3, 1, 5 } },
3859 { ISD::BSWAP, MVT::v4i32, { 2, 3, 1, 5 } },
3860 { ISD::BSWAP, MVT::v8i16, { 2, 3, 1, 5 } },
3861 { ISD::CTLZ, MVT::v2i64, { 18, 28, 28, 35 } },
3862 { ISD::CTLZ, MVT::v4i32, { 15, 20, 22, 28 } },
3863 { ISD::CTLZ, MVT::v8i16, { 13, 17, 16, 22 } },
3864 { ISD::CTLZ, MVT::v16i8, { 11, 15, 10, 16 } },
3865 { ISD::CTPOP, MVT::v2i64, { 13, 19, 12, 18 } },
3866 { ISD::CTPOP, MVT::v4i32, { 18, 24, 16, 22 } },
3867 { ISD::CTPOP, MVT::v8i16, { 13, 18, 14, 20 } },
3868 { ISD::CTPOP, MVT::v16i8, { 11, 12, 10, 16 } },
3869 { ISD::CTTZ, MVT::v2i64, { 13, 25, 15, 22 } },
3870 { ISD::CTTZ, MVT::v4i32, { 18, 26, 19, 25 } },
3871 { ISD::CTTZ, MVT::v8i16, { 13, 20, 17, 23 } },
3872 { ISD::CTTZ, MVT::v16i8, { 11, 16, 13, 19 } }
3873 };
3874 static const CostKindTblEntry SSE2CostTbl[] = {
3875 { ISD::ABS, MVT::v2i64, { 3, 6, 5, 5 } },
3876 { ISD::ABS, MVT::v4i32, { 1, 4, 4, 4 } },
3877 { ISD::ABS, MVT::v8i16, { 1, 2, 3, 3 } },
3878 { ISD::ABS, MVT::v16i8, { 1, 2, 3, 3 } },
3879 { ISD::BITREVERSE, MVT::v2i64, { 16, 20, 32, 32 } },
3880 { ISD::BITREVERSE, MVT::v4i32, { 16, 20, 30, 30 } },
3881 { ISD::BITREVERSE, MVT::v8i16, { 16, 20, 25, 25 } },
3882 { ISD::BITREVERSE, MVT::v16i8, { 11, 12, 21, 21 } },
3883 { ISD::BSWAP, MVT::v2i64, { 5, 6, 11, 11 } },
3884 { ISD::BSWAP, MVT::v4i32, { 5, 5, 9, 9 } },
3885 { ISD::BSWAP, MVT::v8i16, { 5, 5, 4, 5 } },
3886 { ISD::CTLZ, MVT::v2i64, { 10, 45, 36, 38 } },
3887 { ISD::CTLZ, MVT::v4i32, { 10, 45, 38, 40 } },
3888 { ISD::CTLZ, MVT::v8i16, { 9, 38, 32, 34 } },
3889 { ISD::CTLZ, MVT::v16i8, { 8, 39, 29, 32 } },
3890 { ISD::CTPOP, MVT::v2i64, { 12, 26, 16, 18 } },
3891 { ISD::CTPOP, MVT::v4i32, { 15, 29, 21, 23 } },
3892 { ISD::CTPOP, MVT::v8i16, { 13, 25, 18, 20 } },
3893 { ISD::CTPOP, MVT::v16i8, { 10, 21, 14, 16 } },
3894 { ISD::CTTZ, MVT::v2i64, { 14, 28, 19, 21 } },
3895 { ISD::CTTZ, MVT::v4i32, { 18, 31, 24, 26 } },
3896 { ISD::CTTZ, MVT::v8i16, { 16, 27, 21, 23 } },
3897 { ISD::CTTZ, MVT::v16i8, { 13, 23, 17, 19 } },
3898 { ISD::SADDSAT, MVT::v8i16, { 1 } },
3899 { ISD::SADDSAT, MVT::v16i8, { 1 } },
3900 { ISD::SMAX, MVT::v2i64, { 4, 8, 15, 15 } },
3901 { ISD::SMAX, MVT::v4i32, { 2, 4, 5, 5 } },
3902 { ISD::SMAX, MVT::v8i16, { 1, 1, 1, 1 } },
3903 { ISD::SMAX, MVT::v16i8, { 2, 4, 5, 5 } },
3904 { ISD::SMIN, MVT::v2i64, { 4, 8, 15, 15 } },
3905 { ISD::SMIN, MVT::v4i32, { 2, 4, 5, 5 } },
3906 { ISD::SMIN, MVT::v8i16, { 1, 1, 1, 1 } },
3907 { ISD::SMIN, MVT::v16i8, { 2, 4, 5, 5 } },
3908 { ISD::SSUBSAT, MVT::v8i16, { 1 } },
3909 { ISD::SSUBSAT, MVT::v16i8, { 1 } },
3910 { ISD::UADDSAT, MVT::v8i16, { 1 } },
3911 { ISD::UADDSAT, MVT::v16i8, { 1 } },
3912 { ISD::UMAX, MVT::v2i64, { 4, 8, 15, 15 } },
3913 { ISD::UMAX, MVT::v4i32, { 2, 5, 8, 8 } },
3914 { ISD::UMAX, MVT::v8i16, { 1, 3, 3, 3 } },
3915 { ISD::UMAX, MVT::v16i8, { 1, 1, 1, 1 } },
3916 { ISD::UMIN, MVT::v2i64, { 4, 8, 15, 15 } },
3917 { ISD::UMIN, MVT::v4i32, { 2, 5, 8, 8 } },
3918 { ISD::UMIN, MVT::v8i16, { 1, 3, 3, 3 } },
3919 { ISD::UMIN, MVT::v16i8, { 1, 1, 1, 1 } },
3920 { ISD::USUBSAT, MVT::v8i16, { 1 } },
3921 { ISD::USUBSAT, MVT::v16i8, { 1 } },
3922 { ISD::FMAXNUM, MVT::f64, { 5, 5, 7, 7 } },
3923 { ISD::FMAXNUM, MVT::v2f64, { 4, 6, 6, 6 } },
3924 { ISD::FSQRT, MVT::f64, { 32, 32, 1, 1 } }, // Nehalem from http://www.agner.org/
3925 { ISD::FSQRT, MVT::v2f64, { 32, 32, 1, 1 } }, // Nehalem from http://www.agner.org/
3926 };
3927 static const CostKindTblEntry SSE1CostTbl[] = {
3928 { ISD::FMAXNUM, MVT::f32, { 5, 5, 7, 7 } },
3929 { ISD::FMAXNUM, MVT::v4f32, { 4, 6, 6, 6 } },
3930 { ISD::FSQRT, MVT::f32, { 28, 30, 1, 2 } }, // Pentium III from http://www.agner.org/
3931 { ISD::FSQRT, MVT::v4f32, { 56, 56, 1, 2 } }, // Pentium III from http://www.agner.org/
3932 };
3933 static const CostKindTblEntry BMI64CostTbl[] = { // 64-bit targets
3934 { ISD::CTTZ, MVT::i64, { 1 } },
3935 };
3936 static const CostKindTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets
3937 { ISD::CTTZ, MVT::i32, { 1 } },
3938 { ISD::CTTZ, MVT::i16, { 1 } },
3939 { ISD::CTTZ, MVT::i8, { 1 } },
3940 };
3941 static const CostKindTblEntry LZCNT64CostTbl[] = { // 64-bit targets
3942 { ISD::CTLZ, MVT::i64, { 1 } },
3943 };
3944 static const CostKindTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets
3945 { ISD::CTLZ, MVT::i32, { 1 } },
3946 { ISD::CTLZ, MVT::i16, { 2 } },
3947 { ISD::CTLZ, MVT::i8, { 2 } },
3948 };
3949 static const CostKindTblEntry POPCNT64CostTbl[] = { // 64-bit targets
3950 { ISD::CTPOP, MVT::i64, { 1, 1, 1, 1 } }, // popcnt
3951 };
3952 static const CostKindTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets
3953 { ISD::CTPOP, MVT::i32, { 1, 1, 1, 1 } }, // popcnt
3954 { ISD::CTPOP, MVT::i16, { 1, 1, 2, 2 } }, // popcnt(zext())
3955 { ISD::CTPOP, MVT::i8, { 1, 1, 2, 2 } }, // popcnt(zext())
3956 };
3957 static const CostKindTblEntry X64CostTbl[] = { // 64-bit targets
3958 { ISD::ABS, MVT::i64, { 1, 2, 3, 4 } }, // SUB+CMOV
3959 { ISD::BITREVERSE, MVT::i64, { 10, 12, 20, 22 } },
3960 { ISD::BSWAP, MVT::i64, { 1, 2, 1, 2 } },
3961 { ISD::CTLZ, MVT::i64, { 4 } }, // BSR+XOR or BSR+XOR+CMOV
3962 { ISD::CTLZ_ZERO_UNDEF, MVT::i64,{ 1, 1, 1, 1 } }, // BSR+XOR
3963 { ISD::CTTZ, MVT::i64, { 3 } }, // TEST+BSF+CMOV/BRANCH
3964 { ISD::CTTZ_ZERO_UNDEF, MVT::i64,{ 1, 1, 1, 1 } }, // BSR
3965 { ISD::CTPOP, MVT::i64, { 10, 6, 19, 19 } },
3966 { ISD::ROTL, MVT::i64, { 2, 3, 1, 3 } },
3967 { ISD::ROTR, MVT::i64, { 2, 3, 1, 3 } },
3968 { X86ISD::VROTLI, MVT::i64, { 1, 1, 1, 1 } },
3969 { ISD::FSHL, MVT::i64, { 4, 4, 1, 4 } },
3970 { ISD::SMAX, MVT::i64, { 1, 3, 2, 3 } },
3971 { ISD::SMIN, MVT::i64, { 1, 3, 2, 3 } },
3972 { ISD::UMAX, MVT::i64, { 1, 3, 2, 3 } },
3973 { ISD::UMIN, MVT::i64, { 1, 3, 2, 3 } },
3974 { ISD::SADDO, MVT::i64, { 1 } },
3975 { ISD::UADDO, MVT::i64, { 1 } },
3976 { ISD::UMULO, MVT::i64, { 2 } }, // mulq + seto
3977 };
3978 static const CostKindTblEntry X86CostTbl[] = { // 32 or 64-bit targets
3979 { ISD::ABS, MVT::i32, { 1, 2, 3, 4 } }, // SUB+XOR+SRA or SUB+CMOV
3980 { ISD::ABS, MVT::i16, { 2, 2, 3, 4 } }, // SUB+XOR+SRA or SUB+CMOV
3981 { ISD::ABS, MVT::i8, { 2, 4, 4, 4 } }, // SUB+XOR+SRA
3982 { ISD::BITREVERSE, MVT::i32, { 9, 12, 17, 19 } },
3983 { ISD::BITREVERSE, MVT::i16, { 9, 12, 17, 19 } },
3984 { ISD::BITREVERSE, MVT::i8, { 7, 9, 13, 14 } },
3985 { ISD::BSWAP, MVT::i32, { 1, 1, 1, 1 } },
3986 { ISD::BSWAP, MVT::i16, { 1, 2, 1, 2 } }, // ROL
3987 { ISD::CTLZ, MVT::i32, { 4 } }, // BSR+XOR or BSR+XOR+CMOV
3988 { ISD::CTLZ, MVT::i16, { 4 } }, // BSR+XOR or BSR+XOR+CMOV
3989 { ISD::CTLZ, MVT::i8, { 4 } }, // BSR+XOR or BSR+XOR+CMOV
3990 { ISD::CTLZ_ZERO_UNDEF, MVT::i32,{ 1, 1, 1, 1 } }, // BSR+XOR
3991 { ISD::CTLZ_ZERO_UNDEF, MVT::i16,{ 2, 2, 3, 3 } }, // BSR+XOR
3992 { ISD::CTLZ_ZERO_UNDEF, MVT::i8, { 2, 2, 3, 3 } }, // BSR+XOR
3993 { ISD::CTTZ, MVT::i32, { 3 } }, // TEST+BSF+CMOV/BRANCH
3994 { ISD::CTTZ, MVT::i16, { 3 } }, // TEST+BSF+CMOV/BRANCH
3995 { ISD::CTTZ, MVT::i8, { 3 } }, // TEST+BSF+CMOV/BRANCH
3996 { ISD::CTTZ_ZERO_UNDEF, MVT::i32,{ 1, 1, 1, 1 } }, // BSF
3997 { ISD::CTTZ_ZERO_UNDEF, MVT::i16,{ 2, 2, 1, 1 } }, // BSF
3998 { ISD::CTTZ_ZERO_UNDEF, MVT::i8, { 2, 2, 1, 1 } }, // BSF
3999 { ISD::CTPOP, MVT::i32, { 8, 7, 15, 15 } },
4000 { ISD::CTPOP, MVT::i16, { 9, 8, 17, 17 } },
4001 { ISD::CTPOP, MVT::i8, { 7, 6, 13, 13 } },
4002 { ISD::ROTL, MVT::i32, { 2, 3, 1, 3 } },
4003 { ISD::ROTL, MVT::i16, { 2, 3, 1, 3 } },
4004 { ISD::ROTL, MVT::i8, { 2, 3, 1, 3 } },
4005 { ISD::ROTR, MVT::i32, { 2, 3, 1, 3 } },
4006 { ISD::ROTR, MVT::i16, { 2, 3, 1, 3 } },
4007 { ISD::ROTR, MVT::i8, { 2, 3, 1, 3 } },
4008 { X86ISD::VROTLI, MVT::i32, { 1, 1, 1, 1 } },
4009 { X86ISD::VROTLI, MVT::i16, { 1, 1, 1, 1 } },
4010 { X86ISD::VROTLI, MVT::i8, { 1, 1, 1, 1 } },
4011 { ISD::FSHL, MVT::i32, { 4, 4, 1, 4 } },
4012 { ISD::FSHL, MVT::i16, { 4, 4, 2, 5 } },
4013 { ISD::FSHL, MVT::i8, { 4, 4, 2, 5 } },
4014 { ISD::SMAX, MVT::i32, { 1, 2, 2, 3 } },
4015 { ISD::SMAX, MVT::i16, { 1, 4, 2, 4 } },
4016 { ISD::SMAX, MVT::i8, { 1, 4, 2, 4 } },
4017 { ISD::SMIN, MVT::i32, { 1, 2, 2, 3 } },
4018 { ISD::SMIN, MVT::i16, { 1, 4, 2, 4 } },
4019 { ISD::SMIN, MVT::i8, { 1, 4, 2, 4 } },
4020 { ISD::UMAX, MVT::i32, { 1, 2, 2, 3 } },
4021 { ISD::UMAX, MVT::i16, { 1, 4, 2, 4 } },
4022 { ISD::UMAX, MVT::i8, { 1, 4, 2, 4 } },
4023 { ISD::UMIN, MVT::i32, { 1, 2, 2, 3 } },
4024 { ISD::UMIN, MVT::i16, { 1, 4, 2, 4 } },
4025 { ISD::UMIN, MVT::i8, { 1, 4, 2, 4 } },
4026 { ISD::SADDO, MVT::i32, { 1 } },
4027 { ISD::SADDO, MVT::i16, { 1 } },
4028 { ISD::SADDO, MVT::i8, { 1 } },
4029 { ISD::UADDO, MVT::i32, { 1 } },
4030 { ISD::UADDO, MVT::i16, { 1 } },
4031 { ISD::UADDO, MVT::i8, { 1 } },
4032 { ISD::UMULO, MVT::i32, { 2 } }, // mul + seto
4033 { ISD::UMULO, MVT::i16, { 2 } },
4034 { ISD::UMULO, MVT::i8, { 2 } },
4035 };
4037 Type *RetTy = ICA.getReturnType();
4038 Type *OpTy = RetTy;
4039 Intrinsic::ID IID = ICA.getID();
4040 unsigned ISD = ISD::DELETED_NODE;
4041 switch (IID) {
4042 default:
4043 break;
4044 case Intrinsic::abs:
4045 ISD = ISD::ABS;
4046 break;
4047 case Intrinsic::bitreverse:
4048 ISD = ISD::BITREVERSE;
4049 break;
4050 case Intrinsic::bswap:
4051 ISD = ISD::BSWAP;
4052 break;
4053 case Intrinsic::ctlz:
4054 ISD = ISD::CTLZ;
4055 break;
4056 case Intrinsic::ctpop:
4057 ISD = ISD::CTPOP;
4058 break;
4059 case Intrinsic::cttz:
4060 ISD = ISD::CTTZ;
4061 break;
4062 case Intrinsic::fshl:
4063 ISD = ISD::FSHL;
4064 if (!ICA.isTypeBasedOnly()) {
4065 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
4066 if (Args[0] == Args[1]) {
4067 ISD = ISD::ROTL;
4068 // Handle scalar constant rotation amounts.
4069 // TODO: Handle vector + funnel-shift cases.
4070 if (isa_and_nonnull<ConstantInt>(Args[2]))
4071 ISD = X86ISD::VROTLI;
4072 }
4073 }
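// e.g. fshl(x, x, y) is a rotate-left; a constant rotation amount maps to the
// cheaper immediate form (X86ISD::VROTLI) costed in the tables above.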
4074 break;
4075 case Intrinsic::fshr:
4076 // FSHR has the same costs, so don't duplicate.
4077 ISD = ISD::FSHL;
4078 if (!ICA.isTypeBasedOnly()) {
4079 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
4080 if (Args[0] == Args[1]) {
4081 // Handle scalar constant rotation amount.
4082 // TODO: Handle vector + funnel-shift cases.
4083 ISD = ISD::ROTR;
4084 if (isa_and_nonnull<ConstantInt>(Args[2]))
4085 ISD = X86ISD::VROTLI;
4086 }
4087 }
4088 break;
4089 case Intrinsic::maxnum:
4090 case Intrinsic::minnum:
4091 // FMINNUM has the same costs, so don't duplicate.
4092 ISD = ISD::FMAXNUM;
4093 break;
4094 case Intrinsic::sadd_sat:
4095 ISD = ISD::SADDSAT;
4096 break;
4097 case Intrinsic::smax:
4098 ISD = ISD::SMAX;
4099 break;
4100 case Intrinsic::smin:
4101 ISD = ISD::SMIN;
4102 break;
4103 case Intrinsic::ssub_sat:
4104 ISD = ISD::SSUBSAT;
4105 break;
4106 case Intrinsic::uadd_sat:
4107 ISD = ISD::UADDSAT;
4108 break;
4109 case Intrinsic::umax:
4110 ISD = ISD::UMAX;
4111 break;
4112 case Intrinsic::umin:
4113 ISD = ISD::UMIN;
4114 break;
4115 case Intrinsic::usub_sat:
4116 ISD = ISD::USUBSAT;
4117 break;
4118 case Intrinsic::sqrt:
4119 ISD = ISD::FSQRT;
4120 break;
4121 case Intrinsic::sadd_with_overflow:
4122 case Intrinsic::ssub_with_overflow:
4123 // SSUBO has the same costs, so don't duplicate.
4124 ISD = ISD::SADDO;
4125 OpTy = RetTy->getContainedType(0);
4126 break;
4127 case Intrinsic::uadd_with_overflow:
4128 case Intrinsic::usub_with_overflow:
4129 // USUBO has the same costs, so don't duplicate.
4130 ISD = ISD::UADDO;
4131 OpTy = RetTy->getContainedType(0);
4132 break;
4133 case Intrinsic::umul_with_overflow:
4134 case Intrinsic::smul_with_overflow:
4135 // SMULO has the same costs, so don't duplicate.
4136 ISD = ISD::UMULO;
4137 OpTy = RetTy->getContainedType(0);
4138 break;
4139 }
4141 if (ISD != ISD::DELETED_NODE) {
4142 // Legalize the type.
4143 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(OpTy);
4144 MVT MTy = LT.second;
4146 // Attempt to lookup cost.
4147 if (ISD == ISD::BITREVERSE && ST->hasGFNI() && ST->hasSSSE3() &&
4148 MTy.isVector()) {
4149 // With PSHUFB the code is very similar for all types. If we have integer
4150 // byte operations, we just need a GF2P8AFFINEQB for vXi8. For other types
4151 // we also need a PSHUFB.
4152 unsigned Cost = MTy.getVectorElementType() == MVT::i8 ? 1 : 2;
4154 // Without byte operations, we need twice as many GF2P8AFFINEQB and PSHUFB
4155 // instructions. We also need an extract and an insert.
4156 if (!(MTy.is128BitVector() || (ST->hasAVX2() && MTy.is256BitVector()) ||
4157 (ST->hasBWI() && MTy.is512BitVector())))
4158 Cost = Cost * 2 + 2;
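// e.g. with GFNI+AVX512BW: v64i8 costs 1 (a single GF2P8AFFINEQB) and v32i16
// costs 2 (GF2P8AFFINEQB + PSHUFB); unsupported widths double up and add an
// extract/insert pair.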
4160 return LT.first * Cost;
4161 }
4163 // Without BMI/LZCNT see if we're only looking for a *_ZERO_UNDEF cost.
4164 if (((ISD == ISD::CTTZ && !ST->hasBMI()) ||
4165 (ISD == ISD::CTLZ && !ST->hasLZCNT())) &&
4166 !MTy.isVector() && !ICA.isTypeBasedOnly()) {
4167 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
4168 if (auto *Cst = dyn_cast<ConstantInt>(Args[1]))
4169 if (Cst->isAllOnesValue())
4170 ISD = ISD == ISD::CTTZ ? ISD::CTTZ_ZERO_UNDEF : ISD::CTLZ_ZERO_UNDEF;
4171 }
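// e.g. cttz with the zero-is-poison flag set lowers to a bare BSF, so the
// cheaper *_ZERO_UNDEF table entries apply.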
4173 // FSQRT is a single instruction.
4174 if (ISD == ISD::FSQRT && CostKind == TTI::TCK_CodeSize)
4175 return LT.first;
4177 auto adjustTableCost = [](int ISD, unsigned Cost,
4178 InstructionCost LegalizationCost,
4179 FastMathFlags FMF) {
4180 // If there are no NANs to deal with, then these are reduced to a
4181 // single MIN** or MAX** instruction instead of the MIN/CMP/SELECT that we
4182 // assume is used in the non-fast case.
4183 if (ISD == ISD::FMAXNUM || ISD == ISD::FMINNUM) {
4184 if (FMF.noNaNs())
4185 return LegalizationCost * 1;
4186 }
4187 return LegalizationCost * (int)Cost;
4188 };
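// e.g. maxnum with nnan reduces to a bare MAXPS/MAXSD, so only the
// type-legalization count is charged.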
4190 if (ST->useGLMDivSqrtCosts())
4191 if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
4192 if (auto KindCost = Entry->Cost[CostKind])
4193 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4194 ICA.getFlags());
4196 if (ST->useSLMArithCosts())
4197 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
4198 if (auto KindCost = Entry->Cost[CostKind])
4199 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4200 ICA.getFlags());
4202 if (ST->hasVBMI2())
4203 if (const auto *Entry = CostTableLookup(AVX512VBMI2CostTbl, ISD, MTy))
4204 if (auto KindCost = Entry->Cost[CostKind])
4205 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4206 ICA.getFlags());
4208 if (ST->hasBITALG())
4209 if (const auto *Entry = CostTableLookup(AVX512BITALGCostTbl, ISD, MTy))
4210 if (auto KindCost = Entry->Cost[CostKind])
4211 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4212 ICA.getFlags());
4214 if (ST->hasVPOPCNTDQ())
4215 if (const auto *Entry = CostTableLookup(AVX512VPOPCNTDQCostTbl, ISD, MTy))
4216 if (auto KindCost = Entry->Cost[CostKind])
4217 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4218 ICA.getFlags());
4220 if (ST->hasCDI())
4221 if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
4222 if (auto KindCost = Entry->Cost[CostKind])
4223 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4224 ICA.getFlags());
4226 if (ST->hasBWI())
4227 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
4228 if (auto KindCost = Entry->Cost[CostKind])
4229 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4230 ICA.getFlags());
4232 if (ST->hasAVX512())
4233 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
4234 if (auto KindCost = Entry->Cost[CostKind])
4235 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4236 ICA.getFlags());
4238 if (ST->hasXOP())
4239 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
4240 if (auto KindCost = Entry->Cost[CostKind])
4241 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4242 ICA.getFlags());
4244 if (ST->hasAVX2())
4245 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
4246 if (auto KindCost = Entry->Cost[CostKind])
4247 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4248 ICA.getFlags());
4250 if (ST->hasAVX())
4251 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
4252 if (auto KindCost = Entry->Cost[CostKind])
4253 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4254 ICA.getFlags());
4256 if (ST->hasSSE42())
4257 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
4258 if (auto KindCost = Entry->Cost[CostKind])
4259 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4260 ICA.getFlags());
4262 if (ST->hasSSE41())
4263 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
4264 if (auto KindCost = Entry->Cost[CostKind])
4265 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4266 ICA.getFlags());
4268 if (ST->hasSSSE3())
4269 if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
4270 if (auto KindCost = Entry->Cost[CostKind])
4271 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4272 ICA.getFlags());
4274 if (ST->hasSSE2())
4275 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
4276 if (auto KindCost = Entry->Cost[CostKind])
4277 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4278 ICA.getFlags());
4280 if (ST->hasSSE1())
4281 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
4282 if (auto KindCost = Entry->Cost[CostKind])
4283 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4284 ICA.getFlags());
4286 if (ST->hasBMI()) {
4287 if (ST->is64Bit())
4288 if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy))
4289 if (auto KindCost = Entry->Cost[CostKind])
4290 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4291 ICA.getFlags());
4293 if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy))
4294 if (auto KindCost = Entry->Cost[CostKind])
4295 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4296 ICA.getFlags());
4297 }
4299 if (ST->hasLZCNT()) {
4300 if (ST->is64Bit())
4301 if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy))
4302 if (auto KindCost = Entry->Cost[CostKind])
4303 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4304 ICA.getFlags());
4306 if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy))
4307 if (auto KindCost = Entry->Cost[CostKind])
4308 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4309 ICA.getFlags());
4310 }
4312 if (ST->hasPOPCNT()) {
4313 if (ST->is64Bit())
4314 if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy))
4315 if (auto KindCost = Entry->Cost[CostKind])
4316 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4317 ICA.getFlags());
4319 if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy))
4320 if (auto KindCost = Entry->Cost[CostKind])
4321 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4322 ICA.getFlags());
4323 }
4325 if (ISD == ISD::BSWAP && ST->hasMOVBE() && ST->hasFastMOVBE()) {
4326 if (const Instruction *II = ICA.getInst()) {
4327 if (II->hasOneUse() && isa<StoreInst>(II->user_back()))
4328 return TTI::TCC_Free;
4329 if (auto *LI = dyn_cast<LoadInst>(II->getOperand(0))) {
4330 if (LI->hasOneUse())
4331 return TTI::TCC_Free;
4332 }
4333 }
4334 }
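// e.g. a bswap whose only use is a store, or whose operand is a single-use
// load, folds into MOVBE and is treated as free.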
4336 if (ST->is64Bit())
4337 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
4338 if (auto KindCost = Entry->Cost[CostKind])
4339 return adjustTableCost(Entry->ISD, *KindCost, LT.first,
4340 ICA.getFlags());
4342 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
4343 if (auto KindCost = Entry->Cost[CostKind])
4344 return adjustTableCost(Entry->ISD, *KindCost, LT.first, ICA.getFlags());
4345 }
4347 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
4348 }
4350 InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
4351 TTI::TargetCostKind CostKind,
4352 unsigned Index, Value *Op0,
4353 Value *Op1) {
4354 static const CostTblEntry SLMCostTbl[] = {
4355 { ISD::EXTRACT_VECTOR_ELT, MVT::i8, 4 },
4356 { ISD::EXTRACT_VECTOR_ELT, MVT::i16, 4 },
4357 { ISD::EXTRACT_VECTOR_ELT, MVT::i32, 4 },
4358 { ISD::EXTRACT_VECTOR_ELT, MVT::i64, 7 }
4359 };
4361 assert(Val->isVectorTy() && "This must be a vector type");
4362 Type *ScalarType = Val->getScalarType();
4363 InstructionCost RegisterFileMoveCost = 0;
4365 // Non-immediate extraction/insertion can be handled as a sequence of
4366 // aliased loads+stores via the stack.
4367 if (Index == -1U && (Opcode == Instruction::ExtractElement ||
4368 Opcode == Instruction::InsertElement)) {
4369 // TODO: On some SSE41+ targets, we expand to cmp+splat+select patterns:
4370 // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0.
4372 // TODO: Move this to BasicTTIImpl.h? We'd need better gep + index handling.
4373 assert(isa<FixedVectorType>(Val) && "Fixed vector type expected");
4374 Align VecAlign = DL.getPrefTypeAlign(Val);
4375 Align SclAlign = DL.getPrefTypeAlign(ScalarType);
4377 // Extract - store vector to stack, load scalar.
4378 if (Opcode == Instruction::ExtractElement) {
4379 return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0, CostKind) +
4380 getMemoryOpCost(Instruction::Load, ScalarType, SclAlign, 0,
4381 CostKind);
4383 // Insert - store vector to stack, store scalar, load vector.
4384 if (Opcode == Instruction::InsertElement) {
4385 return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0, CostKind) +
4386 getMemoryOpCost(Instruction::Store, ScalarType, SclAlign, 0,
4387 CostKind) +
4388 getMemoryOpCost(Instruction::Load, Val, VecAlign, 0, CostKind);
4389 }
4390 }
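// e.g. extracting a variable-index element is modeled as spilling the vector
// to a stack slot and reloading the single scalar lane.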
4392 if (Index != -1U && (Opcode == Instruction::ExtractElement ||
4393 Opcode == Instruction::InsertElement)) {
4394 // Extraction of vXi1 elements are now efficiently handled by MOVMSK.
4395 if (Opcode == Instruction::ExtractElement &&
4396 ScalarType->getScalarSizeInBits() == 1 &&
4397 cast<FixedVectorType>(Val)->getNumElements() > 1)
4398 return 1;
4400 // Legalize the type.
4401 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Val);
4403 // This type is legalized to a scalar type.
4404 if (!LT.second.isVector())
4405 return 0;
4407 // The type may be split. Normalize the index to the new type.
4408 unsigned SizeInBits = LT.second.getSizeInBits();
4409 unsigned NumElts = LT.second.getVectorNumElements();
4410 unsigned SubNumElts = NumElts;
4411 Index = Index % NumElts;
4413 // For >128-bit vectors, we need to extract higher 128-bit subvectors.
4414 // For inserts, we also need to insert the subvector back.
4415 if (SizeInBits > 128) {
4416 assert((SizeInBits % 128) == 0 && "Illegal vector");
4417 unsigned NumSubVecs = SizeInBits / 128;
4418 SubNumElts = NumElts / NumSubVecs;
4419 if (SubNumElts <= Index) {
4420 RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1);
4421 Index %= SubNumElts;
4422 }
4423 }
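// e.g. extracting element 9 of a 512-bit v16i32 pays an extra subvector
// extract for its 128-bit lane and then indexes element 1 within that lane.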
4425 MVT MScalarTy = LT.second.getScalarType();
4426 auto IsCheapPInsrPExtrInsertPS = [&]() {
4427 // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets.
4428 // Also, assume insertps is relatively cheap on all >= SSE41 targets.
4429 return (MScalarTy == MVT::i16 && ST->hasSSE2()) ||
4430 (MScalarTy.isInteger() && ST->hasSSE41()) ||
4431 (MScalarTy == MVT::f32 && ST->hasSSE41() &&
4432 Opcode == Instruction::InsertElement);
4433 };
4435 if (Index == 0) {
4436 // Floating point scalars are already located in index #0.
4437 // Many insertions to #0 can fold away for scalar fp-ops, so let's assume
4438 // true for all.
4439 if (ScalarType->isFloatingPointTy() &&
4440 (Opcode != Instruction::InsertElement || !Op0 ||
4441 isa<UndefValue>(Op0)))
4442 return RegisterFileMoveCost;
4444 if (Opcode == Instruction::InsertElement &&
4445 isa_and_nonnull<UndefValue>(Op0)) {
4446 // Consider the gather cost to be cheap.
4447 if (isa_and_nonnull<LoadInst>(Op1))
4448 return RegisterFileMoveCost;
4449 if (!IsCheapPInsrPExtrInsertPS()) {
4450 // mov constant-to-GPR + movd/movq GPR -> XMM.
4451 if (isa_and_nonnull<Constant>(Op1) && Op1->getType()->isIntegerTy())
4452 return 2 + RegisterFileMoveCost;
4453 // Assume movd/movq GPR -> XMM is relatively cheap on all targets.
4454 return 1 + RegisterFileMoveCost;
4455 }
4456 }
4458 // Assume movd/movq XMM -> GPR is relatively cheap on all targets.
4459 if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement)
4460 return 1 + RegisterFileMoveCost;
4461 }
4463 int ISD = TLI->InstructionOpcodeToISD(Opcode);
4464 assert(ISD && "Unexpected vector opcode");
4465 if (ST->useSLMArithCosts())
4466 if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
4467 return Entry->Cost + RegisterFileMoveCost;
4469 // Consider cheap cases.
4470 if (IsCheapPInsrPExtrInsertPS())
4471 return 1 + RegisterFileMoveCost;
4473 // For extractions we just need to shuffle the element to index 0, which
4474 // should be very cheap (assume cost = 1). For insertions we need to shuffle
4475 // the elements to its destination. In both cases we must handle the
4476 // subvector move(s).
4477 // If the vector type is already less than 128-bits then don't reduce it.
4478 // TODO: Under what circumstances should we shuffle using the full width?
4479 InstructionCost ShuffleCost = 1;
4480 if (Opcode == Instruction::InsertElement) {
4481 auto *SubTy = cast<VectorType>(Val);
4482 EVT VT = TLI->getValueType(DL, Val);
4483 if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128)
4484 SubTy = FixedVectorType::get(ScalarType, SubNumElts);
4485 ShuffleCost = getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, std::nullopt,
4486 CostKind, 0, SubTy);
4487 }
4488 int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1;
4489 return ShuffleCost + IntOrFpCost + RegisterFileMoveCost;
4490 }
4492 return BaseT::getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1) +
4493 RegisterFileMoveCost;
4494 }
4496 InstructionCost
4497 X86TTIImpl::getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
4498 bool Insert, bool Extract,
4499 TTI::TargetCostKind CostKind) {
4500 assert(DemandedElts.getBitWidth() ==
4501 cast<FixedVectorType>(Ty)->getNumElements() &&
4502 "Vector size mismatch");
4504 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
4505 MVT MScalarTy = LT.second.getScalarType();
4506 unsigned LegalVectorBitWidth = LT.second.getSizeInBits();
4507 InstructionCost Cost = 0;
4509 constexpr unsigned LaneBitWidth = 128;
4510 assert((LegalVectorBitWidth < LaneBitWidth ||
4511 (LegalVectorBitWidth % LaneBitWidth) == 0) &&
4512 "Illegal vector");
4514 const int NumLegalVectors = *LT.first.getValue();
4515 assert(NumLegalVectors >= 0 && "Negative cost!");
4517 // For insertions, a ISD::BUILD_VECTOR style vector initialization can be much
4518 // cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
4519 if (Insert) {
4520 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
4521 (MScalarTy.isInteger() && ST->hasSSE41()) ||
4522 (MScalarTy == MVT::f32 && ST->hasSSE41())) {
4523 // For types we can insert directly, insertion into 128-bit sub vectors is
4524 // cheap, followed by a cheap chain of concatenations.
4525 if (LegalVectorBitWidth <= LaneBitWidth) {
4526 Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert,
4527 /*Extract*/ false, CostKind);
4528 } else {
4529 // In each 128-bit lane, if at least one index is demanded but not all
4530 // indices are demanded, and this lane is not the first 128-bit lane of
4531 // the legalized vector, then this lane needs an extracti128; if a
4532 // 128-bit lane has at least one demanded index, that lane also needs
4533 // an inserti128.
4535 // The following cases help build an understanding. Assume we insert
4536 // several elements into a v8i32 vector with AVX2:
4537 // Case#1: inserting into index 1 needs vpinsrd + inserti128.
4538 // Case#2: inserting into index 5 needs extracti128 + vpinsrd +
4539 // inserti128.
4540 // Case#3: inserting into indices 4-7 needs 4*vpinsrd + inserti128.
4541 assert((LegalVectorBitWidth % LaneBitWidth) == 0 && "Illegal vector");
4542 unsigned NumLegalLanes = LegalVectorBitWidth / LaneBitWidth;
4543 unsigned NumLanesTotal = NumLegalLanes * NumLegalVectors;
4544 unsigned NumLegalElts =
4545 LT.second.getVectorNumElements() * NumLegalVectors;
4546 assert(NumLegalElts >= DemandedElts.getBitWidth() &&
4547 "Vector has been legalized to smaller element count");
4548 assert((NumLegalElts % NumLanesTotal) == 0 &&
4549 "Unexpected elts per lane");
4550 unsigned NumEltsPerLane = NumLegalElts / NumLanesTotal;
4552 APInt WidenedDemandedElts = DemandedElts.zext(NumLegalElts);
4553 auto *LaneTy =
4554 FixedVectorType::get(Ty->getElementType(), NumEltsPerLane);
4556 for (unsigned I = 0; I != NumLanesTotal; ++I) {
4557 APInt LaneEltMask = WidenedDemandedElts.extractBits(
4558 NumEltsPerLane, NumEltsPerLane * I);
4559 if (LaneEltMask.isZero())
4560 continue;
4561 // FIXME: we don't need to extract if all non-demanded elements
4562 // are legalization-inserted padding.
4563 if (!LaneEltMask.isAllOnes())
4564 Cost += getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt,
4565 CostKind, I * NumEltsPerLane, LaneTy);
4566 Cost += BaseT::getScalarizationOverhead(LaneTy, LaneEltMask, Insert,
4567 /*Extract*/ false, CostKind);
4568 }
4570 APInt AffectedLanes =
4571 APIntOps::ScaleBitMask(WidenedDemandedElts, NumLanesTotal);
4572 APInt FullyAffectedLegalVectors = APIntOps::ScaleBitMask(
4573 AffectedLanes, NumLegalVectors, /*MatchAllBits=*/true);
4574 for (int LegalVec = 0; LegalVec != NumLegalVectors; ++LegalVec) {
4575 for (unsigned Lane = 0; Lane != NumLegalLanes; ++Lane) {
4576 unsigned I = NumLegalLanes * LegalVec + Lane;
4577 // No need to insert unaffected lane; or lane 0 of each legal vector
4578 // iff ALL lanes of that vector were affected and will be inserted.
4579 if (!AffectedLanes[I] ||
4580 (Lane == 0 && FullyAffectedLegalVectors[LegalVec]))
4581 continue;
4582 Cost += getShuffleCost(TTI::SK_InsertSubvector, Ty, std::nullopt,
4583 CostKind, I * NumEltsPerLane, LaneTy);
4584 }
4585 }
4586 }
4587 } else if (LT.second.isVector()) {
4588 // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded
4589 // integer element as a SCALAR_TO_VECTOR, then we build the vector as a
4590 // series of UNPCK followed by CONCAT_VECTORS - all of these can be
4591 // considered cheap.
4592 if (Ty->isIntOrIntVectorTy())
4593 Cost += DemandedElts.popcount();
4595 // Get the smaller of the legalized or original pow2-extended number of
4596 // vector elements, which represents the number of unpacks we'll end up
4597 // performing.
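// e.g. fully scalarizing a v8i16 build on SSE2 costs 8 GPR->XMM moves plus
// 7 UNPCK/CONCAT steps.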
4598 unsigned NumElts = LT.second.getVectorNumElements();
4599 unsigned Pow2Elts =
4600 PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements());
4601 Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first;
4602 }
4603 }
4605 if (Extract) {
4606 // vXi1 can be efficiently extracted with MOVMSK.
4607 // TODO: AVX512 predicate mask handling.
4608 // NOTE: This doesn't work well for roundtrip scalarization.
4609 if (!Insert && Ty->getScalarSizeInBits() == 1 && !ST->hasAVX512()) {
4610 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
4611 unsigned MaxElts = ST->hasAVX2() ? 32 : 16;
4612 unsigned MOVMSKCost = (NumElts + MaxElts - 1) / MaxElts;
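// e.g. v32i1 costs 1 (VPMOVMSKB) with AVX2, or 2 PMOVMSKBs with SSE2.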
4613 return MOVMSKCost;
4614 }
4616 if (LT.second.isVector()) {
4617 unsigned NumLegalElts =
4618 LT.second.getVectorNumElements() * NumLegalVectors;
4619 assert(NumLegalElts >= DemandedElts.getBitWidth() &&
4620 "Vector has been legalized to smaller element count");
4622 // If we're extracting elements from a 128-bit subvector lane,
4623 // we only need to extract each lane once, not for every element.
4624 if (LegalVectorBitWidth > LaneBitWidth) {
4625 unsigned NumLegalLanes = LegalVectorBitWidth / LaneBitWidth;
4626 unsigned NumLanesTotal = NumLegalLanes * NumLegalVectors;
4627 assert((NumLegalElts % NumLanesTotal) == 0 &&
4628 "Unexpected elts per lane");
4629 unsigned NumEltsPerLane = NumLegalElts / NumLanesTotal;
4631 // Add cost for each demanded 128-bit subvector extraction.
4632 // Luckily this is a lot easier than for insertion.
4633 APInt WidenedDemandedElts = DemandedElts.zext(NumLegalElts);
4634 auto *LaneTy =
4635 FixedVectorType::get(Ty->getElementType(), NumEltsPerLane);
4637 for (unsigned I = 0; I != NumLanesTotal; ++I) {
4638 APInt LaneEltMask = WidenedDemandedElts.extractBits(
4639 NumEltsPerLane, I * NumEltsPerLane);
4640 if (LaneEltMask.isZero())
4641 continue;
4642 Cost += getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt,
4643 CostKind, I * NumEltsPerLane, LaneTy);
4644 Cost += BaseT::getScalarizationOverhead(
4645 LaneTy, LaneEltMask, /*Insert*/ false, Extract, CostKind);
4646 }
4648 return Cost;
4649 }
4650 }
4652 // Fallback to default extraction.
4653 Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, /*Insert*/ false,
4654 Extract, CostKind);
4655 }
4657 return Cost;
4658 }
4660 InstructionCost
4661 X86TTIImpl::getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
4662 int VF, const APInt &DemandedDstElts,
4663 TTI::TargetCostKind CostKind) {
4664 const unsigned EltTyBits = DL.getTypeSizeInBits(EltTy);
4665 // We don't differentiate element types here, only element bit width.
4666 EltTy = IntegerType::getIntNTy(EltTy->getContext(), EltTyBits);
4668 auto bailout = [&]() {
4669 return BaseT::getReplicationShuffleCost(EltTy, ReplicationFactor, VF,
4670 DemandedDstElts, CostKind);
4671 };
4673 // For now, only deal with AVX512 cases.
4674 if (!ST->hasAVX512())
4675 return bailout();
4677 // Do we have a native shuffle for this element type, or should we promote?
4678 unsigned PromEltTyBits = EltTyBits;
4679 switch (EltTyBits) {
4680 case 32:
4681 case 64:
4682 break; // AVX512F.
4683 case 16:
4684 if (!ST->hasBWI())
4685 PromEltTyBits = 32; // promote to i32, AVX512F.
4686 break; // AVX512BW
4687 case 8:
4688 if (!ST->hasVBMI())
4689 PromEltTyBits = 32; // promote to i32, AVX512F.
4690 break; // AVX512VBMI
4691 case 1:
4692 // There is no support for shuffling i1 elements. We *must* promote.
4693 if (ST->hasBWI()) {
4694 if (ST->hasVBMI())
4695 PromEltTyBits = 8; // promote to i8, AVX512VBMI.
4696 else
4697 PromEltTyBits = 16; // promote to i16, AVX512BW.
4698 break;
4699 }
4700 PromEltTyBits = 32; // promote to i32, AVX512F.
4701 break;
4702 default:
4703 return bailout();
4704 }
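// e.g. i8 elements without AVX512VBMI take the promotion path below: widen
// to i32, shuffle, then truncate back.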
4705 auto *PromEltTy = IntegerType::getIntNTy(EltTy->getContext(), PromEltTyBits);
4707 auto *SrcVecTy = FixedVectorType::get(EltTy, VF);
4708 auto *PromSrcVecTy = FixedVectorType::get(PromEltTy, VF);
4710 int NumDstElements = VF * ReplicationFactor;
4711 auto *PromDstVecTy = FixedVectorType::get(PromEltTy, NumDstElements);
4712 auto *DstVecTy = FixedVectorType::get(EltTy, NumDstElements);
4714 // Legalize the types.
4715 MVT LegalSrcVecTy = getTypeLegalizationCost(SrcVecTy).second;
4716 MVT LegalPromSrcVecTy = getTypeLegalizationCost(PromSrcVecTy).second;
4717 MVT LegalPromDstVecTy = getTypeLegalizationCost(PromDstVecTy).second;
4718 MVT LegalDstVecTy = getTypeLegalizationCost(DstVecTy).second;
4719 // They should have legalized into vector types.
4720 if (!LegalSrcVecTy.isVector() || !LegalPromSrcVecTy.isVector() ||
4721 !LegalPromDstVecTy.isVector() || !LegalDstVecTy.isVector())
4722 return bailout();
4724 if (PromEltTyBits != EltTyBits) {
4725 // If we have to perform the shuffle with wider elt type than our data type,
4726 // then we will first need to anyext (we don't care about the new bits)
4727 // the source elements, and then truncate Dst elements.
4728 InstructionCost PromotionCost;
4729 PromotionCost += getCastInstrCost(
4730 Instruction::SExt, /*Dst=*/PromSrcVecTy, /*Src=*/SrcVecTy,
4731 TargetTransformInfo::CastContextHint::None, CostKind);
4732 PromotionCost +=
4733 getCastInstrCost(Instruction::Trunc, /*Dst=*/DstVecTy,
4734 /*Src=*/PromDstVecTy,
4735 TargetTransformInfo::CastContextHint::None, CostKind);
4736 return PromotionCost + getReplicationShuffleCost(PromEltTy,
4737 ReplicationFactor, VF,
4738 DemandedDstElts, CostKind);
4739 }
4741 assert(LegalSrcVecTy.getScalarSizeInBits() == EltTyBits &&
4742 LegalSrcVecTy.getScalarType() == LegalDstVecTy.getScalarType() &&
4743 "We expect that the legalization doesn't affect the element width, "
4744 "doesn't coalesce/split elements.");
4746 unsigned NumEltsPerDstVec = LegalDstVecTy.getVectorNumElements();
4747 unsigned NumDstVectors =
4748 divideCeil(DstVecTy->getNumElements(), NumEltsPerDstVec);
4750 auto *SingleDstVecTy = FixedVectorType::get(EltTy, NumEltsPerDstVec);
4752 // Not all the produced Dst elements may be demanded. In our case,
4753 // given that a single Dst vector is formed by a single shuffle,
4754 // if all elements that will form a single Dst vector aren't demanded,
4755 // then we won't need to do that shuffle, so adjust the cost accordingly.
4756 APInt DemandedDstVectors = APIntOps::ScaleBitMask(
4757 DemandedDstElts.zext(NumDstVectors * NumEltsPerDstVec), NumDstVectors);
4758 unsigned NumDstVectorsDemanded = DemandedDstVectors.popcount();
4760 InstructionCost SingleShuffleCost = getShuffleCost(
4761 TTI::SK_PermuteSingleSrc, SingleDstVecTy, /*Mask=*/std::nullopt, CostKind,
4762 /*Index=*/0, /*SubTp=*/nullptr);
4763 return NumDstVectorsDemanded * SingleShuffleCost;
4764 }
4766 InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
4767 MaybeAlign Alignment,
4768 unsigned AddressSpace,
4769 TTI::TargetCostKind CostKind,
4770 TTI::OperandValueInfo OpInfo,
4771 const Instruction *I) {
4772 // TODO: Handle other cost kinds.
4773 if (CostKind != TTI::TCK_RecipThroughput) {
4774 if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
4775 // Store instruction with index and scale costs 2 Uops.
4776 // Check the preceding GEP to identify non-const indices.
4777 if (auto *GEP = dyn_cast<GetElementPtrInst>(SI->getPointerOperand())) {
4778 if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
4779 return TTI::TCC_Basic * 2;
4780 }
4781 }
4782 return TTI::TCC_Basic;
4783 }
4785 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
4786 "Invalid Opcode");
4787 // Type legalization can't handle structs
4788 if (TLI->getValueType(DL, Src, true) == MVT::Other)
4789 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
4790 CostKind);
4792 // Legalize the type.
4793 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
4795 auto *VTy = dyn_cast<FixedVectorType>(Src);
4797 InstructionCost Cost = 0;
4799 // Add a cost for constant load to vector.
4800 if (Opcode == Instruction::Store && OpInfo.isConstant())
4801 Cost += getMemoryOpCost(Instruction::Load, Src, DL.getABITypeAlign(Src),
4802 /*AddressSpace=*/0, CostKind);
4804 // Handle the simple case of non-vectors.
4805 // NOTE: this assumes that legalization never creates a vector from scalars!
4806 if (!VTy || !LT.second.isVector()) {
4807 // Each load/store unit costs 1.
4808 return (LT.second.isFloatingPoint() ? Cost : 0) + LT.first * 1;
4809 }
4811 bool IsLoad = Opcode == Instruction::Load;
4813 Type *EltTy = VTy->getElementType();
4815 const int EltTyBits = DL.getTypeSizeInBits(EltTy);
4817 // Source of truth: how many elements were there in the original IR vector?
4818 const unsigned SrcNumElt = VTy->getNumElements();
4820 // How far have we gotten?
4821 int NumEltRemaining = SrcNumElt;
4822 // Note that we intentionally capture by reference, since NumEltRemaining changes.
4823 auto NumEltDone = [&]() { return SrcNumElt - NumEltRemaining; };
4825 const int MaxLegalOpSizeBytes = divideCeil(LT.second.getSizeInBits(), 8);
4827 // Note that even if we can store 64 bits of an XMM, we still operate on XMM.
4828 const unsigned XMMBits = 128;
4829 if (XMMBits % EltTyBits != 0)
4830 // Vector size must be a multiple of the element size. I.e. no padding.
4831 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
4832 CostKind);
4833 const int NumEltPerXMM = XMMBits / EltTyBits;
4835 auto *XMMVecTy = FixedVectorType::get(EltTy, NumEltPerXMM);
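// e.g. on SSE2 a naturally-aligned v3i32 load is costed as one full 16-byte
// load, while a v3i32 store splits into an 8-byte piece plus a 4-byte piece.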
4837 for (int CurrOpSizeBytes = MaxLegalOpSizeBytes, SubVecEltsLeft = 0;
4838 NumEltRemaining > 0; CurrOpSizeBytes /= 2) {
4839 // How many elements would a single op deal with at once?
4840 if ((8 * CurrOpSizeBytes) % EltTyBits != 0)
4841 // Vector size must be a multiple of the element size. I.e. no padding.
4842 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
4843 CostKind);
4844 int CurrNumEltPerOp = (8 * CurrOpSizeBytes) / EltTyBits;
4846 assert(CurrOpSizeBytes > 0 && CurrNumEltPerOp > 0 && "How'd we get here?");
4847 assert((((NumEltRemaining * EltTyBits) < (2 * 8 * CurrOpSizeBytes)) ||
4848 (CurrOpSizeBytes == MaxLegalOpSizeBytes)) &&
4849 "Unless we haven't halved the op size yet, "
4850 "we have less than two op's sized units of work left.");
4852 auto *CurrVecTy = CurrNumEltPerOp > NumEltPerXMM
4853 ? FixedVectorType::get(EltTy, CurrNumEltPerOp)
4854 : XMMVecTy;
4856 assert(CurrVecTy->getNumElements() % CurrNumEltPerOp == 0 &&
4857 "After halving sizes, the vector elt count is no longer a multiple "
4858 "of number of elements per operation?");
4859 auto *CoalescedVecTy =
4860 CurrNumEltPerOp == 1
4861 ? CurrVecTy
4862 : FixedVectorType::get(
4863 IntegerType::get(Src->getContext(),
4864 EltTyBits * CurrNumEltPerOp),
4865 CurrVecTy->getNumElements() / CurrNumEltPerOp);
4866 assert(DL.getTypeSizeInBits(CoalescedVecTy) ==
4867 DL.getTypeSizeInBits(CurrVecTy) &&
4868 "coalesciing elements doesn't change vector width.");
4870 while (NumEltRemaining > 0) {
4871 assert(SubVecEltsLeft >= 0 && "Subreg element count overconsumption?");
4873 // Can we use this vector size, as per the remaining element count?
4874 // Iff the vector is naturally aligned, we can do a wide load regardless.
4875 if (NumEltRemaining < CurrNumEltPerOp &&
4876 (!IsLoad || Alignment.valueOrOne() < CurrOpSizeBytes) &&
4877 CurrOpSizeBytes != 1)
4878 break; // Try smaller vector size.
4880 bool Is0thSubVec = (NumEltDone() % LT.second.getVectorNumElements()) == 0;
4882 // If we have fully processed the previous reg, we need to replenish it.
4883 if (SubVecEltsLeft == 0) {
4884 SubVecEltsLeft += CurrVecTy->getNumElements();
4885 // And that's free only for the 0'th subvector of a legalized vector.
4886 if (!Is0thSubVec)
4887 Cost += getShuffleCost(IsLoad ? TTI::ShuffleKind::SK_InsertSubvector
4888 : TTI::ShuffleKind::SK_ExtractSubvector,
4889 VTy, std::nullopt, CostKind, NumEltDone(),
4890 CurrVecTy);
4891 }
4893 // While we can directly load/store ZMM, YMM, and 64-bit halves of XMM,
4894 // for smaller widths (32/16/8) we have to insert/extract them separately.
4895 // Again, it's free for the 0'th subreg (if op is 32/64 bit wide,
4896 // but let's pretend that it is also true for 16/8 bit wide ops...)
4897 if (CurrOpSizeBytes <= 32 / 8 && !Is0thSubVec) {
4898 int NumEltDoneInCurrXMM = NumEltDone() % NumEltPerXMM;
4899 assert(NumEltDoneInCurrXMM % CurrNumEltPerOp == 0 && "");
4900 int CoalescedVecEltIdx = NumEltDoneInCurrXMM / CurrNumEltPerOp;
4901 APInt DemandedElts =
4902 APInt::getBitsSet(CoalescedVecTy->getNumElements(),
4903 CoalescedVecEltIdx, CoalescedVecEltIdx + 1);
4904 assert(DemandedElts.popcount() == 1 && "Inserting single value");
4905 Cost += getScalarizationOverhead(CoalescedVecTy, DemandedElts, IsLoad,
4906 !IsLoad, CostKind);
4907 }
4909 // This isn't exactly right. We're using slow unaligned 32-byte accesses
4910 // as a proxy for a double-pumped AVX memory interface such as on
4911 // Sandybridge.
4912 // Sub-32-bit loads/stores will be slower either with PINSR*/PEXTR* or
4913 // will be scalarized.
4914 if (CurrOpSizeBytes == 32 && ST->isUnalignedMem32Slow())
4915 Cost += 2;
4916 else if (CurrOpSizeBytes < 4)
4917 Cost += 2;
4918 else
4919 Cost += 1;
4921 SubVecEltsLeft -= CurrNumEltPerOp;
4922 NumEltRemaining -= CurrNumEltPerOp;
4923 Alignment = commonAlignment(Alignment.valueOrOne(), CurrOpSizeBytes);
4924 }
4925 }
4927 assert(NumEltRemaining <= 0 && "Should have processed all the elements.");
4929 return Cost;
4930 }
4932 InstructionCost
4933 X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
4934 unsigned AddressSpace,
4935 TTI::TargetCostKind CostKind) {
4936 bool IsLoad = (Instruction::Load == Opcode);
4937 bool IsStore = (Instruction::Store == Opcode);
4939 auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy);
4940 if (!SrcVTy)
4941 // For a scalar, take the regular cost without a mask.
4942 return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind);
4944 unsigned NumElem = SrcVTy->getNumElements();
4945 auto *MaskTy =
4946 FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
4947 if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) ||
4948 (IsStore && !isLegalMaskedStore(SrcVTy, Alignment))) {
4949 // Scalarization
4950 APInt DemandedElts = APInt::getAllOnes(NumElem);
4951 InstructionCost MaskSplitCost = getScalarizationOverhead(
4952 MaskTy, DemandedElts, /*Insert*/ false, /*Extract*/ true, CostKind);
4953 InstructionCost ScalarCompareCost = getCmpSelInstrCost(
4954 Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr,
4955 CmpInst::BAD_ICMP_PREDICATE, CostKind);
4956 InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
4957 InstructionCost MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
4958 InstructionCost ValueSplitCost = getScalarizationOverhead(
4959 SrcVTy, DemandedElts, IsLoad, IsStore, CostKind);
4960 InstructionCost MemopCost =
4961 NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4962 Alignment, AddressSpace, CostKind);
4963 return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
4964 }
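// e.g. each element of an illegal masked store expands to a mask-bit
// compare, a branch, and a scalar store, all costed above.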
4966 // Legalize the type.
4967 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(SrcVTy);
4968 auto VT = TLI->getValueType(DL, SrcVTy);
4969 InstructionCost Cost = 0;
4970 if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
4971 LT.second.getVectorNumElements() == NumElem)
4972 // Promotion requires extend/truncate for data and a shuffle for mask.
4973 Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, std::nullopt,
4974 CostKind, 0, nullptr) +
4975 getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, std::nullopt,
4976 CostKind, 0, nullptr);
4978 else if (LT.first * LT.second.getVectorNumElements() > NumElem) {
4979 auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(),
4980 LT.second.getVectorNumElements());
4981 // Expanding requires filling the mask with zeroes.
4982 Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, std::nullopt,
4983 CostKind, 0, MaskTy);
4984 }
4986 // Pre-AVX512 - each maskmov load costs 2 and each store costs ~8.
4987 if (!ST->hasAVX512())
4988 return Cost + LT.first * (IsLoad ? 2 : 8);
4990 // AVX-512 masked load/store is cheaper
4991 return Cost + LT.first;
4994 InstructionCost
4995 X86TTIImpl::getPointersChainCost(ArrayRef<const Value *> Ptrs,
4996 const Value *Base,
4997 const TTI::PointersChainInfo &Info,
4998 Type *AccessTy, TTI::TargetCostKind CostKind) {
4999 if (Info.isSameBase() && Info.isKnownStride()) {
5000 // If all the pointers have a known stride, all the differences are
5001 // translated into constants. X86 memory addressing allows encoding them
5002 // into the displacement, so we only need to take the base GEP cost.
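// Illustrative example (hypothetical IR): for a chain such as
//   %g0 = getelementptr i32, ptr %base, i64 0
//   %g1 = getelementptr i32, ptr %base, i64 4
//   %g2 = getelementptr i32, ptr %base, i64 8
// every access folds into a [base + constant displacement] addressing mode,
// so only the base GEP contributes any cost.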
5003 if (const auto *BaseGEP = dyn_cast<GetElementPtrInst>(Base)) {
5004 SmallVector<const Value *> Indices(BaseGEP->indices());
5005 return getGEPCost(BaseGEP->getSourceElementType(),
5006 BaseGEP->getPointerOperand(), Indices, nullptr,
5007 CostKind);
5009 return TTI::TCC_Free;
5011 return BaseT::getPointersChainCost(Ptrs, Base, Info, AccessTy, CostKind);
5014 InstructionCost X86TTIImpl::getAddressComputationCost(Type *Ty,
5015 ScalarEvolution *SE,
5016 const SCEV *Ptr) {
5017 // Address computations in vectorized code with non-consecutive addresses will
5018 // likely result in more instructions compared to scalar code where the
5019 // computation can more often be merged into the index mode. The resulting
5020 // extra micro-ops can significantly decrease throughput.
5021 const unsigned NumVectorInstToHideOverhead = 10;
5023 // The cost of strided access computation is hidden by the indexing
5024 // modes of X86 regardless of the stride value. We don't believe that there
5025 // is a difference between constant strided access in general and a
5026 // constant stride value which is less than or equal to 64.
5027 // Even in the case of a (loop invariant) stride whose value is not known at
5028 // compile time, the address computation will not incur more than one extra
5029 // ADD instruction.
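// Illustrative example (hypothetical): a gather whose lanes address
// unrelated locations needs a separate address computation per lane, which
// NumVectorInstToHideOverhead approximates, while a constant-stride access
// folds into the addressing mode and costs at most one extra ADD.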
5030 if (Ty->isVectorTy() && SE && !ST->hasAVX2()) {
5031 // TODO: AVX2 is the current cut-off because we don't have correct
5032 // interleaving costs for prior ISA's.
5033 if (!BaseT::isStridedAccess(Ptr))
5034 return NumVectorInstToHideOverhead;
5035 if (!BaseT::getConstantStrideStep(SE, Ptr))
5036 return 1;
5039 return BaseT::getAddressComputationCost(Ty, SE, Ptr);
5042 InstructionCost
5043 X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
5044 std::optional<FastMathFlags> FMF,
5045 TTI::TargetCostKind CostKind) {
5046 if (TTI::requiresOrderedReduction(FMF))
5047 return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
5049 // We use the Intel Architecture Code Analyzer (IACA) to measure the
5050 // throughput and use it as the cost.
5052 static const CostTblEntry SLMCostTbl[] = {
5053 { ISD::FADD, MVT::v2f64, 3 },
5054 { ISD::ADD, MVT::v2i64, 5 },
5057 static const CostTblEntry SSE2CostTbl[] = {
5058 { ISD::FADD, MVT::v2f64, 2 },
5059 { ISD::FADD, MVT::v2f32, 2 },
5060 { ISD::FADD, MVT::v4f32, 4 },
5061 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
5062 { ISD::ADD, MVT::v2i32, 2 }, // FIXME: chosen to be less than v4i32
5063 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
5064 { ISD::ADD, MVT::v2i16, 2 }, // The data reported by the IACA tool is "4.3".
5065 { ISD::ADD, MVT::v4i16, 3 }, // The data reported by the IACA tool is "4.3".
5066 { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
5067 { ISD::ADD, MVT::v2i8, 2 },
5068 { ISD::ADD, MVT::v4i8, 2 },
5069 { ISD::ADD, MVT::v8i8, 2 },
5070 { ISD::ADD, MVT::v16i8, 3 },
5073 static const CostTblEntry AVX1CostTbl[] = {
5074 { ISD::FADD, MVT::v4f64, 3 },
5075 { ISD::FADD, MVT::v4f32, 3 },
5076 { ISD::FADD, MVT::v8f32, 4 },
5077 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
5078 { ISD::ADD, MVT::v4i64, 3 },
5079 { ISD::ADD, MVT::v8i32, 5 },
5080 { ISD::ADD, MVT::v16i16, 5 },
5081 { ISD::ADD, MVT::v32i8, 4 },
5084 int ISD = TLI->InstructionOpcodeToISD(Opcode);
5085 assert(ISD && "Invalid opcode");
5087 // Before legalizing the type, give a chance to look up illegal narrow types
5088 // in the table.
5089 // FIXME: Is there a better way to do this?
5090 EVT VT = TLI->getValueType(DL, ValTy);
5091 if (VT.isSimple()) {
5092 MVT MTy = VT.getSimpleVT();
5093 if (ST->useSLMArithCosts())
5094 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
5095 return Entry->Cost;
5097 if (ST->hasAVX())
5098 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
5099 return Entry->Cost;
5101 if (ST->hasSSE2())
5102 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
5103 return Entry->Cost;
5106 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
5108 MVT MTy = LT.second;
5110 auto *ValVTy = cast<FixedVectorType>(ValTy);
5112 // Special case: vXi8 mul reductions are performed as vXi16.
5113 if (ISD == ISD::MUL && MTy.getScalarType() == MVT::i8) {
5114 auto *WideSclTy = IntegerType::get(ValVTy->getContext(), 16);
5115 auto *WideVecTy = FixedVectorType::get(WideSclTy, ValVTy->getNumElements());
5116 return getCastInstrCost(Instruction::ZExt, WideVecTy, ValTy,
5117 TargetTransformInfo::CastContextHint::None,
5118 CostKind) +
5119 getArithmeticReductionCost(Opcode, WideVecTy, FMF, CostKind);
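// Illustrative example (not from the original source): a v16i8 mul
// reduction is costed as a zext <16 x i8> -> <16 x i16> plus a v16i16 mul
// reduction, mirroring the widened lowering described above.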
5122 InstructionCost ArithmeticCost = 0;
5123 if (LT.first != 1 && MTy.isVector() &&
5124 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
5125 // Type needs to be split. We need LT.first - 1 arithmetic ops.
5126 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
5127 MTy.getVectorNumElements());
5128 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
5129 ArithmeticCost *= LT.first - 1;
5132 if (ST->useSLMArithCosts())
5133 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
5134 return ArithmeticCost + Entry->Cost;
5136 if (ST->hasAVX())
5137 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
5138 return ArithmeticCost + Entry->Cost;
5140 if (ST->hasSSE2())
5141 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
5142 return ArithmeticCost + Entry->Cost;
5144 // FIXME: These assume a naive kshift+binop lowering, which is probably
5145 // conservative in most cases.
5146 static const CostTblEntry AVX512BoolReduction[] = {
5147 { ISD::AND, MVT::v2i1, 3 },
5148 { ISD::AND, MVT::v4i1, 5 },
5149 { ISD::AND, MVT::v8i1, 7 },
5150 { ISD::AND, MVT::v16i1, 9 },
5151 { ISD::AND, MVT::v32i1, 11 },
5152 { ISD::AND, MVT::v64i1, 13 },
5153 { ISD::OR, MVT::v2i1, 3 },
5154 { ISD::OR, MVT::v4i1, 5 },
5155 { ISD::OR, MVT::v8i1, 7 },
5156 { ISD::OR, MVT::v16i1, 9 },
5157 { ISD::OR, MVT::v32i1, 11 },
5158 { ISD::OR, MVT::v64i1, 13 },
5161 static const CostTblEntry AVX2BoolReduction[] = {
5162 { ISD::AND, MVT::v16i16, 2 }, // vpmovmskb + cmp
5163 { ISD::AND, MVT::v32i8, 2 }, // vpmovmskb + cmp
5164 { ISD::OR, MVT::v16i16, 2 }, // vpmovmskb + cmp
5165 { ISD::OR, MVT::v32i8, 2 }, // vpmovmskb + cmp
5168 static const CostTblEntry AVX1BoolReduction[] = {
5169 { ISD::AND, MVT::v4i64, 2 }, // vmovmskpd + cmp
5170 { ISD::AND, MVT::v8i32, 2 }, // vmovmskps + cmp
5171 { ISD::AND, MVT::v16i16, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
5172 { ISD::AND, MVT::v32i8, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
5173 { ISD::OR, MVT::v4i64, 2 }, // vmovmskpd + cmp
5174 { ISD::OR, MVT::v8i32, 2 }, // vmovmskps + cmp
5175 { ISD::OR, MVT::v16i16, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
5176 { ISD::OR, MVT::v32i8, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
5179 static const CostTblEntry SSE2BoolReduction[] = {
5180 { ISD::AND, MVT::v2i64, 2 }, // movmskpd + cmp
5181 { ISD::AND, MVT::v4i32, 2 }, // movmskps + cmp
5182 { ISD::AND, MVT::v8i16, 2 }, // pmovmskb + cmp
5183 { ISD::AND, MVT::v16i8, 2 }, // pmovmskb + cmp
5184 { ISD::OR, MVT::v2i64, 2 }, // movmskpd + cmp
5185 { ISD::OR, MVT::v4i32, 2 }, // movmskps + cmp
5186 { ISD::OR, MVT::v8i16, 2 }, // pmovmskb + cmp
5187 { ISD::OR, MVT::v16i8, 2 }, // pmovmskb + cmp
5190 // Handle bool allof/anyof patterns.
5191 if (ValVTy->getElementType()->isIntegerTy(1)) {
5192 InstructionCost ArithmeticCost = 0;
5193 if (LT.first != 1 && MTy.isVector() &&
5194 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
5195 // Type needs to be split. We need LT.first - 1 arithmetic ops.
5196 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
5197 MTy.getVectorNumElements());
5198 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
5199 ArithmeticCost *= LT.first - 1;
5202 if (ST->hasAVX512())
5203 if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy))
5204 return ArithmeticCost + Entry->Cost;
5205 if (ST->hasAVX2())
5206 if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy))
5207 return ArithmeticCost + Entry->Cost;
5208 if (ST->hasAVX())
5209 if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy))
5210 return ArithmeticCost + Entry->Cost;
5211 if (ST->hasSSE2())
5212 if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy))
5213 return ArithmeticCost + Entry->Cost;
5215 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);
5218 unsigned NumVecElts = ValVTy->getNumElements();
5219 unsigned ScalarSize = ValVTy->getScalarSizeInBits();
5221 // Only special case power-of-2 reductions where the scalar type isn't
5222 // changed by type legalization.
5223 if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits())
5224 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);
5226 InstructionCost ReductionCost = 0;
5228 auto *Ty = ValVTy;
5229 if (LT.first != 1 && MTy.isVector() &&
5230 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
5231 // Type needs to be split. We need LT.first - 1 arithmetic ops.
5232 Ty = FixedVectorType::get(ValVTy->getElementType(),
5233 MTy.getVectorNumElements());
5234 ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
5235 ReductionCost *= LT.first - 1;
5236 NumVecElts = MTy.getVectorNumElements();
5239 // Now handle reduction with the legal type, taking into account size changes
5240 // at each level.
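// Illustrative walk-through (assuming an AVX v8f32 fadd reduction): the
// loop below halves 256 -> 128 bits with an extract_subvector plus a v4f32
// fadd, then reduces within 128 bits via a v2f64-style permute + fadd and a
// v4f32 shuffle + fadd, and finally charges one extractelement.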
5241 while (NumVecElts > 1) {
5242 // Determine the size of the remaining vector we need to reduce.
5243 unsigned Size = NumVecElts * ScalarSize;
5244 NumVecElts /= 2;
5245 // If we're reducing from 256/512 bits, use an extract_subvector.
5246 if (Size > 128) {
5247 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
5248 ReductionCost +=
5249 getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt, CostKind,
5250 NumVecElts, SubTy);
5251 Ty = SubTy;
5252 } else if (Size == 128) {
5253 // Reducing from 128 bits is a permute of v2f64/v2i64.
5254 FixedVectorType *ShufTy;
5255 if (ValVTy->getElementType()->isFloatingPointTy())
5256 ShufTy =
5257 FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2);
5258 else
5259 ShufTy =
5260 FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2);
5261 ReductionCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy,
5262 std::nullopt, CostKind, 0, nullptr);
5263 } else if (Size == 64) {
5264 // Reducing from 64 bits is a shuffle of v4f32/v4i32.
5265 FixedVectorType *ShufTy;
5266 if (ValVTy->getElementType()->isFloatingPointTy())
5267 ShufTy =
5268 FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4);
5269 else
5270 ShufTy =
5271 FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4);
5272 ReductionCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy,
5273 std::nullopt, CostKind, 0, nullptr);
5274 } else {
5275 // Reducing from smaller size is a shift by immediate.
5276 auto *ShiftTy = FixedVectorType::get(
5277 Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size);
5278 ReductionCost += getArithmeticInstrCost(
5279 Instruction::LShr, ShiftTy, CostKind,
5280 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
5281 {TargetTransformInfo::OK_UniformConstantValue, TargetTransformInfo::OP_None});
5284 // Add the arithmetic op for this level.
5285 ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind);
5288 // Add the final extract element to the cost.
5289 return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty,
5290 CostKind, 0, nullptr, nullptr);
5293 InstructionCost X86TTIImpl::getMinMaxCost(Intrinsic::ID IID, Type *Ty,
5294 TTI::TargetCostKind CostKind,
5295 FastMathFlags FMF) {
5296 IntrinsicCostAttributes ICA(IID, Ty, {Ty, Ty}, FMF);
5297 return getIntrinsicInstrCost(ICA, CostKind);
5300 InstructionCost
5301 X86TTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *ValTy,
5302 FastMathFlags FMF,
5303 TTI::TargetCostKind CostKind) {
5304 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
5306 MVT MTy = LT.second;
5308 int ISD;
5309 if (ValTy->isIntOrIntVectorTy()) {
5310 ISD = (IID == Intrinsic::umin || IID == Intrinsic::umax) ? ISD::UMIN
5311 : ISD::SMIN;
5312 } else {
5313 assert(ValTy->isFPOrFPVectorTy() &&
5314 "Expected float point or integer vector type.");
5315 ISD = (IID == Intrinsic::minnum || IID == Intrinsic::maxnum)
5316 ? ISD::FMINNUM
5317 : ISD::FMINIMUM;
5320 // We use the Intel Architecture Code Analyzer (IACA) to measure the
5321 // throughput and use it as the cost.
5323 static const CostTblEntry SSE2CostTbl[] = {
5324 {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
5325 {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
5326 {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
5329 static const CostTblEntry SSE41CostTbl[] = {
5330 {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
5331 {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
5332 {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
5333 {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
5334 {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
5335 {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
5336 {ISD::SMIN, MVT::v2i8, 3}, // pminsb
5337 {ISD::SMIN, MVT::v4i8, 5}, // pminsb
5338 {ISD::SMIN, MVT::v8i8, 7}, // pminsb
5339 {ISD::SMIN, MVT::v16i8, 6},
5340 {ISD::UMIN, MVT::v2i8, 3}, // same as sse2
5341 {ISD::UMIN, MVT::v4i8, 5}, // same as sse2
5342 {ISD::UMIN, MVT::v8i8, 7}, // same as sse2
5343 {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
5346 static const CostTblEntry AVX1CostTbl[] = {
5347 {ISD::SMIN, MVT::v16i16, 6},
5348 {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
5349 {ISD::SMIN, MVT::v32i8, 8},
5350 {ISD::UMIN, MVT::v32i8, 8},
5353 static const CostTblEntry AVX512BWCostTbl[] = {
5354 {ISD::SMIN, MVT::v32i16, 8},
5355 {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
5356 {ISD::SMIN, MVT::v64i8, 10},
5357 {ISD::UMIN, MVT::v64i8, 10},
5360 // Before legalizing the type, give a chance to look up illegal narrow types
5361 // in the table.
5362 // FIXME: Is there a better way to do this?
5363 EVT VT = TLI->getValueType(DL, ValTy);
5364 if (VT.isSimple()) {
5365 MVT MTy = VT.getSimpleVT();
5366 if (ST->hasBWI())
5367 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
5368 return Entry->Cost;
5370 if (ST->hasAVX())
5371 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
5372 return Entry->Cost;
5374 if (ST->hasSSE41())
5375 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
5376 return Entry->Cost;
5378 if (ST->hasSSE2())
5379 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
5380 return Entry->Cost;
5383 auto *ValVTy = cast<FixedVectorType>(ValTy);
5384 unsigned NumVecElts = ValVTy->getNumElements();
5386 auto *Ty = ValVTy;
5387 InstructionCost MinMaxCost = 0;
5388 if (LT.first != 1 && MTy.isVector() &&
5389 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
5390 // Type needs to be split. We need LT.first - 1 min/max operations.
5391 Ty = FixedVectorType::get(ValVTy->getElementType(),
5392 MTy.getVectorNumElements());
5393 MinMaxCost = getMinMaxCost(IID, Ty, CostKind, FMF);
5394 MinMaxCost *= LT.first - 1;
5395 NumVecElts = MTy.getVectorNumElements();
5398 if (ST->hasBWI())
5399 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
5400 return MinMaxCost + Entry->Cost;
5402 if (ST->hasAVX())
5403 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
5404 return MinMaxCost + Entry->Cost;
5406 if (ST->hasSSE41())
5407 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
5408 return MinMaxCost + Entry->Cost;
5410 if (ST->hasSSE2())
5411 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
5412 return MinMaxCost + Entry->Cost;
5414 unsigned ScalarSize = ValTy->getScalarSizeInBits();
5416 // Only special case power-of-2 reductions where the scalar type isn't
5417 // changed by type legalization.
5418 if (!isPowerOf2_32(ValVTy->getNumElements()) ||
5419 ScalarSize != MTy.getScalarSizeInBits())
5420 return BaseT::getMinMaxReductionCost(IID, ValTy, FMF, CostKind);
5422 // Now handle reduction with the legal type, taking into account size changes
5423 // at each level.
5424 while (NumVecElts > 1) {
5425 // Determine the size of the remaining vector we need to reduce.
5426 unsigned Size = NumVecElts * ScalarSize;
5427 NumVecElts /= 2;
5428 // If we're reducing from 256/512 bits, use an extract_subvector.
5429 if (Size > 128) {
5430 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
5431 MinMaxCost += getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt,
5432 CostKind, NumVecElts, SubTy);
5433 Ty = SubTy;
5434 } else if (Size == 128) {
5435 // Reducing from 128 bits is a permute of v2f64/v2i64.
5436 VectorType *ShufTy;
5437 if (ValTy->getElementType()->isFloatingPointTy())
5438 ShufTy =
5439 FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
5440 else
5441 ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
5442 MinMaxCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy,
5443 std::nullopt, CostKind, 0, nullptr);
5444 } else if (Size == 64) {
5445 // Reducing from 64 bits is a shuffle of v4f32/v4i32.
5446 FixedVectorType *ShufTy;
5447 if (ValTy->getElementType()->isFloatingPointTy())
5448 ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
5449 else
5450 ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
5451 MinMaxCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy,
5452 std::nullopt, CostKind, 0, nullptr);
5453 } else {
5454 // Reducing from smaller size is a shift by immediate.
5455 auto *ShiftTy = FixedVectorType::get(
5456 Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
5457 MinMaxCost += getArithmeticInstrCost(
5458 Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
5459 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
5460 {TargetTransformInfo::OK_UniformConstantValue, TargetTransformInfo::OP_None});
5463 // Add the arithmetic op for this level.
5464 MinMaxCost += getMinMaxCost(IID, Ty, CostKind, FMF);
5467 // Add the final extract element to the cost.
5468 return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty,
5469 CostKind, 0, nullptr, nullptr);
5472 /// Calculate the cost of materializing a 64-bit value. This helper
5473 /// method might only calculate a fraction of a larger immediate. Therefore it
5474 /// is valid to return a cost of ZERO.
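/// Illustrative mapping: 0 is TCC_Free, an immediate that fits in a signed
/// 32-bit field is TCC_Basic (a single mov), and anything wider is
/// 2 * TCC_Basic (e.g. a movabsq on x86-64).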
5475 InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) {
5476 if (Val == 0)
5477 return TTI::TCC_Free;
5479 if (isInt<32>(Val))
5480 return TTI::TCC_Basic;
5482 return 2 * TTI::TCC_Basic;
5485 InstructionCost X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
5486 TTI::TargetCostKind CostKind) {
5487 assert(Ty->isIntegerTy());
5489 unsigned BitSize = Ty->getPrimitiveSizeInBits();
5490 if (BitSize == 0)
5491 return ~0U;
5493 // Never hoist constants larger than 128 bits, because this might lead to
5494 // incorrect code generation or assertions in codegen.
5495 // FIXME: Create a cost model for types larger than i128 once the codegen
5496 // issues have been fixed.
5497 if (BitSize > 128)
5498 return TTI::TCC_Free;
5500 if (Imm == 0)
5501 return TTI::TCC_Free;
5503 // Sign-extend all constants to a multiple of 64-bit.
5504 APInt ImmVal = Imm;
5505 if (BitSize % 64 != 0)
5506 ImmVal = Imm.sext(alignTo(BitSize, 64));
5508 // Split the constant into 64-bit chunks and calculate the cost for each
5509 // chunk.
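// Illustrative example (not from the original source): an i128 constant
// with a zero high half and a 32-bit low half costs TCC_Basic for the low
// chunk and TCC_Free for the high chunk, clamped to at least 1 below.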
5510 InstructionCost Cost = 0;
5511 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
5512 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
5513 int64_t Val = Tmp.getSExtValue();
5514 Cost += getIntImmCost(Val);
5516 // We need at least one instruction to materialize the constant.
5517 return std::max<InstructionCost>(1, Cost);
5520 InstructionCost X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
5521 const APInt &Imm, Type *Ty,
5522 TTI::TargetCostKind CostKind,
5523 Instruction *Inst) {
5524 assert(Ty->isIntegerTy());
5526 unsigned BitSize = Ty->getPrimitiveSizeInBits();
5527 // There is no cost model for constants with a bit size of 0. Return TCC_Free
5528 // here, so that constant hoisting will ignore this constant.
5529 if (BitSize == 0)
5530 return TTI::TCC_Free;
5532 unsigned ImmIdx = ~0U;
5533 switch (Opcode) {
5534 default:
5535 return TTI::TCC_Free;
5536 case Instruction::GetElementPtr:
5537 // Always hoist the base address of a GetElementPtr. This prevents the
5538 // creation of new constants for every base constant that gets constant
5539 // folded with the offset.
5540 if (Idx == 0)
5541 return 2 * TTI::TCC_Basic;
5542 return TTI::TCC_Free;
5543 case Instruction::Store:
5544 ImmIdx = 0;
5545 break;
5546 case Instruction::ICmp:
5547 // This is an imperfect hack to prevent constant hoisting of
5548 // compares that might be trying to check if a 64-bit value fits in
5549 // 32-bits. The backend can optimize these cases using a right shift by 32.
5550 // Ideally we would check the compare predicate here. There are also other
5551 // similar immediates the backend can use shifts for.
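// Illustrative example (hypothetical IR): for
//   %c = icmp ult i64 %x, 4294967296   ; 0x100000000
// the backend can test (%x >> 32) == 0 instead of materializing the
// constant, so hoisting it would only pessimize the code.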
5552 if (Idx == 1 && Imm.getBitWidth() == 64) {
5553 uint64_t ImmVal = Imm.getZExtValue();
5554 if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
5555 return TTI::TCC_Free;
5557 ImmIdx = 1;
5558 break;
5559 case Instruction::And:
5560 // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
5561 // by using a 32-bit operation with implicit zero extension. Detect such
5562 // immediates here as the normal path expects bit 31 to be sign extended.
5563 if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.isIntN(32))
5564 return TTI::TCC_Free;
5565 ImmIdx = 1;
5566 break;
5567 case Instruction::Add:
5568 case Instruction::Sub:
5569 // For add/sub, we can use the opposite instruction for INT32_MIN.
5570 if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
5571 return TTI::TCC_Free;
5572 ImmIdx = 1;
5573 break;
5574 case Instruction::UDiv:
5575 case Instruction::SDiv:
5576 case Instruction::URem:
5577 case Instruction::SRem:
5578 // Division by constant is typically expanded later into a different
5579 // instruction sequence. This completely changes the constants.
5580 // Report them as "free" to stop ConstantHoist from marking them as opaque.
5581 return TTI::TCC_Free;
5582 case Instruction::Mul:
5583 case Instruction::Or:
5584 case Instruction::Xor:
5585 ImmIdx = 1;
5586 break;
5587 // Always return TCC_Free for the shift value of a shift instruction.
5588 case Instruction::Shl:
5589 case Instruction::LShr:
5590 case Instruction::AShr:
5591 if (Idx == 1)
5592 return TTI::TCC_Free;
5593 break;
5594 case Instruction::Trunc:
5595 case Instruction::ZExt:
5596 case Instruction::SExt:
5597 case Instruction::IntToPtr:
5598 case Instruction::PtrToInt:
5599 case Instruction::BitCast:
5600 case Instruction::PHI:
5601 case Instruction::Call:
5602 case Instruction::Select:
5603 case Instruction::Ret:
5604 case Instruction::Load:
5605 break;
5608 if (Idx == ImmIdx) {
5609 uint64_t NumConstants = divideCeil(BitSize, 64);
5610 InstructionCost Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
5611 return (Cost <= NumConstants * TTI::TCC_Basic)
5612 ? static_cast<int>(TTI::TCC_Free)
5613 : Cost;
5616 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
5619 InstructionCost X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
5620 const APInt &Imm, Type *Ty,
5621 TTI::TargetCostKind CostKind) {
5622 assert(Ty->isIntegerTy());
5624 unsigned BitSize = Ty->getPrimitiveSizeInBits();
5625 // There is no cost model for constants with a bit size of 0. Return TCC_Free
5626 // here, so that constant hoisting will ignore this constant.
5627 if (BitSize == 0)
5628 return TTI::TCC_Free;
5630 switch (IID) {
5631 default:
5632 return TTI::TCC_Free;
5633 case Intrinsic::sadd_with_overflow:
5634 case Intrinsic::uadd_with_overflow:
5635 case Intrinsic::ssub_with_overflow:
5636 case Intrinsic::usub_with_overflow:
5637 case Intrinsic::smul_with_overflow:
5638 case Intrinsic::umul_with_overflow:
5639 if ((Idx == 1) && Imm.getBitWidth() <= 64 && Imm.isSignedIntN(32))
5640 return TTI::TCC_Free;
5641 break;
5642 case Intrinsic::experimental_stackmap:
5643 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && Imm.isSignedIntN(64)))
5644 return TTI::TCC_Free;
5645 break;
5646 case Intrinsic::experimental_patchpoint_void:
5647 case Intrinsic::experimental_patchpoint_i64:
5648 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && Imm.isSignedIntN(64)))
5649 return TTI::TCC_Free;
5650 break;
5652 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
5655 InstructionCost X86TTIImpl::getCFInstrCost(unsigned Opcode,
5656 TTI::TargetCostKind CostKind,
5657 const Instruction *I) {
5658 if (CostKind != TTI::TCK_RecipThroughput)
5659 return Opcode == Instruction::PHI ? 0 : 1;
5660 // Branches are assumed to be predicted.
5661 return 0;
5664 int X86TTIImpl::getGatherOverhead() const {
5665 // Some CPUs have more overhead for gather. The specified overhead is relative
5666 // to the Load operation. "2" is the number provided by Intel architects. This
5667 // parameter is used for the cost estimation of the gather op and for
5668 // comparison with other alternatives.
5669 // TODO: Remove the explicit hasAVX512()? That would mean we would only
5670 // enable gather with an explicit -march.
5671 if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather()))
5672 return 2;
5674 return 1024;
5677 int X86TTIImpl::getScatterOverhead() const {
5678 if (ST->hasAVX512())
5679 return 2;
5681 return 1024;
5684 // Return the average cost of a gather / scatter instruction; may be improved later.
5685 // FIXME: Add TargetCostKind support.
5686 InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy,
5687 const Value *Ptr, Align Alignment,
5688 unsigned AddressSpace) {
5690 assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
5691 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
5693 // Try to reduce the index size from 64 bits (the default for GEP)
5694 // to 32. This is essential for VF 16. If the index can't be reduced to 32,
5695 // the operation will use 16 x 64-bit indices, which do not fit in a zmm
5696 // register and need to be split. Also check that the base pointer is the
5697 // same for all lanes, and that there's at most one variable index.
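// Illustrative example (assuming AVX-512, VF = 16): <16 x i64> indices
// occupy two zmm registers (1024 bits) and force the gather to split, while
// <16 x i32> indices fit in a single zmm; hence the attempt below to prove
// a 32-bit index width (e.g. constant or sign-extended i32 GEP operands).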
5698 auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) {
5699 unsigned IndexSize = DL.getPointerSizeInBits();
5700 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
5701 if (IndexSize < 64 || !GEP)
5702 return IndexSize;
5704 unsigned NumOfVarIndices = 0;
5705 const Value *Ptrs = GEP->getPointerOperand();
5706 if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
5707 return IndexSize;
5708 for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
5709 if (isa<Constant>(GEP->getOperand(I)))
5710 continue;
5711 Type *IndxTy = GEP->getOperand(I)->getType();
5712 if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy))
5713 IndxTy = IndexVTy->getElementType();
5714 if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
5715 !isa<SExtInst>(GEP->getOperand(I))) ||
5716 ++NumOfVarIndices > 1)
5717 return IndexSize; // 64
5719 return (unsigned)32;
5722 // Trying to reduce the IndexSize to 32 bits for vectors of 16 elements.
5723 // By default the IndexSize is equal to the pointer size.
5724 unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
5725 ? getIndexSizeInBits(Ptr, DL)
5726 : DL.getPointerSizeInBits();
5728 auto *IndexVTy = FixedVectorType::get(
5729 IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
5730 std::pair<InstructionCost, MVT> IdxsLT = getTypeLegalizationCost(IndexVTy);
5731 std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(SrcVTy);
5732 InstructionCost::CostType SplitFactor =
5733 *std::max(IdxsLT.first, SrcLT.first).getValue();
5734 if (SplitFactor > 1) {
5735 // Handle splitting of vector of pointers
5736 auto *SplitSrcTy =
5737 FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
5738 return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
5739 AddressSpace);
5742 // The gather / scatter cost is given by Intel architects. It is a rough
5743 // number since we are looking at one instruction at a time.
5744 const int GSOverhead = (Opcode == Instruction::Load)
5745 ? getGatherOverhead()
5746 : getScatterOverhead();
5747 return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
5748 MaybeAlign(Alignment), AddressSpace,
5749 TTI::TCK_RecipThroughput);
5752 /// Return the cost of full scalarization of a gather / scatter operation.
5754 /// Opcode - Load or Store instruction.
5755 /// SrcVTy - The type of the data vector that should be gathered or scattered.
5756 /// VariableMask - The mask is non-constant at compile time.
5757 /// Alignment - Alignment for one element.
5758 /// AddressSpace - The address space of the pointer(s).
5760 /// FIXME: Add TargetCostKind support.
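/// Illustrative shape (hypothetical, for a gather): per lane, extract the
/// mask bit, compare + branch, load the scalar through its own pointer, and
/// insert the result into the vector; the terms summed below model exactly
/// those pieces.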
5761 InstructionCost X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
5762 bool VariableMask, Align Alignment,
5763 unsigned AddressSpace) {
5764 Type *ScalarTy = SrcVTy->getScalarType();
5765 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
5766 APInt DemandedElts = APInt::getAllOnes(VF);
5767 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
5769 InstructionCost MaskUnpackCost = 0;
5770 if (VariableMask) {
5771 auto *MaskTy =
5772 FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
5773 MaskUnpackCost = getScalarizationOverhead(
5774 MaskTy, DemandedElts, /*Insert=*/false, /*Extract=*/true, CostKind);
5775 InstructionCost ScalarCompareCost = getCmpSelInstrCost(
5776 Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr,
5777 CmpInst::BAD_ICMP_PREDICATE, CostKind);
5778 InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
5779 MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
5782 InstructionCost AddressUnpackCost = getScalarizationOverhead(
5783 FixedVectorType::get(PointerType::getUnqual(ScalarTy->getContext()), VF),
5784 DemandedElts, /*Insert=*/false, /*Extract=*/true, CostKind);
5786 // The cost of the scalar loads/stores.
5787 InstructionCost MemoryOpCost =
5788 VF * getMemoryOpCost(Opcode, ScalarTy, MaybeAlign(Alignment),
5789 AddressSpace, CostKind);
5791 // The cost of forming the vector from the loaded scalars, or of
5792 // scalarizing the vector to perform the scalar stores.
5793 InstructionCost InsertExtractCost = getScalarizationOverhead(
5794 cast<FixedVectorType>(SrcVTy), DemandedElts,
5795 /*Insert=*/Opcode == Instruction::Load,
5796 /*Extract=*/Opcode == Instruction::Store, CostKind);
5798 return AddressUnpackCost + MemoryOpCost + MaskUnpackCost + InsertExtractCost;
5801 /// Calculate the cost of Gather / Scatter operation
5802 InstructionCost X86TTIImpl::getGatherScatterOpCost(
5803 unsigned Opcode, Type *SrcVTy, const Value *Ptr, bool VariableMask,
5804 Align Alignment, TTI::TargetCostKind CostKind,
5805 const Instruction *I = nullptr) {
5806 if (CostKind != TTI::TCK_RecipThroughput) {
5807 if ((Opcode == Instruction::Load &&
5808 isLegalMaskedGather(SrcVTy, Align(Alignment)) &&
5809 !forceScalarizeMaskedGather(cast<VectorType>(SrcVTy),
5810 Align(Alignment))) ||
5811 (Opcode == Instruction::Store &&
5812 isLegalMaskedScatter(SrcVTy, Align(Alignment)) &&
5813 !forceScalarizeMaskedScatter(cast<VectorType>(SrcVTy),
5814 Align(Alignment))))
5815 return 1;
5816 return BaseT::getGatherScatterOpCost(Opcode, SrcVTy, Ptr, VariableMask,
5817 Alignment, CostKind, I);
5820 assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
5821 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
5822 if (!PtrTy && Ptr->getType()->isVectorTy())
5823 PtrTy = dyn_cast<PointerType>(
5824 cast<VectorType>(Ptr->getType())->getElementType());
5825 assert(PtrTy && "Unexpected type for Ptr argument");
5826 unsigned AddressSpace = PtrTy->getAddressSpace();
5828 if ((Opcode == Instruction::Load &&
5829 (!isLegalMaskedGather(SrcVTy, Align(Alignment)) ||
5830 forceScalarizeMaskedGather(cast<VectorType>(SrcVTy),
5831 Align(Alignment)))) ||
5832 (Opcode == Instruction::Store &&
5833 (!isLegalMaskedScatter(SrcVTy, Align(Alignment)) ||
5834 forceScalarizeMaskedScatter(cast<VectorType>(SrcVTy),
5835 Align(Alignment)))))
5836 return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
5837 AddressSpace);
5839 return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
5842 bool X86TTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
5843 const TargetTransformInfo::LSRCost &C2) {
5844 // X86-specific here: the instruction count has first priority.
5845 return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
5846 C1.NumIVMuls, C1.NumBaseAdds,
5847 C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
5848 std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
5849 C2.NumIVMuls, C2.NumBaseAdds,
5850 C2.ScaleCost, C2.ImmCost, C2.SetupCost);
5853 bool X86TTIImpl::canMacroFuseCmp() {
5854 return ST->hasMacroFusion() || ST->hasBranchFusion();
5857 bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
5858 if (!ST->hasAVX())
5859 return false;
5861 // The backend can't handle a single element vector.
5862 if (isa<VectorType>(DataTy) &&
5863 cast<FixedVectorType>(DataTy)->getNumElements() == 1)
5864 return false;
5865 Type *ScalarTy = DataTy->getScalarType();
5867 if (ScalarTy->isPointerTy())
5868 return true;
5870 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
5871 return true;
5873 if (ScalarTy->isHalfTy() && ST->hasBWI())
5874 return true;
5876 if (ScalarTy->isBFloatTy() && ST->hasBF16())
5877 return true;
5879 if (!ScalarTy->isIntegerTy())
5880 return false;
5882 unsigned IntWidth = ScalarTy->getIntegerBitWidth();
5883 return IntWidth == 32 || IntWidth == 64 ||
5884 ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
5887 bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) {
5888 return isLegalMaskedLoad(DataType, Alignment);
5891 bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
5892 unsigned DataSize = DL.getTypeStoreSize(DataType);
5893 // The only supported nontemporal loads are for aligned vectors of 16 or 32
5894 // bytes. Note that 32-byte nontemporal vector loads are supported by AVX2
5895 // (the equivalent stores only require AVX).
5896 if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
5897 return DataSize == 16 ? ST->hasSSE1() : ST->hasAVX2();
5899 return false;
5902 bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
5903 unsigned DataSize = DL.getTypeStoreSize(DataType);
5905 // SSE4A supports nontemporal stores of float and double at arbitrary
5906 // alignment.
5907 if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
5908 return true;
5910 // Besides the SSE4A subtarget exception above, only aligned stores are
5911 // available nontemporally on any other subtarget. And only stores with a
5912 // size of 4..32 bytes (powers of 2 only) are permitted.
5913 if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
5914 !isPowerOf2_32(DataSize))
5915 return false;
5917 // 32-byte vector nontemporal stores are supported by AVX (the equivalent
5918 // loads require AVX2).
5919 if (DataSize == 32)
5920 return ST->hasAVX();
5921 if (DataSize == 16)
5922 return ST->hasSSE1();
5923 return true;
5926 bool X86TTIImpl::isLegalBroadcastLoad(Type *ElementTy,
5927 ElementCount NumElements) const {
5928 // movddup
5929 return ST->hasSSE3() && !NumElements.isScalable() &&
5930 NumElements.getFixedValue() == 2 &&
5931 ElementTy == Type::getDoubleTy(ElementTy->getContext());
5934 bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
5935 if (!isa<VectorType>(DataTy))
5936 return false;
5938 if (!ST->hasAVX512())
5939 return false;
5941 // The backend can't handle a single element vector.
5942 if (cast<FixedVectorType>(DataTy)->getNumElements() == 1)
5943 return false;
5945 Type *ScalarTy = cast<VectorType>(DataTy)->getElementType();
5947 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
5948 return true;
5950 if (!ScalarTy->isIntegerTy())
5951 return false;
5953 unsigned IntWidth = ScalarTy->getIntegerBitWidth();
5954 return IntWidth == 32 || IntWidth == 64 ||
5955 ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
5958 bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
5959 return isLegalMaskedExpandLoad(DataTy);
5962 bool X86TTIImpl::supportsGather() const {
5963 // Some CPUs have better gather performance than others.
5964 // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
5965 // enable gather with an explicit -march.
5966 return ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2());
5969 bool X86TTIImpl::forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) {
5970 // Gather / scatter on 2-element vectors is not profitable on KNL / SKX.
5971 // 4-element gather/scatter instructions do not exist on KNL. We can extend
5972 // them to 8 elements, but zeroing the upper bits of the mask vector will
5973 // add more instructions. Right now we give the scalar cost of 4-element
5974 // vectors for KNL. TODO: Check whether the gather/scatter instruction is
5975 // better in the VariableMask case.
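// Illustrative consequence (assuming the check below): with AVX-512, a
// <2 x i64> gather is always scalarized, and a <4 x i32> gather is
// scalarized on targets without VLX such as KNL.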
5976 unsigned NumElts = cast<FixedVectorType>(VTy)->getNumElements();
5977 return NumElts == 1 ||
5978 (ST->hasAVX512() && (NumElts == 2 || (NumElts == 4 && !ST->hasVLX())));
5981 bool X86TTIImpl::isLegalMaskedGatherScatter(Type *DataTy, Align Alignment) {
5982 Type *ScalarTy = DataTy->getScalarType();
5983 if (ScalarTy->isPointerTy())
5984 return true;
5986 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
5987 return true;
5989 if (!ScalarTy->isIntegerTy())
5990 return false;
5992 unsigned IntWidth = ScalarTy->getIntegerBitWidth();
5993 return IntWidth == 32 || IntWidth == 64;
5996 bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
5997 if (!supportsGather() || !ST->preferGather())
5998 return false;
5999 return isLegalMaskedGatherScatter(DataTy, Alignment);
6002 bool X86TTIImpl::isLegalAltInstr(VectorType *VecTy, unsigned Opcode0,
6003 unsigned Opcode1,
6004 const SmallBitVector &OpcodeMask) const {
6005 // ADDSUBPS 4xf32 SSE3
6006 // VADDSUBPS 4xf32 AVX
6007 // VADDSUBPS 8xf32 AVX2
6008 // ADDSUBPD 2xf64 SSE3
6009 // VADDSUBPD 2xf64 AVX
6010 // VADDSUBPD 4xf64 AVX2
6012 unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
6013 assert(OpcodeMask.size() == NumElements && "Mask and VecTy are incompatible");
6014 if (!isPowerOf2_32(NumElements))
6015 return false;
6016 // Check the opcode pattern. We apply the mask on the opcode arguments and
6017 // then check if it is what we expect.
6018 for (int Lane : seq<int>(0, NumElements)) {
6019 unsigned Opc = OpcodeMask.test(Lane) ? Opcode1 : Opcode0;
6020 // We expect FSub for even lanes and FAdd for odd lanes.
6021 if (Lane % 2 == 0 && Opc != Instruction::FSub)
6022 return false;
6023 if (Lane % 2 == 1 && Opc != Instruction::FAdd)
6024 return false;
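// Illustrative example (not from the original source): for a <4 x float>
// pattern with Opcode0 = FSub and Opcode1 = FAdd, an OpcodeMask of 0b1010
// (odd lanes set) yields FSub on even lanes and FAdd on odd lanes, which is
// exactly the (v)addsubps pattern.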
6026 // Now check that the pattern is supported by the target ISA.
6027 Type *ElemTy = cast<VectorType>(VecTy)->getElementType();
6028 if (ElemTy->isFloatTy())
6029 return ST->hasSSE3() && NumElements % 4 == 0;
6030 if (ElemTy->isDoubleTy())
6031 return ST->hasSSE3() && NumElements % 2 == 0;
6032 return false;
6035 bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) {
6036 // AVX2 doesn't support scatter
6037 if (!ST->hasAVX512() || !ST->preferScatter())
6038 return false;
6039 return isLegalMaskedGatherScatter(DataType, Alignment);
6042 bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
6043 EVT VT = TLI->getValueType(DL, DataType);
6044 return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
6047 bool X86TTIImpl::isExpensiveToSpeculativelyExecute(const Instruction* I) {
6048 // FDIV is always expensive, even if it has a very low uop count.
6049 // TODO: Still necessary for recent CPUs with low latency/throughput fdiv?
6050 if (I->getOpcode() == Instruction::FDiv)
6051 return true;
6053 return BaseT::isExpensiveToSpeculativelyExecute(I);
6056 bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
6057 return false;
6060 bool X86TTIImpl::areInlineCompatible(const Function *Caller,
6061 const Function *Callee) const {
6062 const TargetMachine &TM = getTLI()->getTargetMachine();
6064 // Model this as a subset relation over subtarget features.
6065 const FeatureBitset &CallerBits =
6066 TM.getSubtargetImpl(*Caller)->getFeatureBits();
6067 const FeatureBitset &CalleeBits =
6068 TM.getSubtargetImpl(*Callee)->getFeatureBits();
6070 // Check whether features are the same (apart from the ignore list).
6071 FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
6072 FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
6073 if (RealCallerBits == RealCalleeBits)
6074 return true;
6076 // If the features are a subset, we need to additionally check for calls
6077 // that may become ABI-incompatible as a result of inlining.
6078 if ((RealCallerBits & RealCalleeBits) != RealCalleeBits)
6079 return false;
6081 for (const Instruction &I : instructions(Callee)) {
6082 if (const auto *CB = dyn_cast<CallBase>(&I)) {
6083 // Having more target features is fine for inline ASM.
6084 if (CB->isInlineAsm())
6085 continue;
6087 SmallVector<Type *, 8> Types;
6088 for (Value *Arg : CB->args())
6089 Types.push_back(Arg->getType());
6090 if (!CB->getType()->isVoidTy())
6091 Types.push_back(CB->getType());
6093 // Simple types are always ABI compatible.
6094 auto IsSimpleTy = [](Type *Ty) {
6095 return !Ty->isVectorTy() && !Ty->isAggregateType();
6097 if (all_of(Types, IsSimpleTy))
6098 continue;
6100 if (Function *NestedCallee = CB->getCalledFunction()) {
6101 // Assume that intrinsics are always ABI compatible.
6102 if (NestedCallee->isIntrinsic())
6103 continue;
6105 // Do a precise compatibility check.
6106 if (!areTypesABICompatible(Caller, NestedCallee, Types))
6107 return false;
6108 } else {
6109 // We don't know the target features of the callee, so
6110 // assume it is incompatible.
6111 return false;
6115 return true;
6118 bool X86TTIImpl::areTypesABICompatible(const Function *Caller,
6119 const Function *Callee,
6120 const ArrayRef<Type *> &Types) const {
6121 if (!BaseT::areTypesABICompatible(Caller, Callee, Types))
6122 return false;
6124 // If we get here, we know the target features match. If one function
6125 // considers 512-bit vectors legal and the other does not, consider them
6126 // incompatible.
6127 const TargetMachine &TM = getTLI()->getTargetMachine();
6129 if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
6130 TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs())
6131 return true;
6133 // Consider the arguments compatible if they aren't vectors or aggregates.
6134 // FIXME: Look at the size of vectors.
6135 // FIXME: Look at the element types of aggregates to see if there are vectors.
6136 return llvm::none_of(Types,
6137 [](Type *T) { return T->isVectorTy() || T->isAggregateType(); });
6140 X86TTIImpl::TTI::MemCmpExpansionOptions
6141 X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
6142 TTI::MemCmpExpansionOptions Options;
6143 Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
6144 Options.NumLoadsPerBlock = 2;
6145 // All GPR and vector loads can be unaligned.
6146 Options.AllowOverlappingLoads = true;
6147 if (IsZeroCmp) {
6148 // Only enable vector loads for equality comparison. Right now the vector
6149 // version is not as fast for three-way compares (see #33329).
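// Illustrative expansion (assumption, not from the original source): a
// 32-byte equality memcmp on AVX2 can be lowered to two 32-byte loads and a
// vector compare (e.g. vpcmpeqb + vpmovmskb), which is why the wide
// LoadSizes are only pushed for IsZeroCmp.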
6150 const unsigned PreferredWidth = ST->getPreferVectorWidth();
6151 if (PreferredWidth >= 512 && ST->hasAVX512() && ST->hasEVEX512())
6152 Options.LoadSizes.push_back(64);
6153 if (PreferredWidth >= 256 && ST->hasAVX()) Options.LoadSizes.push_back(32);
6154 if (PreferredWidth >= 128 && ST->hasSSE2()) Options.LoadSizes.push_back(16);
6156 if (ST->is64Bit()) {
6157 Options.LoadSizes.push_back(8);
6159 Options.LoadSizes.push_back(4);
6160 Options.LoadSizes.push_back(2);
6161 Options.LoadSizes.push_back(1);
6162 return Options;
6165 bool X86TTIImpl::prefersVectorizedAddressing() const {
6166 return supportsGather();
6169 bool X86TTIImpl::supportsEfficientVectorElementLoadStore() const {
6170 return false;
6173 bool X86TTIImpl::enableInterleavedAccessVectorization() {
6174 // TODO: We expect this to be beneficial regardless of arch,
6175 // but there are currently some unexplained performance artifacts on Atom.
6176 // As a temporary solution, disable on Atom.
6177 return !(ST->isAtom());
6180 // Get an estimate for interleaved load/store operations and strided loads.
6181 // \p Indices contains the indices for a strided load.
6182 // \p Factor - the interleaving factor.
6183 // AVX-512 provides 3-src shuffles that significantly reduces the cost.
6184 InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512(
6185 unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
6186 ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
6187 TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
6188 // VecTy for interleave memop is <VF*Factor x Elt>.
6189 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
6190 // VecTy = <12 x i32>.
6192 // Calculate the number of memory operations (NumOfMemOps), required
6193 // for load/store the VecTy.
6194 MVT LegalVT = getTypeLegalizationCost(VecTy).second;
6195 unsigned VecTySize = DL.getTypeStoreSize(VecTy);
6196 unsigned LegalVTSize = LegalVT.getStoreSize();
6197 unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
6199 // Get the cost of one memory operation.
6200 auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
6201 LegalVT.getVectorNumElements());
6202 InstructionCost MemOpCost;
6203 bool UseMaskedMemOp = UseMaskForCond || UseMaskForGaps;
6204 if (UseMaskedMemOp)
6205 MemOpCost = getMaskedMemoryOpCost(Opcode, SingleMemOpTy, Alignment,
6206 AddressSpace, CostKind);
6207 else
6208 MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy, MaybeAlign(Alignment),
6209 AddressSpace, CostKind);
6211 unsigned VF = VecTy->getNumElements() / Factor;
6212 MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);
6214 InstructionCost MaskCost;
6215 if (UseMaskedMemOp) {
6216 APInt DemandedLoadStoreElts = APInt::getZero(VecTy->getNumElements());
6217 for (unsigned Index : Indices) {
6218 assert(Index < Factor && "Invalid index for interleaved memory op");
6219 for (unsigned Elm = 0; Elm < VF; Elm++)
6220 DemandedLoadStoreElts.setBit(Index + Elm * Factor);
6223 Type *I1Type = Type::getInt1Ty(VecTy->getContext());
6225 MaskCost = getReplicationShuffleCost(
6226 I1Type, Factor, VF,
6227 UseMaskForGaps ? DemandedLoadStoreElts
6228 : APInt::getAllOnes(VecTy->getNumElements()),
6229 CostKind);
6231 // The Gaps mask is invariant and created outside the loop, therefore the
6232 // cost of creating it is not accounted for here. However if we have both
6233 // a MaskForGaps and some other mask that guards the execution of the
6234 // memory access, we need to account for the cost of And-ing the two masks
6235 // inside the loop.
6236 if (UseMaskForGaps) {
6237 auto *MaskVT = FixedVectorType::get(I1Type, VecTy->getNumElements());
6238 MaskCost += getArithmeticInstrCost(BinaryOperator::And, MaskVT, CostKind);
6242 if (Opcode == Instruction::Load) {
6243 // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
6244 // contain the cost of the optimized shuffle sequence that the
6245 // X86InterleavedAccess pass will generate.
6246 // The cost of loads and stores are computed separately from the table.
6248 // X86InterleavedAccess support only the following interleaved-access group.
6249 static const CostTblEntry AVX512InterleavedLoadTbl[] = {
6250 {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
6251 {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
6252 {3, MVT::v64i8, 22}, //(load 96i8 and) deinterleave into 3 x 32i8
6255 if (const auto *Entry =
6256 CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
6257 return MaskCost + NumOfMemOps * MemOpCost + Entry->Cost;
6258 // If an entry does not exist, fall back to the default implementation.
6260 // Kind of shuffle depends on number of loaded values.
6261 // If we load the entire data in one register, we can use a 1-src shuffle.
6262 // Otherwise, we'll merge 2 sources in each operation.
6263 TTI::ShuffleKind ShuffleKind =
6264 (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;
6266 InstructionCost ShuffleCost = getShuffleCost(
6267 ShuffleKind, SingleMemOpTy, std::nullopt, CostKind, 0, nullptr);
6269 unsigned NumOfLoadsInInterleaveGrp =
6270 Indices.size() ? Indices.size() : Factor;
6271 auto *ResultTy = FixedVectorType::get(VecTy->getElementType(),
6272 VecTy->getNumElements() / Factor);
6273 InstructionCost NumOfResults =
6274 getTypeLegalizationCost(ResultTy).first * NumOfLoadsInInterleaveGrp;
6276 // About half of the loads may be folded into shuffles when we have only
6277 // one result. If we have more than one result, or the loads are masked,
6278 // we do not fold loads at all.
6279 unsigned NumOfUnfoldedLoads =
6280 UseMaskedMemOp || NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;
6282 // Get a number of shuffle operations per result.
6283 unsigned NumOfShufflesPerResult =
6284 std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));
6286 // The SK_PermuteTwoSrc shuffle clobbers one of the source operands.
6287 // When we have more than one destination, we need additional instructions
6288 // to keep sources.
6289 InstructionCost NumOfMoves = 0;
6290 if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
6291 NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;
6293 InstructionCost Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
6294 MaskCost + NumOfUnfoldedLoads * MemOpCost +
6295 NumOfMoves;
6297 return Cost;
6300 // Store.
6301 assert(Opcode == Instruction::Store &&
6302 "Expected Store Instruction at this point");
6303 // X86InterleavedAccess support only the following interleaved-access group.
6304 static const CostTblEntry AVX512InterleavedStoreTbl[] = {
6305 {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
6306 {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
6307 {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 96i8 (and store)
6309 {4, MVT::v8i8, 10}, // interleave 4 x 8i8 into 32i8 (and store)
6310 {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
6311 {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
6312 {4, MVT::v64i8, 24} // interleave 4 x 32i8 into 256i8 (and store)
6315 if (const auto *Entry =
6316 CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
6317 return MaskCost + NumOfMemOps * MemOpCost + Entry->Cost;
6318 // If an entry does not exist, fall back to the default implementation.
6320 // There are no strided stores at the moment, and a store can't be folded
6321 // into a shuffle.
6322 unsigned NumOfSources = Factor; // The number of values to be merged.
6323 InstructionCost ShuffleCost = getShuffleCost(
6324 TTI::SK_PermuteTwoSrc, SingleMemOpTy, std::nullopt, CostKind, 0, nullptr);
6325 unsigned NumOfShufflesPerStore = NumOfSources - 1;
6327 // The SK_PermuteTwoSrc shuffle clobbers one of the source operands.
6328 // We need additional instructions to keep sources.
6329 unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
6330 InstructionCost Cost =
6331 MaskCost +
6332 NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
6333 NumOfMoves;
6334 return Cost;
6337 InstructionCost X86TTIImpl::getInterleavedMemoryOpCost(
6338 unsigned Opcode, Type *BaseTy, unsigned Factor, ArrayRef<unsigned> Indices,
6339 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
6340 bool UseMaskForCond, bool UseMaskForGaps) {
6341 auto *VecTy = cast<FixedVectorType>(BaseTy);
6343 auto isSupportedOnAVX512 = [&](Type *VecTy) {
6344 Type *EltTy = cast<VectorType>(VecTy)->getElementType();
6345 if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
6346 EltTy->isIntegerTy(32) || EltTy->isPointerTy())
6347 return true;
6348 if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8) || EltTy->isHalfTy())
6349 return ST->hasBWI();
6350 if (EltTy->isBFloatTy())
6351 return ST->hasBF16();
6352 return false;
6354 if (ST->hasAVX512() && isSupportedOnAVX512(VecTy))
6355 return getInterleavedMemoryOpCostAVX512(
6356 Opcode, VecTy, Factor, Indices, Alignment,
6357 AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
6359 if (UseMaskForCond || UseMaskForGaps)
6360 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
6361 Alignment, AddressSpace, CostKind,
6362 UseMaskForCond, UseMaskForGaps);
6364 // Get an estimate for interleaved load/store operations for SSE-AVX2.
6365 // As opposed to AVX-512, SSE/AVX2 does not have generic shuffles that allow
6366 // computing the cost using a generic formula as a function of generic
6367 // shuffles. We therefore use a lookup table instead, filled according to
6368 // the instruction sequences that codegen currently generates.
6370 // VecTy for interleave memop is <VF*Factor x Elt>.
6371 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
6372 // VecTy = <12 x i32>.
6373 MVT LegalVT = getTypeLegalizationCost(VecTy).second;
6375 // This function can be called with VecTy=<6xi128>, Factor=3, in which case
6376 // the VF=2, while v2i128 is an unsupported MVT vector type
6377 // (see MachineValueType.h::getVectorVT()).
6378 if (!LegalVT.isVector())
6379 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
6380 Alignment, AddressSpace, CostKind);
6382 unsigned VF = VecTy->getNumElements() / Factor;
6383 Type *ScalarTy = VecTy->getElementType();
6384 // Deduplicate entries, model floats/pointers as appropriately-sized integers.
6385 if (!ScalarTy->isIntegerTy())
6386 ScalarTy =
6387 Type::getIntNTy(ScalarTy->getContext(), DL.getTypeSizeInBits(ScalarTy));
6389 // Get the cost of all the memory operations.
6390 // FIXME: discount dead loads.
6391 InstructionCost MemOpCosts = getMemoryOpCost(
6392 Opcode, VecTy, MaybeAlign(Alignment), AddressSpace, CostKind);
6394 auto *VT = FixedVectorType::get(ScalarTy, VF);
6395 EVT ETy = TLI->getValueType(DL, VT);
6396 if (!ETy.isSimple())
6397 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
6398 Alignment, AddressSpace, CostKind);
6400 // TODO: Complete for other data-types and strides.
6401 // Each combination of Stride, element bit width and VF results in a different
6402 // sequence; The cost tables are therefore accessed with:
6403 // Factor (stride) and VectorType=VFxiN.
6404 // The Cost accounts only for the shuffle sequence;
6405 // The cost of the loads/stores is accounted for separately.

  static const CostTblEntry AVX2InterleavedLoadTbl[] = {
      {2, MVT::v2i8, 2},  // (load 4i8 and) deinterleave into 2 x 2i8
      {2, MVT::v4i8, 2},  // (load 8i8 and) deinterleave into 2 x 4i8
      {2, MVT::v8i8, 2},  // (load 16i8 and) deinterleave into 2 x 8i8
      {2, MVT::v16i8, 4}, // (load 32i8 and) deinterleave into 2 x 16i8
      {2, MVT::v32i8, 6}, // (load 64i8 and) deinterleave into 2 x 32i8

      {2, MVT::v8i16, 6},   // (load 16i16 and) deinterleave into 2 x 8i16
      {2, MVT::v16i16, 9},  // (load 32i16 and) deinterleave into 2 x 16i16
      {2, MVT::v32i16, 18}, // (load 64i16 and) deinterleave into 2 x 32i16

      {2, MVT::v8i32, 4},   // (load 16i32 and) deinterleave into 2 x 8i32
      {2, MVT::v16i32, 8},  // (load 32i32 and) deinterleave into 2 x 16i32
      {2, MVT::v32i32, 16}, // (load 64i32 and) deinterleave into 2 x 32i32

      {2, MVT::v4i64, 4},   // (load 8i64 and) deinterleave into 2 x 4i64
      {2, MVT::v8i64, 8},   // (load 16i64 and) deinterleave into 2 x 8i64
      {2, MVT::v16i64, 16}, // (load 32i64 and) deinterleave into 2 x 16i64
      {2, MVT::v32i64, 32}, // (load 64i64 and) deinterleave into 2 x 32i64

      {3, MVT::v2i8, 3},   // (load 6i8 and) deinterleave into 3 x 2i8
      {3, MVT::v4i8, 3},   // (load 12i8 and) deinterleave into 3 x 4i8
      {3, MVT::v8i8, 6},   // (load 24i8 and) deinterleave into 3 x 8i8
      {3, MVT::v16i8, 11}, // (load 48i8 and) deinterleave into 3 x 16i8
      {3, MVT::v32i8, 14}, // (load 96i8 and) deinterleave into 3 x 32i8

      {3, MVT::v2i16, 5},   // (load 6i16 and) deinterleave into 3 x 2i16
      {3, MVT::v4i16, 7},   // (load 12i16 and) deinterleave into 3 x 4i16
      {3, MVT::v8i16, 9},   // (load 24i16 and) deinterleave into 3 x 8i16
      {3, MVT::v16i16, 28}, // (load 48i16 and) deinterleave into 3 x 16i16
      {3, MVT::v32i16, 56}, // (load 96i16 and) deinterleave into 3 x 32i16

      {3, MVT::v2i32, 3},   // (load 6i32 and) deinterleave into 3 x 2i32
      {3, MVT::v4i32, 3},   // (load 12i32 and) deinterleave into 3 x 4i32
      {3, MVT::v8i32, 7},   // (load 24i32 and) deinterleave into 3 x 8i32
      {3, MVT::v16i32, 14}, // (load 48i32 and) deinterleave into 3 x 16i32
      {3, MVT::v32i32, 32}, // (load 96i32 and) deinterleave into 3 x 32i32

      {3, MVT::v2i64, 1},   // (load 6i64 and) deinterleave into 3 x 2i64
      {3, MVT::v4i64, 5},   // (load 12i64 and) deinterleave into 3 x 4i64
      {3, MVT::v8i64, 10},  // (load 24i64 and) deinterleave into 3 x 8i64
      {3, MVT::v16i64, 20}, // (load 48i64 and) deinterleave into 3 x 16i64

      {4, MVT::v2i8, 4},   // (load 8i8 and) deinterleave into 4 x 2i8
      {4, MVT::v4i8, 4},   // (load 16i8 and) deinterleave into 4 x 4i8
      {4, MVT::v8i8, 12},  // (load 32i8 and) deinterleave into 4 x 8i8
      {4, MVT::v16i8, 24}, // (load 64i8 and) deinterleave into 4 x 16i8
      {4, MVT::v32i8, 56}, // (load 128i8 and) deinterleave into 4 x 32i8

      {4, MVT::v2i16, 6},    // (load 8i16 and) deinterleave into 4 x 2i16
      {4, MVT::v4i16, 17},   // (load 16i16 and) deinterleave into 4 x 4i16
      {4, MVT::v8i16, 33},   // (load 32i16 and) deinterleave into 4 x 8i16
      {4, MVT::v16i16, 75},  // (load 64i16 and) deinterleave into 4 x 16i16
      {4, MVT::v32i16, 150}, // (load 128i16 and) deinterleave into 4 x 32i16

      {4, MVT::v2i32, 4},   // (load 8i32 and) deinterleave into 4 x 2i32
      {4, MVT::v4i32, 8},   // (load 16i32 and) deinterleave into 4 x 4i32
      {4, MVT::v8i32, 16},  // (load 32i32 and) deinterleave into 4 x 8i32
      {4, MVT::v16i32, 32}, // (load 64i32 and) deinterleave into 4 x 16i32
      {4, MVT::v32i32, 68}, // (load 128i32 and) deinterleave into 4 x 32i32

      {4, MVT::v2i64, 6},   // (load 8i64 and) deinterleave into 4 x 2i64
      {4, MVT::v4i64, 8},   // (load 16i64 and) deinterleave into 4 x 4i64
      {4, MVT::v8i64, 20},  // (load 32i64 and) deinterleave into 4 x 8i64
      {4, MVT::v16i64, 40}, // (load 64i64 and) deinterleave into 4 x 16i64

      {6, MVT::v2i8, 6},   // (load 12i8 and) deinterleave into 6 x 2i8
      {6, MVT::v4i8, 14},  // (load 24i8 and) deinterleave into 6 x 4i8
      {6, MVT::v8i8, 18},  // (load 48i8 and) deinterleave into 6 x 8i8
      {6, MVT::v16i8, 43}, // (load 96i8 and) deinterleave into 6 x 16i8
      {6, MVT::v32i8, 82}, // (load 192i8 and) deinterleave into 6 x 32i8

      {6, MVT::v2i16, 13},   // (load 12i16 and) deinterleave into 6 x 2i16
      {6, MVT::v4i16, 9},    // (load 24i16 and) deinterleave into 6 x 4i16
      {6, MVT::v8i16, 39},   // (load 48i16 and) deinterleave into 6 x 8i16
      {6, MVT::v16i16, 106}, // (load 96i16 and) deinterleave into 6 x 16i16
      {6, MVT::v32i16, 212}, // (load 192i16 and) deinterleave into 6 x 32i16

      {6, MVT::v2i32, 6},   // (load 12i32 and) deinterleave into 6 x 2i32
      {6, MVT::v4i32, 15},  // (load 24i32 and) deinterleave into 6 x 4i32
      {6, MVT::v8i32, 31},  // (load 48i32 and) deinterleave into 6 x 8i32
      {6, MVT::v16i32, 64}, // (load 96i32 and) deinterleave into 6 x 16i32

      {6, MVT::v2i64, 6},  // (load 12i64 and) deinterleave into 6 x 2i64
      {6, MVT::v4i64, 18}, // (load 24i64 and) deinterleave into 6 x 4i64
      {6, MVT::v8i64, 36}, // (load 48i64 and) deinterleave into 6 x 8i64

      {8, MVT::v8i32, 40} // (load 64i32 and) deinterleave into 8 x 8i32
  };

  static const CostTblEntry SSSE3InterleavedLoadTbl[] = {
      {2, MVT::v4i16, 2}, // (load 8i16 and) deinterleave into 2 x 4i16
  };

  static const CostTblEntry SSE2InterleavedLoadTbl[] = {
      {2, MVT::v2i16, 2}, // (load 4i16 and) deinterleave into 2 x 2i16
      {2, MVT::v4i16, 7}, // (load 8i16 and) deinterleave into 2 x 4i16

      {2, MVT::v2i32, 2}, // (load 4i32 and) deinterleave into 2 x 2i32
      {2, MVT::v4i32, 2}, // (load 8i32 and) deinterleave into 2 x 4i32

      {2, MVT::v2i64, 2}, // (load 4i64 and) deinterleave into 2 x 2i64
  };

  static const CostTblEntry AVX2InterleavedStoreTbl[] = {
      {2, MVT::v16i8, 3}, // interleave 2 x 16i8 into 32i8 (and store)
      {2, MVT::v32i8, 4}, // interleave 2 x 32i8 into 64i8 (and store)

      {2, MVT::v8i16, 3},  // interleave 2 x 8i16 into 16i16 (and store)
      {2, MVT::v16i16, 4}, // interleave 2 x 16i16 into 32i16 (and store)
      {2, MVT::v32i16, 8}, // interleave 2 x 32i16 into 64i16 (and store)

      {2, MVT::v4i32, 2},   // interleave 2 x 4i32 into 8i32 (and store)
      {2, MVT::v8i32, 4},   // interleave 2 x 8i32 into 16i32 (and store)
      {2, MVT::v16i32, 8},  // interleave 2 x 16i32 into 32i32 (and store)
      {2, MVT::v32i32, 16}, // interleave 2 x 32i32 into 64i32 (and store)

      {2, MVT::v2i64, 2},   // interleave 2 x 2i64 into 4i64 (and store)
      {2, MVT::v4i64, 4},   // interleave 2 x 4i64 into 8i64 (and store)
      {2, MVT::v8i64, 8},   // interleave 2 x 8i64 into 16i64 (and store)
      {2, MVT::v16i64, 16}, // interleave 2 x 16i64 into 32i64 (and store)
      {2, MVT::v32i64, 32}, // interleave 2 x 32i64 into 64i64 (and store)

      {3, MVT::v2i8, 4},   // interleave 3 x 2i8 into 6i8 (and store)
      {3, MVT::v4i8, 4},   // interleave 3 x 4i8 into 12i8 (and store)
      {3, MVT::v8i8, 6},   // interleave 3 x 8i8 into 24i8 (and store)
      {3, MVT::v16i8, 11}, // interleave 3 x 16i8 into 48i8 (and store)
      {3, MVT::v32i8, 13}, // interleave 3 x 32i8 into 96i8 (and store)

      {3, MVT::v2i16, 4},   // interleave 3 x 2i16 into 6i16 (and store)
      {3, MVT::v4i16, 6},   // interleave 3 x 4i16 into 12i16 (and store)
      {3, MVT::v8i16, 12},  // interleave 3 x 8i16 into 24i16 (and store)
      {3, MVT::v16i16, 27}, // interleave 3 x 16i16 into 48i16 (and store)
      {3, MVT::v32i16, 54}, // interleave 3 x 32i16 into 96i16 (and store)

      {3, MVT::v2i32, 4},   // interleave 3 x 2i32 into 6i32 (and store)
      {3, MVT::v4i32, 5},   // interleave 3 x 4i32 into 12i32 (and store)
      {3, MVT::v8i32, 11},  // interleave 3 x 8i32 into 24i32 (and store)
      {3, MVT::v16i32, 22}, // interleave 3 x 16i32 into 48i32 (and store)
      {3, MVT::v32i32, 48}, // interleave 3 x 32i32 into 96i32 (and store)

      {3, MVT::v2i64, 4},   // interleave 3 x 2i64 into 6i64 (and store)
      {3, MVT::v4i64, 6},   // interleave 3 x 4i64 into 12i64 (and store)
      {3, MVT::v8i64, 12},  // interleave 3 x 8i64 into 24i64 (and store)
      {3, MVT::v16i64, 24}, // interleave 3 x 16i64 into 48i64 (and store)

      {4, MVT::v2i8, 4},   // interleave 4 x 2i8 into 8i8 (and store)
      {4, MVT::v4i8, 4},   // interleave 4 x 4i8 into 16i8 (and store)
      {4, MVT::v8i8, 4},   // interleave 4 x 8i8 into 32i8 (and store)
      {4, MVT::v16i8, 8},  // interleave 4 x 16i8 into 64i8 (and store)
      {4, MVT::v32i8, 12}, // interleave 4 x 32i8 into 128i8 (and store)

      {4, MVT::v2i16, 2},   // interleave 4 x 2i16 into 8i16 (and store)
      {4, MVT::v4i16, 6},   // interleave 4 x 4i16 into 16i16 (and store)
      {4, MVT::v8i16, 10},  // interleave 4 x 8i16 into 32i16 (and store)
      {4, MVT::v16i16, 32}, // interleave 4 x 16i16 into 64i16 (and store)
      {4, MVT::v32i16, 64}, // interleave 4 x 32i16 into 128i16 (and store)

      {4, MVT::v2i32, 5},   // interleave 4 x 2i32 into 8i32 (and store)
      {4, MVT::v4i32, 6},   // interleave 4 x 4i32 into 16i32 (and store)
      {4, MVT::v8i32, 16},  // interleave 4 x 8i32 into 32i32 (and store)
      {4, MVT::v16i32, 32}, // interleave 4 x 16i32 into 64i32 (and store)
      {4, MVT::v32i32, 64}, // interleave 4 x 32i32 into 128i32 (and store)

      {4, MVT::v2i64, 6},   // interleave 4 x 2i64 into 8i64 (and store)
      {4, MVT::v4i64, 8},   // interleave 4 x 4i64 into 16i64 (and store)
      {4, MVT::v8i64, 20},  // interleave 4 x 8i64 into 32i64 (and store)
      {4, MVT::v16i64, 40}, // interleave 4 x 16i64 into 64i64 (and store)

      {6, MVT::v2i8, 7},   // interleave 6 x 2i8 into 12i8 (and store)
      {6, MVT::v4i8, 9},   // interleave 6 x 4i8 into 24i8 (and store)
      {6, MVT::v8i8, 16},  // interleave 6 x 8i8 into 48i8 (and store)
      {6, MVT::v16i8, 27}, // interleave 6 x 16i8 into 96i8 (and store)
      {6, MVT::v32i8, 90}, // interleave 6 x 32i8 into 192i8 (and store)

      {6, MVT::v2i16, 10},  // interleave 6 x 2i16 into 12i16 (and store)
      {6, MVT::v4i16, 15},  // interleave 6 x 4i16 into 24i16 (and store)
      {6, MVT::v8i16, 21},  // interleave 6 x 8i16 into 48i16 (and store)
      {6, MVT::v16i16, 58}, // interleave 6 x 16i16 into 96i16 (and store)
      {6, MVT::v32i16, 90}, // interleave 6 x 32i16 into 192i16 (and store)

      {6, MVT::v2i32, 9},   // interleave 6 x 2i32 into 12i32 (and store)
      {6, MVT::v4i32, 12},  // interleave 6 x 4i32 into 24i32 (and store)
      {6, MVT::v8i32, 33},  // interleave 6 x 8i32 into 48i32 (and store)
      {6, MVT::v16i32, 66}, // interleave 6 x 16i32 into 96i32 (and store)

      {6, MVT::v2i64, 8},  // interleave 6 x 2i64 into 12i64 (and store)
      {6, MVT::v4i64, 15}, // interleave 6 x 4i64 into 24i64 (and store)
      {6, MVT::v8i64, 30}, // interleave 6 x 8i64 into 48i64 (and store)
  };

  static const CostTblEntry SSE2InterleavedStoreTbl[] = {
      {2, MVT::v2i8, 1}, // interleave 2 x 2i8 into 4i8 (and store)
      {2, MVT::v4i8, 1}, // interleave 2 x 4i8 into 8i8 (and store)
      {2, MVT::v8i8, 1}, // interleave 2 x 8i8 into 16i8 (and store)

      {2, MVT::v2i16, 1}, // interleave 2 x 2i16 into 4i16 (and store)
      {2, MVT::v4i16, 1}, // interleave 2 x 4i16 into 8i16 (and store)

      {2, MVT::v2i32, 1}, // interleave 2 x 2i32 into 4i32 (and store)
  };
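  // (The unit costs above presumably correspond to a single unpack-style
  // shuffle, e.g. punpcklbw/punpcklwd/punpckldq, per interleaved pair.)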

  if (Opcode == Instruction::Load) {
    auto GetDiscountedCost = [Factor, NumMembers = Indices.size(),
                              MemOpCosts](const CostTblEntry *Entry) {
      // NOTE: this is just an approximation! It can over- or under-estimate
      // the cost!
      return MemOpCosts + divideCeil(NumMembers * Entry->Cost, Factor);
    };
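
    // Worked example (illustrative): with the {4, MVT::v16i8, 24} AVX2 entry
    // above, a stride-4 load group of which only 2 members are live costs
    // MemOpCosts + divideCeil(2 * 24, 4) = MemOpCosts + 12.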

    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2InterleavedLoadTbl, Factor,
                                              ETy.getSimpleVT()))
        return GetDiscountedCost(Entry);

    if (ST->hasSSSE3())
      if (const auto *Entry = CostTableLookup(SSSE3InterleavedLoadTbl, Factor,
                                              ETy.getSimpleVT()))
        return GetDiscountedCost(Entry);

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2InterleavedLoadTbl, Factor,
                                              ETy.getSimpleVT()))
        return GetDiscountedCost(Entry);
  } else {
    assert(Opcode == Instruction::Store &&
           "Expected Store Instruction at this point");
    assert((!Indices.size() || Indices.size() == Factor) &&
           "Interleaved store only supports fully-interleaved groups.");
    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2InterleavedStoreTbl, Factor,
                                              ETy.getSimpleVT()))
        return MemOpCosts + Entry->Cost;

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2InterleavedStoreTbl, Factor,
                                              ETy.getSimpleVT()))
        return MemOpCosts + Entry->Cost;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
}

InstructionCost X86TTIImpl::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                                 int64_t BaseOffset,
                                                 bool HasBaseReg, int64_t Scale,
                                                 unsigned AddrSpace) const {
  // Scaling factors are not free at all.
  // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
  // takes 2 allocations in the out-of-order engine instead of 1
  // for plain addressing mode, i.e. inst (reg1).
  // E.g.,
  //   vaddps (%rsi, %rdx), %ymm0, %ymm1
  // requires two allocations (one for the load, one for the computation),
  // whereas:
  //   vaddps (%rsi), %ymm0, %ymm1
  // requires just one, freeing an allocation for other operations and
  // leaving fewer micro-ops to execute.

  // On some X86 architectures this is even worse: for stores, the complex
  // addressing mode forces the instruction to use the "load" ports instead
  // of the dedicated "store" port.
  // E.g., on Haswell:
  //   vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
  //   vmovaps %ymm1, (%r8)       can use port 2, 3, or 7.
  TargetLoweringBase::AddrMode AM;
  AM.BaseGV = BaseGV;
  AM.BaseOffs = BaseOffset;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Scale;
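  // Per the TTI getScalingFactorCost contract, a non-negative return value is
  // the cost of the scaling factor for a legal addressing mode, while a
  // negative value means the addressing mode is not legal at all.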
  if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace))
    // AM.Scale represents reg2 * scale; charge a cost of 1 as soon as a
    // second register is used.
    return AM.Scale != 0;
  return -1;
}