1 //===-- SystemZTargetTransformInfo.cpp - SystemZ-specific TTI -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements a TargetTransformInfo analysis pass specific to the
10 // SystemZ target machine. It uses the target's detailed information to provide
11 // more precise answers to certain TTI queries, while letting the target
12 // independent and default TTI implementations handle the rest.
14 //===----------------------------------------------------------------------===//
16 #include "SystemZTargetTransformInfo.h"
17 #include "llvm/Analysis/TargetTransformInfo.h"
18 #include "llvm/CodeGen/BasicTTIImpl.h"
19 #include "llvm/CodeGen/CostTable.h"
20 #include "llvm/CodeGen/TargetLowering.h"
21 #include "llvm/IR/IntrinsicInst.h"
22 #include "llvm/Support/Debug.h"
23 using namespace llvm;
25 #define DEBUG_TYPE "systemztti"
27 //===----------------------------------------------------------------------===//
29 // SystemZ cost model.
31 //===----------------------------------------------------------------------===//
33 int SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
34 assert(Ty->isIntegerTy());
36 unsigned BitSize = Ty->getPrimitiveSizeInBits();
37 // There is no cost model for constants with a bit size of 0. Return TCC_Free
38 // here, so that constant hoisting will ignore this constant.
39 if (BitSize == 0)
40 return TTI::TCC_Free;
41 // No cost model for operations on integers larger than 64 bits is implemented yet.
42 if (BitSize > 64)
43 return TTI::TCC_Free;
45 if (Imm == 0)
46 return TTI::TCC_Free;
48 if (Imm.getBitWidth() <= 64) {
49 // Constants loaded via lgfi.
50 if (isInt<32>(Imm.getSExtValue()))
51 return TTI::TCC_Basic;
52 // Constants loaded via llilf.
53 if (isUInt<32>(Imm.getZExtValue()))
54 return TTI::TCC_Basic;
55 // Constants loaded via llihf.
56 if ((Imm.getZExtValue() & 0xffffffff) == 0)
57 return TTI::TCC_Basic;
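// Anything else that fits in 64 bits needs a two-instruction sequence (for
// example llilf to load the low word plus iihf to insert the high word),
// hence the doubled cost below.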
59 return 2 * TTI::TCC_Basic;
62 return 4 * TTI::TCC_Basic;
65 int SystemZTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
66 const APInt &Imm, Type *Ty) {
67 assert(Ty->isIntegerTy());
69 unsigned BitSize = Ty->getPrimitiveSizeInBits();
70 // There is no cost model for constants with a bit size of 0. Return TCC_Free
71 // here, so that constant hoisting will ignore this constant.
72 if (BitSize == 0)
73 return TTI::TCC_Free;
74 // No cost model for operations on integers larger than 64 bits is implemented yet.
75 if (BitSize > 64)
76 return TTI::TCC_Free;
78 switch (Opcode) {
79 default:
80 return TTI::TCC_Free;
81 case Instruction::GetElementPtr:
82 // Always hoist the base address of a GetElementPtr. This prevents the
83 // creation of new constants for every base constant that gets constant
84 // folded with the offset.
85 if (Idx == 0)
86 return 2 * TTI::TCC_Basic;
87 return TTI::TCC_Free;
88 case Instruction::Store:
89 if (Idx == 0 && Imm.getBitWidth() <= 64) {
90 // Any 8-bit immediate store can be implemented via mvi.
91 if (BitSize == 8)
92 return TTI::TCC_Free;
93 // 16-bit immediate values can be stored via mvhhi/mvhi/mvghi.
94 if (isInt<16>(Imm.getSExtValue()))
95 return TTI::TCC_Free;
97 break;
98 case Instruction::ICmp:
99 if (Idx == 1 && Imm.getBitWidth() <= 64) {
100 // Comparisons against signed 32-bit immediates implemented via cgfi.
101 if (isInt<32>(Imm.getSExtValue()))
102 return TTI::TCC_Free;
103 // Comparisons against unsigned 32-bit immediates implemented via clgfi.
104 if (isUInt<32>(Imm.getZExtValue()))
105 return TTI::TCC_Free;
107 break;
108 case Instruction::Add:
109 case Instruction::Sub:
110 if (Idx == 1 && Imm.getBitWidth() <= 64) {
111 // We use algfi/slgfi to add/subtract 32-bit unsigned immediates.
112 if (isUInt<32>(Imm.getZExtValue()))
113 return TTI::TCC_Free;
114 // Or their negation, by swapping addition vs. subtraction.
115 if (isUInt<32>(-Imm.getSExtValue()))
116 return TTI::TCC_Free;
118 break;
119 case Instruction::Mul:
120 if (Idx == 1 && Imm.getBitWidth() <= 64) {
121 // We use msgfi to multiply by 32-bit signed immediates.
122 if (isInt<32>(Imm.getSExtValue()))
123 return TTI::TCC_Free;
125 break;
126 case Instruction::Or:
127 case Instruction::Xor:
128 if (Idx == 1 && Imm.getBitWidth() <= 64) {
129 // Masks supported by oilf/xilf.
130 if (isUInt<32>(Imm.getZExtValue()))
131 return TTI::TCC_Free;
132 // Masks supported by oihf/xihf.
133 if ((Imm.getZExtValue() & 0xffffffff) == 0)
134 return TTI::TCC_Free;
136 break;
137 case Instruction::And:
138 if (Idx == 1 && Imm.getBitWidth() <= 64) {
139 // Any 32-bit AND operation can be implemented via nilf.
140 if (BitSize <= 32)
141 return TTI::TCC_Free;
142 // 64-bit masks supported by nilf.
143 if (isUInt<32>(~Imm.getZExtValue()))
144 return TTI::TCC_Free;
145 // 64-bit masks supported by nihf.
146 if ((Imm.getZExtValue() & 0xffffffff) == 0xffffffff)
147 return TTI::TCC_Free;
148 // Some 64-bit AND operations can be implemented via risbg.
149 const SystemZInstrInfo *TII = ST->getInstrInfo();
150 unsigned Start, End;
151 if (TII->isRxSBGMask(Imm.getZExtValue(), BitSize, Start, End))
152 return TTI::TCC_Free;
154 break;
155 case Instruction::Shl:
156 case Instruction::LShr:
157 case Instruction::AShr:
158 // Always return TCC_Free for the shift value of a shift instruction.
159 if (Idx == 1)
160 return TTI::TCC_Free;
161 break;
162 case Instruction::UDiv:
163 case Instruction::SDiv:
164 case Instruction::URem:
165 case Instruction::SRem:
166 case Instruction::Trunc:
167 case Instruction::ZExt:
168 case Instruction::SExt:
169 case Instruction::IntToPtr:
170 case Instruction::PtrToInt:
171 case Instruction::BitCast:
172 case Instruction::PHI:
173 case Instruction::Call:
174 case Instruction::Select:
175 case Instruction::Ret:
176 case Instruction::Load:
177 break;
180 return SystemZTTIImpl::getIntImmCost(Imm, Ty);
183 int SystemZTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
184 const APInt &Imm, Type *Ty) {
185 assert(Ty->isIntegerTy());
187 unsigned BitSize = Ty->getPrimitiveSizeInBits();
188 // There is no cost model for constants with a bit size of 0. Return TCC_Free
189 // here, so that constant hoisting will ignore this constant.
190 if (BitSize == 0)
191 return TTI::TCC_Free;
192 // No cost model for operations on integers larger than 64 bits is implemented yet.
193 if (BitSize > 64)
194 return TTI::TCC_Free;
196 switch (IID) {
197 default:
198 return TTI::TCC_Free;
199 case Intrinsic::sadd_with_overflow:
200 case Intrinsic::uadd_with_overflow:
201 case Intrinsic::ssub_with_overflow:
202 case Intrinsic::usub_with_overflow:
203 // These get expanded to include a normal addition/subtraction.
204 if (Idx == 1 && Imm.getBitWidth() <= 64) {
205 if (isUInt<32>(Imm.getZExtValue()))
206 return TTI::TCC_Free;
207 if (isUInt<32>(-Imm.getSExtValue()))
208 return TTI::TCC_Free;
210 break;
211 case Intrinsic::smul_with_overflow:
212 case Intrinsic::umul_with_overflow:
213 // These get expanded to include a normal multiplication.
214 if (Idx == 1 && Imm.getBitWidth() <= 64) {
215 if (isInt<32>(Imm.getSExtValue()))
216 return TTI::TCC_Free;
218 break;
219 case Intrinsic::experimental_stackmap:
220 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
221 return TTI::TCC_Free;
222 break;
223 case Intrinsic::experimental_patchpoint_void:
224 case Intrinsic::experimental_patchpoint_i64:
225 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
226 return TTI::TCC_Free;
227 break;
229 return SystemZTTIImpl::getIntImmCost(Imm, Ty);
232 TargetTransformInfo::PopcntSupportKind
233 SystemZTTIImpl::getPopcntSupport(unsigned TyWidth) {
234 assert(isPowerOf2_32(TyWidth) && "Type width must be power of 2");
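// The population-count facility provides a hardware popcnt instruction, so
// counting bits in values up to GPR width is cheap.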
235 if (ST->hasPopulationCount() && TyWidth <= 64)
236 return TTI::PSK_FastHardware;
237 return TTI::PSK_Software;
240 void SystemZTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
241 TTI::UnrollingPreferences &UP) {
242 // Find out if L contains a call and how many stores there are
243 // (weighted by their memory-op cost).
244 bool HasCall = false;
245 unsigned NumStores = 0;
246 for (auto &BB : L->blocks())
247 for (auto &I : *BB) {
248 if (isa<CallInst>(&I) || isa<InvokeInst>(&I)) {
249 ImmutableCallSite CS(&I);
250 if (const Function *F = CS.getCalledFunction()) {
251 if (isLoweredToCall(F))
252 HasCall = true;
253 if (F->getIntrinsicID() == Intrinsic::memcpy ||
254 F->getIntrinsicID() == Intrinsic::memset)
255 NumStores++;
256 } else { // indirect call.
257 HasCall = true;
260 if (isa<StoreInst>(&I)) {
261 Type *MemAccessTy = I.getOperand(0)->getType();
262 NumStores += getMemoryOpCost(Instruction::Store, MemAccessTy, 0, 0);
266 // The z13 processor will run out of store tags if too many stores
267 // are fed into it too quickly. Therefore make sure there are not
268 // too many stores in the resulting unrolled loop.
269 unsigned const Max = (NumStores ? (12 / NumStores) : UINT_MAX);
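// With Max = 12 / NumStores, the unrolled loop body contains at most about
// twelve (cost-weighted) stores.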
271 if (HasCall) {
272 // Only allow full unrolling if the loop has any calls.
273 UP.FullUnrollMaxCount = Max;
274 UP.MaxCount = 1;
275 return;
278 UP.MaxCount = Max;
279 if (UP.MaxCount <= 1)
280 return;
282 // Allow partial and runtime trip count unrolling.
283 UP.Partial = UP.Runtime = true;
285 UP.PartialThreshold = 75;
286 UP.DefaultUnrollRuntimeCount = 4;
288 // Allow expensive instructions in the pre-header of the loop.
289 UP.AllowExpensiveTripCount = true;
291 UP.Force = true;
295 bool SystemZTTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
296 TargetTransformInfo::LSRCost &C2) {
297 // SystemZ specific: check instruction count (first), and don't care about
298 // ImmCost, since offsets are checked explicitly.
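// The std::tie comparison below is lexicographic, so instruction count is
// the dominant criterion, followed by register count and the remaining
// fields in order.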
299 return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
300 C1.NumIVMuls, C1.NumBaseAdds,
301 C1.ScaleCost, C1.SetupCost) <
302 std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
303 C2.NumIVMuls, C2.NumBaseAdds,
304 C2.ScaleCost, C2.SetupCost);
307 unsigned SystemZTTIImpl::getNumberOfRegisters(bool Vector) {
308 if (!Vector)
309 // Discount the stack pointer. Also leave out %r0, since it can't
310 // be used in an address.
311 return 14;
312 if (ST->hasVector())
313 return 32;
314 return 0;
317 unsigned SystemZTTIImpl::getRegisterBitWidth(bool Vector) const {
318 if (!Vector)
319 return 64;
320 if (ST->hasVector())
321 return 128;
322 return 0;
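// SystemZ's scalar integer division instructions compute both the quotient
// and the remainder in a register pair, so a combined divrem operation is
// available for legal scalar integer types.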
325 bool SystemZTTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
326 EVT VT = TLI->getValueType(DL, DataType);
327 return (VT.isScalarInteger() && TLI->isTypeLegal(VT));
330 // Return the bit size for the scalar type or vector element
331 // type. getScalarSizeInBits() returns 0 for a pointer type.
332 static unsigned getScalarSizeInBits(Type *Ty) {
333 unsigned Size =
334 (Ty->isPtrOrPtrVectorTy() ? 64U : Ty->getScalarSizeInBits());
335 assert(Size > 0 && "Element must have non-zero size.");
336 return Size;
339 // getNumberOfParts() calls getTypeLegalizationCost() which splits the vector
340 // type until it is legal. This would e.g. return 4 for <6 x i64>, instead of
341 // 3.
342 static unsigned getNumVectorRegs(Type *Ty) {
343 assert(Ty->isVectorTy() && "Expected vector type");
344 unsigned WideBits = getScalarSizeInBits(Ty) * Ty->getVectorNumElements();
345 assert(WideBits > 0 && "Could not compute size of vector");
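// Round up to the number of full 128-bit vector registers needed.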
346 return ((WideBits % 128U) ? ((WideBits / 128U) + 1) : (WideBits / 128U));
349 int SystemZTTIImpl::getArithmeticInstrCost(
350 unsigned Opcode, Type *Ty,
351 TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
352 TTI::OperandValueProperties Opd1PropInfo,
353 TTI::OperandValueProperties Opd2PropInfo,
354 ArrayRef<const Value *> Args) {
356 // TODO: return a good value for BB-VECTORIZER that includes the
357 // immediate loads, which we do not want to count for the loop
358 // vectorizer, since they are hopefully hoisted out of the loop. This
359 // would require a new parameter 'InLoop', but not sure if constant
360 // args are common enough to motivate this.
362 unsigned ScalarBits = Ty->getScalarSizeInBits();
364 // There are three cases of division and remainder: Dividing with a register
365 // needs a divide instruction. A divisor that is a power-of-two constant
366 // can be implemented with a sequence of shifts. Any other constant needs a
367 // multiply and shifts.
368 const unsigned DivInstrCost = 20;
369 const unsigned DivMulSeqCost = 10;
370 const unsigned SDivPow2Cost = 4;
372 bool SignedDivRem =
373 Opcode == Instruction::SDiv || Opcode == Instruction::SRem;
374 bool UnsignedDivRem =
375 Opcode == Instruction::UDiv || Opcode == Instruction::URem;
377 // Check for a constant divisor.
378 bool DivRemConst = false;
379 bool DivRemConstPow2 = false;
380 if ((SignedDivRem || UnsignedDivRem) && Args.size() == 2) {
381 if (const Constant *C = dyn_cast<Constant>(Args[1])) {
382 const ConstantInt *CVal =
383 (C->getType()->isVectorTy()
384 ? dyn_cast_or_null<const ConstantInt>(C->getSplatValue())
385 : dyn_cast<const ConstantInt>(C));
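// A negated power of two also counts; this mainly matters for signed
// division, which is expanded as a shift sequence plus a negation.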
386 if (CVal != nullptr &&
387 (CVal->getValue().isPowerOf2() || (-CVal->getValue()).isPowerOf2()))
388 DivRemConstPow2 = true;
389 else
390 DivRemConst = true;
394 if (Ty->isVectorTy()) {
395 assert(ST->hasVector() &&
396 "getArithmeticInstrCost() called with vector type.");
397 unsigned VF = Ty->getVectorNumElements();
398 unsigned NumVectors = getNumVectorRegs(Ty);
400 // These vector operations are custom handled, but are still supported
401 // with one instruction per vector, regardless of element size.
402 if (Opcode == Instruction::Shl || Opcode == Instruction::LShr ||
403 Opcode == Instruction::AShr) {
404 return NumVectors;
407 if (DivRemConstPow2)
408 return (NumVectors * (SignedDivRem ? SDivPow2Cost : 1));
409 if (DivRemConst)
410 return VF * DivMulSeqCost + getScalarizationOverhead(Ty, Args);
411 if ((SignedDivRem || UnsignedDivRem) && VF > 4)
412 // Temporary hack: disable high vectorization factors with integer
413 // division/remainder, which will get scalarized and handled with
414 // GR128 registers. The mischeduler is not clever enough to avoid
415 // spilling yet.
416 return 1000;
418 // These FP operations are supported with a single vector instruction for
419 // double (base implementation assumes float generally costs 2). For
420 // FP128, the scalar cost is 1, and there is no overhead since the values
421 // are already in scalar registers.
422 if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
423 Opcode == Instruction::FMul || Opcode == Instruction::FDiv) {
424 switch (ScalarBits) {
425 case 32: {
426 // The vector enhancements facility 1 provides v4f32 instructions.
427 if (ST->hasVectorEnhancements1())
428 return NumVectors;
429 // Return the cost of multiple scalar invocations plus the cost of
430 // inserting and extracting the values.
431 unsigned ScalarCost =
432 getArithmeticInstrCost(Opcode, Ty->getScalarType());
433 unsigned Cost = (VF * ScalarCost) + getScalarizationOverhead(Ty, Args);
434 // FIXME: VF 2 for these FP operations is currently just as
435 // expensive as for VF 4.
436 if (VF == 2)
437 Cost *= 2;
438 return Cost;
440 case 64:
441 case 128:
442 return NumVectors;
443 default:
444 break;
448 // There is no native support for FRem.
449 if (Opcode == Instruction::FRem) {
450 unsigned Cost = (VF * LIBCALL_COST) + getScalarizationOverhead(Ty, Args);
451 // FIXME: VF 2 for float is currently just as expensive as for VF 4.
452 if (VF == 2 && ScalarBits == 32)
453 Cost *= 2;
454 return Cost;
457 else { // Scalar:
458 // These FP operations are supported with a dedicated instruction for
459 // float, double and fp128 (base implementation assumes float generally
460 // costs 2).
461 if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
462 Opcode == Instruction::FMul || Opcode == Instruction::FDiv)
463 return 1;
465 // There is no native support for FRem.
466 if (Opcode == Instruction::FRem)
467 return LIBCALL_COST;
469 // Give a discount for some combined logical operations if supported.
470 if (Args.size() == 2 && ST->hasMiscellaneousExtensions3()) {
471 if (Opcode == Instruction::Xor) {
472 for (const Value *A : Args) {
473 if (const Instruction *I = dyn_cast<Instruction>(A))
474 if (I->hasOneUse() &&
475 (I->getOpcode() == Instruction::And ||
476 I->getOpcode() == Instruction::Or ||
477 I->getOpcode() == Instruction::Xor))
478 return 0;
481 else if (Opcode == Instruction::Or || Opcode == Instruction::And) {
482 for (const Value *A : Args) {
483 if (const Instruction *I = dyn_cast<Instruction>(A))
484 if (I->hasOneUse() && I->getOpcode() == Instruction::Xor)
485 return 0;
490 // Or requires one instruction, although it has custom handling for i64.
491 if (Opcode == Instruction::Or)
492 return 1;
494 if (Opcode == Instruction::Xor && ScalarBits == 1) {
495 if (ST->hasLoadStoreOnCond2())
496 return 5; // 2 * (li 0; loc 1); xor
497 return 7; // 2 * ipm sequences ; xor ; shift ; compare
500 if (DivRemConstPow2)
501 return (SignedDivRem ? SDivPow2Cost : 1);
502 if (DivRemConst)
503 return DivMulSeqCost;
504 if (SignedDivRem || UnsignedDivRem)
505 return DivInstrCost;
508 // Fallback to the default implementation.
509 return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
510 Opd1PropInfo, Opd2PropInfo, Args);
513 int SystemZTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
514 Type *SubTp) {
515 assert (Tp->isVectorTy());
516 assert (ST->hasVector() && "getShuffleCost() called.");
517 unsigned NumVectors = getNumVectorRegs(Tp);
519 // TODO: Since fp32 is expanded, the shuffle cost should always be 0.
521 // FP128 values are always in scalar registers, so there is no work
522 // involved with a shuffle, except for broadcast. In that case register
523 // moves are done with a single instruction per element.
524 if (Tp->getScalarType()->isFP128Ty())
525 return (Kind == TargetTransformInfo::SK_Broadcast ? NumVectors - 1 : 0);
527 switch (Kind) {
528 case TargetTransformInfo::SK_ExtractSubvector:
529 // ExtractSubvector Index indicates start offset.
531 // Extracting a subvector from the first index is a noop.
532 return (Index == 0 ? 0 : NumVectors);
534 case TargetTransformInfo::SK_Broadcast:
535 // Loop vectorizer calls here to figure out the extra cost of
536 // broadcasting a loaded value to all elements of a vector. Since vlrep
537 // loads and replicates with a single instruction, adjust the returned
538 // value.
539 return NumVectors - 1;
541 default:
543 // SystemZ supports single instruction permutation / replication.
544 return NumVectors;
547 return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
550 // Return the log2 difference of the element sizes of the two vector types.
551 static unsigned getElSizeLog2Diff(Type *Ty0, Type *Ty1) {
552 unsigned Bits0 = Ty0->getScalarSizeInBits();
553 unsigned Bits1 = Ty1->getScalarSizeInBits();
555 if (Bits1 > Bits0)
556 return (Log2_32(Bits1) - Log2_32(Bits0));
558 return (Log2_32(Bits0) - Log2_32(Bits1));
561 // Return the number of instructions needed to truncate SrcTy to DstTy.
562 unsigned SystemZTTIImpl::
563 getVectorTruncCost(Type *SrcTy, Type *DstTy) {
564 assert (SrcTy->isVectorTy() && DstTy->isVectorTy());
565 assert (SrcTy->getPrimitiveSizeInBits() > DstTy->getPrimitiveSizeInBits() &&
566 "Packing must reduce size of vector type.");
567 assert (SrcTy->getVectorNumElements() == DstTy->getVectorNumElements() &&
568 "Packing should not change number of elements.");
570 // TODO: Since fp32 is expanded, the extract cost should always be 0.
572 unsigned NumParts = getNumVectorRegs(SrcTy);
573 if (NumParts <= 2)
574 // Up to 2 vector registers can be truncated efficiently with pack or
575 // permute. The latter requires an immediate mask to be loaded, which
576 // typically gets hoisted out of a loop. TODO: return a good value for
577 // BB-VECTORIZER that includes the immediate loads, which we do not want
578 // to count for the loop vectorizer.
579 return 1;
581 unsigned Cost = 0;
582 unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
583 unsigned VF = SrcTy->getVectorNumElements();
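// Each iteration below models one packing step: the number of vector parts
// is halved and one instruction per remaining part is added.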
584 for (unsigned P = 0; P < Log2Diff; ++P) {
585 if (NumParts > 1)
586 NumParts /= 2;
587 Cost += NumParts;
590 // Currently, a general mix of permutes and pack instructions is output by
591 // isel, which follows the cost computation above except for this case, which
592 // is one instruction less:
593 if (VF == 8 && SrcTy->getScalarSizeInBits() == 64 &&
594 DstTy->getScalarSizeInBits() == 8)
595 Cost--;
597 return Cost;
600 // Return the cost of converting a vector bitmask produced by a compare
601 // (SrcTy), to the type of the select or extend instruction (DstTy).
602 unsigned SystemZTTIImpl::
603 getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy) {
604 assert (SrcTy->isVectorTy() && DstTy->isVectorTy() &&
605 "Should only be called with vector types.");
607 unsigned PackCost = 0;
608 unsigned SrcScalarBits = SrcTy->getScalarSizeInBits();
609 unsigned DstScalarBits = DstTy->getScalarSizeInBits();
610 unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
611 if (SrcScalarBits > DstScalarBits)
612 // The bitmask will be truncated.
613 PackCost = getVectorTruncCost(SrcTy, DstTy);
614 else if (SrcScalarBits < DstScalarBits) {
615 unsigned DstNumParts = getNumVectorRegs(DstTy);
616 // Each vector select needs its part of the bitmask unpacked.
617 PackCost = Log2Diff * DstNumParts;
618 // Extra cost for moving part of mask before unpacking.
619 PackCost += DstNumParts - 1;
622 return PackCost;
625 // Return the type of the compared operands. This is needed to compute the
626 // cost for a Select / ZExt or SExt instruction.
627 static Type *getCmpOpsType(const Instruction *I, unsigned VF = 1) {
628 Type *OpTy = nullptr;
629 if (CmpInst *CI = dyn_cast<CmpInst>(I->getOperand(0)))
630 OpTy = CI->getOperand(0)->getType();
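// Otherwise look through a logical instruction that combines two compares
// and use the operand type of the first compare.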
631 else if (Instruction *LogicI = dyn_cast<Instruction>(I->getOperand(0)))
632 if (LogicI->getNumOperands() == 2)
633 if (CmpInst *CI0 = dyn_cast<CmpInst>(LogicI->getOperand(0)))
634 if (isa<CmpInst>(LogicI->getOperand(1)))
635 OpTy = CI0->getOperand(0)->getType();
637 if (OpTy != nullptr) {
638 if (VF == 1) {
639 assert (!OpTy->isVectorTy() && "Expected scalar type");
640 return OpTy;
642 // Return the potentially vectorized type based on 'I' and 'VF'. 'I' may
643 // be either scalar or already vectorized with the same or a lesser VF.
644 Type *ElTy = OpTy->getScalarType();
645 return VectorType::get(ElTy, VF);
648 return nullptr;
651 // Get the cost of converting a boolean vector to a vector with same width
652 // and element size as Dst, plus the cost of zero extending if needed.
653 unsigned SystemZTTIImpl::
654 getBoolVecToIntConversionCost(unsigned Opcode, Type *Dst,
655 const Instruction *I) {
656 assert (Dst->isVectorTy());
657 unsigned VF = Dst->getVectorNumElements();
658 unsigned Cost = 0;
659 // If we know the widths of the compared operands, get the cost of
660 // converting them to match Dst. Otherwise assume the same widths.
661 Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
662 if (CmpOpTy != nullptr)
663 Cost = getVectorBitmaskConversionCost(CmpOpTy, Dst);
664 if (Opcode == Instruction::ZExt || Opcode == Instruction::UIToFP)
665 // One 'vn' per dst vector with an immediate mask.
666 Cost += getNumVectorRegs(Dst);
667 return Cost;
670 int SystemZTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
671 const Instruction *I) {
672 unsigned DstScalarBits = Dst->getScalarSizeInBits();
673 unsigned SrcScalarBits = Src->getScalarSizeInBits();
675 if (Src->isVectorTy()) {
676 assert (ST->hasVector() && "getCastInstrCost() called with vector type.");
677 assert (Dst->isVectorTy());
678 unsigned VF = Src->getVectorNumElements();
679 unsigned NumDstVectors = getNumVectorRegs(Dst);
680 unsigned NumSrcVectors = getNumVectorRegs(Src);
682 if (Opcode == Instruction::Trunc) {
683 if (Src->getScalarSizeInBits() == Dst->getScalarSizeInBits())
684 return 0; // Check for NOOP conversions.
685 return getVectorTruncCost(Src, Dst);
688 if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
689 if (SrcScalarBits >= 8) {
690 // ZExt/SExt will be handled with one unpack per doubling of width.
691 unsigned NumUnpacks = getElSizeLog2Diff(Src, Dst);
693 // For types that span multiple vector registers, some additional
694 // instructions are used to set up the unpacking.
695 unsigned NumSrcVectorOps =
696 (NumUnpacks > 1 ? (NumDstVectors - NumSrcVectors)
697 : (NumDstVectors / 2));
699 return (NumUnpacks * NumDstVectors) + NumSrcVectorOps;
701 else if (SrcScalarBits == 1)
702 return getBoolVecToIntConversionCost(Opcode, Dst, I);
705 if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP ||
706 Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI) {
707 // TODO: Fix base implementation which could simplify things a bit here
708 // (seems to miss on differentiating on scalar/vector types).
710 // Only 64 bit vector conversions are natively supported before arch13.
711 if (DstScalarBits == 64 || ST->hasVectorEnhancements2()) {
712 if (SrcScalarBits == DstScalarBits)
713 return NumDstVectors;
715 if (SrcScalarBits == 1)
716 return getBoolVecToIntConversionCost(Opcode, Dst, I) + NumDstVectors;
719 // Return the cost of multiple scalar invocations plus the cost of
720 // inserting and extracting the values. The base implementation does not
721 // realize that float->int gets scalarized.
722 unsigned ScalarCost = getCastInstrCost(Opcode, Dst->getScalarType(),
723 Src->getScalarType());
724 unsigned TotCost = VF * ScalarCost;
725 bool NeedsInserts = true, NeedsExtracts = true;
726 // FP128 registers do not get inserted or extracted.
727 if (DstScalarBits == 128 &&
728 (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP))
729 NeedsInserts = false;
730 if (SrcScalarBits == 128 &&
731 (Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI))
732 NeedsExtracts = false;
734 TotCost += getScalarizationOverhead(Src, false, NeedsExtracts);
735 TotCost += getScalarizationOverhead(Dst, NeedsInserts, false);
737 // FIXME: VF 2 for float<->i32 is currently just as expensive as for VF 4.
738 if (VF == 2 && SrcScalarBits == 32 && DstScalarBits == 32)
739 TotCost *= 2;
741 return TotCost;
744 if (Opcode == Instruction::FPTrunc) {
745 if (SrcScalarBits == 128) // fp128 -> double/float + inserts of elements.
746 return VF /*ldxbr/lexbr*/ + getScalarizationOverhead(Dst, true, false);
747 else // double -> float
748 return VF / 2 /*vledb*/ + std::max(1U, VF / 4 /*vperm*/);
751 if (Opcode == Instruction::FPExt) {
752 if (SrcScalarBits == 32 && DstScalarBits == 64) {
753 // float -> double is very rare and currently unoptimized. Instead of
754 // using vldeb, which can do two at a time, all conversions are
755 // scalarized.
756 return VF * 2;
758 // -> fp128. VF * lxdb/lxeb + extraction of elements.
759 return VF + getScalarizationOverhead(Src, false, true);
762 else { // Scalar
763 assert (!Dst->isVectorTy());
765 if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP) {
766 if (SrcScalarBits >= 32 ||
767 (I != nullptr && isa<LoadInst>(I->getOperand(0))))
768 return 1;
769 return SrcScalarBits > 1 ? 2 /*i8/i16 extend*/ : 5 /*branch seq.*/;
772 if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
773 Src->isIntegerTy(1)) {
774 if (ST->hasLoadStoreOnCond2())
775 return 2; // li 0; loc 1
777 // This should be extension of a compare i1 result, which is done with
778 // ipm and a varying sequence of instructions.
779 unsigned Cost = 0;
780 if (Opcode == Instruction::SExt)
781 Cost = (DstScalarBits < 64 ? 3 : 4);
782 if (Opcode == Instruction::ZExt)
783 Cost = 3;
784 Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I) : nullptr);
785 if (CmpOpTy != nullptr && CmpOpTy->isFloatingPointTy())
786 // If operands of an FP type were compared, this costs +1.
787 Cost++;
788 return Cost;
792 return BaseT::getCastInstrCost(Opcode, Dst, Src, I);
795 // Scalar i8 / i16 operations will typically be made after first extending
796 // the operands to i32.
797 static unsigned getOperandsExtensionCost(const Instruction *I) {
798 unsigned ExtCost = 0;
799 for (Value *Op : I->operands())
800 // A load of i8 or i16 sign/zero extends to i32.
801 if (!isa<LoadInst>(Op) && !isa<ConstantInt>(Op))
802 ExtCost++;
804 return ExtCost;
807 int SystemZTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
808 Type *CondTy, const Instruction *I) {
809 if (ValTy->isVectorTy()) {
810 assert (ST->hasVector() && "getCmpSelInstrCost() called with vector type.");
811 unsigned VF = ValTy->getVectorNumElements();
813 // Called with a compare instruction.
814 if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) {
815 unsigned PredicateExtraCost = 0;
816 if (I != nullptr) {
817 // Some predicates cost one or two extra instructions.
818 switch (cast<CmpInst>(I)->getPredicate()) {
819 case CmpInst::Predicate::ICMP_NE:
820 case CmpInst::Predicate::ICMP_UGE:
821 case CmpInst::Predicate::ICMP_ULE:
822 case CmpInst::Predicate::ICMP_SGE:
823 case CmpInst::Predicate::ICMP_SLE:
824 PredicateExtraCost = 1;
825 break;
826 case CmpInst::Predicate::FCMP_ONE:
827 case CmpInst::Predicate::FCMP_ORD:
828 case CmpInst::Predicate::FCMP_UEQ:
829 case CmpInst::Predicate::FCMP_UNO:
830 PredicateExtraCost = 2;
831 break;
832 default:
833 break;
837 // Float is handled with 2*vmr[lh]f + 2*vldeb + vfchdb for each pair of
838 // floats. FIXME: <2 x float> generates the same code as <4 x float>.
839 unsigned CmpCostPerVector = (ValTy->getScalarType()->isFloatTy() ? 10 : 1);
840 unsigned NumVecs_cmp = getNumVectorRegs(ValTy);
842 unsigned Cost = (NumVecs_cmp * (CmpCostPerVector + PredicateExtraCost));
843 return Cost;
845 else { // Called with a select instruction.
846 assert (Opcode == Instruction::Select);
848 // We can figure out the extra cost of packing / unpacking if the
849 // instruction was passed and the compare instruction is found.
850 unsigned PackCost = 0;
851 Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
852 if (CmpOpTy != nullptr)
853 PackCost =
854 getVectorBitmaskConversionCost(CmpOpTy, ValTy);
856 return getNumVectorRegs(ValTy) /*vsel*/ + PackCost;
859 else { // Scalar
860 switch (Opcode) {
861 case Instruction::ICmp: {
862 // A loaded value compared with 0 with multiple users becomes Load and
863 // Test. The load is then not foldable, so return 0 cost for the ICmp.
864 unsigned ScalarBits = ValTy->getScalarSizeInBits();
865 if (I != nullptr && ScalarBits >= 32)
866 if (LoadInst *Ld = dyn_cast<LoadInst>(I->getOperand(0)))
867 if (const ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1)))
868 if (!Ld->hasOneUse() && Ld->getParent() == I->getParent() &&
869 C->getZExtValue() == 0)
870 return 0;
872 unsigned Cost = 1;
873 if (ValTy->isIntegerTy() && ValTy->getScalarSizeInBits() <= 16)
874 Cost += (I != nullptr ? getOperandsExtensionCost(I) : 2);
875 return Cost;
877 case Instruction::Select:
878 if (ValTy->isFloatingPointTy())
879 return 4; // No load on condition for FP - costs a conditional jump.
880 return 1; // Load On Condition / Select Register.
884 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, nullptr);
887 int SystemZTTIImpl::
888 getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
889 // vlvgp will insert two GRs into a vector register, so only count half the
890 // number of instructions.
891 if (Opcode == Instruction::InsertElement && Val->isIntOrIntVectorTy(64))
892 return ((Index % 2 == 0) ? 1 : 0);
894 if (Opcode == Instruction::ExtractElement) {
895 int Cost = ((getScalarSizeInBits(Val) == 1) ? 2 /*+test-under-mask*/ : 1);
897 // Give a slight penalty for moving out of vector pipeline to FXU unit.
898 if (Index == 0 && Val->isIntOrIntVectorTy())
899 Cost += 1;
901 return Cost;
904 return BaseT::getVectorInstrCost(Opcode, Val, Index);
907 // Check if a load may be folded as a memory operand in its user.
908 bool SystemZTTIImpl::
909 isFoldableLoad(const LoadInst *Ld, const Instruction *&FoldedValue) {
910 if (!Ld->hasOneUse())
911 return false;
912 FoldedValue = Ld;
913 const Instruction *UserI = cast<Instruction>(*Ld->user_begin());
914 unsigned LoadedBits = getScalarSizeInBits(Ld->getType());
915 unsigned TruncBits = 0;
916 unsigned SExtBits = 0;
917 unsigned ZExtBits = 0;
918 if (UserI->hasOneUse()) {
919 unsigned UserBits = UserI->getType()->getScalarSizeInBits();
920 if (isa<TruncInst>(UserI))
921 TruncBits = UserBits;
922 else if (isa<SExtInst>(UserI))
923 SExtBits = UserBits;
924 else if (isa<ZExtInst>(UserI))
925 ZExtBits = UserBits;
927 if (TruncBits || SExtBits || ZExtBits) {
928 FoldedValue = UserI;
929 UserI = cast<Instruction>(*UserI->user_begin());
930 // Load (single use) -> trunc/extend (single use) -> UserI
932 if ((UserI->getOpcode() == Instruction::Sub ||
933 UserI->getOpcode() == Instruction::SDiv ||
934 UserI->getOpcode() == Instruction::UDiv) &&
935 UserI->getOperand(1) != FoldedValue)
936 return false; // Not commutative, only RHS foldable.
937 // LoadOrTruncBits holds the number of effectively loaded bits, or 0 if the
938 // load was extended.
939 unsigned LoadOrTruncBits =
940 ((SExtBits || ZExtBits) ? 0 : (TruncBits ? TruncBits : LoadedBits));
941 switch (UserI->getOpcode()) {
942 case Instruction::Add: // SE: 16->32, 16/32->64, z14:16->64. ZE: 32->64
943 case Instruction::Sub:
944 case Instruction::ICmp:
945 if (LoadedBits == 32 && ZExtBits == 64)
946 return true;
947 LLVM_FALLTHROUGH;
948 case Instruction::Mul: // SE: 16->32, 32->64, z14:16->64
949 if (UserI->getOpcode() != Instruction::ICmp) {
950 if (LoadedBits == 16 &&
951 (SExtBits == 32 ||
952 (SExtBits == 64 && ST->hasMiscellaneousExtensions2())))
953 return true;
954 if (LoadOrTruncBits == 16)
955 return true;
957 LLVM_FALLTHROUGH;
958 case Instruction::SDiv:// SE: 32->64
959 if (LoadedBits == 32 && SExtBits == 64)
960 return true;
961 LLVM_FALLTHROUGH;
962 case Instruction::UDiv:
963 case Instruction::And:
964 case Instruction::Or:
965 case Instruction::Xor:
966 // This also makes sense for float operations, but disabled for now due
967 // to regressions.
968 // case Instruction::FCmp:
969 // case Instruction::FAdd:
970 // case Instruction::FSub:
971 // case Instruction::FMul:
972 // case Instruction::FDiv:
974 // All possible extensions of memory checked above.
976 // Comparison between memory and immediate.
977 if (UserI->getOpcode() == Instruction::ICmp)
978 if (ConstantInt *CI = dyn_cast<ConstantInt>(UserI->getOperand(1)))
979 if (isUInt<16>(CI->getZExtValue()))
980 return true;
981 return (LoadOrTruncBits == 32 || LoadOrTruncBits == 64);
982 break;
984 return false;
987 static bool isBswapIntrinsicCall(const Value *V) {
988 if (const Instruction *I = dyn_cast<Instruction>(V))
989 if (auto *CI = dyn_cast<CallInst>(I))
990 if (auto *F = CI->getCalledFunction())
991 if (F->getIntrinsicID() == Intrinsic::bswap)
992 return true;
993 return false;
996 int SystemZTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
997 unsigned Alignment, unsigned AddressSpace,
998 const Instruction *I) {
999 assert(!Src->isVoidTy() && "Invalid type");
1001 if (!Src->isVectorTy() && Opcode == Instruction::Load && I != nullptr) {
1002 // Store the load or its truncated or extended value in FoldedValue.
1003 const Instruction *FoldedValue = nullptr;
1004 if (isFoldableLoad(cast<LoadInst>(I), FoldedValue)) {
1005 const Instruction *UserI = cast<Instruction>(*FoldedValue->user_begin());
1006 assert (UserI->getNumOperands() == 2 && "Expected a binop.");
1008 // UserI can't fold two loads, so in that case return 0 cost only
1009 // half of the time.
1010 for (unsigned i = 0; i < 2; ++i) {
1011 if (UserI->getOperand(i) == FoldedValue)
1012 continue;
1014 if (Instruction *OtherOp = dyn_cast<Instruction>(UserI->getOperand(i))){
1015 LoadInst *OtherLoad = dyn_cast<LoadInst>(OtherOp);
1016 if (!OtherLoad &&
1017 (isa<TruncInst>(OtherOp) || isa<SExtInst>(OtherOp) ||
1018 isa<ZExtInst>(OtherOp)))
1019 OtherLoad = dyn_cast<LoadInst>(OtherOp->getOperand(0));
1020 if (OtherLoad && isFoldableLoad(OtherLoad, FoldedValue/*dummy*/))
1021 return i == 0; // Both operands foldable.
1025 return 0; // Only I is foldable in user.
1029 unsigned NumOps =
1030 (Src->isVectorTy() ? getNumVectorRegs(Src) : getNumberOfParts(Src));
1032 // Store/Load reversed saves one instruction.
1033 if (((!Src->isVectorTy() && NumOps == 1) || ST->hasVectorEnhancements2()) &&
1034 I != nullptr) {
1035 if (Opcode == Instruction::Load && I->hasOneUse()) {
1036 const Instruction *LdUser = cast<Instruction>(*I->user_begin());
1037 // In case of load -> bswap -> store, return normal cost for the load.
1038 if (isBswapIntrinsicCall(LdUser) &&
1039 (!LdUser->hasOneUse() || !isa<StoreInst>(*LdUser->user_begin())))
1040 return 0;
1042 else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) {
1043 const Value *StoredVal = SI->getValueOperand();
1044 if (StoredVal->hasOneUse() && isBswapIntrinsicCall(StoredVal))
1045 return 0;
1049 if (Src->getScalarSizeInBits() == 128)
1050 // 128-bit scalars are held in a pair of 64-bit registers.
1051 NumOps *= 2;
1053 return NumOps;
1056 // The generic implementation of getInterleavedMemoryOpCost() is based on
1057 // adding costs of the memory operations plus all the extracts and inserts
1058 // needed for using / defining the vector operands. The SystemZ version does
1059 // roughly the same but bases the computations on vector permutations
1060 // instead.
1061 int SystemZTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
1062 unsigned Factor,
1063 ArrayRef<unsigned> Indices,
1064 unsigned Alignment,
1065 unsigned AddressSpace,
1066 bool UseMaskForCond,
1067 bool UseMaskForGaps) {
1068 if (UseMaskForCond || UseMaskForGaps)
1069 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1070 Alignment, AddressSpace,
1071 UseMaskForCond, UseMaskForGaps);
1072 assert(isa<VectorType>(VecTy) &&
1073 "Expect a vector type for interleaved memory op");
1075 // Return the ceiling of dividing A by B.
1076 auto ceil = [](unsigned A, unsigned B) { return (A + B - 1) / B; };
1078 unsigned NumElts = VecTy->getVectorNumElements();
1079 assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
1080 unsigned VF = NumElts / Factor;
1081 unsigned NumEltsPerVecReg = (128U / getScalarSizeInBits(VecTy));
1082 unsigned NumVectorMemOps = getNumVectorRegs(VecTy);
1083 unsigned NumPermutes = 0;
1085 if (Opcode == Instruction::Load) {
1086 // Loading interleave groups may have gaps, which may mean fewer
1087 // loads. Find out how many vectors will be loaded in total, and in how
1088 // many of them each value will be.
1089 BitVector UsedInsts(NumVectorMemOps, false);
1090 std::vector<BitVector> ValueVecs(Factor, BitVector(NumVectorMemOps, false));
1091 for (unsigned Index : Indices)
1092 for (unsigned Elt = 0; Elt < VF; ++Elt) {
1093 unsigned Vec = (Index + Elt * Factor) / NumEltsPerVecReg;
1094 UsedInsts.set(Vec);
1095 ValueVecs[Index].set(Vec);
1097 NumVectorMemOps = UsedInsts.count();
1099 for (unsigned Index : Indices) {
1100 // Estimate that each loaded source vector containing this Index
1101 // requires one operation, except that vperm can handle two input
1102 // registers the first time for each dst vector.
1103 unsigned NumSrcVecs = ValueVecs[Index].count();
1104 unsigned NumDstVecs = ceil(VF * getScalarSizeInBits(VecTy), 128U);
1105 assert (NumSrcVecs >= NumDstVecs && "Expected at least as many sources");
1106 NumPermutes += std::max(1U, NumSrcVecs - NumDstVecs);
1108 } else {
1109 // Estimate the permutes for each stored vector as the smaller of the
1110 // number of elements and the number of source vectors. Subtract one per
1111 // dst vector for vperm (see above).
1112 unsigned NumSrcVecs = std::min(NumEltsPerVecReg, Factor);
1113 unsigned NumDstVecs = NumVectorMemOps;
1114 assert (NumSrcVecs > 1 && "Expected at least two source vectors.");
1115 NumPermutes += (NumDstVecs * NumSrcVecs) - NumDstVecs;
1118 // Cost of load/store operations and the permutations needed.
1119 return NumVectorMemOps + NumPermutes;
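// Cost of intrinsics that are handled with one instruction per vector
// register: currently only vector bswap, which is done with a vperm per
// register. Returns -1 if the intrinsic is not handled here, so the callers
// fall back to the base implementation.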
1122 static int getVectorIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy) {
1123 if (RetTy->isVectorTy() && ID == Intrinsic::bswap)
1124 return getNumVectorRegs(RetTy); // VPERM
1125 return -1;
1128 int SystemZTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
1129 ArrayRef<Value *> Args,
1130 FastMathFlags FMF, unsigned VF) {
1131 int Cost = getVectorIntrinsicInstrCost(ID, RetTy);
1132 if (Cost != -1)
1133 return Cost;
1134 return BaseT::getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
1137 int SystemZTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
1138 ArrayRef<Type *> Tys,
1139 FastMathFlags FMF,
1140 unsigned ScalarizationCostPassed) {
1141 int Cost = getVectorIntrinsicInstrCost(ID, RetTy);
1142 if (Cost != -1)
1143 return Cost;
1144 return BaseT::getIntrinsicInstrCost(ID, RetTy, Tys,
1145 FMF, ScalarizationCostPassed);