//===-- AMDGPUInstructions.td - Common instruction defs ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains instruction defs that are common to all hw codegen
// targets.
//
//===----------------------------------------------------------------------===//
// Base class for all AMDGPU machine instructions.  Routes the outs/ins/asm/
// pattern template arguments into the common Instruction fields and stashes
// the register-load/store flags in the top two bits of TSFlags.
class AMDGPUInst <dag outs, dag ins, string asm = "",
  list<dag> pattern = []> : Instruction {
  field bit isRegisterLoad = 0;
  field bit isRegisterStore = 0;

  let Namespace = "AMDGPU";
  let OutOperandList = outs;
  let InOperandList = ins;
  // NOTE(review): this line was dropped by the extraction; restored from
  // upstream AMDGPUInstructions.td -- verify.
  let AsmString = asm;
  let Pattern = pattern;
  let Itinerary = NullALU;

  // SoftFail is a field the disassembler can use to provide a way for
  // instructions to not match without killing the whole decode process. It is
  // mainly used for ARM, but Tablegen expects this field to exist or it fails
  // to build the decode table.
  field bits<64> SoftFail = 0;

  let DecoderNamespace = Namespace;

  let TSFlags{63} = isRegisterLoad;
  let TSFlags{62} = isRegisterStore;
}
// Shader-style instruction; Inst defaults to an all-ones (invalid) encoding.
class AMDGPUShaderInst <dag outs, dag ins, string asm = "",
  list<dag> pattern = []> : AMDGPUInst<outs, ins, asm, pattern> {

  field bits<32> Inst = 0xffffffff;
}
//===---------------------------------------------------------------------===//
//===---------------------------------------------------------------------===//
// Pseudo instruction format inherited from the old AMDIL/R600 backend.
// NOTE(review): several field lines were dropped by the extraction
// (isPseudo, hasIEEEFlag, mayLoad, mayStore, closing brace); restored from
// upstream AMDGPUInstructions.td -- verify.
class ILFormat<dag outs, dag ins, string asmstr, list<dag> pattern>
: Instruction {

     let Namespace = "AMDGPU";
     dag OutOperandList = outs;
     dag InOperandList = ins;
     let Pattern = pattern;
     let AsmString = !strconcat(asmstr, "\n");
     let isPseudo = 1;
     let Itinerary = NullALU;
     bit hasIEEEFlag = 0;
     bit hasZeroOpFlag = 0;
     let mayLoad = 0;
     let mayStore = 0;
     let hasSideEffects = 0;
     let isCodeGenOnly = 1;
}
66 def TruePredicate : Predicate<"true">;
// Mix-in that gives a def separately-settable predicate slots which are
// merged into the final Predicates list.
class PredicateControl {
  Predicate SubtargetPredicate = TruePredicate;
  list<Predicate> AssemblerPredicates = [];
  Predicate AssemblerPredicate = TruePredicate;
  list<Predicate> OtherPredicates = [];
  list<Predicate> Predicates = !listconcat([SubtargetPredicate,
                                            AssemblerPredicate],
                                            AssemblerPredicates,
                                            OtherPredicates);
}
78 class AMDGPUPat<dag pattern, dag result> : Pat<pattern, result>,
// Subtarget-feature / TargetMachine-option predicates used by patterns.
def FP16Denormals : Predicate<"Subtarget->hasFP16Denormals()">;
def FP32Denormals : Predicate<"Subtarget->hasFP32Denormals()">;
def FP64Denormals : Predicate<"Subtarget->hasFP64Denormals()">;
def NoFP16Denormals : Predicate<"!Subtarget->hasFP16Denormals()">;
def NoFP32Denormals : Predicate<"!Subtarget->hasFP32Denormals()">;
def NoFP64Denormals : Predicate<"!Subtarget->hasFP64Denormals()">;
def UnsafeFPMath : Predicate<"TM.Options.UnsafeFPMath">;
def FMA : Predicate<"Subtarget->hasFMA()">;

// i32 operand that defaults to immediate 0 when omitted.
def InstFlag : OperandWithDefaultOps <i32, (ops (i32 0))>;
// AsmParser operand classes for 16-bit immediates.
// NOTE(review): the `let Name = ...;` lines and closing braces were dropped
// by the extraction; restored from upstream -- verify.
def u16ImmTarget : AsmOperandClass {
  let Name = "U16Imm";
  let RenderMethod = "addImmOperands";
}

def s16ImmTarget : AsmOperandClass {
  let Name = "S16Imm";
  let RenderMethod = "addImmOperands";
}
let OperandType = "OPERAND_IMMEDIATE" in {

def u32imm : Operand<i32> {
  let PrintMethod = "printU32ImmOperand";
}

def u16imm : Operand<i16> {
  let PrintMethod = "printU16ImmOperand";
  let ParserMatchClass = u16ImmTarget;
}

def s16imm : Operand<i16> {
  let PrintMethod = "printU16ImmOperand";
  let ParserMatchClass = s16ImmTarget;
}

def u8imm : Operand<i8> {
  let PrintMethod = "printU8ImmOperand";
}

} // End OperandType = "OPERAND_IMMEDIATE"
//===--------------------------------------------------------------------===//
//===--------------------------------------------------------------------===//
127 def brtarget : Operand<OtherVT>;
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// PatFrag wrappers that only match when the wrapped node has a single use.
class HasOneUseUnaryOp<SDPatternOperator op> : PatFrag<
  (ops node:$src0),
  (op $src0),
  [{ return N->hasOneUse(); }]
>;

class HasOneUseBinOp<SDPatternOperator op> : PatFrag<
  (ops node:$src0, node:$src1),
  (op $src0, $src1),
  [{ return N->hasOneUse(); }]
>;

class HasOneUseTernaryOp<SDPatternOperator op> : PatFrag<
  (ops node:$src0, node:$src1, node:$src2),
  (op $src0, $src1, $src2),
  [{ return N->hasOneUse(); }]
>;
// Single-use variants of common binary/unary/ternary operators.
let Properties = [SDNPCommutative, SDNPAssociative] in {
def smax_oneuse : HasOneUseBinOp<smax>;
def smin_oneuse : HasOneUseBinOp<smin>;
def umax_oneuse : HasOneUseBinOp<umax>;
def umin_oneuse : HasOneUseBinOp<umin>;

def fminnum_oneuse : HasOneUseBinOp<fminnum>;
def fmaxnum_oneuse : HasOneUseBinOp<fmaxnum>;

def fminnum_ieee_oneuse : HasOneUseBinOp<fminnum_ieee>;
def fmaxnum_ieee_oneuse : HasOneUseBinOp<fmaxnum_ieee>;

def and_oneuse : HasOneUseBinOp<and>;
def or_oneuse : HasOneUseBinOp<or>;
def xor_oneuse : HasOneUseBinOp<xor>;
} // Properties = [SDNPCommutative, SDNPAssociative]

def not_oneuse : HasOneUseUnaryOp<not>;

def add_oneuse : HasOneUseBinOp<add>;
def sub_oneuse : HasOneUseBinOp<sub>;

def srl_oneuse : HasOneUseBinOp<srl>;
def shl_oneuse : HasOneUseBinOp<shl>;

def select_oneuse : HasOneUseTernaryOp<select>;

def AMDGPUmul_u24_oneuse : HasOneUseBinOp<AMDGPUmul_u24>;
def AMDGPUmul_i24_oneuse : HasOneUseBinOp<AMDGPUmul_i24>;
// Single-use logical shift right by 16.
def srl_16 : PatFrag<
  (ops node:$src0), (srl_oneuse node:$src0, (i32 16))
>;

// High 16 bits of a 32-bit value, truncated to i16.
def hi_i16_elt : PatFrag<
  (ops node:$src0), (i16 (trunc (i32 (srl_16 node:$src0))))
>;
// Matches an f16 obtained by bitcasting the high half of a 32-bit value
// (a bitcast fed by a shift-right of exactly 16).
// NOTE(review): the pattern dag, the early-exit returns, and the closing
// `)` of the dyn_cast condition were lost in extraction; reconstructed from
// upstream AMDGPUInstructions.td -- verify.
def hi_f16_elt : PatLeaf<
  (f16 (bitconvert (i16 (trunc (i32 (srl_16 node:$src0)))))), [{
  if (N->getOpcode() != ISD::BITCAST)
    return false;
  SDValue Tmp = N->getOperand(0);

  if (Tmp.getOpcode() != ISD::SRL)
    return false;
  if (const auto *RHS = dyn_cast<ConstantSDNode>(Tmp.getOperand(1)))
    return RHS->getZExtValue() == 16;
  return false;
}]>;
//===----------------------------------------------------------------------===//
// PatLeafs for floating-point comparisons
//===----------------------------------------------------------------------===//
// Each leaf accepts the ordered condition code and its plain (integer-style)
// counterpart, as shown by the `||` in the predicate bodies.
def COND_OEQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOEQ || N->get() == ISD::SETEQ;}]
>;

def COND_ONE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETONE || N->get() == ISD::SETNE;}]
>;

def COND_OGT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGT || N->get() == ISD::SETGT;}]
>;

def COND_OGE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGE || N->get() == ISD::SETGE;}]
>;

def COND_OLT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLT || N->get() == ISD::SETLT;}]
>;

def COND_OLE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLE || N->get() == ISD::SETLE;}]
>;

def COND_O : PatLeaf <(cond), [{return N->get() == ISD::SETO;}]>;
def COND_UO : PatLeaf <(cond), [{return N->get() == ISD::SETUO;}]>;
//===----------------------------------------------------------------------===//
// PatLeafs for unsigned / unordered comparisons
//===----------------------------------------------------------------------===//
def COND_UEQ : PatLeaf <(cond), [{return N->get() == ISD::SETUEQ;}]>;
def COND_UNE : PatLeaf <(cond), [{return N->get() == ISD::SETUNE;}]>;
def COND_UGT : PatLeaf <(cond), [{return N->get() == ISD::SETUGT;}]>;
def COND_UGE : PatLeaf <(cond), [{return N->get() == ISD::SETUGE;}]>;
def COND_ULT : PatLeaf <(cond), [{return N->get() == ISD::SETULT;}]>;
def COND_ULE : PatLeaf <(cond), [{return N->get() == ISD::SETULE;}]>;

// XXX - For some reason R600 version is preferring to use unordered
// for setne?
def COND_UNE_NE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETUNE || N->get() == ISD::SETNE;}]
>;
//===----------------------------------------------------------------------===//
// PatLeafs for signed comparisons
//===----------------------------------------------------------------------===//
def COND_SGT : PatLeaf <(cond), [{return N->get() == ISD::SETGT;}]>;
def COND_SGE : PatLeaf <(cond), [{return N->get() == ISD::SETGE;}]>;
def COND_SLT : PatLeaf <(cond), [{return N->get() == ISD::SETLT;}]>;
def COND_SLE : PatLeaf <(cond), [{return N->get() == ISD::SETLE;}]>;
//===----------------------------------------------------------------------===//
// PatLeafs for integer equality
//===----------------------------------------------------------------------===//
def COND_EQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETEQ || N->get() == ISD::SETUEQ;}]
>;

def COND_NE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETNE || N->get() == ISD::SETUNE;}]
>;

// Never matches; placeholder condition-code leaf.
def COND_NULL : PatLeaf <
  (cond),
  [{(void)N; return false;}]
>;
//===----------------------------------------------------------------------===//
// PatLeafs for Texture Constants
//===----------------------------------------------------------------------===//
// Texture-type immediates classified by their numeric encoding.
def TEX_ARRAY : PatLeaf<
  (imm),
  [{uint32_t TType = (uint32_t)N->getZExtValue();
    return TType == 9 || TType == 10 || TType == 16;
  }]
>;

// NOTE(review): the return line of TEX_RECT was dropped by the extraction;
// restored (type 5) from upstream -- verify.
def TEX_RECT : PatLeaf<
  (imm),
  [{uint32_t TType = (uint32_t)N->getZExtValue();
    return TType == 5;
  }]
>;

def TEX_SHADOW : PatLeaf<
  (imm),
  [{uint32_t TType = (uint32_t)N->getZExtValue();
    return (TType >= 6 && TType <= 8) || TType == 13;
  }]
>;

def TEX_SHADOW_ARRAY : PatLeaf<
  (imm),
  [{uint32_t TType = (uint32_t)N->getZExtValue();
    return TType == 11 || TType == 12 || TType == 17;
  }]
>;
//===----------------------------------------------------------------------===//
// Load/Store Pattern Fragments
//===----------------------------------------------------------------------===//
// Alignment-qualified memory fragments.
class Aligned8Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
  return cast<MemSDNode>(N)->getAlignment() % 8 == 0;
}]>;

class Aligned16Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
  return cast<MemSDNode>(N)->getAlignment() >= 16;
}]>;

class LoadFrag <SDPatternOperator op> : PatFrag<(ops node:$ptr), (op node:$ptr)>;
class StoreFrag<SDPatternOperator op> : PatFrag <
  (ops node:$value, node:$ptr), (op node:$value, node:$ptr)
>;

// Store of the high 16 bits of $value (the srl-by-16 in the frag).
class StoreHi16<SDPatternOperator op> : PatFrag <
  (ops node:$value, node:$ptr), (op (srl node:$value, (i32 16)), node:$ptr)
>;
// Address-space predicates used to classify memory nodes.
class PrivateAddress : CodePatPred<[{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
}]>;

class ConstantAddress : CodePatPred<[{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS;
}]>;

class LocalAddress : CodePatPred<[{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

class GlobalAddress : CodePatPred<[{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}]>;

// Loads may also come from the constant address space.
class GlobalLoadAddress : CodePatPred<[{
  auto AS = cast<MemSDNode>(N)->getAddressSpace();
  return AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::CONSTANT_ADDRESS;
}]>;

class FlatLoadAddress : CodePatPred<[{
  const auto AS = cast<MemSDNode>(N)->getAddressSpace();
  return AS == AMDGPUAS::FLAT_ADDRESS ||
         AS == AMDGPUAS::GLOBAL_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS;
}]>;

class FlatStoreAddress : CodePatPred<[{
  const auto AS = cast<MemSDNode>(N)->getAddressSpace();
  return AS == AMDGPUAS::FLAT_ADDRESS ||
         AS == AMDGPUAS::GLOBAL_ADDRESS;
}]>;
// Matches either a zero-extending or an any-extending unindexed load.
class AZExtLoadBase <SDPatternOperator ld_node>: PatFrag<(ops node:$ptr),
                                              (ld_node node:$ptr), [{
  LoadSDNode *L = cast<LoadSDNode>(N);
  return L->getExtensionType() == ISD::ZEXTLOAD ||
         L->getExtensionType() == ISD::EXTLOAD;
}]>;

def az_extload : AZExtLoadBase <unindexedload>;

def az_extloadi8 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def az_extloadi32 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
// Load/store fragments qualified by address space.
class PrivateLoad <SDPatternOperator op> : LoadFrag <op>, PrivateAddress;
class PrivateStore <SDPatternOperator op> : StoreFrag <op>, PrivateAddress;

class LocalLoad <SDPatternOperator op> : LoadFrag <op>, LocalAddress;
class LocalStore <SDPatternOperator op> : StoreFrag <op>, LocalAddress;

class GlobalLoad <SDPatternOperator op> : LoadFrag<op>, GlobalLoadAddress;
class GlobalStore <SDPatternOperator op> : StoreFrag<op>, GlobalAddress;

class FlatLoad <SDPatternOperator op> : LoadFrag <op>, FlatLoadAddress;
class FlatStore <SDPatternOperator op> : StoreFrag <op>, FlatStoreAddress;

class ConstantLoad <SDPatternOperator op> : LoadFrag <op>, ConstantAddress;
def load_private : PrivateLoad <load>;
def az_extloadi8_private : PrivateLoad <az_extloadi8>;
def sextloadi8_private : PrivateLoad <sextloadi8>;
def az_extloadi16_private : PrivateLoad <az_extloadi16>;
def sextloadi16_private : PrivateLoad <sextloadi16>;

def store_private : PrivateStore <store>;
def truncstorei8_private : PrivateStore<truncstorei8>;
def truncstorei16_private : PrivateStore <truncstorei16>;
def store_hi16_private : StoreHi16 <truncstorei16>, PrivateAddress;
def truncstorei8_hi16_private : StoreHi16<truncstorei8>, PrivateAddress;
def load_global : GlobalLoad <load>;
def sextloadi8_global : GlobalLoad <sextloadi8>;
def az_extloadi8_global : GlobalLoad <az_extloadi8>;
def sextloadi16_global : GlobalLoad <sextloadi16>;
def az_extloadi16_global : GlobalLoad <az_extloadi16>;
def atomic_load_global : GlobalLoad<atomic_load>;

def store_global : GlobalStore <store>;
def truncstorei8_global : GlobalStore <truncstorei8>;
def truncstorei16_global : GlobalStore <truncstorei16>;
def store_atomic_global : GlobalStore<atomic_store>;
def truncstorei8_hi16_global : StoreHi16 <truncstorei8>, GlobalAddress;
def truncstorei16_hi16_global : StoreHi16 <truncstorei16>, GlobalAddress;
def load_local : LocalLoad <load>;
def az_extloadi8_local : LocalLoad <az_extloadi8>;
def sextloadi8_local : LocalLoad <sextloadi8>;
def az_extloadi16_local : LocalLoad <az_extloadi16>;
def sextloadi16_local : LocalLoad <sextloadi16>;
def atomic_load_32_local : LocalLoad<atomic_load_32>;
def atomic_load_64_local : LocalLoad<atomic_load_64>;

def store_local : LocalStore <store>;
def truncstorei8_local : LocalStore <truncstorei8>;
def truncstorei16_local : LocalStore <truncstorei16>;
def store_local_hi16 : StoreHi16 <truncstorei16>, LocalAddress;
def truncstorei8_local_hi16 : StoreHi16<truncstorei8>, LocalAddress;
def atomic_store_local : LocalStore <atomic_store>;
// Alignment-qualified LDS load/store fragments.
def load_align8_local : Aligned8Bytes <
  (ops node:$ptr), (load_local node:$ptr)
>;

def load_align16_local : Aligned16Bytes <
  (ops node:$ptr), (load_local node:$ptr)
>;

def store_align8_local : Aligned8Bytes <
  (ops node:$val, node:$ptr), (store_local node:$val, node:$ptr)
>;

def store_align16_local : Aligned16Bytes <
  (ops node:$val, node:$ptr), (store_local node:$val, node:$ptr)
>;
def load_flat : FlatLoad <load>;
def az_extloadi8_flat : FlatLoad <az_extloadi8>;
def sextloadi8_flat : FlatLoad <sextloadi8>;
def az_extloadi16_flat : FlatLoad <az_extloadi16>;
def sextloadi16_flat : FlatLoad <sextloadi16>;
def atomic_load_flat : FlatLoad<atomic_load>;

def store_flat : FlatStore <store>;
def truncstorei8_flat : FlatStore <truncstorei8>;
def truncstorei16_flat : FlatStore <truncstorei16>;
def atomic_store_flat : FlatStore <atomic_store>;
def truncstorei8_hi16_flat : StoreHi16<truncstorei8>, FlatStoreAddress;
def truncstorei16_hi16_flat : StoreHi16<truncstorei16>, FlatStoreAddress;
def constant_load : ConstantLoad<load>;
def sextloadi8_constant : ConstantLoad <sextloadi8>;
def az_extloadi8_constant : ConstantLoad <az_extloadi8>;
def sextloadi16_constant : ConstantLoad <sextloadi16>;
def az_extloadi16_constant : ConstantLoad <az_extloadi16>;
// Binary atomic op restricted to the LDS (local) address space.
class local_binary_atomic_op<SDNode atomic_op> :
  PatFrag<(ops node:$ptr, node:$value),
    (atomic_op node:$ptr, node:$value), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def atomic_swap_local : local_binary_atomic_op<atomic_swap>;
def atomic_load_add_local : local_binary_atomic_op<atomic_load_add>;
def atomic_load_sub_local : local_binary_atomic_op<atomic_load_sub>;
def atomic_load_and_local : local_binary_atomic_op<atomic_load_and>;
def atomic_load_or_local : local_binary_atomic_op<atomic_load_or>;
def atomic_load_xor_local : local_binary_atomic_op<atomic_load_xor>;
def atomic_load_nand_local : local_binary_atomic_op<atomic_load_nand>;
def atomic_load_min_local : local_binary_atomic_op<atomic_load_min>;
def atomic_load_max_local : local_binary_atomic_op<atomic_load_max>;
def atomic_load_umin_local : local_binary_atomic_op<atomic_load_umin>;
def atomic_load_umax_local : local_binary_atomic_op<atomic_load_umax>;
// AMDGPUstore_mskor restricted to the global address space.
def mskor_global : PatFrag<(ops node:$val, node:$ptr),
                            (AMDGPUstore_mskor node:$val, node:$ptr), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}]>;
// Compare-and-swap restricted to the LDS (local) address space.
class AtomicCmpSwapLocal <SDNode cmp_swap_node> : PatFrag<
    (ops node:$ptr, node:$cmp, node:$swap),
    (cmp_swap_node node:$ptr, node:$cmp, node:$swap), [{
  AtomicSDNode *AN = cast<AtomicSDNode>(N);
  return AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def atomic_cmp_swap_local : AtomicCmpSwapLocal <atomic_cmp_swap>;
// Generates three fragments for a global atomic op: the plain form plus
// _noret / _ret variants distinguished by whether the result is used.
multiclass global_binary_atomic_op<SDNode atomic_op> {
  def "" : PatFrag<
        (ops node:$ptr, node:$value),
        (atomic_op node:$ptr, node:$value),
        [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;}]>;

  def _noret : PatFrag<
        (ops node:$ptr, node:$value),
        (atomic_op node:$ptr, node:$value),
        [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS && (SDValue(N, 0).use_empty());}]>;

  def _ret : PatFrag<
        (ops node:$ptr, node:$value),
        (atomic_op node:$ptr, node:$value),
        [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS && (!SDValue(N, 0).use_empty());}]>;
}
defm atomic_swap_global : global_binary_atomic_op<atomic_swap>;
defm atomic_add_global : global_binary_atomic_op<atomic_load_add>;
defm atomic_and_global : global_binary_atomic_op<atomic_load_and>;
defm atomic_max_global : global_binary_atomic_op<atomic_load_max>;
defm atomic_min_global : global_binary_atomic_op<atomic_load_min>;
defm atomic_or_global : global_binary_atomic_op<atomic_load_or>;
defm atomic_sub_global : global_binary_atomic_op<atomic_load_sub>;
defm atomic_umax_global : global_binary_atomic_op<atomic_load_umax>;
defm atomic_umin_global : global_binary_atomic_op<atomic_load_umin>;
defm atomic_xor_global : global_binary_atomic_op<atomic_load_xor>;
def AMDGPUatomic_cmp_swap_global : PatFrag<
  (ops node:$ptr, node:$value),
  (AMDGPUatomic_cmp_swap node:$ptr, node:$value)>, GlobalAddress;

def atomic_cmp_swap_global : PatFrag<
  (ops node:$ptr, node:$cmp, node:$value),
  (atomic_cmp_swap node:$ptr, node:$cmp, node:$value)>, GlobalAddress;

// _noret / _ret split on whether the cmpxchg result is consumed.
def atomic_cmp_swap_global_noret : PatFrag<
  (ops node:$ptr, node:$cmp, node:$value),
  (atomic_cmp_swap node:$ptr, node:$cmp, node:$value),
  [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS && (SDValue(N, 0).use_empty());}]>;

def atomic_cmp_swap_global_ret : PatFrag<
  (ops node:$ptr, node:$cmp, node:$value),
  (atomic_cmp_swap node:$ptr, node:$cmp, node:$value),
  [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS && (!SDValue(N, 0).use_empty());}]>;
//===----------------------------------------------------------------------===//
// Misc Pattern Fragments
//===----------------------------------------------------------------------===//
// Bit-pattern constants for common FP values, usable inside patterns.
// NOTE(review): the `class Constants {` header and the PI member were dropped
// by the extraction; restored from upstream -- verify.
class Constants {
int TWO_PI = 0x40c90fdb;
int PI = 0x40490fdb;
int TWO_PI_INV = 0x3e22f983;
int FP_UINT_MAX_PLUS_1 = 0x4f800000;    // 1 << 32 in floating point encoding
int FP16_ONE = 0x3C00;
int FP16_NEG_ONE = 0xBC00;
int V2FP16_ONE = 0x3C003C00;
int FP32_ONE = 0x3f800000;
int FP32_NEG_ONE = 0xbf800000;
int FP64_ONE = 0x3ff0000000000000;
int FP64_NEG_ONE = 0xbff0000000000000;
}
def CONST : Constants;
// FP immediate leafs for 0.0, 1.0 and 0.5.
def FP_ZERO : PatLeaf <
  (fpimm),
  [{return N->getValueAPF().isZero();}]
>;

def FP_ONE : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(1.0);}]
>;

def FP_HALF : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(0.5);}]
>;
/* Generic helper patterns for intrinsics */
/* -------------------------------------- */
// pow(x, y) lowered as exp(y * log(x)) using the target's ieee variants.
class POW_Common <AMDGPUInst log_ieee, AMDGPUInst exp_ieee, AMDGPUInst mul>
  : AMDGPUPat <
  (fpow f32:$src0, f32:$src1),
  (exp_ieee (mul f32:$src1, (log_ieee f32:$src0)))
>;
/* Other helper patterns */
/* --------------------- */

/* Extract element pattern */
// extractelt of a statically-known lane lowered to an EXTRACT_SUBREG.
class Extract_Element <ValueType sub_type, ValueType vec_type, int sub_idx,
                       SubRegIndex sub_reg>
  : AMDGPUPat<
  (sub_type (extractelt vec_type:$src, sub_idx)),
  (EXTRACT_SUBREG $src, sub_reg)
>;

/* Insert element pattern */
class Insert_Element <ValueType elem_type, ValueType vec_type,
                      int sub_idx, SubRegIndex sub_reg>
  : AMDGPUPat <
  (insertelt vec_type:$vec, elem_type:$elem, sub_idx),
  (INSERT_SUBREG $vec, $elem, sub_reg)
>;
// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
// bitconvert pattern
class BitConvert <ValueType dt, ValueType st, RegisterClass rc> : AMDGPUPat <
  (dt (bitconvert (st rc:$src0))),
  (dt rc:$src0)
>;
// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
class DwordAddrPat<ValueType vt, RegisterClass rc> : AMDGPUPat <
  (vt (AMDGPUdwordaddr (vt rc:$addr))),
  (vt rc:$addr)
>;
// Bitfield-insert (BFI) selection patterns, including the SHA-256 Ch form
// and fcopysign lowering via a 0x7fffffff sign mask.
// NOTE(review): the `def : AMDGPUPat <`, result, `(REG_SEQUENCE RC64,` and
// closing lines were dropped by the extraction; restored from upstream --
// verify.
multiclass BFIPatterns <Instruction BFI_INT,
                        Instruction LoadImm32,
                        RegisterClass RC64> {
  // Definition from ISA doc:
  // (y & x) | (z & ~x)
  def : AMDGPUPat <
    (or (and i32:$y, i32:$x), (and i32:$z, (not i32:$x))),
    (BFI_INT $x, $y, $z)
  >;

  // 64-bit version
  def : AMDGPUPat <
    (or (and i64:$y, i64:$x), (and i64:$z, (not i64:$x))),
    (REG_SEQUENCE RC64,
      (BFI_INT (i32 (EXTRACT_SUBREG $x, sub0)),
               (i32 (EXTRACT_SUBREG $y, sub0)),
               (i32 (EXTRACT_SUBREG $z, sub0))), sub0,
      (BFI_INT (i32 (EXTRACT_SUBREG $x, sub1)),
               (i32 (EXTRACT_SUBREG $y, sub1)),
               (i32 (EXTRACT_SUBREG $z, sub1))), sub1)
  >;

  // SHA-256 Ch function
  // z ^ (x & (y ^ z))
  def : AMDGPUPat <
    (xor i32:$z, (and i32:$x, (xor i32:$y, i32:$z))),
    (BFI_INT $x, $y, $z)
  >;

  // 64-bit version
  def : AMDGPUPat <
    (xor i64:$z, (and i64:$x, (xor i64:$y, i64:$z))),
    (REG_SEQUENCE RC64,
      (BFI_INT (i32 (EXTRACT_SUBREG $x, sub0)),
               (i32 (EXTRACT_SUBREG $y, sub0)),
               (i32 (EXTRACT_SUBREG $z, sub0))), sub0,
      (BFI_INT (i32 (EXTRACT_SUBREG $x, sub1)),
               (i32 (EXTRACT_SUBREG $y, sub1)),
               (i32 (EXTRACT_SUBREG $z, sub1))), sub1)
  >;

  def : AMDGPUPat <
    (fcopysign f32:$src0, f32:$src1),
    (BFI_INT (LoadImm32 (i32 0x7fffffff)), $src0, $src1)
  >;

  def : AMDGPUPat <
    (f32 (fcopysign f32:$src0, f64:$src1)),
    (BFI_INT (LoadImm32 (i32 0x7fffffff)), $src0,
             (i32 (EXTRACT_SUBREG $src1, sub1)))
  >;

  def : AMDGPUPat <
    (f64 (fcopysign f64:$src0, f64:$src1)),
    (REG_SEQUENCE RC64,
      (i32 (EXTRACT_SUBREG $src0, sub0)), sub0,
      (BFI_INT (LoadImm32 (i32 0x7fffffff)),
               (i32 (EXTRACT_SUBREG $src0, sub1)),
               (i32 (EXTRACT_SUBREG $src1, sub1))), sub1)
  >;

  def : AMDGPUPat <
    (f64 (fcopysign f64:$src0, f32:$src1)),
    (REG_SEQUENCE RC64,
      (i32 (EXTRACT_SUBREG $src0, sub0)), sub0,
      (BFI_INT (LoadImm32 (i32 0x7fffffff)),
               (i32 (EXTRACT_SUBREG $src0, sub1)),
               $src1), sub1)
  >;
}
// SHA-256 Ma patterns

// ((x & z) | (y & (x | z))) -> BFI_INT (XOR x, y), z, y
multiclass SHA256MaPattern <Instruction BFI_INT, Instruction XOR, RegisterClass RC64> {
  def : AMDGPUPat <
    (or (and i32:$x, i32:$z), (and i32:$y, (or i32:$x, i32:$z))),
    (BFI_INT (XOR i32:$x, i32:$y), i32:$z, i32:$y)
  >;

  // 64-bit version, applied per 32-bit half via REG_SEQUENCE.
  def : AMDGPUPat <
    (or (and i64:$x, i64:$z), (and i64:$y, (or i64:$x, i64:$z))),
    (REG_SEQUENCE RC64,
      (BFI_INT (XOR (i32 (EXTRACT_SUBREG $x, sub0)),
                    (i32 (EXTRACT_SUBREG $y, sub0))),
               (i32 (EXTRACT_SUBREG $z, sub0)),
               (i32 (EXTRACT_SUBREG $y, sub0))), sub0,
      (BFI_INT (XOR (i32 (EXTRACT_SUBREG $x, sub1)),
                    (i32 (EXTRACT_SUBREG $y, sub1))),
               (i32 (EXTRACT_SUBREG $z, sub1)),
               (i32 (EXTRACT_SUBREG $y, sub1))), sub1)
  >;
}
// Bitfield extract patterns

// Immediate that is a contiguous mask starting at bit 0.
def IMMZeroBasedBitfieldMask : PatLeaf <(imm), [{
  return isMask_32(N->getZExtValue());
}]>;

// Transforms an immediate into its population count (as i32).
def IMMPopCount : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(countPopulation(N->getZExtValue()), SDLoc(N),
                                   MVT::i32);
}]>;
// Unsigned/signed bitfield-extract selection patterns.
multiclass BFEPattern <Instruction UBFE, Instruction SBFE, Instruction MOV> {
  // (x >> rshift) & zero-based-mask -> UBFE rshift, popcount(mask)
  def : AMDGPUPat <
    (i32 (and (i32 (srl i32:$src, i32:$rshift)), IMMZeroBasedBitfieldMask:$mask)),
    (UBFE $src, $rshift, (MOV (i32 (IMMPopCount $mask))))
  >;

  // x & ((1 << y) - 1)
  def : AMDGPUPat <
    (and i32:$src, (add_oneuse (shl_oneuse 1, i32:$width), -1)),
    (UBFE $src, (MOV (i32 0)), $width)
  >;

  // x & ~(-1 << y)
  def : AMDGPUPat <
    (and i32:$src, (xor_oneuse (shl_oneuse -1, i32:$width), -1)),
    (UBFE $src, (MOV (i32 0)), $width)
  >;

  // x & (-1 >> (bitwidth - y))
  def : AMDGPUPat <
    (and i32:$src, (srl_oneuse -1, (sub 32, i32:$width))),
    (UBFE $src, (MOV (i32 0)), $width)
  >;

  // x << (bitwidth - y) >> (bitwidth - y)
  def : AMDGPUPat <
    (srl (shl_oneuse i32:$src, (sub 32, i32:$width)), (sub 32, i32:$width)),
    (UBFE $src, (MOV (i32 0)), $width)
  >;

  // Arithmetic-shift form selects the signed extract.
  def : AMDGPUPat <
    (sra (shl_oneuse i32:$src, (sub 32, i32:$width)), (sub 32, i32:$width)),
    (SBFE $src, (MOV (i32 0)), $width)
  >;
}
// rotr selected as a bit-align of the value with itself.
class ROTRPattern <Instruction BIT_ALIGN> : AMDGPUPat <
  (rotr i32:$src0, i32:$src1),
  (BIT_ALIGN $src0, $src0, $src1)
>;
// Integer med3 selection: recognizes min/max trees computing the median of
// three values (the _oneuse operands keep the intermediates from being CSEd
// into other users).
multiclass IntMed3Pat<Instruction med3Inst,
                 SDPatternOperator min,
                 SDPatternOperator max,
                 SDPatternOperator min_oneuse,
                 SDPatternOperator max_oneuse,
                 ValueType vt = i32> {

  // This matches 16 permutations of
  // min(max(a, b), max(min(a, b), c))
  def : AMDGPUPat <
  (min (max_oneuse vt:$src0, vt:$src1),
       (max_oneuse (min_oneuse vt:$src0, vt:$src1), vt:$src2)),
  (med3Inst vt:$src0, vt:$src1, vt:$src2)
>;

  // This matches 16 permutations of
  // max(min(x, y), min(max(x, y), z))
  def : AMDGPUPat <
  (max (min_oneuse vt:$src0, vt:$src1),
       (min_oneuse (max_oneuse vt:$src0, vt:$src1), vt:$src2)),
  (med3Inst $src0, $src1, $src2)
>;
}
// Special conversion patterns

// Round-to-plus-infinity int conversion: floor(x + 0.5); only valid with
// no-NaNs fast-math (the predicate checks TM.Options.NoNaNsFPMath).
def cvt_rpi_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor (fadd $src, FP_HALF))),
  [{ (void) N; return TM.Options.NoNaNsFPMath; }]
>;

def cvt_flr_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor $src)),
  [{ (void)N; return TM.Options.NoNaNsFPMath; }]
>;
// mul24 + add folded to mad24; HasClamp appends the clamp bit operand.
let AddedComplexity = 2 in {
class IMad24Pat<Instruction Inst, bit HasClamp = 0> : AMDGPUPat <
  (add (AMDGPUmul_i24 i32:$src0, i32:$src1), i32:$src2),
  !if(HasClamp, (Inst $src0, $src1, $src2, (i1 0)),
                (Inst $src0, $src1, $src2))
>;

class UMad24Pat<Instruction Inst, bit HasClamp = 0> : AMDGPUPat <
  (add (AMDGPUmul_u24 i32:$src0, i32:$src1), i32:$src2),
  !if(HasClamp, (Inst $src0, $src1, $src2, (i1 0)),
                (Inst $src0, $src1, $src2))
>;
} // AddedComplexity.
// 1.0 / x -> reciprocal instruction.
class RcpPat<Instruction RcpInst, ValueType vt> : AMDGPUPat <
  (fdiv FP_ONE, vt:$src),
  (RcpInst $src)
>;

// rcp(sqrt(x)) -> rsq instruction.
class RsqPat<Instruction RsqInst, ValueType vt> : AMDGPUPat <
  (AMDGPUrcp (fsqrt vt:$src)),
  (RsqInst $src)
>;
// Instructions which select to the same v_min_f*
def fminnum_like : PatFrags<(ops node:$src0, node:$src1),
  [(fminnum_ieee node:$src0, node:$src1),
   (fminnum node:$src0, node:$src1)]
>;

// Instructions which select to the same v_max_f*
def fmaxnum_like : PatFrags<(ops node:$src0, node:$src1),
  [(fmaxnum_ieee node:$src0, node:$src1),
   (fmaxnum node:$src0, node:$src1)]
>;

def fminnum_like_oneuse : PatFrags<(ops node:$src0, node:$src1),
  [(fminnum_ieee_oneuse node:$src0, node:$src1),
   (fminnum_oneuse node:$src0, node:$src1)]
>;
877 def fmaxnum_like_oneuse : PatFrags<(ops node:$src0, node:$src1),
878 [(fmaxnum_ieee_oneuse node:$src0, node:$src1),
879 (fmaxnum_oneuse node:$src0, node:$src1)]