//===-- SystemZOperators.td - SystemZ-specific operators ------*- tblgen-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Type profiles
//===----------------------------------------------------------------------===//
def SDT_CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i64>,
                                       SDTCisVT<1, i64>]>;
def SDT_CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i64>,
                                   SDTCisVT<1, i64>]>;
def SDT_ZCall : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
def SDT_ZCmp : SDTypeProfile<1, 2,
                             [SDTCisVT<0, i32>,
                              SDTCisSameAs<1, 2>]>;
def SDT_ZICmp : SDTypeProfile<1, 3,
                              [SDTCisVT<0, i32>,
                               SDTCisSameAs<1, 2>,
                               SDTCisVT<3, i32>]>;
def SDT_ZBRCCMask : SDTypeProfile<0, 4,
                                  [SDTCisVT<0, i32>,
                                   SDTCisVT<1, i32>,
                                   SDTCisVT<2, OtherVT>,
                                   SDTCisVT<3, i32>]>;
def SDT_ZSelectCCMask : SDTypeProfile<1, 5,
                                      [SDTCisSameAs<0, 1>,
                                       SDTCisSameAs<1, 2>,
                                       SDTCisVT<3, i32>,
                                       SDTCisVT<4, i32>,
                                       SDTCisVT<5, i32>]>;
def SDT_ZWrapPtr : SDTypeProfile<1, 1,
                                 [SDTCisSameAs<0, 1>,
                                  SDTCisPtrTy<0>]>;
def SDT_ZWrapOffset : SDTypeProfile<1, 2,
                                    [SDTCisSameAs<0, 1>,
                                     SDTCisSameAs<0, 2>,
                                     SDTCisPtrTy<0>]>;
def SDT_ZAdjDynAlloc : SDTypeProfile<1, 0, [SDTCisVT<0, i64>]>;
def SDT_ZGR128Binary : SDTypeProfile<1, 2,
                                     [SDTCisVT<0, untyped>,
                                      SDTCisInt<1>,
                                      SDTCisInt<2>]>;
def SDT_ZBinaryWithFlags : SDTypeProfile<2, 2,
                                         [SDTCisInt<0>,
                                          SDTCisVT<1, i32>,
                                          SDTCisSameAs<0, 2>,
                                          SDTCisSameAs<0, 3>]>;
def SDT_ZBinaryWithCarry : SDTypeProfile<2, 3,
                                         [SDTCisInt<0>,
                                          SDTCisVT<1, i32>,
                                          SDTCisSameAs<0, 2>,
                                          SDTCisSameAs<0, 3>,
                                          SDTCisVT<1, i32>]>;
def SDT_ZAtomicLoadBinaryW : SDTypeProfile<1, 5,
                                           [SDTCisVT<0, i32>,
                                            SDTCisPtrTy<1>,
                                            SDTCisVT<2, i32>,
                                            SDTCisVT<3, i32>,
                                            SDTCisVT<4, i32>,
                                            SDTCisVT<5, i32>]>;
def SDT_ZAtomicCmpSwapW : SDTypeProfile<2, 6,
                                        [SDTCisVT<0, i32>,
                                         SDTCisVT<1, i32>,
                                         SDTCisPtrTy<2>,
                                         SDTCisVT<3, i32>,
                                         SDTCisVT<4, i32>,
                                         SDTCisVT<5, i32>,
                                         SDTCisVT<6, i32>,
                                         SDTCisVT<7, i32>]>;
def SDT_ZAtomicCmpSwap : SDTypeProfile<2, 3,
                                       [SDTCisInt<0>,
                                        SDTCisVT<1, i32>,
                                        SDTCisPtrTy<2>,
                                        SDTCisSameAs<0, 3>,
                                        SDTCisSameAs<0, 4>]>;
def SDT_ZAtomicLoad128 : SDTypeProfile<1, 1,
                                       [SDTCisVT<0, untyped>,
                                        SDTCisPtrTy<1>]>;
def SDT_ZAtomicStore128 : SDTypeProfile<0, 2,
                                        [SDTCisVT<0, untyped>,
                                         SDTCisPtrTy<1>]>;
def SDT_ZAtomicCmpSwap128 : SDTypeProfile<2, 3,
                                          [SDTCisVT<0, untyped>,
                                           SDTCisVT<1, i32>,
                                           SDTCisPtrTy<2>,
                                           SDTCisVT<3, untyped>,
                                           SDTCisVT<4, untyped>]>;
def SDT_ZMemMemLength : SDTypeProfile<0, 3,
                                      [SDTCisPtrTy<0>,
                                       SDTCisPtrTy<1>,
                                       SDTCisVT<2, i64>]>;
def SDT_ZMemMemLengthCC : SDTypeProfile<1, 3,
                                        [SDTCisVT<0, i32>,
                                         SDTCisPtrTy<1>,
                                         SDTCisPtrTy<2>,
                                         SDTCisVT<3, i64>]>;
def SDT_ZMemMemLoop : SDTypeProfile<0, 4,
                                    [SDTCisPtrTy<0>,
                                     SDTCisPtrTy<1>,
                                     SDTCisVT<2, i64>,
                                     SDTCisVT<3, i64>]>;
def SDT_ZMemMemLoopCC : SDTypeProfile<1, 4,
                                      [SDTCisVT<0, i32>,
                                       SDTCisPtrTy<1>,
                                       SDTCisPtrTy<2>,
                                       SDTCisVT<3, i64>,
                                       SDTCisVT<4, i64>]>;
def SDT_ZString : SDTypeProfile<1, 3,
                                [SDTCisPtrTy<0>,
                                 SDTCisPtrTy<1>,
                                 SDTCisPtrTy<2>,
                                 SDTCisVT<3, i32>]>;
def SDT_ZStringCC : SDTypeProfile<2, 3,
                                  [SDTCisPtrTy<0>,
                                   SDTCisVT<1, i32>,
                                   SDTCisPtrTy<2>,
                                   SDTCisPtrTy<3>,
                                   SDTCisVT<4, i32>]>;
def SDT_ZIPM : SDTypeProfile<1, 1,
                             [SDTCisVT<0, i32>,
                              SDTCisVT<1, i32>]>;
def SDT_ZPrefetch : SDTypeProfile<0, 2,
                                  [SDTCisVT<0, i32>,
                                   SDTCisPtrTy<1>]>;
def SDT_ZTBegin : SDTypeProfile<1, 2,
                                [SDTCisVT<0, i32>,
                                 SDTCisPtrTy<1>,
                                 SDTCisVT<2, i32>]>;
def SDT_ZTEnd : SDTypeProfile<1, 0,
                              [SDTCisVT<0, i32>]>;
def SDT_ZInsertVectorElt : SDTypeProfile<1, 3,
                                         [SDTCisVec<0>,
                                          SDTCisSameAs<0, 1>,
                                          SDTCisVT<3, i32>]>;
def SDT_ZExtractVectorElt : SDTypeProfile<1, 2,
                                          [SDTCisVec<1>,
                                           SDTCisVT<2, i32>]>;
def SDT_ZReplicate : SDTypeProfile<1, 1,
                                   [SDTCisVec<0>]>;
def SDT_ZVecUnaryConv : SDTypeProfile<1, 1,
                                      [SDTCisVec<0>,
                                       SDTCisVec<1>]>;
def SDT_ZVecUnary : SDTypeProfile<1, 1,
                                  [SDTCisVec<0>,
                                   SDTCisSameAs<0, 1>]>;
def SDT_ZVecUnaryCC : SDTypeProfile<2, 1,
                                    [SDTCisVec<0>,
                                     SDTCisVT<1, i32>,
                                     SDTCisSameAs<0, 2>]>;
def SDT_ZVecBinary : SDTypeProfile<1, 2,
                                   [SDTCisVec<0>,
                                    SDTCisSameAs<0, 1>,
                                    SDTCisSameAs<0, 2>]>;
def SDT_ZVecBinaryCC : SDTypeProfile<2, 2,
                                     [SDTCisVec<0>,
                                      SDTCisVT<1, i32>,
                                      SDTCisSameAs<0, 2>,
                                      SDTCisSameAs<0, 2>]>;
def SDT_ZVecBinaryInt : SDTypeProfile<1, 2,
                                      [SDTCisVec<0>,
                                       SDTCisSameAs<0, 1>,
                                       SDTCisVT<2, i32>]>;
def SDT_ZVecBinaryConv : SDTypeProfile<1, 2,
                                       [SDTCisVec<0>,
                                        SDTCisVec<1>,
                                        SDTCisSameAs<1, 2>]>;
def SDT_ZVecBinaryConvCC : SDTypeProfile<2, 2,
                                         [SDTCisVec<0>,
                                          SDTCisVT<1, i32>,
                                          SDTCisVec<2>,
                                          SDTCisSameAs<2, 3>]>;
def SDT_ZVecBinaryConvIntCC : SDTypeProfile<2, 2,
                                            [SDTCisVec<0>,
                                             SDTCisVT<1, i32>,
                                             SDTCisVec<2>,
                                             SDTCisVT<3, i32>]>;
def SDT_ZRotateMask : SDTypeProfile<1, 2,
                                    [SDTCisVec<0>,
                                     SDTCisVT<1, i32>,
                                     SDTCisVT<2, i32>]>;
def SDT_ZJoinDwords : SDTypeProfile<1, 2,
                                    [SDTCisVT<0, v2i64>,
                                     SDTCisVT<1, i64>,
                                     SDTCisVT<2, i64>]>;
def SDT_ZVecTernary : SDTypeProfile<1, 3,
                                    [SDTCisVec<0>,
                                     SDTCisSameAs<0, 1>,
                                     SDTCisSameAs<0, 2>,
                                     SDTCisSameAs<0, 3>]>;
def SDT_ZVecTernaryConvCC : SDTypeProfile<2, 3,
                                          [SDTCisVec<0>,
                                           SDTCisVT<1, i32>,
                                           SDTCisVec<2>,
                                           SDTCisSameAs<2, 3>,
                                           SDTCisSameAs<0, 4>]>;
def SDT_ZVecTernaryInt : SDTypeProfile<1, 3,
                                       [SDTCisVec<0>,
                                        SDTCisSameAs<0, 1>,
                                        SDTCisSameAs<0, 2>,
                                        SDTCisVT<3, i32>]>;
def SDT_ZVecTernaryIntCC : SDTypeProfile<2, 3,
                                         [SDTCisVec<0>,
                                          SDTCisVT<1, i32>,
                                          SDTCisSameAs<0, 2>,
                                          SDTCisSameAs<0, 3>,
                                          SDTCisVT<4, i32>]>;
def SDT_ZVecQuaternaryInt : SDTypeProfile<1, 4,
                                          [SDTCisVec<0>,
                                           SDTCisSameAs<0, 1>,
                                           SDTCisSameAs<0, 2>,
                                           SDTCisSameAs<0, 3>,
                                           SDTCisVT<4, i32>]>;
def SDT_ZVecQuaternaryIntCC : SDTypeProfile<2, 4,
                                            [SDTCisVec<0>,
                                             SDTCisVT<1, i32>,
                                             SDTCisSameAs<0, 2>,
                                             SDTCisSameAs<0, 3>,
                                             SDTCisSameAs<0, 4>,
                                             SDTCisVT<5, i32>]>;
def SDT_ZTest : SDTypeProfile<1, 2,
                              [SDTCisVT<0, i32>,
                               SDTCisVT<2, i64>]>;

//===----------------------------------------------------------------------===//
// Node definitions
//===----------------------------------------------------------------------===//

// These are target-independent nodes, but have target-specific formats.
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_CallSeqStart,
                           [SDNPHasChain, SDNPSideEffect, SDNPOutGlue]>;
def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_CallSeqEnd,
                         [SDNPHasChain, SDNPSideEffect, SDNPOptInGlue,
                          SDNPOutGlue]>;
def global_offset_table : SDNode<"ISD::GLOBAL_OFFSET_TABLE", SDTPtrLeaf>;

// Nodes for SystemZISD::*. See SystemZISelLowering.h for more details.
def z_retflag : SDNode<"SystemZISD::RET_FLAG", SDTNone,
                       [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def z_call : SDNode<"SystemZISD::CALL", SDT_ZCall,
                    [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
                     SDNPVariadic]>;
def z_sibcall : SDNode<"SystemZISD::SIBCALL", SDT_ZCall,
                       [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
                        SDNPVariadic]>;
def z_tls_gdcall : SDNode<"SystemZISD::TLS_GDCALL", SDT_ZCall,
                          [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
                           SDNPVariadic]>;
def z_tls_ldcall : SDNode<"SystemZISD::TLS_LDCALL", SDT_ZCall,
                          [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
                           SDNPVariadic]>;
def z_pcrel_wrapper : SDNode<"SystemZISD::PCREL_WRAPPER", SDT_ZWrapPtr, []>;
def z_pcrel_offset : SDNode<"SystemZISD::PCREL_OFFSET",
                            SDT_ZWrapOffset, []>;
def z_iabs : SDNode<"SystemZISD::IABS", SDTIntUnaryOp, []>;
def z_icmp : SDNode<"SystemZISD::ICMP", SDT_ZICmp>;
def z_fcmp : SDNode<"SystemZISD::FCMP", SDT_ZCmp>;
def z_tm : SDNode<"SystemZISD::TM", SDT_ZICmp>;
def z_br_ccmask_1 : SDNode<"SystemZISD::BR_CCMASK", SDT_ZBRCCMask,
                           [SDNPHasChain]>;
def z_select_ccmask_1 : SDNode<"SystemZISD::SELECT_CCMASK",
                               SDT_ZSelectCCMask>;
def z_ipm_1 : SDNode<"SystemZISD::IPM", SDT_ZIPM>;
def z_adjdynalloc : SDNode<"SystemZISD::ADJDYNALLOC", SDT_ZAdjDynAlloc>;
def z_popcnt : SDNode<"SystemZISD::POPCNT", SDTIntUnaryOp>;
def z_smul_lohi : SDNode<"SystemZISD::SMUL_LOHI", SDT_ZGR128Binary>;
def z_umul_lohi : SDNode<"SystemZISD::UMUL_LOHI", SDT_ZGR128Binary>;
def z_sdivrem : SDNode<"SystemZISD::SDIVREM", SDT_ZGR128Binary>;
def z_udivrem : SDNode<"SystemZISD::UDIVREM", SDT_ZGR128Binary>;
def z_saddo : SDNode<"SystemZISD::SADDO", SDT_ZBinaryWithFlags>;
def z_ssubo : SDNode<"SystemZISD::SSUBO", SDT_ZBinaryWithFlags>;
def z_uaddo : SDNode<"SystemZISD::UADDO", SDT_ZBinaryWithFlags>;
def z_usubo : SDNode<"SystemZISD::USUBO", SDT_ZBinaryWithFlags>;
def z_addcarry_1 : SDNode<"SystemZISD::ADDCARRY", SDT_ZBinaryWithCarry>;
def z_subcarry_1 : SDNode<"SystemZISD::SUBCARRY", SDT_ZBinaryWithCarry>;

def z_membarrier : SDNode<"SystemZISD::MEMBARRIER", SDTNone,
                          [SDNPHasChain, SDNPSideEffect]>;

def z_loadbswap : SDNode<"SystemZISD::LRV", SDTLoad,
                         [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def z_storebswap : SDNode<"SystemZISD::STRV", SDTStore,
                          [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def z_loadeswap : SDNode<"SystemZISD::VLER", SDTLoad,
                         [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def z_storeeswap : SDNode<"SystemZISD::VSTER", SDTStore,
                          [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def z_tdc : SDNode<"SystemZISD::TDC", SDT_ZTest>;

// Defined because the index is an i32 rather than a pointer.
def z_vector_insert : SDNode<"ISD::INSERT_VECTOR_ELT",
                             SDT_ZInsertVectorElt>;
def z_vector_extract : SDNode<"ISD::EXTRACT_VECTOR_ELT",
                              SDT_ZExtractVectorElt>;
def z_byte_mask : SDNode<"SystemZISD::BYTE_MASK", SDT_ZReplicate>;
def z_rotate_mask : SDNode<"SystemZISD::ROTATE_MASK", SDT_ZRotateMask>;
def z_replicate : SDNode<"SystemZISD::REPLICATE", SDT_ZReplicate>;
def z_join_dwords : SDNode<"SystemZISD::JOIN_DWORDS", SDT_ZJoinDwords>;
def z_splat : SDNode<"SystemZISD::SPLAT", SDT_ZVecBinaryInt>;
def z_merge_high : SDNode<"SystemZISD::MERGE_HIGH", SDT_ZVecBinary>;
def z_merge_low : SDNode<"SystemZISD::MERGE_LOW", SDT_ZVecBinary>;
def z_shl_double : SDNode<"SystemZISD::SHL_DOUBLE", SDT_ZVecTernaryInt>;
def z_permute_dwords : SDNode<"SystemZISD::PERMUTE_DWORDS",
                              SDT_ZVecTernaryInt>;
def z_permute : SDNode<"SystemZISD::PERMUTE", SDT_ZVecTernary>;
def z_pack : SDNode<"SystemZISD::PACK", SDT_ZVecBinaryConv>;
def z_packs_cc : SDNode<"SystemZISD::PACKS_CC", SDT_ZVecBinaryConvCC>;
def z_packls_cc : SDNode<"SystemZISD::PACKLS_CC", SDT_ZVecBinaryConvCC>;
def z_unpack_high : SDNode<"SystemZISD::UNPACK_HIGH", SDT_ZVecUnaryConv>;
def z_unpackl_high : SDNode<"SystemZISD::UNPACKL_HIGH", SDT_ZVecUnaryConv>;
def z_unpack_low : SDNode<"SystemZISD::UNPACK_LOW", SDT_ZVecUnaryConv>;
def z_unpackl_low : SDNode<"SystemZISD::UNPACKL_LOW", SDT_ZVecUnaryConv>;
def z_vshl_by_scalar : SDNode<"SystemZISD::VSHL_BY_SCALAR",
                              SDT_ZVecBinaryInt>;
def z_vsrl_by_scalar : SDNode<"SystemZISD::VSRL_BY_SCALAR",
                              SDT_ZVecBinaryInt>;
def z_vsra_by_scalar : SDNode<"SystemZISD::VSRA_BY_SCALAR",
                              SDT_ZVecBinaryInt>;
def z_vsum : SDNode<"SystemZISD::VSUM", SDT_ZVecBinaryConv>;
def z_vicmpe : SDNode<"SystemZISD::VICMPE", SDT_ZVecBinary>;
def z_vicmph : SDNode<"SystemZISD::VICMPH", SDT_ZVecBinary>;
def z_vicmphl : SDNode<"SystemZISD::VICMPHL", SDT_ZVecBinary>;
def z_vicmpes : SDNode<"SystemZISD::VICMPES", SDT_ZVecBinaryCC>;
def z_vicmphs : SDNode<"SystemZISD::VICMPHS", SDT_ZVecBinaryCC>;
def z_vicmphls : SDNode<"SystemZISD::VICMPHLS", SDT_ZVecBinaryCC>;
def z_vfcmpe : SDNode<"SystemZISD::VFCMPE", SDT_ZVecBinaryConv>;
def z_vfcmph : SDNode<"SystemZISD::VFCMPH", SDT_ZVecBinaryConv>;
def z_vfcmphe : SDNode<"SystemZISD::VFCMPHE", SDT_ZVecBinaryConv>;
def z_vfcmpes : SDNode<"SystemZISD::VFCMPES", SDT_ZVecBinaryConvCC>;
def z_vfcmphs : SDNode<"SystemZISD::VFCMPHS", SDT_ZVecBinaryConvCC>;
def z_vfcmphes : SDNode<"SystemZISD::VFCMPHES", SDT_ZVecBinaryConvCC>;
def z_vextend : SDNode<"SystemZISD::VEXTEND", SDT_ZVecUnaryConv>;
def z_vround : SDNode<"SystemZISD::VROUND", SDT_ZVecUnaryConv>;
def z_vtm : SDNode<"SystemZISD::VTM", SDT_ZCmp>;
def z_vfae_cc : SDNode<"SystemZISD::VFAE_CC", SDT_ZVecTernaryIntCC>;
def z_vfaez_cc : SDNode<"SystemZISD::VFAEZ_CC", SDT_ZVecTernaryIntCC>;
def z_vfee_cc : SDNode<"SystemZISD::VFEE_CC", SDT_ZVecBinaryCC>;
def z_vfeez_cc : SDNode<"SystemZISD::VFEEZ_CC", SDT_ZVecBinaryCC>;
def z_vfene_cc : SDNode<"SystemZISD::VFENE_CC", SDT_ZVecBinaryCC>;
def z_vfenez_cc : SDNode<"SystemZISD::VFENEZ_CC", SDT_ZVecBinaryCC>;
def z_vistr_cc : SDNode<"SystemZISD::VISTR_CC", SDT_ZVecUnaryCC>;
def z_vstrc_cc : SDNode<"SystemZISD::VSTRC_CC",
                        SDT_ZVecQuaternaryIntCC>;
def z_vstrcz_cc : SDNode<"SystemZISD::VSTRCZ_CC",
                         SDT_ZVecQuaternaryIntCC>;
def z_vstrs_cc : SDNode<"SystemZISD::VSTRS_CC",
                        SDT_ZVecTernaryConvCC>;
def z_vstrsz_cc : SDNode<"SystemZISD::VSTRSZ_CC",
                         SDT_ZVecTernaryConvCC>;
def z_vftci : SDNode<"SystemZISD::VFTCI", SDT_ZVecBinaryConvIntCC>;

class AtomicWOp<string name, SDTypeProfile profile = SDT_ZAtomicLoadBinaryW>
  : SDNode<"SystemZISD::"##name, profile,
           [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;

def z_atomic_swapw : AtomicWOp<"ATOMIC_SWAPW">;
def z_atomic_loadw_add : AtomicWOp<"ATOMIC_LOADW_ADD">;
def z_atomic_loadw_sub : AtomicWOp<"ATOMIC_LOADW_SUB">;
def z_atomic_loadw_and : AtomicWOp<"ATOMIC_LOADW_AND">;
def z_atomic_loadw_or : AtomicWOp<"ATOMIC_LOADW_OR">;
def z_atomic_loadw_xor : AtomicWOp<"ATOMIC_LOADW_XOR">;
def z_atomic_loadw_nand : AtomicWOp<"ATOMIC_LOADW_NAND">;
def z_atomic_loadw_min : AtomicWOp<"ATOMIC_LOADW_MIN">;
def z_atomic_loadw_max : AtomicWOp<"ATOMIC_LOADW_MAX">;
def z_atomic_loadw_umin : AtomicWOp<"ATOMIC_LOADW_UMIN">;
def z_atomic_loadw_umax : AtomicWOp<"ATOMIC_LOADW_UMAX">;

def z_atomic_cmp_swap : SDNode<"SystemZISD::ATOMIC_CMP_SWAP",
                               SDT_ZAtomicCmpSwap,
                               [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                                SDNPMemOperand]>;
def z_atomic_cmp_swapw : SDNode<"SystemZISD::ATOMIC_CMP_SWAPW",
                                SDT_ZAtomicCmpSwapW,
                                [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                                 SDNPMemOperand]>;

def z_atomic_load_128 : SDNode<"SystemZISD::ATOMIC_LOAD_128",
                               SDT_ZAtomicLoad128,
                               [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def z_atomic_store_128 : SDNode<"SystemZISD::ATOMIC_STORE_128",
                                SDT_ZAtomicStore128,
                                [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def z_atomic_cmp_swap_128 : SDNode<"SystemZISD::ATOMIC_CMP_SWAP_128",
                                   SDT_ZAtomicCmpSwap128,
                                   [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                                    SDNPMemOperand]>;

def z_mvc : SDNode<"SystemZISD::MVC", SDT_ZMemMemLength,
                   [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
def z_mvc_loop : SDNode<"SystemZISD::MVC_LOOP", SDT_ZMemMemLoop,
                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
def z_nc : SDNode<"SystemZISD::NC", SDT_ZMemMemLength,
                  [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
def z_nc_loop : SDNode<"SystemZISD::NC_LOOP", SDT_ZMemMemLoop,
                       [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
def z_oc : SDNode<"SystemZISD::OC", SDT_ZMemMemLength,
                  [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
def z_oc_loop : SDNode<"SystemZISD::OC_LOOP", SDT_ZMemMemLoop,
                       [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
def z_xc : SDNode<"SystemZISD::XC", SDT_ZMemMemLength,
                  [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
def z_xc_loop : SDNode<"SystemZISD::XC_LOOP", SDT_ZMemMemLoop,
                       [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
def z_clc : SDNode<"SystemZISD::CLC", SDT_ZMemMemLengthCC,
                   [SDNPHasChain, SDNPMayLoad]>;
def z_clc_loop : SDNode<"SystemZISD::CLC_LOOP", SDT_ZMemMemLoopCC,
                        [SDNPHasChain, SDNPMayLoad]>;
def z_strcmp : SDNode<"SystemZISD::STRCMP", SDT_ZStringCC,
                      [SDNPHasChain, SDNPMayLoad]>;
def z_stpcpy : SDNode<"SystemZISD::STPCPY", SDT_ZString,
                      [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
def z_search_string : SDNode<"SystemZISD::SEARCH_STRING", SDT_ZStringCC,
                             [SDNPHasChain, SDNPMayLoad]>;
def z_prefetch : SDNode<"SystemZISD::PREFETCH", SDT_ZPrefetch,
                        [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
                         SDNPMemOperand]>;

def z_tbegin : SDNode<"SystemZISD::TBEGIN", SDT_ZTBegin,
                      [SDNPHasChain, SDNPMayStore, SDNPSideEffect]>;
def z_tbegin_nofloat : SDNode<"SystemZISD::TBEGIN_NOFLOAT", SDT_ZTBegin,
                              [SDNPHasChain, SDNPMayStore, SDNPSideEffect]>;
def z_tend : SDNode<"SystemZISD::TEND", SDT_ZTEnd,
                    [SDNPHasChain, SDNPSideEffect]>;

def z_vshl : SDNode<"ISD::SHL", SDT_ZVecBinary>;
def z_vsra : SDNode<"ISD::SRA", SDT_ZVecBinary>;
def z_vsrl : SDNode<"ISD::SRL", SDT_ZVecBinary>;

//===----------------------------------------------------------------------===//
// Pattern fragments
//===----------------------------------------------------------------------===//

def z_loadbswap16 : PatFrag<(ops node:$addr), (z_loadbswap node:$addr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def z_loadbswap32 : PatFrag<(ops node:$addr), (z_loadbswap node:$addr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
def z_loadbswap64 : PatFrag<(ops node:$addr), (z_loadbswap node:$addr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

def z_storebswap16 : PatFrag<(ops node:$src, node:$addr),
                             (z_storebswap node:$src, node:$addr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def z_storebswap32 : PatFrag<(ops node:$src, node:$addr),
                             (z_storebswap node:$src, node:$addr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
def z_storebswap64 : PatFrag<(ops node:$src, node:$addr),
                             (z_storebswap node:$src, node:$addr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

// Fragments including CC as an implicit source.
def z_br_ccmask
  : PatFrag<(ops node:$valid, node:$mask, node:$bb),
            (z_br_ccmask_1 node:$valid, node:$mask, node:$bb, CC)>;
def z_select_ccmask
  : PatFrag<(ops node:$true, node:$false, node:$valid, node:$mask),
            (z_select_ccmask_1 node:$true, node:$false,
                               node:$valid, node:$mask, CC)>;
def z_ipm : PatFrag<(ops), (z_ipm_1 CC)>;
def z_addcarry : PatFrag<(ops node:$lhs, node:$rhs),
                         (z_addcarry_1 node:$lhs, node:$rhs, CC)>;
def z_subcarry : PatFrag<(ops node:$lhs, node:$rhs),
                         (z_subcarry_1 node:$lhs, node:$rhs, CC)>;

// Signed and unsigned comparisons.
def z_scmp : PatFrag<(ops node:$a, node:$b), (z_icmp node:$a, node:$b, imm), [{
  unsigned Type = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
  return Type != SystemZICMP::UnsignedOnly;
}]>;
def z_ucmp : PatFrag<(ops node:$a, node:$b), (z_icmp node:$a, node:$b, imm), [{
  unsigned Type = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
  return Type != SystemZICMP::SignedOnly;
}]>;

// Register- and memory-based TEST UNDER MASK.
def z_tm_reg : PatFrag<(ops node:$a, node:$b), (z_tm node:$a, node:$b, imm)>;
def z_tm_mem : PatFrag<(ops node:$a, node:$b), (z_tm node:$a, node:$b, 0)>;

// Register sign-extend operations. Sub-32-bit values are represented as i32s.
def sext8 : PatFrag<(ops node:$src), (sext_inreg node:$src, i8)>;
def sext16 : PatFrag<(ops node:$src), (sext_inreg node:$src, i16)>;
def sext32 : PatFrag<(ops node:$src), (sext (i32 node:$src))>;

// Match extensions of an i32 to an i64, followed by an in-register sign
// extension from a sub-i32 value.
def sext8dbl : PatFrag<(ops node:$src), (sext8 (anyext node:$src))>;
def sext16dbl : PatFrag<(ops node:$src), (sext16 (anyext node:$src))>;

// Register zero-extend operations. Sub-32-bit values are represented as i32s.
def zext8 : PatFrag<(ops node:$src), (and node:$src, 0xff)>;
def zext16 : PatFrag<(ops node:$src), (and node:$src, 0xffff)>;
def zext32 : PatFrag<(ops node:$src), (zext (i32 node:$src))>;

// Extending loads in which the extension type can be signed.
def asextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
  unsigned Type = cast<LoadSDNode>(N)->getExtensionType();
  return Type == ISD::EXTLOAD || Type == ISD::SEXTLOAD;
}]>;
def asextloadi8 : PatFrag<(ops node:$ptr), (asextload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;
def asextloadi16 : PatFrag<(ops node:$ptr), (asextload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def asextloadi32 : PatFrag<(ops node:$ptr), (asextload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

// Extending loads in which the extension type can be unsigned.
def azextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
  unsigned Type = cast<LoadSDNode>(N)->getExtensionType();
  return Type == ISD::EXTLOAD || Type == ISD::ZEXTLOAD;
}]>;
def azextloadi8 : PatFrag<(ops node:$ptr), (azextload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;
def azextloadi16 : PatFrag<(ops node:$ptr), (azextload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def azextloadi32 : PatFrag<(ops node:$ptr), (azextload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

// Extending loads in which the extension type doesn't matter.
def anyextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
  return cast<LoadSDNode>(N)->getExtensionType() != ISD::NON_EXTLOAD;
}]>;
def anyextloadi8 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;
def anyextloadi16 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def anyextloadi32 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

// Aligned loads.
class AlignedLoad<SDPatternOperator load>
  : PatFrag<(ops node:$addr), (load node:$addr), [{
  auto *Load = cast<LoadSDNode>(N);
  return Load->getAlignment() >= Load->getMemoryVT().getStoreSize();
}]>;
def aligned_load : AlignedLoad<load>;
def aligned_asextloadi16 : AlignedLoad<asextloadi16>;
def aligned_asextloadi32 : AlignedLoad<asextloadi32>;
def aligned_azextloadi16 : AlignedLoad<azextloadi16>;
def aligned_azextloadi32 : AlignedLoad<azextloadi32>;

// Aligned stores.
class AlignedStore<SDPatternOperator store>
  : PatFrag<(ops node:$src, node:$addr), (store node:$src, node:$addr), [{
  auto *Store = cast<StoreSDNode>(N);
  return Store->getAlignment() >= Store->getMemoryVT().getStoreSize();
}]>;
def aligned_store : AlignedStore<store>;
def aligned_truncstorei16 : AlignedStore<truncstorei16>;
def aligned_truncstorei32 : AlignedStore<truncstorei32>;

// Non-volatile loads. Used for instructions that might access the storage
// location multiple times.
class NonvolatileLoad<SDPatternOperator load>
  : PatFrag<(ops node:$addr), (load node:$addr), [{
  auto *Load = cast<LoadSDNode>(N);
  return !Load->isVolatile();
}]>;
def nonvolatile_anyextloadi8 : NonvolatileLoad<anyextloadi8>;
def nonvolatile_anyextloadi16 : NonvolatileLoad<anyextloadi16>;
def nonvolatile_anyextloadi32 : NonvolatileLoad<anyextloadi32>;

// Non-volatile stores.
class NonvolatileStore<SDPatternOperator store>
  : PatFrag<(ops node:$src, node:$addr), (store node:$src, node:$addr), [{
  auto *Store = cast<StoreSDNode>(N);
  return !Store->isVolatile();
}]>;
def nonvolatile_truncstorei8 : NonvolatileStore<truncstorei8>;
def nonvolatile_truncstorei16 : NonvolatileStore<truncstorei16>;
def nonvolatile_truncstorei32 : NonvolatileStore<truncstorei32>;

// A store of a load that can be implemented using MVC.
def mvc_store : PatFrag<(ops node:$value, node:$addr),
                        (unindexedstore node:$value, node:$addr),
                        [{ return storeLoadCanUseMVC(N); }]>;

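// For illustration only (hypothetical instruction name; the real selection
// patterns live with the instruction definitions, not here): a pattern built
// on mvc_store pairs the source and destination addresses with a block
// length, roughly
//   def : Pat<(mvc_store (i32 (anyextloadi8 bdaddr12only:$src)),
//                        bdaddr12only:$dest),
//             (SomeMVCPseudo bdaddr12only:$dest, bdaddr12only:$src, 1)>;
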
595 // Binary read-modify-write operations on memory in which the other
596 // operand is also memory and for which block operations like NC can
597 // be used. There are two patterns for each operator, depending on
598 // which operand contains the "other" load.
599 multiclass block_op<SDPatternOperator operator> {
600 def "1" : PatFrag<(ops node:$value, node:$addr),
601 (unindexedstore (operator node:$value,
602 (unindexedload node:$addr)),
604 [{ return storeLoadCanUseBlockBinary(N, 0); }]>;
605 def "2" : PatFrag<(ops node:$value, node:$addr),
606 (unindexedstore (operator (unindexedload node:$addr),
609 [{ return storeLoadCanUseBlockBinary(N, 1); }]>;
611 defm block_and : block_op<and>;
612 defm block_or : block_op<or>;
613 defm block_xor : block_op<xor>;
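// For illustration only: block_and1 matches a DAG of the form
//   (store (and $value, (load $addr)), $addr)
// and block_and2 the commuted form
//   (store (and (load $addr), $value), $addr)
// where $value is normally itself a load from the second memory operand, so
// the whole read-modify-write can be selected to a storage-to-storage NC.
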
// Insertions.
def inserti8 : PatFrag<(ops node:$src1, node:$src2),
                       (or (and node:$src1, -256), node:$src2)>;
def insertll : PatFrag<(ops node:$src1, node:$src2),
                       (or (and node:$src1, 0xffffffffffff0000), node:$src2)>;
def insertlh : PatFrag<(ops node:$src1, node:$src2),
                       (or (and node:$src1, 0xffffffff0000ffff), node:$src2)>;
def inserthl : PatFrag<(ops node:$src1, node:$src2),
                       (or (and node:$src1, 0xffff0000ffffffff), node:$src2)>;
def inserthh : PatFrag<(ops node:$src1, node:$src2),
                       (or (and node:$src1, 0x0000ffffffffffff), node:$src2)>;
def insertlf : PatFrag<(ops node:$src1, node:$src2),
                       (or (and node:$src1, 0xffffffff00000000), node:$src2)>;
def inserthf : PatFrag<(ops node:$src1, node:$src2),
                       (or (and node:$src1, 0x00000000ffffffff), node:$src2)>;

// ORs that can be treated as insertions.
def or_as_inserti8 : PatFrag<(ops node:$src1, node:$src2),
                             (or node:$src1, node:$src2), [{
  unsigned BitWidth = N->getValueType(0).getScalarSizeInBits();
  return CurDAG->MaskedValueIsZero(N->getOperand(0),
                                   APInt::getLowBitsSet(BitWidth, 8));
}]>;

// ORs that can be treated as reversed insertions.
def or_as_revinserti8 : PatFrag<(ops node:$src1, node:$src2),
                                (or node:$src1, node:$src2), [{
  unsigned BitWidth = N->getValueType(0).getScalarSizeInBits();
  return CurDAG->MaskedValueIsZero(N->getOperand(1),
                                   APInt::getLowBitsSet(BitWidth, 8));
}]>;

// Negative integer absolute.
def z_inegabs : PatFrag<(ops node:$src), (ineg (z_iabs node:$src))>;

// Integer absolute, matching the canonical form generated by DAGCombiner.
def z_iabs32 : PatFrag<(ops node:$src),
                       (xor (add node:$src, (sra node:$src, (i32 31))),
                            (sra node:$src, (i32 31)))>;
def z_iabs64 : PatFrag<(ops node:$src),
                       (xor (add node:$src, (sra node:$src, (i32 63))),
                            (sra node:$src, (i32 63)))>;
def z_inegabs32 : PatFrag<(ops node:$src), (ineg (z_iabs32 node:$src))>;
def z_inegabs64 : PatFrag<(ops node:$src), (ineg (z_iabs64 node:$src))>;

// Integer multiply-and-add
def z_muladd : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                       (add (mul node:$src1, node:$src2), node:$src3)>;

// Alternatives to match operations with or without an overflow CC result.
def z_sadd : PatFrags<(ops node:$src1, node:$src2),
                      [(z_saddo node:$src1, node:$src2),
                       (add node:$src1, node:$src2)]>;
def z_uadd : PatFrags<(ops node:$src1, node:$src2),
                      [(z_uaddo node:$src1, node:$src2),
                       (add node:$src1, node:$src2)]>;
def z_ssub : PatFrags<(ops node:$src1, node:$src2),
                      [(z_ssubo node:$src1, node:$src2),
                       (sub node:$src1, node:$src2)]>;
def z_usub : PatFrags<(ops node:$src1, node:$src2),
                      [(z_usubo node:$src1, node:$src2),
                       (sub node:$src1, node:$src2)]>;

// Combined logical operations.
def andc : PatFrag<(ops node:$src1, node:$src2),
                   (and node:$src1, (not node:$src2))>;
def orc : PatFrag<(ops node:$src1, node:$src2),
                  (or node:$src1, (not node:$src2))>;
def nand : PatFrag<(ops node:$src1, node:$src2),
                   (not (and node:$src1, node:$src2))>;
def nor : PatFrag<(ops node:$src1, node:$src2),
                  (not (or node:$src1, node:$src2))>;
def nxor : PatFrag<(ops node:$src1, node:$src2),
                   (not (xor node:$src1, node:$src2))>;

// Fused multiply-subtract, using the natural operand order.
def any_fms : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                      (any_fma node:$src1, node:$src2, (fneg node:$src3))>;

// Fused multiply-add and multiply-subtract, but with the order of the
// operands matching SystemZ's MA and MS instructions.
def z_any_fma : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                        (any_fma node:$src2, node:$src3, node:$src1)>;
def z_any_fms : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                        (any_fma node:$src2, node:$src3, (fneg node:$src1))>;

// Negative fused multiply-add and multiply-subtract.
def any_fnma : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                       (fneg (any_fma node:$src1, node:$src2, node:$src3))>;
def any_fnms : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                       (fneg (any_fms node:$src1, node:$src2, node:$src3))>;

// Floating-point negative absolute.
def fnabs : PatFrag<(ops node:$ptr), (fneg (fabs node:$ptr))>;

// Create a unary operator that loads from memory and then performs
// the given operation on it.
class loadu<SDPatternOperator operator, SDPatternOperator load = load>
  : PatFrag<(ops node:$addr), (operator (load node:$addr))>;

// Create a store operator that performs the given unary operation
// on the value before storing it.
class storeu<SDPatternOperator operator, SDPatternOperator store = store>
  : PatFrag<(ops node:$value, node:$addr),
            (store (operator node:$value), node:$addr)>;

// Create a store operator that performs the given inherent operation
// and stores the resulting value.
class storei<SDPatternOperator operator, SDPatternOperator store = store>
  : PatFrag<(ops node:$addr),
            (store (operator), node:$addr)>;

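// For illustration only (hypothetical fragment and instruction names): these
// classes are instantiated next to the instructions that use them, e.g.
//   def fnabs_load : loadu<fnabs>;
//   def : Pat<(f64 (fnabs_load bdxaddr12only:$addr)),
//             (SomeLoadNegAbs bdxaddr12only:$addr)>;
// so the fragment matches the load followed by the unary operation; the
// storeu/storei variants play the same role on the store side.
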
// Create a shift operator that optionally ignores an AND of the
// shift count with an immediate if the bottom 6 bits are all set.
def imm32bottom6set : PatLeaf<(i32 imm), [{
  return (N->getZExtValue() & 0x3f) == 0x3f;
}]>;
class shiftop<SDPatternOperator operator>
  : PatFrags<(ops node:$val, node:$count),
             [(operator node:$val, node:$count),
              (operator node:$val, (and node:$count, imm32bottom6set))]>;

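// For illustration only (hypothetical multiclass and instruction names):
// shift instructions take shiftop<...> as their pattern operator, e.g.
//   defm SomeSLL : HypotheticalShiftPattern<"sll", shiftop<shl>, GR32>;
// so that both (shl $val, $count) and (shl $val, (and $count, 0x3f)) select
// to the same instruction; the AND is redundant because the hardware only
// uses the low 6 bits of the shift count.
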
def imm32mod64 : PatLeaf<(i32 imm), [{
  return (N->getZExtValue() % 64 == 0);
}]>;

// Load a scalar and replicate it in all elements of a vector.
class z_replicate_load<ValueType scalartype, SDPatternOperator load>
  : PatFrag<(ops node:$addr),
            (z_replicate (scalartype (load node:$addr)))>;
def z_replicate_loadi8 : z_replicate_load<i32, anyextloadi8>;
def z_replicate_loadi16 : z_replicate_load<i32, anyextloadi16>;
def z_replicate_loadi32 : z_replicate_load<i32, load>;
def z_replicate_loadi64 : z_replicate_load<i64, load>;
def z_replicate_loadf32 : z_replicate_load<f32, load>;
def z_replicate_loadf64 : z_replicate_load<f64, load>;
// Byte-swapped replicated vector element loads.
def z_replicate_loadbswapi16 : z_replicate_load<i32, z_loadbswap16>;
def z_replicate_loadbswapi32 : z_replicate_load<i32, z_loadbswap32>;
def z_replicate_loadbswapi64 : z_replicate_load<i64, z_loadbswap64>;

// Load a scalar and insert it into a single element of a vector.
class z_vle<ValueType scalartype, SDPatternOperator load>
  : PatFrag<(ops node:$vec, node:$addr, node:$index),
            (z_vector_insert node:$vec, (scalartype (load node:$addr)),
                             node:$index)>;
def z_vlei8 : z_vle<i32, anyextloadi8>;
def z_vlei16 : z_vle<i32, anyextloadi16>;
def z_vlei32 : z_vle<i32, load>;
def z_vlei64 : z_vle<i64, load>;
def z_vlef32 : z_vle<f32, load>;
def z_vlef64 : z_vle<f64, load>;
// Byte-swapped vector element loads.
def z_vlebri16 : z_vle<i32, z_loadbswap16>;
def z_vlebri32 : z_vle<i32, z_loadbswap32>;
def z_vlebri64 : z_vle<i64, z_loadbswap64>;

// Load a scalar and insert it into the low element of the high i64 of a
// zeroed vector.
class z_vllez<ValueType scalartype, SDPatternOperator load, int index>
  : PatFrag<(ops node:$addr),
            (z_vector_insert immAllZerosV,
                             (scalartype (load node:$addr)), (i32 index))>;
def z_vllezi8 : z_vllez<i32, anyextloadi8, 7>;
def z_vllezi16 : z_vllez<i32, anyextloadi16, 3>;
def z_vllezi32 : z_vllez<i32, load, 1>;
def z_vllezi64 : PatFrags<(ops node:$addr),
                          [(z_vector_insert immAllZerosV,
                                            (i64 (load node:$addr)), (i32 0)),
                           (z_join_dwords (i64 (load node:$addr)), (i64 0))]>;
// We use high merges to form a v4f32 from four f32s. Propagating zero
// into all elements but index 1 gives this expression.
def z_vllezf32 : PatFrag<(ops node:$addr),
                         (z_merge_high
                          (v2i64
                           (z_unpackl_high
                            (v4i32
                             (bitconvert
                              (v4f32 (scalar_to_vector
                                      (f32 (load node:$addr)))))))),
                          (v2i64
                           (bitconvert (v4f32 immAllZerosV))))>;
def z_vllezf64 : PatFrag<(ops node:$addr),
                         (z_merge_high
                          (v2f64 (scalar_to_vector (f64 (load node:$addr)))),
                          immAllZerosV)>;

// Similarly for the high element of a zeroed vector.
def z_vllezli32 : z_vllez<i32, load, 0>;
def z_vllezlf32 : PatFrag<(ops node:$addr),
                          (z_merge_high
                           (v2i64
                            (bitconvert
                             (z_merge_high
                              (v4f32 (scalar_to_vector
                                      (f32 (load node:$addr)))),
                              (v4f32 immAllZerosV)))),
                           (v2i64
                            (bitconvert (v4f32 immAllZerosV))))>;

// Byte-swapped variants.
def z_vllebrzi16 : z_vllez<i32, z_loadbswap16, 3>;
def z_vllebrzi32 : z_vllez<i32, z_loadbswap32, 1>;
def z_vllebrzli32 : z_vllez<i32, z_loadbswap32, 0>;
def z_vllebrzi64 : PatFrags<(ops node:$addr),
                            [(z_vector_insert immAllZerosV,
                                              (i64 (z_loadbswap64 node:$addr)),
                                              (i32 0)),
                             (z_join_dwords (i64 (z_loadbswap64 node:$addr)),
                                            (i64 0))]>;

// Store one element of a vector.
class z_vste<ValueType scalartype, SDPatternOperator store>
  : PatFrag<(ops node:$vec, node:$addr, node:$index),
            (store (scalartype (z_vector_extract node:$vec, node:$index)),
                   node:$addr)>;
def z_vstei8 : z_vste<i32, truncstorei8>;
def z_vstei16 : z_vste<i32, truncstorei16>;
def z_vstei32 : z_vste<i32, store>;
def z_vstei64 : z_vste<i64, store>;
def z_vstef32 : z_vste<f32, store>;
def z_vstef64 : z_vste<f64, store>;
// Byte-swapped vector element stores.
def z_vstebri16 : z_vste<i32, z_storebswap16>;
def z_vstebri32 : z_vste<i32, z_storebswap32>;
def z_vstebri64 : z_vste<i64, z_storebswap64>;

// Arithmetic negation on vectors.
def z_vneg : PatFrag<(ops node:$x), (sub immAllZerosV, node:$x)>;

// Bitwise negation on vectors.
def z_vnot : PatFrag<(ops node:$x), (xor node:$x, immAllOnesV)>;

// Signed "integer greater than zero" on vectors.
def z_vicmph_zero : PatFrag<(ops node:$x), (z_vicmph node:$x, immAllZerosV)>;

// Signed "integer less than zero" on vectors.
def z_vicmpl_zero : PatFrag<(ops node:$x), (z_vicmph immAllZerosV, node:$x)>;

// Integer absolute on vectors.
class z_viabs<int shift>
  : PatFrag<(ops node:$src),
            (xor (add node:$src, (z_vsra_by_scalar node:$src, (i32 shift))),
                 (z_vsra_by_scalar node:$src, (i32 shift)))>;
def z_viabs8 : z_viabs<7>;
def z_viabs16 : z_viabs<15>;
def z_viabs32 : z_viabs<31>;
def z_viabs64 : z_viabs<63>;

// Sign-extend the i64 elements of a vector.
class z_vse<int shift>
  : PatFrag<(ops node:$src),
            (z_vsra_by_scalar (z_vshl_by_scalar node:$src, shift), shift)>;
def z_vsei8 : z_vse<56>;
def z_vsei16 : z_vse<48>;
def z_vsei32 : z_vse<32>;

// ...and again with the extensions being done on individual i64 scalars.
class z_vse_by_parts<SDPatternOperator operator, int index1, int index2>
  : PatFrag<(ops node:$src),
            (z_join_dwords
             (operator (z_vector_extract node:$src, index1)),
             (operator (z_vector_extract node:$src, index2)))>;
def z_vsei8_by_parts : z_vse_by_parts<sext8dbl, 7, 15>;
def z_vsei16_by_parts : z_vse_by_parts<sext16dbl, 3, 7>;
def z_vsei32_by_parts : z_vse_by_parts<sext32, 1, 3>;