//===-- SystemZISelDAGToDAG.cpp - A dag to dag inst selector for SystemZ --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the SystemZ target.
//
//===----------------------------------------------------------------------===//

#include "SystemZTargetMachine.h"
#include "SystemZISelLowering.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "systemz-isel"

namespace {
// Used to build addressing modes.
struct SystemZAddressingMode {
  // The shape of the address.
  enum AddrForm {
    // base+displacement
    FormBD,

    // base+displacement+index for load and store operands
    FormBDXNormal,

    // base+displacement+index for load address operands
    FormBDXLA,

    // base+displacement+index+ADJDYNALLOC
    FormBDXDynAlloc
  };
  AddrForm Form;

  // The type of displacement.  The enum names here correspond directly
  // to the definitions in SystemZOperand.td.  We could split them into
  // flags -- single/pair, 128-bit, etc. -- but it hardly seems worth it.
  enum DispRange {
    Disp12Only,
    Disp12Pair,
    Disp20Only,
    Disp20Only128,
    Disp20Pair
  };
  DispRange DR;

  // The parts of the address.  The address is equivalent to:
  //
  //   Base + Disp + Index + (IncludesDynAlloc ? ADJDYNALLOC : 0)
  SDValue Base;
  int64_t Disp;
  SDValue Index;
  bool IncludesDynAlloc;

  SystemZAddressingMode(AddrForm form, DispRange dr)
      : Form(form), DR(dr), Base(), Disp(0), Index(),
        IncludesDynAlloc(false) {}
  // True if the address can have an index register.
  bool hasIndexField() { return Form != FormBD; }

  // True if the address can (and must) include ADJDYNALLOC.
  bool isDynAlloc() { return Form == FormBDXDynAlloc; }

  void dump(const llvm::SelectionDAG *DAG) {
    errs() << "SystemZAddressingMode " << this << '\n';

    errs() << " Base ";
    if (Base.getNode())
      Base.getNode()->dump(DAG);
    else
      errs() << "null";
    errs() << '\n';

    if (hasIndexField()) {
      errs() << " Index ";
      if (Index.getNode())
        Index.getNode()->dump(DAG);
      else
        errs() << "null";
      errs() << '\n';
    }

    errs() << " Disp " << Disp;
    if (IncludesDynAlloc)
      errs() << " + ADJDYNALLOC";
    errs() << '\n';
  }
};
// Return a mask with Count low bits set.
static uint64_t allOnes(unsigned int Count) {
  assert(Count <= 64);
  if (Count > 63)
    return UINT64_MAX;
  return (uint64_t(1) << Count) - 1;
}
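// Illustrative values (a sketch based on the definition above, not part of
// the selector logic itself):
//
//   allOnes(0)  == 0x0000000000000000
//   allOnes(12) == 0x0000000000000FFF
//   allOnes(32) == 0x00000000FFFFFFFF
//   allOnes(64) == 0xFFFFFFFFFFFFFFFF  (via the Count > 63 special case,
//                                       which avoids an undefined 64-bit shift)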
// Represents operands 2 to 5 of the ROTATE AND ... SELECTED BITS operation
// given by Opcode.  The operands are: Input (R2), Start (I3), End (I4) and
// Rotate (I5).  The combined operand value is effectively:
//
//   (or (rotl Input, Rotate), ~Mask)
//
// for RNSBG and:
//
//   (and (rotl Input, Rotate), Mask)
//
// otherwise.  The output value has BitSize bits, although Input may be
// narrower (in which case the upper bits are don't care), or wider (in which
// case the result will be truncated as part of the operation).
struct RxSBGOperands {
  RxSBGOperands(unsigned Op, SDValue N)
      : Opcode(Op), BitSize(N.getValueSizeInBits()),
        Mask(allOnes(BitSize)), Input(N), Start(64 - BitSize), End(63),
        Rotate(0) {}

  unsigned Opcode;
  unsigned BitSize;
  uint64_t Mask;
  SDValue Input;
  unsigned Start;
  unsigned End;
  unsigned Rotate;
};
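// As an illustrative mapping (assuming the field meanings described above,
// not additional selector logic): for a 64-bit input, the expression
//
//   (and (rotl X, 8), 0x00000000ffffff00)
//
// would be described by Rotate = 8 and Mask = 0x00000000ffffff00, with
// Start/End naming the contiguous selected bit range in the instruction's
// MSB-first bit numbering (here bits 32 to 55) and all other result bits
// zeroed.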
class SystemZDAGToDAGISel : public SelectionDAGISel {
  const SystemZSubtarget *Subtarget;

  // Used by SystemZOperands.td to create integer constants.
  inline SDValue getImm(const SDNode *Node, uint64_t Imm) const {
    return CurDAG->getTargetConstant(Imm, SDLoc(Node), Node->getValueType(0));
  }

  const SystemZTargetMachine &getTargetMachine() const {
    return static_cast<const SystemZTargetMachine &>(TM);
  }

  const SystemZInstrInfo *getInstrInfo() const {
    return Subtarget->getInstrInfo();
  }
  // Try to fold more of the base or index of AM into AM, where IsBase
  // selects between the base and index.
  bool expandAddress(SystemZAddressingMode &AM, bool IsBase) const;

  // Try to describe N in AM, returning true on success.
  bool selectAddress(SDValue N, SystemZAddressingMode &AM) const;

  // Extract individual target operands from matched address AM.
  void getAddressOperands(const SystemZAddressingMode &AM, EVT VT,
                          SDValue &Base, SDValue &Disp) const;
  void getAddressOperands(const SystemZAddressingMode &AM, EVT VT,
                          SDValue &Base, SDValue &Disp, SDValue &Index) const;

  // Try to match Addr as a FormBD address with displacement type DR.
  // Return true on success, storing the base and displacement in
  // Base and Disp respectively.
  bool selectBDAddr(SystemZAddressingMode::DispRange DR, SDValue Addr,
                    SDValue &Base, SDValue &Disp) const;

  // Try to match Addr as a FormBDX address with displacement type DR.
  // Return true on success and if the result had no index.  Store the
  // base and displacement in Base and Disp respectively.
  bool selectMVIAddr(SystemZAddressingMode::DispRange DR, SDValue Addr,
                     SDValue &Base, SDValue &Disp) const;

  // Try to match Addr as a FormBDX* address of form Form with
  // displacement type DR.  Return true on success, storing the base,
  // displacement and index in Base, Disp and Index respectively.
  bool selectBDXAddr(SystemZAddressingMode::AddrForm Form,
                     SystemZAddressingMode::DispRange DR, SDValue Addr,
                     SDValue &Base, SDValue &Disp, SDValue &Index) const;
  // PC-relative address matching routines used by SystemZOperands.td.
  bool selectPCRelAddress(SDValue Addr, SDValue &Target) const {
    if (SystemZISD::isPCREL(Addr.getOpcode())) {
      Target = Addr.getOperand(0);
      return true;
    }
    return false;
  }

  // BD matching routines used by SystemZOperands.td.
  bool selectBDAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp12Only, Addr, Base, Disp);
  }
  bool selectBDAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp);
  }
  bool selectBDAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp20Only, Addr, Base, Disp);
  }
  bool selectBDAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp);
  }

  // MVI matching routines used by SystemZOperands.td.
  bool selectMVIAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectMVIAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp);
  }
  bool selectMVIAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectMVIAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp);
  }
  // BDX matching routines used by SystemZOperands.td.
  bool selectBDXAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp12Only,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp12Pair,
                         Addr, Base, Disp, Index);
  }
  bool selectDynAlloc12Only(SDValue Addr, SDValue &Base, SDValue &Disp,
                            SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXDynAlloc,
                         SystemZAddressingMode::Disp12Only,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp20Only,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr20Only128(SDValue Addr, SDValue &Base, SDValue &Disp,
                              SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp20Only128,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp20Pair,
                         Addr, Base, Disp, Index);
  }
  bool selectLAAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                          SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXLA,
                         SystemZAddressingMode::Disp12Pair,
                         Addr, Base, Disp, Index);
  }
  bool selectLAAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                          SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXLA,
                         SystemZAddressingMode::Disp20Pair,
                         Addr, Base, Disp, Index);
  }
  // Try to match Addr as an address with a base, 12-bit displacement
  // and index, where the index is element Elem of a vector.
  // Return true on success, storing the base, displacement and vector
  // in Base, Disp and Index respectively.
  bool selectBDVAddr12Only(SDValue Addr, SDValue Elem, SDValue &Base,
                           SDValue &Disp, SDValue &Index) const;

  // Check whether (or Op (and X InsertMask)) is effectively an insertion
  // of X into bits InsertMask of some Y != Op.  Return true if so and
  // set Op to that Y.
  bool detectOrAndInsertion(SDValue &Op, uint64_t InsertMask) const;

  // Try to update RxSBG so that only the bits of RxSBG.Input in Mask are used.
  // Return true on success.
  bool refineRxSBGMask(RxSBGOperands &RxSBG, uint64_t Mask) const;

  // Try to fold some of RxSBG.Input into other fields of RxSBG.
  // Return true on success.
  bool expandRxSBG(RxSBGOperands &RxSBG) const;

  // Return an undefined value of type VT.
  SDValue getUNDEF(const SDLoc &DL, EVT VT) const;

  // Convert N to VT, if it isn't already.
  SDValue convertTo(const SDLoc &DL, EVT VT, SDValue N) const;

  // Try to implement AND or shift node N using RISBG with the zero flag set.
  // Return the selected node on success, otherwise return null.
  bool tryRISBGZero(SDNode *N);

  // Try to use RISBG or Opcode to implement OR or XOR node N.
  // Return the selected node on success, otherwise return null.
  bool tryRxSBG(SDNode *N, unsigned Opcode);

  // If Op0 is null, then Node is a constant that can be loaded using:
  //
  //   (Opcode UpperVal LowerVal)
  //
  // If Op0 is nonnull, then Node can be implemented using:
  //
  //   (Opcode (Opcode Op0 UpperVal) LowerVal)
  void splitLargeImmediate(unsigned Opcode, SDNode *Node, SDValue Op0,
                           uint64_t UpperVal, uint64_t LowerVal);

  void loadVectorConstant(const SystemZVectorConstantInfo &VCI,
                          SDNode *Node);

  // Try to use gather instruction Opcode to implement vector insertion N.
  bool tryGather(SDNode *N, unsigned Opcode);

  // Try to use scatter instruction Opcode to implement store Store.
  bool tryScatter(StoreSDNode *Store, unsigned Opcode);

  // Change a chain of {load; op; store} of the same value into a simple op
  // through memory of that value, if the uses of the modified value and its
  // address are suitable.
  bool tryFoldLoadStoreIntoMemOperand(SDNode *Node);

  // Return true if Load and Store are loads and stores of the same size
  // and are guaranteed not to overlap.  Such operations can be implemented
  // using block (SS-format) instructions.
  //
  // Partial overlap would lead to incorrect code, since the block operations
  // are logically bytewise, even though they have a fast path for the
  // non-overlapping case.  We also need to avoid full overlap (i.e. two
  // addresses that might be equal at run time) because although that case
  // would be handled correctly, it might be implemented by millicode.
  bool canUseBlockOperation(StoreSDNode *Store, LoadSDNode *Load) const;

  // N is a (store (load Y), X) pattern.  Return true if it can use an MVC
  // from Y to X.
  bool storeLoadCanUseMVC(SDNode *N) const;

  // N is a (store (op (load A[0]), (load A[1])), X) pattern.  Return true
  // if A[1 - I] == X and if N can use a block operation like NC from A[I]
  // to X.
  bool storeLoadCanUseBlockBinary(SDNode *N, unsigned I) const;

  // Try to expand a boolean SELECT_CCMASK using an IPM sequence.
  SDValue expandSelectBoolean(SDNode *Node);

public:
  SystemZDAGToDAGISel(SystemZTargetMachine &TM, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(TM, OptLevel) {}
  bool runOnMachineFunction(MachineFunction &MF) override {
    const Function &F = MF.getFunction();
    if (F.getFnAttribute("mnop-mcount").getValueAsString() == "true" &&
        F.getFnAttribute("fentry-call").getValueAsString() != "true")
      report_fatal_error("mnop-mcount only supported with fentry-call");

    Subtarget = &MF.getSubtarget<SystemZSubtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  // Override MachineFunctionPass.
  StringRef getPassName() const override {
    return "SystemZ DAG->DAG Pattern Instruction Selection";
  }

  // Override SelectionDAGISel.
  void Select(SDNode *Node) override;
  bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override;
  bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;
  void PreprocessISelDAG() override;

  // Include the pieces autogenerated from the target description.
#include "SystemZGenDAGISel.inc"
};
} // end anonymous namespace
FunctionPass *llvm::createSystemZISelDag(SystemZTargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new SystemZDAGToDAGISel(TM, OptLevel);
}
// Return true if Val should be selected as a displacement for an address
// with range DR.  Here we're interested in the range of both the instruction
// described by DR and of any pairing instruction.
static bool selectDisp(SystemZAddressingMode::DispRange DR, int64_t Val) {
  switch (DR) {
  case SystemZAddressingMode::Disp12Only:
    return isUInt<12>(Val);

  case SystemZAddressingMode::Disp12Pair:
  case SystemZAddressingMode::Disp20Only:
  case SystemZAddressingMode::Disp20Pair:
    return isInt<20>(Val);

  case SystemZAddressingMode::Disp20Only128:
    return isInt<20>(Val) && isInt<20>(Val + 8);
  }
  llvm_unreachable("Unhandled displacement range");
}
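// For illustration (a sketch of the ranges above, not extra logic): a D12
// field is an unsigned 12-bit value, so 0 <= Val < 4096, while a D20 field is
// a signed 20-bit value, so -524288 <= Val < 524288.  Disp20Only128
// additionally requires Val + 8 to stay in the signed 20-bit range, so that
// the implied second 8-byte word at Val + 8 is also addressable.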
// Change the base or index in AM to Value, where IsBase selects
// between the base and index.
static void changeComponent(SystemZAddressingMode &AM, bool IsBase,
                            SDValue Value) {
  if (IsBase)
    AM.Base = Value;
  else
    AM.Index = Value;
}

// The base or index of AM is equivalent to Value + ADJDYNALLOC,
// where IsBase selects between the base and index.  Try to fold the
// ADJDYNALLOC into AM.
static bool expandAdjDynAlloc(SystemZAddressingMode &AM, bool IsBase,
                              SDValue Value) {
  if (AM.isDynAlloc() && !AM.IncludesDynAlloc) {
    changeComponent(AM, IsBase, Value);
    AM.IncludesDynAlloc = true;
    return true;
  }
  return false;
}
// The base of AM is equivalent to Base + Index.  Try to use Index as
// the index register.
static bool expandIndex(SystemZAddressingMode &AM, SDValue Base,
                        SDValue Index) {
  if (AM.hasIndexField() && !AM.Index.getNode()) {
    AM.Base = Base;
    AM.Index = Index;
    return true;
  }
  return false;
}

// The base or index of AM is equivalent to Op0 + Op1, where IsBase selects
// between the base and index.  Try to fold Op1 into AM's displacement.
static bool expandDisp(SystemZAddressingMode &AM, bool IsBase,
                       SDValue Op0, uint64_t Op1) {
  // First try adjusting the displacement.
  int64_t TestDisp = AM.Disp + Op1;
  if (selectDisp(AM.DR, TestDisp)) {
    changeComponent(AM, IsBase, Op0);
    AM.Disp = TestDisp;
    return true;
  }

  // We could consider forcing the displacement into a register and
  // using it as an index, but it would need to be carefully tuned.
  return false;
}
bool SystemZDAGToDAGISel::expandAddress(SystemZAddressingMode &AM,
                                        bool IsBase) const {
  SDValue N = IsBase ? AM.Base : AM.Index;
  unsigned Opcode = N.getOpcode();
  if (Opcode == ISD::TRUNCATE) {
    N = N.getOperand(0);
    Opcode = N.getOpcode();
  }
  if (Opcode == ISD::ADD || CurDAG->isBaseWithConstantOffset(N)) {
    SDValue Op0 = N.getOperand(0);
    SDValue Op1 = N.getOperand(1);

    unsigned Op0Code = Op0->getOpcode();
    unsigned Op1Code = Op1->getOpcode();

    if (Op0Code == SystemZISD::ADJDYNALLOC)
      return expandAdjDynAlloc(AM, IsBase, Op1);
    if (Op1Code == SystemZISD::ADJDYNALLOC)
      return expandAdjDynAlloc(AM, IsBase, Op0);

    if (Op0Code == ISD::Constant)
      return expandDisp(AM, IsBase, Op1,
                        cast<ConstantSDNode>(Op0)->getSExtValue());
    if (Op1Code == ISD::Constant)
      return expandDisp(AM, IsBase, Op0,
                        cast<ConstantSDNode>(Op1)->getSExtValue());

    if (IsBase && expandIndex(AM, Op0, Op1))
      return true;
  }
  if (Opcode == SystemZISD::PCREL_OFFSET) {
    SDValue Full = N.getOperand(0);
    SDValue Base = N.getOperand(1);
    SDValue Anchor = Base.getOperand(0);
    uint64_t Offset = (cast<GlobalAddressSDNode>(Full)->getOffset() -
                       cast<GlobalAddressSDNode>(Anchor)->getOffset());
    return expandDisp(AM, IsBase, Base, Offset);
  }
  return false;
}
// Return true if an instruction with displacement range DR should be
// used for displacement value Val.  selectDisp(DR, Val) must already hold.
static bool isValidDisp(SystemZAddressingMode::DispRange DR, int64_t Val) {
  assert(selectDisp(DR, Val) && "Invalid displacement");
  switch (DR) {
  case SystemZAddressingMode::Disp12Only:
  case SystemZAddressingMode::Disp20Only:
  case SystemZAddressingMode::Disp20Only128:
    return true;

  case SystemZAddressingMode::Disp12Pair:
    // Use the other instruction if the displacement is too large.
    return isUInt<12>(Val);

  case SystemZAddressingMode::Disp20Pair:
    // Use the other instruction if the displacement is small enough.
    return !isUInt<12>(Val);
  }
  llvm_unreachable("Unhandled displacement range");
}
// Return true if Base + Disp + Index should be performed by LA(Y).
static bool shouldUseLA(SDNode *Base, int64_t Disp, SDNode *Index) {
  // Don't use LA(Y) for constants.
  if (!Base)
    return false;

  // Always use LA(Y) for frame addresses, since we know that the destination
  // register is almost always (perhaps always) going to be different from
  // the frame register.
  if (Base->getOpcode() == ISD::FrameIndex)
    return true;

  if (Disp) {
    // Always use LA(Y) if there is a base, displacement and index.
    if (Index)
      return true;

    // Always use LA if the displacement is small enough.  It should always
    // be no worse than AGHI (and better if it avoids a move).
    if (isUInt<12>(Disp))
      return true;

    // For similar reasons, always use LAY if the constant is too big for AGHI.
    // LAY should be no worse than AGFI.
    if (!isInt<16>(Disp))
      return true;
  } else {
    // Don't use LA for plain registers.
    if (!Index)
      return false;

    // Don't use LA for plain addition if the index operand is only used
    // once.  It should be a natural two-operand addition in that case.
    if (Index->hasOneUse())
      return false;

    // Prefer addition if the second operation is sign-extended, in the
    // hope of using AGF.
    unsigned IndexOpcode = Index->getOpcode();
    if (IndexOpcode == ISD::SIGN_EXTEND ||
        IndexOpcode == ISD::SIGN_EXTEND_INREG)
      return false;
  }

  // Don't use LA for two-operand addition if either operand is only
  // used once.  The addition instructions are better in that case.
  if (Base->hasOneUse())
    return false;

  return true;
}
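// A worked example of the heuristic above (illustrative only): for
// "%base + 100" with no index, the displacement fits in 12 bits, so LA is
// chosen; it is no worse than an AGHI of 100 and avoids a register move when
// the result must live in a different register.  For a plain "%base + %index"
// where the index has a single use, a normal two-operand add is preferred
// instead.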
// Return true if Addr is suitable for AM, updating AM if so.
bool SystemZDAGToDAGISel::selectAddress(SDValue Addr,
                                        SystemZAddressingMode &AM) const {
  // Start out assuming that the address will need to be loaded separately,
  // then try to extend it as much as we can.
  AM.Base = Addr;

  // First try treating the address as a constant.
  if (Addr.getOpcode() == ISD::Constant &&
      expandDisp(AM, true, SDValue(),
                 cast<ConstantSDNode>(Addr)->getSExtValue()))
    ;
  // Also see if it's a bare ADJDYNALLOC.
  else if (Addr.getOpcode() == SystemZISD::ADJDYNALLOC &&
           expandAdjDynAlloc(AM, true, SDValue()))
    ;
  else
    // Otherwise try expanding each component.
    while (expandAddress(AM, true) ||
           (AM.Index.getNode() && expandAddress(AM, false)))
      continue;

  // Reject cases where it isn't profitable to use LA(Y).
  if (AM.Form == SystemZAddressingMode::FormBDXLA &&
      !shouldUseLA(AM.Base.getNode(), AM.Disp, AM.Index.getNode()))
    return false;

  // Reject cases where the other instruction in a pair should be used.
  if (!isValidDisp(AM.DR, AM.Disp))
    return false;

  // Make sure that ADJDYNALLOC is included where necessary.
  if (AM.isDynAlloc() && !AM.IncludesDynAlloc)
    return false;

  LLVM_DEBUG(AM.dump(CurDAG));
  return true;
}
// Insert a node into the DAG at least before Pos.  This will reposition
// the node as needed, and will assign it a node ID that is <= Pos's ID.
// Note that this does *not* preserve the uniqueness of node IDs!
// The selection DAG must no longer depend on their uniqueness when this
// function is used.
static void insertDAGNode(SelectionDAG *DAG, SDNode *Pos, SDValue N) {
  if (N->getNodeId() == -1 ||
      (SelectionDAGISel::getUninvalidatedNodeId(N.getNode()) >
       SelectionDAGISel::getUninvalidatedNodeId(Pos))) {
    DAG->RepositionNode(Pos->getIterator(), N.getNode());
    // Mark Node as invalid for pruning as after this it may be a successor to a
    // selected node but otherwise be in the same position of Pos.
    // Conservatively mark it with the same -abs(Id) to assure node id
    // invariant is preserved.
    N->setNodeId(Pos->getNodeId());
    SelectionDAGISel::InvalidateNodeId(N.getNode());
  }
}
void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
                                             EVT VT, SDValue &Base,
                                             SDValue &Disp) const {
  Base = AM.Base;
  if (!Base.getNode())
    // Register 0 means "no base".  This is mostly useful for shifts.
    Base = CurDAG->getRegister(0, VT);
  else if (Base.getOpcode() == ISD::FrameIndex) {
    // Lower a FrameIndex to a TargetFrameIndex.
    int64_t FrameIndex = cast<FrameIndexSDNode>(Base)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FrameIndex, VT);
  } else if (Base.getValueType() != VT) {
    // Truncate values from i64 to i32, for shifts.
    assert(VT == MVT::i32 && Base.getValueType() == MVT::i64 &&
           "Unexpected truncation");
    SDLoc DL(Base);
    SDValue Trunc = CurDAG->getNode(ISD::TRUNCATE, DL, VT, Base);
    insertDAGNode(CurDAG, Base.getNode(), Trunc);
    Base = Trunc;
  }

  // Lower the displacement to a TargetConstant.
  Disp = CurDAG->getTargetConstant(AM.Disp, SDLoc(Base), VT);
}

void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
                                             EVT VT, SDValue &Base,
                                             SDValue &Disp,
                                             SDValue &Index) const {
  getAddressOperands(AM, VT, Base, Disp);

  Index = AM.Index;
  if (!Index.getNode())
    // Register 0 means "no index".
    Index = CurDAG->getRegister(0, VT);
}
bool SystemZDAGToDAGISel::selectBDAddr(SystemZAddressingMode::DispRange DR,
                                       SDValue Addr, SDValue &Base,
                                       SDValue &Disp) const {
  SystemZAddressingMode AM(SystemZAddressingMode::FormBD, DR);
  if (!selectAddress(Addr, AM))
    return false;

  getAddressOperands(AM, Addr.getValueType(), Base, Disp);
  return true;
}

bool SystemZDAGToDAGISel::selectMVIAddr(SystemZAddressingMode::DispRange DR,
                                        SDValue Addr, SDValue &Base,
                                        SDValue &Disp) const {
  SystemZAddressingMode AM(SystemZAddressingMode::FormBDXNormal, DR);
  if (!selectAddress(Addr, AM) || AM.Index.getNode())
    return false;

  getAddressOperands(AM, Addr.getValueType(), Base, Disp);
  return true;
}

bool SystemZDAGToDAGISel::selectBDXAddr(SystemZAddressingMode::AddrForm Form,
                                        SystemZAddressingMode::DispRange DR,
                                        SDValue Addr, SDValue &Base,
                                        SDValue &Disp, SDValue &Index) const {
  SystemZAddressingMode AM(Form, DR);
  if (!selectAddress(Addr, AM))
    return false;

  getAddressOperands(AM, Addr.getValueType(), Base, Disp, Index);
  return true;
}
bool SystemZDAGToDAGISel::selectBDVAddr12Only(SDValue Addr, SDValue Elem,
                                              SDValue &Base,
                                              SDValue &Disp,
                                              SDValue &Index) const {
  SDValue Regs[2];
  if (selectBDXAddr12Only(Addr, Regs[0], Disp, Regs[1]) &&
      Regs[0].getNode() && Regs[1].getNode()) {
    for (unsigned int I = 0; I < 2; ++I) {
      Base = Regs[I];
      Index = Regs[1 - I];
      // We can't tell here whether the index vector has the right type
      // for the access; the caller needs to do that instead.
      if (Index.getOpcode() == ISD::ZERO_EXTEND)
        Index = Index.getOperand(0);
      if (Index.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          Index.getOperand(1) == Elem) {
        Index = Index.getOperand(0);
        return true;
      }
    }
  }
  return false;
}
bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op,
                                               uint64_t InsertMask) const {
  // We're only interested in cases where the insertion is into some operand
  // of Op, rather than into Op itself.  The only useful case is an AND.
  if (Op.getOpcode() != ISD::AND)
    return false;

  // We need a constant mask.
  auto *MaskNode = dyn_cast<ConstantSDNode>(Op.getOperand(1).getNode());
  if (!MaskNode)
    return false;

  // It's not an insertion of Op.getOperand(0) if the two masks overlap.
  uint64_t AndMask = MaskNode->getZExtValue();
  if (InsertMask & AndMask)
    return false;

  // It's only an insertion if all bits are covered or are known to be zero.
  // The inner check covers all cases but is more expensive.
  uint64_t Used = allOnes(Op.getValueSizeInBits());
  if (Used != (AndMask | InsertMask)) {
    KnownBits Known = CurDAG->computeKnownBits(Op.getOperand(0));
    if (Used != (AndMask | InsertMask | Known.Zero.getZExtValue()))
      return false;
  }

  Op = Op.getOperand(0);
  return true;
}
bool SystemZDAGToDAGISel::refineRxSBGMask(RxSBGOperands &RxSBG,
                                          uint64_t Mask) const {
  const SystemZInstrInfo *TII = getInstrInfo();
  if (RxSBG.Rotate != 0)
    Mask = (Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate));
  Mask &= RxSBG.Mask;
  if (TII->isRxSBGMask(Mask, RxSBG.BitSize, RxSBG.Start, RxSBG.End)) {
    RxSBG.Mask = Mask;
    return true;
  }
  return false;
}
// Return true if any bits of (RxSBG.Input & Mask) are significant.
static bool maskMatters(RxSBGOperands &RxSBG, uint64_t Mask) {
  // Rotate the mask in the same way as RxSBG.Input is rotated.
  if (RxSBG.Rotate != 0)
    Mask = ((Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate)));
  return (Mask & RxSBG.Mask) != 0;
}
bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
  SDValue N = RxSBG.Input;
  unsigned Opcode = N.getOpcode();
  switch (Opcode) {
  case ISD::TRUNCATE: {
    if (RxSBG.Opcode == SystemZ::RNSBG)
      return false;
    uint64_t BitSize = N.getValueSizeInBits();
    uint64_t Mask = allOnes(BitSize);
    if (!refineRxSBGMask(RxSBG, Mask))
      return false;
    RxSBG.Input = N.getOperand(0);
    return true;
  }

  case ISD::AND: {
    if (RxSBG.Opcode == SystemZ::RNSBG)
      return false;

    auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!MaskNode)
      return false;

    SDValue Input = N.getOperand(0);
    uint64_t Mask = MaskNode->getZExtValue();
    if (!refineRxSBGMask(RxSBG, Mask)) {
      // If some bits of Input are already known zeros, those bits will have
      // been removed from the mask.  See if adding them back in makes the
      // mask suitable.
      KnownBits Known = CurDAG->computeKnownBits(Input);
      Mask |= Known.Zero.getZExtValue();
      if (!refineRxSBGMask(RxSBG, Mask))
        return false;
    }
    RxSBG.Input = Input;
    return true;
  }

  case ISD::OR: {
    if (RxSBG.Opcode != SystemZ::RNSBG)
      return false;

    auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!MaskNode)
      return false;

    SDValue Input = N.getOperand(0);
    uint64_t Mask = ~MaskNode->getZExtValue();
    if (!refineRxSBGMask(RxSBG, Mask)) {
      // If some bits of Input are already known ones, those bits will have
      // been removed from the mask.  See if adding them back in makes the
      // mask suitable.
      KnownBits Known = CurDAG->computeKnownBits(Input);
      Mask &= ~Known.One.getZExtValue();
      if (!refineRxSBGMask(RxSBG, Mask))
        return false;
    }
    RxSBG.Input = Input;
    return true;
  }

  case ISD::ROTL: {
    // Any 64-bit rotate left can be merged into the RxSBG.
    if (RxSBG.BitSize != 64 || N.getValueType() != MVT::i64)
      return false;
    auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!CountNode)
      return false;

    RxSBG.Rotate = (RxSBG.Rotate + CountNode->getZExtValue()) & 63;
    RxSBG.Input = N.getOperand(0);
    return true;
  }

  case ISD::ANY_EXTEND:
    // Bits above the extended operand are don't-care.
    RxSBG.Input = N.getOperand(0);
    return true;

  case ISD::ZERO_EXTEND:
    if (RxSBG.Opcode != SystemZ::RNSBG) {
      // Restrict the mask to the extended operand.
      unsigned InnerBitSize = N.getOperand(0).getValueSizeInBits();
      if (!refineRxSBGMask(RxSBG, allOnes(InnerBitSize)))
        return false;

      RxSBG.Input = N.getOperand(0);
      return true;
    }
    LLVM_FALLTHROUGH;

  case ISD::SIGN_EXTEND: {
    // Check that the extension bits are don't-care (i.e. are masked out
    // by the final mask).
    unsigned BitSize = N.getValueSizeInBits();
    unsigned InnerBitSize = N.getOperand(0).getValueSizeInBits();
    if (maskMatters(RxSBG, allOnes(BitSize) - allOnes(InnerBitSize))) {
      // In the case where only the sign bit is active, increase Rotate with
      // the extension width.
      if (RxSBG.Mask == 1 && RxSBG.Rotate == 1)
        RxSBG.Rotate += (BitSize - InnerBitSize);
      else
        return false;
    }

    RxSBG.Input = N.getOperand(0);
    return true;
  }

  case ISD::SHL: {
    auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!CountNode)
      return false;

    uint64_t Count = CountNode->getZExtValue();
    unsigned BitSize = N.getValueSizeInBits();
    if (Count < 1 || Count >= BitSize)
      return false;

    if (RxSBG.Opcode == SystemZ::RNSBG) {
      // Treat (shl X, count) as (rotl X, size-count) as long as the bottom
      // count bits from RxSBG.Input are ignored.
      if (maskMatters(RxSBG, allOnes(Count)))
        return false;
    } else {
      // Treat (shl X, count) as (and (rotl X, count), ~0<<count).
      if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count) << Count))
        return false;
    }

    RxSBG.Rotate = (RxSBG.Rotate + Count) & 63;
    RxSBG.Input = N.getOperand(0);
    return true;
  }

  case ISD::SRL:
  case ISD::SRA: {
    auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!CountNode)
      return false;

    uint64_t Count = CountNode->getZExtValue();
    unsigned BitSize = N.getValueSizeInBits();
    if (Count < 1 || Count >= BitSize)
      return false;

    if (RxSBG.Opcode == SystemZ::RNSBG || Opcode == ISD::SRA) {
      // Treat (srl|sra X, count) as (rotl X, size-count) as long as the top
      // count bits from RxSBG.Input are ignored.
      if (maskMatters(RxSBG, allOnes(Count) << (BitSize - Count)))
        return false;
    } else {
      // Treat (srl X, count) as (and (rotl X, size-count), ~0>>count),
      // which is similar to SLL above.
      if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count)))
        return false;
    }

    RxSBG.Rotate = (RxSBG.Rotate - Count) & 63;
    RxSBG.Input = N.getOperand(0);
    return true;
  }

  default:
    return false;
  }
}
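// Illustrative expansion (a sketch of the rules above, not extra selector
// logic): starting from RxSBG.Input = (srl (and X, 0xff00), 8) on i64, the
// SRL case sets Rotate = (-8) & 63 = 56 and keeps Mask = allOnes(56); the
// AND case then refines Mask to 0xff00 rotated left by 56, i.e. 0xff, and
// leaves Input = X.  The final description is equivalent to
// (and (rotl X, 56), 0xff), which maps directly onto a RISBG encoding.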
SDValue SystemZDAGToDAGISel::getUNDEF(const SDLoc &DL, EVT VT) const {
  SDNode *N = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
  return SDValue(N, 0);
}

SDValue SystemZDAGToDAGISel::convertTo(const SDLoc &DL, EVT VT,
                                       SDValue N) const {
  if (N.getValueType() == MVT::i32 && VT == MVT::i64)
    return CurDAG->getTargetInsertSubreg(SystemZ::subreg_l32,
                                         DL, VT, getUNDEF(DL, MVT::i64), N);
  if (N.getValueType() == MVT::i64 && VT == MVT::i32)
    return CurDAG->getTargetExtractSubreg(SystemZ::subreg_l32, DL, VT, N);
  assert(N.getValueType() == VT && "Unexpected value types");
  return N;
}
bool SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  if (!VT.isInteger() || VT.getSizeInBits() > 64)
    return false;
  RxSBGOperands RISBG(SystemZ::RISBG, SDValue(N, 0));
  unsigned Count = 0;
  while (expandRxSBG(RISBG))
    // The widening or narrowing is expected to be free.
    // Counting widening or narrowing as a saved operation will result in
    // preferring an R*SBG over a simple shift/logical instruction.
    if (RISBG.Input.getOpcode() != ISD::ANY_EXTEND &&
        RISBG.Input.getOpcode() != ISD::TRUNCATE)
      Count += 1;
  if (Count == 0)
    return false;

  // Prefer to use normal shift instructions over RISBG, since they can handle
  // all cases and are sometimes shorter.
  if (Count == 1 && N->getOpcode() != ISD::AND)
    return false;

  // Prefer register extensions like LLC over RISBG.  Also prefer to start
  // out with normal ANDs if one instruction would be enough.  We can convert
  // these ANDs into an RISBG later if a three-address instruction is useful.
  if (RISBG.Rotate == 0) {
    bool PreferAnd = false;
    // Prefer AND for any 32-bit and-immediate operation.
    if (VT == MVT::i32)
      PreferAnd = true;
    // As well as for any 64-bit operation that can be implemented via LLC(R),
    // LLH(R), LLGT(R), or one of the and-immediate instructions.
    else if (RISBG.Mask == 0xff ||
             RISBG.Mask == 0xffff ||
             RISBG.Mask == 0x7fffffff ||
             SystemZ::isImmLF(~RISBG.Mask) ||
             SystemZ::isImmHF(~RISBG.Mask))
      PreferAnd = true;
    // And likewise for the LLZRGF instruction, which doesn't have a register
    // to register version.
    else if (auto *Load = dyn_cast<LoadSDNode>(RISBG.Input)) {
      if (Load->getMemoryVT() == MVT::i32 &&
          (Load->getExtensionType() == ISD::EXTLOAD ||
           Load->getExtensionType() == ISD::ZEXTLOAD) &&
          RISBG.Mask == 0xffffff00 &&
          Subtarget->hasLoadAndZeroRightmostByte())
        PreferAnd = true;
    }
    if (PreferAnd) {
      // Replace the current node with an AND.  Note that the current node
      // might already be that same AND, in which case it is already CSE'd
      // with it, and we must not call ReplaceNode.
      SDValue In = convertTo(DL, VT, RISBG.Input);
      SDValue Mask = CurDAG->getConstant(RISBG.Mask, DL, VT);
      SDValue New = CurDAG->getNode(ISD::AND, DL, VT, In, Mask);
      if (N != New.getNode()) {
        insertDAGNode(CurDAG, N, Mask);
        insertDAGNode(CurDAG, N, New);
        ReplaceNode(N, New.getNode());
        N = New.getNode();
      }
      // Now, select the machine opcode to implement this operation.
      if (!N->isMachineOpcode())
        SelectCode(N);
      return true;
    }
  }

  unsigned Opcode = SystemZ::RISBG;
  // Prefer RISBGN if available, since it does not clobber CC.
  if (Subtarget->hasMiscellaneousExtensions())
    Opcode = SystemZ::RISBGN;
  EVT OpcodeVT = MVT::i64;
  if (VT == MVT::i32 && Subtarget->hasHighWord() &&
      // We can only use the 32-bit instructions if all source bits are
      // in the low 32 bits without wrapping, both after rotation (because
      // of the smaller range for Start and End) and before rotation
      // (because the input value is truncated).
      RISBG.Start >= 32 && RISBG.End >= RISBG.Start &&
      ((RISBG.Start + RISBG.Rotate) & 63) >= 32 &&
      ((RISBG.End + RISBG.Rotate) & 63) >=
      ((RISBG.Start + RISBG.Rotate) & 63)) {
    Opcode = SystemZ::RISBMux;
    OpcodeVT = MVT::i32;
    RISBG.Start &= 31;
    RISBG.End &= 31;
  }
  SDValue Ops[5] = {
    getUNDEF(DL, OpcodeVT),
    convertTo(DL, OpcodeVT, RISBG.Input),
    CurDAG->getTargetConstant(RISBG.Start, DL, MVT::i32),
    CurDAG->getTargetConstant(RISBG.End | 128, DL, MVT::i32),
    CurDAG->getTargetConstant(RISBG.Rotate, DL, MVT::i32)
  };
  SDValue New = convertTo(
      DL, VT, SDValue(CurDAG->getMachineNode(Opcode, DL, OpcodeVT, Ops), 0));
  ReplaceNode(N, New.getNode());
  return true;
}
bool SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  if (!VT.isInteger() || VT.getSizeInBits() > 64)
    return false;
  // Try treating each operand of N as the second operand of the RxSBG
  // and see which goes deepest.
  RxSBGOperands RxSBG[] = {
    RxSBGOperands(Opcode, N->getOperand(0)),
    RxSBGOperands(Opcode, N->getOperand(1))
  };
  unsigned Count[] = { 0, 0 };
  for (unsigned I = 0; I < 2; ++I)
    while (expandRxSBG(RxSBG[I]))
      // The widening or narrowing is expected to be free.
      // Counting widening or narrowing as a saved operation will result in
      // preferring an R*SBG over a simple shift/logical instruction.
      if (RxSBG[I].Input.getOpcode() != ISD::ANY_EXTEND &&
          RxSBG[I].Input.getOpcode() != ISD::TRUNCATE)
        Count[I] += 1;

  // Do nothing if neither operand is suitable.
  if (Count[0] == 0 && Count[1] == 0)
    return false;

  // Pick the deepest second operand.
  unsigned I = Count[0] > Count[1] ? 0 : 1;
  SDValue Op0 = N->getOperand(I ^ 1);

  // Prefer IC for character insertions from memory.
  if (Opcode == SystemZ::ROSBG && (RxSBG[I].Mask & 0xff) == 0)
    if (auto *Load = dyn_cast<LoadSDNode>(Op0.getNode()))
      if (Load->getMemoryVT() == MVT::i8)
        return false;

  // See whether we can avoid an AND in the first operand by converting
  // ROSBG to RISBG.
  if (Opcode == SystemZ::ROSBG && detectOrAndInsertion(Op0, RxSBG[I].Mask)) {
    Opcode = SystemZ::RISBG;
    // Prefer RISBGN if available, since it does not clobber CC.
    if (Subtarget->hasMiscellaneousExtensions())
      Opcode = SystemZ::RISBGN;
  }

  SDValue Ops[5] = {
    convertTo(DL, MVT::i64, Op0),
    convertTo(DL, MVT::i64, RxSBG[I].Input),
    CurDAG->getTargetConstant(RxSBG[I].Start, DL, MVT::i32),
    CurDAG->getTargetConstant(RxSBG[I].End, DL, MVT::i32),
    CurDAG->getTargetConstant(RxSBG[I].Rotate, DL, MVT::i32)
  };
  SDValue New = convertTo(
      DL, VT, SDValue(CurDAG->getMachineNode(Opcode, DL, MVT::i64, Ops), 0));
  ReplaceNode(N, New.getNode());
  return true;
}
void SystemZDAGToDAGISel::splitLargeImmediate(unsigned Opcode, SDNode *Node,
                                              SDValue Op0, uint64_t UpperVal,
                                              uint64_t LowerVal) {
  EVT VT = Node->getValueType(0);
  SDLoc DL(Node);
  SDValue Upper = CurDAG->getConstant(UpperVal, DL, VT);
  if (Op0.getNode())
    Upper = CurDAG->getNode(Opcode, DL, VT, Op0, Upper);

  {
    // When we haven't passed in Op0, Upper will be a constant. In order to
    // prevent folding back to the large immediate in `Or = getNode(...)` we run
    // SelectCode first and end up with an opaque machine node. This means that
    // we need to use a handle to keep track of Upper in case it gets CSE'd by
    // SelectCode.
    //
    // Note that in the case where Op0 is passed in we could just call
    // SelectCode(Upper) later, along with the SelectCode(Or), and avoid needing
    // the handle at all, but it's fine to do it here.
    //
    // TODO: This is a pretty hacky way to do this. Can we do something that
    // doesn't require a two paragraph explanation?
    HandleSDNode Handle(Upper);
    SelectCode(Upper.getNode());
    Upper = Handle.getValue();
  }

  SDValue Lower = CurDAG->getConstant(LowerVal, DL, VT);
  SDValue Or = CurDAG->getNode(Opcode, DL, VT, Upper, Lower);

  ReplaceNode(Node, Or.getNode());

  SelectCode(Or.getNode());
}
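// Illustrative use (a sketch mirroring the callers in Select() below): for a
// 64-bit constant such as 0x0001000200030004, which cannot be loaded with
// LLILF, LLIHF or LGFI, Select() passes
//
//   UpperVal = Val - uint32_t(Val) = 0x0001000200000000
//   LowerVal = uint32_t(Val)       = 0x0000000000030004
//
// so the value is materialized as (or UpperVal LowerVal), which later
// selects to an immediate load of the high half plus an OR-immediate of the
// low half (e.g. LLIHF followed by OILF).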
void SystemZDAGToDAGISel::loadVectorConstant(
    const SystemZVectorConstantInfo &VCI, SDNode *Node) {
  assert((VCI.Opcode == SystemZISD::BYTE_MASK ||
          VCI.Opcode == SystemZISD::REPLICATE ||
          VCI.Opcode == SystemZISD::ROTATE_MASK) &&
         "Bad opcode!");
  assert(VCI.VecVT.getSizeInBits() == 128 && "Expected a vector type");
  EVT VT = Node->getValueType(0);
  SDLoc DL(Node);
  SmallVector<SDValue, 2> Ops;
  for (unsigned OpVal : VCI.OpVals)
    Ops.push_back(CurDAG->getTargetConstant(OpVal, DL, MVT::i32));
  SDValue Op = CurDAG->getNode(VCI.Opcode, DL, VCI.VecVT, Ops);

  if (VCI.VecVT == VT.getSimpleVT())
    ReplaceNode(Node, Op.getNode());
  else if (VT.getSizeInBits() == 128) {
    SDValue BitCast = CurDAG->getNode(ISD::BITCAST, DL, VT, Op);
    ReplaceNode(Node, BitCast.getNode());
    SelectCode(BitCast.getNode());
  } else { // float or double
    unsigned SubRegIdx =
        (VT.getSizeInBits() == 32 ? SystemZ::subreg_h32 : SystemZ::subreg_h64);
    ReplaceNode(
        Node, CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, Op).getNode());
  }
  SelectCode(Op.getNode());
}
bool SystemZDAGToDAGISel::tryGather(SDNode *N, unsigned Opcode) {
  SDValue ElemV = N->getOperand(2);
  auto *ElemN = dyn_cast<ConstantSDNode>(ElemV);
  if (!ElemN)
    return false;

  unsigned Elem = ElemN->getZExtValue();
  EVT VT = N->getValueType(0);
  if (Elem >= VT.getVectorNumElements())
    return false;

  auto *Load = dyn_cast<LoadSDNode>(N->getOperand(1));
  if (!Load || !Load->hasNUsesOfValue(1, 0))
    return false;
  if (Load->getMemoryVT().getSizeInBits() !=
      Load->getValueType(0).getSizeInBits())
    return false;

  SDValue Base, Disp, Index;
  if (!selectBDVAddr12Only(Load->getBasePtr(), ElemV, Base, Disp, Index) ||
      Index.getValueType() != VT.changeVectorElementTypeToInteger())
    return false;

  SDLoc DL(Load);
  SDValue Ops[] = {
    N->getOperand(0), Base, Disp, Index,
    CurDAG->getTargetConstant(Elem, DL, MVT::i32), Load->getChain()
  };
  SDNode *Res = CurDAG->getMachineNode(Opcode, DL, VT, MVT::Other, Ops);
  ReplaceUses(SDValue(Load, 1), SDValue(Res, 1));
  ReplaceNode(N, Res);
  return true;
}
bool SystemZDAGToDAGISel::tryScatter(StoreSDNode *Store, unsigned Opcode) {
  SDValue Value = Store->getValue();
  if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return false;
  if (Store->getMemoryVT().getSizeInBits() != Value.getValueSizeInBits())
    return false;

  SDValue ElemV = Value.getOperand(1);
  auto *ElemN = dyn_cast<ConstantSDNode>(ElemV);
  if (!ElemN)
    return false;

  SDValue Vec = Value.getOperand(0);
  EVT VT = Vec.getValueType();
  unsigned Elem = ElemN->getZExtValue();
  if (Elem >= VT.getVectorNumElements())
    return false;

  SDValue Base, Disp, Index;
  if (!selectBDVAddr12Only(Store->getBasePtr(), ElemV, Base, Disp, Index) ||
      Index.getValueType() != VT.changeVectorElementTypeToInteger())
    return false;

  SDLoc DL(Store);
  SDValue Ops[] = {
    Vec, Base, Disp, Index, CurDAG->getTargetConstant(Elem, DL, MVT::i32),
    Store->getChain()
  };
  ReplaceNode(Store, CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops));
  return true;
}
// Check whether or not the chain ending in StoreNode is suitable for doing
// the {load; op; store} to modify transformation.
static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode,
                                        SDValue StoredVal, SelectionDAG *CurDAG,
                                        LoadSDNode *&LoadNode,
                                        SDValue &InputChain) {
  // Is the stored value result 0 of the operation?
  if (StoredVal.getResNo() != 0)
    return false;

  // Are there other uses of the loaded value than the operation?
  if (!StoredVal.getNode()->hasNUsesOfValue(1, 0))
    return false;

  // Is the store non-extending and non-indexed?
  if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
    return false;

  SDValue Load = StoredVal->getOperand(0);
  // Is the stored value a non-extending and non-indexed load?
  if (!ISD::isNormalLoad(Load.getNode()))
    return false;

  // Return LoadNode by reference.
  LoadNode = cast<LoadSDNode>(Load);

  // Is store the only read of the loaded value?
  if (!Load.hasOneUse())
    return false;

  // Is the address of the store the same as the load?
  if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
      LoadNode->getOffset() != StoreNode->getOffset())
    return false;

  // Check if the chain is produced by the load or is a TokenFactor with
  // the load output chain as an operand.  Return InputChain by reference.
  SDValue Chain = StoreNode->getChain();

  bool ChainCheck = false;
  if (Chain == Load.getValue(1)) {
    ChainCheck = true;
    InputChain = LoadNode->getChain();
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    SmallVector<SDValue, 4> ChainOps;
    SmallVector<const SDNode *, 4> LoopWorklist;
    SmallPtrSet<const SDNode *, 16> Visited;
    const unsigned int Max = 1024;
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
      SDValue Op = Chain.getOperand(i);
      if (Op == Load.getValue(1)) {
        ChainCheck = true;
        // Drop Load, but keep its chain. No cycle check necessary.
        ChainOps.push_back(Load.getOperand(0));
        continue;
      }
      LoopWorklist.push_back(Op.getNode());
      ChainOps.push_back(Op);
    }

    if (!ChainCheck)
      return false;

    // Add the other operand of StoredVal to worklist.
    for (SDValue Op : StoredVal->ops())
      if (Op.getNode() != LoadNode)
        LoopWorklist.push_back(Op.getNode());

    // Check if Load is reachable from any of the nodes in the worklist.
    if (SDNode::hasPredecessorHelper(Load.getNode(), Visited, LoopWorklist, Max,
                                     true))
      return false;

    // Make a new TokenFactor with all the other input chains except
    // for the load.
    InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain),
                                 MVT::Other, ChainOps);
  }

  if (!ChainCheck)
    return false;

  return true;
}
// Change a chain of {load; op; store} of the same value into a simple op
// through memory of that value, if the uses of the modified value and its
// address are suitable.
//
// The tablegen memory operand pattern is currently not able to match the
// case where the CC result of the original operation is used.
//
// See the equivalent routine in X86ISelDAGToDAG for further comments.
bool SystemZDAGToDAGISel::tryFoldLoadStoreIntoMemOperand(SDNode *Node) {
  StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
  SDValue StoredVal = StoreNode->getOperand(1);
  unsigned Opc = StoredVal->getOpcode();
  SDLoc DL(StoreNode);

  // Before we try to select anything, make sure this is memory operand size
  // and opcode we can handle. Note that this must match the code below that
  // actually lowers the opcodes.
  EVT MemVT = StoreNode->getMemoryVT();
  unsigned NewOpc = 0;
  bool NegateOperand = false;
  switch (Opc) {
  default:
    return false;
  case SystemZISD::SSUBO:
    NegateOperand = true;
    LLVM_FALLTHROUGH;
  case SystemZISD::SADDO:
    if (MemVT == MVT::i32)
      NewOpc = SystemZ::ASI;
    else if (MemVT == MVT::i64)
      NewOpc = SystemZ::AGSI;
    else
      return false;
    break;
  case SystemZISD::USUBO:
    NegateOperand = true;
    LLVM_FALLTHROUGH;
  case SystemZISD::UADDO:
    if (MemVT == MVT::i32)
      NewOpc = SystemZ::ALSI;
    else if (MemVT == MVT::i64)
      NewOpc = SystemZ::ALGSI;
    else
      return false;
    break;
  }

  LoadSDNode *LoadNode = nullptr;
  SDValue InputChain;
  if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadNode,
                                   InputChain))
    return false;

  SDValue Operand = StoredVal.getOperand(1);
  auto *OperandC = dyn_cast<ConstantSDNode>(Operand);
  if (!OperandC)
    return false;
  auto OperandV = OperandC->getAPIntValue();
  if (NegateOperand)
    OperandV = -OperandV;
  if (OperandV.getMinSignedBits() > 8)
    return false;
  Operand = CurDAG->getTargetConstant(OperandV, DL, MemVT);

  SDValue Base, Disp;
  if (!selectBDAddr20Only(StoreNode->getBasePtr(), Base, Disp))
    return false;

  SDValue Ops[] = { Base, Disp, Operand, InputChain };
  MachineSDNode *Result =
      CurDAG->getMachineNode(NewOpc, DL, MVT::i32, MVT::Other, Ops);
  CurDAG->setNodeMemRefs(
      Result, {StoreNode->getMemOperand(), LoadNode->getMemOperand()});

  ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
  ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
  CurDAG->RemoveDeadNode(Node);
  return true;
}
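// Illustrative effect (a sketch of what the routine above enables): a chain
// of the form
//
//   (store (SystemZISD::SADDO (load [Base+Disp]), 1), [Base+Disp])
//
// in which the loaded value has no other uses and the immediate fits in a
// signed 8-bit field is replaced by a single ASI (or AGSI/ALSI/ALGSI),
// adding the immediate directly in memory.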
bool SystemZDAGToDAGISel::canUseBlockOperation(StoreSDNode *Store,
                                               LoadSDNode *Load) const {
  // Check that the two memory operands have the same size.
  if (Load->getMemoryVT() != Store->getMemoryVT())
    return false;

  // Volatility stops an access from being decomposed.
  if (Load->isVolatile() || Store->isVolatile())
    return false;

  // There's no chance of overlap if the load is invariant.
  if (Load->isInvariant() && Load->isDereferenceable())
    return true;

  // Otherwise we need to check whether there's an alias.
  const Value *V1 = Load->getMemOperand()->getValue();
  const Value *V2 = Store->getMemOperand()->getValue();
  if (!V1 || !V2)
    return false;

  uint64_t Size = Load->getMemoryVT().getStoreSize();
  int64_t End1 = Load->getSrcValueOffset() + Size;
  int64_t End2 = Store->getSrcValueOffset() + Size;
  if (V1 == V2 && End1 == End2)
    return false;

  return !AA->alias(MemoryLocation(V1, End1, Load->getAAInfo()),
                    MemoryLocation(V2, End2, Store->getAAInfo()));
}
bool SystemZDAGToDAGISel::storeLoadCanUseMVC(SDNode *N) const {
  auto *Store = cast<StoreSDNode>(N);
  auto *Load = cast<LoadSDNode>(Store->getValue());

  // Prefer not to use MVC if either address can use ... RELATIVE LONG
  // instructions.
  uint64_t Size = Load->getMemoryVT().getStoreSize();
  if (Size > 1 && Size <= 8) {
    // Prefer LHRL, LRL and LGRL.
    if (SystemZISD::isPCREL(Load->getBasePtr().getOpcode()))
      return false;
    // Prefer STHRL, STRL and STGRL.
    if (SystemZISD::isPCREL(Store->getBasePtr().getOpcode()))
      return false;
  }

  return canUseBlockOperation(Store, Load);
}
bool SystemZDAGToDAGISel::storeLoadCanUseBlockBinary(SDNode *N,
                                                     unsigned I) const {
  auto *StoreA = cast<StoreSDNode>(N);
  auto *LoadA = cast<LoadSDNode>(StoreA->getValue().getOperand(1 - I));
  auto *LoadB = cast<LoadSDNode>(StoreA->getValue().getOperand(I));
  return !LoadA->isVolatile() && canUseBlockOperation(StoreA, LoadB);
}
void SystemZDAGToDAGISel::Select(SDNode *Node) {
  // If we have a custom node, we already have selected!
  if (Node->isMachineOpcode()) {
    LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
    Node->setNodeId(-1);
    return;
  }

  unsigned Opcode = Node->getOpcode();
  switch (Opcode) {
  case ISD::OR:
    if (Node->getOperand(1).getOpcode() != ISD::Constant)
      if (tryRxSBG(Node, SystemZ::ROSBG))
        return;
    goto or_xor;

  case ISD::XOR:
    if (Node->getOperand(1).getOpcode() != ISD::Constant)
      if (tryRxSBG(Node, SystemZ::RXSBG))
        return;
    // Fall through.
  or_xor:
    // If this is a 64-bit operation in which both 32-bit halves are nonzero,
    // split the operation into two.  If both operands here happen to be
    // constant, leave this to common code to optimize.
    if (Node->getValueType(0) == MVT::i64 &&
        Node->getOperand(0).getOpcode() != ISD::Constant)
      if (auto *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
        uint64_t Val = Op1->getZExtValue();
        // Don't split the operation if we can match one of the combined
        // logical operations provided by miscellaneous-extensions-3.
        if (Subtarget->hasMiscellaneousExtensions3()) {
          unsigned ChildOpcode = Node->getOperand(0).getOpcode();
          // Check whether this expression matches NAND/NOR/NXOR.
          if (Val == (uint64_t)-1 && Opcode == ISD::XOR)
            if (ChildOpcode == ISD::AND || ChildOpcode == ISD::OR ||
                ChildOpcode == ISD::XOR)
              break;
          // Check whether this expression matches OR-with-complement.
          if (Opcode == ISD::OR && ChildOpcode == ISD::XOR) {
            auto Op0 = Node->getOperand(0);
            if (auto *Op0Op1 = dyn_cast<ConstantSDNode>(Op0->getOperand(1)))
              if (Op0Op1->getZExtValue() == (uint64_t)-1)
                break;
          }
        }
        if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val)) {
          splitLargeImmediate(Opcode, Node, Node->getOperand(0),
                              Val - uint32_t(Val), uint32_t(Val));
          return;
        }
      }
    break;

  case ISD::AND:
    if (Node->getOperand(1).getOpcode() != ISD::Constant)
      if (tryRxSBG(Node, SystemZ::RNSBG))
        return;
    LLVM_FALLTHROUGH;
  case ISD::ROTL:
  case ISD::SHL:
  case ISD::SRL:
  case ISD::ZERO_EXTEND:
    if (tryRISBGZero(Node))
      return;
    break;

  case ISD::Constant:
    // If this is a 64-bit constant that is out of the range of LLILF,
    // LLIHF and LGFI, split it into two 32-bit pieces.
    if (Node->getValueType(0) == MVT::i64) {
      uint64_t Val = cast<ConstantSDNode>(Node)->getZExtValue();
      if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val) && !isInt<32>(Val)) {
        splitLargeImmediate(ISD::OR, Node, SDValue(), Val - uint32_t(Val),
                            uint32_t(Val));
        return;
      }
    }
    break;

  case SystemZISD::SELECT_CCMASK: {
    SDValue Op0 = Node->getOperand(0);
    SDValue Op1 = Node->getOperand(1);
    // Prefer to put any load first, so that it can be matched as a
    // conditional load.  Likewise for constants in range for LOCHI.
    if ((Op1.getOpcode() == ISD::LOAD && Op0.getOpcode() != ISD::LOAD) ||
        (Subtarget->hasLoadStoreOnCond2() &&
         Node->getValueType(0).isInteger() &&
         Op1.getOpcode() == ISD::Constant &&
         isInt<16>(cast<ConstantSDNode>(Op1)->getSExtValue()) &&
         !(Op0.getOpcode() == ISD::Constant &&
           isInt<16>(cast<ConstantSDNode>(Op0)->getSExtValue())))) {
      SDValue CCValid = Node->getOperand(2);
      SDValue CCMask = Node->getOperand(3);
      uint64_t ConstCCValid =
          cast<ConstantSDNode>(CCValid.getNode())->getZExtValue();
      uint64_t ConstCCMask =
          cast<ConstantSDNode>(CCMask.getNode())->getZExtValue();
      // Invert the condition.
      CCMask = CurDAG->getTargetConstant(ConstCCValid ^ ConstCCMask,
                                         SDLoc(Node), CCMask.getValueType());
      SDValue Op4 = Node->getOperand(4);
      SDNode *UpdatedNode =
          CurDAG->UpdateNodeOperands(Node, Op1, Op0, CCValid, CCMask, Op4);
      if (UpdatedNode != Node) {
        // In case this node already exists then replace Node with it.
        ReplaceNode(Node, UpdatedNode);
        Node = UpdatedNode;
      }
    }
    break;
  }

  case ISD::INSERT_VECTOR_ELT: {
    EVT VT = Node->getValueType(0);
    unsigned ElemBitSize = VT.getScalarSizeInBits();
    if (ElemBitSize == 32) {
      if (tryGather(Node, SystemZ::VGEF))
        return;
    } else if (ElemBitSize == 64) {
      if (tryGather(Node, SystemZ::VGEG))
        return;
    }
    break;
  }

  case ISD::BUILD_VECTOR: {
    auto *BVN = cast<BuildVectorSDNode>(Node);
    SystemZVectorConstantInfo VCI(BVN);
    if (VCI.isVectorConstantLegal(*Subtarget)) {
      loadVectorConstant(VCI, Node);
      return;
    }
    break;
  }

  case ISD::ConstantFP: {
    APFloat Imm = cast<ConstantFPSDNode>(Node)->getValueAPF();
    if (Imm.isZero() || Imm.isNegZero())
      break;
    SystemZVectorConstantInfo VCI(Imm);
    bool Success = VCI.isVectorConstantLegal(*Subtarget); (void)Success;
    assert(Success && "Expected legal FP immediate");
    loadVectorConstant(VCI, Node);
    return;
  }

  case ISD::STORE: {
    if (tryFoldLoadStoreIntoMemOperand(Node))
      return;
    auto *Store = cast<StoreSDNode>(Node);
    unsigned ElemBitSize = Store->getValue().getValueSizeInBits();
    if (ElemBitSize == 32) {
      if (tryScatter(Store, SystemZ::VSCEF))
        return;
    } else if (ElemBitSize == 64) {
      if (tryScatter(Store, SystemZ::VSCEG))
        return;
    }
    break;
  }
  }

  SelectCode(Node);
}
bool SystemZDAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op,
                             unsigned ConstraintID,
                             std::vector<SDValue> &OutOps) {
  SystemZAddressingMode::AddrForm Form;
  SystemZAddressingMode::DispRange DispRange;
  SDValue Base, Disp, Index;

  switch(ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::Constraint_i:
  case InlineAsm::Constraint_Q:
    // Accept an address with a short displacement, but no index.
    Form = SystemZAddressingMode::FormBD;
    DispRange = SystemZAddressingMode::Disp12Only;
    break;
  case InlineAsm::Constraint_R:
    // Accept an address with a short displacement and an index.
    Form = SystemZAddressingMode::FormBDXNormal;
    DispRange = SystemZAddressingMode::Disp12Only;
    break;
  case InlineAsm::Constraint_S:
    // Accept an address with a long displacement, but no index.
    Form = SystemZAddressingMode::FormBD;
    DispRange = SystemZAddressingMode::Disp20Only;
    break;
  case InlineAsm::Constraint_T:
  case InlineAsm::Constraint_m:
  case InlineAsm::Constraint_o:
    // Accept an address with a long displacement and an index.
    // m works the same as T, as this is the most general case.
    // We don't really have any special handling of "offsettable"
    // memory addresses, so just treat o the same as m.
    Form = SystemZAddressingMode::FormBDXNormal;
    DispRange = SystemZAddressingMode::Disp20Only;
    break;
  }

  if (selectBDXAddr(Form, DispRange, Op, Base, Disp, Index)) {
    const TargetRegisterClass *TRC =
        Subtarget->getRegisterInfo()->getPointerRegClass(*MF);
    SDLoc DL(Base);
    SDValue RC = CurDAG->getTargetConstant(TRC->getID(), DL, MVT::i32);

    // Make sure that the base address doesn't go into %r0.
    // If it's a TargetFrameIndex or a fixed register, we shouldn't do anything.
    if (Base.getOpcode() != ISD::TargetFrameIndex &&
        Base.getOpcode() != ISD::Register) {
      Base =
          SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                         DL, Base.getValueType(),
                                         Base, RC), 0);
    }

    // Make sure that the index register isn't assigned to %r0 either.
    if (Index.getOpcode() != ISD::Register) {
      Index =
          SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                         DL, Index.getValueType(),
                                         Index, RC), 0);
    }

    OutOps.push_back(Base);
    OutOps.push_back(Disp);
    OutOps.push_back(Index);
    return false;
  }

  return true;
}
// IsProfitableToFold - Returns true if it is profitable to fold the specific
// operand node N of U during instruction selection that starts at Root.
bool
SystemZDAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U,
                                        SDNode *Root) const {
  // We want to avoid folding a LOAD into an ICMP node if as a result
  // we would be forced to spill the condition code into a GPR.
  if (N.getOpcode() == ISD::LOAD && U->getOpcode() == SystemZISD::ICMP) {
    if (!N.hasOneUse() || !U->hasOneUse())
      return false;

    // The user of the CC value will usually be a CopyToReg into the
    // physical CC register, which in turn is glued and chained to the
    // actual instruction that uses the CC value.  Bail out if we have
    // anything else than that.
    SDNode *CCUser = *U->use_begin();
    SDNode *CCRegUser = nullptr;
    if (CCUser->getOpcode() == ISD::CopyToReg ||
        cast<RegisterSDNode>(CCUser->getOperand(1))->getReg() == SystemZ::CC) {
      for (auto *U : CCUser->uses()) {
        if (CCRegUser == nullptr)
          CCRegUser = U;
        else if (CCRegUser != U)
          return false;
      }
    }
    if (CCRegUser == nullptr)
      return false;

    // If the actual instruction is a branch, the only thing that remains to be
    // checked is whether the CCUser chain is a predecessor of the load.
    if (CCRegUser->isMachineOpcode() &&
        CCRegUser->getMachineOpcode() == SystemZ::BRC)
      return !N->isPredecessorOf(CCUser->getOperand(0).getNode());

    // Otherwise, the instruction may have multiple operands, and we need to
    // verify that none of them are a predecessor of the load.  This is exactly
    // the same check that would be done by common code if the CC setter were
    // glued to the CC user, so simply invoke that check here.
    if (!IsLegalToFold(N, U, CCRegUser, OptLevel, false))
      return false;
  }

  return true;
}
namespace {
// Represents a sequence for extracting a 0/1 value from an IPM result:
// (((X ^ XORValue) + AddValue) >> Bit)
struct IPMConversion {
  IPMConversion(unsigned xorValue, int64_t addValue, unsigned bit)
      : XORValue(xorValue), AddValue(addValue), Bit(bit) {}

  int64_t XORValue;
  int64_t AddValue;
  unsigned Bit;
};
} // end anonymous namespace
// Return a sequence for getting a 1 from an IPM result when CC has a
// value in CCMask and a 0 when CC has a value in CCValid & ~CCMask.
// The handling of CC values outside CCValid doesn't matter.
static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
  // Deal with cases where the result can be taken directly from a bit
  // of the IPM result.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC);
  if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC + 1);

  // Deal with cases where we can add a value to force the sign bit
  // to contain the right value.  Putting the bit in 31 means we can
  // use SRL rather than RISBG(L), and also makes it easier to get a
  // 0/-1 value, so it has priority over the other tests below.
  //
  // These sequences rely on the fact that the upper two bits of the
  // IPM result are zero.
  uint64_t TopBit = uint64_t(1) << 31;
  if (CCMask == (CCValid & SystemZ::CCMASK_0))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
    return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2)))
    return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_3))
    return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);

  // Next try inverting the value and testing a bit.  0/1 could be
  // handled this way too, but we dealt with that case above.
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
    return IPMConversion(-1, 0, SystemZ::IPM_CC);

  // Handle cases where adding a value forces a non-sign bit to contain
  // the right value.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
    return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);

  // The remaining cases are 1, 2, 0/1/3 and 0/2/3.  All of these can be
  // done by inverting the low CC bit and applying one of the sign-based
  // extractions above.
  if (CCMask == (CCValid & SystemZ::CCMASK_1))
    return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_2))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (1 << SystemZ::IPM_CC), 31);

  llvm_unreachable("Unexpected CC combination");
}
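// Worked example (illustrative only): with CCValid covering CC 0, 1 and 2
// and CCMask == CCMASK_0, the function returns
// IPMConversion(0, -(1 << IPM_CC), 31).  IPM places the condition code at
// bit IPM_CC of the result (bits 28-29), so adding -(1 << IPM_CC) borrows
// into bit 31 exactly when CC == 0, and the final ">> 31" in
// (((X ^ XORValue) + AddValue) >> Bit) yields 1 for CC 0 and 0 for CC 1 or 2.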
SDValue SystemZDAGToDAGISel::expandSelectBoolean(SDNode *Node) {
  auto *TrueOp = dyn_cast<ConstantSDNode>(Node->getOperand(0));
  auto *FalseOp = dyn_cast<ConstantSDNode>(Node->getOperand(1));
  if (!TrueOp || !FalseOp)
    return SDValue();
  if (FalseOp->getZExtValue() != 0)
    return SDValue();
  if (TrueOp->getSExtValue() != 1 && TrueOp->getSExtValue() != -1)
    return SDValue();

  auto *CCValidOp = dyn_cast<ConstantSDNode>(Node->getOperand(2));
  auto *CCMaskOp = dyn_cast<ConstantSDNode>(Node->getOperand(3));
  if (!CCValidOp || !CCMaskOp)
    return SDValue();
  int CCValid = CCValidOp->getZExtValue();
  int CCMask = CCMaskOp->getZExtValue();

  SDLoc DL(Node);
  SDValue CCReg = Node->getOperand(4);
  IPMConversion IPM = getIPMConversion(CCValid, CCMask);
  SDValue Result = CurDAG->getNode(SystemZISD::IPM, DL, MVT::i32, CCReg);

  if (IPM.XORValue)
    Result = CurDAG->getNode(ISD::XOR, DL, MVT::i32, Result,
                             CurDAG->getConstant(IPM.XORValue, DL, MVT::i32));

  if (IPM.AddValue)
    Result = CurDAG->getNode(ISD::ADD, DL, MVT::i32, Result,
                             CurDAG->getConstant(IPM.AddValue, DL, MVT::i32));

  EVT VT = Node->getValueType(0);
  if (VT == MVT::i32 && IPM.Bit == 31) {
    unsigned ShiftOp = TrueOp->getSExtValue() == 1 ? ISD::SRL : ISD::SRA;
    Result = CurDAG->getNode(ShiftOp, DL, MVT::i32, Result,
                             CurDAG->getConstant(IPM.Bit, DL, MVT::i32));
  } else {
    if (VT != MVT::i32)
      Result = CurDAG->getNode(ISD::ANY_EXTEND, DL, VT, Result);

    if (TrueOp->getSExtValue() == 1) {
      // The SHR/AND sequence should get optimized to an RISBG.
      Result = CurDAG->getNode(ISD::SRL, DL, VT, Result,
                               CurDAG->getConstant(IPM.Bit, DL, MVT::i32));
      Result = CurDAG->getNode(ISD::AND, DL, VT, Result,
                               CurDAG->getConstant(1, DL, VT));
    } else {
      // Sign-extend from IPM.Bit using a pair of shifts.
      int ShlAmt = VT.getSizeInBits() - 1 - IPM.Bit;
      int SraAmt = VT.getSizeInBits() - 1;
      Result = CurDAG->getNode(ISD::SHL, DL, VT, Result,
                               CurDAG->getConstant(ShlAmt, DL, MVT::i32));
      Result = CurDAG->getNode(ISD::SRA, DL, VT, Result,
                               CurDAG->getConstant(SraAmt, DL, MVT::i32));
    }
  }

  return Result;
}
void SystemZDAGToDAGISel::PreprocessISelDAG() {
  // If we have conditional immediate loads, we always prefer
  // using those over an IPM sequence.
  if (Subtarget->hasLoadStoreOnCond2())
    return;

  bool MadeChange = false;

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
                                       E = CurDAG->allnodes_end();
       I != E;) {
    SDNode *N = &*I++;
    if (N->use_empty())
      continue;

    SDValue Res;
    switch (N->getOpcode()) {
    default:
      break;
    case SystemZISD::SELECT_CCMASK:
      Res = expandSelectBoolean(N);
      break;
    }

    if (Res) {
      LLVM_DEBUG(dbgs() << "SystemZ DAG preprocessing replacing:\nOld: ");
      LLVM_DEBUG(N->dump(CurDAG));
      LLVM_DEBUG(dbgs() << "\nNew: ");
      LLVM_DEBUG(Res.getNode()->dump(CurDAG));
      LLVM_DEBUG(dbgs() << "\n");

      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      MadeChange = true;
    }
  }

  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}