//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include <cctype>

using namespace llvm;

#define DEBUG_TYPE "systemz-lower"

namespace {
// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In)
    : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value.  Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
  unsigned CCMask;
};
} // end anonymous namespace
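
// Illustrative example: for an integer equality test, the lowering code would
// typically fill this in with CCValid = SystemZ::CCMASK_ICMP (every CC value a
// compare instruction can produce) and CCMask = SystemZ::CCMASK_CMP_EQ (the CC
// value meaning "equal"); the exact values depend on the comparison being
// lowered.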

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
                                             const SystemZSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize(0));

  // Set up the register classes.
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass);
  } else {
    addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  }
  if (Subtarget.hasVectorEnhancements1())
    addRegisterClass(MVT::f128, &SystemZ::VR128BitRegClass);
  else
    addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v8i16, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4i32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2i64, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4f32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2f64, &SystemZ::VR128BitRegClass);
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  // Set up special registers.
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);
  // For performance reasons we prefer 16-byte alignment.
  setPrefFunctionAlignment(4);

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Support addition/subtraction with overflow.
      setOperationAction(ISD::SADDO, VT, Custom);
      setOperationAction(ISD::SSUBO, VT, Custom);

      // Support addition/subtraction with carry.
      setOperationAction(ISD::UADDO, VT, Custom);
      setOperationAction(ISD::USUBO, VT, Custom);

      // Support carry in as value rather than glue.
      setOperationAction(ISD::ADDCARRY, VT, Custom);
      setOperationAction(ISD::SUBCARRY, VT, Custom);

      // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
      // stores, putting a serialization instruction after the stores.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);

      // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
      // available, or if the operand is constant.
      setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);

      // Use POPCNT on z196 and above.
      if (Subtarget.hasPopulationCount())
        setOperationAction(ISD::CTPOP, VT, Custom);
      else
        setOperationAction(ISD::CTPOP, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // Only z196 and above have native support for conversions to unsigned.
      // On z10, promoting to i64 doesn't generate an inexact condition for
      // values that are outside the i32 range but in the i64 range, so use
      // the default expansion.
      if (!Subtarget.hasFPExtension())
        setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);

  // Even though i128 is not a legal type, we still need to custom lower
  // the atomic operations in order to exploit SystemZ instructions.
  setOperationAction(ISD::ATOMIC_LOAD,  MVT::i128, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);

  // We can use the CC result of compare-and-swap to implement
  // the "success" result of ATOMIC_CMP_SWAP_WITH_SUCCESS.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Traps are legal, as we will convert them to "j .+2".
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // z10 has instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  if (!Subtarget.hasFPExtension()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  }

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD,  VT, MVT::i1, Promote);
  }

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool,     PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress,    PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress,     PtrVT, Custom);
  setOperationAction(ISD::JumpTable,        PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE,    MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  for (MVT VT : MVT::vector_valuetypes()) {
    // Assume by default that all vector operations need to be expanded.
    for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
      if (getOperationAction(Opcode, VT) == Legal)
        setOperationAction(Opcode, VT, Expand);

    // Likewise all truncating stores and extending loads.
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::EXTLOAD,  VT, InnerVT, Expand);
    }

    if (isTypeLegal(VT)) {
      // These operations are legal for anything that can be stored in a
      // vector register, even if there is no native support for the format
      // as such.  In particular, we can do these for v4f32 even though there
      // are no specific instructions for that format.
      setOperationAction(ISD::LOAD, VT, Legal);
      setOperationAction(ISD::STORE, VT, Legal);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::BITCAST, VT, Legal);
      setOperationAction(ISD::UNDEF, VT, Legal);

      // Likewise, except that we need to replace the nodes with something
      // custom.
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    }
  }

  // Handle integer vector types.
  for (MVT VT : MVT::integer_vector_valuetypes()) {
    if (isTypeLegal(VT)) {
      // These operations have direct equivalents.
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);
      if (VT != MVT::v2i64)
        setOperationAction(ISD::MUL, VT, Legal);
      setOperationAction(ISD::AND, VT, Legal);
      setOperationAction(ISD::OR, VT, Legal);
      setOperationAction(ISD::XOR, VT, Legal);
      if (Subtarget.hasVectorEnhancements1())
        setOperationAction(ISD::CTPOP, VT, Legal);
      else
        setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTTZ, VT, Legal);
      setOperationAction(ISD::CTLZ, VT, Legal);

      // Convert a GPR scalar to a vector by inserting it into element 0.
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);

      // Use a series of unpacks for extensions.
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);

      // Detect shifts by a scalar amount and convert them into
      // V*_BY_SCALAR.
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
      setOperationAction(ISD::SRL, VT, Custom);

      // At present ROTL isn't matched by DAGCombiner.  ROTR should be
      // converted into ROTL.
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands
      // and inverting the result as necessary.
      setOperationAction(ISD::SETCC, VT, Custom);
    }
  }

  if (Subtarget.hasVector()) {
    // There should be no need to check for float types other than v2f64
    // since <2 x f32> isn't a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2f64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal);
  }

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
    }
  }

  // Handle floating-point vector types.
  if (Subtarget.hasVector()) {
    // Scalar-to-vector conversion is just a subreg.
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);

    // Some insertions and extractions can be done directly but others
    // need to go via integers.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    // These operations have direct equivalents.
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FABS, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

    // The vector enhancements facility 1 has instructions for these.
    if (Subtarget.hasVectorEnhancements1()) {
      setOperationAction(ISD::FADD, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
      setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
      setOperationAction(ISD::FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
      setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
      setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
      setOperationAction(ISD::FMAXNAN, MVT::f64, Legal);
      setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
      setOperationAction(ISD::FMINNAN, MVT::f64, Legal);

      setOperationAction(ISD::FMAXNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::FMAXNAN, MVT::v2f64, Legal);
      setOperationAction(ISD::FMINNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::FMINNAN, MVT::v2f64, Legal);

      setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
      setOperationAction(ISD::FMAXNAN, MVT::f32, Legal);
      setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
      setOperationAction(ISD::FMINNAN, MVT::f32, Legal);

      setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::FMAXNAN, MVT::v4f32, Legal);
      setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::FMINNAN, MVT::v4f32, Legal);

      setOperationAction(ISD::FMAXNUM, MVT::f128, Legal);
      setOperationAction(ISD::FMAXNAN, MVT::f128, Legal);
      setOperationAction(ISD::FMINNUM, MVT::f128, Legal);
      setOperationAction(ISD::FMINNAN, MVT::f128, Legal);
    }
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FMA, MVT::f128, Legal);
  else
    setOperationAction(ISD::FMA, MVT::f128, Expand);

  // We don't have a copysign instruction on vector registers.
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of a f80 constant (in cases where the constant
  // would fit in an f80).
  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);

  // We don't have an extending load instruction on vector registers.
  if (Subtarget.hasVectorEnhancements1()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
  }

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64,  MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit values.
  if (!Subtarget.hasVector()) {
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::f32, Custom);
  }

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY,  MVT::Other, Custom);
  setOperationAction(ISD::VAEND,   MVT::Other, Expand);

  // Codes for which we want to perform some z-specific combinations.
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::FP_ROUND);
  setTargetDAGCombine(ISD::BSWAP);

  // Handle intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable.  E.g.  "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC".  Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}
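
// Illustrative note: setting MaxStoresPerMemcpy/MaxStoresPerMemset to 0 keeps
// the generic store-based expansion out of the picture, so even a small
// fixed-size call such as memcpy(Dst, Src, 7) is expected to reach the
// target-specific lowering and typically become a single MVC rather than a
// handful of scalar loads and stores.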

EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL,
                                              LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return Subtarget.hasVectorEnhancements1();
  default:
    break;
  }

  return false;
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  return Imm.isZero() || Imm.isNegZero();
}

bool SystemZTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // We can use CGFI or CLGFI.
  return isInt<32>(Imm) || isUInt<32>(Imm);
}
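
// For example, comparisons against 0x7fffffff (CGFI) or 0xffffffff (CLGFI)
// are accepted here, while a comparison against 0x100000000 is not, since that
// value fits neither a signed nor an unsigned 32-bit immediate field.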

bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // We can use ALGFI or SLGFI.
  return isUInt<32>(Imm) || isUInt<32>(-Imm);
}
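
// For example, adding 5 (ALGFI) or -5 (SLGFI 5) is fine, but adding
// 0x100000000 is not, because neither the value nor its negation fits in an
// unsigned 32-bit immediate.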

bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                           unsigned,
                                                           unsigned,
                                                           bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

// Information about the addressing mode for a memory access.
struct AddressingMode {
  // True if a long displacement is supported.
  bool LongDisplacement;

  // True if use of index register is supported.
  bool IndexReg;

  AddressingMode(bool LongDispl, bool IdxReg) :
    LongDisplacement(LongDispl), IndexReg(IdxReg) {}
};

// Return the desired addressing mode for a Load which has only one use (in
// the same block) which is a Store.
static AddressingMode getLoadStoreAddrMode(bool HasVector,
                                           Type *Ty) {
  // With vector support a Load->Store combination may be combined to either
  // an MVC or vector operations and it seems to work best to allow the
  // vector addressing mode.
  if (HasVector)
    return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);

  // Otherwise only the MVC case is special.
  bool MVC = Ty->isIntegerTy(8);
  return AddressingMode(!MVC/*LongDispl*/, !MVC/*IdxReg*/);
}

// Return the addressing mode which seems most desirable given an LLVM
// Instruction pointer.
static AddressingMode
supportedAddressingMode(Instruction *I, bool HasVector) {
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
    }
  }

  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = dyn_cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&
              (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
            // Comparison of memory with 16 bit signed / unsigned immediate
            return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
      } else if (isa<StoreInst>(SingleUser))
        return getLoadStoreAddrMode(HasVector, I->getType());
    }
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
        return getLoadStoreAddrMode(HasVector, LoadI->getType());
  }

  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {

    // * Use LDE instead of LE/LEY for z13 to avoid partial register
    //   dependencies (LDE only supports small offsets).
    // * Utilize the vector registers to hold floating point
    //   values (vector load / store instructions only support small
    //   offsets).

    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                         I->getOperand(0)->getType());
    bool IsFPAccess = MemAccessTy->isFloatingPointTy();
    bool IsVectorAccess = MemAccessTy->isVectorTy();

    // A store of an extracted vector element will be combined into a VSTE type
    // instruction.
    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;
    }

    // A load which gets inserted into a vector element will be combined into a
    // VLE type instruction.
    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;
    }

    if (IsFPAccess || IsVectorAccess)
      return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);
  }

  return AddressingMode(true/*LongDispl*/, true/*IdxReg*/);
}
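
// Illustrative example: without vector support, an i8 load whose only user is
// a store in the same block, e.g.
//   %val = load i8, i8* %src
//   store i8 %val, i8* %dst
// is expected to be matched as an MVC, so the short-displacement, no-index
// addressing mode is requested for it; with vector support the index register
// is allowed instead.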

bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
       const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  AddressingMode SupportedAM(true, true);
  if (I != nullptr)
    SupportedAM = supportedAddressingMode(I, Subtarget.hasVector());

  if (!SupportedAM.LongDisplacement && !isUInt<12>(AM.BaseOffs))
    return false;

  if (!SupportedAM.IndexReg)
    // No indexing allowed.
    return AM.Scale == 0;

  // Indexing is OK but no scale factor can be applied.
  return AM.Scale == 0 || AM.Scale == 1;
}
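
// In other words, the supported forms are a base register plus a displacement
// (20-bit signed, or 12-bit unsigned when only short displacements apply),
// optionally plus an unscaled index register; a scaled index such as
// base + 4*index (AM.Scale == 4) is always rejected.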

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}
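
// For example, truncating an i64 value to i32 is considered free: the result
// is simply the low 32 bits, which on SystemZ is just the 32-bit subregister
// of the 64-bit GPR.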

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
    case 'v': // Vector register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'v': // Vector register
    if ((type->isVectorTy() || type->isFloatingPointTy()) &&
        Subtarget.hasVector())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified.  RC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC,
                    const unsigned *Map, unsigned Size) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    unsigned Index;
    bool Failed =
      Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index);
    if (!Failed && Index < Size && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0U, nullptr);
}
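
// Illustrative example: for the constraint "{r5}" with a 64-bit operand, the
// caller below passes SystemZMC::GR64Regs, so Index is parsed as 5 and the
// function is expected to return the pair (SystemZ::R5D, GR64BitRegClass).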

std::pair<unsigned, const TargetRegisterClass *>
SystemZTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);

    case 'v': // Vector register
      if (Subtarget.hasVector()) {
        if (VT == MVT::f32)
          return std::make_pair(0U, &SystemZ::VR32BitRegClass);
        if (VT == MVT::f64)
          return std::make_pair(0U, &SystemZ::VR64BitRegClass);
        return std::make_pair(0U, &SystemZ::VR128BitRegClass);
      }
      break;
    }
  }
  if (Constraint.size() > 0 && Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT.  The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs, 16);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs, 16);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs, 16);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs, 16);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs, 16);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs, 16);
    }
    if (Constraint[1] == 'v') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::VR32BitRegClass,
                                   SystemZMC::VR32Regs, 32);
      if (VT == MVT::f64)
        return parseRegisterNumber(Constraint, &SystemZ::VR64BitRegClass,
                                   SystemZMC::VR64Regs, 32);
      return parseRegisterNumber(Constraint, &SystemZ::VR128BitRegClass,
                                 SystemZMC::VR128Regs, 32);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
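
// Illustrative example: for inline assembly such as
//   asm("lgr %0,%1" : "=r"(Dst) : "r"(Src));
// with 64-bit operands, the 'r' constraint above selects GR64BitRegClass,
// while the same constraint with 32-bit operands falls back to
// GR32BitRegClass.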

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

const MCPhysReg *SystemZTargetLowering::getScratchRegisters(
  CallingConv::ID) const {
  static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
                                           SystemZ::R14D, 0 };
  return ScratchRegs;
}

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  return CI->isTailCall();
}

// We do not yet support 128-bit single-element vector types.  If the user
// attempts to use such types as function argument or return type, prefer
// to error out instead of emitting code violating the ABI.
static void VerifyVectorType(MVT VT, EVT ArgVT) {
  if (ArgVT.isVector() && !VT.isVector())
    report_fatal_error("Unsupported vector argument or return type");
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::InputArg> &Ins) {
  for (unsigned i = 0; i < Ins.size(); ++i)
    VerifyVectorType(Ins[i].VT, Ins[i].ArgVT);
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::OutputArg> &Outs) {
  for (unsigned i = 0; i < Outs.size(); ++i)
    VerifyVectorType(Outs[i].VT, Outs[i].ArgVT);
}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()).  Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::BCvt) {
    // If this is a short vector argument loaded from the stack,
    // extend from i64 to full vector size and then bitcast.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)});
    Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value);
  } else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA.  Return a copy of Value converted to
// VA.getLocVT().  The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::BCvt:
    // If this is a short vector argument to be stored to the stack,
    // bitcast to v2i64 and then extract first element.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Value);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VA.getLocVT(), Value,
                       DAG.getConstant(0, DL, MVT::i32));
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

SDValue SystemZTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
      MF.getInfo<SystemZMachineFunctionInfo>();
  auto *TFL =
      static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Detect unsupported vector argument types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Ins);

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        RC = &SystemZ::VR128BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.  Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
                          DAG.getIntPtrConstant(4, DL));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(MF, FI));
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    if (VA.getLocInfo() == CCValAssign::Indirect) {
      InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
                                   MachinePointerInfo()));
      // If the original argument was split (e.g. i128), we need
      // to load all parts of it here (using the same address).
      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert (Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        CCValAssign &PartVA = ArgLocs[I + 1];
        unsigned PartOffset = Ins[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                     MachinePointerInfo()));
        ++I;
      }
    } else
      InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be.  The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI.CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots.  (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI.CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(MF, FI));
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          makeArrayRef(&MemOps[NumFixedFPRs],
                                       SystemZ::NumArgFPRs-NumFixedFPRs));
    }
  }

  return Chain;
}

static bool canUseSiblingCall(const CCState &ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs,
                              SmallVectorImpl<ISD::OutputArg> &Outs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the callee-saved argument register R6, or if the call uses
  // the callee-saved register arguments SwiftSelf and SwiftError.
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    unsigned Reg = VA.getLocReg();
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
      return false;
    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
      return false;
  }
  return true;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Detect unsupported vector argument and return types.
  if (Subtarget.hasVector()) {
    VerifyVectorTypes(Outs);
    VerifyVectorTypes(Ins);
  }

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(Outs[I].ArgVT);
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                       MachinePointerInfo::getFixedStack(MF, FI)));
      // If the original argument was split (e.g. i128), we need
      // to store all parts of it here (and pass just one address).
      unsigned ArgIndex = Outs[I].OrigArgIndex;
      assert (Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI)));
        ++I;
      }
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.  Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset, DL));

      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.  Force %r1 to be used for indirect
  // calls.
  SDValue Glue;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

bool SystemZTargetLowering::
CanLowerReturn(CallingConv::ID CallConv,
               MachineFunction &MF, bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Special case that we cannot easily detect in RetCC_SystemZ since
  // i128 is not a legal type.
  for (auto &Out : Outs)
    if (Out.ArgVT == MVT::i128)
      return false;

  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    unsigned Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
}

// Return true if Op is an intrinsic node with chain that returns the CC value
// as its only (other) argument.  Provide the associated SystemZISD opcode and
// the mask of valid CC values if so.
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode,
                                      unsigned &CCValid) {
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  switch (Id) {
  case Intrinsic::s390_tbegin:
    Opcode = SystemZISD::TBEGIN;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tbegin_nofloat:
    Opcode = SystemZISD::TBEGIN_NOFLOAT;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tend:
    Opcode = SystemZISD::TEND;
    CCValid = SystemZ::CCMASK_TEND;
    return true;

  default:
    return false;
  }
}

// Return true if Op is an intrinsic node without chain that returns the
// CC value as its final argument.  Provide the associated SystemZISD
// opcode and the mask of valid CC values if so.
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) {
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (Id) {
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
    Opcode = SystemZISD::PACKS_CC;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
    Opcode = SystemZISD::PACKLS_CC;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vceqbs:
  case Intrinsic::s390_vceqhs:
  case Intrinsic::s390_vceqfs:
  case Intrinsic::s390_vceqgs:
    Opcode = SystemZISD::VICMPES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vchbs:
  case Intrinsic::s390_vchhs:
  case Intrinsic::s390_vchfs:
  case Intrinsic::s390_vchgs:
    Opcode = SystemZISD::VICMPHS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vchlbs:
  case Intrinsic::s390_vchlhs:
  case Intrinsic::s390_vchlfs:
  case Intrinsic::s390_vchlgs:
    Opcode = SystemZISD::VICMPHLS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vtm:
    Opcode = SystemZISD::VTM;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfaebs:
  case Intrinsic::s390_vfaehs:
  case Intrinsic::s390_vfaefs:
    Opcode = SystemZISD::VFAE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfaezbs:
  case Intrinsic::s390_vfaezhs:
  case Intrinsic::s390_vfaezfs:
    Opcode = SystemZISD::VFAEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfeebs:
  case Intrinsic::s390_vfeehs:
  case Intrinsic::s390_vfeefs:
    Opcode = SystemZISD::VFEE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfeezbs:
  case Intrinsic::s390_vfeezhs:
  case Intrinsic::s390_vfeezfs:
    Opcode = SystemZISD::VFEEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfenebs:
  case Intrinsic::s390_vfenehs:
  case Intrinsic::s390_vfenefs:
    Opcode = SystemZISD::VFENE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfenezbs:
  case Intrinsic::s390_vfenezhs:
  case Intrinsic::s390_vfenezfs:
    Opcode = SystemZISD::VFENEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vistrbs:
  case Intrinsic::s390_vistrhs:
  case Intrinsic::s390_vistrfs:
    Opcode = SystemZISD::VISTR_CC;
    CCValid = SystemZ::CCMASK_0 | SystemZ::CCMASK_3;
    return true;

  case Intrinsic::s390_vstrcbs:
  case Intrinsic::s390_vstrchs:
  case Intrinsic::s390_vstrcfs:
    Opcode = SystemZISD::VSTRC_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vstrczbs:
  case Intrinsic::s390_vstrczhs:
  case Intrinsic::s390_vstrczfs:
    Opcode = SystemZISD::VSTRCZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfcedbs:
  case Intrinsic::s390_vfcesbs:
    Opcode = SystemZISD::VFCMPES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfchdbs:
  case Intrinsic::s390_vfchsbs:
    Opcode = SystemZISD::VFCMPHS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfchedbs:
  case Intrinsic::s390_vfchesbs:
    Opcode = SystemZISD::VFCMPHES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vftcidb:
  case Intrinsic::s390_vftcisb:
    Opcode = SystemZISD::VFTCI;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_tdc:
    Opcode = SystemZISD::TDC;
    CCValid = SystemZ::CCMASK_TDC;
    return true;

  default:
    return false;
  }
}
// Emit an intrinsic with chain and an explicit CC register result.
static SDNode *emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op,
                                           unsigned Opcode) {
  // Copy all operands except the intrinsic ID.
  unsigned NumOps = Op.getNumOperands();
  SmallVector<SDValue, 6> Ops;
  Ops.reserve(NumOps - 1);
  Ops.push_back(Op.getOperand(0));
  for (unsigned I = 2; I < NumOps; ++I)
    Ops.push_back(Op.getOperand(I));

  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
  SDVTList RawVTs = DAG.getVTList(MVT::i32, MVT::Other);
  SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops);
  SDValue OldChain = SDValue(Op.getNode(), 1);
  SDValue NewChain = SDValue(Intr.getNode(), 1);
  DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
  return Intr.getNode();
}
// Emit an intrinsic with an explicit CC register result.
static SDNode *emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op,
                                   unsigned Opcode) {
  // Copy all operands except the intrinsic ID.
  unsigned NumOps = Op.getNumOperands();
  SmallVector<SDValue, 6> Ops;
  Ops.reserve(NumOps - 1);
  for (unsigned I = 1; I < NumOps; ++I)
    Ops.push_back(Op.getOperand(I));

  SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), Op->getVTList(), Ops);
  return Intr.getNode();
}
// CC is a comparison that will be implemented using an integer or
// floating-point comparison.  Return the condition code mask for
// a branch on true.  In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones.  In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}
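// Example of the mapping above: ISD::SETLT yields SystemZ::CCMASK_CMP_LT,
// while ISD::SETULT yields SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_LT.
// For integer comparisons getCmp() later clears CCMASK_CMP_UO again; for
// floating-point comparisons the bit keeps its "unordered" meaning.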
// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
  if (C.ICmpType == SystemZICMP::UnsignedOnly)
    return;

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1)
    return;

  int64_t Value = ConstOp1->getSExtValue();
  if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
      (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
    C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType());
  }
}
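// Worked example: "x > -1" arrives here as CCMASK_CMP_GT with Value == -1;
// XORing in CCMASK_CMP_EQ turns it into CCMASK_CMP_GE against the new
// constant 0, i.e. "x >= 0", so the comparison can use the cheaper
// compare-with-zero forms (such as LOAD AND TEST).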
1755 // If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
1756 // adjust the operands as necessary.
1757 static void adjustSubwordCmp(SelectionDAG
&DAG
, const SDLoc
&DL
,
1759 // For us to make any changes, it must a comparison between a single-use
1760 // load and a constant.
1761 if (!C
.Op0
.hasOneUse() ||
1762 C
.Op0
.getOpcode() != ISD::LOAD
||
1763 C
.Op1
.getOpcode() != ISD::Constant
)
1766 // We must have an 8- or 16-bit load.
1767 auto *Load
= cast
<LoadSDNode
>(C
.Op0
);
1768 unsigned NumBits
= Load
->getMemoryVT().getStoreSizeInBits();
1769 if (NumBits
!= 8 && NumBits
!= 16)
1772 // The load must be an extending one and the constant must be within the
1773 // range of the unextended value.
1774 auto *ConstOp1
= cast
<ConstantSDNode
>(C
.Op1
);
1775 uint64_t Value
= ConstOp1
->getZExtValue();
1776 uint64_t Mask
= (1 << NumBits
) - 1;
1777 if (Load
->getExtensionType() == ISD::SEXTLOAD
) {
1778 // Make sure that ConstOp1 is in range of C.Op0.
1779 int64_t SignedValue
= ConstOp1
->getSExtValue();
1780 if (uint64_t(SignedValue
) + (uint64_t(1) << (NumBits
- 1)) > Mask
)
1782 if (C
.ICmpType
!= SystemZICMP::SignedOnly
) {
1783 // Unsigned comparison between two sign-extended values is equivalent
1784 // to unsigned comparison between two zero-extended values.
1786 } else if (NumBits
== 8) {
1787 // Try to treat the comparison as unsigned, so that we can use CLI.
1788 // Adjust CCMask and Value as necessary.
1789 if (Value
== 0 && C
.CCMask
== SystemZ::CCMASK_CMP_LT
)
1790 // Test whether the high bit of the byte is set.
1791 Value
= 127, C
.CCMask
= SystemZ::CCMASK_CMP_GT
;
1792 else if (Value
== 0 && C
.CCMask
== SystemZ::CCMASK_CMP_GE
)
1793 // Test whether the high bit of the byte is clear.
1794 Value
= 128, C
.CCMask
= SystemZ::CCMASK_CMP_LT
;
1796 // No instruction exists for this combination.
1798 C
.ICmpType
= SystemZICMP::UnsignedOnly
;
1800 } else if (Load
->getExtensionType() == ISD::ZEXTLOAD
) {
1803 // If the constant is in range, we can use any comparison.
1804 C
.ICmpType
= SystemZICMP::Any
;
1808 // Make sure that the first operand is an i32 of the right extension type.
1809 ISD::LoadExtType ExtType
= (C
.ICmpType
== SystemZICMP::SignedOnly
?
1812 if (C
.Op0
.getValueType() != MVT::i32
||
1813 Load
->getExtensionType() != ExtType
) {
1814 C
.Op0
= DAG
.getExtLoad(ExtType
, SDLoc(Load
), MVT::i32
, Load
->getChain(),
1815 Load
->getBasePtr(), Load
->getPointerInfo(),
1816 Load
->getMemoryVT(), Load
->getAlignment(),
1817 Load
->getMemOperand()->getFlags());
1818 // Update the chain uses.
1819 DAG
.ReplaceAllUsesOfValueWith(SDValue(Load
, 1), C
.Op0
.getValue(1));
1822 // Make sure that the second operand is an i32 with the right value.
1823 if (C
.Op1
.getValueType() != MVT::i32
||
1824 Value
!= ConstOp1
->getZExtValue())
1825 C
.Op1
= DAG
.getConstant(Value
, DL
, MVT::i32
);
1828 // Return true if Op is either an unextended load, or a load suitable
1829 // for integer register-memory comparisons of type ICmpType.
1830 static bool isNaturalMemoryOperand(SDValue Op
, unsigned ICmpType
) {
1831 auto *Load
= dyn_cast
<LoadSDNode
>(Op
.getNode());
1833 // There are no instructions to compare a register with a memory byte.
1834 if (Load
->getMemoryVT() == MVT::i8
)
1836 // Otherwise decide on extension type.
1837 switch (Load
->getExtensionType()) {
1838 case ISD::NON_EXTLOAD
:
1841 return ICmpType
!= SystemZICMP::UnsignedOnly
;
1843 return ICmpType
!= SystemZICMP::SignedOnly
;
1851 // Return true if it is better to swap the operands of C.
1852 static bool shouldSwapCmpOperands(const Comparison
&C
) {
1853 // Leave f128 comparisons alone, since they have no memory forms.
1854 if (C
.Op0
.getValueType() == MVT::f128
)
1857 // Always keep a floating-point constant second, since comparisons with
1858 // zero can use LOAD TEST and comparisons with other constants make a
1859 // natural memory operand.
1860 if (isa
<ConstantFPSDNode
>(C
.Op1
))
1863 // Never swap comparisons with zero since there are many ways to optimize
1865 auto *ConstOp1
= dyn_cast
<ConstantSDNode
>(C
.Op1
);
1866 if (ConstOp1
&& ConstOp1
->getZExtValue() == 0)
1869 // Also keep natural memory operands second if the loaded value is
1870 // only used here. Several comparisons have memory forms.
1871 if (isNaturalMemoryOperand(C
.Op1
, C
.ICmpType
) && C
.Op1
.hasOneUse())
1874 // Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
1875 // In that case we generally prefer the memory to be second.
1876 if (isNaturalMemoryOperand(C
.Op0
, C
.ICmpType
) && C
.Op0
.hasOneUse()) {
1877 // The only exceptions are when the second operand is a constant and
1878 // we can use things like CHHSI.
1881 // The unsigned memory-immediate instructions can handle 16-bit
1882 // unsigned integers.
1883 if (C
.ICmpType
!= SystemZICMP::SignedOnly
&&
1884 isUInt
<16>(ConstOp1
->getZExtValue()))
1886 // The signed memory-immediate instructions can handle 16-bit
1888 if (C
.ICmpType
!= SystemZICMP::UnsignedOnly
&&
1889 isInt
<16>(ConstOp1
->getSExtValue()))
1894 // Try to promote the use of CGFR and CLGFR.
1895 unsigned Opcode0
= C
.Op0
.getOpcode();
1896 if (C
.ICmpType
!= SystemZICMP::UnsignedOnly
&& Opcode0
== ISD::SIGN_EXTEND
)
1898 if (C
.ICmpType
!= SystemZICMP::SignedOnly
&& Opcode0
== ISD::ZERO_EXTEND
)
1900 if (C
.ICmpType
!= SystemZICMP::SignedOnly
&&
1901 Opcode0
== ISD::AND
&&
1902 C
.Op0
.getOperand(1).getOpcode() == ISD::Constant
&&
1903 cast
<ConstantSDNode
>(C
.Op0
.getOperand(1))->getZExtValue() == 0xffffffff)
// Return a version of comparison CC mask CCMask in which the LT and GT
// actions are swapped.
static unsigned reverseCCMask(unsigned CCMask) {
  return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
          (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
          (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
          (CCMask & SystemZ::CCMASK_CMP_UO));
}
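// reverseCCMask is used both when getCmp() swaps the comparison operands
// (shouldSwapCmpOperands) and when adjustForFNeg folds a negation into the
// compared value: e.g. CCMASK_CMP_LT becomes CCMASK_CMP_GT while the EQ and
// UO bits are left untouched.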
// Check whether C tests for equality between X and Y and whether X - Y
// or Y - X is also computed.  In that case it's better to compare the
// result of the subtraction against zero.
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL,
                                 Comparison &C) {
  if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
      C.CCMask == SystemZ::CCMASK_CMP_NE) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::SUB &&
          ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
           (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
        C.Op0 = SDValue(N, 0);
        C.Op1 = DAG.getConstant(0, DL, N->getValueType(0));
        return;
      }
    }
  }
}
// Check whether C compares a floating-point value with zero and if that
// floating-point value is also negated.  In this case we can use the
// negation to set CC, so avoiding separate LOAD AND TEST and
// LOAD (NEGATIVE/COMPLEMENT) instructions.
static void adjustForFNeg(Comparison &C) {
  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::FNEG) {
        C.Op0 = SDValue(N, 0);
        C.CCMask = reverseCCMask(C.CCMask);
        return;
      }
    }
  }
}
1956 // Check whether C compares (shl X, 32) with 0 and whether X is
1957 // also sign-extended. In that case it is better to test the result
1958 // of the sign extension using LTGFR.
1960 // This case is important because InstCombine transforms a comparison
1961 // with (sext (trunc X)) into a comparison with (shl X, 32).
1962 static void adjustForLTGFR(Comparison
&C
) {
1963 // Check for a comparison between (shl X, 32) and 0.
1964 if (C
.Op0
.getOpcode() == ISD::SHL
&&
1965 C
.Op0
.getValueType() == MVT::i64
&&
1966 C
.Op1
.getOpcode() == ISD::Constant
&&
1967 cast
<ConstantSDNode
>(C
.Op1
)->getZExtValue() == 0) {
1968 auto *C1
= dyn_cast
<ConstantSDNode
>(C
.Op0
.getOperand(1));
1969 if (C1
&& C1
->getZExtValue() == 32) {
1970 SDValue ShlOp0
= C
.Op0
.getOperand(0);
1971 // See whether X has any SIGN_EXTEND_INREG uses.
1972 for (auto I
= ShlOp0
->use_begin(), E
= ShlOp0
->use_end(); I
!= E
; ++I
) {
1974 if (N
->getOpcode() == ISD::SIGN_EXTEND_INREG
&&
1975 cast
<VTSDNode
>(N
->getOperand(1))->getVT() == MVT::i32
) {
1976 C
.Op0
= SDValue(N
, 0);
1984 // If C compares the truncation of an extending load, try to compare
1985 // the untruncated value instead. This exposes more opportunities to
1987 static void adjustICmpTruncate(SelectionDAG
&DAG
, const SDLoc
&DL
,
1989 if (C
.Op0
.getOpcode() == ISD::TRUNCATE
&&
1990 C
.Op0
.getOperand(0).getOpcode() == ISD::LOAD
&&
1991 C
.Op1
.getOpcode() == ISD::Constant
&&
1992 cast
<ConstantSDNode
>(C
.Op1
)->getZExtValue() == 0) {
1993 auto *L
= cast
<LoadSDNode
>(C
.Op0
.getOperand(0));
1994 if (L
->getMemoryVT().getStoreSizeInBits() <= C
.Op0
.getValueSizeInBits()) {
1995 unsigned Type
= L
->getExtensionType();
1996 if ((Type
== ISD::ZEXTLOAD
&& C
.ICmpType
!= SystemZICMP::SignedOnly
) ||
1997 (Type
== ISD::SEXTLOAD
&& C
.ICmpType
!= SystemZICMP::UnsignedOnly
)) {
1998 C
.Op0
= C
.Op0
.getOperand(0);
1999 C
.Op1
= DAG
.getConstant(0, DL
, C
.Op0
.getValueType());
// Return true if shift operation N has an in-range constant shift value.
// Store it in ShiftVal if so.
static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!Shift)
    return false;

  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueSizeInBits())
    return false;

  ShiftVal = Amount;
  return true;
}
2020 // Check whether an AND with Mask is suitable for a TEST UNDER MASK
2021 // instruction and whether the CC value is descriptive enough to handle
2022 // a comparison of type Opcode between the AND result and CmpVal.
2023 // CCMask says which comparison result is being tested and BitSize is
2024 // the number of bits in the operands. If TEST UNDER MASK can be used,
2025 // return the corresponding CC mask, otherwise return 0.
2026 static unsigned getTestUnderMaskCond(unsigned BitSize
, unsigned CCMask
,
2027 uint64_t Mask
, uint64_t CmpVal
,
2028 unsigned ICmpType
) {
2029 assert(Mask
!= 0 && "ANDs with zero should have been removed by now");
2031 // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
2032 if (!SystemZ::isImmLL(Mask
) && !SystemZ::isImmLH(Mask
) &&
2033 !SystemZ::isImmHL(Mask
) && !SystemZ::isImmHH(Mask
))
2036 // Work out the masks for the lowest and highest bits.
2037 unsigned HighShift
= 63 - countLeadingZeros(Mask
);
2038 uint64_t High
= uint64_t(1) << HighShift
;
2039 uint64_t Low
= uint64_t(1) << countTrailingZeros(Mask
);
2041 // Signed ordered comparisons are effectively unsigned if the sign
2043 bool EffectivelyUnsigned
= (ICmpType
!= SystemZICMP::SignedOnly
);
2045 // Check for equality comparisons with 0, or the equivalent.
2047 if (CCMask
== SystemZ::CCMASK_CMP_EQ
)
2048 return SystemZ::CCMASK_TM_ALL_0
;
2049 if (CCMask
== SystemZ::CCMASK_CMP_NE
)
2050 return SystemZ::CCMASK_TM_SOME_1
;
2052 if (EffectivelyUnsigned
&& CmpVal
> 0 && CmpVal
<= Low
) {
2053 if (CCMask
== SystemZ::CCMASK_CMP_LT
)
2054 return SystemZ::CCMASK_TM_ALL_0
;
2055 if (CCMask
== SystemZ::CCMASK_CMP_GE
)
2056 return SystemZ::CCMASK_TM_SOME_1
;
2058 if (EffectivelyUnsigned
&& CmpVal
< Low
) {
2059 if (CCMask
== SystemZ::CCMASK_CMP_LE
)
2060 return SystemZ::CCMASK_TM_ALL_0
;
2061 if (CCMask
== SystemZ::CCMASK_CMP_GT
)
2062 return SystemZ::CCMASK_TM_SOME_1
;
2065 // Check for equality comparisons with the mask, or the equivalent.
2066 if (CmpVal
== Mask
) {
2067 if (CCMask
== SystemZ::CCMASK_CMP_EQ
)
2068 return SystemZ::CCMASK_TM_ALL_1
;
2069 if (CCMask
== SystemZ::CCMASK_CMP_NE
)
2070 return SystemZ::CCMASK_TM_SOME_0
;
2072 if (EffectivelyUnsigned
&& CmpVal
>= Mask
- Low
&& CmpVal
< Mask
) {
2073 if (CCMask
== SystemZ::CCMASK_CMP_GT
)
2074 return SystemZ::CCMASK_TM_ALL_1
;
2075 if (CCMask
== SystemZ::CCMASK_CMP_LE
)
2076 return SystemZ::CCMASK_TM_SOME_0
;
2078 if (EffectivelyUnsigned
&& CmpVal
> Mask
- Low
&& CmpVal
<= Mask
) {
2079 if (CCMask
== SystemZ::CCMASK_CMP_GE
)
2080 return SystemZ::CCMASK_TM_ALL_1
;
2081 if (CCMask
== SystemZ::CCMASK_CMP_LT
)
2082 return SystemZ::CCMASK_TM_SOME_0
;
2085 // Check for ordered comparisons with the top bit.
2086 if (EffectivelyUnsigned
&& CmpVal
>= Mask
- High
&& CmpVal
< High
) {
2087 if (CCMask
== SystemZ::CCMASK_CMP_LE
)
2088 return SystemZ::CCMASK_TM_MSB_0
;
2089 if (CCMask
== SystemZ::CCMASK_CMP_GT
)
2090 return SystemZ::CCMASK_TM_MSB_1
;
2092 if (EffectivelyUnsigned
&& CmpVal
> Mask
- High
&& CmpVal
<= High
) {
2093 if (CCMask
== SystemZ::CCMASK_CMP_LT
)
2094 return SystemZ::CCMASK_TM_MSB_0
;
2095 if (CCMask
== SystemZ::CCMASK_CMP_GE
)
2096 return SystemZ::CCMASK_TM_MSB_1
;
2099 // If there are just two bits, we can do equality checks for Low and High
2101 if (Mask
== Low
+ High
) {
2102 if (CCMask
== SystemZ::CCMASK_CMP_EQ
&& CmpVal
== Low
)
2103 return SystemZ::CCMASK_TM_MIXED_MSB_0
;
2104 if (CCMask
== SystemZ::CCMASK_CMP_NE
&& CmpVal
== Low
)
2105 return SystemZ::CCMASK_TM_MIXED_MSB_0
^ SystemZ::CCMASK_ANY
;
2106 if (CCMask
== SystemZ::CCMASK_CMP_EQ
&& CmpVal
== High
)
2107 return SystemZ::CCMASK_TM_MIXED_MSB_1
;
2108 if (CCMask
== SystemZ::CCMASK_CMP_NE
&& CmpVal
== High
)
2109 return SystemZ::CCMASK_TM_MIXED_MSB_1
^ SystemZ::CCMASK_ANY
;
2112 // Looks like we've exhausted our options.
2116 // See whether C can be implemented as a TEST UNDER MASK instruction.
2117 // Update the arguments with the TM version if so.
2118 static void adjustForTestUnderMask(SelectionDAG
&DAG
, const SDLoc
&DL
,
2120 // Check that we have a comparison with a constant.
2121 auto *ConstOp1
= dyn_cast
<ConstantSDNode
>(C
.Op1
);
2124 uint64_t CmpVal
= ConstOp1
->getZExtValue();
2126 // Check whether the nonconstant input is an AND with a constant mask.
2129 ConstantSDNode
*Mask
= nullptr;
2130 if (C
.Op0
.getOpcode() == ISD::AND
) {
2131 NewC
.Op0
= C
.Op0
.getOperand(0);
2132 NewC
.Op1
= C
.Op0
.getOperand(1);
2133 Mask
= dyn_cast
<ConstantSDNode
>(NewC
.Op1
);
2136 MaskVal
= Mask
->getZExtValue();
2138 // There is no instruction to compare with a 64-bit immediate
2139 // so use TMHH instead if possible. We need an unsigned ordered
2140 // comparison with an i64 immediate.
2141 if (NewC
.Op0
.getValueType() != MVT::i64
||
2142 NewC
.CCMask
== SystemZ::CCMASK_CMP_EQ
||
2143 NewC
.CCMask
== SystemZ::CCMASK_CMP_NE
||
2144 NewC
.ICmpType
== SystemZICMP::SignedOnly
)
2146 // Convert LE and GT comparisons into LT and GE.
2147 if (NewC
.CCMask
== SystemZ::CCMASK_CMP_LE
||
2148 NewC
.CCMask
== SystemZ::CCMASK_CMP_GT
) {
2149 if (CmpVal
== uint64_t(-1))
2152 NewC
.CCMask
^= SystemZ::CCMASK_CMP_EQ
;
2154 // If the low N bits of Op1 are zero than the low N bits of Op0 can
2155 // be masked off without changing the result.
2156 MaskVal
= -(CmpVal
& -CmpVal
);
2157 NewC
.ICmpType
= SystemZICMP::UnsignedOnly
;
2162 // Check whether the combination of mask, comparison value and comparison
2163 // type are suitable.
2164 unsigned BitSize
= NewC
.Op0
.getValueSizeInBits();
2165 unsigned NewCCMask
, ShiftVal
;
2166 if (NewC
.ICmpType
!= SystemZICMP::SignedOnly
&&
2167 NewC
.Op0
.getOpcode() == ISD::SHL
&&
2168 isSimpleShift(NewC
.Op0
, ShiftVal
) &&
2169 (MaskVal
>> ShiftVal
!= 0) &&
2170 ((CmpVal
>> ShiftVal
) << ShiftVal
) == CmpVal
&&
2171 (NewCCMask
= getTestUnderMaskCond(BitSize
, NewC
.CCMask
,
2172 MaskVal
>> ShiftVal
,
2174 SystemZICMP::Any
))) {
2175 NewC
.Op0
= NewC
.Op0
.getOperand(0);
2176 MaskVal
>>= ShiftVal
;
2177 } else if (NewC
.ICmpType
!= SystemZICMP::SignedOnly
&&
2178 NewC
.Op0
.getOpcode() == ISD::SRL
&&
2179 isSimpleShift(NewC
.Op0
, ShiftVal
) &&
2180 (MaskVal
<< ShiftVal
!= 0) &&
2181 ((CmpVal
<< ShiftVal
) >> ShiftVal
) == CmpVal
&&
2182 (NewCCMask
= getTestUnderMaskCond(BitSize
, NewC
.CCMask
,
2183 MaskVal
<< ShiftVal
,
2185 SystemZICMP::UnsignedOnly
))) {
2186 NewC
.Op0
= NewC
.Op0
.getOperand(0);
2187 MaskVal
<<= ShiftVal
;
2189 NewCCMask
= getTestUnderMaskCond(BitSize
, NewC
.CCMask
, MaskVal
, CmpVal
,
2195 // Go ahead and make the change.
2196 C
.Opcode
= SystemZISD::TM
;
2198 if (Mask
&& Mask
->getZExtValue() == MaskVal
)
2199 C
.Op1
= SDValue(Mask
, 0);
2201 C
.Op1
= DAG
.getConstant(MaskVal
, DL
, C
.Op0
.getValueType());
2202 C
.CCValid
= SystemZ::CCMASK_TM
;
2203 C
.CCMask
= NewCCMask
;
// See whether the comparison argument contains a redundant AND
// and remove it if so.  This sometimes happens due to the generic
// BRCOND expansion.
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL,
                                  Comparison &C) {
  if (C.Op0.getOpcode() != ISD::AND)
    return;
  auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
  if (!Mask)
    return;
  KnownBits Known;
  DAG.computeKnownBits(C.Op0.getOperand(0), Known);
  if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
    return;

  C.Op0 = C.Op0.getOperand(0);
}
// Return a Comparison that tests the condition-code result of intrinsic
// node Call against constant integer CC using comparison code Cond.
// Opcode is the opcode of the SystemZISD operation for the intrinsic
// and CCValid is the set of possible condition-code results.
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode,
                                  SDValue Call, unsigned CCValid, uint64_t CC,
                                  ISD::CondCode Cond) {
  Comparison C(Call, SDValue());
  C.Opcode = Opcode;
  C.CCValid = CCValid;
  if (Cond == ISD::SETEQ)
    // bit 3 for CC==0, bit 0 for CC==3, always false for CC>3.
    C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
  else if (Cond == ISD::SETNE)
    // ...and the inverse of that.
    C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
  else if (Cond == ISD::SETLT || Cond == ISD::SETULT)
    // bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3,
    // always true for CC>3.
    C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
  else if (Cond == ISD::SETGE || Cond == ISD::SETUGE)
    // ...and the inverse of that.
    C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
  else if (Cond == ISD::SETLE || Cond == ISD::SETULE)
    // bit 3 and above for CC==0, bit 0 and above for CC==3 (always true),
    // always true for CC>3.
    C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
  else if (Cond == ISD::SETGT || Cond == ISD::SETUGT)
    // ...and the inverse of that.
    C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
  else
    llvm_unreachable("Unexpected integer comparison type");
  C.CCMask &= CCValid;
  return C;
}
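// Example: lowering "tbegin(...) == 0" reaches here with Cond == SETEQ and
// CC == 0, so CCMask becomes 1 << 3 == SystemZ::CCMASK_0, restricted to
// CCValid (SystemZ::CCMASK_TBEGIN); the eventual branch then tests only the
// CC-0 bit produced by the TBEGIN node.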
2260 // Decide how to implement a comparison of type Cond between CmpOp0 with CmpOp1.
2261 static Comparison
getCmp(SelectionDAG
&DAG
, SDValue CmpOp0
, SDValue CmpOp1
,
2262 ISD::CondCode Cond
, const SDLoc
&DL
) {
2263 if (CmpOp1
.getOpcode() == ISD::Constant
) {
2264 uint64_t Constant
= cast
<ConstantSDNode
>(CmpOp1
)->getZExtValue();
2265 unsigned Opcode
, CCValid
;
2266 if (CmpOp0
.getOpcode() == ISD::INTRINSIC_W_CHAIN
&&
2267 CmpOp0
.getResNo() == 0 && CmpOp0
->hasNUsesOfValue(1, 0) &&
2268 isIntrinsicWithCCAndChain(CmpOp0
, Opcode
, CCValid
))
2269 return getIntrinsicCmp(DAG
, Opcode
, CmpOp0
, CCValid
, Constant
, Cond
);
2270 if (CmpOp0
.getOpcode() == ISD::INTRINSIC_WO_CHAIN
&&
2271 CmpOp0
.getResNo() == CmpOp0
->getNumValues() - 1 &&
2272 isIntrinsicWithCC(CmpOp0
, Opcode
, CCValid
))
2273 return getIntrinsicCmp(DAG
, Opcode
, CmpOp0
, CCValid
, Constant
, Cond
);
2275 Comparison
C(CmpOp0
, CmpOp1
);
2276 C
.CCMask
= CCMaskForCondCode(Cond
);
2277 if (C
.Op0
.getValueType().isFloatingPoint()) {
2278 C
.CCValid
= SystemZ::CCMASK_FCMP
;
2279 C
.Opcode
= SystemZISD::FCMP
;
2282 C
.CCValid
= SystemZ::CCMASK_ICMP
;
2283 C
.Opcode
= SystemZISD::ICMP
;
2284 // Choose the type of comparison. Equality and inequality tests can
2285 // use either signed or unsigned comparisons. The choice also doesn't
2286 // matter if both sign bits are known to be clear. In those cases we
2287 // want to give the main isel code the freedom to choose whichever
2289 if (C
.CCMask
== SystemZ::CCMASK_CMP_EQ
||
2290 C
.CCMask
== SystemZ::CCMASK_CMP_NE
||
2291 (DAG
.SignBitIsZero(C
.Op0
) && DAG
.SignBitIsZero(C
.Op1
)))
2292 C
.ICmpType
= SystemZICMP::Any
;
2293 else if (C
.CCMask
& SystemZ::CCMASK_CMP_UO
)
2294 C
.ICmpType
= SystemZICMP::UnsignedOnly
;
2296 C
.ICmpType
= SystemZICMP::SignedOnly
;
2297 C
.CCMask
&= ~SystemZ::CCMASK_CMP_UO
;
2298 adjustForRedundantAnd(DAG
, DL
, C
);
2299 adjustZeroCmp(DAG
, DL
, C
);
2300 adjustSubwordCmp(DAG
, DL
, C
);
2301 adjustForSubtraction(DAG
, DL
, C
);
2303 adjustICmpTruncate(DAG
, DL
, C
);
2306 if (shouldSwapCmpOperands(C
)) {
2307 std::swap(C
.Op0
, C
.Op1
);
2308 C
.CCMask
= reverseCCMask(C
.CCMask
);
2311 adjustForTestUnderMask(DAG
, DL
, C
);
// Emit the comparison instruction described by C.
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
  if (!C.Op1.getNode()) {
    SDNode *Node;
    switch (C.Op0.getOpcode()) {
    case ISD::INTRINSIC_W_CHAIN:
      Node = emitIntrinsicWithCCAndChain(DAG, C.Op0, C.Opcode);
      return SDValue(Node, 0);
    case ISD::INTRINSIC_WO_CHAIN:
      Node = emitIntrinsicWithCC(DAG, C.Op0, C.Opcode);
      return SDValue(Node, Node->getNumValues() - 1);
    default:
      llvm_unreachable("Invalid comparison operands");
    }
  }
  if (C.Opcode == SystemZISD::ICMP)
    return DAG.getNode(SystemZISD::ICMP, DL, MVT::i32, C.Op0, C.Op1,
                       DAG.getConstant(C.ICmpType, DL, MVT::i32));
  if (C.Opcode == SystemZISD::TM) {
    bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
                         bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
    return DAG.getNode(SystemZISD::TM, DL, MVT::i32, C.Op0, C.Op1,
                       DAG.getConstant(RegisterOnly, DL, MVT::i32));
  }
  return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);
}
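// Note on RegisterOnly above: the storage forms of TEST UNDER MASK only
// report all-zeros / mixed / all-ones, while the register forms additionally
// say whether the leftmost tested bit was 0 or 1.  RegisterOnly is therefore
// set whenever CCMask distinguishes CCMASK_TM_MIXED_MSB_0 from
// CCMASK_TM_MIXED_MSB_1, forcing the operand to stay in a register.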
// Implement a 32-bit *MUL_LOHI operation by extending both operands to
// 64 bits.  Extend is the extension type to use.  Store the high part
// in Hi and the low part in Lo.
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend,
                            SDValue Op0, SDValue Op1, SDValue &Hi,
                            SDValue &Lo) {
  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
  SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
  Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul,
                   DAG.getConstant(32, DL, MVT::i64));
  Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
  Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
}
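// Worked example (zero extension): for i32 operands 0x80000000 and 2 the
// 64-bit product is 0x100000000, so Hi truncates to 1 and Lo to 0, exactly
// the pair an i32 UMUL_LOHI is expected to produce.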
// Lower a binary operation that produces two VT results, one in each
// half of a GR128 pair.  Op0 and Op1 are the VT operands to the operation,
// and Opcode performs the GR128 operation.  Store the even register result
// in Even and the odd register result in Odd.
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                             unsigned Opcode, SDValue Op0, SDValue Op1,
                             SDValue &Even, SDValue &Odd) {
  SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1);
  bool Is32Bit = is32Bit(VT);
  Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
  Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
}

// Return an i32 value that is 1 if the CC value produced by CCReg is
// in the mask CCMask and 0 otherwise.  CC is known to have a value
// in CCValid, so other values can be ignored.
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg,
                         unsigned CCValid, unsigned CCMask) {
  SDValue Ops[] = {DAG.getConstant(1, DL, MVT::i32),
                   DAG.getConstant(0, DL, MVT::i32),
                   DAG.getConstant(CCValid, DL, MVT::i32),
                   DAG.getConstant(CCMask, DL, MVT::i32), CCReg};
  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, MVT::i32, Ops);
}
2382 // Return the SystemISD vector comparison operation for CC, or 0 if it cannot
2383 // be done directly. IsFP is true if CC is for a floating-point rather than
2384 // integer comparison.
2385 static unsigned getVectorComparison(ISD::CondCode CC
, bool IsFP
) {
2389 return IsFP
? SystemZISD::VFCMPE
: SystemZISD::VICMPE
;
2393 return IsFP
? SystemZISD::VFCMPHE
: static_cast<SystemZISD::NodeType
>(0);
2397 return IsFP
? SystemZISD::VFCMPH
: SystemZISD::VICMPH
;
2400 return IsFP
? static_cast<SystemZISD::NodeType
>(0) : SystemZISD::VICMPHL
;
2407 // Return the SystemZISD vector comparison operation for CC or its inverse,
2408 // or 0 if neither can be done directly. Indicate in Invert whether the
2409 // result is for the inverse of CC. IsFP is true if CC is for a
2410 // floating-point rather than integer comparison.
2411 static unsigned getVectorComparisonOrInvert(ISD::CondCode CC
, bool IsFP
,
2413 if (unsigned Opcode
= getVectorComparison(CC
, IsFP
)) {
2418 CC
= ISD::getSetCCInverse(CC
, !IsFP
);
2419 if (unsigned Opcode
= getVectorComparison(CC
, IsFP
)) {
// Return a v2f64 that contains the extended form of elements Start and Start+1
// of v4f32 value Op.
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL,
                                  SDValue Op) {
  int Mask[] = { Start, -1, Start + 1, -1 };
  Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask);
  return DAG.getNode(SystemZISD::VEXTEND, DL, MVT::v2f64, Op);
}
2436 // Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode,
2437 // producing a result of type VT.
2438 SDValue
SystemZTargetLowering::getVectorCmp(SelectionDAG
&DAG
, unsigned Opcode
,
2439 const SDLoc
&DL
, EVT VT
,
2441 SDValue CmpOp1
) const {
2442 // There is no hardware support for v4f32 (unless we have the vector
2443 // enhancements facility 1), so extend the vector into two v2f64s
2444 // and compare those.
2445 if (CmpOp0
.getValueType() == MVT::v4f32
&&
2446 !Subtarget
.hasVectorEnhancements1()) {
2447 SDValue H0
= expandV4F32ToV2F64(DAG
, 0, DL
, CmpOp0
);
2448 SDValue L0
= expandV4F32ToV2F64(DAG
, 2, DL
, CmpOp0
);
2449 SDValue H1
= expandV4F32ToV2F64(DAG
, 0, DL
, CmpOp1
);
2450 SDValue L1
= expandV4F32ToV2F64(DAG
, 2, DL
, CmpOp1
);
2451 SDValue HRes
= DAG
.getNode(Opcode
, DL
, MVT::v2i64
, H0
, H1
);
2452 SDValue LRes
= DAG
.getNode(Opcode
, DL
, MVT::v2i64
, L0
, L1
);
2453 return DAG
.getNode(SystemZISD::PACK
, DL
, VT
, HRes
, LRes
);
2455 return DAG
.getNode(Opcode
, DL
, VT
, CmpOp0
, CmpOp1
);
2458 // Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing
2459 // an integer mask of type VT.
2460 SDValue
SystemZTargetLowering::lowerVectorSETCC(SelectionDAG
&DAG
,
2461 const SDLoc
&DL
, EVT VT
,
2464 SDValue CmpOp1
) const {
2465 bool IsFP
= CmpOp0
.getValueType().isFloatingPoint();
2466 bool Invert
= false;
2469 // Handle tests for order using (or (ogt y x) (oge x y)).
2474 assert(IsFP
&& "Unexpected integer comparison");
2475 SDValue LT
= getVectorCmp(DAG
, SystemZISD::VFCMPH
, DL
, VT
, CmpOp1
, CmpOp0
);
2476 SDValue GE
= getVectorCmp(DAG
, SystemZISD::VFCMPHE
, DL
, VT
, CmpOp0
, CmpOp1
);
2477 Cmp
= DAG
.getNode(ISD::OR
, DL
, VT
, LT
, GE
);
2481 // Handle <> tests using (or (ogt y x) (ogt x y)).
2486 assert(IsFP
&& "Unexpected integer comparison");
2487 SDValue LT
= getVectorCmp(DAG
, SystemZISD::VFCMPH
, DL
, VT
, CmpOp1
, CmpOp0
);
2488 SDValue GT
= getVectorCmp(DAG
, SystemZISD::VFCMPH
, DL
, VT
, CmpOp0
, CmpOp1
);
2489 Cmp
= DAG
.getNode(ISD::OR
, DL
, VT
, LT
, GT
);
2493 // Otherwise a single comparison is enough. It doesn't really
2494 // matter whether we try the inversion or the swap first, since
2495 // there are no cases where both work.
2497 if (unsigned Opcode
= getVectorComparisonOrInvert(CC
, IsFP
, Invert
))
2498 Cmp
= getVectorCmp(DAG
, Opcode
, DL
, VT
, CmpOp0
, CmpOp1
);
2500 CC
= ISD::getSetCCSwappedOperands(CC
);
2501 if (unsigned Opcode
= getVectorComparisonOrInvert(CC
, IsFP
, Invert
))
2502 Cmp
= getVectorCmp(DAG
, Opcode
, DL
, VT
, CmpOp1
, CmpOp0
);
2504 llvm_unreachable("Unhandled comparison");
2509 SDValue Mask
= DAG
.getNode(SystemZISD::BYTE_MASK
, DL
, MVT::v16i8
,
2510 DAG
.getConstant(65535, DL
, MVT::i32
));
2511 Mask
= DAG
.getNode(ISD::BITCAST
, DL
, VT
, Mask
);
2512 Cmp
= DAG
.getNode(ISD::XOR
, DL
, VT
, Cmp
, Mask
);
SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDValue CmpOp0   = Op.getOperand(0);
  SDValue CmpOp1   = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  if (VT.isVector())
    return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
  SDValue CCReg = emitCmp(DAG, DL, C);
  return emitSETCC(DAG, DL, CCReg, C.CCValid, C.CCMask);
}

SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue CmpOp0   = Op.getOperand(2);
  SDValue CmpOp1   = Op.getOperand(3);
  SDValue Dest     = Op.getOperand(4);
  SDLoc DL(Op);

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
  SDValue CCReg = emitCmp(DAG, DL, C);
  return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
                     Op.getOperand(0), DAG.getConstant(C.CCValid, DL, MVT::i32),
                     DAG.getConstant(C.CCMask, DL, MVT::i32), Dest, CCReg);
}
// Return true if Pos is CmpOp and Neg is the negative of CmpOp,
// allowing Pos and Neg to be wider than CmpOp.
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {
  return (Neg.getOpcode() == ISD::SUB &&
          Neg.getOperand(0).getOpcode() == ISD::Constant &&
          cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
          Neg.getOperand(1) == Pos &&
          (Pos == CmpOp ||
           (Pos.getOpcode() == ISD::SIGN_EXTEND &&
            Pos.getOperand(0) == CmpOp)));
}

// Return the absolute or negative absolute of Op; IsNegative decides which.
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op,
                           bool IsNegative) {
  Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op);
  if (IsNegative)
    Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(),
                     DAG.getConstant(0, DL, Op.getValueType()), Op);
  return Op;
}
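// Example of the pattern these helpers recognise: for "(x < 0) ? 0 - x : x",
// lowerSELECT_CC below finds isAbsolute(x, /*Pos=*/x, /*Neg=*/0 - x) to be
// true for the false/true arms and emits SystemZISD::IABS of x (in effect a
// LOAD POSITIVE); with the select arms exchanged the negated form (LOAD
// NEGATIVE) is produced instead.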
2568 SDValue
SystemZTargetLowering::lowerSELECT_CC(SDValue Op
,
2569 SelectionDAG
&DAG
) const {
2570 SDValue CmpOp0
= Op
.getOperand(0);
2571 SDValue CmpOp1
= Op
.getOperand(1);
2572 SDValue TrueOp
= Op
.getOperand(2);
2573 SDValue FalseOp
= Op
.getOperand(3);
2574 ISD::CondCode CC
= cast
<CondCodeSDNode
>(Op
.getOperand(4))->get();
2577 Comparison
C(getCmp(DAG
, CmpOp0
, CmpOp1
, CC
, DL
));
2579 // Check for absolute and negative-absolute selections, including those
2580 // where the comparison value is sign-extended (for LPGFR and LNGFR).
2581 // This check supplements the one in DAGCombiner.
2582 if (C
.Opcode
== SystemZISD::ICMP
&&
2583 C
.CCMask
!= SystemZ::CCMASK_CMP_EQ
&&
2584 C
.CCMask
!= SystemZ::CCMASK_CMP_NE
&&
2585 C
.Op1
.getOpcode() == ISD::Constant
&&
2586 cast
<ConstantSDNode
>(C
.Op1
)->getZExtValue() == 0) {
2587 if (isAbsolute(C
.Op0
, TrueOp
, FalseOp
))
2588 return getAbsolute(DAG
, DL
, TrueOp
, C
.CCMask
& SystemZ::CCMASK_CMP_LT
);
2589 if (isAbsolute(C
.Op0
, FalseOp
, TrueOp
))
2590 return getAbsolute(DAG
, DL
, FalseOp
, C
.CCMask
& SystemZ::CCMASK_CMP_GT
);
2593 SDValue CCReg
= emitCmp(DAG
, DL
, C
);
2594 SDValue Ops
[] = {TrueOp
, FalseOp
, DAG
.getConstant(C
.CCValid
, DL
, MVT::i32
),
2595 DAG
.getConstant(C
.CCMask
, DL
, MVT::i32
), CCReg
};
2597 return DAG
.getNode(SystemZISD::SELECT_CCMASK
, DL
, Op
.getValueType(), Ops
);
2600 SDValue
SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode
*Node
,
2601 SelectionDAG
&DAG
) const {
2603 const GlobalValue
*GV
= Node
->getGlobal();
2604 int64_t Offset
= Node
->getOffset();
2605 EVT PtrVT
= getPointerTy(DAG
.getDataLayout());
2606 CodeModel::Model CM
= DAG
.getTarget().getCodeModel();
2609 if (Subtarget
.isPC32DBLSymbol(GV
, CM
)) {
2610 // Assign anchors at 1<<12 byte boundaries.
2611 uint64_t Anchor
= Offset
& ~uint64_t(0xfff);
2612 Result
= DAG
.getTargetGlobalAddress(GV
, DL
, PtrVT
, Anchor
);
2613 Result
= DAG
.getNode(SystemZISD::PCREL_WRAPPER
, DL
, PtrVT
, Result
);
2615 // The offset can be folded into the address if it is aligned to a halfword.
2617 if (Offset
!= 0 && (Offset
& 1) == 0) {
2618 SDValue Full
= DAG
.getTargetGlobalAddress(GV
, DL
, PtrVT
, Anchor
+ Offset
);
2619 Result
= DAG
.getNode(SystemZISD::PCREL_OFFSET
, DL
, PtrVT
, Full
, Result
);
2623 Result
= DAG
.getTargetGlobalAddress(GV
, DL
, PtrVT
, 0, SystemZII::MO_GOT
);
2624 Result
= DAG
.getNode(SystemZISD::PCREL_WRAPPER
, DL
, PtrVT
, Result
);
2625 Result
= DAG
.getLoad(PtrVT
, DL
, DAG
.getEntryNode(), Result
,
2626 MachinePointerInfo::getGOT(DAG
.getMachineFunction()));
2629 // If there was a non-zero offset that we didn't fold, create an explicit
2632 Result
= DAG
.getNode(ISD::ADD
, DL
, PtrVT
, Result
,
2633 DAG
.getConstant(Offset
, DL
, PtrVT
));
2638 SDValue
SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode
*Node
,
2641 SDValue GOTOffset
) const {
2643 EVT PtrVT
= getPointerTy(DAG
.getDataLayout());
2644 SDValue Chain
= DAG
.getEntryNode();
2647 // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12.
2648 SDValue GOT
= DAG
.getGLOBAL_OFFSET_TABLE(PtrVT
);
2649 Chain
= DAG
.getCopyToReg(Chain
, DL
, SystemZ::R12D
, GOT
, Glue
);
2650 Glue
= Chain
.getValue(1);
2651 Chain
= DAG
.getCopyToReg(Chain
, DL
, SystemZ::R2D
, GOTOffset
, Glue
);
2652 Glue
= Chain
.getValue(1);
2654 // The first call operand is the chain and the second is the TLS symbol.
2655 SmallVector
<SDValue
, 8> Ops
;
2656 Ops
.push_back(Chain
);
2657 Ops
.push_back(DAG
.getTargetGlobalAddress(Node
->getGlobal(), DL
,
2658 Node
->getValueType(0),
2661 // Add argument registers to the end of the list so that they are
2662 // known live into the call.
2663 Ops
.push_back(DAG
.getRegister(SystemZ::R2D
, PtrVT
));
2664 Ops
.push_back(DAG
.getRegister(SystemZ::R12D
, PtrVT
));
2666 // Add a register mask operand representing the call-preserved registers.
2667 const TargetRegisterInfo
*TRI
= Subtarget
.getRegisterInfo();
2668 const uint32_t *Mask
=
2669 TRI
->getCallPreservedMask(DAG
.getMachineFunction(), CallingConv::C
);
2670 assert(Mask
&& "Missing call preserved mask for calling convention");
2671 Ops
.push_back(DAG
.getRegisterMask(Mask
));
2673 // Glue the call to the argument copies.
2674 Ops
.push_back(Glue
);
2677 SDVTList NodeTys
= DAG
.getVTList(MVT::Other
, MVT::Glue
);
2678 Chain
= DAG
.getNode(Opcode
, DL
, NodeTys
, Ops
);
2679 Glue
= Chain
.getValue(1);
2681 // Copy the return value from %r2.
2682 return DAG
.getCopyFromReg(Chain
, DL
, SystemZ::R2D
, PtrVT
, Glue
);
SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
                                                  SelectionDAG &DAG) const {
  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // The high part of the thread pointer is in access register 0.
  SDValue TPHi = DAG.getCopyFromReg(Chain, DL, SystemZ::A0, MVT::i32);
  TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);

  // The low part of the thread pointer is in access register 1.
  SDValue TPLo = DAG.getCopyFromReg(Chain, DL, SystemZ::A1, MVT::i32);
  TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);

  // Merge them into a single 64-bit address.
  SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
                                    DAG.getConstant(32, DL, PtrVT));
  return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
}
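// Illustrative values: if access register 0 holds 0x00000001 and access
// register 1 holds 0x23456780, the merged thread pointer computed above is
// 0x0000000123456780.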
2704 SDValue
SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode
*Node
,
2705 SelectionDAG
&DAG
) const {
2706 if (DAG
.getTarget().useEmulatedTLS())
2707 return LowerToTLSEmulatedModel(Node
, DAG
);
2709 const GlobalValue
*GV
= Node
->getGlobal();
2710 EVT PtrVT
= getPointerTy(DAG
.getDataLayout());
2711 TLSModel::Model model
= DAG
.getTarget().getTLSModel(GV
);
2713 SDValue TP
= lowerThreadPointer(DL
, DAG
);
2715 // Get the offset of GA from the thread pointer, based on the TLS model.
2718 case TLSModel::GeneralDynamic
: {
2719 // Load the GOT offset of the tls_index (module ID / per-symbol offset).
2720 SystemZConstantPoolValue
*CPV
=
2721 SystemZConstantPoolValue::Create(GV
, SystemZCP::TLSGD
);
2723 Offset
= DAG
.getConstantPool(CPV
, PtrVT
, 8);
2724 Offset
= DAG
.getLoad(
2725 PtrVT
, DL
, DAG
.getEntryNode(), Offset
,
2726 MachinePointerInfo::getConstantPool(DAG
.getMachineFunction()));
2728 // Call __tls_get_offset to retrieve the offset.
2729 Offset
= lowerTLSGetOffset(Node
, DAG
, SystemZISD::TLS_GDCALL
, Offset
);
2733 case TLSModel::LocalDynamic
: {
2734 // Load the GOT offset of the module ID.
2735 SystemZConstantPoolValue
*CPV
=
2736 SystemZConstantPoolValue::Create(GV
, SystemZCP::TLSLDM
);
2738 Offset
= DAG
.getConstantPool(CPV
, PtrVT
, 8);
2739 Offset
= DAG
.getLoad(
2740 PtrVT
, DL
, DAG
.getEntryNode(), Offset
,
2741 MachinePointerInfo::getConstantPool(DAG
.getMachineFunction()));
2743 // Call __tls_get_offset to retrieve the module base offset.
2744 Offset
= lowerTLSGetOffset(Node
, DAG
, SystemZISD::TLS_LDCALL
, Offset
);
2746 // Note: The SystemZLDCleanupPass will remove redundant computations
2747 // of the module base offset. Count total number of local-dynamic
2748 // accesses to trigger execution of that pass.
2749 SystemZMachineFunctionInfo
* MFI
=
2750 DAG
.getMachineFunction().getInfo
<SystemZMachineFunctionInfo
>();
2751 MFI
->incNumLocalDynamicTLSAccesses();
2753 // Add the per-symbol offset.
2754 CPV
= SystemZConstantPoolValue::Create(GV
, SystemZCP::DTPOFF
);
2756 SDValue DTPOffset
= DAG
.getConstantPool(CPV
, PtrVT
, 8);
2757 DTPOffset
= DAG
.getLoad(
2758 PtrVT
, DL
, DAG
.getEntryNode(), DTPOffset
,
2759 MachinePointerInfo::getConstantPool(DAG
.getMachineFunction()));
2761 Offset
= DAG
.getNode(ISD::ADD
, DL
, PtrVT
, Offset
, DTPOffset
);
2765 case TLSModel::InitialExec
: {
2766 // Load the offset from the GOT.
2767 Offset
= DAG
.getTargetGlobalAddress(GV
, DL
, PtrVT
, 0,
2768 SystemZII::MO_INDNTPOFF
);
2769 Offset
= DAG
.getNode(SystemZISD::PCREL_WRAPPER
, DL
, PtrVT
, Offset
);
2771 DAG
.getLoad(PtrVT
, DL
, DAG
.getEntryNode(), Offset
,
2772 MachinePointerInfo::getGOT(DAG
.getMachineFunction()));
2776 case TLSModel::LocalExec
: {
2777 // Force the offset into the constant pool and load it from there.
2778 SystemZConstantPoolValue
*CPV
=
2779 SystemZConstantPoolValue::Create(GV
, SystemZCP::NTPOFF
);
2781 Offset
= DAG
.getConstantPool(CPV
, PtrVT
, 8);
2782 Offset
= DAG
.getLoad(
2783 PtrVT
, DL
, DAG
.getEntryNode(), Offset
,
2784 MachinePointerInfo::getConstantPool(DAG
.getMachineFunction()));
2789 // Add the base and offset together.
2790 return DAG
.getNode(ISD::ADD
, DL
, PtrVT
, TP
, Offset
);
SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const BlockAddress *BA = Node->getBlockAddress();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
  Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
  return Result;
}

SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
                                              SelectionDAG &DAG) const {
  SDLoc DL(JT);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);

  // Use LARL to load the address of the table.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}

SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(CP);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Result;
  if (CP->isMachineConstantPoolEntry())
    Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                       CP->getAlignment());
  else
    Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                       CP->getAlignment(), CP->getOffset());

  // Use LARL to load the address of the constant pool entry.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}
2832 SDValue
SystemZTargetLowering::lowerFRAMEADDR(SDValue Op
,
2833 SelectionDAG
&DAG
) const {
2834 MachineFunction
&MF
= DAG
.getMachineFunction();
2835 MachineFrameInfo
&MFI
= MF
.getFrameInfo();
2836 MFI
.setFrameAddressIsTaken(true);
2839 unsigned Depth
= cast
<ConstantSDNode
>(Op
.getOperand(0))->getZExtValue();
2840 EVT PtrVT
= getPointerTy(DAG
.getDataLayout());
2842 // If the back chain frame index has not been allocated yet, do so.
2843 SystemZMachineFunctionInfo
*FI
= MF
.getInfo
<SystemZMachineFunctionInfo
>();
2844 int BackChainIdx
= FI
->getFramePointerSaveIndex();
2845 if (!BackChainIdx
) {
2846 // By definition, the frame address is the address of the back chain.
2847 BackChainIdx
= MFI
.CreateFixedObject(8, -SystemZMC::CallFrameSize
, false);
2848 FI
->setFramePointerSaveIndex(BackChainIdx
);
2850 SDValue BackChain
= DAG
.getFrameIndex(BackChainIdx
, PtrVT
);
2852 // FIXME The frontend should detect this case.
2854 report_fatal_error("Unsupported stack frame traversal count");
SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op,
                                               SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // FIXME The frontend should detect this case.
  if (Depth > 0)
    report_fatal_error("Unsupported stack frame traversal count");

  // Return R14D, which has the return address. Mark it an implicit live-in.
  unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT);
}
2883 SDValue
SystemZTargetLowering::lowerBITCAST(SDValue Op
,
2884 SelectionDAG
&DAG
) const {
2886 SDValue In
= Op
.getOperand(0);
2887 EVT InVT
= In
.getValueType();
2888 EVT ResVT
= Op
.getValueType();
2890 // Convert loads directly. This is normally done by DAGCombiner,
2891 // but we need this case for bitcasts that are created during lowering
2892 // and which are then lowered themselves.
2893 if (auto *LoadN
= dyn_cast
<LoadSDNode
>(In
))
2894 if (ISD::isNormalLoad(LoadN
)) {
2895 SDValue NewLoad
= DAG
.getLoad(ResVT
, DL
, LoadN
->getChain(),
2896 LoadN
->getBasePtr(), LoadN
->getMemOperand());
2897 // Update the chain uses.
2898 DAG
.ReplaceAllUsesOfValueWith(SDValue(LoadN
, 1), NewLoad
.getValue(1));
2902 if (InVT
== MVT::i32
&& ResVT
== MVT::f32
) {
2904 if (Subtarget
.hasHighWord()) {
2905 SDNode
*U64
= DAG
.getMachineNode(TargetOpcode::IMPLICIT_DEF
, DL
,
2907 In64
= DAG
.getTargetInsertSubreg(SystemZ::subreg_h32
, DL
,
2908 MVT::i64
, SDValue(U64
, 0), In
);
2910 In64
= DAG
.getNode(ISD::ANY_EXTEND
, DL
, MVT::i64
, In
);
2911 In64
= DAG
.getNode(ISD::SHL
, DL
, MVT::i64
, In64
,
2912 DAG
.getConstant(32, DL
, MVT::i64
));
2914 SDValue Out64
= DAG
.getNode(ISD::BITCAST
, DL
, MVT::f64
, In64
);
2915 return DAG
.getTargetExtractSubreg(SystemZ::subreg_h32
,
2916 DL
, MVT::f32
, Out64
);
2918 if (InVT
== MVT::f32
&& ResVT
== MVT::i32
) {
2919 SDNode
*U64
= DAG
.getMachineNode(TargetOpcode::IMPLICIT_DEF
, DL
, MVT::f64
);
2920 SDValue In64
= DAG
.getTargetInsertSubreg(SystemZ::subreg_h32
, DL
,
2921 MVT::f64
, SDValue(U64
, 0), In
);
2922 SDValue Out64
= DAG
.getNode(ISD::BITCAST
, DL
, MVT::i64
, In64
);
2923 if (Subtarget
.hasHighWord())
2924 return DAG
.getTargetExtractSubreg(SystemZ::subreg_h32
, DL
,
2926 SDValue Shift
= DAG
.getNode(ISD::SRL
, DL
, MVT::i64
, Out64
,
2927 DAG
.getConstant(32, DL
, MVT::i64
));
2928 return DAG
.getNode(ISD::TRUNCATE
, DL
, MVT::i32
, Shift
);
2930 llvm_unreachable("Unexpected bitcast combination");
2933 SDValue
SystemZTargetLowering::lowerVASTART(SDValue Op
,
2934 SelectionDAG
&DAG
) const {
2935 MachineFunction
&MF
= DAG
.getMachineFunction();
2936 SystemZMachineFunctionInfo
*FuncInfo
=
2937 MF
.getInfo
<SystemZMachineFunctionInfo
>();
2938 EVT PtrVT
= getPointerTy(DAG
.getDataLayout());
2940 SDValue Chain
= Op
.getOperand(0);
2941 SDValue Addr
= Op
.getOperand(1);
2942 const Value
*SV
= cast
<SrcValueSDNode
>(Op
.getOperand(2))->getValue();
2945 // The initial values of each field.
2946 const unsigned NumFields
= 4;
2947 SDValue Fields
[NumFields
] = {
2948 DAG
.getConstant(FuncInfo
->getVarArgsFirstGPR(), DL
, PtrVT
),
2949 DAG
.getConstant(FuncInfo
->getVarArgsFirstFPR(), DL
, PtrVT
),
2950 DAG
.getFrameIndex(FuncInfo
->getVarArgsFrameIndex(), PtrVT
),
2951 DAG
.getFrameIndex(FuncInfo
->getRegSaveFrameIndex(), PtrVT
)
2954 // Store each field into its respective slot.
2955 SDValue MemOps
[NumFields
];
2956 unsigned Offset
= 0;
2957 for (unsigned I
= 0; I
< NumFields
; ++I
) {
2958 SDValue FieldAddr
= Addr
;
2960 FieldAddr
= DAG
.getNode(ISD::ADD
, DL
, PtrVT
, FieldAddr
,
2961 DAG
.getIntPtrConstant(Offset
, DL
));
2962 MemOps
[I
] = DAG
.getStore(Chain
, DL
, Fields
[I
], FieldAddr
,
2963 MachinePointerInfo(SV
, Offset
));
2966 return DAG
.getNode(ISD::TokenFactor
, DL
, MVT::Other
, MemOps
);
SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue Chain      = Op.getOperand(0);
  SDValue DstPtr     = Op.getOperand(1);
  SDValue SrcPtr     = Op.getOperand(2);
  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);

  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL),
                       /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
                       /*isTailCall*/false,
                       MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}
2984 SDValue
SystemZTargetLowering::
2985 lowerDYNAMIC_STACKALLOC(SDValue Op
, SelectionDAG
&DAG
) const {
2986 const TargetFrameLowering
*TFI
= Subtarget
.getFrameLowering();
2987 MachineFunction
&MF
= DAG
.getMachineFunction();
2988 bool RealignOpt
= !MF
.getFunction().hasFnAttribute("no-realign-stack");
2989 bool StoreBackchain
= MF
.getFunction().hasFnAttribute("backchain");
2991 SDValue Chain
= Op
.getOperand(0);
2992 SDValue Size
= Op
.getOperand(1);
2993 SDValue Align
= Op
.getOperand(2);
2996 // If user has set the no alignment function attribute, ignore
2997 // alloca alignments.
2998 uint64_t AlignVal
= (RealignOpt
?
2999 dyn_cast
<ConstantSDNode
>(Align
)->getZExtValue() : 0);
3001 uint64_t StackAlign
= TFI
->getStackAlignment();
3002 uint64_t RequiredAlign
= std::max(AlignVal
, StackAlign
);
3003 uint64_t ExtraAlignSpace
= RequiredAlign
- StackAlign
;
3005 unsigned SPReg
= getStackPointerRegisterToSaveRestore();
3006 SDValue NeededSpace
= Size
;
3008 // Get a reference to the stack pointer.
3009 SDValue OldSP
= DAG
.getCopyFromReg(Chain
, DL
, SPReg
, MVT::i64
);
3011 // If we need a backchain, save it now.
3014 Backchain
= DAG
.getLoad(MVT::i64
, DL
, Chain
, OldSP
, MachinePointerInfo());
3016 // Add extra space for alignment if needed.
3017 if (ExtraAlignSpace
)
3018 NeededSpace
= DAG
.getNode(ISD::ADD
, DL
, MVT::i64
, NeededSpace
,
3019 DAG
.getConstant(ExtraAlignSpace
, DL
, MVT::i64
));
3021 // Get the new stack pointer value.
3022 SDValue NewSP
= DAG
.getNode(ISD::SUB
, DL
, MVT::i64
, OldSP
, NeededSpace
);
3024 // Copy the new stack pointer back.
3025 Chain
= DAG
.getCopyToReg(Chain
, DL
, SPReg
, NewSP
);
3027 // The allocated data lives above the 160 bytes allocated for the standard
3028 // frame, plus any outgoing stack arguments. We don't know how much that
3029 // amounts to yet, so emit a special ADJDYNALLOC placeholder.
3030 SDValue ArgAdjust
= DAG
.getNode(SystemZISD::ADJDYNALLOC
, DL
, MVT::i64
);
3031 SDValue Result
= DAG
.getNode(ISD::ADD
, DL
, MVT::i64
, NewSP
, ArgAdjust
);
3033 // Dynamically realign if needed.
3034 if (RequiredAlign
> StackAlign
) {
3036 DAG
.getNode(ISD::ADD
, DL
, MVT::i64
, Result
,
3037 DAG
.getConstant(ExtraAlignSpace
, DL
, MVT::i64
));
3039 DAG
.getNode(ISD::AND
, DL
, MVT::i64
, Result
,
3040 DAG
.getConstant(~(RequiredAlign
- 1), DL
, MVT::i64
));
3044 Chain
= DAG
.getStore(Chain
, DL
, Backchain
, NewSP
, MachinePointerInfo());
3046 SDValue Ops
[2] = { Result
, Chain
};
3047 return DAG
.getMergeValues(Ops
, DL
);
SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);

  return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
}
3057 SDValue
SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op
,
3058 SelectionDAG
&DAG
) const {
3059 EVT VT
= Op
.getValueType();
3063 // Just do a normal 64-bit multiplication and extract the results.
3064 // We define this so that it can be used for constant division.
3065 lowerMUL_LOHI32(DAG
, DL
, ISD::SIGN_EXTEND
, Op
.getOperand(0),
3066 Op
.getOperand(1), Ops
[1], Ops
[0]);
3067 else if (Subtarget
.hasMiscellaneousExtensions2())
3068 // SystemZISD::SMUL_LOHI returns the low result in the odd register and
3069 // the high result in the even register. ISD::SMUL_LOHI is defined to
3070 // return the low half first, so the results are in reverse order.
3071 lowerGR128Binary(DAG
, DL
, VT
, SystemZISD::SMUL_LOHI
,
3072 Op
.getOperand(0), Op
.getOperand(1), Ops
[1], Ops
[0]);
3074 // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI:
3076 // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
3078 // but using the fact that the upper halves are either all zeros
3081 // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
3083 // and grouping the right terms together since they are quicker than the
3086 // (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
3087 SDValue C63
= DAG
.getConstant(63, DL
, MVT::i64
);
3088 SDValue LL
= Op
.getOperand(0);
3089 SDValue RL
= Op
.getOperand(1);
3090 SDValue LH
= DAG
.getNode(ISD::SRA
, DL
, VT
, LL
, C63
);
3091 SDValue RH
= DAG
.getNode(ISD::SRA
, DL
, VT
, RL
, C63
);
3092 // SystemZISD::UMUL_LOHI returns the low result in the odd register and
3093 // the high result in the even register. ISD::SMUL_LOHI is defined to
3094 // return the low half first, so the results are in reverse order.
3095 lowerGR128Binary(DAG
, DL
, VT
, SystemZISD::UMUL_LOHI
,
3096 LL
, RL
, Ops
[1], Ops
[0]);
3097 SDValue NegLLTimesRH
= DAG
.getNode(ISD::AND
, DL
, VT
, LL
, RH
);
3098 SDValue NegLHTimesRL
= DAG
.getNode(ISD::AND
, DL
, VT
, LH
, RL
);
3099 SDValue NegSum
= DAG
.getNode(ISD::ADD
, DL
, VT
, NegLLTimesRH
, NegLHTimesRL
);
3100 Ops
[1] = DAG
.getNode(ISD::SUB
, DL
, VT
, Ops
[1], NegSum
);
3102 return DAG
.getMergeValues(Ops
, DL
);
SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue Ops[2];
  if (is32Bit(VT))
    // Just do a normal 64-bit multiplication and extract the results.
    // We define this so that it can be used for constant division.
    lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
                    Op.getOperand(1), Ops[1], Ops[0]);
  else
    // SystemZISD::UMUL_LOHI returns the low result in the odd register and
    // the high result in the even register.  ISD::UMUL_LOHI is defined to
    // return the low half first, so the results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  // We use DSGF for 32-bit division.  This means the first operand must
  // always be 64-bit, and the second operand should be 32-bit whenever
  // that is possible, to improve performance.
  if (is32Bit(VT))
    Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
  else if (DAG.ComputeNumSignBits(Op1) > 32)
    Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);

  // DSG(F) returns the remainder in the even register and the
  // quotient in the odd register.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue Ops[2];

  // DL(G) returns the remainder in the even register and the
  // quotient in the odd register.
  lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM,
                   Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");

  // Get the known-zero masks for each operand.
  SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
  KnownBits Known[2];
  DAG.computeKnownBits(Ops[0], Known[0]);
  DAG.computeKnownBits(Ops[1], Known[1]);

  // See if the upper 32 bits of one operand and the lower 32 bits of the
  // other are known zero.  They are the low and high operands respectively.
  uint64_t Masks[] = { Known[0].Zero.getZExtValue(),
                       Known[1].Zero.getZExtValue() };
  unsigned High, Low;
  if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
    High = 1, Low = 0;
  else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
    High = 0, Low = 1;
  else
    return Op;

  SDValue LowOp = Ops[Low];
  SDValue HighOp = Ops[High];

  // If the high part is a constant, we're better off using IILH.
  if (HighOp.getOpcode() == ISD::Constant)
    return Op;

  // If the low part is a constant that is outside the range of LHI,
  // then we're better off using IILF.
  if (LowOp.getOpcode() == ISD::Constant) {
    int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
    if (!isInt<16>(Value))
      return Op;
  }

  // Check whether the high part is an AND that doesn't change the
  // high 32 bits and just masks out low bits.  We can skip it if so.
  if (HighOp.getOpcode() == ISD::AND &&
      HighOp.getOperand(1).getOpcode() == ISD::Constant) {
    SDValue HighOp0 = HighOp.getOperand(0);
    uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
    if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff))))
      HighOp = HighOp0;
  }

  // Take advantage of the fact that all GR32 operations only change the
  // low 32 bits by truncating Low to an i32 and inserting it directly
  // using a subreg.  The interesting cases are those where the truncation
  // can be folded.
  SDLoc DL(Op);
  SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
  return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
                                   MVT::i64, HighOp, Low32);
}
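// Example for lowerOR above: an i64 OR of a value known to be zero in its
// low 32 bits (such as X << 32) with a value known to be zero in its high
// 32 bits (such as a zero-extended i32) becomes an insertion of the
// truncated low operand into the low-32-bit subregister of the high operand,
// which is usually free because GR32 operations leave the high half of the
// 64-bit register untouched.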
// Lower SADDO/SSUBO/UADDO/USUBO nodes.
SDValue SystemZTargetLowering::lowerXALUO(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDNode *N = Op.getNode();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDLoc DL(N);
  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown instruction!");
  case ISD::SADDO:
    BaseOp = SystemZISD::SADDO;
    CCValid = SystemZ::CCMASK_ARITH;
    CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
    break;
  case ISD::SSUBO:
    BaseOp = SystemZISD::SSUBO;
    CCValid = SystemZ::CCMASK_ARITH;
    CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
    break;
  case ISD::UADDO:
    BaseOp = SystemZISD::UADDO;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
    break;
  case ISD::USUBO:
    BaseOp = SystemZISD::USUBO;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
    break;
  }

  SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
  SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);

  SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
  if (N->getValueType(1) == MVT::i1)
    SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);

  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
}
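// Note on lowerXALUO above: emitSETCC turns the CC result of the SystemZISD
// node into a boolean by testing it against (CCValid, CCMask).  For UADDO,
// for instance, CCMASK_LOGICAL_CARRY selects the CC values that a logical
// add produces when a carry occurs, so the second result is 1 exactly on
// unsigned wraparound.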
// Lower ADDCARRY/SUBCARRY nodes.
SDValue SystemZTargetLowering::lowerADDSUBCARRY(SDValue Op,
                                                SelectionDAG &DAG) const {

  SDNode *N = Op.getNode();
  MVT VT = N->getSimpleValueType(0);

  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue Carry = Op.getOperand(2);
  SDLoc DL(N);
  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown instruction!");
  case ISD::ADDCARRY:
    BaseOp = SystemZISD::ADDCARRY;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
    break;
  case ISD::SUBCARRY:
    BaseOp = SystemZISD::SUBCARRY;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
    break;
  }

  // Set the condition code from the carry flag.
  Carry = DAG.getNode(SystemZISD::GET_CCMASK, DL, MVT::i32, Carry,
                      DAG.getConstant(CCValid, DL, MVT::i32),
                      DAG.getConstant(CCMask, DL, MVT::i32));

  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
  SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS, Carry);

  SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
  if (N->getValueType(1) == MVT::i1)
    SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);

  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
}
SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
                                          SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  Op = Op.getOperand(0);

  // Handle vector types via VPOPCT.
  if (VT.isVector()) {
    Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op);
    Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op);
    switch (VT.getScalarSizeInBits()) {
    case 8:
      break;
    case 16: {
      Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
      SDValue Shift = DAG.getConstant(8, DL, MVT::i32);
      SDValue Tmp = DAG.getNode(SystemZISD::VSHL_BY_SCALAR, DL, VT, Op, Shift);
      Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
      Op = DAG.getNode(SystemZISD::VSRL_BY_SCALAR, DL, VT, Op, Shift);
      break;
    }
    case 32: {
      SDValue Tmp = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8,
                                DAG.getConstant(0, DL, MVT::i32));
      Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
      break;
    }
    case 64: {
      SDValue Tmp = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8,
                                DAG.getConstant(0, DL, MVT::i32));
      Op = DAG.getNode(SystemZISD::VSUM, DL, MVT::v4i32, Op, Tmp);
      Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
      break;
    }
    default:
      llvm_unreachable("Unexpected type");
    }
    return Op;
  }

  // Get the known-zero mask for the operand.
  KnownBits Known;
  DAG.computeKnownBits(Op, Known);
  unsigned NumSignificantBits = (~Known.Zero).getActiveBits();
  if (NumSignificantBits == 0)
    return DAG.getConstant(0, DL, VT);

  // Skip known-zero high parts of the operand.
  int64_t OrigBitSize = VT.getSizeInBits();
  int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits);
  BitSize = std::min(BitSize, OrigBitSize);

  // The POPCNT instruction counts the number of bits in each byte.
  Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op);
  Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::i64, Op);
  Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);

  // Add up per-byte counts in a binary tree.  All bits of Op at
  // position larger than BitSize remain zero throughout.
  for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
    SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT));
    if (BitSize != OrigBitSize)
      Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp,
                        DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT));
    Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
  }

  // Extract overall result from high byte.
  if (BitSize > 8)
    Op = DAG.getNode(ISD::SRL, DL, VT, Op,
                     DAG.getConstant(BitSize - 8, DL, VT));

  return Op;
}
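// Note on lowerCTPOP above: for a 64-bit operand the reduction is roughly
//
//   x = per_byte_popcnt(v);                  // each byte holds its own count
//   x += x << 32;  x += x << 16;  x += x << 8;
//   result = x >> 56;                        // sum of all eight byte counts
//
// where per_byte_popcnt stands for the POPCNT node used above.  For example,
// v = 0x0101010101010101 has a count of 1 in every byte and yields 8.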
SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
    cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
  SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
    cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());

  // The only fence that needs an instruction is a sequentially-consistent
  // cross-thread fence.
  if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
      FenceSSID == SyncScope::System) {
    return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
                                      Op.getOperand(0)),
                   0);
  }

  // MEMBARRIER is a compiler barrier; it codegens to a no-op.
  return DAG.getNode(SystemZISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}
// Op is an atomic load.  Lower it into a normal volatile load.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
                                                SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(),
                        Node->getChain(), Node->getBasePtr(),
                        Node->getMemoryVT(), Node->getMemOperand());
}

// Op is an atomic store.  Lower it into a normal volatile store.
SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(),
                                    Node->getBasePtr(), Node->getMemoryVT(),
                                    Node->getMemOperand());
  // We have to enforce sequential consistency by performing a
  // serialization operation after the store.
  if (Node->getOrdering() == AtomicOrdering::SequentiallyConsistent)
    Chain = SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op),
                                       MVT::Other, Chain), 0);
  return Chain;
}
// Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation.  Lower the first
// two into the fullword ATOMIC_LOADW_* operation given by Opcode.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
                                                   SelectionDAG &DAG,
                                                   unsigned Opcode) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());

  // 32-bit operations need no code outside the main loop.
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = MVT::i32;
  if (NarrowVT == WideVT)
    return Op;

  int64_t BitSize = NarrowVT.getSizeInBits();
  SDValue ChainIn = Node->getChain();
  SDValue Addr = Node->getBasePtr();
  SDValue Src2 = Node->getVal();
  MachineMemOperand *MMO = Node->getMemOperand();
  SDLoc DL(Node);
  EVT PtrVT = Addr.getValueType();

  // Convert atomic subtracts of constants into additions.
  if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
    if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
      Opcode = SystemZISD::ATOMIC_LOADW_ADD;
      Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType());
    }

  // Get the address of the containing word.
  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
                                    DAG.getConstant(-4, DL, PtrVT));

  // Get the number of bits that the word must be rotated left in order
  // to bring the field to the top bits of a GR32.
  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
                                 DAG.getConstant(3, DL, PtrVT));
  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);

  // Get the complementing shift amount, for rotating a field in the top
  // bits back to its proper position.
  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
                                    DAG.getConstant(0, DL, WideVT), BitShift);

  // Extend the source operand to 32 bits and prepare it for the inner loop.
  // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
  // operations require the source to be shifted in advance.  (This shift
  // can be folded if the source is constant.)  For AND and NAND, the lower
  // bits must be set, while for other opcodes they should be left clear.
  if (Opcode != SystemZISD::ATOMIC_SWAPW)
    Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
                       DAG.getConstant(32 - BitSize, DL, WideVT));
  if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
      Opcode == SystemZISD::ATOMIC_LOADW_NAND)
    Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
                       DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT));

  // Construct the ATOMIC_LOADW_* node.
  SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
  SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
                    DAG.getConstant(BitSize, DL, WideVT) };
  SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
                                             NarrowVT, MMO);

  // Rotate the result of the final CS so that the field is in the lower
  // bits of a GR32, then truncate it.
  SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
                                    DAG.getConstant(BitSize, DL, WideVT));
  SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);

  SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
  return DAG.getMergeValues(RetOps, DL);
}
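// Example for the subword atomic lowering above: for an i8 operation on
// address A, AlignedAddr is A & -4 and BitShift is effectively (A & 3) * 8
// (a 32-bit rotate only uses the low bits of A << 3), so rotating the
// containing word left by BitShift brings the addressed byte into the most
// significant byte of the GR32 that the ATOMIC_LOADW_* loop operates on;
// NegBitShift rotates the updated field back into its memory position.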
// Op is an ATOMIC_LOAD_SUB operation.  Lower 8- and 16-bit operations
// into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit
// operations into additions.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
                                                    SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  EVT MemVT = Node->getMemoryVT();
  if (MemVT == MVT::i32 || MemVT == MVT::i64) {
    // A full-width operation.
    assert(Op.getValueType() == MemVT && "Mismatched VTs");
    SDValue Src2 = Node->getVal();
    SDValue NegSrc2;
    SDLoc DL(Src2);

    if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
      // Use an addition if the operand is constant and either LAA(G) is
      // available or the negative value is in the range of A(G)FHI.
      int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
      if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
        NegSrc2 = DAG.getConstant(Value, DL, MemVT);
    } else if (Subtarget.hasInterlockedAccess1())
      // Use LAA(G) if available.
      NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT),
                            Src2);

    if (NegSrc2.getNode())
      return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,
                           Node->getChain(), Node->getBasePtr(), NegSrc2,
                           Node->getMemOperand());

    // Use the node as-is.
    return Op;
  }

  return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
}
// Lower 8/16/32/64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS node.
SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
                                                    SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  SDValue ChainIn = Node->getOperand(0);
  SDValue Addr = Node->getOperand(1);
  SDValue CmpVal = Node->getOperand(2);
  SDValue SwapVal = Node->getOperand(3);
  MachineMemOperand *MMO = Node->getMemOperand();
  SDLoc DL(Node);

  // We have native support for 32-bit and 64-bit compare and swap, but we
  // still need to expand extracting the "success" result from the CC.
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
  if (NarrowVT == WideVT) {
    SDVTList Tys = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
    SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };
    SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP,
                                               DL, Tys, Ops, NarrowVT, MMO);
    SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1),
                                SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ);

    DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0));
    DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
    DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2));
    return SDValue();
  }

  // Convert 8-bit and 16-bit compare and swap to a loop, implemented
  // via a fullword ATOMIC_CMP_SWAPW operation.
  int64_t BitSize = NarrowVT.getSizeInBits();
  EVT PtrVT = Addr.getValueType();

  // Get the address of the containing word.
  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
                                    DAG.getConstant(-4, DL, PtrVT));

  // Get the number of bits that the word must be rotated left in order
  // to bring the field to the top bits of a GR32.
  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
                                 DAG.getConstant(3, DL, PtrVT));
  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);

  // Get the complementing shift amount, for rotating a field in the top
  // bits back to its proper position.
  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
                                    DAG.getConstant(0, DL, WideVT), BitShift);

  // Construct the ATOMIC_CMP_SWAPW node.
  SDVTList VTList = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
  SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
                    NegBitShift, DAG.getConstant(BitSize, DL, WideVT) };
  SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
                                             VTList, Ops, NarrowVT, MMO);
  SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1),
                              SystemZ::CCMASK_ICMP, SystemZ::CCMASK_CMP_EQ);

  DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0));
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2));
  return SDValue();
}
SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
  return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
                            SystemZ::R15D, Op.getValueType());
}

SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
  bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");

  SDValue Chain = Op.getOperand(0);
  SDValue NewSP = Op.getOperand(1);
  SDValue Backchain;
  SDLoc DL(Op);

  if (StoreBackchain) {
    SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, MVT::i64);
    Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
  }

  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP);

  if (StoreBackchain)
    Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());

  return Chain;
}
SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
                                             SelectionDAG &DAG) const {
  bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
  if (!IsData)
    // Just preserve the chain.
    return Op.getOperand(0);

  SDLoc DL(Op);
  bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
  unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
  auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
  SDValue Ops[] = {
    Op.getOperand(0),
    DAG.getConstant(Code, DL, MVT::i32),
    Op.getOperand(1)
  };
  return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL,
                                 Node->getVTList(), Ops,
                                 Node->getMemoryVT(), Node->getMemOperand());
}
// Convert condition code in CCReg to an i32 value.
static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg) {
  SDLoc DL(CCReg);
  SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, CCReg);
  return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
                     DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
}
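// Note on getCCResult above: IPM places the 2-bit condition code in bits
// 28-29 of the 32-bit result (counting from the least significant bit), so
// shifting right by SystemZ::IPM_CC (28) leaves the raw CC value 0-3 in the
// low two bits.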
SDValue
SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
                                              SelectionDAG &DAG) const {
  unsigned Opcode, CCValid;
  if (isIntrinsicWithCCAndChain(Op, Opcode, CCValid)) {
    assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
    SDNode *Node = emitIntrinsicWithCCAndChain(DAG, Op, Opcode);
    SDValue CC = getCCResult(DAG, SDValue(Node, 0));
    DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), CC);
    return SDValue();
  }

  return SDValue();
}
SDValue
SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
                                               SelectionDAG &DAG) const {
  unsigned Opcode, CCValid;
  if (isIntrinsicWithCC(Op, Opcode, CCValid)) {
    SDNode *Node = emitIntrinsicWithCC(DAG, Op, Opcode);
    if (Op->getNumValues() == 1)
      return getCCResult(DAG, SDValue(Node, 0));
    assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");
    return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), Op->getVTList(),
                       SDValue(Node, 0), getCCResult(DAG, SDValue(Node, 1)));
  }

  unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (Id) {
  case Intrinsic::thread_pointer:
    return lowerThreadPointer(SDLoc(Op), DAG);

  case Intrinsic::s390_vpdi:
    return DAG.getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vperm:
    return DAG.getNode(SystemZISD::PERMUTE, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
    return DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:
    return DAG.getNode(SystemZISD::UNPACKL_HIGH, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf:
    return DAG.getNode(SystemZISD::UNPACK_LOW, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:
    return DAG.getNode(SystemZISD::UNPACKL_LOW, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vsumb:
  case Intrinsic::s390_vsumh:
  case Intrinsic::s390_vsumgh:
  case Intrinsic::s390_vsumgf:
  case Intrinsic::s390_vsumqf:
  case Intrinsic::s390_vsumqg:
    return DAG.getNode(SystemZISD::VSUM, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }

  return SDValue();
}

namespace {
// Says that SystemZISD operation Opcode can be used to perform the equivalent
// of a VPERM with permute vector Bytes.  If Opcode takes three operands,
// Operand is the constant third operand, otherwise it is the number of
// bytes in each element of the result.
struct Permute {
  unsigned Opcode;
  unsigned Operand;
  unsigned char Bytes[SystemZ::VectorBytes];
};
}

static const Permute PermuteForms[] = {
  // VMRHG
  { SystemZISD::MERGE_HIGH, 8,
    { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
  // VMRHF
  { SystemZISD::MERGE_HIGH, 4,
    { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
  // VMRHH
  { SystemZISD::MERGE_HIGH, 2,
    { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
  // VMRHB
  { SystemZISD::MERGE_HIGH, 1,
    { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
  // VMRLG
  { SystemZISD::MERGE_LOW, 8,
    { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
  // VMRLF
  { SystemZISD::MERGE_LOW, 4,
    { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
  // VMRLH
  { SystemZISD::MERGE_LOW, 2,
    { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
  // VMRLB
  { SystemZISD::MERGE_LOW, 1,
    { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
  // VPKG
  { SystemZISD::PACK, 4,
    { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
  // VPKF
  { SystemZISD::PACK, 2,
    { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
  // VPKH
  { SystemZISD::PACK, 1,
    { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
  // VPDI V1, V2, 4  (low half of V1, high half of V2)
  { SystemZISD::PERMUTE_DWORDS, 4,
    { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
  // VPDI V1, V2, 1  (high half of V1, low half of V2)
  { SystemZISD::PERMUTE_DWORDS, 1,
    { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
};
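// Note on the table above: selector bytes 0-15 refer to the first operand
// and 16-31 to the second.  The first entry, for example, is a doubleword
// merge-high: result bytes 0-7 come from bytes 0-7 of operand 0 and result
// bytes 8-15 come from bytes 0-7 of operand 1.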
// Called after matching a vector shuffle against a particular pattern.
// Both the original shuffle and the pattern have two vector operands.
// OpNos[0] is the operand of the original shuffle that should be used for
// operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything.
// OpNos[1] is the same for operand 1 of the pattern.  Resolve these -1s and
// set OpNo0 and OpNo1 to the shuffle operands that should actually be used
// for operands 0 and 1 of the pattern.
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1) {
  if (OpNos[0] < 0) {
    if (OpNos[1] < 0)
      return false;
    OpNo0 = OpNo1 = OpNos[1];
  } else if (OpNos[1] < 0) {
    OpNo0 = OpNo1 = OpNos[0];
  } else {
    OpNo0 = OpNos[0];
    OpNo1 = OpNos[1];
  }
  return true;
}
// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  Return true if the VPERM can be implemented using P.
// When returning true set OpNo0 to the VPERM operand that should be
// used for operand 0 of P and likewise OpNo1 for operand 1 of P.
//
// For example, if swapping the VPERM operands allows P to match, OpNo0
// will be 1 and OpNo1 will be 0.  If instead Bytes only refers to one
// operand, but rewriting it to use two duplicated operands allows it to
// match P, then OpNo0 and OpNo1 will be the same.
static bool matchPermute(const SmallVectorImpl<int> &Bytes, const Permute &P,
                         unsigned &OpNo0, unsigned &OpNo1) {
  int OpNos[] = { -1, -1 };
  for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) {
    int Elt = Bytes[I];
    if (Elt >= 0) {
      // Make sure that the two permute vectors use the same suboperand
      // byte number.  Only the operand numbers (the high bits) are
      // allowed to differ.
      if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1))
        return false;
      int ModelOpNo = P.Bytes[I] / SystemZ::VectorBytes;
      int RealOpNo = unsigned(Elt) / SystemZ::VectorBytes;
      // Make sure that the operand mappings are consistent with previous
      // elements.
      if (OpNos[ModelOpNo] == 1 - RealOpNo)
        return false;
      OpNos[ModelOpNo] = RealOpNo;
    }
  }
  return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
}

// As above, but search for a matching permute.
static const Permute *matchPermute(const SmallVectorImpl<int> &Bytes,
                                   unsigned &OpNo0, unsigned &OpNo1) {
  for (auto &P : PermuteForms)
    if (matchPermute(Bytes, P, OpNo0, OpNo1))
      return &P;
  return nullptr;
}
// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  This permute is an operand of an outer permute.
// See whether redistributing the -1 bytes gives a shuffle that can be
// implemented using P.  If so, set Transform to a VPERM-like permute vector
// that, when applied to the result of P, gives the original permute in Bytes.
static bool matchDoublePermute(const SmallVectorImpl<int> &Bytes,
                               const Permute &P,
                               SmallVectorImpl<int> &Transform) {
  unsigned To = 0;
  for (unsigned From = 0; From < SystemZ::VectorBytes; ++From) {
    int Elt = Bytes[From];
    if (Elt < 0)
      // Byte number From of the result is undefined.
      Transform[From] = -1;
    else {
      while (P.Bytes[To] != Elt) {
        To += 1;
        if (To == SystemZ::VectorBytes)
          return false;
      }
      Transform[From] = To;
    }
  }
  return true;
}

// As above, but search for a matching permute.
static const Permute *matchDoublePermute(const SmallVectorImpl<int> &Bytes,
                                         SmallVectorImpl<int> &Transform) {
  for (auto &P : PermuteForms)
    if (matchDoublePermute(Bytes, P, Transform))
      return &P;
  return nullptr;
}
// Convert the mask of the given shuffle op into a byte-level mask,
// as if it had type vNi8.
static bool getVPermMask(SDValue ShuffleOp,
                         SmallVectorImpl<int> &Bytes) {
  EVT VT = ShuffleOp.getValueType();
  unsigned NumElements = VT.getVectorNumElements();
  unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();

  if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) {
    Bytes.resize(NumElements * BytesPerElement, -1);
    for (unsigned I = 0; I < NumElements; ++I) {
      int Index = VSN->getMaskElt(I);
      if (Index >= 0)
        for (unsigned J = 0; J < BytesPerElement; ++J)
          Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
    }
    return true;
  }
  if (SystemZISD::SPLAT == ShuffleOp.getOpcode() &&
      isa<ConstantSDNode>(ShuffleOp.getOperand(1))) {
    unsigned Index = ShuffleOp.getConstantOperandVal(1);
    Bytes.resize(NumElements * BytesPerElement, -1);
    for (unsigned I = 0; I < NumElements; ++I)
      for (unsigned J = 0; J < BytesPerElement; ++J)
        Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
    return true;
  }
  return false;
}
// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  See whether bytes [Start, Start + BytesPerElement) of
// the result come from a contiguous sequence of bytes from one input.
// Set Base to the selector for the first byte if so.
static bool getShuffleInput(const SmallVectorImpl<int> &Bytes, unsigned Start,
                            unsigned BytesPerElement, int &Base) {
  Base = -1;
  for (unsigned I = 0; I < BytesPerElement; ++I) {
    if (Bytes[Start + I] >= 0) {
      unsigned Elem = Bytes[Start + I];
      if (Base < 0) {
        Base = Elem - I;
        // Make sure the bytes would come from one input operand.
        if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())
          return false;
      } else if (unsigned(Base) != Elem - I)
        return false;
    }
  }
  return true;
}
// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  Return true if it can be performed using VSLDI.
// When returning true, set StartIndex to the shift amount and OpNo0
// and OpNo1 to the VPERM operands that should be used as the first
// and second shift operand respectively.
static bool isShlDoublePermute(const SmallVectorImpl<int> &Bytes,
                               unsigned &StartIndex, unsigned &OpNo0,
                               unsigned &OpNo1) {
  int OpNos[] = { -1, -1 };
  int Shift = -1;
  for (unsigned I = 0; I < 16; ++I) {
    int Index = Bytes[I];
    if (Index >= 0) {
      int ExpectedShift = (Index - I) % SystemZ::VectorBytes;
      int ModelOpNo = unsigned(ExpectedShift + I) / SystemZ::VectorBytes;
      int RealOpNo = unsigned(Index) / SystemZ::VectorBytes;
      if (Shift < 0)
        Shift = ExpectedShift;
      else if (Shift != ExpectedShift)
        return false;
      // Make sure that the operand mappings are consistent with previous
      // elements.
      if (OpNos[ModelOpNo] == 1 - RealOpNo)
        return false;
      OpNos[ModelOpNo] = RealOpNo;
    }
  }
  StartIndex = Shift;
  return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
}
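// Example for isShlDoublePermute above: a permute vector of { 1, 2, ..., 16 }
// matches with StartIndex == 1, OpNo0 == 0 and OpNo1 == 1: the result is the
// 16 bytes starting at byte 1 of the 32-byte concatenation of the two
// operands, which is exactly what a shift-left-double by one byte produces.
// Undefined (-1) bytes are simply skipped during matching.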
// Create a node that performs P on operands Op0 and Op1, casting the
// operands to the appropriate type.  The type of the result is determined by P.
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
                              const Permute &P, SDValue Op0, SDValue Op1) {
  // VPDI (PERMUTE_DWORDS) always operates on v2i64s.  The input
  // elements of a PACK are twice as wide as the outputs.
  unsigned InBytes = (P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 :
                      P.Opcode == SystemZISD::PACK ? P.Operand * 2 :
                      P.Operand);
  // Cast both operands to the appropriate type.
  MVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBytes * 8),
                              SystemZ::VectorBytes / InBytes);
  Op0 = DAG.getNode(ISD::BITCAST, DL, InVT, Op0);
  Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1);
  SDValue Op;
  if (P.Opcode == SystemZISD::PERMUTE_DWORDS) {
    SDValue Op2 = DAG.getConstant(P.Operand, DL, MVT::i32);
    Op = DAG.getNode(SystemZISD::PERMUTE_DWORDS, DL, InVT, Op0, Op1, Op2);
  } else if (P.Opcode == SystemZISD::PACK) {
    MVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(P.Operand * 8),
                                 SystemZ::VectorBytes / P.Operand);
    Op = DAG.getNode(SystemZISD::PACK, DL, OutVT, Op0, Op1);
  } else {
    Op = DAG.getNode(P.Opcode, DL, InVT, Op0, Op1);
  }
  return Op;
}
// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  Implement it on operands Ops[0] and Ops[1] using
// VSLDI or VPERM.
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
                                     SDValue *Ops,
                                     const SmallVectorImpl<int> &Bytes) {
  for (unsigned I = 0; I < 2; ++I)
    Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]);

  // First see whether VSLDI can be used.
  unsigned StartIndex, OpNo0, OpNo1;
  if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1))
    return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0],
                       Ops[OpNo1], DAG.getConstant(StartIndex, DL, MVT::i32));

  // Fall back on VPERM.  Construct an SDNode for the permute vector.
  SDValue IndexNodes[SystemZ::VectorBytes];
  for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
    if (Bytes[I] >= 0)
      IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32);
    else
      IndexNodes[I] = DAG.getUNDEF(MVT::i32);
  SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes);
  return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0], Ops[1], Op2);
}
4030 // Describes a general N-operand vector shuffle.
4031 struct GeneralShuffle
{
4032 GeneralShuffle(EVT vt
) : VT(vt
) {}
4034 bool add(SDValue
, unsigned);
4035 SDValue
getNode(SelectionDAG
&, const SDLoc
&);
4037 // The operands of the shuffle.
4038 SmallVector
<SDValue
, SystemZ::VectorBytes
> Ops
;
4040 // Index I is -1 if byte I of the result is undefined. Otherwise the
4041 // result comes from byte Bytes[I] % SystemZ::VectorBytes of operand
4042 // Bytes[I] / SystemZ::VectorBytes.
4043 SmallVector
<int, SystemZ::VectorBytes
> Bytes
;
4045 // The type of the shuffle result.
4050 // Add an extra undefined element to the shuffle.
4051 void GeneralShuffle::addUndef() {
4052 unsigned BytesPerElement
= VT
.getVectorElementType().getStoreSize();
4053 for (unsigned I
= 0; I
< BytesPerElement
; ++I
)
4054 Bytes
.push_back(-1);
4057 // Add an extra element to the shuffle, taking it from element Elem of Op.
4058 // A null Op indicates a vector input whose value will be calculated later;
4059 // there is at most one such input per shuffle and it always has the same
4060 // type as the result. Aborts and returns false if the source vector elements
4061 // of an EXTRACT_VECTOR_ELT are smaller than the destination elements. Per
4062 // LLVM they become implicitly extended, but this is rare and not optimized.
4063 bool GeneralShuffle::add(SDValue Op
, unsigned Elem
) {
4064 unsigned BytesPerElement
= VT
.getVectorElementType().getStoreSize();
4066 // The source vector can have wider elements than the result,
4067 // either through an explicit TRUNCATE or because of type legalization.
4068 // We want the least significant part.
4069 EVT FromVT
= Op
.getNode() ? Op
.getValueType() : VT
;
4070 unsigned FromBytesPerElement
= FromVT
.getVectorElementType().getStoreSize();
4072 // Return false if the source elements are smaller than their destination
4074 if (FromBytesPerElement
< BytesPerElement
)
4077 unsigned Byte
= ((Elem
* FromBytesPerElement
) % SystemZ::VectorBytes
+
4078 (FromBytesPerElement
- BytesPerElement
));
4080 // Look through things like shuffles and bitcasts.
4081 while (Op
.getNode()) {
4082 if (Op
.getOpcode() == ISD::BITCAST
)
4083 Op
= Op
.getOperand(0);
4084 else if (Op
.getOpcode() == ISD::VECTOR_SHUFFLE
&& Op
.hasOneUse()) {
4085 // See whether the bytes we need come from a contiguous part of one
4087 SmallVector
<int, SystemZ::VectorBytes
> OpBytes
;
4088 if (!getVPermMask(Op
, OpBytes
))
4091 if (!getShuffleInput(OpBytes
, Byte
, BytesPerElement
, NewByte
))
4097 Op
= Op
.getOperand(unsigned(NewByte
) / SystemZ::VectorBytes
);
4098 Byte
= unsigned(NewByte
) % SystemZ::VectorBytes
;
4099 } else if (Op
.isUndef()) {
4106 // Make sure that the source of the extraction is in Ops.
4108 for (; OpNo
< Ops
.size(); ++OpNo
)
4109 if (Ops
[OpNo
] == Op
)
4111 if (OpNo
== Ops
.size())
4114 // Add the element to Bytes.
4115 unsigned Base
= OpNo
* SystemZ::VectorBytes
+ Byte
;
4116 for (unsigned I
= 0; I
< BytesPerElement
; ++I
)
4117 Bytes
.push_back(Base
+ I
);
4122 // Return SDNodes for the completed shuffle.
4123 SDValue
GeneralShuffle::getNode(SelectionDAG
&DAG
, const SDLoc
&DL
) {
4124 assert(Bytes
.size() == SystemZ::VectorBytes
&& "Incomplete vector");
4126 if (Ops
.size() == 0)
4127 return DAG
.getUNDEF(VT
);
4129 // Make sure that there are at least two shuffle operands.
4130 if (Ops
.size() == 1)
4131 Ops
.push_back(DAG
.getUNDEF(MVT::v16i8
));
4133 // Create a tree of shuffles, deferring root node until after the loop.
4134 // Try to redistribute the undefined elements of non-root nodes so that
4135 // the non-root shuffles match something like a pack or merge, then adjust
4136 // the parent node's permute vector to compensate for the new order.
4137 // Among other things, this copes with vectors like <2 x i16> that were
4138 // padded with undefined elements during type legalization.
4140 // In the best case this redistribution will lead to the whole tree
4141 // using packs and merges. It should rarely be a loss in other cases.
4142 unsigned Stride
= 1;
4143 for (; Stride
* 2 < Ops
.size(); Stride
*= 2) {
4144 for (unsigned I
= 0; I
< Ops
.size() - Stride
; I
+= Stride
* 2) {
4145 SDValue SubOps
[] = { Ops
[I
], Ops
[I
+ Stride
] };
4147 // Create a mask for just these two operands.
4148 SmallVector
<int, SystemZ::VectorBytes
> NewBytes(SystemZ::VectorBytes
);
4149 for (unsigned J
= 0; J
< SystemZ::VectorBytes
; ++J
) {
4150 unsigned OpNo
= unsigned(Bytes
[J
]) / SystemZ::VectorBytes
;
4151 unsigned Byte
= unsigned(Bytes
[J
]) % SystemZ::VectorBytes
;
4154 else if (OpNo
== I
+ Stride
)
4155 NewBytes
[J
] = SystemZ::VectorBytes
+ Byte
;
4159 // See if it would be better to reorganize NewMask to avoid using VPERM.
4160 SmallVector
<int, SystemZ::VectorBytes
> NewBytesMap(SystemZ::VectorBytes
);
4161 if (const Permute
*P
= matchDoublePermute(NewBytes
, NewBytesMap
)) {
4162 Ops
[I
] = getPermuteNode(DAG
, DL
, *P
, SubOps
[0], SubOps
[1]);
4163 // Applying NewBytesMap to Ops[I] gets back to NewBytes.
4164 for (unsigned J
= 0; J
< SystemZ::VectorBytes
; ++J
) {
4165 if (NewBytes
[J
] >= 0) {
4166 assert(unsigned(NewBytesMap
[J
]) < SystemZ::VectorBytes
&&
4167 "Invalid double permute");
4168 Bytes
[J
] = I
* SystemZ::VectorBytes
+ NewBytesMap
[J
];
4170 assert(NewBytesMap
[J
] < 0 && "Invalid double permute");
4173 // Just use NewBytes on the operands.
4174 Ops
[I
] = getGeneralPermuteNode(DAG
, DL
, SubOps
, NewBytes
);
4175 for (unsigned J
= 0; J
< SystemZ::VectorBytes
; ++J
)
4176 if (NewBytes
[J
] >= 0)
4177 Bytes
[J
] = I
* SystemZ::VectorBytes
+ J
;
4182 // Now we just have 2 inputs. Put the second operand in Ops[1].
4184 Ops
[1] = Ops
[Stride
];
4185 for (unsigned I
= 0; I
< SystemZ::VectorBytes
; ++I
)
4186 if (Bytes
[I
] >= int(SystemZ::VectorBytes
))
4187 Bytes
[I
] -= (Stride
- 1) * SystemZ::VectorBytes
;
4190 // Look for an instruction that can do the permute without resorting
4192 unsigned OpNo0
, OpNo1
;
4194 if (const Permute
*P
= matchPermute(Bytes
, OpNo0
, OpNo1
))
4195 Op
= getPermuteNode(DAG
, DL
, *P
, Ops
[OpNo0
], Ops
[OpNo1
]);
4197 Op
= getGeneralPermuteNode(DAG
, DL
, &Ops
[0], Bytes
);
4198 return DAG
.getNode(ISD::BITCAST
, DL
, VT
, Op
);
4201 // Return true if the given BUILD_VECTOR is a scalar-to-vector conversion.
4202 static bool isScalarToVector(SDValue Op
) {
4203 for (unsigned I
= 1, E
= Op
.getNumOperands(); I
!= E
; ++I
)
4204 if (!Op
.getOperand(I
).isUndef())
4209 // Return a vector of type VT that contains Value in the first element.
4210 // The other elements don't matter.
4211 static SDValue
buildScalarToVector(SelectionDAG
&DAG
, const SDLoc
&DL
, EVT VT
,
4213 // If we have a constant, replicate it to all elements and let the
4214 // BUILD_VECTOR lowering take care of it.
4215 if (Value
.getOpcode() == ISD::Constant
||
4216 Value
.getOpcode() == ISD::ConstantFP
) {
4217 SmallVector
<SDValue
, 16> Ops(VT
.getVectorNumElements(), Value
);
4218 return DAG
.getBuildVector(VT
, DL
, Ops
);
4220 if (Value
.isUndef())
4221 return DAG
.getUNDEF(VT
);
4222 return DAG
.getNode(ISD::SCALAR_TO_VECTOR
, DL
, VT
, Value
);
4225 // Return a vector of type VT in which Op0 is in element 0 and Op1 is in
4226 // element 1. Used for cases in which replication is cheap.
4227 static SDValue
buildMergeScalars(SelectionDAG
&DAG
, const SDLoc
&DL
, EVT VT
,
4228 SDValue Op0
, SDValue Op1
) {
4229 if (Op0
.isUndef()) {
4231 return DAG
.getUNDEF(VT
);
4232 return DAG
.getNode(SystemZISD::REPLICATE
, DL
, VT
, Op1
);
4235 return DAG
.getNode(SystemZISD::REPLICATE
, DL
, VT
, Op0
);
4236 return DAG
.getNode(SystemZISD::MERGE_HIGH
, DL
, VT
,
4237 buildScalarToVector(DAG
, DL
, VT
, Op0
),
4238 buildScalarToVector(DAG
, DL
, VT
, Op1
));
4241 // Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64
4243 static SDValue
joinDwords(SelectionDAG
&DAG
, const SDLoc
&DL
, SDValue Op0
,
4245 if (Op0
.isUndef() && Op1
.isUndef())
4246 return DAG
.getUNDEF(MVT::v2i64
);
4247 // If one of the two inputs is undefined then replicate the other one,
4248 // in order to avoid using another register unnecessarily.
4250 Op0
= Op1
= DAG
.getNode(ISD::ANY_EXTEND
, DL
, MVT::i64
, Op1
);
4251 else if (Op1
.isUndef())
4252 Op0
= Op1
= DAG
.getNode(ISD::ANY_EXTEND
, DL
, MVT::i64
, Op0
);
4254 Op0
= DAG
.getNode(ISD::ANY_EXTEND
, DL
, MVT::i64
, Op0
);
4255 Op1
= DAG
.getNode(ISD::ANY_EXTEND
, DL
, MVT::i64
, Op1
);
4257 return DAG
.getNode(SystemZISD::JOIN_DWORDS
, DL
, MVT::v2i64
, Op0
, Op1
);
4260 // Try to represent constant BUILD_VECTOR node BVN using a
4261 // SystemZISD::BYTE_MASK-style mask. Store the mask value in Mask
4263 static bool tryBuildVectorByteMask(BuildVectorSDNode
*BVN
, uint64_t &Mask
) {
4264 EVT ElemVT
= BVN
->getValueType(0).getVectorElementType();
4265 unsigned BytesPerElement
= ElemVT
.getStoreSize();
4266 for (unsigned I
= 0, E
= BVN
->getNumOperands(); I
!= E
; ++I
) {
4267 SDValue Op
= BVN
->getOperand(I
);
4268 if (!Op
.isUndef()) {
4270 if (Op
.getOpcode() == ISD::Constant
)
4271 Value
= cast
<ConstantSDNode
>(Op
)->getZExtValue();
4272 else if (Op
.getOpcode() == ISD::ConstantFP
)
4273 Value
= (cast
<ConstantFPSDNode
>(Op
)->getValueAPF().bitcastToAPInt()
4277 for (unsigned J
= 0; J
< BytesPerElement
; ++J
) {
4278 uint64_t Byte
= (Value
>> (J
* 8)) & 0xff;
4280 Mask
|= 1ULL << ((E
- I
- 1) * BytesPerElement
+ J
);
4289 // Try to load a vector constant in which BitsPerElement-bit value Value
4290 // is replicated to fill the vector. VT is the type of the resulting
4291 // constant, which may have elements of a different size from BitsPerElement.
4292 // Return the SDValue of the constant on success, otherwise return
4294 static SDValue
tryBuildVectorReplicate(SelectionDAG
&DAG
,
4295 const SystemZInstrInfo
*TII
,
4296 const SDLoc
&DL
, EVT VT
, uint64_t Value
,
4297 unsigned BitsPerElement
) {
4298 // Signed 16-bit values can be replicated using VREPI.
4299 // Mark the constants as opaque or DAGCombiner will convert back to
4301 int64_t SignedValue
= SignExtend64(Value
, BitsPerElement
);
4302 if (isInt
<16>(SignedValue
)) {
4303 MVT VecVT
= MVT::getVectorVT(MVT::getIntegerVT(BitsPerElement
),
4304 SystemZ::VectorBits
/ BitsPerElement
);
4305 SDValue Op
= DAG
.getNode(
4306 SystemZISD::REPLICATE
, DL
, VecVT
,
4307 DAG
.getConstant(SignedValue
, DL
, MVT::i32
, false, true /*isOpaque*/));
4308 return DAG
.getNode(ISD::BITCAST
, DL
, VT
, Op
);
4310 // See whether rotating the constant left some N places gives a value that
4311 // is one less than a power of 2 (i.e. all zeros followed by all ones).
4312 // If so we can use VGM.
4313 unsigned Start
, End
;
4314 if (TII
->isRxSBGMask(Value
, BitsPerElement
, Start
, End
)) {
4315 // isRxSBGMask returns the bit numbers for a full 64-bit value,
4316 // with 0 denoting 1 << 63 and 63 denoting 1. Convert them to
4317 // bit numbers for an BitsPerElement value, so that 0 denotes
4318 // 1 << (BitsPerElement-1).
4319 Start
-= 64 - BitsPerElement
;
4320 End
-= 64 - BitsPerElement
;
4321 MVT VecVT
= MVT::getVectorVT(MVT::getIntegerVT(BitsPerElement
),
4322 SystemZ::VectorBits
/ BitsPerElement
);
4323 SDValue Op
= DAG
.getNode(
4324 SystemZISD::ROTATE_MASK
, DL
, VecVT
,
4325 DAG
.getConstant(Start
, DL
, MVT::i32
, false, true /*isOpaque*/),
4326 DAG
.getConstant(End
, DL
, MVT::i32
, false, true /*isOpaque*/));
4327 return DAG
.getNode(ISD::BITCAST
, DL
, VT
, Op
);
4332 // If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually
4333 // better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for
4334 // the non-EXTRACT_VECTOR_ELT elements. See if the given BUILD_VECTOR
4335 // would benefit from this representation and return it if so.
4336 static SDValue
tryBuildVectorShuffle(SelectionDAG
&DAG
,
4337 BuildVectorSDNode
*BVN
) {
4338 EVT VT
= BVN
->getValueType(0);
4339 unsigned NumElements
= VT
.getVectorNumElements();
4341 // Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation
4342 // on byte vectors. If there are non-EXTRACT_VECTOR_ELT elements that still
4343 // need a BUILD_VECTOR, add an additional placeholder operand for that
4344 // BUILD_VECTOR and store its operands in ResidueOps.
4345 GeneralShuffle
GS(VT
);
4346 SmallVector
<SDValue
, SystemZ::VectorBytes
> ResidueOps
;
4347 bool FoundOne
= false;
4348 for (unsigned I
= 0; I
< NumElements
; ++I
) {
4349 SDValue Op
= BVN
->getOperand(I
);
4350 if (Op
.getOpcode() == ISD::TRUNCATE
)
4351 Op
= Op
.getOperand(0);
4352 if (Op
.getOpcode() == ISD::EXTRACT_VECTOR_ELT
&&
4353 Op
.getOperand(1).getOpcode() == ISD::Constant
) {
4354 unsigned Elem
= cast
<ConstantSDNode
>(Op
.getOperand(1))->getZExtValue();
4355 if (!GS
.add(Op
.getOperand(0), Elem
))
4358 } else if (Op
.isUndef()) {
4361 if (!GS
.add(SDValue(), ResidueOps
.size()))
4363 ResidueOps
.push_back(BVN
->getOperand(I
));
4367 // Nothing to do if there are no EXTRACT_VECTOR_ELTs.
4371 // Create the BUILD_VECTOR for the remaining elements, if any.
4372 if (!ResidueOps
.empty()) {
4373 while (ResidueOps
.size() < NumElements
)
4374 ResidueOps
.push_back(DAG
.getUNDEF(ResidueOps
[0].getValueType()));
4375 for (auto &Op
: GS
.Ops
) {
4376 if (!Op
.getNode()) {
4377 Op
= DAG
.getBuildVector(VT
, SDLoc(BVN
), ResidueOps
);
4382 return GS
.getNode(DAG
, SDLoc(BVN
));
4385 // Combine GPR scalar values Elems into a vector of type VT.
4386 static SDValue
buildVector(SelectionDAG
&DAG
, const SDLoc
&DL
, EVT VT
,
4387 SmallVectorImpl
<SDValue
> &Elems
) {
4388 // See whether there is a single replicated value.
4390 unsigned int NumElements
= Elems
.size();
4391 unsigned int Count
= 0;
4392 for (auto Elem
: Elems
) {
4393 if (!Elem
.isUndef()) {
4394 if (!Single
.getNode())
4396 else if (Elem
!= Single
) {
4403 // There are three cases here:
4405 // - if the only defined element is a loaded one, the best sequence
4406 // is a replicating load.
4408 // - otherwise, if the only defined element is an i64 value, we will
4409 // end up with the same VLVGP sequence regardless of whether we short-cut
4410 // for replication or fall through to the later code.
4412 // - otherwise, if the only defined element is an i32 or smaller value,
4413 // we would need 2 instructions to replicate it: VLVGP followed by VREPx.
4414 // This is only a win if the single defined element is used more than once.
4415 // In other cases we're better off using a single VLVGx.
4416 if (Single
.getNode() && (Count
> 1 || Single
.getOpcode() == ISD::LOAD
))
4417 return DAG
.getNode(SystemZISD::REPLICATE
, DL
, VT
, Single
);
4419 // If all elements are loads, use VLREP/VLEs (below).
4420 bool AllLoads
= true;
4421 for (auto Elem
: Elems
)
4422 if (Elem
.getOpcode() != ISD::LOAD
|| cast
<LoadSDNode
>(Elem
)->isIndexed()) {
4427 // The best way of building a v2i64 from two i64s is to use VLVGP.
4428 if (VT
== MVT::v2i64
&& !AllLoads
)
4429 return joinDwords(DAG
, DL
, Elems
[0], Elems
[1]);
4431 // Use a 64-bit merge high to combine two doubles.
4432 if (VT
== MVT::v2f64
&& !AllLoads
)
4433 return buildMergeScalars(DAG
, DL
, VT
, Elems
[0], Elems
[1]);
4435 // Build v4f32 values directly from the FPRs:
4437 // <Axxx> <Bxxx> <Cxxxx> <Dxxx>
4442 if (VT
== MVT::v4f32
&& !AllLoads
) {
4443 SDValue Op01
= buildMergeScalars(DAG
, DL
, VT
, Elems
[0], Elems
[1]);
4444 SDValue Op23
= buildMergeScalars(DAG
, DL
, VT
, Elems
[2], Elems
[3]);
4445 // Avoid unnecessary undefs by reusing the other operand.
4448 else if (Op23
.isUndef())
4450 // Merging identical replications is a no-op.
4451 if (Op01
.getOpcode() == SystemZISD::REPLICATE
&& Op01
== Op23
)
4453 Op01
= DAG
.getNode(ISD::BITCAST
, DL
, MVT::v2i64
, Op01
);
4454 Op23
= DAG
.getNode(ISD::BITCAST
, DL
, MVT::v2i64
, Op23
);
4455 SDValue Op
= DAG
.getNode(SystemZISD::MERGE_HIGH
,
4456 DL
, MVT::v2i64
, Op01
, Op23
);
4457 return DAG
.getNode(ISD::BITCAST
, DL
, VT
, Op
);
4460 // Collect the constant terms.
4461 SmallVector
<SDValue
, SystemZ::VectorBytes
> Constants(NumElements
, SDValue());
4462 SmallVector
<bool, SystemZ::VectorBytes
> Done(NumElements
, false);
4464 unsigned NumConstants
= 0;
4465 for (unsigned I
= 0; I
< NumElements
; ++I
) {
4466 SDValue Elem
= Elems
[I
];
4467 if (Elem
.getOpcode() == ISD::Constant
||
4468 Elem
.getOpcode() == ISD::ConstantFP
) {
4470 Constants
[I
] = Elem
;
4474 // If there was at least one constant, fill in the other elements of
4475 // Constants with undefs to get a full vector constant and use that
4476 // as the starting point.
4478 if (NumConstants
> 0) {
4479 for (unsigned I
= 0; I
< NumElements
; ++I
)
4480 if (!Constants
[I
].getNode())
4481 Constants
[I
] = DAG
.getUNDEF(Elems
[I
].getValueType());
4482 Result
= DAG
.getBuildVector(VT
, DL
, Constants
);
4484 // Otherwise try to use VLREP or VLVGP to start the sequence in order to
4485 // avoid a false dependency on any previous contents of the vector
4488 // Use a VLREP if at least one element is a load.
4489 unsigned LoadElIdx
= UINT_MAX
;
4490 for (unsigned I
= 0; I
< NumElements
; ++I
)
4491 if (Elems
[I
].getOpcode() == ISD::LOAD
&&
4492 cast
<LoadSDNode
>(Elems
[I
])->isUnindexed()) {
4496 if (LoadElIdx
!= UINT_MAX
) {
4497 Result
= DAG
.getNode(SystemZISD::REPLICATE
, DL
, VT
, Elems
[LoadElIdx
]);
4498 Done
[LoadElIdx
] = true;
4500 // Try to use VLVGP.
4501 unsigned I1
= NumElements
/ 2 - 1;
4502 unsigned I2
= NumElements
- 1;
4503 bool Def1
= !Elems
[I1
].isUndef();
4504 bool Def2
= !Elems
[I2
].isUndef();
4506 SDValue Elem1
= Elems
[Def1
? I1
: I2
];
4507 SDValue Elem2
= Elems
[Def2
? I2
: I1
];
4508 Result
= DAG
.getNode(ISD::BITCAST
, DL
, VT
,
4509 joinDwords(DAG
, DL
, Elem1
, Elem2
));
4513 Result
= DAG
.getUNDEF(VT
);
4517 // Use VLVGx to insert the other elements.
4518 for (unsigned I
= 0; I
< NumElements
; ++I
)
4519 if (!Done
[I
] && !Elems
[I
].isUndef())
4520 Result
= DAG
.getNode(ISD::INSERT_VECTOR_ELT
, DL
, VT
, Result
, Elems
[I
],
4521 DAG
.getConstant(I
, DL
, MVT::i32
));
4525 SDValue
SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op
,
4526 SelectionDAG
&DAG
) const {
4527 const SystemZInstrInfo
*TII
=
4528 static_cast<const SystemZInstrInfo
*>(Subtarget
.getInstrInfo());
4529 auto *BVN
= cast
<BuildVectorSDNode
>(Op
.getNode());
4531 EVT VT
= Op
.getValueType();
4533 if (BVN
->isConstant()) {
4534 // Try using VECTOR GENERATE BYTE MASK. This is the architecturally-
4535 // preferred way of creating all-zero and all-one vectors so give it
4536 // priority over other methods below.
4538 if (tryBuildVectorByteMask(BVN
, Mask
)) {
4539 SDValue Op
= DAG
.getNode(
4540 SystemZISD::BYTE_MASK
, DL
, MVT::v16i8
,
4541 DAG
.getConstant(Mask
, DL
, MVT::i32
, false, true /*isOpaque*/));
4542 return DAG
.getNode(ISD::BITCAST
, DL
, VT
, Op
);
4545 // Try using some form of replication.
4546 APInt SplatBits
, SplatUndef
;
4547 unsigned SplatBitSize
;
4549 if (BVN
->isConstantSplat(SplatBits
, SplatUndef
, SplatBitSize
, HasAnyUndefs
,
4551 SplatBitSize
<= 64) {
4552 // First try assuming that any undefined bits above the highest set bit
4553 // and below the lowest set bit are 1s. This increases the likelihood of
4554 // being able to use a sign-extended element value in VECTOR REPLICATE
4555 // IMMEDIATE or a wraparound mask in VECTOR GENERATE MASK.
4556 uint64_t SplatBitsZ
= SplatBits
.getZExtValue();
4557 uint64_t SplatUndefZ
= SplatUndef
.getZExtValue();
4558 uint64_t Lower
= (SplatUndefZ
4559 & ((uint64_t(1) << findFirstSet(SplatBitsZ
)) - 1));
4560 uint64_t Upper
= (SplatUndefZ
4561 & ~((uint64_t(1) << findLastSet(SplatBitsZ
)) - 1));
4562 uint64_t Value
= SplatBitsZ
| Upper
| Lower
;
4563 SDValue Op
= tryBuildVectorReplicate(DAG
, TII
, DL
, VT
, Value
,
4568 // Now try assuming that any undefined bits between the first and
4569 // last defined set bits are set. This increases the chances of
4570 // using a non-wraparound mask.
4571 uint64_t Middle
= SplatUndefZ
& ~Upper
& ~Lower
;
4572 Value
= SplatBitsZ
| Middle
;
4573 Op
= tryBuildVectorReplicate(DAG
, TII
, DL
, VT
, Value
, SplatBitSize
);
4578 // Fall back to loading it from memory.
4582 // See if we should use shuffles to construct the vector from other vectors.
4583 if (SDValue Res
= tryBuildVectorShuffle(DAG
, BVN
))
4586 // Detect SCALAR_TO_VECTOR conversions.
4587 if (isOperationLegal(ISD::SCALAR_TO_VECTOR
, VT
) && isScalarToVector(Op
))
4588 return buildScalarToVector(DAG
, DL
, VT
, Op
.getOperand(0));
4590 // Otherwise use buildVector to build the vector up from GPRs.
4591 unsigned NumElements
= Op
.getNumOperands();
4592 SmallVector
<SDValue
, SystemZ::VectorBytes
> Ops(NumElements
);
4593 for (unsigned I
= 0; I
< NumElements
; ++I
)
4594 Ops
[I
] = Op
.getOperand(I
);
4595 return buildVector(DAG
, DL
, VT
, Ops
);
4598 SDValue
SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op
,
4599 SelectionDAG
&DAG
) const {
4600 auto *VSN
= cast
<ShuffleVectorSDNode
>(Op
.getNode());
4602 EVT VT
= Op
.getValueType();
4603 unsigned NumElements
= VT
.getVectorNumElements();
4605 if (VSN
->isSplat()) {
4606 SDValue Op0
= Op
.getOperand(0);
4607 unsigned Index
= VSN
->getSplatIndex();
4608 assert(Index
< VT
.getVectorNumElements() &&
4609 "Splat index should be defined and in first operand");
4610 // See whether the value we're splatting is directly available as a scalar.
4611 if ((Index
== 0 && Op0
.getOpcode() == ISD::SCALAR_TO_VECTOR
) ||
4612 Op0
.getOpcode() == ISD::BUILD_VECTOR
)
4613 return DAG
.getNode(SystemZISD::REPLICATE
, DL
, VT
, Op0
.getOperand(Index
));
4614 // Otherwise keep it as a vector-to-vector operation.
4615 return DAG
.getNode(SystemZISD::SPLAT
, DL
, VT
, Op
.getOperand(0),
4616 DAG
.getConstant(Index
, DL
, MVT::i32
));
4619 GeneralShuffle
GS(VT
);
4620 for (unsigned I
= 0; I
< NumElements
; ++I
) {
4621 int Elt
= VSN
->getMaskElt(I
);
4624 else if (!GS
.add(Op
.getOperand(unsigned(Elt
) / NumElements
),
4625 unsigned(Elt
) % NumElements
))
4628 return GS
.getNode(DAG
, SDLoc(VSN
));
4631 SDValue
SystemZTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op
,
4632 SelectionDAG
&DAG
) const {
4634 // Just insert the scalar into element 0 of an undefined vector.
4635 return DAG
.getNode(ISD::INSERT_VECTOR_ELT
, DL
,
4636 Op
.getValueType(), DAG
.getUNDEF(Op
.getValueType()),
4637 Op
.getOperand(0), DAG
.getConstant(0, DL
, MVT::i32
));
SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                      SelectionDAG &DAG) const {
  // Handle insertions of floating-point values.
  SDLoc DL(Op);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);
  EVT VT = Op.getValueType();

  // Insertions into constant indices of a v2f64 can be done using VPDI.
  // However, if the inserted value is a bitcast or a constant then it's
  // better to use GPRs, as below.
  if (VT == MVT::v2f64 &&
      Op1.getOpcode() != ISD::BITCAST &&
      Op1.getOpcode() != ISD::ConstantFP &&
      Op2.getOpcode() == ISD::Constant) {
    uint64_t Index = cast<ConstantSDNode>(Op2)->getZExtValue();
    unsigned Mask = VT.getVectorNumElements() - 1;
    if (Index <= Mask)
      return Op;
  }

  // Otherwise bitcast to the equivalent integer form and insert via a GPR.
  MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
  MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements());
  SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT,
                            DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0),
                            DAG.getNode(ISD::BITCAST, DL, IntVT, Op1), Op2);
  return DAG.getNode(ISD::BITCAST, DL, VT, Res);
}
SDValue
SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                               SelectionDAG &DAG) const {
  // Handle extractions of floating-point values.
  SDLoc DL(Op);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  EVT VecVT = Op0.getValueType();

  // Extractions of constant indices can be done directly.
  if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {
    uint64_t Index = CIndexN->getZExtValue();
    unsigned Mask = VecVT.getVectorNumElements() - 1;
    if (Index <= Mask)
      return Op;
  }

  // Otherwise bitcast to the equivalent integer form and extract via a GPR.
  MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits());
  MVT IntVecVT = MVT::getVectorVT(IntVT, VecVT.getVectorNumElements());
  SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntVT,
                            DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), Op1);
  return DAG.getNode(ISD::BITCAST, DL, VT, Res);
}
SDValue
SystemZTargetLowering::lowerExtendVectorInreg(SDValue Op, SelectionDAG &DAG,
                                              unsigned UnpackHigh) const {
  SDValue PackedOp = Op.getOperand(0);
  EVT OutVT = Op.getValueType();
  EVT InVT = PackedOp.getValueType();
  unsigned ToBits = OutVT.getScalarSizeInBits();
  unsigned FromBits = InVT.getScalarSizeInBits();
  do {
    FromBits *= 2;
    EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits),
                                 SystemZ::VectorBits / FromBits);
    PackedOp = DAG.getNode(UnpackHigh, SDLoc(PackedOp), OutVT, PackedOp);
  } while (FromBits != ToBits);
  return PackedOp;
}
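
// Illustration: a SIGN_EXTEND_VECTOR_INREG from v16i8 to v4i32 is emitted as
// two chained unpacks, v16i8 -> v8i16 -> v4i32, with the element width doubled
// on each iteration of the loop above until it matches the result type.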
SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG,
                                          unsigned ByScalar) const {
  // Look for cases where a vector shift can use the *_BY_SCALAR form.
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  unsigned ElemBitSize = VT.getScalarSizeInBits();

  // See whether the shift vector is a splat represented as BUILD_VECTOR.
  if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
    APInt SplatBits, SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;
    // Check for constant splats.  Use ElemBitSize as the minimum element
    // width and reject splats that need wider elements.
    if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
                             ElemBitSize, true) &&
        SplatBitSize == ElemBitSize) {
      SDValue Shift = DAG.getConstant(SplatBits.getZExtValue() & 0xfff,
                                      DL, MVT::i32);
      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
    }
    // Check for variable splats.
    BitVector UndefElements;
    SDValue Splat = BVN->getSplatValue(&UndefElements);
    if (Splat) {
      // Since i32 is the smallest legal type, we either need a no-op
      // or a truncation.
      SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Splat);
      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
    }
  }

  // See whether the shift vector is a splat represented as SHUFFLE_VECTOR,
  // and the shift amount is directly available in a GPR.
  if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
    if (VSN->isSplat()) {
      SDValue VSNOp0 = VSN->getOperand(0);
      unsigned Index = VSN->getSplatIndex();
      assert(Index < VT.getVectorNumElements() &&
             "Splat index should be defined and in first operand");
      if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
          VSNOp0.getOpcode() == ISD::BUILD_VECTOR) {
        // Since i32 is the smallest legal type, we either need a no-op
        // or a truncation.
        SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
                                    VSNOp0.getOperand(Index));
        return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
      }
    }
  }

  // Otherwise just treat the current form as legal.
  return Op;
}
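
// For example, a v4i32 shift whose amount operand is a BUILD_VECTOR splat of
// the constant 5 takes the first branch above and becomes a single
// *_BY_SCALAR node with the scalar shift amount 5, instead of a full
// vector-by-vector shift.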
SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
                                              SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::BR_CC:
    return lowerBR_CC(Op, DAG);
  case ISD::SELECT_CC:
    return lowerSELECT_CC(Op, DAG);
  case ISD::SETCC:
    return lowerSETCC(Op, DAG);
  case ISD::GlobalAddress:
    return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
  case ISD::JumpTable:
    return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
  case ISD::ConstantPool:
    return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
  case ISD::BITCAST:
    return lowerBITCAST(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::VACOPY:
    return lowerVACOPY(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    return lowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::GET_DYNAMIC_AREA_OFFSET:
    return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
  case ISD::SMUL_LOHI:
    return lowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:
    return lowerUMUL_LOHI(Op, DAG);
  case ISD::SDIVREM:
    return lowerSDIVREM(Op, DAG);
  case ISD::UDIVREM:
    return lowerUDIVREM(Op, DAG);
  case ISD::SADDO:
  case ISD::SSUBO:
  case ISD::UADDO:
  case ISD::USUBO:
    return lowerXALUO(Op, DAG);
  case ISD::ADDCARRY:
  case ISD::SUBCARRY:
    return lowerADDSUBCARRY(Op, DAG);
  case ISD::OR:
    return lowerOR(Op, DAG);
  case ISD::CTPOP:
    return lowerCTPOP(Op, DAG);
  case ISD::ATOMIC_FENCE:
    return lowerATOMIC_FENCE(Op, DAG);
  case ISD::ATOMIC_SWAP:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW);
  case ISD::ATOMIC_STORE:
    return lowerATOMIC_STORE(Op, DAG);
  case ISD::ATOMIC_LOAD:
    return lowerATOMIC_LOAD(Op, DAG);
  case ISD::ATOMIC_LOAD_ADD:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
  case ISD::ATOMIC_LOAD_SUB:
    return lowerATOMIC_LOAD_SUB(Op, DAG);
  case ISD::ATOMIC_LOAD_AND:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
  case ISD::ATOMIC_LOAD_OR:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
  case ISD::ATOMIC_LOAD_XOR:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
  case ISD::ATOMIC_LOAD_NAND:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
  case ISD::ATOMIC_LOAD_MIN:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
  case ISD::ATOMIC_LOAD_MAX:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
  case ISD::ATOMIC_LOAD_UMIN:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
  case ISD::ATOMIC_LOAD_UMAX:
    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    return lowerATOMIC_CMP_SWAP(Op, DAG);
  case ISD::STACKSAVE:
    return lowerSTACKSAVE(Op, DAG);
  case ISD::STACKRESTORE:
    return lowerSTACKRESTORE(Op, DAG);
  case ISD::PREFETCH:
    return lowerPREFETCH(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN:
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN:
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::BUILD_VECTOR:
    return lowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return lowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:
    return lowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return lowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACK_HIGH);
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACKL_HIGH);
  case ISD::SHL:
    return lowerShift(Op, DAG, SystemZISD::VSHL_BY_SCALAR);
  case ISD::SRL:
    return lowerShift(Op, DAG, SystemZISD::VSRL_BY_SCALAR);
  case ISD::SRA:
    return lowerShift(Op, DAG, SystemZISD::VSRA_BY_SCALAR);
  default:
    llvm_unreachable("Unexpected node to lower");
  }
}
// Lower operations with invalid operand or result types (currently used
// only for 128-bit integer types).

static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In) {
  SDLoc DL(In);
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
                           DAG.getIntPtrConstant(0, DL));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
                           DAG.getIntPtrConstant(1, DL));
  SDNode *Pair = DAG.getMachineNode(SystemZ::PAIR128, DL,
                                    MVT::Untyped, Hi, Lo);
  return SDValue(Pair, 0);
}

static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In) {
  SDLoc DL(In);
  SDValue Hi = DAG.getTargetExtractSubreg(SystemZ::subreg_h64,
                                          DL, MVT::i64, In);
  SDValue Lo = DAG.getTargetExtractSubreg(SystemZ::subreg_l64,
                                          DL, MVT::i64, In);
  return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, Lo, Hi);
}
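
// These two helpers are inverses of each other: lowerI128ToGR128 splits an
// i128 into its high and low i64 halves and glues them into an untyped
// 128-bit register pair, while lowerGR128ToI128 extracts the subreg_h64 and
// subreg_l64 halves of such a pair and rebuilds the i128 with BUILD_PAIR.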
void
SystemZTargetLowering::LowerOperationWrapper(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::ATOMIC_LOAD: {
    SDLoc DL(N);
    SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::Other);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
    SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_LOAD_128,
                                          DL, Tys, Ops, MVT::i128, MMO);
    Results.push_back(lowerGR128ToI128(DAG, Res));
    Results.push_back(Res.getValue(1));
    break;
  }
  case ISD::ATOMIC_STORE: {
    SDLoc DL(N);
    SDVTList Tys = DAG.getVTList(MVT::Other);
    SDValue Ops[] = { N->getOperand(0),
                      lowerI128ToGR128(DAG, N->getOperand(2)),
                      N->getOperand(1) };
    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
    SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_STORE_128,
                                          DL, Tys, Ops, MVT::i128, MMO);
    // We have to enforce sequential consistency by performing a
    // serialization operation after the store.
    if (cast<AtomicSDNode>(N)->getOrdering() ==
        AtomicOrdering::SequentiallyConsistent)
      Res = SDValue(DAG.getMachineNode(SystemZ::Serialize, DL,
                                       MVT::Other, Res), 0);
    Results.push_back(Res);
    break;
  }
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
    SDLoc DL(N);
    SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                      lowerI128ToGR128(DAG, N->getOperand(2)),
                      lowerI128ToGR128(DAG, N->getOperand(3)) };
    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
    SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP_128,
                                          DL, Tys, Ops, MVT::i128, MMO);
    SDValue Success = emitSETCC(DAG, DL, Res.getValue(1),
                                SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ);
    Success = DAG.getZExtOrTrunc(Success, DL, N->getValueType(1));
    Results.push_back(lowerGR128ToI128(DAG, Res));
    Results.push_back(Success);
    Results.push_back(Res.getValue(2));
    break;
  }
  default:
    llvm_unreachable("Unexpected node to lower");
  }
}

void
SystemZTargetLowering::ReplaceNodeResults(SDNode *N,
                                          SmallVectorImpl<SDValue> &Results,
                                          SelectionDAG &DAG) const {
  return LowerOperationWrapper(N, Results, DAG);
}
const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
  switch ((SystemZISD::NodeType)Opcode) {
    case SystemZISD::FIRST_NUMBER: break;
    OPCODE(PCREL_WRAPPER);
    OPCODE(PCREL_OFFSET);
    OPCODE(SELECT_CCMASK);
    OPCODE(ADJDYNALLOC);
    OPCODE(SEARCH_STRING);
    OPCODE(TBEGIN_NOFLOAT);
    OPCODE(ROTATE_MASK);
    OPCODE(JOIN_DWORDS);
    OPCODE(PERMUTE_DWORDS);
    OPCODE(UNPACK_HIGH);
    OPCODE(UNPACKL_HIGH);
    OPCODE(UNPACKL_LOW);
    OPCODE(VSHL_BY_SCALAR);
    OPCODE(VSRL_BY_SCALAR);
    OPCODE(VSRA_BY_SCALAR);
    OPCODE(ATOMIC_SWAPW);
    OPCODE(ATOMIC_LOADW_ADD);
    OPCODE(ATOMIC_LOADW_SUB);
    OPCODE(ATOMIC_LOADW_AND);
    OPCODE(ATOMIC_LOADW_OR);
    OPCODE(ATOMIC_LOADW_XOR);
    OPCODE(ATOMIC_LOADW_NAND);
    OPCODE(ATOMIC_LOADW_MIN);
    OPCODE(ATOMIC_LOADW_MAX);
    OPCODE(ATOMIC_LOADW_UMIN);
    OPCODE(ATOMIC_LOADW_UMAX);
    OPCODE(ATOMIC_CMP_SWAPW);
    OPCODE(ATOMIC_CMP_SWAP);
    OPCODE(ATOMIC_LOAD_128);
    OPCODE(ATOMIC_STORE_128);
    OPCODE(ATOMIC_CMP_SWAP_128);
  }
  return nullptr;
#undef OPCODE
}
// Return true if VT is a vector whose elements are a whole number of bytes
// in width.  Also check for presence of vector support.
bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const {
  if (!Subtarget.hasVector())
    return false;

  return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0 && VT.isSimple();
}
// Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT
// producing a result of type ResVT.  Op is a possibly bitcast version
// of the input vector and Index is the index (based on type VecVT) that
// should be extracted.  Return the new extraction if a simplification
// was possible or if Force is true.
SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
                                              EVT VecVT, SDValue Op,
                                              unsigned Index,
                                              DAGCombinerInfo &DCI,
                                              bool Force) const {
  SelectionDAG &DAG = DCI.DAG;

  // The number of bytes being extracted.
  unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();

  for (;;) {
    unsigned Opcode = Op.getOpcode();
    if (Opcode == ISD::BITCAST)
      // Look through bitcasts.
      Op = Op.getOperand(0);
    else if ((Opcode == ISD::VECTOR_SHUFFLE || Opcode == SystemZISD::SPLAT) &&
             canTreatAsByteVector(Op.getValueType())) {
      // Get a VPERM-like permute mask and see whether the bytes covered
      // by the extracted element are a contiguous sequence from one
      // input vector.
      SmallVector<int, SystemZ::VectorBytes> Bytes;
      if (!getVPermMask(Op, Bytes))
        break;
      int First;
      if (!getShuffleInput(Bytes, Index * BytesPerElement,
                           BytesPerElement, First))
        break;
      if (First < 0)
        return DAG.getUNDEF(ResVT);
      // Make sure the contiguous sequence starts at a multiple of the
      // original element size.
      unsigned Byte = unsigned(First) % Bytes.size();
      if (Byte % BytesPerElement != 0)
        break;
      // We can get the extracted value directly from an input.
      Index = Byte / BytesPerElement;
      Op = Op.getOperand(unsigned(First) / Bytes.size());
      Force = true;
    } else if (Opcode == ISD::BUILD_VECTOR &&
               canTreatAsByteVector(Op.getValueType())) {
      // We can only optimize this case if the BUILD_VECTOR elements are
      // at least as wide as the extracted value.
      EVT OpVT = Op.getValueType();
      unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
      if (OpBytesPerElement < BytesPerElement)
        break;
      // Make sure that the least-significant bit of the extracted value
      // is the least significant bit of an input.
      unsigned End = (Index + 1) * BytesPerElement;
      if (End % OpBytesPerElement != 0)
        break;
      // We're extracting the low part of one operand of the BUILD_VECTOR.
      Op = Op.getOperand(End / OpBytesPerElement - 1);
      if (!Op.getValueType().isInteger()) {
        EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits());
        Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
        DCI.AddToWorklist(Op.getNode());
      }
      EVT VT = MVT::getIntegerVT(ResVT.getSizeInBits());
      Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
      if (VT != ResVT) {
        DCI.AddToWorklist(Op.getNode());
        Op = DAG.getNode(ISD::BITCAST, DL, ResVT, Op);
      }
      return Op;
    } else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
                Opcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
                Opcode == ISD::ANY_EXTEND_VECTOR_INREG) &&
               canTreatAsByteVector(Op.getValueType()) &&
               canTreatAsByteVector(Op.getOperand(0).getValueType())) {
      // Make sure that only the unextended bits are significant.
      EVT ExtVT = Op.getValueType();
      EVT OpVT = Op.getOperand(0).getValueType();
      unsigned ExtBytesPerElement = ExtVT.getVectorElementType().getStoreSize();
      unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
      unsigned Byte = Index * BytesPerElement;
      unsigned SubByte = Byte % ExtBytesPerElement;
      unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
      if (SubByte < MinSubByte ||
          SubByte + BytesPerElement > ExtBytesPerElement)
        break;
      // Get the byte offset of the unextended element
      Byte = Byte / ExtBytesPerElement * OpBytesPerElement;
      // ...then add the byte offset relative to that element.
      Byte += SubByte - MinSubByte;
      if (Byte % BytesPerElement != 0)
        break;
      Op = Op.getOperand(0);
      Index = Byte / BytesPerElement;
      Force = true;
    } else
      break;
  }
  if (Force) {
    if (Op.getValueType() != VecVT) {
      Op = DAG.getNode(ISD::BITCAST, DL, VecVT, Op);
      DCI.AddToWorklist(Op.getNode());
    }
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Op,
                       DAG.getConstant(Index, DL, MVT::i32));
  }
  return SDValue();
}
// Optimize vector operations in scalar value Op on the basis that Op
// is truncated to TruncVT.
SDValue SystemZTargetLowering::combineTruncateExtract(
    const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const {
  // If we have (trunc (extract_vector_elt X, Y)), try to turn it into
  // (extract_vector_elt (bitcast X), Y'), where (bitcast X) has elements
  // of type TruncVT.
  if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      TruncVT.getSizeInBits() % 8 == 0) {
    SDValue Vec = Op.getOperand(0);
    EVT VecVT = Vec.getValueType();
    if (canTreatAsByteVector(VecVT)) {
      if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
        unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();
        unsigned TruncBytes = TruncVT.getStoreSize();
        if (BytesPerElement % TruncBytes == 0) {
          // Calculate the value of Y' in the above description.  We are
          // splitting the original elements into Scale equal-sized pieces
          // and for truncation purposes want the last (least-significant)
          // of these pieces for IndexN.  This is easiest to do by calculating
          // the start index of the following element and then subtracting 1.
          unsigned Scale = BytesPerElement / TruncBytes;
          unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;

          // Defer the creation of the bitcast from X to combineExtract,
          // which might be able to optimize the extraction.
          VecVT = MVT::getVectorVT(MVT::getIntegerVT(TruncBytes * 8),
                                   VecVT.getStoreSize() / TruncBytes);
          EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
          return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
        }
      }
    }
  }
  return SDValue();
}
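
// Worked example: truncating element 1 of a v2i64 to i8 gives
// BytesPerElement = 8 and TruncBytes = 1, so Scale = 8 and
// NewIndex = (1 + 1) * 8 - 1 = 15, i.e. the last byte of that element in the
// bitcast v16i8 vector, which is its least-significant byte on SystemZ.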
SDValue SystemZTargetLowering::combineZERO_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // Convert (zext (select_ccmask C1, C2)) into (select_ccmask C1', C2')
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  if (N0.getOpcode() == SystemZISD::SELECT_CCMASK) {
    auto *TrueOp = dyn_cast<ConstantSDNode>(N0.getOperand(0));
    auto *FalseOp = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (TrueOp && FalseOp) {
      SDLoc DL(N0);
      SDValue Ops[] = { DAG.getConstant(TrueOp->getZExtValue(), DL, VT),
                        DAG.getConstant(FalseOp->getZExtValue(), DL, VT),
                        N0.getOperand(2), N0.getOperand(3), N0.getOperand(4) };
      SDValue NewSelect = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VT, Ops);
      // If N0 has multiple uses, change other uses as well.
      if (!N0.hasOneUse()) {
        SDValue TruncSelect =
          DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), NewSelect);
        DCI.CombineTo(N0.getNode(), TruncSelect);
      }
      return NewSelect;
    }
  }
  return SDValue();
}
SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // Convert (sext_in_reg (setcc LHS, RHS, COND), i1)
  // and (sext_in_reg (any_extend (setcc LHS, RHS, COND)), i1)
  // into (select_cc LHS, RHS, -1, 0, COND)
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
  if (N0.hasOneUse() && N0.getOpcode() == ISD::ANY_EXTEND)
    N0 = N0.getOperand(0);
  if (EVT == MVT::i1 && N0.hasOneUse() && N0.getOpcode() == ISD::SETCC) {
    SDLoc DL(N0);
    SDValue Ops[] = { N0.getOperand(0), N0.getOperand(1),
                      DAG.getConstant(-1, DL, VT), DAG.getConstant(0, DL, VT),
                      N0.getOperand(2) };
    return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
  }
  return SDValue();
}
SDValue SystemZTargetLowering::combineSIGN_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // Convert (sext (ashr (shl X, C1), C2)) to
  // (ashr (shl (anyext X), C1'), C2')), since wider shifts are as
  // cheap as narrower ones.
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
    auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    SDValue Inner = N0.getOperand(0);
    if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
      if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
        unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits());
        unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
        unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
        EVT ShiftVT = N0.getOperand(1).getValueType();
        SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT,
                                  Inner.getOperand(0));
        SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext,
                                  DAG.getConstant(NewShlAmt, SDLoc(Inner),
                                                  ShiftVT));
        return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl,
                           DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT));
      }
    }
  }
  return SDValue();
}
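
// For example, (sext i64 (sra (shl X:i32, 24), 24)) has Extra = 64 - 32 = 32,
// so it becomes (sra (shl (anyext X to i64), 56), 56).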
SDValue SystemZTargetLowering::combineMERGE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  unsigned Opcode = N->getOpcode();
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  if (Op0.getOpcode() == ISD::BITCAST)
    Op0 = Op0.getOperand(0);
  if (Op0.getOpcode() == SystemZISD::BYTE_MASK &&
      cast<ConstantSDNode>(Op0.getOperand(0))->getZExtValue() == 0) {
    // (z_merge_* 0, 0) -> 0.  This is mostly useful for using VLLEZF
    // for v4f32.
    if (Op1 == N->getOperand(0))
      return Op1;
    // (z_merge_? 0, X) -> (z_unpackl_? 0, X).
    EVT VT = Op1.getValueType();
    unsigned ElemBytes = VT.getVectorElementType().getStoreSize();
    if (ElemBytes <= 4) {
      Opcode = (Opcode == SystemZISD::MERGE_HIGH ?
                SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW);
      EVT InVT = VT.changeVectorElementTypeToInteger();
      EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(ElemBytes * 16),
                                   SystemZ::VectorBytes / ElemBytes / 2);
      if (VT != InVT) {
        Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1);
        DCI.AddToWorklist(Op1.getNode());
      }
      SDValue Op = DAG.getNode(Opcode, SDLoc(N), OutVT, Op1);
      DCI.AddToWorklist(Op.getNode());
      return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
    }
  }
  return SDValue();
}
SDValue SystemZTargetLowering::combineSTORE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  auto *SN = cast<StoreSDNode>(N);
  auto &Op1 = N->getOperand(1);
  EVT MemVT = SN->getMemoryVT();
  // If we have (truncstoreiN (extract_vector_elt X, Y), Z) then it is better
  // for the extraction to be done on a vMiN value, so that we can use VSTE.
  // If X has wider elements then convert it to:
  // (truncstoreiN (extract_vector_elt (bitcast X), Y2), Z).
  if (MemVT.isInteger() && SN->isTruncatingStore()) {
    if (SDValue Value =
            combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
      DCI.AddToWorklist(Value.getNode());

      // Rewrite the store with the new form of stored value.
      return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value,
                               SN->getBasePtr(), SN->getMemoryVT(),
                               SN->getMemOperand());
    }
  }
  // Combine STORE (BSWAP) into STRVH/STRV/STRVG
  if (!SN->isTruncatingStore() &&
      Op1.getOpcode() == ISD::BSWAP &&
      Op1.getNode()->hasOneUse() &&
      (Op1.getValueType() == MVT::i16 ||
       Op1.getValueType() == MVT::i32 ||
       Op1.getValueType() == MVT::i64)) {

      SDValue BSwapOp = Op1.getOperand(0);

      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), MVT::i32, BSwapOp);

      SDValue Ops[] = {
        N->getOperand(0), BSwapOp, N->getOperand(2),
        DAG.getValueType(Op1.getValueType())
      };

      return
        DAG.getMemIntrinsicNode(SystemZISD::STRV, SDLoc(N), DAG.getVTList(MVT::Other),
                                Ops, MemVT, SN->getMemOperand());
    }
  return SDValue();
}
SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
    SDNode *N, DAGCombinerInfo &DCI) const {

  if (!Subtarget.hasVector())
    return SDValue();

  // Try to simplify a vector extraction.
  if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
    SDValue Op0 = N->getOperand(0);
    EVT VecVT = Op0.getValueType();
    return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
                          IndexN->getZExtValue(), DCI, false);
  }
  return SDValue();
}
SDValue SystemZTargetLowering::combineJOIN_DWORDS(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  // (join_dwords X, X) == (replicate X)
  if (N->getOperand(0) == N->getOperand(1))
    return DAG.getNode(SystemZISD::REPLICATE, SDLoc(N), N->getValueType(0),
                       N->getOperand(0));
  return SDValue();
}
SDValue SystemZTargetLowering::combineFP_ROUND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // (fpround (extract_vector_elt X 0))
  // (fpround (extract_vector_elt X 1)) ->
  // (extract_vector_elt (VROUND X) 0)
  // (extract_vector_elt (VROUND X) 1)
  //
  // This is a special case since the target doesn't really support v2f32s.
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op0 = N->getOperand(0);
  if (N->getValueType(0) == MVT::f32 &&
      Op0.hasOneUse() &&
      Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      Op0.getOperand(0).getValueType() == MVT::v2f64 &&
      Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
    SDValue Vec = Op0.getOperand(0);
    for (auto *U : Vec->uses()) {
      if (U != Op0.getNode() &&
          U->hasOneUse() &&
          U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          U->getOperand(0) == Vec &&
          U->getOperand(1).getOpcode() == ISD::Constant &&
          cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) {
        SDValue OtherRound = SDValue(*U->use_begin(), 0);
        if (OtherRound.getOpcode() == ISD::FP_ROUND &&
            OtherRound.getOperand(0) == SDValue(U, 0) &&
            OtherRound.getValueType() == MVT::f32) {
          SDValue VRound = DAG.getNode(SystemZISD::VROUND, SDLoc(N),
                                       MVT::v4f32, Vec);
          DCI.AddToWorklist(VRound.getNode());
          SDValue Extract1 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f32,
                        VRound, DAG.getConstant(2, SDLoc(U), MVT::i32));
          DCI.AddToWorklist(Extract1.getNode());
          DAG.ReplaceAllUsesOfValueWith(OtherRound, Extract1);
          SDValue Extract0 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f32,
                        VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
          return Extract0;
        }
      }
    }
  }
  return SDValue();
}
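
// After this combine, the two f32 roundings of elements 0 and 1 of the v2f64
// both read from a single VROUND result: its v4f32 output holds the rounded
// values in elements 0 and 2, which is where the two extracts above point.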
SDValue SystemZTargetLowering::combineBSWAP(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  // Combine BSWAP (LOAD) into LRVH/LRV/LRVG
  if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
      N->getOperand(0).hasOneUse() &&
      (N->getValueType(0) == MVT::i16 || N->getValueType(0) == MVT::i32 ||
       N->getValueType(0) == MVT::i64)) {
      SDValue Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);

      // Create the byte-swapping load.
      SDValue Ops[] = {
        LD->getChain(),    // Chain
        LD->getBasePtr(),  // Ptr
        DAG.getValueType(N->getValueType(0)) // VT
      };
      SDValue BSLoad =
        DAG.getMemIntrinsicNode(SystemZISD::LRV, SDLoc(N),
                                DAG.getVTList(N->getValueType(0) == MVT::i64 ?
                                              MVT::i64 : MVT::i32, MVT::Other),
                                Ops, LD->getMemoryVT(), LD->getMemOperand());

      // If this is an i16 load, insert the truncate.
      SDValue ResVal = BSLoad;
      if (N->getValueType(0) == MVT::i16)
        ResVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i16, BSLoad);

      // First, combine the bswap away.  This makes the value produced by the
      // load dead.
      DCI.CombineTo(N, ResVal);

      // Next, combine the load away, we give it a bogus result value but a real
      // chain result.  The result value is dead because the bswap is dead.
      DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDValue(N, 0);
    }
  return SDValue();
}
static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
  // We have a SELECT_CCMASK or BR_CCMASK comparing the condition code
  // set by the CCReg instruction using the CCValid / CCMask masks.
  // If the CCReg instruction is itself a (ICMP (SELECT_CCMASK)) testing
  // the condition code set by some other instruction, see whether we
  // can directly use that condition code.
  bool Invert = false;

  // Verify that we have an appropriate mask for a EQ or NE comparison.
  if (CCValid != SystemZ::CCMASK_ICMP)
    return false;
  if (CCMask == SystemZ::CCMASK_CMP_NE)
    Invert = !Invert;
  else if (CCMask != SystemZ::CCMASK_CMP_EQ)
    return false;

  // Verify that we have an ICMP that is the user of a SELECT_CCMASK.
  SDNode *ICmp = CCReg.getNode();
  if (ICmp->getOpcode() != SystemZISD::ICMP)
    return false;
  SDNode *Select = ICmp->getOperand(0).getNode();
  if (Select->getOpcode() != SystemZISD::SELECT_CCMASK)
    return false;

  // Verify that the ICMP compares against one of select values.
  auto *CompareVal = dyn_cast<ConstantSDNode>(ICmp->getOperand(1));
  if (!CompareVal)
    return false;
  auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0));
  if (!TrueVal)
    return false;
  auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1));
  if (!FalseVal)
    return false;
  if (CompareVal->getZExtValue() == FalseVal->getZExtValue())
    Invert = !Invert;
  else if (CompareVal->getZExtValue() != TrueVal->getZExtValue())
    return false;

  // Compute the effective CC mask for the new branch or select.
  auto *NewCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2));
  auto *NewCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3));
  if (!NewCCValid || !NewCCMask)
    return false;
  CCValid = NewCCValid->getZExtValue();
  CCMask = NewCCMask->getZExtValue();
  if (Invert)
    CCMask ^= CCValid;

  // Return the updated CCReg link.
  CCReg = Select->getOperand(4);
  return true;
}
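
// In DAG terms the pattern recognized here is
//   (icmp (select_ccmask TrueC, FalseC, ValidM, CCM, CCIn), C) for EQ/NE,
// where C equals TrueC or FalseC.  Instead of materializing the select result
// and comparing it, the caller can test CCIn directly with the select's own
// CCValid/CCMask, inverted when the comparison picks the false value or is NE.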
SDValue SystemZTargetLowering::combineBR_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // Combine BR_CCMASK (ICMP (SELECT_CCMASK)) into a single BR_CCMASK.
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
    return SDValue();

  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  SDValue Chain = N->getOperand(0);
  SDValue CCReg = N->getOperand(4);

  if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
    return DAG.getNode(SystemZISD::BR_CCMASK, SDLoc(N), N->getValueType(0),
                       Chain,
                       DAG.getConstant(CCValidVal, SDLoc(N), MVT::i32),
                       DAG.getConstant(CCMaskVal, SDLoc(N), MVT::i32),
                       N->getOperand(3), CCReg);
  return SDValue();
}
SDValue SystemZTargetLowering::combineSELECT_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // Combine SELECT_CCMASK (ICMP (SELECT_CCMASK)) into a single SELECT_CCMASK.
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3));
  if (!CCValid || !CCMask)
    return SDValue();

  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  SDValue CCReg = N->getOperand(4);

  if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
    return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
                       N->getOperand(0),
                       N->getOperand(1),
                       DAG.getConstant(CCValidVal, SDLoc(N), MVT::i32),
                       DAG.getConstant(CCMaskVal, SDLoc(N), MVT::i32),
                       CCReg);
  return SDValue();
}
SDValue SystemZTargetLowering::combineGET_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {

  // Optimize away GET_CCMASK (SELECT_CCMASK) if the CC masks are compatible
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
    return SDValue();
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();

  SDValue Select = N->getOperand(0);
  if (Select->getOpcode() != SystemZISD::SELECT_CCMASK)
    return SDValue();

  auto *SelectCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2));
  auto *SelectCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3));
  if (!SelectCCValid || !SelectCCMask)
    return SDValue();
  int SelectCCValidVal = SelectCCValid->getZExtValue();
  int SelectCCMaskVal = SelectCCMask->getZExtValue();

  auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0));
  auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1));
  if (!TrueVal || !FalseVal)
    return SDValue();
  if (TrueVal->getZExtValue() != 0 && FalseVal->getZExtValue() == 0)
    ;
  else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() != 0)
    SelectCCMaskVal ^= SelectCCValidVal;
  else
    return SDValue();

  if (SelectCCValidVal & ~CCValidVal)
    return SDValue();
  if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
    return SDValue();

  return Select->getOperand(4);
}
SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  switch(N->getOpcode()) {
  default: break;
  case ISD::ZERO_EXTEND:        return combineZERO_EXTEND(N, DCI);
  case ISD::SIGN_EXTEND:        return combineSIGN_EXTEND(N, DCI);
  case ISD::SIGN_EXTEND_INREG:  return combineSIGN_EXTEND_INREG(N, DCI);
  case SystemZISD::MERGE_HIGH:
  case SystemZISD::MERGE_LOW:   return combineMERGE(N, DCI);
  case ISD::STORE:              return combineSTORE(N, DCI);
  case ISD::EXTRACT_VECTOR_ELT: return combineEXTRACT_VECTOR_ELT(N, DCI);
  case SystemZISD::JOIN_DWORDS: return combineJOIN_DWORDS(N, DCI);
  case ISD::FP_ROUND:           return combineFP_ROUND(N, DCI);
  case ISD::BSWAP:              return combineBSWAP(N, DCI);
  case SystemZISD::BR_CCMASK:   return combineBR_CCMASK(N, DCI);
  case SystemZISD::SELECT_CCMASK: return combineSELECT_CCMASK(N, DCI);
  case SystemZISD::GET_CCMASK:  return combineGET_CCMASK(N, DCI);
  }

  return SDValue();
}
// Return the demanded elements for the OpNo source operand of Op.  DemandedElts
// has the same number of elements as Op.
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts,
                                    unsigned OpNo) {
  EVT VT = Op.getValueType();
  unsigned NumElts = (VT.isVector() ? VT.getVectorNumElements() : 1);
  APInt SrcDemE;
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
    unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (Id) {
    case Intrinsic::s390_vpksh:   // PACKS
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:  // PACKS_CC
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:  // PACKLS
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs: // PACKLS_CC
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
      // VECTOR PACK truncates the elements of two source vectors into one.
      SrcDemE = DemandedElts;
      if (OpNo == 2)
        SrcDemE.lshrInPlace(NumElts / 2);
      SrcDemE = SrcDemE.trunc(NumElts / 2);
      break;
      // VECTOR UNPACK extends half the elements of the source vector.
    case Intrinsic::s390_vuphb:  // VECTOR UNPACK HIGH
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH
    case Intrinsic::s390_vuplhh:
    case Intrinsic::s390_vuplhf:
      SrcDemE = APInt(NumElts * 2, 0);
      SrcDemE.insertBits(DemandedElts, 0);
      break;
    case Intrinsic::s390_vuplb:  // VECTOR UNPACK LOW
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf:
    case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW
    case Intrinsic::s390_vupllh:
    case Intrinsic::s390_vupllf:
      SrcDemE = APInt(NumElts * 2, 0);
      SrcDemE.insertBits(DemandedElts, NumElts);
      break;
    case Intrinsic::s390_vpdi: {
      // VECTOR PERMUTE DWORD IMMEDIATE selects one element from each source.
      SrcDemE = APInt(NumElts, 0);
      if (!DemandedElts[OpNo - 1])
        break;
      unsigned Mask = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
      unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
      // Demand input element 0 or 1, given by the mask bit value.
      SrcDemE.setBit((Mask & MaskBit)? 1 : 0);
      break;
    }
    case Intrinsic::s390_vsldb: {
      // VECTOR SHIFT LEFT DOUBLE BY BYTE
      assert(VT == MVT::v16i8 && "Unexpected type.");
      unsigned FirstIdx = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
      assert (FirstIdx > 0 && FirstIdx < 16 && "Unused operand.");
      unsigned NumSrc0Els = 16 - FirstIdx;
      SrcDemE = APInt(NumElts, 0);
      if (OpNo == 1) {
        APInt DemEls = DemandedElts.trunc(NumSrc0Els);
        SrcDemE.insertBits(DemEls, FirstIdx);
      } else {
        APInt DemEls = DemandedElts.lshr(NumSrc0Els);
        SrcDemE.insertBits(DemEls, 0);
      }
      break;
    }
    case Intrinsic::s390_vperm:
      SrcDemE = APInt(NumElts, 1);
      break;
    default:
      llvm_unreachable("Unhandled intrinsic.");
      break;
    }
  } else {
    switch (Opcode) {
    case SystemZISD::JOIN_DWORDS:
      // Scalar operand.
      SrcDemE = APInt(1, 1);
      break;
    case SystemZISD::SELECT_CCMASK:
      SrcDemE = DemandedElts;
      break;
    default:
      llvm_unreachable("Unhandled opcode.");
      break;
    }
  }
  return SrcDemE;
}
static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known,
                                  const APInt &DemandedElts,
                                  const SelectionDAG &DAG, unsigned Depth,
                                  unsigned OpNo) {
  APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo);
  APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1);
  unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits();
  KnownBits LHSKnown(SrcBitWidth), RHSKnown(SrcBitWidth);
  DAG.computeKnownBits(Op.getOperand(OpNo), LHSKnown, Src0DemE, Depth + 1);
  DAG.computeKnownBits(Op.getOperand(OpNo + 1), RHSKnown, Src1DemE, Depth + 1);
  Known.Zero = LHSKnown.Zero & RHSKnown.Zero;
  Known.One = LHSKnown.One & RHSKnown.One;
}
void
SystemZTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                     KnownBits &Known,
                                                     const APInt &DemandedElts,
                                                     const SelectionDAG &DAG,
                                                     unsigned Depth) const {
  Known.resetAll();

  // Intrinsic CC result is returned in the two low bits.
  unsigned tmp0, tmp1; // not used
  if (Op.getResNo() == 1 && isIntrinsicWithCC(Op, tmp0, tmp1)) {
    Known.Zero.setBitsFrom(2);
    return;
  }
  EVT VT = Op.getValueType();
  if (Op.getResNo() != 0 || VT == MVT::Untyped)
    return;
  assert (Known.getBitWidth() == VT.getScalarSizeInBits() &&
          "KnownBits does not match VT in bitwidth");
  assert ((!VT.isVector() ||
           (DemandedElts.getBitWidth() == VT.getVectorNumElements())) &&
          "DemandedElts does not match VT number of elements");
  unsigned BitWidth = Known.getBitWidth();
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
    bool IsLogical = false;
    unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (Id) {
    case Intrinsic::s390_vpksh:   // PACKS
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:  // PACKS_CC
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:  // PACKLS
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs: // PACKLS_CC
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
    case Intrinsic::s390_vpdi:
    case Intrinsic::s390_vsldb:
    case Intrinsic::s390_vperm:
      computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 1);
      break;
    case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH
    case Intrinsic::s390_vuplhh:
    case Intrinsic::s390_vuplhf:
    case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW
    case Intrinsic::s390_vupllh:
    case Intrinsic::s390_vupllf:
      IsLogical = true;
      LLVM_FALLTHROUGH;
    case Intrinsic::s390_vuphb:  // VECTOR UNPACK HIGH
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplb:  // VECTOR UNPACK LOW
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf: {
      SDValue SrcOp = Op.getOperand(1);
      unsigned SrcBitWidth = SrcOp.getScalarValueSizeInBits();
      Known = KnownBits(SrcBitWidth);
      APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 0);
      DAG.computeKnownBits(SrcOp, Known, SrcDemE, Depth + 1);
      if (IsLogical) {
        Known = Known.zext(BitWidth);
        Known.Zero.setBitsFrom(SrcBitWidth);
      } else
        Known = Known.sext(BitWidth);
      break;
    }
    default:
      break;
    }
  } else {
    switch (Opcode) {
    case SystemZISD::JOIN_DWORDS:
    case SystemZISD::SELECT_CCMASK:
      computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 0);
      break;
    case SystemZISD::REPLICATE: {
      SDValue SrcOp = Op.getOperand(0);
      DAG.computeKnownBits(SrcOp, Known, Depth + 1);
      if (Known.getBitWidth() < BitWidth && isa<ConstantSDNode>(SrcOp))
        Known = Known.sext(BitWidth); // VREPI sign extends the immediate.
      break;
    }
    default:
      break;
    }
  }

  // Known has the width of the source operand(s). Adjust if needed to match
  // the passed bitwidth.
  if (Known.getBitWidth() != BitWidth)
    Known = Known.zextOrTrunc(BitWidth);
}
static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts,
                                        const SelectionDAG &DAG, unsigned Depth,
                                        unsigned OpNo) {
  APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo);
  unsigned LHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo), Src0DemE, Depth + 1);
  if (LHS == 1) return 1; // Early out.
  APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1);
  unsigned RHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1);
  if (RHS == 1) return 1; // Early out.
  unsigned Common = std::min(LHS, RHS);
  unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits();
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getScalarSizeInBits();
  if (SrcBitWidth > VTBits) { // PACK
    unsigned SrcExtraBits = SrcBitWidth - VTBits;
    if (Common > SrcExtraBits)
      return (Common - SrcExtraBits);
    return 1;
  }
  assert (SrcBitWidth == VTBits && "Expected operands of same bitwidth.");
  return Common;
}
unsigned
SystemZTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  if (Op.getResNo() != 0)
    return 1;
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
    unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (Id) {
    case Intrinsic::s390_vpksh:   // PACKS
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:  // PACKS_CC
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:  // PACKLS
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs: // PACKLS_CC
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
    case Intrinsic::s390_vpdi:
    case Intrinsic::s390_vsldb:
    case Intrinsic::s390_vperm:
      return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 1);
    case Intrinsic::s390_vuphb:  // VECTOR UNPACK HIGH
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplb:  // VECTOR UNPACK LOW
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf: {
      SDValue PackedOp = Op.getOperand(1);
      APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 1);
      unsigned Tmp = DAG.ComputeNumSignBits(PackedOp, SrcDemE, Depth + 1);
      EVT VT = Op.getValueType();
      unsigned VTBits = VT.getScalarSizeInBits();
      Tmp += VTBits - PackedOp.getScalarValueSizeInBits();
      return Tmp;
    }
    default:
      break;
    }
  } else {
    switch (Opcode) {
    case SystemZISD::SELECT_CCMASK:
      return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 0);
    default:
      break;
    }
  }

  return 1;
}
//===----------------------------------------------------------------------===//
// Custom insertion
//===----------------------------------------------------------------------===//

// Create a new basic block after MBB.
static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) {
  MachineFunction &MF = *MBB->getParent();
  MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
  MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
  return NewMBB;
}

// Split MBB after MI and return the new block (the one that contains
// instructions after MI).
static MachineBasicBlock *splitBlockAfter(MachineBasicBlock::iterator MI,
                                          MachineBasicBlock *MBB) {
  MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
  NewMBB->splice(NewMBB->begin(), MBB,
                 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
  return NewMBB;
}

// Split MBB before MI and return the new block (the one that contains MI).
static MachineBasicBlock *splitBlockBefore(MachineBasicBlock::iterator MI,
                                           MachineBasicBlock *MBB) {
  MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
  NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
  NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
  return NewMBB;
}

// Force base value Base into a register before MI.  Return the register.
static unsigned forceReg(MachineInstr &MI, MachineOperand &Base,
                         const SystemZInstrInfo *TII) {
  if (Base.isReg())
    return Base.getReg();

  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg)
      .add(Base)
      .addImm(0)
      .addReg(0);
  return Reg;
}
// The CC operand of MI might be missing a kill marker because there
// were multiple uses of CC, and ISel didn't know which to mark.
// Figure out whether MI should have had a kill marker.
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB) {
  // Scan forward through BB for a use/def of CC.
  MachineBasicBlock::iterator miI(std::next(MachineBasicBlock::iterator(MI)));
  for (MachineBasicBlock::iterator miE = MBB->end(); miI != miE; ++miI) {
    const MachineInstr& mi = *miI;
    if (mi.readsRegister(SystemZ::CC))
      return false;
    if (mi.definesRegister(SystemZ::CC))
      break; // Should have kill-flag - update below.
  }

  // If we hit the end of the block, check whether CC is live into a
  // successor.
  if (miI == MBB->end()) {
    for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI)
      if ((*SI)->isLiveIn(SystemZ::CC))
        return false;
  }

  return true;
}

// Return true if it is OK for this Select pseudo-opcode to be cascaded
// together with other Select pseudo-opcodes into a single basic-block with
// a conditional jump around it.
static bool isSelectPseudo(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
    return true;

  default:
    return false;
  }
}
// Helper function, which inserts PHI functions into SinkMBB:
//   %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
// where %FalseValue(i) and %TrueValue(i) are taken from the consequent Selects
// in [MIItBegin, MIItEnd) range.
static void createPHIsForSelects(MachineBasicBlock::iterator MIItBegin,
                                 MachineBasicBlock::iterator MIItEnd,
                                 MachineBasicBlock *TrueMBB,
                                 MachineBasicBlock *FalseMBB,
                                 MachineBasicBlock *SinkMBB) {
  MachineFunction *MF = TrueMBB->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  unsigned CCValid = MIItBegin->getOperand(3).getImm();
  unsigned CCMask = MIItBegin->getOperand(4).getImm();
  DebugLoc DL = MIItBegin->getDebugLoc();

  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();

  // As we are creating the PHIs, we have to be careful if there is more than
  // one.  Later Selects may reference the results of earlier Selects, but later
  // PHIs have to reference the individual true/false inputs from earlier PHIs.
  // That also means that PHI construction must work forward from earlier to
  // later, and that the code must maintain a mapping from earlier PHI's
  // destination registers, and the registers that went into the PHI.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;

  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
    unsigned DestReg = MIIt->getOperand(0).getReg();
    unsigned TrueReg = MIIt->getOperand(1).getReg();
    unsigned FalseReg = MIIt->getOperand(2).getReg();

    // If this Select we are generating is the opposite condition from
    // the jump we generated, then we have to swap the operands for the
    // PHI that is going to be generated.
    if (MIIt->getOperand(4).getImm() == (CCValid ^ CCMask))
      std::swap(TrueReg, FalseReg);

    if (RegRewriteTable.find(TrueReg) != RegRewriteTable.end())
      TrueReg = RegRewriteTable[TrueReg].first;

    if (RegRewriteTable.find(FalseReg) != RegRewriteTable.end())
      FalseReg = RegRewriteTable[FalseReg].second;

    BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg)
      .addReg(TrueReg).addMBB(TrueMBB)
      .addReg(FalseReg).addMBB(FalseMBB);

    // Add this PHI to the rewrite table.
    RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
  }
}
// Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitSelect(MachineInstr &MI,
                                  MachineBasicBlock *MBB) const {
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());

  unsigned CCValid = MI.getOperand(3).getImm();
  unsigned CCMask = MI.getOperand(4).getImm();
  DebugLoc DL = MI.getDebugLoc();

  // If we have a sequence of Select* pseudo instructions using the
  // same condition code value, we want to expand all of them into
  // a single pair of basic blocks using the same condition.
  MachineInstr *LastMI = &MI;
  MachineBasicBlock::iterator NextMIIt =
      std::next(MachineBasicBlock::iterator(MI));

  if (isSelectPseudo(MI))
    while (NextMIIt != MBB->end() && isSelectPseudo(*NextMIIt) &&
           NextMIIt->getOperand(3).getImm() == CCValid &&
           (NextMIIt->getOperand(4).getImm() == CCMask ||
            NextMIIt->getOperand(4).getImm() == (CCValid ^ CCMask))) {
      LastMI = &*NextMIIt;
      ++NextMIIt;
    }

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *JoinMBB  = splitBlockBefore(MI, MBB);
  MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);

  // Unless CC was killed in the last Select instruction, mark it as
  // live-in to both FalseMBB and JoinMBB.
  if (!LastMI->killsRegister(SystemZ::CC) && !checkCCKill(*LastMI, JoinMBB)) {
    FalseMBB->addLiveIn(SystemZ::CC);
    JoinMBB->addLiveIn(SystemZ::CC);
  }

  //  StartMBB:
  //   BRC CCMask, JoinMBB
  //   # fallthrough to FalseMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
  MBB->addSuccessor(JoinMBB);
  MBB->addSuccessor(FalseMBB);

  //  FalseMBB:
  //   # fallthrough to JoinMBB
  MBB = FalseMBB;
  MBB->addSuccessor(JoinMBB);

  //  JoinMBB:
  //   %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ]
  //  ...
  MBB = JoinMBB;
  MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
  MachineBasicBlock::iterator MIItEnd =
      std::next(MachineBasicBlock::iterator(LastMI));
  createPHIsForSelects(MIItBegin, MIItEnd, StartMBB, FalseMBB, MBB);

  StartMBB->erase(MIItBegin, MIItEnd);
  return JoinMBB;
}
// Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
// StoreOpcode is the store to use and Invert says whether the store should
// happen when the condition is false rather than true.  If a STORE ON
// CONDITION is available, STOCOpcode is its opcode, otherwise it is 0.
MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI,
                                                        MachineBasicBlock *MBB,
                                                        unsigned StoreOpcode,
                                                        unsigned STOCOpcode,
                                                        bool Invert) const {
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());

  unsigned SrcReg = MI.getOperand(0).getReg();
  MachineOperand Base = MI.getOperand(1);
  int64_t Disp = MI.getOperand(2).getImm();
  unsigned IndexReg = MI.getOperand(3).getReg();
  unsigned CCValid = MI.getOperand(4).getImm();
  unsigned CCMask = MI.getOperand(5).getImm();
  DebugLoc DL = MI.getDebugLoc();

  StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);

  // Use STOCOpcode if possible.  We could use different store patterns in
  // order to avoid matching the index register, but the performance trade-offs
  // might be more complicated in that case.
  if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
    if (Invert)
      CCMask ^= CCValid;

    // ISel pattern matching also adds a load memory operand of the same
    // address, so take special care to find the storing memory operand.
    MachineMemOperand *MMO = nullptr;
    for (auto *I : MI.memoperands())
      if (I->isStore()) {
        MMO = I;
        break;
      }

    BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
      .addReg(SrcReg)
      .add(Base)
      .addImm(Disp)
      .addImm(CCValid)
      .addImm(CCMask)
      .addMemOperand(MMO);

    MI.eraseFromParent();
    return MBB;
  }

  // Get the condition needed to branch around the store.
  if (!Invert)
    CCMask ^= CCValid;

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *JoinMBB  = splitBlockBefore(MI, MBB);
  MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);

  // Unless CC was killed in the CondStore instruction, mark it as
  // live-in to both FalseMBB and JoinMBB.
  if (!MI.killsRegister(SystemZ::CC) && !checkCCKill(MI, JoinMBB)) {
    FalseMBB->addLiveIn(SystemZ::CC);
    JoinMBB->addLiveIn(SystemZ::CC);
  }

  //  StartMBB:
  //   BRC CCMask, JoinMBB
  //   # fallthrough to FalseMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
  MBB->addSuccessor(JoinMBB);
  MBB->addSuccessor(FalseMBB);

  //  FalseMBB:
  //   store %SrcReg, %Disp(%Index,%Base)
  //   # fallthrough to JoinMBB
  MBB = FalseMBB;
  BuildMI(MBB, DL, TII->get(StoreOpcode))
    .addReg(SrcReg)
    .add(Base)
    .addImm(Disp)
    .addReg(IndexReg);
  MBB->addSuccessor(JoinMBB);

  MI.eraseFromParent();
  return JoinMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_*
// or ATOMIC_SWAP{,W} instruction MI.  BinOpcode is the instruction that
// performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}.
// BitSize is the width of the field in bits, or 0 if this is a partword
// ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize
// is one of the operands.  Invert says whether the field should be
// inverted after performing BinOpcode (e.g. for NAND).
MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned BinOpcode,
    unsigned BitSize, bool Invert) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool IsSubWord = (BitSize < 32);

  // Extract the operands.  Base can be a register or a frame index.
  // Src2 can be a register or immediate.
  unsigned Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  MachineOperand Src2 = earlyUseOperand(MI.getOperand(3));
  unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
  unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
  DebugLoc DL = MI.getDebugLoc();
  if (IsSubWord)
    BitSize = MI.getOperand(6).getImm();

  // Subword operations use 32-bit registers.
  const TargetRegisterClass *RC = (BitSize <= 32 ?
                                   &SystemZ::GR32BitRegClass :
                                   &SystemZ::GR64BitRegClass);
  unsigned LOpcode  = BitSize <= 32 ? SystemZ::L  : SystemZ::LG;
  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;

  // Get the right opcodes for the displacement.
  LOpcode  = TII->getOpcodeForOffset(LOpcode,  Disp);
  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  unsigned OrigVal       = MRI.createVirtualRegister(RC);
  unsigned OldVal        = MRI.createVirtualRegister(RC);
  unsigned NewVal        = (BinOpcode || IsSubWord ?
                            MRI.createVirtualRegister(RC) : Src2.getReg());
  unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
  unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);

  // Insert a basic block for the main loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB  = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB  = emitBlockAfter(StartMBB);

  //  StartMBB:
  //   ...
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   %RotatedNewVal = OP %RotatedOldVal, %Src2
  //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(LoopMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  if (Invert) {
    // Perform the operation normally and then invert every bit of the field.
    unsigned Tmp = MRI.createVirtualRegister(RC);
    BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2);
    if (BitSize <= 32)
      // XILF with the upper BitSize bits set.
      BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
        .addReg(Tmp).addImm(-1U << (32 - BitSize));
    else {
      // Use LCGR and add -1 to the result, which is more compact than
      // an XILF, XILH pair.
      unsigned Tmp2 = MRI.createVirtualRegister(RC);
      BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
      BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
        .addReg(Tmp2).addImm(-1);
    }
  } else if (BinOpcode)
    // A simple binary operation.
    BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
      .addReg(RotatedOldVal)
      .add(Src2);
  else if (IsSubWord)
    // Use RISBG to rotate Src2 into position and use it to replace the
    // field in RotatedOldVal.
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
      .addReg(RotatedOldVal).addReg(Src2.getReg())
      .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
    .addReg(OldVal)
    .addReg(NewVal)
    .add(Base)
    .addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();
  return DoneMBB;
}
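// Worked example (illustrative only): a partword NAND pseudo such as
// ATOMIC_LOADW_NRi (see the dispatch in EmitInstrWithCustomInserter below)
// reaches this function with BinOpcode == SystemZ::NR, BitSize == 0 and
// Invert == true; for an i8 access the real field width, 8, is read from
// operand 6.  The first RLL places the byte of interest in the top 8 bits of
// %RotatedOldVal, NR applies the AND, and the XILF mask
// -1U << (32 - 8) == 0xFF000000 then flips only those field bits to form the
// NAND result before the inverse RLL and the CS retry write the word back.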

// Implement EmitInstrWithCustomInserter for pseudo
// ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI.  CompareOpcode is the
// instruction that should be used to compare the current field with the
// minimum or maximum value.  KeepOldMask is the BRC condition-code mask
// for when the current field should be kept.  BitSize is the width of
// the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned CompareOpcode,
    unsigned KeepOldMask, unsigned BitSize) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool IsSubWord = (BitSize < 32);

  // Extract the operands.  Base can be a register or a frame index.
  unsigned Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  unsigned Src2 = MI.getOperand(3).getReg();
  unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
  unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
  DebugLoc DL = MI.getDebugLoc();
  if (IsSubWord)
    BitSize = MI.getOperand(6).getImm();

  // Subword operations use 32-bit registers.
  const TargetRegisterClass *RC = (BitSize <= 32 ?
                                   &SystemZ::GR32BitRegClass :
                                   &SystemZ::GR64BitRegClass);
  unsigned LOpcode  = BitSize <= 32 ? SystemZ::L  : SystemZ::LG;
  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;

  // Get the right opcodes for the displacement.
  LOpcode  = TII->getOpcodeForOffset(LOpcode,  Disp);
  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  unsigned OrigVal       = MRI.createVirtualRegister(RC);
  unsigned OldVal        = MRI.createVirtualRegister(RC);
  unsigned NewVal        = MRI.createVirtualRegister(RC);
  unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
  unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
  unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);

  // Insert 3 basic blocks for the loop.
  MachineBasicBlock *StartMBB  = MBB;
  MachineBasicBlock *DoneMBB   = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB   = emitBlockAfter(StartMBB);
  MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
  MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);

  //  StartMBB:
  //   ...
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   CompareOpcode %RotatedOldVal, %Src2
  //   BRC KeepOldMask, UpdateMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(UpdateMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CompareOpcode))
    .addReg(RotatedOldVal).addReg(Src2);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
  MBB->addSuccessor(UpdateMBB);
  MBB->addSuccessor(UseAltMBB);

  //  UseAltMBB:
  //   %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
  //   # fall through to UpdateMBB
  MBB = UseAltMBB;
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
      .addReg(RotatedOldVal).addReg(Src2)
      .addImm(32).addImm(31 + BitSize).addImm(0);
  MBB->addSuccessor(UpdateMBB);

  //  UpdateMBB:
  //   %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
  //                        [ %RotatedAltVal, UseAltMBB ]
  //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = UpdateMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
    .addReg(RotatedOldVal).addMBB(LoopMBB)
    .addReg(RotatedAltVal).addMBB(UseAltMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
    .addReg(OldVal)
    .addReg(NewVal)
    .add(Base)
    .addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();
  return DoneMBB;
}
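// Illustrative note: for ATOMIC_LOADW_MIN (see the dispatch in
// EmitInstrWithCustomInserter below) CompareOpcode is CR and KeepOldMask is
// CCMASK_CMP_LE, so the loop above keeps the rotated old field whenever
// old <= src2 and otherwise splices %Src2 into the word via the RISBG in
// UseAltMBB before the CS retry.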

// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
// instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
                                          MachineBasicBlock *MBB) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Extract the operands.  Base can be a register or a frame index.
  unsigned Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  unsigned OrigCmpVal = MI.getOperand(3).getReg();
  unsigned OrigSwapVal = MI.getOperand(4).getReg();
  unsigned BitShift = MI.getOperand(5).getReg();
  unsigned NegBitShift = MI.getOperand(6).getReg();
  int64_t BitSize = MI.getOperand(7).getImm();
  DebugLoc DL = MI.getDebugLoc();

  const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;

  // Get the right opcodes for the displacement.
  unsigned LOpcode  = TII->getOpcodeForOffset(SystemZ::L,  Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  unsigned OrigOldVal   = MRI.createVirtualRegister(RC);
  unsigned OldVal       = MRI.createVirtualRegister(RC);
  unsigned CmpVal       = MRI.createVirtualRegister(RC);
  unsigned SwapVal      = MRI.createVirtualRegister(RC);
  unsigned StoreVal     = MRI.createVirtualRegister(RC);
  unsigned RetryOldVal  = MRI.createVirtualRegister(RC);
  unsigned RetryCmpVal  = MRI.createVirtualRegister(RC);
  unsigned RetrySwapVal = MRI.createVirtualRegister(RC);

  // Insert 2 basic blocks for the loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB  = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB  = emitBlockAfter(StartMBB);
  MachineBasicBlock *SetMBB   = emitBlockAfter(LoopMBB);

  //  StartMBB:
  //   ...
  //   %OrigOldVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
    .add(Base)
    .addImm(Disp)
    .addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal      = phi [ %OrigOldVal, EntryBB ], [ %RetryOldVal, SetMBB ]
  //   %CmpVal      = phi [ %OrigCmpVal, EntryBB ], [ %RetryCmpVal, SetMBB ]
  //   %SwapVal     = phi [ %OrigSwapVal, EntryBB ], [ %RetrySwapVal, SetMBB ]
  //   %Dest        = RLL %OldVal, BitSize(%BitShift)
  //                      ^^ The low BitSize bits contain the field
  //                         of interest.
  //   %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
  //                      ^^ Replace the upper 32-BitSize bits of the
  //                         comparison value with those that we loaded,
  //                         so that we can use a full word comparison.
  //   CR %Dest, %RetryCmpVal
  //   JNE DoneMBB
  //   # Fall through to SetMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigOldVal).addMBB(StartMBB)
    .addReg(RetryOldVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
    .addReg(OrigCmpVal).addMBB(StartMBB)
    .addReg(RetryCmpVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
    .addReg(OrigSwapVal).addMBB(StartMBB)
    .addReg(RetrySwapVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
    .addReg(OldVal).addReg(BitShift).addImm(BitSize);
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
    .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::CR))
    .addReg(Dest).addReg(RetryCmpVal);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP)
    .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
  MBB->addSuccessor(DoneMBB);
  MBB->addSuccessor(SetMBB);

  //  SetMBB:
  //   %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
  //                      ^^ Replace the upper 32-BitSize bits of the new
  //                         value with those that we loaded.
  //   %StoreVal     = RLL %RetrySwapVal, -BitSize(%NegBitShift)
  //                      ^^ Rotate the new field to its proper position.
  //   %RetryOldVal  = CS %Dest, %StoreVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to ExitMBB
  MBB = SetMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
    .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
    .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
  BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
    .addReg(Dest)
    .addReg(StoreVal)
    .add(Base)
    .addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  // If the CC def wasn't dead in the ATOMIC_CMP_SWAPW, mark CC as live-in
  // to the block after the loop.  At this point, CC may have been defined
  // either by the CR in LoopMBB or by the CS in SetMBB.
  if (!MI.registerDefIsDead(SystemZ::CC))
    DoneMBB->addLiveIn(SystemZ::CC);

  MI.eraseFromParent();
  return DoneMBB;
}
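// Worked example of the masking above (illustrative only): for an 8-bit
// field, 63 - BitSize == 55, so the first RISBG32 replaces the upper
// 32 - 8 == 24 bits of %CmpVal with the corresponding bits of the loaded
// word, leaving the expected field in the low 8 bits; that is what makes the
// subsequent full-word CR comparison equivalent to comparing just the field.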

// Emit a move from two GR64s to a GR128.
MachineBasicBlock *
SystemZTargetLowering::emitPair128(MachineInstr &MI,
                                   MachineBasicBlock *MBB) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dest = MI.getOperand(0).getReg();
  unsigned Hi = MI.getOperand(1).getReg();
  unsigned Lo = MI.getOperand(2).getReg();
  unsigned Tmp1 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
  unsigned Tmp2 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Tmp1);
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Tmp2)
    .addReg(Tmp1).addReg(Hi).addImm(SystemZ::subreg_h64);
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(Tmp2).addReg(Lo).addImm(SystemZ::subreg_l64);

  MI.eraseFromParent();
  return MBB;
}

// Emit an extension from a GR64 to a GR128.  ClearEven is true
// if the high register of the GR128 value must be cleared or false if
// it's "don't care".
MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI,
                                                     MachineBasicBlock *MBB,
                                                     bool ClearEven) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dest = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();
  unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
  if (ClearEven) {
    unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
    unsigned Zero64   = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);

    BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
      .addImm(0);
    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
      .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
    In128 = NewIn128;
  }
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(In128).addReg(Src).addImm(SystemZ::subreg_l64);

  MI.eraseFromParent();
  return MBB;
}
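// Usage note: ZEXT128 is lowered here with ClearEven == true, so the high
// 64 bits of the result are zeroed via the LLILL above, while AEXT128 uses
// ClearEven == false and leaves them undefined (see the dispatch in
// EmitInstrWithCustomInserter below).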

MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  MachineOperand DestBase = earlyUseOperand(MI.getOperand(0));
  uint64_t DestDisp = MI.getOperand(1).getImm();
  MachineOperand SrcBase = earlyUseOperand(MI.getOperand(2));
  uint64_t SrcDisp = MI.getOperand(3).getImm();
  uint64_t Length = MI.getOperand(4).getImm();

  // When generating more than one CLC, all but the last will need to
  // branch to the end when a difference is found.
  MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
                               splitBlockAfter(MI, MBB) : nullptr);

  // Check for the loop form, in which operand 5 is the trip count.
  if (MI.getNumExplicitOperands() > 5) {
    bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);

    uint64_t StartCountReg = MI.getOperand(5).getReg();
    uint64_t StartSrcReg   = forceReg(MI, SrcBase, TII);
    uint64_t StartDestReg  = (HaveSingleBase ? StartSrcReg :
                              forceReg(MI, DestBase, TII));

    const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
    uint64_t ThisSrcReg  = MRI.createVirtualRegister(RC);
    uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
                            MRI.createVirtualRegister(RC));
    uint64_t NextSrcReg  = MRI.createVirtualRegister(RC);
    uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
                            MRI.createVirtualRegister(RC));

    RC = &SystemZ::GR64BitRegClass;
    uint64_t ThisCountReg = MRI.createVirtualRegister(RC);
    uint64_t NextCountReg = MRI.createVirtualRegister(RC);

    MachineBasicBlock *StartMBB = MBB;
    MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
    MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
    MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);

    //  StartMBB:
    //   # fall through to LoopMBB
    MBB->addSuccessor(LoopMBB);

    //  LoopMBB:
    //   %ThisDestReg = phi [ %StartDestReg, StartMBB ],
    //                      [ %NextDestReg, NextMBB ]
    //   %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
    //                     [ %NextSrcReg, NextMBB ]
    //   %ThisCountReg = phi [ %StartCountReg, StartMBB ],
    //                       [ %NextCountReg, NextMBB ]
    //   ( PFD 2, 768+DestDisp(%ThisDestReg) )
    //   Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
    //   ( JLH EndMBB )
    //
    // The prefetch is used only for MVC.  The JLH is used only for CLC.
    MBB = LoopMBB;

    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
      .addReg(StartDestReg).addMBB(StartMBB)
      .addReg(NextDestReg).addMBB(NextMBB);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
        .addReg(StartSrcReg).addMBB(StartMBB)
        .addReg(NextSrcReg).addMBB(NextMBB);
    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
      .addReg(StartCountReg).addMBB(StartMBB)
      .addReg(NextCountReg).addMBB(NextMBB);
    if (Opcode == SystemZ::MVC)
      BuildMI(MBB, DL, TII->get(SystemZ::PFD))
        .addImm(SystemZ::PFD_WRITE)
        .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
    BuildMI(MBB, DL, TII->get(Opcode))
      .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
      .addReg(ThisSrcReg).addImm(SrcDisp);
    if (EndMBB) {
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
    }

    //  NextMBB:
    //   %NextDestReg = LA 256(%ThisDestReg)
    //   %NextSrcReg = LA 256(%ThisSrcReg)
    //   %NextCountReg = AGHI %ThisCountReg, -1
    //   CGHI %NextCountReg, 0
    //   JLH LoopMBB
    //   # fall through to DoneMBB
    //
    // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
    MBB = NextMBB;

    BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
      .addReg(ThisDestReg).addImm(256).addReg(0);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
        .addReg(ThisSrcReg).addImm(256).addReg(0);
    BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
      .addReg(ThisCountReg).addImm(-1);
    BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
      .addReg(NextCountReg).addImm(0);
    BuildMI(MBB, DL, TII->get(SystemZ::BRC))
      .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
      .addMBB(LoopMBB);
    MBB->addSuccessor(LoopMBB);
    MBB->addSuccessor(DoneMBB);

    DestBase = MachineOperand::CreateReg(NextDestReg, false);
    SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
    Length &= 255;
    if (EndMBB && !Length)
      // If the loop handled the whole CLC range, DoneMBB will be empty with
      // CC live-through into EndMBB, so add it as live-in.
      DoneMBB->addLiveIn(SystemZ::CC);
    MBB = DoneMBB;
  }
  // Handle any remaining bytes with straight-line code.
  while (Length > 0) {
    uint64_t ThisLength = std::min(Length, uint64_t(256));
    // The previous iteration might have created out-of-range displacements.
    // Apply them using LAY if so.
    if (!isUInt<12>(DestDisp)) {
      unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
        .add(DestBase)
        .addImm(DestDisp)
        .addReg(0);
      DestBase = MachineOperand::CreateReg(Reg, false);
      DestDisp = 0;
    }
    if (!isUInt<12>(SrcDisp)) {
      unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
        .add(SrcBase)
        .addImm(SrcDisp)
        .addReg(0);
      SrcBase = MachineOperand::CreateReg(Reg, false);
      SrcDisp = 0;
    }
    BuildMI(*MBB, MI, DL, TII->get(Opcode))
      .add(DestBase)
      .addImm(DestDisp)
      .addImm(ThisLength)
      .add(SrcBase)
      .addImm(SrcDisp)
      .setMemRefs(MI.memoperands());
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    Length -= ThisLength;
    // If there's another CLC to go, branch to the end if a difference
    // was found.
    if (EndMBB && Length > 0) {
      MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB);
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
      MBB = NextMBB;
    }
  }
  if (EndMBB) {
    MBB->addSuccessor(EndMBB);
    MBB = EndMBB;
    MBB->addLiveIn(SystemZ::CC);
  }

  MI.eraseFromParent();
  return MBB;
}
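// Illustrative example (byte count hypothetical): assuming the DAG lowering
// chose the loop form for a 700-byte MVCLoop, the loop above executes two
// 256-byte MVCs (trip count supplied in operand 5), Length &= 255 leaves
// 188 bytes, and the straight-line tail then emits one final 188-byte MVC.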

// Decompose string pseudo-instruction MI into a loop that continually performs
// Opcode until CC != 3.
MachineBasicBlock *SystemZTargetLowering::emitStringWrapper(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  uint64_t End1Reg   = MI.getOperand(0).getReg();
  uint64_t Start1Reg = MI.getOperand(1).getReg();
  uint64_t Start2Reg = MI.getOperand(2).getReg();
  uint64_t CharReg   = MI.getOperand(3).getReg();

  const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
  uint64_t This1Reg = MRI.createVirtualRegister(RC);
  uint64_t This2Reg = MRI.createVirtualRegister(RC);
  uint64_t End2Reg  = MRI.createVirtualRegister(RC);

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);

  //  StartMBB:
  //   # fall through to LoopMBB
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
  //   %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
  //   R0L = %CharReg
  //   %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
  //   JO LoopMBB
  //   # fall through to DoneMBB
  //
  // The load of R0L can be hoisted by post-RA LICM.
  MBB = LoopMBB;

  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
    .addReg(Start1Reg).addMBB(StartMBB)
    .addReg(End1Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
    .addReg(Start2Reg).addMBB(StartMBB)
    .addReg(End2Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
  BuildMI(MBB, DL, TII->get(Opcode))
    .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
    .addReg(This1Reg).addReg(This2Reg);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  DoneMBB->addLiveIn(SystemZ::CC);

  MI.eraseFromParent();
  return DoneMBB;
}
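// Usage note (hedged sketch): for CLSTLoop (see the dispatch below) Opcode is
// CLST, so the loop above re-issues CLST while CC == 3, i.e. while the
// instruction stopped after a CPU-determined number of bytes without reaching
// a result; %CharReg is copied into R0L because the string instructions read
// the terminating character from there, as the "-- uses R0L" comment notes.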

// Update TBEGIN instruction with final opcode and register clobbers.
MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode,
    bool NoFloat) const {
  MachineFunction &MF = *MBB->getParent();
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();

  // Update opcode.
  MI.setDesc(TII->get(Opcode));

  // We cannot handle a TBEGIN that clobbers the stack or frame pointer.
  // Make sure to add the corresponding GRSM bits if they are missing.
  uint64_t Control = MI.getOperand(2).getImm();
  static const unsigned GPRControlBit[16] = {
    0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
    0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
  };
  Control |= GPRControlBit[15];
  if (TFI->hasFP(MF))
    Control |= GPRControlBit[11];
  MI.getOperand(2).setImm(Control);

  // Add GPR clobbers.
  for (int I = 0; I < 16; I++) {
    if ((Control & GPRControlBit[I]) == 0) {
      unsigned Reg = SystemZMC::GR64Regs[I];
      MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
    }
  }

  // Add FPR/VR clobbers.
  if (!NoFloat && (Control & 4) != 0) {
    if (Subtarget.hasVector()) {
      for (int I = 0; I < 32; I++) {
        unsigned Reg = SystemZMC::VR128Regs[I];
        MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
      }
    } else {
      for (int I = 0; I < 16; I++) {
        unsigned Reg = SystemZMC::FP64Regs[I];
        MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
      }
    }
  }

  return MBB;
}
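// Illustrative note (interpretation, not asserted by the code itself):
// GPRControlBit[] maps each GPR index to the GRSM bit of its even/odd
// register pair, so the two ORs above force the pairs containing %r15 (the
// stack pointer) and, when a frame pointer is used, %r11 to be saved by the
// transaction; any pair whose bit is still clear is added to MI as an
// implicit def so later passes treat those registers as clobbered on abort.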

MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  DebugLoc DL = MI.getDebugLoc();

  unsigned SrcReg = MI.getOperand(0).getReg();

  // Create new virtual register of the same class as source.
  const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
  unsigned DstReg = MRI->createVirtualRegister(RC);

  // Replace pseudo with a normal load-and-test that models the def as
  // well.
  BuildMI(*MBB, MI, DL, TII->get(Opcode), DstReg)
    .addReg(SrcReg);
  MI.eraseFromParent();

  return MBB;
}

MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *MBB) const {
  switch (MI.getOpcode()) {
  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
    return emitSelect(MI, MBB);

  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore32Mux:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
  case SystemZ::CondStore32MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, true);

  case SystemZ::PAIR128:
    return emitPair128(MI, MBB);
  case SystemZ::AEXT128:
    return emitExt128(MI, MBB, false);
  case SystemZ::ZEXT128:
    return emitExt128(MI, MBB, true);

  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0, 0);
  case SystemZ::ATOMIC_SWAP_32:
    return emitAtomicLoadBinary(MI, MBB, 0, 32);
  case SystemZ::ATOMIC_SWAP_64:
    return emitAtomicLoadBinary(MI, MBB, 0, 64);

  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
  case SystemZ::ATOMIC_LOAD_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
  case SystemZ::ATOMIC_LOAD_AHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
  case SystemZ::ATOMIC_LOAD_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
  case SystemZ::ATOMIC_LOAD_AGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
  case SystemZ::ATOMIC_LOAD_AGHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
  case SystemZ::ATOMIC_LOAD_AGFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);

  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
  case SystemZ::ATOMIC_LOAD_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
  case SystemZ::ATOMIC_LOAD_SGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);

  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
  case SystemZ::ATOMIC_LOAD_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
  case SystemZ::ATOMIC_LOAD_NILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
  case SystemZ::ATOMIC_LOAD_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
  case SystemZ::ATOMIC_LOAD_NILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
  case SystemZ::ATOMIC_LOAD_NGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
  case SystemZ::ATOMIC_LOAD_NILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
  case SystemZ::ATOMIC_LOAD_NILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
  case SystemZ::ATOMIC_LOAD_NIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
  case SystemZ::ATOMIC_LOAD_NIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
  case SystemZ::ATOMIC_LOAD_NILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
  case SystemZ::ATOMIC_LOAD_NIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);

  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
  case SystemZ::ATOMIC_LOAD_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
  case SystemZ::ATOMIC_LOAD_OILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
  case SystemZ::ATOMIC_LOAD_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
  case SystemZ::ATOMIC_LOAD_OILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
  case SystemZ::ATOMIC_LOAD_OGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
  case SystemZ::ATOMIC_LOAD_OILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
  case SystemZ::ATOMIC_LOAD_OILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
  case SystemZ::ATOMIC_LOAD_OIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
  case SystemZ::ATOMIC_LOAD_OIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
  case SystemZ::ATOMIC_LOAD_OILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
  case SystemZ::ATOMIC_LOAD_OIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);

  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
  case SystemZ::ATOMIC_LOAD_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
  case SystemZ::ATOMIC_LOAD_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
  case SystemZ::ATOMIC_LOAD_XGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
  case SystemZ::ATOMIC_LOAD_XILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
  case SystemZ::ATOMIC_LOAD_XIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);

  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
  case SystemZ::ATOMIC_LOAD_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
  case SystemZ::ATOMIC_LOAD_NILLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
  case SystemZ::ATOMIC_LOAD_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
  case SystemZ::ATOMIC_LOAD_NILFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
  case SystemZ::ATOMIC_LOAD_NGRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
  case SystemZ::ATOMIC_LOAD_NILL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);

  case SystemZ::ATOMIC_LOADW_MIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_MIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_MIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_MAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_MAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_MAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_LOADW_UMIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_UMIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_UMIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_UMAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_UMAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_UMAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::MVCSequence:
  case SystemZ::MVCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCSequence:
  case SystemZ::NCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCSequence:
  case SystemZ::OCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCSequence:
  case SystemZ::XCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCSequence:
  case SystemZ::CLCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  case SystemZ::TBEGIN:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
  case SystemZ::TBEGIN_nofloat:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
  case SystemZ::TBEGINC:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
  case SystemZ::LTEBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
  case SystemZ::LTDBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
  case SystemZ::LTXBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);

  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
    return emitPatchPoint(MI, MBB);

  default:
    llvm_unreachable("Unexpected instr type to insert");
  }
}

// This is only used by the isel schedulers, and is needed only to prevent
// the compiler from crashing when list-ilp is used.
const TargetRegisterClass *
SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
  if (VT == MVT::Untyped)
    return &SystemZ::ADDR128BitRegClass;
  return TargetLowering::getRepRegClassFor(VT);
}